blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
683a0d4fbccfbe8e816eb6374cf52060952ab309
|
73eaf8da1446e91f31d2a81a2276ce79cc18a90b
|
/performance/performance_mod.myd.2.R
|
a65332d5ed80e279d751c4e2d28484c6115a376e
|
[] |
no_license
|
leandrocara/Msc.Tesis-SnowWebPlatform
|
8797133c7b55bbba3b950f147159fdeef5e23cad
|
b41124e7afc00ac4790d298cdc2307bfc432ef41
|
refs/heads/master
| 2021-10-27T11:42:59.352687
| 2018-11-22T13:41:10
| 2018-11-22T13:41:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,018
|
r
|
performance_mod.myd.2.R
|
# Benchmark script: reclassify MODIS (MOD/MYD) NDSI snow-cover rasters and
# time the processing loop below.
rm(list = ls())
library("raster")
# Reclassify raster x with reclassification matrix y (right=NA: intervals
# closed on the right for the table rows, exact matches for single codes)
rcl<- function(x,y){
reclassify(x, y, include.lowest=FALSE, right=NA)}
# Extract the 7-character date token from a MODIS file name, starting at the
# global offset `pos` (set to 10 further down)
corte <- function(x){substr(x,pos,pos+6)}
# list the .tif files in directory y whose names contain the date token of x
f.1 <- function(x,y){
x<- list.files(y,corte(x),full.names = T)
return(x[grepl(x = x,pattern = ".tif$")])}
#########
#### performance (timing) helper functions
# current wall-clock timestamp string; the argument is ignored
m <- function(x){
return(timestamp(quiet = T))
}
# Elapsed time between two timestamp() strings, formatted as "HH:MM:SS".
# x: character vector of two timestamp(quiet = TRUE) strings; the clock
# fields sit at fixed offsets 21-22 (hours), 24-25 (minutes), 27-28 (seconds).
# Assumes the second stamp is no more than 24 hours after the first.
f.t2<- function(x){
  hrs <- as.numeric(substr(x, 21, 22))
  mins <- as.numeric(substr(x, 24, 25))
  secs <- as.numeric(substr(x, 27, 28))
  d.sec <- secs[2] - secs[1]
  if (d.sec < 0) {  # borrow one minute
    d.sec <- d.sec + 60
    mins[1] <- mins[1] + 1
  }
  d.min <- mins[2] - mins[1]
  if (d.min < 0) {  # borrow one hour
    d.min <- d.min + 60
    hrs[1] <- hrs[1] + 1
  }
  d.hr <- hrs[2] - hrs[1]
  if (d.hr < 0) {   # interval crossed midnight
    d.hr <- d.hr + 24
  }
  paste(sprintf("%02d", d.hr), sprintf("%02d", d.min), sprintf("%02d", d.sec),
        sep = ":")
}
# unique cell values of a raster (handy for inspecting classification output)
pr <- function(x){
unique(getValues(x))
}
###################
# MODIS NDSI snow-cover code table:
# ##############################################################
# 0-40: Soil # 237: inland water
# 40-100: snow cover # 239: ocean
# 200: missing data # 250: cloud
# 201: no decision # 254: detector saturated
# 211: night # 255: fill
# ##############################################################
# input/output directories
dir.mod <-"~/TESIS/test2/mod/"
dir.myd <- "~/TESIS/test2/myd/"
dir.mod.c <-"~/TESIS/test2/c_mod/"
dir.myd.c <- "~/TESIS/test2/c_myd/"
dir.mod.myd.c <- "~/TESIS/test2/c_mod_myd/"
dir.mod.myd <- "~/TESIS/test2/mod_myd/"
# lmod <- list.files("~/TESIS/test2/mod/")
# lmyd <- list.files("~/TESIS/test2/myd/")
# offset of the date token inside MODIS file names (used by corte())
pos <- 10
lmod <- list.files(path=dir.mod,pattern = ".tif$")
lmyd <- list.files(path=dir.myd,pattern = ".tif$")
# lmod <- f.1(lmod,dir.mod)
# lmyd <- f.1(lmyd,dir.myd)
nodata <- c(200,201,211,237,239,254,250,255)
suelo <- c(seq(0,39))   # soil codes
nieve <- c(seq(40,100)) # snow codes
acc <- vector()
tabla <- data.frame()
### matrix used to build the cloud images: cloud (250) -> 1, all else -> 0
capa.nubes <- matrix(ncol=2,c(250,seq(0,100),200,201,211,237,239,254,255,1,rep(0,108)))
### reclassification table: soil -> 1, snow -> 2, nodata codes -> 0
snow.bare.clouds <- as.matrix(data.frame(col1=c(suelo,nieve,nodata),
col2=c(rep(1,length(suelo)),
rep(2,length(nieve)),
rep(0,length(nodata)))))
# mask table: NA -> 1, 0/1 -> 0 (used to fill cloudy MOD cells from MYD)
bool.clouds <- as.matrix(data.frame(col1=c(NA,0,1),col2=c(1,0,0)))
# na20 <- as.matrix(data.frame(col1=c(NA),col2=c(0)))
# cero2na <- as.matrix(data.frame(col1=c(0),col2=c(NA)))
# i <- 1
# Timed benchmark block: repeats the full MOD+MYD reclassify/merge/write
# pipeline 5000 times and records start/end timestamps.
{d1 <- m()
for(i in 1:5000){
# Read the images
# load the rasters
mod <- raster("~/TESIS/test2/MOD10A1.A2016089.h12v12.006.2016104051228.NDSI_Snow_Cover.tif")
# mod <- raster(paste(dir.mod,lmod,sep="/"))
c.mod<- rcl(mod, capa.nubes)
mod<- rcl(mod, snow.bare.clouds)# per snow.bare.clouds: 0 = clouds/nodata, 1 = soil, 2 = snow
# write mod, write c.mod, rename c.mod
### only for testing
lmod <- list.files(path=dir.mod,pattern = ".tif$")
writeRaster(mod,paste(dir.mod,lmod,sep="/"),format="GTiff", overwrite=T,datatype='INT1U')
writeRaster(c.mod,paste(dir.mod.c,"/MOD10A1.A",corte(lmod),".clouds.cover.tif",sep=""),
format="GTiff", overwrite=T,datatype='INT1U')
file.rename(paste(dir.mod,lmod,sep="/"),
paste(dir.mod,"/",substr(lmod,1,nchar(lmod)-8),"_",sprintf("%03d",i),".tif",sep=""))
############################################################################################################
# same pipeline for the MYD (Aqua) image
myd <- raster("~/TESIS/test2/MYD10A1.A2016089.h12v12.006.2016091123302.NDSI_Snow_Cover.tif")
# myd <- raster(paste(dir.myd,lmyd[j],sep="/"))
c.myd<- rcl(myd, capa.nubes)# 0 = clouds, 1 = soil, 2 = snow
myd<- rcl(myd, snow.bare.clouds)# 0 = clouds, 1 = soil, 2 = snow
lmyd <- list.files(path=dir.myd,pattern = ".tif$")
writeRaster(myd,paste(dir.myd,lmyd,sep="/"),format="GTiff", overwrite=T,datatype="INT1U")
writeRaster(c.myd,paste(dir.myd.c,"/MYD10A1.A",corte(lmyd),".clouds.cover.tif"
,sep=""),format="GTiff", overwrite=T,datatype='INT1U')
file.rename(paste(dir.myd,lmyd,sep="/"),
paste(dir.myd,"/",substr(lmyd,1,nchar(lmyd)-8),"_",sprintf("%03d",i),".tif",sep=""))
###########################################################################################################
# c.mod[which(is.na(getValues(c.mod)))] <- 0
# c.myd[which(is.na(getValues(c.myd)))] <- 0
# cloud union (logical OR of the two binary cloud layers)
writeRaster((c.mod+c.myd)-(c.mod*c.myd),
paste(dir.mod.myd.c,"/MOD.MYD.A",substr(lmyd,pos,pos+6),
".clouds.max.tif",sep=""),format="GTiff", overwrite=T,datatype='INT1U')
### this one is fine as is
# cloud intersection (logical AND)
writeRaster((c.mod*c.myd),paste(dir.mod.myd.c,"/MOD.MYD.A",substr(lmyd,pos,pos+6),
".clouds.min.tif",sep=""),
format="GTiff", overwrite=T,datatype='INT1U')
###########################################################################################################
# fill MOD cells that are NA (clouds) with the MYD value, then combine
mask.mod <- reclassify(mod, bool.clouds, include.lowest=FALSE, right=NA)
mod.myd <- mask.mod*myd
mod.myd <- mod+ mod.myd
writeRaster(mod.myd,paste(dir.mod.myd,"/MOD.MYD.A",substr(lmyd,pos,pos+6),
".snow.cover.area.tif",sep=""),format="GTiff", overwrite=T,datatype='INT1U')
# NOTE(review): this inner loop recomputes the identical statistics 7 times
# (presumably to weight this stage in the benchmark — confirm intent)
for(h in 1:7){
tab<- getValues(mod.myd)
largo<- mod.myd@ncols*mod.myd@nrows
clouds.cover<- length(na.omit(getValues(mod.myd)))
snow.cover<- length(which(getValues(mod.myd)==1))
tabla[i,1] <- corte(lmod)
tabla[i,2] <- (clouds.cover*100)/largo
tabla[i,3] <- snow.cover*100/(tabla[i,2]*largo/100)
# NOTE(review): bare subscript below is a no-op; column 4 is never assigned
tabla[i,4]
}
}
# record end time and elapsed HH:MM:SS, then write the timing to disk
d1[2] <- m()
d1[3] <- f.t2(d1)
write.csv(d1,paste(dir.mod,"t1.csv"))
}
###
# This test produces the following result values for the images:
# mod/myd 0 1 2
# cmod/cmyd 0 1
# modmyd 0 1 2
#
|
a01bf38b3b1f6f25268d7c1b3e7d7db2cb81fe2b
|
7805360d25ad23a66027ec7bb6cb6eef0721bcca
|
/R/selectData.R
|
7a2fad260989ab9e71fefc39020233a8953c21e5
|
[] |
no_license
|
kuanghuangying/KuangHuangyingTools
|
9fc2a4e09021f868841bc318ca27280b577a84bf
|
d888c5701ce63d7c00d1821a4eb18653a75a684a
|
refs/heads/master
| 2021-01-25T10:01:04.639002
| 2018-03-04T03:51:21
| 2018-03-04T03:51:21
| 123,335,175
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 191
|
r
|
selectData.R
|
#' Slice my data and select 3 columns
#'
#' Returns the `Petal.Length`, `Petal.Width` and `Species` columns of the
#' supplied data frame.
#'
#' @param data A data frame containing the three columns above. Defaults to
#'   the `cleanData` object from the surrounding environment, preserving the
#'   original zero-argument behaviour.
#' @return A data frame (same class as `data`) with only the three columns.
#' @export
selectData <- function (data = cleanData) {
  # namespaced call instead of the un-imported %>% pipe, which is not
  # guaranteed to be attached inside a package
  dplyr::select(data, Petal.Length, Petal.Width, Species)
}
|
ff2564a9b70b4594045ae5a758b02f0a7d7e37ce
|
b103e27f98c7c65f62f6d11ee351ad40cc256a20
|
/database_helper.R
|
2738cc92a4d6cfcd1732e3deafc4f03688fee9dc
|
[] |
no_license
|
martyciz/smecko
|
cce0f4b93109e0776597f0a1380d6b62c6298a67
|
afb546077a64c384eafe8ac12426d017d4ef6d83
|
refs/heads/master
| 2021-01-10T07:19:04.419536
| 2015-10-23T14:23:21
| 2015-10-23T14:23:21
| 44,817,877
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 491
|
r
|
database_helper.R
|
library(yaml)
library(RMySQL)
# Run an SQL query against the MySQL database described in db_config.yml and
# return the full result set as a data frame.
#
# sql: a character string containing a single SQL statement.
# Returns: data frame with all rows of the result.
retrieve_data <- function(sql) {
  db_config <- yaml.load_file("db_config.yml")
  mydb <- dbConnect(MySQL(),
                    host=db_config$db$host,
                    dbname=db_config$db$name,
                    user=db_config$db$user,
                    password=db_config$db$pass,
                    CharSet='utf8')
  # guarantee the connection is closed even if the query below errors
  # (the original leaked the connection on failure)
  on.exit(dbDisconnect(mydb), add = TRUE)
  dbGetQuery(mydb, "SET NAMES 'utf8'")
  rs <- dbSendQuery(mydb, sql)
  on.exit(dbClearResult(rs), add = TRUE, after = FALSE)
  # dbFetch() is the current DBI name; fetch() is a deprecated alias
  data <- dbFetch(rs, n=-1)
  return(data)
}
|
0398e82ef92affe0d2e8ae7748dd187420c45cf9
|
55f749ccdaba1b891195d81e186321432d506532
|
/QTL_analyses/RQTL2_F5_mapJLR_clean_v1.R
|
4caaec279f34ef7848880f645cc3c2fd227707dc
|
[] |
no_license
|
itliao/IpomoeaNectarQTL
|
01390f36a746276c58cc7830e31aa6794f35df40
|
4059eca1fa33b77c03dd6cfa72696d9d1300ea52
|
refs/heads/main
| 2023-04-13T17:51:45.477236
| 2021-12-20T18:34:43
| 2021-12-20T18:34:43
| 365,070,942
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,988
|
r
|
RQTL2_F5_mapJLR_clean_v1.R
|
# QTL mapping with R/qtl2 on the F5 population (LOCO genome scans).
# NOTE(review): setwd() to a personal path makes this script machine-specific.
setwd("~/Dropbox (Duke Bio_Ea)/Rausher Lab/Current_lab_members/Irene_Liao_Data/Scripts_Instructions/QTL/RQTLfiles")
library(qtl2)
# Goal - QTL analyses with qtl2, specifically using the LOCO method of identifying QTLs
# using LOCO, one QTL per chromosome, genome-wide significance threshold - part of the GWS QTLs
# using LOCO, multiple QTLs per chromosome, chromosome-wide significance creates the "All QTLs" set
###################
# get files ready #
###################
#create zip
zip_datafiles("mapJLR_F5_200817.yaml", overwrite=TRUE)
#path to zipped files
zipfile <- "mapJLR_F5_200817.zip"
#then read in zip
mapA4 <- read_cross2(zipfile)
##########################################
# follow procedures in R/qtl2 user guide #
##########################################
#step1: calculate genotype probabilities (1 cM pseudomarker grid)
map <- insert_pseudomarkers(mapA4$gmap, step=1)
pr <- calc_genoprob(mapA4, map, error_prob=0.001)
#leave one chromosome out - LOCO method
#scan chrom using kinship matrix data from all other chrom
kin_loco <- calc_kinship(pr, "loco")
#perform genome scan with loco
out_klo <- scan1(pr, mapA4$pheno, kin_loco)
#output - matrix of LOD scores, positions x phenotypes
#plot() to plot LOD curves
#lodcolumn - column/trait to plot
par(mar=c(5.1, 4.1, 1.1, 1.1))
#plot genome scan results per trait with different y-axis limits
# (three y-scales per trait so both small and large peaks are visible)
pdf("QTLs_mapJLR_LOCO_201216.pdf")
for (i in 1:13){
print(i)
plot(out_klo, map, lodcolumn = i, col="slateblue", ylim=c(0, 5))
plot(out_klo, map, lodcolumn = i, col="violetred", ylim=c(0, 10))
plot(out_klo, map, lodcolumn = i, col="darkgreen", ylim=c(0, 20))
}
dev.off()
#perform permutation test - establish stats significant
#n_perm = number of permutation replicates.
operm4 <- scan1perm(pr, mapA4$pheno, kin_loco, n_perm=1000)
sum4 <- summary(operm4, alpha=c(0.2, 0.1, 0.05, 0.01))
save(operm4, file="opermLOCO_RQTL2.RData")
write.table(sum4, file = "200817_RQTL2/sum4perm_mapJLRklo1000_200817.txt")
#find_peaks() - id peaks
#LOD support, use drop 1.5 with peakdrop 1.8
#find the 0.05 genome-wide threshold for each trait
# one call per trait, each with that trait's 0.05 genome-wide LOD threshold
# (values presumably taken from the 1000-permutation summary above — confirm)
peaksCW <- find_peaks(out_klo, map, threshold=3.258233314, peakdrop=1.8, drop=1.5)
peaksCT <- find_peaks(out_klo, map, threshold=3.271583122, peakdrop=1.8, drop=1.5)
peaksCL <- find_peaks(out_klo, map, threshold=3.229508436, peakdrop=1.8, drop=1.5)
peaksCLV <- find_peaks(out_klo, map, threshold=3.109889231, peakdrop=1.8, drop=1.5)
peaksSL <- find_peaks(out_klo, map, threshold=3.073749999, peakdrop=1.8, drop=1.5)
peaksMFL <- find_peaks(out_klo, map, threshold=3.284543513, peakdrop=1.8, drop=1.5)
peaksPL <- find_peaks(out_klo, map, threshold=3.225302135, peakdrop=1.8, drop=1.5)
peaksSM <- find_peaks(out_klo, map, threshold=3.252094823, peakdrop=1.8, drop=1.5)
peaksSdL <- find_peaks(out_klo, map, threshold=3.108473759, peakdrop=1.8, drop=1.5)
peaksSW <- find_peaks(out_klo, map, threshold=3.180948084, peakdrop=1.8, drop=1.5)
peaksNV <- find_peaks(out_klo, map, threshold=3.223716159, peakdrop=1.8, drop=1.5)
peaksNSC <- find_peaks(out_klo, map, threshold=3.174653448, peakdrop=1.8, drop=1.5)
peaksNS <- find_peaks(out_klo, map, threshold=3.237713343, peakdrop=1.8, drop=1.5)
# dump every labelled peak table into one text file
sink("200817_RQTL2/peaks_LOD_LOCO_operm1000_200817.txt")
print("CorollaWidth")
print(peaksCW)
print("CorollaThroat")
print(peaksCT)
print("CorollaLength")
print(peaksCL)
print("OverallCorollaLength")
print(peaksCLV)
print("SepalLength")
print(peaksSL)
print("LongestStamenLength")
print(peaksMFL)
print("PistilLength")
print(peaksPL)
print("SeedMass")
print(peaksSM)
print("SeedLength")
print(peaksSdL)
print("SeedWidth")
print(peaksSW)
print("NectarVolume")
print(peaksNV)
print("SugarConcentration")
print(peaksNSC)
print("NectarySize")
print(peaksNS)
sink()
###########################################
# chromosome-level significance with LOCO #
###########################################
# calc_kinship(pr, "loco") already returns the full list of per-chromosome
# LOCO kinship matrices, so compute it once rather than once per chromosome
# (the original recomputed the entire list 15 times).
kin_list <- calc_kinship(pr, "loco")
# keep the individual kinship1..kinship15 variables for compatibility with
# any later code that refers to them by name
for (chr in 1:15) {
  assign(paste0("kinship", chr), kin_list[[chr]])
}
# chromosome-level significance for LOCO with 10,000 permutations: scan each
# chromosome alone against its LOCO kinship matrix, bind the result to
# operm<chr>_klo, and save it under that same name so load() restores the
# original variable
for (chr in 1:13) {
  operm_name <- paste0("operm", chr, "_klo")
  assign(operm_name,
         scan1perm(pr[, as.character(chr)], mapA4$pheno, kin_list[[chr]],
                   n_perm = 10000))
  save(list = operm_name, file = paste0("opermLOCO_chr", chr, "_RQTL2.RData"))
}
# capture the per-chromosome permutation summaries (top-level summary()
# calls auto-print, so sink() records them)
sink("200817_RQTL2/sumChromPerm10000_KLO_200819.txt")
summary(operm1_klo, alpha=c(0.2, 0.1, 0.05, 0.01))
summary(operm2_klo, alpha=c(0.2, 0.1, 0.05, 0.01))
summary(operm3_klo, alpha=c(0.2, 0.1, 0.05, 0.01))
summary(operm4_klo, alpha=c(0.2, 0.1, 0.05, 0.01))
summary(operm5_klo, alpha=c(0.2, 0.1, 0.05, 0.01))
summary(operm6_klo, alpha=c(0.2, 0.1, 0.05, 0.01))
summary(operm7_klo, alpha=c(0.2, 0.1, 0.05, 0.01))
summary(operm8_klo, alpha=c(0.2, 0.1, 0.05, 0.01))
summary(operm9_klo, alpha=c(0.2, 0.1, 0.05, 0.01))
summary(operm10_klo, alpha=c(0.2, 0.1, 0.05, 0.01))
summary(operm11_klo, alpha=c(0.2, 0.1, 0.05, 0.01))
summary(operm12_klo, alpha=c(0.2, 0.1, 0.05, 0.01))
summary(operm13_klo, alpha=c(0.2, 0.1, 0.05, 0.01))
sink()
#peaks with max and min (0.05) at the chrom level
#multiple peaks on a chromosome
#peakdrop indicates the amount that the LOD curve must drop below the lowest of two adjacent peaks
peaksMaxChrom <- find_peaks(out_klo, map, threshold=2.14, drop=1.5)
peaksMaxChrom2 <- find_peaks(out_klo, map, threshold=2.14, peakdrop=1.8, drop=1.5)
peaksMinChrom <- find_peaks(out_klo, map, threshold=1.73, drop=1.5)
peaksMinChrom2 <- find_peaks(out_klo, map, threshold=1.73, peakdrop=1.8, drop=1.5)
sink("200817_RQTL2/peaks_ChromKLO_operm10000_200819.txt")
print("Max of max, 1 peak")
print(peaksMaxChrom)
print("Max of max, 2 peak")
print(peaksMaxChrom2)
print("Min of min, 1 peak")
print(peaksMinChrom)
print("Min of min, 2 peak")
print(peaksMinChrom2)
sink()
############################################################
# to find markers at confidence interval locations (in cM) #
############################################################
#read in files with peaks information
# NOTE(review): "stringsAsFactor" works only via partial matching of
# read.table's stringsAsFactors argument — spell it out
peaksKLO <- read.table("200817_RQTL2/peaks_forMarkers_KLO_all_200820.txt", sep="\t", header=TRUE, stringsAsFactor=FALSE)
peaksHK <- read.table("200817_RQTL2/peaks_forMarkers_HK_all_200820.txt", sep="\t", header=TRUE, stringsAsFactor=FALSE)
#use "map" to find markers - these include the pseudomarkers that were inserted
pmar <- find_marker(map, 4, 74)
#iterate through the dataframe to get the markers
# columns used: 3 = chr, 4 = peak position, 6 = CI low, 7 = CI high
peakList <- list()
loList <- list()
hiList <- list()
for (i in 1:nrow(peaksKLO)){
chr <- peaksKLO[i,3]
peak <- peaksKLO[i,4]
ci_lo <- peaksKLO[i,6]
ci_hi <- peaksKLO[i,7]
pmarP <- find_marker(map, chr, peak)
pmarL <- find_marker(map, chr, ci_lo)
pmarH <- find_marker(map, chr, ci_hi)
peakList[[i]] <- pmarP
loList[[i]] <- pmarL
hiList[[i]] <- pmarH
}
#make the list into a dataframe column
peakCol <- do.call(rbind,peakList)
loCol <- do.call(rbind,loList)
hiCol <- do.call(rbind,hiList)
#add/append the "datafames" into the main dataframe
peaksKLO <- cbind(peaksKLO,peakCol)
peaksKLO<- cbind(peaksKLO,loCol)
peaksKLO <- cbind(peaksKLO,hiCol)
#write to new table
write.table(peaksKLO, file = "peaks_ChromKLO_operm10000_markers_200820.txt", append = FALSE, quote = TRUE, sep = "\t", eol = "\n", na = "NA", dec = ".", row.names = TRUE, col.names = TRUE)
# get closest physical location of pseudomarkers
# (interpolated positions for the pseudomarkers)
tointerp <- list("1" = c(pos1.204=204, pos1.205=205, pos1.206=206, pos1.207=207, pos1.69=69, pos1.70=70),
"2" = c(pos2.50=50),
"3" = c(pos3.141=141, pos3.32=32, pos3.51=51, pos3.52=52),
"4" = c(pos4.186=186, pos4.197=197, pos4.224=224, pos4.52=52, pos4.61=61),
"5" = c(pos5.169=169, pos5.202=202, pos5.26=26, pos5.27=27, pos5.98=98),
"6" = c(pos6.159=159, pos6.57=57),
"7" = c(pos7.43=43),
"8" = c(pos8.215=215),
"9" = c(pos9.28=28, pos9.35=35),
"11" = c(pos11.27=27),
"12" = c(pos12.116=116, pos12.119=119, pos12.84=84),
"13" = c(pos13.142=142, pos13.6=6, pos13.65=65, pos13.69=69, pos13.26=26),
"14" = c(pos14.163=163, pos14.18=18, pos14.182=182, pos14.184=184, pos14.96=96, pos14.97=97),
"15" = c(pos15.10=10, pos15.7=7)
)
# capture the genetic->physical interpolation (auto-printed) to a file
sink("200817_RQTL2/PseudoMarkers_KLO_200820.txt")
interp_map(tointerp, mapA4$gmap, mapA4$pmap)
sink()
###################################################################################
# find means, n, genotype info of QTL peaks from all QTLs, chromosome-level, LOCO #
# use information to calculate RHE #
###################################################################################
#re-read in the file (couldn't figure out how to drop the levels in the original file)
peaksKLOnew <- read.table("200817_RQTL2/peaks_ChromKLO_operm10000_markers_200820.txt", sep="\t", header=TRUE, stringsAsFactor=FALSE)
# first pass: genotype plots plus the raw genotype vectors, captured to file
# columns used: 2 = trait, 3 = chr, 4 = peak position, 8 = peak marker
pdf("200817_RQTL2/GenoPheno_10000KLO_200820.pdf")
sink("200817_RQTL2/GenoPhenoValues_10000KLO_200820.txt")
for (i in 1:nrow(peaksKLOnew)){
chrom <- peaksKLOnew[i,3]
peak <- peaksKLOnew[i,4]
trait <- peaksKLOnew[i,2]
pL <- peaksKLOnew[i,8]
pLa <- find_marker(mapA4$gmap, chrom, peak)
g <- maxmarg(pr, map, chr=chrom, pos=peak, return_char = TRUE)
g2 <- maxmarg(pr, mapA4$gmap, chr=chrom, pos=peak, return_char = TRUE)
p <- plot_pxg(g, mapA4$pheno[,trait], ylab = trait, SEmult=2, force_labels = TRUE, omit_points = FALSE)
num <- table(g)
print(trait)
print(chrom)
print(peak)
print(pL)
print(g)
print(pLa)
print(g2)
}
sink()
dev.off()
# second pass over the same peaks: capture phenotype means and group counts
sink("200817_RQTL2/GenoPhenoMeansNum_10000KLO_200820.txt")
for (i in 1:nrow(peaksKLOnew)){
chrom <- peaksKLOnew[i,3]
peak <- peaksKLOnew[i,4]
trait <- peaksKLOnew[i,2]
pL <- peaksKLOnew[i,8]
pLa <- find_marker(mapA4$gmap, chrom, peak)
g <- maxmarg(pr, map, chr=chrom, pos=peak, return_char = TRUE)
g2 <- maxmarg(pr, mapA4$gmap, chr=chrom, pos=peak, return_char = TRUE)
p <- plot_pxg(g, mapA4$pheno[,trait], ylab = trait, SEmult=2, force_labels = TRUE, omit_points = FALSE)
num <- table(g)
print(trait)
print(chrom)
print(peak)
print(pL)
print(p)
print(num)
}
sink()
#################################################################################
# find means, n, genotype info of markers from R/qtl final stepwise set of QTLs #
# use the conservative (0.05, HK) set of QTLs #
# use information to calculate RHE #
#################################################################################
peaksHKFinal <- read.csv("200805_RQTL/jitter/Final_qtlCon_markers_pos_200827.csv", sep=",", header=TRUE, stringsAsFactors = FALSE)
#single putative QTL position, plot_pxg()
#vector of genotypes from maxmarg()
#return_char = TRUE, vector of character strings with genotype labels
#creates plots
pdf("200817_RQTL2/Final_GenoPheno_qtlCon_200827.pdf")
sink("200817_RQTL2/Final_GenoPhenoValues_qtlCon_200827.txt") #only for marker and genotypes, don't print means and numbers
for (i in 1:nrow(peaksHKFinal)){
chrom <- peaksHKFinal[i,3]
peak <- peaksHKFinal[i,4]
trait <- peaksHKFinal[i,2]
pL <- peaksHKFinal[i,5]
pLa <- find_marker(mapA4$gmap, chrom, peak)
g <- maxmarg(pr, map, chr=chrom, pos=peak, return_char = TRUE)
g2 <- maxmarg(pr, mapA4$gmap, chr=chrom, pos=peak, return_char = TRUE)
p <- plot_pxg(g, mapA4$pheno[,trait], ylab = trait, SEmult=2, force_labels = TRUE, omit_points = FALSE)
num <- table(g)
print(trait)
print(chrom)
print(peak)
print(pL) #peak marker
print(g) #all the genotypes
print(pLa) #peak marker
print(g2) # all the genotypes
}
sink()
dev.off()
sink("200817_RQTL2/Final_GenoPhenoMeansNum_qtlCon_200827.txt") #only for means and numbers, don't print pL and g
for (i in 1:nrow(peaksHKFinal)){
chrom <- peaksHKFinal[i,3]
peak <- peaksHKFinal[i,4]
trait <- peaksHKFinal[i,2]
pL <- peaksHKFinal[i,5]
pLa <- find_marker(mapA4$gmap, chrom, peak)
g <- maxmarg(pr, map, chr=chrom, pos=peak, return_char = TRUE)
g2 <- maxmarg(pr, mapA4$gmap, chr=chrom, pos=peak, return_char = TRUE)
p <- plot_pxg(g, mapA4$pheno[,trait], ylab = trait, SEmult=2, force_labels = TRUE, omit_points = FALSE)
num <- table(g)
print(trait)
print(chrom)
print(pL) #peak marker
print(peak)
print(p) #print the means
print(num) #print the numbers of 11 and 22
}
sink()
###################################################
# create summary "barplots" for both sets of QTLs #
###################################################
#read in the peak files from qtlMerge (GWS QTLs) and LOCO2c (all QTLs)
# NOTE(review): "stringsAsFactor" relies on partial matching of
# stringsAsFactors — spell it out
qtlMerge <- read.table("200817_RQTL2/peaks_qtlMerge_201004.csv", sep=",",header=TRUE, stringsAsFactor=FALSE)
LOCO2c <- read.table("200817_RQTL2/peaks_LOCO2c_201004.csv", sep=",", header=TRUE, stringsAsFactor=FALSE)
pdf("PeaksSummary_201004.pdf")
plot_peaks(qtlMerge, map)
plot_peaks(LOCO2c, map)
dev.off()
|
aa2733910459b01e0c3e99b86cd6a7f6e2cc0c92
|
9719937c30d935bd575af95c496b8c2e9e7c69a5
|
/tests/warning.R
|
ebc5706f804663caeccec6b98a5d73dd7192aaf2
|
[] |
no_license
|
kismsu/animint
|
4a804da60bef85d54e34d3273ab912e7631f805d
|
e25d01d8d6e3f91735fa246161e6ab9a40219206
|
refs/heads/master
| 2021-01-15T21:20:00.434346
| 2014-07-22T14:29:55
| 2014-07-22T14:29:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,099
|
r
|
warning.R
|
library(animint)
data(WorldBank)
# Pair a plot (or animint plot list) with the warning text it is expected to
# produce; w = "" means no warning is expected. A bare ggplot is wrapped as
# list(plot = p) so every case has the same shape.
check <- function(p, w=""){
  stopifnot(is.character(w), length(w) == 1)
  if (is.ggplot(p)) {
    p <- list(plot = p)
  }
  stopifnot(is.list(p))
  list(plot = p, warn = w)
}
# plain scatterplot with no interactive aesthetics (expected: no warning)
wb <- ggplot()+
geom_point(aes(life.expectancy, fertility.rate, size=population),
data=WorldBank)
# multi-plot animint list: animated scatterplot + time series + bar chart
motion.area <-
list(scatter=ggplot()+
geom_point(aes(life.expectancy, fertility.rate, clickSelects=country,
showSelected=year, colour=region, size=population),
data=WorldBank)+
geom_text(aes(life.expectancy, fertility.rate, label=country,
showSelected=country, showSelected2=year),
data=WorldBank)+
scale_size_continuous(range=c(3, 10))+
make_text(WorldBank, 55, 9, "year"),
ts=ggplot()+
make_tallrect(WorldBank, "year")+
geom_line(aes(year, life.expectancy, group=country, colour=region,
clickSelects=country),
data=WorldBank, size=4, alpha=3/5),
time=list(variable="year",ms=3000),
bar=ggplot()+
geom_bar(aes(country, life.expectancy, fill=region,
showSelected=year, clickSelects=country),
data=WorldBank, stat="identity", position="identity")+
coord_flip() + theme_animint(height = 1500),
duration=list(year=1000))
# exact warning text expected from stat_bin + interactive aesthetics
tornado.warn <- paste0("stat_bin is unpredictable ",
"when used with clickSelects/showSelected.\n",
"Use ddply to do the binning ",
"or use make_bar if using geom_bar/geom_histogram.")
library(maps)
data(UStornadoes)
stateOrder <- data.frame(state = unique(UStornadoes$state)[order(unique(UStornadoes$TornadoesSqMile), decreasing=T)], rank = 1:49) # order states by tornadoes per square mile
UStornadoes$state <- factor(UStornadoes$state, levels=stateOrder$state, ordered=TRUE)
UStornadoes$weight <- 1/UStornadoes$LandArea
# useful for stat_bin, etc.
USpolygons <- map_data("state")
USpolygons$state = state.abb[match(USpolygons$region, tolower(state.name))]
# map + yearly bar chart; the un-binned geom_bar should trigger tornado.warn
tornado.bar <-
list(map=ggplot()+
geom_polygon(aes(x=long, y=lat, group=group),
data=USpolygons, fill="black", colour="grey") +
geom_segment(aes(x=startLong, y=startLat, xend=endLong, yend=endLat,
showSelected=year),
colour="#55B1F7", data=UStornadoes),
ts=ggplot()+
geom_bar(aes(year, clickSelects=year),data=UStornadoes))
## check(x) means there should be no warning and check(x,"warning")
## means that the code should produce "warning"
to.check <-
list(check(wb),
check(wb+scale_size_area(),
"geom_point with size=0 will be invisible"),
check(motion.area),
check(tornado.bar, tornado.warn))
# Run each case through gg2animint() and compare the warning it emits (empty
# string = no warning) against the expected text; stop on any mismatch.
for(L in to.check){
  generated <- ""
  tryCatch({
    gg2animint(L$plot, open.browser=FALSE)
  }, warning=function(w){
    # conditionMessage() replaces the original w$mes, which only worked via
    # silent partial matching of the "message" element of the condition
    generated <<- conditionMessage(w)
  })
  if(generated != L$warn){
    print(L$warn)
    print(generated)
    stop("expected first, but got second")
  }
}
|
83fa5b4f8aa448f62cb52679cf2c510c7367780f
|
2379787de6dfb0e65b59dbf1a5b9cd74071415a4
|
/ui.R
|
25036615bcc69fed80d64148998f819b75a34a40
|
[] |
no_license
|
shpotes/Visualization
|
2c8fd13bd32993cf26254a17537fbfbc4d3c1134
|
cbc050ba8e667391eb8e0f0cc3ceb29130f8c1a4
|
refs/heads/master
| 2020-09-16T22:12:10.326610
| 2017-06-16T22:17:38
| 2017-06-16T22:17:38
| 94,488,504
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,225
|
r
|
ui.R
|
library(leaflet)
library(ggplot2)
library(plotly)
# Top-level Shiny UI: navbar with a video tab and an interactive-map tab.
navbarPage("EAFIT",
#img(src='logo_eafit_completo.png', width=70, height=35), id="nav",
tabPanel("Centro de Egresados",
h1("Centro de Egresados"),
tags$video(src =
"Línea del Tiempo - Centro de Egresados - Universidad EAFIT.mp4",
type = "video/mp4", autoplay = NA, controls = NA)),
tabPanel("Mapa Interactivo",
div(class="outer",
tags$head(
includeCSS("styles.css"),
includeScript("gomap.js")
),
# full-bleed leaflet map with a draggable control panel on top
leafletOutput("map", width="100%", height="100%"),
absolutePanel(id = "controls", class = "panel panel-default",
fixed = TRUE, draggable = TRUE, top = 60, left = "auto",
right = 20, bottom = "auto", width = 330, height = "auto",
h2("Localizacion de los eafitenses"),
# year slider drives the map; auto-advances every 5 s when animated
sliderInput("year", "Año", min = 2001, max = 2017, value = 2001,
animate = animationOptions(interval = 5000, loop = F)),
plotlyOutput("Gener", height = 250, width = 250),
plotlyOutput("Carrera", height = 250, width = 250)
),
tags$div(id="cite",
'Vigilada Mineducación.'
)
)
)
)
|
bcb0cbe1db4404c155f99c29c4825a40e7b2abf2
|
ff2b55f75a9802b8de761eee6a353f9b8058b08c
|
/man/ADPIRA.Rd
|
5fdb43487a730796b8307da0f09d7e3c74f9b651
|
[] |
no_license
|
FCACollin/rpack_pira
|
426d344de5eb426240608aba725a8c328b8779e1
|
852db8a6175fe2126202b3316ba576d47ecf163c
|
refs/heads/master
| 2023-03-08T15:58:50.987587
| 2021-02-20T18:22:15
| 2021-02-20T18:22:15
| 287,980,600
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 466
|
rd
|
ADPIRA.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-ADPIRA.R
\docType{data}
\name{ADPIRA}
\alias{ADPIRA}
\title{ADPIRA}
\format{
Information about the format
}
\source{
UMB.
}
\usage{
ADPIRA
}
\description{
Analysis Dataset for the study of Progression Independent of Relapse Activity
in the Polish RRMS patient population.
}
\examples{
attr(ADPIRA, "metadata")
sapply(ADPIRA, attr, which = "label")
head(ADPIRA)
}
\keyword{datasets}
|
8d081968646df24aa5abb4a26e81b164b39b5b08
|
89c706327fbac52418ccda18e44d1c98bd7759e7
|
/dashboard/AppCode/shiny/server/pages/main.R
|
5f6b708b37b04e405cd8a3ada9cc39a4026fffda
|
[] |
no_license
|
rickdott/Montecristo
|
3d313f434a2a0bec167e271a0d27b2e058125ae5
|
fa66a9d6b6d0555d1e882ebec4a7d79340e2ddf6
|
refs/heads/master
| 2023-03-29T23:51:16.121366
| 2021-04-03T08:22:07
| 2021-04-03T08:22:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,363
|
r
|
main.R
|
# Build the main (landing) page UI.
# Currently a placeholder; the full front page (about-the-app, app-features
# and about-us sections with images) was stubbed out and is not rendered.
pages.main.getPage <- function() {
  placeholder <- div("Very pretty main page, a lot of time went into this! :)")
  tagList(placeholder)
}
|
403bb5d725a3c5df37ab4676764c1f9dba08cfa0
|
78a13b567411aa7b33b6717ceece8966e9254589
|
/R/zzz.R
|
ceb6e298b9537d3e526b38fb38d0597fab1168f6
|
[
"MIT"
] |
permissive
|
jemus42/attrakttv
|
8a6162a0058a21129da93b0d803c9162d519d65b
|
06a3d3953a11dfafdc9824c3e715559096dbc3cf
|
refs/heads/master
| 2021-07-19T02:34:23.231630
| 2021-07-03T19:43:02
| 2021-07-03T19:43:02
| 209,893,950
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 789
|
r
|
zzz.R
|
#
# .onLoad <- function(libname, pkgname) {
#   # op <- options()
#   # op.trakt <- list(
#   #   trakt_db_path = "~/db"
#   # )
#   # toset <- !(names(op.trakt) %in% names(op))
#   # if (any(toset)) options(op.trakt[toset])
#
#   # local({
#   #   temp_path <- file.path(system.file(package = "attrakttv"), "db")
#   #
#   #   if (!file.exists(Sys.getenv("trakt_db_path", unset = temp_path))) {
#   #     dir.create(Sys.getenv("trakt_db_path", unset = temp_path), recursive = TRUE)
#   #
#   #     db_init()
#   #   }
#   #
#   #   })
#
# }
# Declare the non-standard-evaluation symbols used in data-mask pipelines
# elsewhere in the package so R CMD check does not flag them as undefined
# global variables.
globalVariables(c(
"trakt", "cache_date", "available_translations", "genres", "updated_at",
"first_aired", "show_id", "likes", "tvdb", ".", "lang",
"seed_shows", "seed_episodes", "seed_seasons", "seed_requests", "seed_posters",
"title"
))
|
20bab06a6a1871d7f05fc461a035e957f0ab2b34
|
7a7c964628a66093748692a756ac045b4731bcc6
|
/joe/scripts/not interesting/IT_sitedate_specific.R
|
7e66aca0ec4970cb169a72f94622816f08c8d651
|
[] |
no_license
|
reverteiros/floral_traits
|
85b9a71c0dd1cd34edda41ac3d0b87213ee0a96f
|
d2020502e9eb3c152849c8408652e87e8de3f893
|
refs/heads/master
| 2022-01-24T13:34:53.301712
| 2022-01-11T17:52:45
| 2022-01-11T17:52:45
| 136,968,752
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,228
|
r
|
IT_sitedate_specific.R
|
# this script is going to look at the unweighted / cwm IT of all the flowers present at one sitedate...for now, you need to run the entirety of cwm_cwidth_IT before running this script for unweighted IT
require(plyr)
require(lattice)
# NOTE(review): quartz() is macOS-only; use x11()/windows() on other platforms
quartz()
fspecies$sitedate<-as.factor(fspecies$sitedate)
#subset so that only sitedates with more than 2 points are considered...this is called sd
# NOTE(review): leftover debug assignment; the loop below reassigns i
i<-"2013_FHT_2"
fspecies$sitedate<-as.factor(fspecies$sitedate)
# NOTE(review): the name sd masks stats::sd for the rest of the script
sd<-data.frame()
# keep every sitedate that has at least 3 rows
for(i in levels(fspecies$sitedate)){
sdnew<-fspecies[which(fspecies$sitedate==i),]
if(length(sdnew$sitedate)>2){
sd<-rbind(sd, sdnew)
}}
sd<-droplevels(sd)
write.csv(sd, 'data/sd.csv')
#make plots for CWM for each sitedate that has more than 2 points
xyplot(cwm~width|sitedate, data=sd,
par.strip.text=list(cex=.75),
main="Community Weighted Visitor IT by Corolla Width",
xlab="Corolla Width (mm)",
ylab="IT (mm)",
)
# now generate the spearman ranks for each of the sitedate plots
rwct<-0
sd$sitedate<-as.factor(sd$sitedate)
spearman<-data.frame(matrix(nrow=90, ncol=2))
for(i in unique(levels(sd$sitedate))){
print(i)
test<-sd[which(sd$sitedate==i),]
spear<-cor(test$cwm, test$width, method = "spearman")
newrow<-as.vector(c(i, spear))
rwct<-rwct+1
spearman[rwct,]<-newrow
i
}
spearman
write.csv(spearman, 'data/cwm_IT_spearman.csv')
# Make plots for unw_IT instead of cwm
xyplot(unw_IT~width|sitedate, data=sd,
par.strip.text=list(cex=.75),
main="Unweighted Visitor IT by Corolla Width",
xlab="Corolla Width (mm)",
ylab="IT (mm)",
)
# now generate the spearman ranks for each of the sitedate plots
rwct<-0
sd$sitedate<-as.factor(sd$sitedate)
spearman<-data.frame(matrix(nrow=90, ncol=2))
for(i in unique(levels(sd$sitedate))){
print(i)
test<-sd[which(sd$sitedate==i),]
spear<-cor(test$unw_IT, test$width, method = "spearman")
newrow<-as.vector(c(i, spear))
rwct<-rwct+1
spearman[rwct,]<-newrow
i
}
spearman
write.csv(spearman, 'data/unw_IT_spearman.csv')
# make plots for all sitedates...including those with less than 2 points
# sitedate=fspecies$sitedate
# xyplot(unw_IT~width|sitedate, data=fspecies)
# xyplot(cwm~width|sitedate, data=fspecies)
|
f9c16299682ad349d025321321b1fa5f97acb078
|
3f3b6cff551953eb133b81d95337268df1e23235
|
/R_Code/6jan/sc1.r
|
2fd9044e7c5bf68c41594c6a162ac0c3c669c8dc
|
[] |
no_license
|
therealrahulsahu/c_sample
|
31a5a79233a3b54d803418c6c1ccf72bceb67935
|
0b8026a7ee0186eeac9b53df0c22b5db9fd1c5fb
|
refs/heads/master
| 2021-08-11T03:34:52.437837
| 2020-07-02T06:16:50
| 2020-07-02T06:16:50
| 198,704,023
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 71
|
r
|
sc1.r
|
#Q.1
x=as.integer(readline("Enter No. : "))
if(x>0)
print("Positive")
|
291ab7798de59698aa21cb666fe5827112376725
|
0171da74586a079e97269ba9b7a8c4146c204cd0
|
/R/plotOverview.R
|
364df9d8dfe1b86d2f65c895d8c98788841394aa
|
[] |
no_license
|
jtleek/derfinder-1
|
bfda042e772224abbc911d94e0ba2c66fe5e9d08
|
a88996a426a899a5d319c628e3b9411237145caa
|
refs/heads/master
| 2021-01-22T13:42:50.235009
| 2013-11-08T18:19:41
| 2013-11-08T18:19:41
| 14,240,696
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,589
|
r
|
plotOverview.R
|
#' Plot a karyotype overview of the genome with the identified regions
#'
#' Plots an overview of the genomic locations of the identified regions (see \link{calculatePvalues}) in a karyotype view. The coloring can be done either by significant regions according to their p-values, significant by adjusted p-values, or by annotated region if using \link[bumphunter]{annotateNearest}.
#'
#'
#' @param regions The \code{$regions} output from \link{calculatePvalues}.
#' @param annotation The output from running \link[bumphunter]{annotateNearest} on the output from \link{calculatePvalues}. It is only required if \code{type="annotation"}.
#' @param type Must be either \code{pval}, \code{qval} or \code{annotation}. It determines whether the plot coloring should be done according to significant p-values (<0.05), significant q-values (<0.10) or annotation regions.
#' @param base_size Base point size of the plot. This argument is passed to \link[ggplot2]{element_text} (\code{size} argument).
#' @param areaRel The relative size for the area label when \code{type="pval"} or \code{type="qval"}. Can be useful when making high resolution versions of these plots in devices like CairoPNG.
#' @param legend.position This argument is passed to \link[ggplot2]{theme}. From ggplot2: the position of legends. ("left", "right", "bottom", "top", or two-element numeric vector).
#' @param significantCut A vector of length two specifiying the cutoffs used to determine significance. The first element is used to determine significance for the p-values and the second element is used for the q-values.
#'
#' @return A ggplot2 plot that is ready to be printed out. Tecnically it is a ggbio object.
#'
#' @seealso \link{calculatePvalues}, \link[bumphunter]{annotateNearest}
#' @author Leonardo Collado-Torres
#' @export
#' @importFrom GenomicRanges seqlengths "seqlengths<-" seqinfo
#' @importMethodsFrom ggbio autoplot layout_karyogram
#' @importFrom ggplot2 aes labs scale_colour_manual scale_fill_manual geom_text rel geom_segment xlab theme element_text element_blank
#'
#' @examples
#' ## Construct toy data
#' chrs <- paste0("chr", c(1:22, "X", "Y"))
#' chrs <- factor(chrs, levels=chrs)
#' suppressMessages(library("GenomicRanges"))
#' regs <- GRanges(rep(chrs, 10), ranges=IRanges(runif(240, 1, 4e7), width=1e3), significant=sample(c(TRUE, FALSE), 240, TRUE, p=c(0.05, 0.95)), significantQval=sample(c(TRUE, FALSE), 240, TRUE, p=c(0.05, 0.95)), area=rnorm(240))
#' annotation <- data.frame(region=sample(c("upstream", "promoter", "overlaps 5'", "inside", "overlaps 3'", "close to 3'", "downstream"), 240, TRUE))
#'
#' ## Type pval
#' plotOverview(regs)
#'
#' ## Type qval
#' plotOverview(regs, type="qval")
#'
#' ## Annotation
#' plotOverview(regs, annotation, type="annotation")
#'
#' ## Resize the plots if needed.
#'
#' \dontrun{
#' ## You might prefer to leave the legend at ggplot2's default option: right
#' plotOverview(regs, legend.position="right")
#'
#' ## Although the legend looks better on the bottom
#' plotOverview(regs, legend.position="bottom")
#'
#' ## Example knitr chunk for higher res plot using the CairoPNG device
#' ```{r overview, message=FALSE, fig.width=7, fig.height=9, dev="CairoPNG", dpi=300}
#' plotOverview(regs, base_size=30, areaRel=10, legend.position=c(0.95, 0.12))
#' ```
#'
#' ## For more custom plots, take a look at the ggplot2 and ggbio packages
#' ## and feel free to look at the code of this function:
#' plotOverview
#' }
plotOverview <- function(regions, annotation=NULL, type="pval", base_size=12, areaRel=4, legend.position=c(0.85, 0.12), significantCut=c(0.05, 0.10)) {
stopifnot(type %in% c("pval", "qval", "annotation"))
stopifnot(length(significantCut) == 2 & all(significantCut >=0 & significantCut <=1))
## Keeping R CMD check happy
hg19Ideogram <- significant <- midpoint <- area <- x <- y <- xend <- significantQval <- region <- NULL
## Assign chr lengths if needed
if(any(is.na(seqlengths(regions)))) {
message(paste(Sys.time(), "plotOverview: assigning chromosome lengths from hg19!!!"))
data(hg19Ideogram, package = "biovizBase", envir = environment())
seqlengths(regions) <- seqlengths(hg19Ideogram)[names(seqlengths(regions))]
}
## Graphical setup
ann_text <- data.frame(x=225e6, y=10, lab="Area", seqnames="chrX")
ann_line <- data.frame(x=200e6, xend=215e6, y=10, seqnames="chrX")
## Make the plot
if(type == "pval") {
## P-value plot
result <- autoplot(seqinfo(regions)) +
layout_karyogram(regions, aes(fill=significant, color=significant), geom="rect", base_size=30) +
layout_karyogram(regions, aes(x=midpoint, y=area), geom="line", color="coral1", ylim=c(10, 20)) +
labs(title=paste0("Overview of regions found in the genome; significant: p-value <", significantCut[1])) +
scale_colour_manual(values=c("chartreuse4", "wheat2"), limits=c("TRUE", "FALSE")) +
scale_fill_manual(values=c("chartreuse4", "wheat2"), limits=c("TRUE", "FALSE")) +
geom_text(aes(x=x, y=y), data=ann_text, label="Area", size=rel(areaRel)) +
geom_segment(aes(x=x, xend=xend, y=y, yend=y), data=ann_line, colour="coral1") +
xlab("Genomic coordinate") +
theme(text=element_text(size=base_size), legend.background=element_blank(), legend.position=legend.position)
} else if (type == "qval") {
## Adjusted p-value plot
result <- autoplot(seqinfo(regions)) +
layout_karyogram(regions, aes(fill=significantQval, color=significantQval), geom="rect") +
layout_karyogram(regions, aes(x=midpoint, y=area), geom="line", color="coral1", ylim=c(10, 20)) +
labs(title=paste0("Overview of regions found in the genome; significant: q-value <", significantCut[2])) +
scale_colour_manual(values=c("chartreuse4", "wheat2"), limits=c("TRUE", "FALSE")) +
scale_fill_manual(values=c("chartreuse4", "wheat2"), limits=c("TRUE", "FALSE")) +
geom_text(aes(x=x, y=y), data=ann_text, label="Area", size=rel(areaRel)) +
geom_segment(aes(x=x, xend=xend, y=y, yend=y), data=ann_line, colour="coral1") +
xlab("Genomic coordinate") +
theme(text=element_text(size=base_size), legend.background=element_blank(), legend.position=legend.position)
} else {
## Annotation region plot
stopifnot(is.null(annotation) == FALSE)
regions$region <- annotation$region
result <- autoplot(seqinfo(regions)) +
layout_karyogram(regions, aes(fill=region, color=region), geom="rect") +
labs(title="Annotation region (if available)") +
xlab("Genomic location") +
theme(text=element_text(size=base_size), legend.background=element_blank(), legend.position=legend.position)
}
return(result)
}
|
ab3c60e868dc3af7017bcd3edffa40feaa0767ea
|
a462a24ff937e151e8151f3a1bdc9c3714b12c0e
|
/tests_st.R
|
ef9add355f88510fe5616248f222737f755e541c
|
[] |
no_license
|
noeliarico/kemeny
|
b4cbcac57203237769252de2c50ce959aa4ca50e
|
50819f8bf0d19fb29a0b5c6d2ee031e8a811497d
|
refs/heads/main
| 2023-03-29T14:36:37.931286
| 2023-03-16T09:04:12
| 2023-03-16T09:04:12
| 330,797,494
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 562
|
r
|
tests_st.R
|
# Los que propongo con el baseline
# me vs mebb
t.test(res_me_8$exec_time, res_mebb_8$exec_time)
t.test(res_me_9$exec_time, res_mebb_9$exec_time)
t.test(res_me_10$exec_time, res_mebb_10$exec_time)
# me vs mebbrcw
t.test(res_me_8$exec_time, res_mebbrcw_8$exec_time)
t.test(res_me_9$exec_time, res_mebbrcw_9$exec_time)
t.test(res_me_10$exec_time, res_mebbrcw_10$exec_time)
# El mejor con el otro
t.test(res_mebb_8$exec_time, res_mebbrcw_8$exec_time)
t.test(res_mebb_9$exec_time, res_mebbrcw_9$exec_time)
t.test(res_mebb_10$exec_time, res_mebbrcw_10$exec_time)
|
2b3fc674da0dcb312377696b2483682d5cf98939
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/spatstat/examples/is.marked.ppp.Rd.R
|
79d7667a6d627453bb4dd7b1307f218f03fd77e2
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 260
|
r
|
is.marked.ppp.Rd.R
|
library(spatstat)
### Name: is.marked.ppp
### Title: Test Whether A Point Pattern is Marked
### Aliases: is.marked.ppp
### Keywords: spatial manip
### ** Examples
data(cells)
is.marked(cells) #FALSE
data(longleaf)
is.marked(longleaf) #TRUE
|
c83aff29d5b55f20f349a19346d62ac0c12667e3
|
e528ea2de3e0b68907260b1f25f520dfe0fe7945
|
/part4/textmining/classfication/libsvm.R
|
7e0dce3bbca390ea22aa7e6a097106dc8575e37b
|
[] |
no_license
|
datasci-info/ms-partner-training-20160308
|
f283b5a30abb79adb87e5ab14f8e275536100ab8
|
df41c64b4bf6e04b7ff51fb204cfa523b0581898
|
refs/heads/master
| 2016-08-11T13:13:35.095398
| 2016-03-08T07:02:34
| 2016-03-08T07:02:34
| 53,187,363
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 184
|
r
|
libsvm.R
|
install.packages("e1071")
library(e1071)
m = svm(Y~.,data=df,kernel = "linear")
m$kernel
?svm
m$SV
colnames(m$SV)
pred = predict(m,df)
table(pred,df$Y)
w = t(m$SV) %*% m$coefs
w
|
8f83ee267a92db243095810f53cf87c6318cc612
|
f1922b98a7c06db029a8f412ed17f2abb10b0617
|
/BarrelFIP.R
|
a491eabb34e42baf735bc2e49b9c58f49119b5d1
|
[] |
no_license
|
dompartipilo/barrelfip
|
3bc86b002e97365fb3403b3d24752b0263325efa
|
6411ade18deaec134b3814295db1f9b8c72e81aa
|
refs/heads/master
| 2020-07-01T23:48:51.872654
| 2020-03-12T14:48:08
| 2020-03-12T14:48:08
| 201,349,539
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,201
|
r
|
BarrelFIP.R
|
library(rvest)
library(dplyr)
library(tidyr)
library(stringr)
#read table of pitchers' statistics from baseball-reference
bref = "https://www.baseball-reference.com/leagues/MLB/2019-standard-pitching.shtml#players_standard_pitching::none"
bref = read_html(bref)
table = bref %>% html_nodes(xpath = '//comment()') %>%
html_text() %>%
paste(collapse = '') %>%
read_html() %>%
html_node('table#players_standard_pitching') %>%
html_table() %>%
.[colSums(is.na(.)) < nrow(.)]
table <- data.frame(table[table$Name != "Name",])
table <- as.data.frame(table)
table[-c(2, 4, 5)] <- sapply(table[-c(2, 4, 5)], as.numeric)
#convert partial innings to correct decimals
table$IP = gsub("\\.1", ".33", table$IP)
table$IP = gsub("\\.2", ".66", table$IP)
#summarize using relevant metrics
breftable = table %>%
group_by(Name) %>%
summarise(bf = sum(as.numeric(BF)),
kpct = sum(as.numeric(SO))*100/sum(as.numeric(BF)),
bbpct = sum(as.numeric(BB))*100/sum(as.numeric(BF)),
hbppct = sum(as.numeric(HBP))*100/sum(as.numeric(BF)),
era = round(sum(as.numeric(ER))*9/sum(as.numeric(IP)), 2)) %>%
filter(bf > 100)
#separate lastname and firstname for merging/joining later
breftable$firstname = sapply(strsplit(breftable$Name, "\\s+"), function(x) x[1])
breftable$lastname = sapply(strsplit(breftable$Name, "\\s+"), function(x) x[length(x)])
breftable$lastname = str_replace(breftable$lastname, '\\*', '')
breftable = breftable %>%
select(-c(Name))
#collect statcast data
url <- "https://baseballsavant.mlb.com/leaderboard/statcast?type=pitcher&year=2019&position=&team=&min=100"
brl = read_html(url)
brl <- brl %>%
html_nodes("script")
info = as.character(brl[10])
obs = as.vector(strsplit(info, "name"))
#select strings that contain names
namecands = c()
for (i in 2:length(obs[[1]])){
namecands = c(namecands, substr(obs[[1]][i], 4, 30))
}
names = c()
for (j in 1:length(namecands)){
names = c(names, sub('",.*', '', namecands[j]))
}
names <- names[(1:(length(names)/3))*3-2]
lastname = sub(',.*', '', names)
firstname = sub('.*, ', '', names)
#collect barrel rate info
brlpa = c()
for (k in 1:length(obs[[1]])){
if (grepl("barrels_per_pa", obs[[1]][k]) == TRUE){
brlpos = gregexpr("barrels_per_pa", obs[[1]][k])[[1]][1]
brlpastring = substr(obs[[1]][k], brlpos + 17, brlpos + 21)
brlpastring <- gsub("\"", "", brlpastring, fixed = TRUE)
brlpastring <- gsub(",", "", brlpastring)
brlpa <- c(brlpa, brlpastring)
}
}
barrels = data.frame(
lastname = lastname,
firstname = firstname,
brl_pa = as.numeric(brlpa)
)
#merge data
data = merge(breftable, barrels)
data$bbhbppct = data$bbpct + data$hbppct
#build linear regression model
model = lm(data = data, formula = era ~ kpct + bbhbppct + brl_pa)
#predict using model (adjust batters faced in filter function if needed)
data$BarrelFIP = round(predict(model), 2)
brlDF <- data %>%
filter(bf > 150) %>%
select(firstname, lastname, bf, era, BarrelFIP) %>%
mutate(diff = era - BarrelFIP)
View(brlDF)
|
2c34649ed859051692c50fe9befde8990ee0c775
|
04a7c98ebecf2db764395c90455e8058711d8443
|
/man/asv_best_PC_df.Rd
|
a4b737a08f4c7d831a6e14c7bb5314735bde044b
|
[] |
no_license
|
Alice-MacQueen/switchgrassGWAS
|
f9be4830957952c7bba26be4f953082c6979fdf2
|
33264dc7ba0b54aff031620af171aeedb4d8a82d
|
refs/heads/master
| 2022-02-01T01:12:40.807451
| 2022-01-17T20:56:20
| 2022-01-17T20:56:20
| 198,465,914
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 668
|
rd
|
asv_best_PC_df.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pvdiv_gwas.R
\name{asv_best_PC_df}
\alias{asv_best_PC_df}
\title{Return best number of PCs in terms of lambda_GC following Cattrell's rule.}
\usage{
asv_best_PC_df(df)
}
\arguments{
\item{df}{Dataframe of phenotypes where the first column is NumPCs and
subsequent column contains lambda_GC values for some phenotype.}
}
\value{
A dataframe containing the best lambda_GC value and number of PCs
for each phenotype in the data frame.
}
\description{
Given a dataframe created using pvdiv_lambda_GC, this function
returns the lambda_GC that is closest to 1 for each column in the
dataframe.
}
|
27a69f126bb52cc203e2634124db387ba07b39fa
|
e5a9f6ab465cd0f28c26f95fc781ba59927edf8b
|
/R/mult-mshapes.R
|
28c2b184308040851492d9244acde36190d38001
|
[] |
no_license
|
stas-malavin/Momocs
|
550266555ab7724a01ca3b58777bb623de433566
|
44789d9bce9fe923a9af128f0493d16a73fe9fdd
|
refs/heads/master
| 2020-12-26T15:49:20.330404
| 2017-03-08T20:43:13
| 2017-03-08T20:43:13
| 59,151,808
| 0
| 1
| null | 2016-05-18T21:15:59
| 2016-05-18T21:15:58
| null |
UTF-8
|
R
| false
| false
| 6,482
|
r
|
mult-mshapes.R
|
##### mean shapes on coefficients todo: better handling of $slots
##### (eg r2 for Opn, etc.)
#' Mean shape calculation for Coo, Coe, etc.
#'
#' Quite a versatile function that calculates mean (or median, or whatever function)
#' on list or an array of shapes, an Ldk object. It can also be used on OutCoe and OpnCoe objects.
#' In that case, the reverse transformation (from coefficients to shapes) is calculated, (within
#' groups defined with the fac argument if provided) and the Coe object is returned.
#'
#' @param x a list, array, Ldk, LdkCoe, OutCoe or OpnCoe or PCA object
#' @param fac factor from the $fac slot (only for Coe objects). See examples below.
#' @param FUN a function to compute the mean shape (\link{mean} by default, by \link{median} can be considered)
#' @param nb.pts numeric the number of points for calculated shapes (only Coe objects)
#' @param ... useless here.
#' @return the averaged shape; on Coe objects, a list with two components: \code{$Coe} object of the same class, and
#' \code{$shp} a list of matrices of (x, y) coordinates.
#' @details Note that on Coe objects, the average can be made within levels of the passed $fac (if any);
#' in that case, the other columns of the fac are also returned, usingthe first row within every level, but they may
#' not be representive of the group. Also notice that for PCA objects, mean scores are returned
#' within a PCA object (accesible with PCA$x) that can be plotted directly but other slots are left
#' unchanged.
#' @rdname mshapes
#' @examples
#' #### on shapes
#' data(wings)
#' mshapes(wings)
#' mshapes(wings$coo)
#' data(bot)
#' mshapes(coo_sample(bot, 24)$coo)
#' stack(wings)
#' coo_draw(mshapes(wings))
#'
#' data(bot)
#' bot.f <- efourier(bot, 12)
#' mshapes(bot.f) # the mean (global) shape
#' ms <- mshapes(bot.f, 'type')
#' ms$Coe
#' class(ms$Coe)
#' ms <- ms$shp
#' coo_plot(ms$beer)
#' coo_draw(ms$whisky, border='forestgreen')
#' tps_arr(ms$whisky, ms$beer) #etc.
#'
#' data(olea)
#' op <- npoly(filter(olea, view=='VL'), 5)
#' ms <- mshapes(op, 'var') #etc
#' ms$Coe
#' panel(Opn(ms$shp), names=TRUE)
#'
#' data(wings)
#' wp <- fgProcrustes(wings, tol=1e-4)
#' ms <- mshapes(wp, 1)
#' ms$Coe
#' panel(Ldk(ms$shp), names=TRUE) #etc.
#' panel(ms$Coe) # equivalent (except the $fac slot)
#' @rdname mshapes
#' @export
mshapes <- function(x, ...) {
UseMethod("mshapes")
}
#' @rdname mshapes
#' @export
mshapes.list <- function(x, FUN=mean, ...) {
A <- ldk_check(x)
return(apply(A, 1:2, FUN, na.rm = TRUE))
}
#' @rdname mshapes
#' @export
mshapes.array <- function(x, FUN=mean, ...) {
if (length(dim(x)) == 3) {
A <- ldk_check(x)
return(apply(A, 1:2, FUN, na.rm = TRUE))
}
}
#' @rdname mshapes
#' @export
mshapes.Ldk <- function(x, FUN=mean, ...) {
Ldk <- x
A <- ldk_check(Ldk$coo)
return(apply(A, 1:2, mean, na.rm = TRUE))
}
#' @rdname mshapes
#' @export
mshapes.OutCoe <- function(x, fac, FUN=mean, nb.pts = 120, ...) {
OutCoe <- x
nb.h <- ncol(OutCoe$coe)/4 #todo
if (missing(fac)) {
message("no 'fac' provided, returns meanshape")
coe.mshape <- apply(OutCoe$coe, 2, FUN)
xf <- coeff_split(coe.mshape, nb.h, 4)
return(efourier_i(xf, nb.pts = nb.pts))
}
f <- OutCoe$fac[, fac]
fl <- levels(f)
shp <- list()
rows <- numeric()
coe <- matrix(NA, nrow = nlevels(f), ncol = ncol(OutCoe$coe),
dimnames = list(fl, colnames(OutCoe$coe)))
for (i in seq(along = fl)) {
coe.i <- OutCoe$coe[f == fl[i], ]
rows[i] <- which(f == fl[i])[1]
if (is.matrix(coe.i)) {
coe.i <- apply(coe.i, 2, FUN)
}
coe[i, ] <- coe.i
xf <- coeff_split(cs = coe.i, nb.h = nb.h, cph = 4)
shp[[i]] <- efourier_i(xf, nb.h = nb.h, nb.pts = nb.pts)
}
names(shp) <- fl
Coe2 <- OutCoe
Coe2$coe <- coe
Coe2$fac <- slice(Coe2$fac, rows)
return(list(Coe = Coe2, shp = shp))
}
#' @rdname mshapes
#' @export
mshapes.OpnCoe <- function(x, fac, FUN=mean, nb.pts = 120, ...) {
OpnCoe <- x
#todo: check if method is all identical
p <- pmatch(tolower(OpnCoe$method[1]), c("opoly", "npoly", "dfourier"))
if (is.na(p)) {
warning("unvalid method. efourier is used.\n")
} else {
method_i <- switch(p, opoly_i, npoly_i, dfourier_i) # dfourier_i
}
n <- length(OpnCoe$mshape) #todo
if (missing(fac)) {
message("no 'fac' provided, returns meanshape")
coe.mshape <- apply(OpnCoe$coe, 2, FUN)
mod.mshape <- OpnCoe$mod
mod.mshape$coefficients <- coe.mshape
return(method_i(mod.mshape))
}
f <- OpnCoe$fac[, fac]
fl <- levels(f)
shp <- list()
rows <- numeric()
coe <- matrix(NA, nrow = nlevels(f), ncol = ncol(OpnCoe$coe),
dimnames = list(fl, colnames(OpnCoe$coe)))
mod.mshape <- OpnCoe$mod
for (i in seq(along = fl)) {
coe.i <- OpnCoe$coe[f == fl[i], ]
rows[i] <- which(f == fl[i])[1]
if (is.matrix(coe.i)) {
coe.i <- apply(coe.i, 2, FUN)
}
mod.mshape$coeff <- coe.i
coe[i, ] <- coe.i
shp[[i]] <- method_i(mod.mshape)
}
names(shp) <- fl
Coe2 <- OpnCoe
Coe2$coe <- coe
Coe2$fac <- slice(Coe2$fac, rows)
return(list(Coe = Coe2, shp = shp))
}
#' @rdname mshapes
#' @export
mshapes.LdkCoe <- function(x, fac, FUN=mean, ...) {
LdkCoe <- x
if (missing(fac)) {
message("no 'fac' provided. Returns meanshape")
return(mshapes(LdkCoe$coo))
}
f <- LdkCoe$fac[, fac]
fl <- levels(f)
shp <- list()
rows <- numeric()
for (i in seq(along = fl)) {
shp[[i]] <- mshapes(LdkCoe$coo[f == fl[i]], FUN=FUN)
rows[i] <- which(f == fl[i])[1]
}
names(shp) <- fl
Coe2 <- Ldk(shp, fac=slice(LdkCoe$fac, rows))
return(list(Coe = Coe2, shp = shp))
}
#' @rdname mshapes
#' @export
mshapes.PCA <- function(x, fac, ...){
# cehck for single individuals within a group..
x0 <- x
f <- x$fac[, fac]
x <- x$x
res <- matrix(NA, nrow=nlevels(f), ncol=ncol(x),
dimnames=list(levels(f), colnames(x)))
for (i in seq(along=levels(f))){
x.i <- x[f == levels(f)[i], ]
if (!is.matrix(x.i)) {
res[i, ] <- x.i
next()
}
res[i, ] <- apply(x.i, 2, mean)
}
x0$x <- res
# should retain the true name and not "fac"
x0$fac <- data.frame(fac=levels(f))
x0
}
#' @export
#' @rdname mshapes
MSHAPES <- mshapes
##### end mshapes
|
64cd4871b94f242ef94a08d12e8273feb428a42b
|
3124d10b460158c08f40e4490a557bfcb98510dd
|
/3 writing functions.R
|
c1e132399aeeb4ab5c8d6119bae66f8404f491e8
|
[] |
no_license
|
ChenKozulin/writing-functions
|
a555cc7aabe84b5c52b8f8bbf5c4235fb374aa41
|
933e68b09d4534d4c888f6ba1b87e28d92683591
|
refs/heads/master
| 2020-07-22T07:25:12.153407
| 2016-12-12T15:47:05
| 2016-12-12T15:47:05
| 73,831,531
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,161
|
r
|
3 writing functions.R
|
#' Penman Montieth Model with canopy conductance
#'
#' THis function computer evapotranspiration based on Penman Montieth Model
#' including canopy conductance
#' Canopy conductance paramters:
#'
#' @param Cleaf (mmol m⁻² s⁻¹) water vapor exiting through the canopy
#' @param Carbon % of carbon dioxide (CO2) in the environment
#' @param RLWC (root/leaf water content)
#' @param StoDen (No. of stomata/mm^2 leaf) stomatal density
#' @author Chen
#' @return canopy_conductance (mm/day)
# Determine leaf conductance (mmol m⁻² s⁻¹)
cleaf<- c(17.577,17.841,20.208,25.942,26.274,27.051,27.792,28.1,29.008,29.276)
#Determine % of CO2 in the environment
Carbon <- c(16.937,17.893,18.817,19.765,20.745,20.683,18.680,17.647,16.609)
#Determine LRWC (no units)
LRWC<-c(69.70,68.54,67.94,67.7,67.29,67.06,66.86,66.73,66.57)
#Determine StoDen (mm⁻²)
STDN<-c(25.95,26.74,27.69,28.79,29.19,30.33,31.99,32.16,32.38)
Canop_Conductance=function (cleaf,Carbon,LRWC,STDN,Rnet,vdp,Tair){
conductance= (cleaf*Carbon*LRWC*STDN*Rnet*vdp*Tair)
result=data.farme(cleaf,Carbon,LRWC,STDN)
return(conductance)
return(plot(conductance,result$Carbon))
}
|
f66af1fa7c7d74b8efd9d58a10cc7a5da52fc296
|
6e1dd29fe70ee95bb0971e42881f8bc8275a4eb7
|
/man/util_summary.Rd
|
8f6793ccbd9880826f7e4e8af575ff74547275e9
|
[] |
no_license
|
githubfun/PortfolioEffectHFT
|
9592efeb6d21f7cff1758e2217bd1e00c9ab7c6b
|
66d8ef8a1266da762a2e4f2b805245efb23ba725
|
refs/heads/master
| 2018-05-02T23:20:29.881634
| 2016-09-17T07:12:35
| 2016-09-17T07:12:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 993
|
rd
|
util_summary.Rd
|
\name{util_summary}
\alias{util_summary}
\title{Portfolio Summary Plot}
\usage{util_summary(portfolio, bw = FALSE)
}
\arguments{
\item{portfolio}{Portfolio object created using \link[=portfolio_create]{portfolio_create( )} function}
\item{bw}{Black and white color scheme flag.}
}
\description{
Plots a number of charts to give summary of a given portfolio.
}
\author{Kostin Andrey <andrey.kostin@portfolioeffect.com>}
\seealso{\code{\link{portfolio_create}} }
\examples{
\dontrun{
dateStart = "2014-11-17 09:30:00"
dateEnd = "2014-11-17 16:00:00"
portfolio=portfolio_create(dateStart,dateEnd)
portfolio_settings(portfolio,portfolioMetricsMode="price",windowLength = '3600s',
resultsSamplingInterval='60s')
positionAAPL=position_add(portfolio,'AAPL',100)
positionC=position_add(portfolio,'C',300)
positionGOOG=position_add(portfolio,'GOOG',150)
util_summary(portfolio)
}}
\keyword{PortfolioEffectHFT}
%\concept{plot market data, plot}
\keyword{util_summary}
|
d3c5767811e441c55fea6c9b4aef445127007cab
|
c477a475ed696cba156f9cb99e649617fa1779f9
|
/inst/tests/sourcev2.R
|
4973e612f04279cf312024526c78394a391f5f4f
|
[] |
no_license
|
cran/clinDR
|
fc07112734fbee827bab68fe9f357192f0eae1af
|
94ad283337e7811d5b4ef38c588db7f086c7b27d
|
refs/heads/master
| 2023-08-20T20:09:34.510253
| 2023-08-09T04:20:05
| 2023-08-09T05:30:30
| 113,064,616
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,407
|
r
|
sourcev2.R
|
fitModel<-function(id,y,trt,visit,prmean0,prsd0,prmean,prsd,gparm1=3,gparm2=1.5,
mcmc=mcmc.control()){
## id must be 1,2,3... without any skipped indices (a patient with
## no observed resp must be deleted)
## trt must be 1,2
## visits must be numbered sequential 1,2,..., individual visits can be skipped
## but there must be at least 1 measurement for some patient at each visit
## resp is 0/1
## remove na
indNA<-!is.na(y)
id<-id[indNA]
y<-y[indNA]
trt<-trt[indNA]
visit<-visit[indNA]
### check and format inputs
trtcheck<-sort(unique(trt))
ntrt<-length(trtcheck)
if(any(trtcheck!=1:ntrt))stop('trt must be sequentially numbered without skipping')
if(!all(y %in% c(0,1)))stop('y must be 0/1')
idcheck<-sort(unique(id))
nsubj<-max(idcheck)
if(any(idcheck!=1:nsubj))stop('id must be sequentially numbered without skipping')
vcheck<-sort(unique(visit))
nvisit<-max(vcheck)
if(any(vcheck!=1:nvisit))stop('visits must be sequentially numbered')
N<-length(id)
ntrt1<-ntrt-1
### stan fit
indata<-c('N','nsubj','nvisit','ntrt','ntrt1','id','trt','visit','y',
'prmean0','prsd0','prmean','prsd','gparm1','gparm2')
parameters<-c('beta','sigma','theta')
estanmod<-stan_model(file='imputeMIL2.stan',
save_dso=TRUE,auto_write=FALSE,model_name='imputeMI')
stanfit<-sampling(estanmod,data=indata,
chains=mcmc$chains,
warmup=mcmc$warmup,iter=mcmc$iter,thin=mcmc$thin,seed=mcmc$seed,
pars=parameters,
control=list(adapt_delta=mcmc$adapt_delta),
cores = mcmc$chains
)
#############################################################
### convert generated parms to R matrices
beta<-as.matrix(stanfit,pars='beta')
beta<-array(as.vector(beta),dim=c(nrow(beta),max(visit),max(trt)))
sigma<-as.vector(as.matrix(stanfit,pars='sigma'))
theta<-as.matrix(stanfit,pars='theta')
return(list(stanfit=stanfit,beta=beta,sigma=sigma,theta=theta))
}
inputmi<-function(id,trt,y,visit,trtsel=1:2,vsel=max(visit)){
### data set with 2 selected trts and a single
### visit on 1 record per patient is created
### missing y MUST be represented by NA in the input data set
indsub<- (trt%in%trtsel) & visit==vsel
id<-id[indsub]
trt<-trt[indsub]
y<-y[indsub]
visit<-visit[indsub]
m<-!is.na(y)
return(list(id=id,trt=trt,y=y,m=m))
}
miprobs<-function(mdat,vsel,beta,sigma,nimp=100){
## observed data and imputation probabilities conditional
## on mcmc parameters for missing data
if(nimp>dim(beta)[1])stop('too many imputations requested')
nsubj<- length(mdat$id)
probs<-matrix(numeric(nsubj*nimp),ncol=nimp)
probs[mdat$m==1,]<-mdat$y[mdat$m==1]
missind<-which(mdat$m==0)
for(imp in c(1:nimp)){
for(i in missind){
probs[i,imp]<-plogis(beta[imp,vsel,mdat$trt[i]]+sigma[imp]*theta[imp,i])
}
}
return(probs)
}
midat<-function(mprobs,trt,m,deltat=0,deltac=0,f=mnse, returnYimp=FALSE, ...){
### multiple imputation results
### mi probs output by miprobs
### trt and m output by inputmi
### deltat, deltac for tipping point analyses
###
### default complete data SE using mnse with delmn=0
### add delmn=xx to change the assumed dif in mnse
### other se-computing functions can be supplied
### they must have the same call and return forms
### as mnse. different additional parameters can
### be supplied (in place of delmn)
nsubj<-nrow(mprobs)
nimp<-ncol(mprobs)
ptimp<-numeric(nimp)
pcimp<-numeric(nimp)
sedelimp<-numeric(nimp)
yimp<-matrix(numeric(nsubj*nimp),ncol=nimp)
yimp[m==1,]<-mprobs[m==1,1] ## observed data
missind<-which(m==0)
nmis<-length(missind)
trtmis<-trt[m==0]
if(nmis>0){
doff<-rep(deltac,nmis)
doff[trtmis==max(trt)]<-deltat
for(imp in 1:nimp){
genprob<-mprobs[missind,imp]-doff
genprob[genprob<0]<-0
genprob[genprob>1]<-1
yimp[missind,imp]<-rbinom(nmis,1,genprob)
impout<-f(yimp[,imp],trt, ...)
ptimp[imp]<-impout$phatt
pcimp[imp]<-impout$phatc
sedelimp[imp]<-impout$sedel
}
}else stop('no missing responses')
difp<-ptimp-pcimp
miest<-mean(difp)
vb<-var(difp)
vw<-mean(sedelimp^2)
mise<-sqrt( vw + (1+(1/nimp))*vb ) ### validated vs mitools package
midf<-(nimp-1)*( 1 + vw/((1+1/nimp)*vb))^2
if(!returnYimp){return(list(miest=miest,mise=mise,midf=midf))
}else return(list(miest=miest,mise=mise,midf=midf,yimp=yimp))
}
mnse<-function(y,trt,delmn=0){
## mietenen se calculation
nt<-sum(trt==2)
nc<-sum(trt==1)
phatt<-mean(y[trt==2])
phatc<-mean(y[trt==1])
theta<-nt/nc
d<-phatc*delmn*(1-delmn)
c<-delmn^2-delmn*(2*phatc+theta+1)+phatc+theta*phatt
b<- -(1+theta+phatc+theta*phatt-delmn*(theta+2))
a<-1+theta
vu<-b^3/(3*a)^3 - b*c/(6*a^2) + d/(2*a)
u<-sign(vu)*sqrt( b^2/(3*a)^2 - c/(3*a) )
w<-(pi + acos(vu/u^3))/3
pcr<-2*u*cos(w) - b/(3*a)
ptr<- pcr+delmn
sec<-sqrt( pcr*(1-pcr)/nc )
setrt<-sqrt( ptr*(1-ptr)/nt )
sedel<- sqrt( pcr*(1-pcr)/nc + ptr*(1-ptr)/nt )
return(list(phatc=phatc,phatt=phatt,sedel=sedel))
}
binNorm<-function(y,trt, ...){
nt<-sum(trt==2)
nc<-sum(trt==1)
phatt<-mean(y[trt==2])
phatc<-mean(y[trt==1])
sedel<-sqrt( phatc*(1-phatc)/nc + phatt*(1-phatt)/nt )
return(list(phatc=phatc,phatt=phatt,sedel=sedel))
}
|
8151c131b804dce4ad025aa4e1933db7a9fde5ef
|
bd1fe05a42481abc1e647ea909d64229ad10fe5c
|
/sentiment_test.R
|
d91a294c7b261a247a273ccd7cd3c9fe522147a7
|
[] |
no_license
|
Science-for-Nature-and-People/soc-twitter
|
9f4fabc750b45bc4df169a980dd0fd2f640dc7d6
|
9f0371169984e5f0ffd9aad26b0ee57d7e49ccf6
|
refs/heads/master
| 2021-06-26T21:25:23.553795
| 2020-10-20T04:01:04
| 2020-10-20T04:01:04
| 130,888,103
| 1
| 4
| null | 2020-05-07T22:24:29
| 2018-04-24T17:08:13
|
HTML
|
UTF-8
|
R
| false
| false
| 3,035
|
r
|
sentiment_test.R
|
#########################################
# Testing sentiment analysis with tweets#
# This scripts analyzes twitter data #
#########################################
library(tidytext)
library(wordcloud)
library(tidyverse)
library(dplyr)
#### loading data
# not: uncomment if not yet loaded
# twitter.data.full<-stream_in("/home/shares/soilcarbon/Twitter/twitter.json")
#
# class(twitter.data.full)
# names(twitter.data.full)
# str(twitter.data.full)
# test1<-sample_n(twitter.data.full, 10)
### Parsing through tweets ####
# Selecting relevant columns:
# Keep only the columns needed for text analysis.
# NOTE(review): data.frame(cbind(...)) routes everything through a character
# matrix, so all five columns end up as text; fine here since they are only
# ever used as text downstream.
main_tweet_columns<-data.frame(cbind(twitter.data.full$actor.displayName, twitter.data.full$actor.summary,
                                     twitter.data.full$body,
                                     twitter.data.full$object.summary, twitter.data.full$postedTime))
##Renaming columns:
colnames(main_tweet_columns)<-c("name", "actorSummary", "tweet_body", "tweet_body_noRT", "time")
## Sample dataset:
##took sample and call the all_tweets_column - started with a small sample of 100 tweets and then enlarge. now chose 90000
# main_tweet_columns_sample<-sample_n(main_tweet_columns, 10) #comment out when running twitter.data.full
##Call whole dataset
main_tweet_columns_sample <- main_tweet_columns
##Separate tweets and make edited column where we can manipulate:
tweets <- main_tweet_columns_sample %>%
  select(tweet_body) %>% # take only raw tweets
  mutate(tweet_edited=as.character(tweet_body)) %>% # coerce to character (raw column may be a factor)
  mutate(tweet_edited=tolower(tweet_edited)) %>% # make lower case (was tolower(tweet_body), which silently discarded the previous step)
  # mutate(tweets_edited=str_replace_all(tweets_edited, ' ' , '_')) %>% #put underscores instead of spaces (removed - not necessary)
  filter(!is.na(tweet_edited)) #remove NA rows
# note: there are 73074 out of 96553 tweets that are valid. 23479 NA rows.
##Unnest to separate by words
unnest_tweets <- tweets %>%
  unnest_tokens(word, tweet_edited) #unnest to get words
##Count table with sorted words by number of times seen
tweet_counts <- unnest_tweets %>%
  anti_join(stop_words) %>%
  count(word, sort=TRUE) %>%
  filter(!word %in% c("https","rt","t.co")) # drop URL/retweet artifacts
##Wordcloud
tweet_counts %>%
  with(wordcloud(word, n, max.words=200, color=brewer.pal(7,"Dark2")))
###Sentiment analysis
# in general, get_sentiment has afinn scores/ranks from -5 to +5 for positive or negative sentiment
# get_sentiments("afinn") %>%
#   head(20)
#For our Tweets: join each word to its nrc sentiment category.
tweets_sentiment <- unnest_tweets %>%
  left_join(get_sentiments("nrc"), by = "word") %>%
  filter(!is.na(sentiment)) # was sentiment != "NA" (string compare); words with no match produce real NA after left_join
## Sorting words with associated adjective:
count_sentiment <- tweets_sentiment %>%
  count(word, sentiment, sort=TRUE)
count_sentiment
## group sentiment adjectives
total_sentiment <- count_sentiment %>%
  group_by(sentiment) %>%
  summarise(totals=sum(n)) %>%
  arrange(-totals)
total_sentiment
#graph: total word counts per sentiment category
ggplot(total_sentiment)+
  geom_col(aes(x=sentiment, y=totals))
# note: nrc dictionary not best for assessing sentiment about soil health. I.e. soil matched with "disgust."
|
969f3ac197e5834a66e43e16cba2dcb52e8db8a7
|
d2e4e8b0fde53e8e331e275f8a8777650381d5fd
|
/plo6.R
|
0cf79aacea2f3e7b327951a2696c61f373662b29
|
[] |
no_license
|
Shivam-1117/EDA-Course-Project-2
|
3fcd82ca4f6ccf1176cc7113903e3b8cce117550
|
328acaf7262f14b19d2f3d4eeb4f843dacc6e7a1
|
refs/heads/master
| 2022-11-23T19:33:07.705513
| 2020-07-26T14:41:44
| 2020-07-26T14:41:44
| 282,592,435
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 943
|
r
|
plo6.R
|
# Load the EPA source-classification table and the PM2.5 emissions summary.
scc <-readRDS("Source_Classification_Code.rds")
nei <-readRDS("summarySCC_PM25.rds")
# Identify motor-vehicle-related sources by short name and map them to SCC codes
# (columns 1 and 3 are the SCC code and the short name).
s <- grep("Motor", scc$Short.Name, value = TRUE)
id <- scc[scc$Short.Name %in% s, c(1, 3)]
# Keep motor-vehicle emissions for Baltimore City (fips 24510) and
# Los Angeles County (fips 06037) only.
nei <- nei[nei$SCC %in% id$SCC & nei$fips %in% c("24510", "06037"), c("fips", "SCC", "Emissions", "year")]
# Map a fips code to its county name:
# "24510" -> "Baltimore City"; anything else -> "Los Angeles County".
renaming <- function(x){
        if (x == "24510") "Baltimore City" else "Los Angeles County"
}
# Tag each row with a readable county name, then total emissions per year/county.
nei$County <- sapply(nei$fips, renaming)
library(tidyverse)
emissions <- data.frame(nei %>% group_by(year, County) %>% summarise(emissions = sum(Emissions)))
library(ggplot2)
# One panel per county; line shows the yearly motor-vehicle emission total.
g <- ggplot(emissions, aes(year, emissions))
g <- g + geom_line() + facet_grid(.~County) + labs(x = "Year") + labs(y = "Emission (tons)") +
        labs(title = "Emissions from motor vehicles sources in Baltimore & Los Angeles County over 1999-2008") +
        theme(plot.title = element_text(hjust = 0.5))
print(g)
# Save the figure alongside the script.
dev.copy(png, "plot6.png")
dev.off()
|
ff5012fb175a2a013976e09df5f4581802e020b6
|
395ad7f5c669bc493a30c6d2c220534003f184b2
|
/R/functional.difftest.R
|
e574bb5b27cbe612222d4b4c508290201e77382c
|
[
"MIT"
] |
permissive
|
elmahyai/TPDT
|
5a28b5aae310f32a9a45dd4bf3337b909ae050bf
|
5c8714376f235a1f96050bf36c5431f876094056
|
refs/heads/master
| 2021-11-30T21:08:12.787725
| 2017-11-02T13:04:39
| 2017-11-02T13:04:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,583
|
r
|
functional.difftest.R
|
#' @export
#' @import fda
# Main function for TPDT: resampling-based test for a difference between two
# groups of functional data. Computes the observed statistic via compute.u(),
# builds a null distribution by repeatedly simulating curves around a zero
# function with rfunc(), and returns the raw p-value.
#
# Arguments as used below:
#   rawdata   - optional list(time, y1, y2) of raw curves, smoothed internally
#               (only consulted when funcdata is NULL).
#   funcdata  - optional list(func1, func2) of fda functional-data objects;
#               takes precedence over rawdata.
#   N         - number of curves simulated per group in the demo/rawdata path.
#   Nsim      - curves per null draw; defaults to ncol(dif$coefs) when missing.
#   B         - number of resampling repetitions.
#   shift     - vertical offset added to the simulated second group (demo path).
#   sigma     - noise level passed to rfunc() in the demo path.
#   dependent - forwarded to compute.u().
#   deriv     - if > 0, test the deriv-th derivative of the curves instead.
#   ncores    - cores for parallel::mclapply.
#               NOTE(review): the default `ncores = ncores` is self-referential
#               and errors if the argument is not supplied -- confirm callers
#               always pass it.
# Returns list(stat = observed statistic, resam = null draws, pval = p-value).
functional.difftest <- function(rawdata = NULL, funcdata = NULL, N = 10, Nsim, B = 1000,
                                shift = 0, sigma = 0, dependent = F, deriv = 0, ncores = ncores){
  # determine which kind of data is used
  if(is.null(funcdata)){
    warning('internal warning: No functional data object provided to functional.difftest! Please only use next code chunk experimentally!')
    if(is.null(rawdata)){
      # Fallback demo data: two noisy sine curves, the second offset by `shift`.
      time <- seq(0, 10, .1)
      y1 <- sin(time) + rnorm(length(time), sd = 0.01)
      y2 <- sin(time) + rnorm(length(time), sd = 0.01) + shift
    }
    else{
      time <- rawdata$time
      y1 <- rawdata$y1
      y2 <- rawdata$y2
    }
    # Smooth the raw observations onto a common B-spline basis.
    basis <- fda::create.bspline.basis(range(time), 15) ###############
    Par <- fda::fdPar(basis, 2, lambda = .1) ###############
    func1 <- fda::smooth.basis(time, y1, Par)$fd
    func2 <- fda::smooth.basis(time, y2, Par)$fd
    # Simulate N noisy functional observations around each smoothed curve.
    group1 <- rfunc(N, func1, sigma = sigma)
    group2 <- rfunc(N, func2, sigma = sigma)
  }
  else{
    group1 <- funcdata$func1
    group2 <- funcdata$func2
    basis <- group1$basis
  }
  if(deriv > 0){
    # Test differences between derivatives instead of the raw curves.
    group1 <- fda::deriv.fd(group1, deriv)
    group2 <- fda::deriv.fd(group2, deriv)
  }
  # compute functional differences and their mean curve
  dif <- group1 - group2
  mdif <- fda::mean.fd(dif)
  # compute statistic u and contained variability
  u0 <- compute.u(dif = dif, basis = basis, dependent = dependent)
  sig0 <- u0$sigma
  u0 <- u0$stat
  # Null model: resample around a functional constant at 0 (coefficients zeroed).
  m0 <- mdif
  m0$coefs <- m0$coefs*0
  # how many curves per resampling draw
  if(missing(Nsim)) Nsim <- ncol(dif$coefs)
  # One null draw: simulate Nsim curves around zero and recompute the statistic.
  mcfunc <- function(b){
    simdif <- rfunc(N = Nsim, func = m0, sigma = sig0)
    compute.u(dif = simdif, basis = basis)$stat
  }
  # if more than one core, avoid overhead by using ncores threads
  # with the same number of repetitions to do in parallel
  # if(ncores > 1) {
  #   inds <- split(1:B, 1:ncores)
  #   resample.u <- unlist(parallel::mclapply(inds, function(b_vector) {
  #     sapply(b_vector, mcfunc)
  #   }, mc.cores = ncores))
  # } else {
  resample.u <- unlist(parallel::mclapply(1:B, mcfunc, mc.cores = ncores,
                                          mc.preschedule = TRUE))
  # }
  # resample.u <- sapply(resample.u, c)
  # compute raw p-value: share of null draws exceeding the observed statistic
  pval <- mean(u0 < resample.u)
  # return the statistic for the original data, the sampling distribution and the p-value
  return(list(stat = u0, resam = resample.u, pval = pval))
}
|
b5abd90a8b316ebd24bc10ea07f03d29dcbaa9be
|
bfc6805099bffaa9d166a8fbdc803728e1b3fdf0
|
/0_Data_Simulation/Simulate_Data1.R
|
fe2125cec7df4aa8af963af8b7027a6df0479606
|
[] |
no_license
|
etzkorn/Causal_Activity
|
f76d011d5501418bc495fdeca44242d3fc45e718
|
c2a6c86c1cb303b01942705810b3ce9da7896a90
|
refs/heads/master
| 2020-07-27T07:05:47.386906
| 2018-04-01T23:56:08
| 2018-04-01T23:56:08
| 73,860,391
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,414
|
r
|
Simulate_Data1.R
|
###############################################
# This is the most simple data-generating scenario:
# step counts only depend on the treatment (amputation)
# In this script we generate data for the
# following scenario:
# We sample N people from a population of 10,000
# and each person has recorded step counts on one-minute
# intervals over the course of a day. Values for amputation
# status are randomly and independently
# assigned to each person with probability 0.5.
# We observe only step counts for the assigned “treatment”.
# Step counts are not dependent on any other unobserved
# factors.
###############################################
# Set Parameters
# population size
popN = 10000
# sample size
N = 30
###############################################
# Population Generating Curves
# (Not expectations, just a place to start)
# population under A = 1: per-minute expected steps over a 1440-minute day
t = 1:1440
population.a1 = 7 * (sin(t*pi/1440)^9) * (sin(t*pi/1440*8)+1) + 3*sin(t*pi/1440)^3 + 0.1
# plot(population.a1~t, type="l", col="blue")
# population under A = 0
population.a0 = 3 * (sin(t*pi/1440)^10) * (sin(t*pi/1440*10+150)+1) + 5*sin(t*pi/1440)^5 + 0.1
# lines(population.a0~t, type="l", col="red")
# legend(x=0, y=20, box.lwd = 0, fill = c("red","blue"), legend = c("A = 0", "A = 1"))
# population proportion between curves (used later to derive A = 0 curves)
pop.prop = population.a1 / (population.a0 + population.a1)
# plot(pop.prop~t, type="l")
###############################################
# Individual E(Steps(i)|A)
# Simulate one individual's expected-step curve: randomly rotate the
# population curve by up to 180 minutes, then modulate it with a random
# sinusoid whose factor lies in [1, 2].
# population.a1: numeric vector of per-minute population expected steps
#                (one value per minute of a 1440-minute day).
# Returns a numeric vector of the same length.
generate.curve = function(population.a1){
  t = 1:1440
  # Random circular shift of up to 180 minutes.
  z = sample(1:180, 1)
  # seq_len(z - 1) is empty when z == 1. (The original `1:z-1` parsed as
  # (1:z)-1 == 0:(z-1) and only worked because the 0 index is silently
  # dropped -- a precedence trap, now removed.)
  population.a1 = c(population.a1[z:length(population.a1)], population.a1[seq_len(z - 1)])
  # Random frequency / phase / damping for the individual modulation term.
  a = abs(rnorm(1, 10, 1))
  b = rnorm(1, 0, 500)
  c = abs(rnorm(1, 0, 1))
  ((sin((t + b)*pi/1440*a)/(1 + c))^2 + 1) * population.a1
}
# expected steps under amputation: one column per person in the population
individual.a1 = replicate(popN, generate.curve(population.a1))
# expected steps under salvage (everyone has the same treatment proportion)
individual.a0 = individual.a1 / pop.prop - individual.a1
# par(mfrow=c(1,2))
# plot.curves(individual.a1[,1:10], "Individual Curves (A = 1)")
# plot.curves(individual.a0[,1:10], "Individual Curves (A = 0)")
###############################################
# Simulate Steps
# Draw Poisson step counts element-wise, using each entry of the expected
# curve matrix as its own rate; the result keeps the input's dimensions.
generate.steps = function(individual.a1){
  dims = dim(individual.a1)
  draws = rpois(dims[1] * dims[2], individual.a1)
  matrix(draws, dims[1], dims[2])
}
# Realised step counts under each potential treatment.
steps.a1 = generate.steps(individual.a1)
steps.a0 = generate.steps(individual.a0)
# plot.curves(steps.a0[,1:10])
# plot.curves(steps.a1[,1:10])
###############################################
# Get correct answers from entire population
# True causal quantities averaged over the full population (ground truth).
individual.effects = individual.a1-individual.a0
average.effect = rowMeans(individual.effects)
average.a1 = rowMeans(individual.a1)
average.a0 = rowMeans(individual.a0)
# par(mfrow=c(1,3))
# plot(average.effect, type="l")
# plot(average.a1, type="l")
# plot(average.a0, type="l")
###############################################
# Save Data
# Draw the study sample and randomise treatment with probability 0.5;
# only the step counts for the assigned arm are kept in Y.
random.samp = sample(1:popN, N)
a = sample(0:1, N, replace=T)==1
Y = rbind(t(steps.a1[,random.samp][,a]),
	t(steps.a0[,random.samp][,!a]))
A = c(rep(1, sum(a)), rep(0, N-sum(a)))
# Deliberately drop everything except the observed data and the population
# truths before writing the workspace image.
rm(list = setdiff(ls(), c("A", "Y", "average.effect", "average.a1", "average.a0")))
save.image(file="0_Data_Simulation/Simulation1.Rdata")
|
1483e3fbac2d0515ed90cc78a6f41e1113477e6b
|
5b355f0222b604f2a907001966864e3caabdefa0
|
/scripts/wrapperReadYaml.R
|
8ce0a6877d073f6da7f8738fc578a79f65f1b0e9
|
[] |
no_license
|
PolinaPavlovich/Storshow
|
d3ba6aa16f7c96173caf0c967c76e5308771871a
|
ac17a2b5d9d119f9c712e7119eb3a8b0840d337e
|
refs/heads/main
| 2023-07-27T17:11:21.409294
| 2021-09-03T10:07:08
| 2021-09-03T10:07:08
| 332,421,408
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 76
|
r
|
wrapperReadYaml.R
|
# Location of the analysis scripts; help_script.R is expected to live there.
inputDir <- "git_repo/scripts"
source(file.path(inputDir, "help_script.R"))
|
0757900cac1097b3584c9c7f8b61f84ead53928c
|
002095834e32fdae1cae1ae59f4d1e03f826d8ed
|
/Run_SA.R
|
f77a5ad335055c40de754192757a9057ef68c4fe
|
[] |
no_license
|
harrietlmills/UPBEAT-effects
|
accb3d90d0904c9503a1e9d06634f71f2ee4e79a
|
c7ee930aaf711591392106f1388bca9683216bca
|
refs/heads/master
| 2020-04-17T22:09:33.164855
| 2019-01-22T14:13:05
| 2019-01-22T14:13:05
| 166,982,714
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,946
|
r
|
Run_SA.R
|
##### Run sensitivity analyses
# Driver script: runs every sensitivity-analysis variant (defined in
# Functions_forSA.R) and appends each result to a single Excel workbook.
# NOTE(review): clears the whole workspace and changes the working directory;
# intended only as a standalone script.
rm(list=ls(all=TRUE))
# NAME THE RESULTS FILE (date-stamped, e.g. "SensAnalysis_Jan_22_2019")
results_file <- paste0("SensAnalysis_", gsub(" ", "_", paste0(substr(date(), 5, 10), "_", substr(date(), 21, 24))))
# loading packages
library(xlsx) # working with excel
library(lmerTest) # mixed models
library(boot) # bootstrapping
library(quantreg) # quantile regression
##### set working directory
setwd("//ads.bris.ac.uk/filestore/BRMS/Research/Metabolic profiles in UPBEAT/RScripts/SensitivityAnalyses/")
##### source functions for each of the various sensitivity analyses
source('Functions_forSA.R')
##### read in main result and write to excel file
MainResult <- read.csv("MainResult.csv")
write.xlsx(MainResult, file=paste0(results_file, ".xlsx"), sheetName="Main Analyses")
##### MLM variations (spread measure x outlier handling)
SA_MLM_variations("SD", RemoveOutliers=FALSE, results_file)
SA_MLM_variations("SD", RemoveOutliers=TRUE, results_file)
SA_MLM_variations("IQR", RemoveOutliers=FALSE, results_file)
SA_MLM_variations("IQR", RemoveOutliers=TRUE, results_file)
##### ttest bootstrapped variations
ttest_bootstrap_variations("SD", RemoveOutliers=FALSE, results_file)
ttest_bootstrap_variations("SD", RemoveOutliers=TRUE, results_file)
ttest_bootstrap_variations("IQR", RemoveOutliers=FALSE, results_file)
ttest_bootstrap_variations("IQR", RemoveOutliers=TRUE, results_file)
MADS(RemoveOutliers=FALSE, results_file)
MADS(RemoveOutliers=TRUE, results_file)
##### Quantile regression (median and 75th centile, with/without outliers)
qreg_diffcentile(0.5, RemoveOutliers=FALSE, results_file=results_file)
qreg_diffcentile(0.5, RemoveOutliers=TRUE, MADprop=3.5, results_file=results_file)
qreg_diffcentile(0.75, RemoveOutliers=FALSE, results_file=results_file)
qreg_diffcentile(0.75, RemoveOutliers=TRUE, MADprop=3.5, results_file=results_file)
##### Proportion of outliers at two MAD cut-offs
Proportion_outliers(2.24, results_file=results_file)
Proportion_outliers(3.5, results_file=results_file)
|
ba00388d51a4b5e1dbda0f857397acf7b9bda84d
|
a8124c3361ec462e076fbe246c3571672a28a54b
|
/R/function.R
|
5a98d183904d7e9592c904e3843e0ab5f7d05ab6
|
[
"MIT"
] |
permissive
|
ashifujjmanRafi/code-snippets
|
80ea1300fb2bb5bf4bc1c2fb01222a42127c0438
|
24bd4b81564887822a0801a696001fcbeb6a7a75
|
refs/heads/master
| 2023-02-17T04:35:32.779975
| 2021-01-12T02:14:47
| 2021-01-12T02:14:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 796
|
r
|
function.R
|
# Tutorial: R function basics -- default arguments, named-argument matching,
# list returns for multiple values, and inline (one-line) definitions.

# `y` defaults to 2 when the caller omits it.
mysum <- function(x, y = 2) {
  total <- x + y
  return(total)
}
ret <- mysum(3, 5)
print(ret)
# Named arguments may be supplied in any order.
ret <- mysum(y = 10, x = 10)
print(ret)
# Omitting `y` picks up its default value.
mysum <- function(x, y = 2) {
  total <- x + y
  return(total)
}
ret <- mysum(3)
print(ret)
# ret = mysum() # ERROR!!! at least pass an one value for parameter `x`
# Defaults may appear on any parameter; only `y` is required here.
mysum <- function(x = 1, y, z = 5) {
  z <- x + y + z
  return(z)
}
ret <- mysum(y = 3) # x and z fall back to their defaults: 1 + 3 + 5
print(ret)
# Returning several values: bundle them into a named list.
myeval <- function(x, y) {
  result <- list('sum' = x + y, 'mul' = x * y)
  return(result)
}
print(myeval(10, 2))
# Inline functions: the body is a single expression.
mysum <- function(x, y) x + y
myexp <- function(x, y) x ^ y
print(mysum(10, 20))
print(myexp(2, 3))
|
59a0fb6fded267e2ed925ff08930b222e992a8ec
|
44d4f8c212732ede685b93da43bad99cbeb7f3b1
|
/R/validation/dist/runkl.R
|
5e24c3d4de5eb9b65a6ca3f62e62f25ae387e001
|
[] |
no_license
|
sharlec/HTTP-video-QoS-to-QoE
|
31c3fd7bb4708c72a982dbc732d38e7be9aa0a28
|
50b91796a35db46c5f211ecade70db8d56fbf9fd
|
refs/heads/master
| 2020-09-09T04:00:54.851590
| 2017-05-06T06:38:34
| 2017-05-06T06:38:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 423
|
r
|
runkl.R
|
library('entropy')
# KL divergence between the "original" and "predicted" columns of one result
# file (each file is read into a data.frame carrying those two columns).
dokl <- function(df){
  k <- KL.plugin(df$original, df$predicted)
  return(k)
}
# Directory containing the result files: first command-line argument.
dirname <- commandArgs(trailingOnly = TRUE)[1]
# NOTE(review): `pattern` is a regex, so "*.txt" behaves like ".txt anywhere
# in the name" rather than a strict glob; "\\.txt$" would be the exact form.
filenames <- list.files(dirname, pattern="*.txt", full.names=TRUE)
data <- lapply(filenames, read.csv)
KLdivergence <- lapply(data, dokl)
# Pair each filename with its divergence and report.
out <- cbind(filenames, KLdivergence)
outfile <- paste("./out_",dirname, sep="")
print(outfile)
print(out)
|
e48e711892953a14bed28605cf5b1ca154b9b95b
|
08b4eaf203fbbe87b09fdb2dc96b5d11fff2c171
|
/R/utils_preprocessing.R
|
e24a24f5ea9bb074d51c15014bf0447944baa22e
|
[] |
no_license
|
cran/scDiffCom
|
a8f28d7f92acfba6b84e123707c437300a9adfd9
|
26fbcb29d53a04e49208cb38f3e515f4a59827aa
|
refs/heads/master
| 2023-07-09T07:30:59.085372
| 2021-08-17T06:20:05
| 2021-08-17T06:20:05
| 397,309,543
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,761
|
r
|
utils_preprocessing.R
|
# Assemble all inputs needed for an scDiffCom analysis from a Seurat object:
# the transposed expression matrix restricted to LRI genes, the cleaned
# metadata, the retained cell-types, the filtered LRI table and the
# condition/sample configuration.
extract_analysis_inputs <- function(
  seurat_object,
  celltype_column_id,
  sample_column_id,
  condition_column_id,
  cond1_name,
  cond2_name,
  assay,
  slot,
  log_scale,
  threshold_min_cells,
  LRI_table,
  LRI_species,
  verbose
) {
  # Expression matrix + metadata, with low-count cell-types filtered out.
  seurat_inputs <- extract_seurat_inputs(
    seurat_object = seurat_object,
    celltype_column_id = celltype_column_id,
    sample_column_id = sample_column_id,
    condition_column_id = condition_column_id,
    assay = assay,
    slot = slot,
    log_scale = log_scale,
    threshold_min_cells = threshold_min_cells,
    verbose = verbose
  )
  # Restrict data and LRI table to the genes present in both.
  LRI_inputs <- extract_LRI_inputs(
    data = seurat_inputs$data,
    LRI_table = LRI_table,
    LRI_species = LRI_species,
    verbose = verbose
  )
  # Determine the analysis mode (detection vs differential, with/without samples).
  condition_inputs <- extract_condition_inputs(
    sample_column_id = sample_column_id,
    condition_column_id = condition_column_id,
    cond1_name = cond1_name,
    cond2_name = cond2_name,
    metadata = seurat_inputs$metadata,
    verbose = verbose
  )
  list(
    data_tr = DelayedArray::t(LRI_inputs$data), # transposed: cells x genes
    metadata = seurat_inputs$metadata,
    cell_types = seurat_inputs$cell_types,
    LRI = LRI_inputs$LRI,
    max_nL = LRI_inputs$max_nL,
    max_nR = LRI_inputs$max_nR,
    condition = condition_inputs
  )
}
# Pull the expression matrix and the relevant metadata columns out of a Seurat
# object: normalise the log-scale convention, validate the metadata columns,
# standardise their names, sanitise cell-type labels and drop cell-types with
# too few cells.
extract_seurat_inputs <- function(
  seurat_object,
  celltype_column_id,
  sample_column_id,
  condition_column_id,
  assay,
  slot,
  log_scale,
  threshold_min_cells,
  verbose
) {
  # Placeholder binding for data.table non-standard evaluation below
  # (keeps R CMD check quiet about the `cell_type` symbol).
  cell_type <- NULL
  mes <- "Extracting data from assay '"
  if (slot == "data") {
    mes <- paste0(
      mes,
      assay,
      "' and slot 'data' (assuming normalized log1p-transformed data)."
    )
  } else if (slot == "counts") {
    mes <- paste0(
      mes,
      assay,
      " and slot 'counts' (assuming normalized non-log1p-transformed data)."
    )
  }
  if (verbose) message(mes)
  temp_data <- Seurat::GetAssayData(
    object = seurat_object,
    slot = slot,
    assay = assay
  )
  # The downstream code relies on a sparse dgCMatrix layout.
  if(!methods::is(temp_data, "dgCMatrix")) {
    stop(
      paste0(
        "slot ",
        slot,
        " of 'seurat_object' must be of class 'dgCMatrix'"
      )
    )
  }
  # Convert between log1p and linear scale so the output matches `log_scale`.
  if (slot == "data" & !log_scale) {
    if (verbose) {
      message(
        paste0(
          "Converting normalized data from log1p-transformed ",
          "to non-log1p-transformed."
        )
      )
    }
    temp_data <- expm1(temp_data)
  }
  if (slot == "counts" & log_scale) {
    if (verbose) {
      message(
        "Converting data from non-log1p-transformed to log1p-transformed."
      )
    }
    temp_data <- log1p(temp_data)
  }
  # Copy the meta.data and turn the rownames (cell barcodes) into a column.
  temp_md <- copy(x = seurat_object[[]])
  temp_md <- setDT(
    x = temp_md,
    keep.rownames = "cell_id"
  )
  # Validate that the requested metadata columns exist.
  if (!(celltype_column_id %in% names(temp_md))) {
    stop(
      paste0(
        "Can't find column '",
        celltype_column_id,
        "' in the meta.data of 'seurat_object'")
    )
  }
  if (!is.null(sample_column_id)) {
    if (!(sample_column_id %in% names(temp_md))) {
      stop(
        paste0(
          "Can't find column '",
          sample_column_id,
          "' in the meta.data of 'seurat_object'"
        )
      )
    }
  }
  if (!is.null(condition_column_id)) {
    if (!(condition_column_id %in% names(temp_md))) {
      stop(
        paste0(
          "Can't find column '",
          condition_column_id,
          "' in the meta.data of 'seurat_object'"
        )
      )
    }
  }
  # Keep only the columns we use; NULL ids simply drop out of the vector.
  cols_to_keep <- c(
    "cell_id",
    celltype_column_id,
    sample_column_id,
    condition_column_id
  )
  temp_md <- temp_md[, cols_to_keep, with = FALSE]
  # Coerce everything to character (factors would break the checks below).
  temp_md[, names(temp_md) := lapply(.SD, as.character)]
  if (!is.null(condition_column_id)) {
    # A differential analysis needs exactly two condition groups.
    temp_cond <- unique(temp_md[[condition_column_id]])
    if (length(temp_cond) != 2) {
      stop(
        paste0(
          "meta.data ",
          condition_column_id,
          " of 'seurat_object' must contain exactly two groups (",
          length(temp_cond),
          " supplied)."
        )
      )
    }
    if(!is.null(sample_column_id)) {
      # Each sample must belong to exactly one condition.
      temp_cols <- c(sample_column_id, condition_column_id)
      temp_md_sample <- unique(temp_md[, temp_cols, with = FALSE])
      temp_samples <- unique(temp_md[[sample_column_id]])
      if (length(temp_samples) != nrow(temp_md_sample)) {
        stop(
          paste0(
            "Column ",
            sample_column_id,
            " of 'seurat_object' must match to column",
            condition_column_id
          )
        )
      }
      new_colnames <- c(
        "cell_id",
        "cell_type",
        "sample_id",
        "condition"
      )
    } else {
      new_colnames <- c("cell_id", "cell_type", "condition")
    }
  } else {
    if(!is.null(sample_column_id)) {
      # NOTE(review): the message names 'seurat_column_id'/'seurat_sample_id',
      # but the actual parameters are condition_column_id/sample_column_id.
      stop(
        paste0(
          "Parameter 'seurat_column_id' must be supplied ",
          "when parameter 'seurat_sample_id' is not NULL"
        )
      )
    } else {
      new_colnames <- c("cell_id", "cell_type")
    }
  }
  # Standardise the column names used throughout the package.
  setnames(
    x = temp_md,
    old = cols_to_keep,
    new = new_colnames
  )
  # Underscores are reserved as separators elsewhere; replace them.
  if(any(grepl("_", temp_md[["cell_type"]], fixed = TRUE))) {
    warning(
      "Underscores ('_') are not allowed in cell-type names: replacing with '-'"
    )
    temp_md[, cell_type := gsub(
      pattern = "_",
      replacement = "-",
      x = cell_type,
      fixed = TRUE)]
  }
  # Drop cell-types that do not reach the minimum cell count (per condition).
  celltypes_filtered <- filter_celltypes(
    metadata = temp_md,
    threshold_min_cells = threshold_min_cells
  )
  metadata <- temp_md[cell_type %in% celltypes_filtered, ]
  data <- temp_data[, colnames(temp_data) %in% metadata$cell_id]
  mes <- paste0(
    "Input data: ",
    nrow(data),
    " genes, ",
    ncol(data),
    " cells and ",
    length(unique(metadata$cell_type)),
    " cell-types."
  )
  if (verbose) message(mes)
  list(
    data = data,
    metadata = metadata,
    cell_types = celltypes_filtered
  )
}
# Return the cell-types that have at least `threshold_min_cells` cells.
# When metadata carries a "condition" column, the threshold must be met
# within every condition. Errors unless at least two cell-types survive.
filter_celltypes <- function(
  metadata,
  threshold_min_cells
) {
  has_condition <- "condition" %in% colnames(metadata)
  if (has_condition) {
    counts <- table(metadata$cell_type, metadata$condition)
    keep <- apply(counts >= threshold_min_cells, 1, all)
  } else {
    keep <- table(metadata$cell_type) >= threshold_min_cells
  }
  kept_types <- names(which(keep))
  if (length(kept_types) < 2) {
    stop(
      paste0(
        "Inputs must contain at least 2 cell-types with at least ",
        threshold_min_cells,
        " cells (in each condition)."
      )
    )
  }
  kept_types
}
# Restrict the ligand-receptor interaction (LRI) table to interactions whose
# genes are all present in `data`, subset `data` to those genes, and derive
# the maximum number of ligand (max_nL) and receptor (max_nR) subunits.
extract_LRI_inputs <- function(
  data,
  LRI_table,
  LRI_species,
  verbose
) {
  # Placeholder bindings for data.table non-standard evaluation below.
  LIGAND_1 <- LIGAND_2 <- RECEPTOR_1 <- RECEPTOR_2 <- RECEPTOR_3 <- NULL
  cols_compulsory <- c(
    "LRI",
    "LIGAND_1",
    "LIGAND_2",
    "RECEPTOR_1",
    "RECEPTOR_2",
    "RECEPTOR_3"
  )
  if (!all(cols_compulsory %in% names(LRI_table))) {
    stop(
      paste0(
        "'LRI_table' must contain the columns ",
        paste0(
          cols_compulsory,
          collapse = ", "
        )
      )
    )
  }
  # Keep an interaction only when every non-NA subunit gene is in the data;
  # LIGAND_1 and RECEPTOR_1 are mandatory, the others may be NA.
  LRI_keep <- LRI_table[, cols_compulsory, with = FALSE]
  LRI_keep <- unique(LRI_keep)
  LRI_keep <- LRI_keep[
    LIGAND_1 %in% rownames(data) &
      RECEPTOR_1 %in% rownames(data) &
      LIGAND_2 %in% c(rownames(data), NA) &
      RECEPTOR_2 %in% c(rownames(data), NA) &
      RECEPTOR_3 %in% c(rownames(data), NA),
    ]
  # Every gene referenced by a retained interaction.
  LRI_genes <- unique(c(
    unique(LRI_keep$LIGAND_1),
    unique(LRI_keep$LIGAND_2),
    unique(LRI_keep$RECEPTOR_1),
    unique(LRI_keep$RECEPTOR_2),
    unique(LRI_keep$RECEPTOR_3)
  ))
  data_keep <- data[rownames(data) %in% LRI_genes, ]
  n_ID <- length(unique(LRI_keep$LRI))
  if (n_ID == 0) {
    stop(
      paste0(
        "There are no genes from 'LRI_table' in 'seurat_object'.",
        " Have you supplied 'seurat_object' with correctly formatted gene ",
        " symbols for your species (namely HGNC or MGI)? "
      )
    )
  }
  if (n_ID <= 10) {
    warning(
      paste0(
        "Only ",
        n_ID,
        " ligand-receptor interactions found in the dataset.",
        " Have you supplied 'seurat_object' with correctly formatted gene ",
        " symbols for your species (namely HGNC or MGI)? "
      )
    )
  }
  if (all(is.na(LRI_keep$RECEPTOR_1)) | all(is.na(LRI_keep$LIGAND_1))) {
    stop(
      paste0(
        "'LRI_table' must not contain only NA in columns ",
        "'LIGAND_1'' or 'RECEPTOR_1'."
      )
    )
  } else {
    # Derive subunit counts and drop all-NA subunit columns.
    if (all(is.na(LRI_keep$LIGAND_2))) {
      max_nL <- 1
      LRI_keep <- base::subset(LRI_keep, select = -c(LIGAND_2))
    } else {
      max_nL <- 2
    }
    if (all(is.na(LRI_keep$RECEPTOR_2)) & all(is.na(LRI_keep$RECEPTOR_3))) {
      max_nR <- 1
      LRI_keep <- base::subset(
        LRI_keep,
        select = -c(RECEPTOR_2, RECEPTOR_3)
      )
    } else if (all(is.na(LRI_keep$RECEPTOR_2))) {
      # A third receptor subunit without a second one is inconsistent.
      stop(
        paste0(
          "'LRI_table' must not contain only NA in column ",
          "'RECEPTOR_2'' and non-NA in column 'RECEPTOR_3'."
        )
      )
    } else if (all(is.na(LRI_keep$RECEPTOR_3))) {
      max_nR <- 2
      LRI_keep <- base::subset(
        LRI_keep,
        select = -c(RECEPTOR_3)
      )
    } else {
      max_nR <- 3
    }
  }
  mes <- paste0(
    "Input ligand-receptor database: ",
    length(unique(LRI_table$LRI)),
    " ",
    LRI_species,
    " interactions.\n",
    "Number of LRIs that match to genes present in the dataset: ",
    n_ID,
    "."
  )
  if (verbose) message(mes)
  list(
    data = data_keep,
    LRI = LRI_keep,
    max_nL = max_nL,
    max_nR = max_nR
  )
}
# Determine the analysis configuration from the supplied column ids:
# detection-only (no condition), differential without samples, or
# differential with sample-based resampling. Also resolves which of the two
# condition labels present in the metadata is cond1 and which is cond2.
extract_condition_inputs <- function(
  sample_column_id,
  condition_column_id,
  cond1_name,
  cond2_name,
  metadata,
  verbose
) {
  if (is.null(condition_column_id)) {
    # Detection analysis: no condition, no samples.
    cond_info <- list(
      is_cond = FALSE,
      is_samp = FALSE
    )
    if (!is.null(cond1_name) | !is.null(cond2_name)) {
      warning(
        paste0(
          "'condition_column_id' is NULL but either 'cond1_name' or ",
          "'cond2_name' is not NULL."
        )
      )
    }
  } else {
    if (is.null(sample_column_id)) {
      cond_info <- list(
        is_cond = TRUE,
        is_samp = FALSE
      )
    } else {
      cond_info <- list(
        is_cond = TRUE,
        is_samp = TRUE
      )
    }
    # Match the user-supplied labels to the two groups actually present.
    # NOTE(review): assumes cond1_name/cond2_name are non-NULL here -- a NULL
    # value would make the `if` condition zero-length and error; confirm
    # upstream validation.
    conds <- unique(metadata$condition)
    if (cond1_name == conds[[1]] & cond2_name == conds[[2]]) {
      cond_info$cond1 <- conds[[1]]
      cond_info$cond2 <- conds[[2]]
    } else if (cond1_name == conds[[2]] & cond2_name == conds[[1]]) {
      cond_info$cond1 <- conds[[2]]
      cond_info$cond2 <- conds[[1]]
    } else {
      stop(
        paste0(
          "Either 'cond1_name' or 'cond2_name' does not match ",
          "with the content of 'condition_column_id':",
          conds[[1]],
          " and ",
          conds[[2]],
          "."
        )
      )
    }
  }
  # Build and (optionally) emit a human-readable description of the mode.
  mes <- "Type of analysis to be performed:"
  if (!cond_info$is_cond) {
    mes <- paste0(
      mes,
      " detection analysis without conditions."
    )
  } else {
    mes <- paste0(
      mes,
      " differential analysis between ",
      cond_info$cond1,
      " and ",
      cond_info$cond2,
      " cells"
    )
    if(cond_info$is_samp) {
      mes <- paste0(
        mes,
        " (based on samples resampling and cells permutation)."
      )
    } else {
      mes <- paste0(
        mes,
        "."
      )
    }
  }
  if (verbose) message(mes)
  cond_info
}
|
45a1c900752ecd93b0135ac340e1adde184c80b4
|
232c8b0213342e9e973ec8ffb695743759ee89b3
|
/R/bayou-mcmc-utilities.R
|
167d60c313880b4479c9c53bf9c2edab8373581d
|
[] |
no_license
|
uyedaj/bayou
|
304c98ba9516fb91688b345fb33c9a41765d06cd
|
b623758bf7b08900e2cd60c9247c2650b564d06b
|
refs/heads/master
| 2021-07-05T03:02:21.376172
| 2021-05-10T14:51:11
| 2021-05-10T14:51:11
| 21,963,529
| 19
| 10
| null | 2019-11-06T18:58:40
| 2014-07-18T01:29:03
|
HTML
|
UTF-8
|
R
| false
| false
| 22,637
|
r
|
bayou-mcmc-utilities.R
|
#' Loads a bayou object
#'
#' \code{load.bayou} loads a bayouFit object that was created using \code{bayou.mcmc()}
#'
#' @param bayouFit An object of class \code{bayouFit} produced by the function \code{bayou.mcmc()}
#' @param saveRDS A logical indicating whether the resulting chains should be saved as an *.rds file
#' @param file An optional filename (possibly including path) for the saved *.rds file
#' @param cleanup A logical indicating whether the files produced by \code{bayou.mcmc()} should be removed.
#' @param ref A logical indicating whether a reference function is also in the output
#'
#' @details If both \code{saveRDS} is \code{FALSE} and \code{cleanup} is \code{TRUE}, then \code{load.bayou} will trigger a
#' warning and ask for confirmation. In this case, if the results of \code{load.bayou()} are not stored in an object,
#' the results of the MCMC run will be permanently deleted.
#'
#' @examples
#' \dontrun{
#' data(chelonia)
#' tree <- chelonia$phy
#' dat <- chelonia$dat
#' prior <- make.prior(tree)
#' fit <- bayou.mcmc(tree, dat, model="OU", prior=prior,
#'            new.dir=TRUE, ngen=5000)
#' chain <- load.bayou(fit, saveRDS=FALSE, cleanup=TRUE)
#' plot(chain)
#' }
#' @export
load.bayou <- function(bayouFit, saveRDS=TRUE, file=NULL, cleanup=FALSE, ref=FALSE){
  # Unpack the run description produced by bayou.mcmc().
  tree <- bayouFit$tree
  dat <- bayouFit$dat
  outname <- bayouFit$outname
  model <- bayouFit$model
  model.pars <- bayouFit$model.pars
  startpar <- bayouFit$startpar
  dir <- bayouFit$dir
  # Fixed-dimension parameters vs reversible-jump parameters.
  outpars <- model.pars$parorder[!(model.pars$parorder %in% model.pars$rjpars)]
  rjpars <- model.pars$rjpars
  #mapsr2 <- read.table(file="mapsr2.dta",header=FALSE)
  #mapsb <- read.table(file="mapsb.dta",header=FALSE)
  #mapst2 <- read.table(file="mapst2.dta",header=FALSE)
  # Read the MCMC output files line by line (one generation per line).
  mapsr2 <- scan(file=paste(dir,outname,".loc",sep=""),what="",sep="\n",quiet=TRUE,blank.lines.skip=FALSE)
  mapsb <- scan(file=paste(dir,outname,".sb",sep=""),what="",sep="\n",quiet=TRUE,blank.lines.skip=FALSE)
  mapst2 <- scan(file=paste(dir,outname,".t2",sep=""),what="",sep="\n",quiet=TRUE,blank.lines.skip=FALSE)
  pars.out <- scan(file=paste(dir,outname,".pars",sep=""),what="",sep="\n",quiet=TRUE,blank.lines.skip=FALSE)
  rjpars.out <- scan(file=paste(dir,outname,".rjpars",sep=""),what="",sep="\n",quiet=TRUE,blank.lines.skip=FALSE)
  # Split each whitespace-separated line into a numeric vector.
  rjpars.out <- lapply(strsplit(rjpars.out,"[[:space:]]+"),as.numeric)
  pars.out <- lapply(strsplit(pars.out,"[[:space:]]+"),as.numeric)
  mapsr2 <- lapply(strsplit(mapsr2,"[[:space:]]+"),as.numeric)
  mapsb <- lapply(strsplit(mapsb,"[[:space:]]+"),as.numeric)
  mapst2 <- lapply(strsplit(mapst2,"[[:space:]]+"),as.numeric)
  # Columns 1-3 of the .pars file are generation, log-likelihood and prior;
  # column 4 is the reference function value when ref=TRUE.
  chain <- list()
  chain$gen <- sapply(pars.out,function(x) x[1])
  chain$lnL <- sapply(pars.out,function(x) x[2])
  chain$prior <- sapply(pars.out,function(x) x[3])
  if(ref==TRUE){
    chain$ref <- sapply(pars.out, function(x) x[4])
  }
  # Walk the remaining columns, consuming length(startpar[[par]]) values
  # for each fixed-dimension parameter in order.
  parLs <- lapply(startpar, length)[outpars]
  j=4+as.numeric(ref)
  if(length(outpars) > 0){
    for(i in 1:length(outpars)){
      chain[[outpars[i]]] <- lapply(pars.out, function(x) as.vector(x[j:(j+parLs[[i]]-1)]))#unlist(res[[4]][,j:(j+parLs[[i]]-1)],F,F)
      if(parLs[[i]]==1) chain[[outpars[i]]]=unlist(chain[[outpars[i]]])
      j <- j+1+parLs[[i]]-1
    }
  }
  # Per-generation shift branches (sb), locations (loc) and regimes (t2).
  chain$sb <- mapsb
  chain$loc <- mapsr2
  chain$t2 <- mapst2
  #j=4
  #if(length(outpars) > 0){
  #  for(i in 1:length(outpars)){
  #    chain[[outpars[i]]] <- sapply(pars.out, function(x) x[j])
  #    j <- j+1
  #  }
  #}
  # Split each .rjpars line evenly among the reversible-jump parameters.
  # NOTE(review): `length(rjpars >0)` has a misplaced parenthesis -- it equals
  # length(rjpars), which if() treats as TRUE exactly when non-empty, so it
  # behaves like the intended `length(rjpars) > 0` by accident.
  if(length(rjpars >0)){
    nrjpars <- length(rjpars)
    for(i in 1:length(rjpars)){
      chain[[rjpars[i]]] <- lapply(rjpars.out, function(x) unlist((x[(1+length(x)/nrjpars*(i-1)):(1+i*length(x)/nrjpars-1)]),F,F))
    }
  }
  # Attach run information and class for the bayouMCMC methods.
  attributes(chain)$model <- model
  attributes(chain)$model.pars <- model.pars
  attributes(chain)$tree <- tree
  attributes(chain)$dat <- dat
  class(chain) <- c("bayouMCMC", "list")
  # Guard against silently losing the run: confirm before delete-without-save.
  if(saveRDS==FALSE & cleanup==TRUE){
    ans <- toupper(readline("Warning: You have selected to delete all created MCMC files and not to save them as an .rds file.
                            Your mcmc results will not be saved on your hard drive. If you do not output to a object, your results will be lost.
                            Continue? (Y or N):"))
    cleanup <- ifelse(ans=="Y", TRUE, FALSE)
  }
  if(saveRDS){
    if(is.null(file)){
      saveRDS(chain, file=paste(bayouFit$dir, outname, ".chain.rds",sep=""))
      cat(paste("file saved to", paste(bayouFit$dir,"/",outname,".chain.rds\n",sep="")))
    } else {
      saveRDS(chain, file=file)
      cat(paste("file saved to", file))
    }
  }
  # Remove the raw MCMC output files (or the whole temp directory).
  if(cleanup){
    if(bayouFit$tmpdir){
      unlink(dir,T,T)
      cat(paste("deleting temporary directory", dir))
    } else {
      file.remove(paste(dir, outname, ".loc", sep=""))
      file.remove(paste(dir, outname, ".t2", sep=""))
      file.remove(paste(dir, outname, ".sb", sep=""))
      file.remove(paste(dir, outname, ".pars", sep=""))
      file.remove(paste(dir, outname, ".rjpars", sep=""))
    }
  }
  return(chain)
}
#' Calculate Gelman's R statistic
#'
#' @param parameter The name or number of the parameter to calculate the statistic on
#' @param chain1 The first bayouMCMC chain
#' @param chain2 The second bayouMCMC chain
#' @param freq The interval between which the diagnostic is calculated
#' @param start The first sample to calculate the diagnostic at
#' @param plot A logical indicating whether the results should be plotted
#' @param ... Optional arguments passed to \code{gelman.diag(...)} from the \code{coda} package
#'
#' @return A data.frame with columns \code{R} (point estimate) and
#'   \code{UCI.95} (upper confidence bound), one row per evaluation point.
#'
#' @export
gelman.R <- function(parameter,chain1,chain2,freq=20,start=1,
                                 plot=TRUE, ...){
  # Generations at which the diagnostic is evaluated.
  int <- seq(start,length(chain1[[parameter]]),freq)
  # Preallocate the result vectors (previously grown element by element).
  R <- numeric(length(int))
  R.UCI <- numeric(length(int))
  for(i in seq_along(int)){
    # Diagnostic over the first int[i] samples of both chains.
    chain.list <- mcmc.list(mcmc(chain1[[parameter]][1:int[i]]),mcmc(chain2[[parameter]][1:int[i]]))
    GD <- gelman.diag(chain.list)
    R[i] <- GD$psrf[1]
    R.UCI[i] <- GD$psrf[2]
  }
  if(plot==TRUE){
    plot(chain1$gen[int],R,main=paste("Gelman's R:",parameter),xlab="Generation",ylab="R", ...)
    lines(chain1$gen[int],R,lwd=2)
    lines(chain1$gen[int],R.UCI,lty=2)
  }
  return(data.frame("R"=R,"UCI.95"=R.UCI))
}
# Function for calculation of the posterior quantiles. Only needed for the
# simulation study, not generally called by the user. Locates the posterior
# quantile at which the true (simulated) parameter value falls.
.posterior.Q <- function(parameter, chain1, chain2, pars, burnin = 0.3){
  # Indices of the post-burnin samples (same window applied to both chains).
  keep <- round(burnin * length(chain1$gen), 0):length(chain1$gen)
  merged <- mcmc.list(mcmc(chain1[[parameter]][keep]),
                      mcmc(chain2[[parameter]][keep]))
  post.quantiles <- summary(merged, quantiles = seq(0, 1, 0.005))$quantiles
  # The true value is the only unnamed element, so after sorting it in with
  # the named quantiles its position identifies the quantile it falls into.
  pos <- which(names(sort(c(pars[[parameter]], post.quantiles))) == "")
  ((pos - 1)/2 - 0.25)/100
}
#' Return a posterior of shift locations
#'
#' @param chain A bayouMCMC chain
#' @param tree A tree of class 'phylo'
#' @param burnin A value giving the burnin proportion of the chain to be discarded
#' @param simpar An optional bayou formatted parameter list giving the true values (if data were simulated). Currently not used by this function.
#' @param mag A logical indicating whether the average magnitude of the shifts should be returned. Currently not used by this function.
#'
#' @return A data frame with rows corresponding to postordered branches. \code{pp} indicates the
#' posterior probability of the branch containing a shift. \code{magnitude of theta2} gives the average
#' value of the new optima after a shift. \code{naive SE of theta2} gives the standard error of the new optima
#' not accounting for autocorrelation in the MCMC and \code{rel location} gives the average relative location
#' of the shift on the branch (between 0 and 1 for each branch).
#'
#' @export
Lposterior <- function(chain, tree, burnin = 0, simpar = NULL, mag = TRUE){
  # First post-burnin sample index (sample 1 when no burnin is requested).
  pb.start <- ifelse(burnin > 0, round(length(chain$gen)*burnin, 0), 1)
  postburn <- pb.start:length(chain$gen)
  chain <- lapply(chain, function(x) x[postburn])
  ntips <- length(tree$tip.label)
  # Indicator matrix: one row per retained sample, one column per branch;
  # 1 where that sample places a shift on that branch.
  shifts <- t(sapply(chain$sb, function(x) as.numeric(1:nrow(tree$edge) %in% x)))
  # Per-sample optima associated with each shift (theta values indexed by t2).
  theta <- sapply(1:length(chain$theta), function(x) chain$theta[[x]][chain$t2[[x]]])
  branch.shifts <- chain$sb
  # Branch-wise posterior means / naive SEs of the post-shift optima and
  # of the shift locations along the branch.
  theta.shifts <- tapply(unlist(theta), unlist(branch.shifts), mean)
  theta.locs <- tapply(unlist(chain$loc), unlist(branch.shifts), mean)
  thetaSE <- tapply(unlist(theta), unlist(branch.shifts), function(x) sd(x)/sqrt(length(x)))
  N.theta.shifts <- tapply(unlist(branch.shifts), unlist(branch.shifts), length)
  root.theta <- sapply(chain$theta, function(y) y[1])
  # Spread branch-wise summaries into full-length vectors over all branches
  # (NA on branches that never received a shift). tapply names are branch ids.
  OS <- rep(NA, length(tree$edge[, 1]))
  OS[as.numeric(names(theta.shifts))] <- theta.shifts
  SE <- rep(NA, length(tree$edge[, 1]))
  SE[as.numeric(names(thetaSE))] <- thetaSE
  locs <- rep(NA, length(tree$edge[, 1]))
  locs[as.numeric(names(theta.locs))] <- theta.locs
  # Posterior probability per branch = fraction of retained samples that
  # place a shift on it.
  shifts.tot <- apply(shifts, 2, sum)
  shifts.prop <- shifts.tot/length(chain$gen)
  all.branches <- rep(0, nrow(tree$edge))
  # Dividing mean location by branch length gives the relative position
  # (0 to 1) of the shift along each branch.
  Lpost <- data.frame("pp" = shifts.prop, "magnitude of theta2" = OS, "naive SE of theta2" = SE, "rel location" = locs/tree$edge.length)
  return(Lpost)
}
#' Discards burnin
#'
#' Drops the first \code{burnin.prop} fraction of every element of a chain
#' list, returning the retained tail of each element.
#'
#' @export
.discard.burnin <- function(chain, burnin.prop = 0.3){
  drop_head <- function(samples){
    samples[(burnin.prop * length(samples)):length(samples)]
  }
  lapply(chain, drop_head)
}
#' Tuning function, not currently used.
#'
#' Rescales the proposal widths in \code{D} toward a target acceptance rate
#' of 0.25, using only the most recent half of the acceptance record, with
#' each scaling factor clamped to the interval [0.5, 2].
.tune.D <- function(D, accept, accept.type){
  # Only the second half of the history informs the tuning.
  recent <- (length(accept)/2):length(accept)
  acc <- tapply(accept[recent], accept.type[recent], mean)
  acc.length <- tapply(accept[recent], accept.type[recent], length)  # currently unused
  # Scale factor relative to the 0.25 target, clamped to [0.5, 2].
  acc.tune <- acc/0.25
  acc.tune[acc.tune < 0.5] <- 0.5
  acc.tune[acc.tune > 2] <- 2
  D$ak <- acc.tune[['alpha']] * D$ak
  D$sk <- acc.tune[['sig2']] * D$sk
  D$tk <- acc.tune[['theta']] * D$tk
  # Birth proposal width tracks twice the tuned theta width.
  D$bk <- 2 * D$tk
  D <- lapply(D, function(width){ names(width) <- NULL; width })
  list("D" = D, "acc.tune" = acc.tune)
}
#' Utility function for retrieving parameters from an MCMC chain
#'
#' @param i An integer giving the sample to retrieve
#' @param chain A bayouMCMC chain
#' @param model The parameterization used, either "OU", "QG" or "OUrepar",
#'   or a custom model specification list with elements \code{parorder} and
#'   \code{shiftpars}
#'
#' @return A bayou formatted parameter list holding sample \code{i} of every
#'   parameter named by the model's \code{parorder} and \code{shiftpars}.
#'
#' @examples
#' \dontrun{
#' tree <- sim.bdtree(n=30)
#' tree$edge.length <- tree$edge.length/max(branching.times(tree))
#' prior <- make.prior(tree, dists=list(dk="cdpois", dsig2="dnorm",
#'            dtheta="dnorm"),
#'              param=list(dk=list(lambda=15, kmax=32),
#'                dsig2=list(mean=1, sd=0.01),
#'                  dtheta=list(mean=0, sd=3)),
#'                    plot.prior=FALSE)
#' pars <- priorSim(prior, tree, plot=FALSE, nsim=1)$pars[[1]]
#' dat <- dataSim(pars, model="OU", phenogram=FALSE, tree)$dat
#' fit <- bayou.mcmc(tree, dat, model="OU", prior=prior,
#'           new.dir=TRUE, ngen=5000, plot.freq=NULL)
#' chain <- load.bayou(fit, save.Rdata=TRUE, cleanup=TRUE)
#' plotBayoupars(pull.pars(300, chain), tree)
#' }
#' @export pull.pars
pull.pars <- function(i, chain, model = "OU"){
  if(is.character(model)){
    # Named parameterizations map onto the package's predefined model objects.
    model.pars <- switch(model, "OU" = model.OU, "QG" = model.QG, "OUrepar" = model.OUrepar)#, "bd"=model.bd)
  } else {
    # A list-valued `model` is treated as a custom model specification.
    model.pars <- model
    model <- "Custom"
  }
  wanted <- c(model.pars$parorder, model.pars$shiftpars)
  setNames(lapply(wanted, function(p) chain[[p]][[i]]), wanted)
}
#' Combine mcmc chains
#'
#' @param chain.list A list of bayouMCMC chains to be combined
#' @param thin A number or vector specifying the thinning interval to be used. If a single value,
#' then the same interval will be applied to all chains.
#' @param burnin.prop A number or vector giving the proportion of burnin from each chain to be
#' discarded. If a single value, then the same proportion will be applied to all chains.
#'
#' @return A combined bayouMCMC chain
#'
#' @export
combine.chains <- function(chain.list, thin = 1, burnin.prop = 0){
  nns <- lapply(chain.list, function(x) names(x))
  # Recycle scalar burnin/thin settings across all chains.
  if(length(burnin.prop) == 1){
    burnins <- rep(burnin.prop, length(chain.list))
  } else burnins <- burnin.prop
  if(length(thin) == 1){
    thins <- rep(thin, length(chain.list))
  } else thins <- thin  # bug fix: a vector-valued `thin` previously left `thins` undefined
  Ls <- sapply(chain.list, function(x) length(x$gen))
  # All chains must share the same set of named elements to be merged.
  if(!all(sapply(nns, function(x) setequal(nns[[1]], x)))){
    stop("Not all chains have the same named elements and cannot be combined")
  } else {
    nn <- nns[[1]]
  }
  # Offset generation numbers by 0.1 * chain index so samples from
  # different chains remain distinguishable after merging.
  for(i in 1:length(chain.list)) chain.list[[i]]$gen <- chain.list[[i]]$gen + 0.1*i
  # Post-burnin, thinned sample indices for each chain (always keeping at
  # least index 1 as the start).
  postburns <- lapply(1:length(chain.list), function(x) seq(max(c(floor(burnins[x]*Ls[x]), 1)), Ls[x], thins[x]))
  chains <- setNames(vector("list", length(nns[[1]])), nns[[1]])
  # Carry over the first chain's attributes (model, tree, data, class, ...).
  attributes(chains) <- attributes(chain.list[[1]])
  for(i in 1:length(nn)){
    chains[[nn[i]]] <- do.call(c, lapply(1:length(chain.list), function(x) chain.list[[x]][[nn[i]]][postburns[[x]]]))
  }
  # Burnin has already been discarded, so the combined chain carries 0.
  attributes(chains)$burnin <- 0
  return(chains)
}
#' S3 method for printing bayouMCMC objects
#'
#' Prints the chain's shift-specific parameters (when a model specification
#' is stored in the attributes) followed by a short preview of each element.
#'
#' @param x A mcmc chain of class 'bayouMCMC' produced by the function bayou.mcmc and loaded into the environment using load.bayou
#' @param ... Additional arguments
#'
#' @export
#' @method print bayouMCMC
print.bayouMCMC <- function(x, ...){
  cat("bayouMCMC object \n")
  nn <- names(x)
  if("model.pars" %in% names(attributes(x))){
    model.pars <- attributes(x)$model.pars
    cat("shift-specific/reversible-jump parameters: ", model.pars$rjpars, "\n", sep="")
    # Print elements in canonical order: bookkeeping first, then model pars.
    o <- match(c("gen", "lnL", "prior", model.pars$parorder, model.pars$shiftpars), names(x))
  } else {
    cat("No model specification found in attributes", "\n")
    o <- 1:length(x)
  }
  for(i in o){
    element <- x[[i]]
    cls <- class(element)
    len <- length(element)
    cat("$", nn[i], " ", sep="")
    cat(cls, " with ", len, " elements", "\n", sep="")
    # Preview: first 5 values of numeric vectors, first 2 elements of lists.
    if(cls=="numeric" & len > 0) cat(element[1:min(c(len, 5))])
    if(cls=="list" & len > 0) print(element[1:min(c(len, 2))])
    if(cls=="numeric" & len > 5) cat(" ...", "\n")
    if(cls=="list" & len > 2) cat(" ...", "\n")
    cat("\n")
  }
}
# Build the proposal-weight control list for the MCMC from the relative move
# weights and the prior's branch/shift configuration. Weights are normalized
# to proposal probabilities; when reversible-jump ("k") moves are enabled,
# birth/death schedules (bk/dk) and per-branch shift settings (sb) are added.
.buildControl <- function(pars, prior, move.weights=list("alpha"=4,"sig2"=2,"theta"=4, "slide"=2,"k"=10)){
  splitmergepars <- attributes(prior)$splitmergepars
  ct <- unlist(move.weights)
  total.weight <- sum(ct)
  # Normalize relative weights so they sum to 1.
  ct <- ct/sum(ct)
  ct <- as.list(ct)
  if(move.weights$k > 0){
    # Per-branch maximum shift counts (bmax) and proposal probabilities (prob)
    # from the prior's shift-branch distribution.
    bmax <- attributes(prior)$parameters$dsb$bmax
    nbranch <- 2*attributes(prior)$parameters$dsb$ntips-2
    prob <- attributes(prior)$parameters$dsb$prob
    # Scalar prob/bmax are expanded to one entry per branch; branches ruled
    # out by the other vector (bmax==0 or prob==0) are zeroed.
    if(length(prob)==1){
      prob <- rep(prob, nbranch)
      prob[bmax==0] <- 0
    }
    if(length(bmax)==1){
      bmax <- rep(bmax, nbranch)
      bmax[prob==0] <- 0
    }
    type <- max(bmax)
    if(type == Inf){
      # Unlimited shifts per branch: derive a birth schedule from the prior
      # cumulative density over the number of shifts, falling back to 2*ntips
      # when kmax is missing or non-finite.
      maxK <- attributes(prior)$parameters$dk$kmax
      maxK <- ifelse(is.null(maxK), attributes(prior)$parameters$dsb$ntips*2, maxK)
      maxK <- ifelse(!is.finite(maxK), attributes(prior)$parameters$dsb$ntips*2, maxK)
      bdFx <- attributes(prior)$functions$dk
      bdk <- 1-sqrt(cumsum(c(0,bdFx(0:maxK,log=FALSE))))*0.9
    }
    if(type==1){
      # At most one shift per allowed branch: birth probability declines
      # linearly as the number of occupied branches approaches the maximum.
      maxK <- nbranch-sum(bmax==0)
      bdk <- (maxK - 0:maxK)/maxK
    }
    ct$bk <- bdk
    ct$dk <- (1-bdk)  # death probability is the complement of birth
    ct$sb <- list(bmax=bmax, prob=prob)
  }
  # Even without rj moves, "slide" proposals need the branch settings.
  if("k" %in% names(move.weights) & "slide" %in% names(move.weights)){
    if(move.weights$slide > 0 & move.weights$k ==0){
      bmax <- attributes(prior)$parameters$dsb$bmax
      prob <- attributes(prior)$parameters$dsb$prob
      ct$sb <- list(bmax=bmax, prob=prob)
    }
  }
  attributes(ct)$splitmergepars <- splitmergepars
  return(ct)
}
#bdFx <- function(ct,max,pars,...){
# dk <- cumsum(c(0,dpois(0:max,pars$lambda*T)))
# bk <- 0.9-dk+0.1
# return(list(bk=bk,dk=dk))
#}
# Adjust the proposal-weight control list when the current number of shifts
# is zero: the "slide" move (which requires an existing shift) gets weight 0,
# and the freed weight is redistributed evenly among the remaining tunable
# moves (those not fixed and not in bk/dk/slide/sb).
.updateControl <- function(ct, pars, fixed){
  if(pars$k == 0){
    updated <- ct
    # Total weight freed by disabling the slide/pos moves.
    freed <- sum(unlist(updated[names(updated) %in% c("slide", "pos")], F, F))
    updated[names(updated) == "slide"] <- 0
    redistributable <- !(names(updated) %in% c(fixed, "bk", "dk", "slide", "sb"))
    share <- freed/sum(redistributable)
    updated[redistributable] <- lapply(ct[names(updated)[redistributable]],
                                       function(w) w + share)
    ct <- updated
  }
  return(ct)
}
# Buffer MCMC samples and periodically flush them to the output files.
# Every `samp` generations the current state (shift branches, t2 indices,
# locations, and the flattened parameter line with generation/lnL/prior) is
# appended to `store`; once `chunk` buffered samples accumulate, the buffer
# is written out to the four flat files in `files` and reset.
.store.bayou <- function(i, pars, ll, pr, store, samp, chunk, parorder, files){
  if(i%%samp==0){
    # j cycles 1..chunk-1, hitting 0 on the flush generation.
    j <- (i/samp)%%chunk
    if(j!=0 & i>0){
      store$sb[[j]] <- pars$sb
      store$t2[[j]] <- pars$t2
      store$loc[[j]] <- pars$loc
      parline <- unlist(pars[parorder])
      store$out[[j]] <- c(i,ll,pr,parline)
    } else {
      # Last slot of the buffer, then flush everything to disk.
      #chunk.mapst1[chunk,] <<- maps$t1
      #chunk.mapst2[chunk,] <<- maps$t2
      #chunk.mapsr2[chunk,] <<- maps$r2
      store$sb[[chunk]] <- pars$sb
      store$t2[[chunk]] <- pars$t2
      store$loc[[chunk]] <- pars$loc
      parline <- unlist(pars[parorder])
      store$out[[chunk]] <- c(i,ll,pr,parline)
      #write.table(chunk.mapst1,file=mapst1,append=TRUE,col.names=FALSE,row.names=FALSE)
      #write.table(chunk.mapst2,file=mapst2,append=TRUE,col.names=FALSE,row.names=FALSE)
      #write.table(chunk.mapsr2,file=mapsr2,append=TRUE,col.names=FALSE,row.names=FALSE)
      # Append one line per buffered sample to each output file.
      lapply(store$out,function(x) cat(c(x,"\n"),file=files$pars.output,append=TRUE))
      lapply(store$sb,function(x) cat(c(x,"\n"),file=files$mapsb,append=TRUE))
      lapply(store$t2,function(x) cat(c(x,"\n"),file=files$mapst2,append=TRUE))
      lapply(store$loc,function(x) cat(c(x,"\n"),file=files$mapsloc,append=TRUE))
      #chunk.mapst1 <<- matrix(0,ncol=dim(oldmap)[1],nrow=chunk)
      #chunk.mapst2 <<- matrix(0,ncol=dim(oldmap)[1],nrow=chunk)
      #chunk.mapsr2 <<- matrix(0,ncol=dim(oldmap)[1],nrow=chunk)
      #out <<- list()
      # Reset the buffer for the next chunk.
      store$sb <- list()
      store$t2 <- list()
      store$loc <- list()
      store$out <- list()
    }
  }
  return(store)
}
#' S3 method for printing bayouFit objects
#'
#' Prints the model parameterization, the on-disk location of the MCMC
#' output files, and per-move acceptance summaries.
#'
#' @param x A 'bayouFit' object produced by \code{bayou.mcmc}
#' @param ... Additional parameters passed to \code{print}
#'
#' @export
#' @method print bayouFit
print.bayouFit <- function(x, ...){
  # Per-proposal-type acceptance rates and proposal counts.
  accept.prob <- round(tapply(x$accept, x$accept.type, mean), 2)
  prop.N <- tapply(x$accept.type, x$accept.type, length)
  file.pattern <- paste(x$dir, x$outname, ".*", sep="")
  cat("bayou modelfit\n")
  cat(paste(x$model, " parameterization\n\n", sep=""))
  cat("Results are stored in directory\n")
  cat(file.pattern, "\n")
  cat(paste("To load results, use 'load.bayou(bayouFit)'\n\n", sep=""))
  cat(paste(length(x$accept), " generations were run with the following acceptance probabilities:\n"))
  print(accept.prob, ...)
  cat(" Total number of proposals of each type:\n")
  print(prop.N, ...)
}
#' Set the burnin proportion for bayouMCMC objects
#'
#' @param chain A bayouMCMC chain or an ssMCMC chain
#' @param burnin The burnin proportion of samples to be discarded from downstream analyses.
#'
#' @return A bayouMCMC chain or ssMCMC chain with burnin proportion stored in the attributes.
#'
#' @export
set.burnin <- function(chain, burnin = 0.3){
  primary.class <- class(chain)[1]
  attr(chain, "burnin") <- burnin
  # Re-assert the full class vector for the recognized chain types.
  if(primary.class == "bayouMCMC"){
    class(chain) <- c("bayouMCMC", "list")
  }
  if(primary.class == "ssMCMC"){
    class(chain) <- c("ssMCMC", "list")
  }
  return(chain)
}
#' S3 method for summarizing bayouMCMC objects
#'
#' @param object A bayouMCMC object
#' @param ... Additional arguments passed to \code{print}
#'
#' @return An invisible list with two elements: \code{statistics} which provides
#' summary statistics for a bayouMCMC chain, and \code{branch.posteriors} which summarizes
#' branch specific data from a bayouMCMC chain.
#'
#' @export
#' @method summary bayouMCMC
summary.bayouMCMC <- function(object, ...){
  tree <- attributes(object)$tree
  model <- attributes(object)$model
  model.pars <- attributes(object)$model.pars
  # Burnin comes from the attribute set by set.burnin(); default = none.
  if(is.null(attributes(object)$burnin)){
    start <- 1
  } else {
    start <- round(attributes(object)$burnin*length(object$gen),0)
  }
  cat("bayou MCMC chain:", max(object$gen), "generations\n")
  cat(length(object$gen), "samples, first", eval(start), "samples discarded as burnin\n")
  # Restrict every chain element to the post-burnin samples.
  postburn <- start:length(object$gen)
  object <- lapply(object,function(x) x[postburn])
  # Scalar (non-reversible-jump) parameters go into one summary matrix.
  parorder <- c("lnL", "prior", model.pars$parorder)
  outpars <- parorder[!(parorder %in% model.pars$rjpars)]
  summat <- matrix(unlist(object[outpars]),ncol=length(outpars))
  colnames(summat) <- outpars
  if(length(model.pars$rjpars) > 0){
    # For each rj parameter, add its root value (first element) as a column
    # and summarize the pooled values across all shifts separately.
    for(i in model.pars$rjpars){
      summat <- cbind(summat, sapply(object[[i]],function(x) x[1]))
      colnames(summat)[ncol(summat)] <- paste("root.",i,sep="")
    }
    sum.rjpars <- lapply(model.pars$rjpars, function(x) summary(coda::mcmc(unlist(object[[x]]))))
  } else {
    sum.rjpars <- NULL
  }
  #summat <- cbind(summat, "root"=sapply(object$theta,function(x) x[1]))
  # coda summaries, effective sizes and 95% HPD intervals per column.
  sum.1vars <- summary(coda::mcmc(summat))
  HPDs <- apply(summat,2,function(x) HPDinterval(mcmc(x), 0.95))
  statistics <- rbind(cbind(sum.1vars$statistics, "Effective Size" = effectiveSize(summat), "HPD95Lower"=HPDs[1,], "HPD95Upper"=HPDs[2,]))
  if(length(model.pars$rjpars) > 0){
    # Append pooled rj-parameter rows (mean/SD only; other columns NA).
    for(i in 1:length(model.pars$rjpars)){
      statistics <- rbind(statistics,c(sum.rjpars[[i]]$statistics[1:2],rep(NA,5)))
      rownames(statistics)[nrow(statistics)] <- paste("all", model.pars$rjpars[i],sep=" ")
    }
  }
  cat("\n\nSummary statistics for parameters:\n")
  print(statistics, ...)
  # Branch-wise shift posterior, reported for branches with pp > 0.1.
  Lpost <- Lposterior(object, tree)
  Lpost.sorted <- Lpost[order(Lpost[,1],decreasing=TRUE),]
  cat("\n\nBranches with posterior probabilities higher than 0.1:\n")
  print(Lpost.sorted[Lpost.sorted[,1]>0.1,], ...)
  out <- list(statistics=statistics, branch.posteriors=Lpost)
  invisible(out)
}
#' Stores a flat file
#'
#' Variant of .store.bayou that additionally buffers and writes the
#' reversible-jump parameters (rjpars) to their own file and includes an
#' optional reference value (`ref`) in the output line. Samples are buffered
#' every `samp` generations and flushed to disk every `chunk` samples.
.store.bayou2 <- function(i, pars, outpars, rjpars, ll, pr, store, samp, chunk, parorder, files, ref=numeric(0)){
  if(i%%samp==0){
    # j cycles 1..chunk-1, hitting 0 on the flush generation.
    j <- (i/samp)%%chunk
    if(j!=0 & i>0){
      store$sb[[j]] <- pars$sb
      store$t2[[j]] <- pars$t2
      store$loc[[j]] <- pars$loc
      parline <- unlist(pars[outpars])
      store$out[[j]] <- c("gen"=i, "lik"=ll, "prior"=pr, "ref"=ref, parline)
      store$rjpars[[j]] <- unlist(pars[rjpars])
    } else {
      # Fill the last buffer slot, then append everything to the flat files.
      store$sb[[chunk]] <- pars$sb
      store$t2[[chunk]] <- pars$t2
      store$loc[[chunk]] <- pars$loc
      parline <- unlist(pars[outpars])
      store$out[[chunk]] <- c("gen"=i, "lik"=ll, "prior"=pr, "ref"=ref, parline)
      store$rjpars[[chunk]] <- unlist(pars[rjpars])
      lapply(store$out,function(x) cat(c(x,"\n"), file=files$pars.output,append=TRUE))
      lapply(store$rjpars,function(x) cat(c(x,"\n"),file=files$rjpars,append=TRUE))
      lapply(store$sb,function(x) cat(c(x,"\n"),file=files$mapsb,append=TRUE))
      lapply(store$t2,function(x) cat(c(x,"\n"),file=files$mapst2,append=TRUE))
      lapply(store$loc,function(x) cat(c(x,"\n"),file=files$mapsloc,append=TRUE))
      # Reset the whole buffer; sublists are recreated on next assignment.
      store <- list()
    }
  }
  return(store)
}
|
1f9174fa49360968b0fc32be20c424dc5f65e9e7
|
2d34708b03cdf802018f17d0ba150df6772b6897
|
/googledataflowv1b3.auto/man/ShellTask.Rd
|
6c3ed4ca28385e25ae4dfb1883ca80dc8a813f57
|
[
"MIT"
] |
permissive
|
GVersteeg/autoGoogleAPI
|
8b3dda19fae2f012e11b3a18a330a4d0da474921
|
f4850822230ef2f5552c9a5f42e397d9ae027a18
|
refs/heads/master
| 2020-09-28T20:20:58.023495
| 2017-03-05T19:50:39
| 2017-03-05T19:50:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 514
|
rd
|
ShellTask.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataflow_objects.R
\name{ShellTask}
\alias{ShellTask}
\title{ShellTask Object}
\usage{
ShellTask(exitCode = NULL, command = NULL)
}
\arguments{
\item{exitCode}{Exit code for the task}
\item{command}{The shell command to run}
}
\value{
ShellTask object
}
\description{
ShellTask Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
A task which consists of a shell command for the worker to execute.
}
|
18494f549ab1a648d2ad541d36ab0ad04dae6267
|
f3093af2209a4f58150f61650811808479b06b65
|
/Fns/fnCalcMEASO_TimeSeries_Effort.R
|
61b80349dafc7edef5cddde4a2cc40e8b488084a
|
[] |
no_license
|
AndrewJConstable/Southern-Ocean-Catch
|
56636217ad105b0a279cde6a8f7f97a56184a9c9
|
11c1b5370557c5fe2ca345c197e3ff4c6d60af47
|
refs/heads/master
| 2023-08-03T14:45:08.558446
| 2023-07-21T01:12:44
| 2023-07-21T01:12:44
| 291,855,588
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,869
|
r
|
fnCalcMEASO_TimeSeries_Effort.R
|
# Summarise a dependent variable (e.g. effort) by Split_Year within one or
# more MEASO areas. Returns a data frame with columns MEASO, Split_Year,
# the summed dependent variable, and (optionally) catch totals with the
# proportion of catch on records where the dependent variable is present.
# Returns NULL when the area has no rows or the summed variable is all zero.
# NOTE(review): uses bind_cols, which appears to come from dplyr — confirm
# the package is attached by the calling script.
fnCalcMEASO_TimeSeries_Effort<-function(m # MEASO three-letter area code eg. "AOA"
                                        ,mData # dataset to be summarised
                                        ,DepVar # the name of the dependent variable aggregated in the variable, m
                                        ,CheckCatch # if CheckCatch then summarise Catch in total and for records not NA
){
  mD<-mData[which(mData[,"MEASO"] %in% m),] # subset data for MEASO area
  res<-NULL
  if(nrow(mD)>0){
    if(nrow(mD[!is.na(mD[,DepVar]),])>0){
      # Annual sums of the dependent variable.
      res<-aggregate(mD[,DepVar],by=list(mD[,"Split_Year"]),sum,na.rm=TRUE) # remove missing values. return zero if all missing
    } else {
      # All values missing: emit one NA row per year present in the subset.
      Yrs<-unique(mD[,"Split_Year"])
      res<-data.frame(Yrs, rep(NA,length(Yrs)))
    } # end if all is.na
    dimnames(res)[[2]]<-c("Split_Year",DepVar)
    if(CheckCatch) {
      # Annual catch totals, catch on records where DepVar is not NA, and
      # the proportion the latter represents of the former.
      res_catchTotal<-aggregate(mD[,"CatchTotal"],by=list(mD[,"Split_Year"]),sum,na.rm=TRUE)
      dimnames(res_catchTotal)[[2]]<-c("Split_Year","CatchTotal")
      res_catchNotNAeffort <-aggregate(mD[,"CatchNotNAdv"],by=list(mD[,"Split_Year"]),sum,na.rm=TRUE)
      dimnames(res_catchNotNAeffort)[[2]]<-c("Split_Year","CatchNotNAdv")
      res_Catch<-merge(res_catchTotal,res_catchNotNAeffort,by="Split_Year",all=TRUE)
      res_Catch[is.na(res_Catch[,"CatchNotNAdv"]),"CatchNotNAdv"]<-0
      res_Catch<-cbind(res_Catch,res_Catch[,"CatchNotNAdv"]/res_Catch[,"CatchTotal"])
      dimnames(res_Catch)[[2]][ncol(res_Catch)]<-"PropCatchNotNA"
      res<-merge(res,res_Catch,by="Split_Year",all=TRUE)
    }
    # Prefix a MEASO column and sort rows by year.
    res<-bind_cols(data.frame(MEASO = rep(m,nrow(res))),res[order(res[,"Split_Year"]),])
    # Drop areas whose summed dependent variable is entirely zero.
    if(sum(res[!is.na(res[,DepVar]),DepVar])==0) res<-NULL
  } # end if nrow>0
  return(res)
} #end fnCalcMEASO_TimeSeries
|
cf1f7390fa51c6fbf781ea952df010aba9c8bfba
|
5b7a0942ce5cbeaed035098223207b446704fb66
|
/man/lsExportResponses.Rd
|
1ec7570a00740826d6d97252c827d75870581a4d
|
[
"MIT"
] |
permissive
|
k127/LimeRick
|
4f3bcc8c2204c5c67968d0822b558c29bb5392aa
|
a4d634981f5de5afa5b5e3bee72cf6acd284c92a
|
refs/heads/master
| 2023-04-11T21:56:54.854494
| 2020-06-19T18:36:05
| 2020-06-19T18:36:05
| 271,702,292
| 0
| 1
| null | 2020-06-12T03:45:14
| 2020-06-12T03:45:14
| null |
UTF-8
|
R
| false
| true
| 1,463
|
rd
|
lsExportResponses.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lsExportResponses.R
\name{lsExportResponses}
\alias{lsExportResponses}
\title{Export responses}
\usage{
lsExportResponses(
surveyID,
lang = NULL,
completionStatus = "all",
headingType = "code",
responseType = "short",
lsAPIurl = getOption("lsAPIurl"),
sessionKey = NULL
)
}
\arguments{
\item{surveyID}{ID of the survey}
\item{lang}{\emph{(optional)} Language code for the language to be used (This affects headers if \code{headingType} is not \code{"code"})}
\item{completionStatus}{\emph{(optional)} Response completion status. One out of \code{"complete"}, \code{"incomplete"}, \code{"all"}}
\item{headingType}{\emph{(optional)} Type of the column headers (mainly questions). One out of \code{"code"}, \code{"full"}, \code{"abbreviated"}}
\item{responseType}{\emph{(optional)} Answer codes (\code{"short"}) where applicable, or full answer texts (\code{"long"})}
\item{lsAPIurl}{\emph{(optional)} The URL of the \emph{LimeSurvey RemoteControl 2} JSON-RPC API}
\item{sessionKey}{\emph{(optional)} Authentication token, see \code{\link{lsGetSessionKey}()}}
}
\value{
A data frame with the survey responses
}
\description{
Export responses
}
\examples{
\dontrun{
lsExportResponses("123456", completionStatus = "incomplete", headingType = "abbreviated")
}
}
\references{
\url{https://api.limesurvey.org/classes/remotecontrol_handle.html#method_export_responses}
}
|
c10a2296727d78de5dc25c80e657970a92072737
|
5be5233c70855f78773e177f9a2ff5795aafb8c5
|
/cbsots/tests/testthat/test_get_ts_82596NED.R
|
af11180ff58ff91ae465e6cf16f7abac04efa7c2
|
[] |
no_license
|
timemod/cbsots
|
5057c3d38754aae175776d857f9c4916a9e5af73
|
3523b0eaa87eeee6425d80cbceb9668ca76c3ce1
|
refs/heads/master
| 2023-06-22T22:35:10.602907
| 2023-06-12T08:06:00
| 2023-06-12T08:06:00
| 121,116,490
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 746
|
r
|
test_get_ts_82596NED.R
|
# Regression test for get_ts() on CBS table 82596NED using a stored
# time-series coding and cached raw CBS data.
library(cbsots)
library(testthat)
rm(list = ls())
# Use UTF-8 encoding, because the Titles contain diacritical characters
# and the data files have been created with UTF-8 encoding
options(encoding = "UTF-8")
ts_code_file <- "tscode/tscode_82596NED.rds"
#edit_ts_code(ts_code_file)
ts_code <- readRDS(ts_code_file)
# Helper check_ts_table() compares the result against the raw CBS data.
source("utils/check_ts_table.R")
id <- "82596NED"
raw_cbs_dir <- "raw_cbs_data"
test_that(id, {
  # get_ts should run without warnings/messages on the cached data.
  expect_silent(result1 <- get_ts(id, ts_code, refresh = FALSE,
                                  min_year = 2015, raw_cbs_dir = raw_cbs_dir,
                                  frequencies = NA))
  t <- system.time(
    check <- check_ts_table(result1, id, raw_cbs_dir = raw_cbs_dir)
  )
  expect_true(check)
})
|
3d8351cf1e38a66505c0f9a860dbde1405d8becf
|
ee99fdde8656c0324d2e540a0ba94fabed28a95c
|
/scripts/s04_mask_clip.r
|
de8a819b19228891459ce621e1e246126789ef8e
|
[] |
no_license
|
Chris35Wills/synthetic_channel_mesh
|
b68b3eb57e857b583e473972f6768c05d89a53ae
|
cab157e42ea684a34d4a75aa247d5fcb8ad7fbc7
|
refs/heads/master
| 2020-12-03T02:00:26.537645
| 2017-07-17T10:07:44
| 2017-07-17T10:07:44
| 95,893,204
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,040
|
r
|
s04_mask_clip.r
|
# Program: *mask_clip.r
# Functions to clip coverage of the synthetic mesh keeping points within the channel according to a mask
# Also limits overflow into other channels at confluences
#
# @author Chris Williams
# @date: 08/03/16
# Ensure required packages are installed AND attached. The previous
# `if (!require(pkg)) install.packages(pkg)` pattern installed a missing
# package but never loaded it afterwards, so the first run would fail.
for (pkg in c("raster", "sp", "ggplot2", "tools")) {
  if (!require(pkg, character.only = TRUE)) {
    install.packages(pkg)
    library(pkg, character.only = TRUE)
  }
}
# Plot points over raster.
# Crops `mask` to `mask_extent`, renders it with ggplot2, and overlays the
# points in `pnts`. When centre=TRUE the centreline coordinates (cl_x, cl_y)
# are plotted; otherwise the point coordinates (x, y) are used.
plot_mask_and_points<-function(mask, mask_extent=extent(-10000, 25000, -750000, -700000), pnts, centre=TRUE){
  mask_crop=crop(mask, mask_extent)
  # Convert the cropped raster to a data frame for ggplot.
  map.p <- rasterToPoints(mask_crop)
  map.df <- data.frame(map.p)
  colnames(map.df)<-c('x','y','Mask')
  if (centre==TRUE){
    mask_pnt_plot=ggplot(map.df, aes(x,y)) +
      geom_raster(aes(fill=Mask)) +
      geom_point(data=pnts, aes(cl_x,cl_y, colour='red'), pch=20, na.rm=TRUE)
  } else {
    mask_pnt_plot=ggplot(map.df, aes(x,y)) +
      geom_raster(aes(fill=Mask)) +
      geom_point(data=pnts, aes(x,y, colour='red'), pch=20, na.rm=TRUE)
  }
  # Common theme/labels; equal coordinates keep map geometry undistorted.
  mask_pnt_plot = mask_pnt_plot +
    theme_bw() +
    coord_equal() +
    xlab("Easting") +
    ylab("Northing")
  mask_pnt_plot
}
# The normal points go eitehr side of the centreline
# The centreline is not always the true centre but will be someoewhere near
# Consequently, the lengths of the normals either side should be approx equal
# This is not always the case such as where a channel passes a confluence - in
# this case the normal can go up the confluence before it reaches non-ocean
# portions of the mask which clip it
#
# Where a normal on one side is more than 1.5x the length of the other, it is
# cropped to the max length of its opposite
# e.g. right side = 1000 m
# left side = 2000 m
# corrected left side = 1000 m
clip_side_length_outliers <- function(data, diff_factor = 1.5, verbose = 0) {
  # Rows are normal points on either side of the centreline
  # (side == 1: right, side == 2: left).
  n_before <- nrow(data)
  left_side <- subset(data, side == 2)
  right_side <- subset(data, side == 1)
  max_left <- max(left_side$cl_dist_l)
  max_right <- max(right_side$cl_dist_r)
  # If one side reaches much further from the centreline than the other
  # (e.g. a normal escaping up a confluence), trim it back to diff_factor
  # times the opposite side's maximum reach.
  if (max_right > diff_factor * max_left) {
    right_side <- right_side[right_side$cl_dist_r <= diff_factor * max_left, ]
  } else if (max_left > diff_factor * max_right) {
    left_side <- left_side[left_side$cl_dist_l <= diff_factor * max_right, ]
  }
  clipped <- rbind(left_side, right_side)
  if (verbose != 0) {
    print(paste0("Length before: ", n_before))
    print(paste0("Length after: ", nrow(clipped)))
  }
  return(clipped)
}
#' Second-pass filter on normal lengths
#'
#' Keeps only normal points whose left AND right centreline distances do not
#' exceed twice the smaller of the two mean side lengths. Doubling the mean
#' relaxes the clipping a little; otherwise the filter overly favours the
#' shorter segments.
second_pass_normal_filter <- function(data) {
  print("Running second filter pass....")
  print(paste0("Length before: ", nrow(data)))
  mean_left <- mean(data$cl_dist_l)
  mean_right <- mean(data$cl_dist_r)
  # Threshold = 2 x the smaller of the two mean side lengths.
  threshold <- 2 * min(mean_left, mean_right)
  data <- data[data$cl_dist_l <= threshold, ]
  data <- data[data$cl_dist_r <= threshold, ]
  print(paste0("Length after: ", nrow(data)))
  return(data)
}
#' Clip mesh according to mask with OPTIONAL additional clip factor to minimize overall normal length
#' This optional length clip comes into play where a channel goes past a confluence and there is potential
#' for the normal to go up another channels centreline
#'
#' For every "*REARRANGED.csv" file under `path`: sample the mask under each
#' point, keep only ocean points (mask value 0), then per centreline node
#' drop any surviving points further out than the nearest dropped point on
#' the same side (these lie beyond the channel edge, e.g. in another
#' channel). Writes a "*_CLIPPED.csv" next to each input file.
mask_clipper<-function(path, mask, glob_path="*REARRANGED.csv", limit_different_edge_lengths=FALSE, diff_factor=1.25){
  filenames=Sys.glob(paste0(path, glob_path))
  if(length(filenames)==0){
    stop("No files found. Check you glob string (glob_path).")
  }
  for (f in filenames){
    #f=filenames[1]
    print(paste0("Working on: ", basename(f)))
    data=read.csv(f)
    # Promote to SpatialPoints so the mask can be sampled at each point.
    coordinates(data)<-~x+y
    data$mask_value=extract(mask, data)
    data=as.data.frame(data)
    # Padded bounding box used only for the optional diagnostic plots.
    xmin=min(data$x)-4000
    xmax=max(data$x)+4000
    ymin=min(data$y)-4000
    ymax=max(data$y)+4000
    plotting=FALSE
    if (plotting==TRUE){plot_mask_and_points(mask, mask_extent=extent(xmin, xmax, ymin, ymax), pnts=data, centre=FALSE)}
    # Drop points if not ocean (mask value 0); keep the dropped set so the
    # per-node distance cut-off can be derived from it below.
    dropped_data=data[(data$mask_value!=0),]
    data=data[!(data$mask_value!=0),] # drop all points where mask is not 0
    if (plotting==TRUE){plot_mask_and_points(mask, mask_extent=extent(xmin, xmax, ymin, ymax), pnts=data, centre=FALSE)}
    # Drop points where there is a break in the cumulative distance from centreline
    # -- delete rows from remaining data frame that have larger distances from the centreline than points already removed sharing the same centreline point
    all_fids=unique(data$cl_id)
    # Accumulator seeded with an all-NA row (removed again via na.omit).
    all_keeps=data.frame('x'=NA,
                         'y'=NA,
                         'cl_x'=NA,
                         'cl_y'=NA,
                         'cl_id'=NA,
                         'cl_dist_r'=NA,
                         'cl_dist_l'=NA,
                         'path'=NA,
                         'side'=NA,
                         'mask_value'=NA)
    for (fid in all_fids){
      #print(paste0("Working on: ", fid))
      # subset keeps by fid and select left and right
      keeps=subset(data, cl_id==fid)
      keeps_r=subset(keeps, side==1)
      keeps_l=subset(keeps, side==2)
      # subset drops by fid and select left and right
      drops=subset(dropped_data, cl_id==fid)
      drops_r=subset(drops, side==1)
      drops_l=subset(drops, side==2)
      # calculate the minimum cl_dist for this fid that was dropped
      min_dropped_cl_dist_r=min(drops_r$cl_dist_r)
      min_dropped_cl_dist_l=min(drops_l$cl_dist_l)
      # Keep only remaining points less than the min cl_dist value (from the drops)
      # Anything greater in the keeps dataframe is in another channel and we need to get rid of it
      # print(paste0("Points before second drop (right): ", nrow(keeps_r)))
      # print(paste0("Points before second drop (left): ", nrow(keeps_l)))
      keeps_r=keeps_r[keeps_r$cl_dist_r<min_dropped_cl_dist_r,]
      keeps_l=keeps_l[keeps_l$cl_dist_l<min_dropped_cl_dist_l,]
      # print(paste0("Points after second drop (right): ", nrow(keeps_r)))
      # print(paste0("Points after second drop (left): ", nrow(keeps_l)))
      keeps=rbind(keeps_r, keeps_l)
      #x11()
      #plot_mask_and_points(mask, mask_extent=extent(min(keeps$x)-5000, max(keeps$x)+5000, min(keeps$y)-5000, max(keeps$y)+5000), pnts=keeps, centre=FALSE)
      if (limit_different_edge_lengths==TRUE){
        # Optionally trim whichever side reaches disproportionately far.
        keeps=clip_side_length_outliers(keeps, diff_factor=diff_factor, verbose=0)
      }
      #x11()
      #plot_mask_and_points(mask, mask_extent=extent(min(keeps$x)-5000, max(keeps$x)+5000, min(keeps$y)-5000, max(keeps$y)+5000), pnts=keeps, centre=FALSE)
      # append r and l to a dataframe....
      all_keeps=rbind(all_keeps,keeps)
    }
    all_keeps=na.omit(all_keeps)
    if (limit_different_edge_lengths==TRUE){
      all_keeps=second_pass_normal_filter(all_keeps)
    }
    if (plotting==TRUE){
      plot_mask_and_points(mask, mask_extent=extent(xmin, xmax, ymin, ymax), pnts=all_keeps, centre=FALSE)
    }
    # Write to file
    ofile=paste0(file_path_sans_ext(f), "_CLIPPED.csv")
    write.csv(all_keeps, ofile, row.names=FALSE)
  }
  #x11()
  #print("PLOTTING SHOULD NOW TAKE PLACE...")
  #plot_mask_and_points(mask, mask_extent=extent(xmin, xmax, ymin, ymax), pnts=all_keeps, centre=FALSE)
  cat(" \n")
  cat("*********************\n")
  cat("Normal clip complete.\n")
  cat("*********************\n")
}
# Import guard: when the script is sourced with options(run.main = FALSE)
# the example driver below stays disabled; by default only a message is
# printed (the example invocation itself is commented out).
if (getOption('run.main', default=TRUE)) {
  print("Run from import ...")
  #print("Now running code with example code (will fail if earlier scripts in the processing chain have not already been run)")
  #path="../test_outputs/"
  #maskF="../test_data/aoi_mask.tif"
  #mask=raster(maskF)
  #glob_path="*REARRANGED.csv"
  #mask_clipper(path, mask, limit_different_edge_lengths=TRUE, diff_factor=2, glob_path=glob_path)
}
|
93e5a047df134303fbe35190c0286830cff64f66
|
c23b034a6600759c25c948265502cde27f3d2080
|
/exposure/exposure/v10/v10.graph.combined.r
|
6f2fc76f77cdba265d652f9d1905397188fed67e
|
[] |
no_license
|
YWAN446/Exposure-Assessment-Model
|
f6c5553ba9d561d62b6e05dd62238b55ff870f10
|
35b0d8e7d3fe70d36ab824329252b41a75c9d3ce
|
refs/heads/master
| 2020-03-12T16:53:44.149873
| 2018-04-23T20:31:43
| 2018-04-23T20:31:43
| 130,725,655
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,002
|
r
|
v10.graph.combined.r
|
# Open the combined-households-by-age PDF and pull the per-pathway exposure
# arrays out of the age-specific result lists HH.1/HH.2/HH.3 (age groups
# 0-1, 1-2 and 2-5 years). List positions 13-20 hold, per the variable
# names: drain, dirt, off-ground, floor, septage, produce, drinking water
# and total exposure — TODO confirm against where HH.* is built.
pdf(paste("./output/","exposure","-hh-combined-by-age",".pdf",sep=""));
for (k.age in 1:3){
  if (k.age==1){
    int.dirt1<-HH.1[[14]]
    int.flo1<-HH.1[[16]]
    int.offgr1<-HH.1[[15]]
    int.drain1<-HH.1[[13]]
    int.septage1<-HH.1[[17]]
    int.produce1<-HH.1[[18]]
    dw1<-HH.1[[19]]
    int.total1<-HH.1[[20]]
  }
  if (k.age==2){
    int.dirt2<-HH.2[[14]]
    int.flo2<-HH.2[[16]]
    int.offgr2<-HH.2[[15]]
    int.drain2<-HH.2[[13]]
    int.septage2<-HH.2[[17]]
    int.produce2<-HH.2[[18]]
    dw2<-HH.2[[19]]
    int.total2<-HH.2[[20]]
  }
  if (k.age==3){
    int.dirt3<-HH.3[[14]]
    int.flo3<-HH.3[[16]]
    int.offgr3<-HH.3[[15]]
    int.drain3<-HH.3[[13]]
    int.septage3<-HH.3[[17]]
    int.produce3<-HH.3[[18]]
    dw3<-HH.3[[19]]
    int.total3<-HH.3[[20]]
  }
}
####################################################################;
layout(matrix(c(1,2), 2, 1, byrow = TRUE), heights=c(1,3))
par(mar=c(0.5, 4, 2, 0.5))
barplot(c(frac0(int.dirt1[,]),frac0(int.dirt2[,]),frac0(int.dirt3[,]),
frac0(int.flo1[,]),frac0(int.flo2[,]),frac0(int.flo3[,]),
frac0(int.offgr1[,]),frac0(int.offgr2[,]),frac0(int.offgr3[,]),
frac0(int.drain1[,]),frac0(int.drain2[,]),frac0(int.drain3[,]),
frac0(int.septage1[,]),frac0(int.septage2[,]),frac0(int.septage3[,]),
frac0(int.produce1[,]),frac0(int.produce2[,]),frac0(int.produce3[,]),
frac0(dw1[,1,]),frac0(dw2[,1,]),frac0(dw2[,1,]),
frac0(dw2[,2,]),frac0(dw2[,2,]),frac0(dw2[,2,]),
frac0(int.total1[,]),frac0(int.total2[,]),frac0(int.total3[,])),
ylab="fraction 0",
main="Combined Four Neighborhoods",las = 2,
# names=c("","dirt","","","floor","","","offgr","","","drain","","","septage","","","food","","","tap water","","","sachet water",""),
col=c("antiquewhite2","antiquewhite3","antiquewhite4","antiquewhite2","antiquewhite3","antiquewhite4","antiquewhite2","antiquewhite3","antiquewhite4","antiquewhite2","antiquewhite3","antiquewhite4",
"antiquewhite2","antiquewhite3","antiquewhite4","antiquewhite2","antiquewhite3","antiquewhite4","antiquewhite2","antiquewhite3","antiquewhite4","antiquewhite2","antiquewhite3","antiquewhite4"),
ylim=c(0,1),cex.names = 0.8);
grid(nx=NA, ny=NULL);
legend(-0.7,1,bty="n", cex=0.7, title="Age Group",c("0-1","1-2","2-5"),fill=c("antiquewhite2","antiquewhite3","antiquewhite4"))
par(mar=c(4, 4, 0.5, 0.5))
#####################################################################;
z.exp <- mergequan(cbind(log10(as.vector(non0(int.dirt1[,]))),log10(as.vector(non0(int.dirt2[,]))),log10(as.vector(non0(int.dirt3[,]))), # dirt floor -> hand-mouth
log10(as.vector(non0(int.flo1[,]))),log10(as.vector(non0(int.flo2[,]))),log10(as.vector(non0(int.flo3[,]))), # concrete floor -> hand-mouth
log10(as.vector(non0(int.offgr1[,]))),log10(as.vector(non0(int.offgr2[,]))),log10(as.vector(non0(int.offgr3[,]))), # off ground -> hand-mouth
log10(as.vector(non0(int.drain1[,]))),log10(as.vector(non0(int.drain2[,]))),log10(as.vector(non0(int.drain3[,]))), # drain -> hand-mouth
log10(as.vector(non0(int.septage1[,]))),log10(as.vector(non0(int.septage2[,]))),log10(as.vector(non0(int.septage3[,]))), # septage -> hand-mouth
log10(as.vector(non0(int.produce1[,]))),log10(as.vector(non0(int.produce2[,]))),log10(as.vector(non0(int.produce3[,]))), # eat -> ingest
log10(as.vector(non0(dw1[,1,]))),log10(as.vector(non0(dw2[,1,]))),log10(as.vector(non0(dw3[,1,]))), # drink tap water
log10(as.vector(non0(dw1[,2,]))),log10(as.vector(non0(dw2[,2,]))),log10(as.vector(non0(dw3[,2,]))), # drink sachet water
log10(as.vector(non0(int.total1[,]))),log10(as.vector(non0(int.total2[,]))),log10(as.vector(non0(int.total3[,])))), c());
#c("","dirt","","","floor","","","offgr","","","drain","","","septage","","","food","","","tap water","","","sachet water","","","Total",""));
bxp(z.exp,outline=FALSE,ylim=c(0,20),las = 2,xaxt="n",
boxfill=c("antiquewhite2","antiquewhite3","antiquewhite4","antiquewhite2","antiquewhite3","antiquewhite4","antiquewhite2","antiquewhite3","antiquewhite4","antiquewhite2","antiquewhite3","antiquewhite4",
"antiquewhite2","antiquewhite3","antiquewhite4","antiquewhite2","antiquewhite3","antiquewhite4","antiquewhite2","antiquewhite3","antiquewhite4","antiquewhite2","antiquewhite3","antiquewhite4",
"antiquewhite2","antiquewhite3","antiquewhite4"),
ylab="10log(dose)",#main=paste(neighbourhoods[k.neighb]),
cex.axis = 0.8);
points(1:27,cbind(log10(mean(as.vector(non0(int.dirt1[,])),na.rm=TRUE)),log10(mean(as.vector(non0(int.dirt2[,])),na.rm=TRUE)),log10(mean(as.vector(non0(int.dirt3[,])),na.rm=TRUE)), # dirt floor -> hand-mouth
log10(mean(as.vector(non0(int.flo1[,])),na.rm=TRUE)),log10(mean(as.vector(non0(int.flo2[,])),na.rm=TRUE)), log10(mean(as.vector(non0(int.flo3[,])),na.rm=TRUE)),# concrete floor -> hand-mouth
log10(mean(as.vector(non0(int.offgr1[,])),na.rm=TRUE)),log10(mean(as.vector(non0(int.offgr2[,])),na.rm=TRUE)),log10(mean(as.vector(non0(int.offgr3[,])),na.rm=TRUE)), # off ground -> hand-mouth
log10(mean(as.vector(non0(int.drain1[,])),na.rm=TRUE)),log10(mean(as.vector(non0(int.drain2[,])),na.rm=TRUE)),log10(mean(as.vector(non0(int.drain3[,])),na.rm=TRUE)), # drain -> hand-mouth
log10(mean(as.vector(non0(int.septage1[,])),na.rm=TRUE)),log10(mean(as.vector(non0(int.septage2[,])),na.rm=TRUE)),log10(mean(as.vector(non0(int.septage3[,])),na.rm=TRUE)), # septage -> hand-mouth
log10(mean(as.vector(non0(int.produce1[,])),na.rm=TRUE)),log10(mean(as.vector(non0(int.produce2[,])),na.rm=TRUE)),log10(mean(as.vector(non0(int.produce3[,])),na.rm=TRUE)), # eat -> ingest
log10(mean(as.vector(non0(dw1[,1,])),na.rm=TRUE)),log10(mean(as.vector(non0(dw2[,1,])),na.rm=TRUE)),log10(mean(as.vector(non0(dw3[,1,])),na.rm=TRUE)), # drink tap water
log10(mean(as.vector(non0(dw1[,2,])),na.rm=TRUE)),log10(mean(as.vector(non0(dw2[,2,])),na.rm=TRUE)),log10(mean(as.vector(non0(dw3[,2,])),na.rm=TRUE)), # drink sachet water
log10(mean(as.vector(non0(int.total1[,])),na.rm=TRUE)),log10(mean(as.vector(non0(int.total2[,])),na.rm=TRUE)),log10(mean(as.vector(non0(int.total3[,])),na.rm=TRUE))),
col="black",cex=1
)
points(1:27,cbind(log10(mean(as.vector(non0(int.dirt1[,])),na.rm=TRUE)),log10(mean(as.vector(non0(int.dirt2[,])),na.rm=TRUE)),log10(mean(as.vector(non0(int.dirt3[,])),na.rm=TRUE)), # dirt floor -> hand-mouth
log10(mean(as.vector(non0(int.flo1[,])),na.rm=TRUE)),log10(mean(as.vector(non0(int.flo2[,])),na.rm=TRUE)), log10(mean(as.vector(non0(int.flo3[,])),na.rm=TRUE)),# concrete floor -> hand-mouth
log10(mean(as.vector(non0(int.offgr1[,])),na.rm=TRUE)),log10(mean(as.vector(non0(int.offgr2[,])),na.rm=TRUE)),log10(mean(as.vector(non0(int.offgr3[,])),na.rm=TRUE)), # off ground -> hand-mouth
log10(mean(as.vector(non0(int.drain1[,])),na.rm=TRUE)),log10(mean(as.vector(non0(int.drain2[,])),na.rm=TRUE)),log10(mean(as.vector(non0(int.drain3[,])),na.rm=TRUE)), # drain -> hand-mouth
log10(mean(as.vector(non0(int.septage1[,])),na.rm=TRUE)),log10(mean(as.vector(non0(int.septage2[,])),na.rm=TRUE)),log10(mean(as.vector(non0(int.septage3[,])),na.rm=TRUE)), # septage -> hand-mouth
log10(mean(as.vector(non0(int.produce1[,])),na.rm=TRUE)),log10(mean(as.vector(non0(int.produce2[,])),na.rm=TRUE)),log10(mean(as.vector(non0(int.produce3[,])),na.rm=TRUE)), # eat -> ingest
log10(mean(as.vector(non0(dw1[,1,])),na.rm=TRUE)),log10(mean(as.vector(non0(dw2[,1,])),na.rm=TRUE)),log10(mean(as.vector(non0(dw3[,1,])),na.rm=TRUE)), # drink tap water
log10(mean(as.vector(non0(dw1[,2,])),na.rm=TRUE)),log10(mean(as.vector(non0(dw2[,2,])),na.rm=TRUE)),log10(mean(as.vector(non0(dw3[,2,])),na.rm=TRUE)), # drink sachet water
log10(mean(as.vector(non0(int.total1[,])),na.rm=TRUE)),log10(mean(as.vector(non0(int.total2[,])),na.rm=TRUE)),log10(mean(as.vector(non0(int.total3[,])),na.rm=TRUE))),
col="black",cex=1.5
)
grid(nx=NA, ny=NULL);
legend(0,20,bty="n", cex=0.8, title="Age Group",c("0-1","1-2","2-5"),fill=c("antiquewhite2","antiquewhite3","antiquewhite4"))
axis(1, at=c(2,5,8,11,14,17,20,23,26), labels=c("dirt","floor","offgr","drain",
"septage","food","tap water","sachet","total"), las=1, cex.axis=0.8)
dev.off();
|
f2349063f092d27ae13269c39e2fb280a4aee331
|
ca06fec45eaaa886c34910541aa1b8012989f947
|
/movielens Project- Jersson Placido.R
|
72518e1739d702d245935423f16fd8146c0a105d
|
[] |
no_license
|
jepeteso/movielens
|
3c41bc5075152900562cb595cc854d8cdb9ba61e
|
c034ddd491088093f17e274b61422bce63725a22
|
refs/heads/master
| 2023-04-02T02:59:06.345315
| 2021-04-02T21:09:24
| 2021-04-02T21:09:24
| 354,122,053
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,419
|
r
|
movielens Project- Jersson Placido.R
|
#####################################################################################
# Create the train set (edx) and test set (validation set| final hold-out test set)
####################################################################################
# Install any missing dependency, then attach it.
# NOTE(review): require() is used here only as an availability probe before
# install.packages(); the actual attachment happens via library() below.
if(!require(tidyverse)) install.packages("tidyverse", repos = "http://cran.us.r-project.org")
if(!require(caret)) install.packages("caret", repos = "http://cran.us.r-project.org")
if(!require(data.table)) install.packages("data.table", repos = "http://cran.us.r-project.org")
#load the required libraries
library(tidyverse)
library(caret)
library(data.table)
library(lubridate)
# MovieLens 10M dataset:
# https://grouplens.org/datasets/movielens/10m/
# http://files.grouplens.org/datasets/movielens/ml-10m.zip
# Download the zipped data to a temp file, then parse the two "::"-separated
# flat files: ratings.dat and movies.dat.
dl <- tempfile()
download.file("http://files.grouplens.org/datasets/movielens/ml-10m.zip", dl)
ratings <- fread(text = gsub("::", "\t", readLines(unzip(dl, "ml-10M100K/ratings.dat"))),
col.names = c("userId", "movieId", "rating", "timestamp"))
movies <- str_split_fixed(readLines(unzip(dl, "ml-10M100K/movies.dat")), "\\::", 3)
colnames(movies) <- c("movieId", "title", "genres")
# if using R 3.6 or earlier:
#movies <- as.data.frame(movies) %>% mutate(movieId = as.numeric(levels(movieId))[movieId],
# title = as.character(title),
#genres = as.character(genres))
# if using R 4.0 or later:
movies <- as.data.frame(movies) %>% mutate(movieId = as.numeric(movieId),
title = as.character(title),
genres = as.character(genres))
# Join ratings with the movie metadata on movieId.
movielens <- left_join(ratings, movies, by = "movieId")
# Validation set will be 10% of MovieLens data
set.seed(1, sample.kind="Rounding") # if using R 3.5 or earlier, use `set.seed(1)`
test_index <- createDataPartition(y = movielens$rating, times = 1, p = 0.1, list = FALSE)
edx <- movielens[-test_index,]
temp <- movielens[test_index,]
# Make sure userId and movieId in validation set are also in edx set
validation <- temp %>%
semi_join(edx, by = "movieId") %>%
semi_join(edx, by = "userId")
# Add rows removed from validation set back into edx set
removed <- anti_join(temp, validation)
edx <- rbind(edx, removed)
# Drop the download handle and intermediates to free memory.
rm(dl, ratings, movies, test_index, temp, movielens, removed)
###################################################################
# Create train and test sets from the edx data set
###################################################################
# Split edx 80/20 into an internal train/test pair used for model tuning,
# keeping `validation` untouched as the final hold-out set.
set.seed(1, sample.kind="Rounding") # if using R 3.5 or earlier, use `set.seed(1)`
edx_test_index <- createDataPartition(y = edx$rating, times = 1, p = 0.2, list = FALSE) #train set is 80% of the original edx data
edx_train_set<- edx[-edx_test_index,]
edx_test_set <- edx[edx_test_index,]
# Verify the partition
dim(edx_train_set)
dim(edx_test_set)
# Make sure userId and movieId in test set are also in train set
validationedx <- edx_test_set %>%
semi_join(edx_train_set, by = "movieId") %>%
semi_join(edx_train_set, by = "userId")
# Add rows removed from validationedx set back into edx_train_set
removed_edx <- anti_join(edx_test_set, validationedx)
edx_train_set<- rbind(edx_train_set, removed_edx)
# Root mean squared error between predicted and observed ratings.
# Both arguments are numeric vectors of equal length; returns one number.
RMSE <- function(predicted_ratings, true_ratings) {
  residuals <- predicted_ratings - true_ratings
  sqrt(mean(residuals^2))
}
######################################################################
# Building the Recommendation System utilizing the mean of the rating
######################################################################
# Baseline ("naive") model: predict every rating with the overall training mean.
med_hat <- mean(edx_train_set$rating) # rating mean calculation
naive_edx_rmse <- RMSE(med_hat, validationedx$rating) # naive rmse calculation
# FIX: data_frame() is deprecated in the tibble package; tibble() is the
# drop-in replacement with identical behaviour.
edx_rmse_results <- tibble(method = "Naive model with just the mean", RMSE = naive_edx_rmse)
edx_rmse_results %>% knitr::kable()
######################################################################
# Recommendation system utilizing movieID
#####################################################################
meanedx <- mean(edx_train_set$rating) #mean calculation
# Movie effect b_im: mean deviation of each movie's ratings from the global mean.
movie_med <- edx_train_set %>%
  group_by(movieId) %>%
  summarize(b_im = mean(rating - meanedx)) #mean rating per movieID
# Rating prediction using movieID as unique predictor
predicted_ratings_edx <- meanedx + validationedx %>%
  left_join(movie_med, by='movieId') %>%
  .$b_im
#Model 1 RMSE CALCULATION
model_1_rmse_1 <- RMSE(predicted_ratings_edx, validationedx$rating)
rmse_results_1 <- bind_rows(edx_rmse_results,
                            tibble(method="Movie Effect Model",
                                   RMSE = model_1_rmse_1 ))
# models comparison
rmse_results_1 %>% knitr::kable()
#######################################################
# Recommendation system utilizing movieID and userID
######################################################
# User effect b_um: mean residual per user after removing mean + movie effect.
user_med <- edx_train_set %>%
  left_join(movie_med, by='movieId') %>%
  group_by(userId) %>%
  summarize(b_um = mean(rating - meanedx - b_im))
predicted_ratings_2 <- validationedx %>%
  left_join(movie_med, by='movieId') %>%
  left_join(user_med, by='userId') %>%
  mutate(pred = meanedx + b_im + b_um) %>%
  .$pred
#Model 2 RMSE CALCULATION
model_2_rmse_2 <- RMSE(predicted_ratings_2, validationedx$rating)
# FIX: data_frame() is deprecated in the tibble package; use tibble().
rmse_results_2 <- bind_rows(rmse_results_1,
                            tibble(method="Movie + User Effects Model",
                                   RMSE = model_2_rmse_2 ))
# models comparison
rmse_results_2 %>% knitr::kable()
######################################################################
#Regularized model using movieID
######################################################################
# Regularized movie effect with a fixed penalty (lambda = 3): shrink b_im
# toward zero for movies with few ratings.
lambda <- 3
movie_reg_med_edx <- edx_train_set %>%
  group_by(movieId) %>%
  summarize(b_im = sum(rating - meanedx)/(n()+lambda), n_i = n())
predicted_ratings_3 <- validationedx %>%
  left_join(movie_reg_med_edx, by='movieId') %>%
  mutate(pred = meanedx + b_im) %>%
  .$pred
model_3_rmse_3 <- RMSE(predicted_ratings_3, validationedx$rating)
rmse_results_3 <- bind_rows(rmse_results_2,
                            tibble(method="Regularized Movie Effect Model",
                                   RMSE = model_3_rmse_3 ))
rmse_results_3 %>% knitr::kable()
#######################################################################
#Regularized Movie + User Effect Model
######################################################################
# Tune the shared penalty lambda over a grid; both the movie and user effects
# are shrunk by the same lambda.
lambdas <- seq(0, 10, 0.25)
# PERF: the training-set mean does not depend on lambda, so compute it once
# here instead of once per candidate lambda inside the sapply() loop.
med <- mean(edx_train_set$rating)
rmses_edx <- sapply(lambdas, function(l){
  b_im <- edx_train_set %>%
    group_by(movieId) %>%
    summarize(b_im = sum(rating - med)/(n()+l))
  b_um <- edx_train_set %>%
    left_join(b_im, by="movieId") %>%
    group_by(userId) %>%
    summarize(b_um = sum(rating - b_im - med)/(n()+l))
  predicted_ratings <-
    validationedx %>%
    left_join(b_im, by = "movieId") %>%
    left_join(b_um, by = "userId") %>%
    mutate(pred = med + b_im + b_um) %>%
    .$pred
  return(RMSE(predicted_ratings, validationedx$rating))
})
# lambdas vs RMSE plot
qplot(lambdas, rmses_edx)
# selected lambda value
# FIX: data_frame() is deprecated in the tibble package; use tibble().
lambda_rmses_edx <- tibble(model= "Regularized Movie + User Effect Model", lambda=lambdas[which.min(rmses_edx)])
lambda_rmses_edx %>% knitr::kable()
#RMSE comparisons
rmse_results_4 <- bind_rows(rmse_results_3,
                            tibble(method="Regularized Movie + User Effect Model",
                                   RMSE = min(rmses_edx)))
rmse_results_4 %>% knitr::kable()
#######################################################################
#Regularized Movie + User Effect Model + Date (week)
######################################################################
# Adds a rating-date effect d_um: ratings are bucketed by week and each week
# gets a regularized mean residual.
library(lubridate)
edx_train_date <- mutate(edx_train_set, date= as_datetime(timestamp)) #transform time stamp in datetime
edx_test_date <- mutate(validationedx, date= as_datetime(timestamp)) #transform time stamp in datetime
lambdas <- seq(0, 10, 0.25)
# PERF: hoist the lambda-invariant training mean out of the tuning loop.
med <- mean(edx_train_date$rating)
rmses_edx_date <- sapply(lambdas, function(l){
  b_im <- edx_train_date %>%
    group_by(movieId) %>%
    summarize(b_im = sum(rating - med)/(n()+l))
  b_um <- edx_train_date %>%
    left_join(b_im, by="movieId") %>%
    group_by(userId) %>%
    summarize(b_um = sum(rating - b_im - med)/(n()+l))
  d_um <- edx_train_date %>%
    left_join(b_im, by = "movieId") %>%
    left_join(b_um, by = "userId") %>%
    mutate(date = round_date(date, unit = "week")) %>%
    group_by(date) %>% summarize(d_um = sum(rating - b_im - med- b_um)/(n()+l))
  predicted_ratings12 <-
    edx_test_date %>%
    left_join(b_im, by = "movieId") %>%
    left_join(b_um, by = "userId") %>%
    mutate(date = round_date(date, unit = "week")) %>%
    left_join(d_um, by = "date") %>%
    mutate(pred = med + b_im + b_um+ d_um) %>%
    .$pred
  RMSE(predicted_ratings12, edx_test_date$rating)
})
# lambdas vs RMSE plot
qplot(lambdas, rmses_edx_date)
#selected lambda value
# FIX: data_frame() is deprecated in the tibble package; use tibble().
lambda_rmses_edx_date <- tibble(model= "Regularized Movie + User Effect + Date(week) Model", lambda=lambdas[which.min(rmses_edx_date)])
lambda_rmses_edx_date %>% knitr::kable() #best lambda
#RMSE COMPARISONS
rmse_results_5 <- bind_rows(rmse_results_4,
                            tibble(method="Regularized Movie + User Effect + Date(week) Model",
                                   RMSE = min(rmses_edx_date)))
rmse_results_5 %>% knitr::kable()
#######################################################################
#Regularized Movie + User Effect Model + rating date (week)+ genres
######################################################################
# Adds a genre effect g_um on top of movie, user and week-of-rating effects.
library(lubridate)
edx_train_date <- mutate(edx_train_set, date= as_datetime(timestamp)) #transform time stamp in datetime
edx_train_date1 <- mutate(edx_train_date, date = round_date(date, unit = "week"))#transform date in weeks
edx_test_date <- mutate(validationedx, date= as_datetime(timestamp)) #transform time stamp in datetime
edx_test_date1 <- mutate(edx_test_date, date = round_date(date, unit = "week"))#transform date in weeks
options(digits = 5)
lambdas <- seq(0, 10, 0.25)
# PERF: hoist the lambda-invariant training mean out of the tuning loop.
med <- mean(edx_train_date1$rating)
rmses_edx_date_ge <- sapply(lambdas, function(l){
  b_im <- edx_train_date1 %>%
    group_by(movieId) %>%
    summarize(b_im = sum(rating - med)/(n()+l))
  b_um <- edx_train_date1 %>%
    left_join(b_im, by="movieId") %>%
    group_by(userId) %>%
    summarize(b_um = sum(rating - b_im - med)/(n()+l))
  d_um <- edx_train_date1 %>%
    left_join(b_im, by = "movieId") %>%
    left_join(b_um, by = "userId") %>%
    group_by(date) %>% summarize(d_um = sum(rating - b_im - med- b_um)/(n()+l))
  g_um <- edx_train_date1 %>%
    left_join(b_im, by = "movieId") %>%
    left_join(b_um, by = "userId") %>%
    left_join(d_um, by = "date") %>%
    group_by(genres)%>% summarize(g_um = sum(rating - b_im - med- b_um - d_um)/(n()+l))
  predicted_ratings14 <-
    edx_test_date1 %>%
    left_join(b_im, by = "movieId") %>%
    left_join(b_um, by = "userId") %>%
    left_join(d_um, by = "date") %>%
    left_join(g_um, by = "genres")%>%
    mutate(pred = med + b_im + b_um + d_um + g_um) %>%
    .$pred
  RMSE(predicted_ratings14, edx_test_date1$rating)
})
#PLOT LAMBDAS VS RMSE
# BUGFIX: the original plotted rmses_edx_date (the previous model's RMSEs)
# here; plot this model's tuning curve instead.
qplot(lambdas, rmses_edx_date_ge)
#SELECTED LAMBDA
# FIX: data_frame() is deprecated in the tibble package; use tibble().
lambdae_edx_date_ge <- tibble(model= "Regularized Movie + User Effect + Date(week) + Genre Model", lambda=lambdas[which.min(rmses_edx_date_ge)])
lambdae_edx_date_ge %>% knitr::kable()
#RSME COMPARISON TABLE
rmse_results_6 <- bind_rows(rmse_results_5,
                            tibble(method="Regularized Movie + User Effect + Date(week) + Genre Model",
                                   RMSE = min(rmses_edx_date_ge)))
rmse_results_6 %>% knitr::kable()
#######################################################################
# VALIDATION OF THE MODEL USING THE ORIGINAL PARTION EDX AND VALIDATION
#Regularized Movie + User Effect Model + rating date (week)+ genres
#######################################################################
# Final fit: train the full model on all of edx and score the held-out
# validation set.
library(lubridate)
edx_dates <- mutate(edx, date= as_datetime(timestamp)) #transform time stamp in datetime
edx_dates1 <- mutate(edx_dates, date = round_date(date, unit = "week"))#transform date in weeks
validation_date <- mutate(validation, date= as_datetime(timestamp)) #transform time stamp in datetime
validation_date1 <- mutate(validation_date, date = round_date(date, unit = "week"))#transform date in weeks
options(digits = 5)
lambdas <- seq(0, 10, 0.25)
# PERF: hoist the lambda-invariant training mean out of the tuning loop.
medfi <- mean(edx_dates1$rating)
rmses_edx_final<- sapply(lambdas, function(l){
  b_imfi <- edx_dates1%>%
    group_by(movieId) %>%
    summarize(b_imfi = sum(rating - medfi)/(n()+l))
  b_umfi <- edx_dates1 %>%
    left_join(b_imfi, by="movieId") %>%
    group_by(userId) %>%
    summarize(b_umfi = sum(rating - b_imfi - medfi)/(n()+l))
  d_umfi <- edx_dates1 %>%
    left_join(b_imfi, by = "movieId") %>%
    left_join(b_umfi, by = "userId") %>%
    group_by(date) %>% summarize(d_umfi = sum(rating - b_imfi - medfi- b_umfi)/(n()+l))
  g_umfi <- edx_dates1 %>%
    left_join(b_imfi, by = "movieId") %>%
    left_join(b_umfi, by = "userId") %>%
    left_join(d_umfi, by = "date") %>%
    group_by(genres)%>% summarize(g_umfi = sum(rating - b_imfi - medfi- b_umfi - d_umfi)/(n()+l))
  predicted_ratingsf <-
    validation_date1 %>%
    left_join(b_imfi, by = "movieId") %>%
    left_join(b_umfi, by = "userId") %>%
    left_join(d_umfi, by = "date") %>%
    left_join(g_umfi, by = "genres")%>%
    mutate(pred = medfi + b_imfi + b_umfi + d_umfi + g_umfi) %>%
    .$pred
  RMSE(predicted_ratingsf, validation_date1$rating)
})
#LAMBDA VS RMSE PLOT
qplot(lambdas, rmses_edx_final)
#SELECTED LAMBDA
# FIX: data_frame() is deprecated in the tibble package; use tibble().
lambda_final<- tibble(model= "Regularized Movie + User + Date + Genre, Validation data", lambda=lambdas[which.min(rmses_edx_final)])
lambda_final %>% knitr::kable()
#RMSE OBTAINED
RMSE_Final <- tibble(model= "Regularized Movie + User + Date + Genre, Validation data", RMSE=min(rmses_edx_final))
RMSE_Final %>% knitr::kable()
#RMSE COMPARISON
rmse_results_final <- bind_rows(rmse_results_6,
                                tibble(method="Regularized Movie + User + Date + Genre, Validation data",
                                       RMSE = min(rmses_edx_final)))
# BUGFIX: the original re-printed RMSE_Final here; show the comparison table
# that was just built.
rmse_results_final %>% knitr::kable()
|
3b4d7e1b91f36b663f26bb428d6e984cb6789d61
|
88be221ad6071f4742bbfc7aae11c89efc8d58c8
|
/lib/performance measure.R
|
15f7d02dd4fa62042d397d30c87e14b54e253dc0
|
[] |
no_license
|
claudialeee/Optical-Character-Recognition
|
c25c108b41ba49d61eda36eaa5d6414bdf03484a
|
7505e94b3bc2f714681585d229f4a39b7d2f12b3
|
refs/heads/master
| 2020-04-11T00:46:05.709872
| 2018-12-03T19:33:25
| 2018-12-03T19:33:25
| 161,395,152
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,208
|
r
|
performance measure.R
|
###################################
## input: dataframe with corrected words(first column) and corresponding truth words(second column)
##        (column order matters for the per-word character comparison below)
## output: performance measure table (knitr::kable) with word- and
##         character-wise precision/recall for raw Tesseract output vs.
##         Tesseract + postprocessing
###################################
measure <- function(df) {
  # Side effect: loads `dat`, `current_ground_truth_txt` and `tesseract_vec`
  # from disk into this function's environment.
  load("../output/data.RData")
  load("../output/current_ground_truth_txt.RData")
  load("../output/tesseract_vec.RData")
  # ---------------- word level evaluation ----------------
  # old intersection: raw Tesseract tokens vs. ground-truth tokens
  ground_truth_vec <- str_split(paste(current_ground_truth_txt, collapse = " ")," ")[[1]]
  old_intersect_vec <- vecsets::vintersect(tolower(ground_truth_vec), tolower(tesseract_vec))
  # new intersection: drop the detected error tokens, add their corrections
  error_word <- as.vector(dat$error_token[dat$label == 0])
  correct_word <- setdiff(tolower(tesseract_vec), error_word)
  error_correct_word <- df[, 1] # corrected words
  tesseract_delete_error_vec <- c(correct_word, error_correct_word)
  new_intersect_vec <- vecsets::vintersect(tolower(ground_truth_vec), tolower(tesseract_delete_error_vec))
  OCR_performance_table <- data.frame("Tesseract" = rep(NA,4),
                                      "Tesseract_with_postprocessing" = rep(NA,4))
  row.names(OCR_performance_table) <- c("word_wise_recall","word_wise_precision",
                                        "character_wise_recall","character_wise_precision")
  OCR_performance_table["word_wise_recall","Tesseract"] <- length(old_intersect_vec)/length(ground_truth_vec)
  OCR_performance_table["word_wise_precision","Tesseract"] <- length(old_intersect_vec)/length(tesseract_vec)
  OCR_performance_table["word_wise_recall","Tesseract_with_postprocessing"] <- length(new_intersect_vec)/length(ground_truth_vec)
  OCR_performance_table["word_wise_precision","Tesseract_with_postprocessing"] <- length(new_intersect_vec)/length(tesseract_delete_error_vec)
  # ---------------- character-level evaluation ----------------
  # old intersection: characters of raw Tesseract output vs. ground truth
  ground_truth_vec_char <- str_split(paste(ground_truth_vec, collapse = ""),"")[[1]]
  tesseract_vec_char <- str_split(paste(tesseract_vec, collapse = ""), "")[[1]]
  old_intersect_vec_char <- vecsets::vintersect(tolower(ground_truth_vec_char), tolower(tesseract_vec_char))
  # Compare one corrected/truth word pair character by character.
  # BUGFIX: str_split() returns a list of character vectors; the original
  # passed the lists themselves to vintersect(). Extract with [[1]].
  fun <- function(row) {
    return(vecsets::vintersect(str_split(row[1],"")[[1]], str_split(row[2],"")[[1]]))
  }
  correct_char <- str_split(paste(correct_word, collapse = ""),"")[[1]]
  # BUGFIX: apply() yields a list of per-row character vectors; unlist() so
  # the combined object is a flat character vector whose length counts chars.
  new_intersect_vec_char <- c(vecsets::vintersect(tolower(ground_truth_vec_char), tolower(correct_char)),
                              unlist(apply(df, 1, fun)))
  # BUGFIX: tesseract_delete_error_vec_char was referenced below but never
  # defined in the original; build it from the post-processed word vector.
  tesseract_delete_error_vec_char <- str_split(paste(tesseract_delete_error_vec, collapse = ""), "")[[1]]
  OCR_performance_table["character_wise_recall","Tesseract"] <- length(old_intersect_vec_char)/length(ground_truth_vec_char)
  OCR_performance_table["character_wise_precision","Tesseract"] <- length(old_intersect_vec_char)/length(tesseract_vec_char)
  OCR_performance_table["character_wise_recall","Tesseract_with_postprocessing"] <- length(new_intersect_vec_char)/length(ground_truth_vec_char)
  OCR_performance_table["character_wise_precision","Tesseract_with_postprocessing"] <- length(new_intersect_vec_char)/length(tesseract_delete_error_vec_char)
  return(kable(OCR_performance_table, caption="Summary of OCR performance"))
}
|
fe5980508873438f280f7ab1dc07e4ed4767b07a
|
0d454f32f0c1d538e9907435f361c51bfa988f67
|
/R/rearray.R
|
6bd4890eab5b5a65164f56048ffd107728ee1418
|
[] |
no_license
|
jscamac/jagstools
|
3218b00b03b176d0257588ec184e9bfe954e18b1
|
462c3f690f7a3489f75cc236823197994591c20b
|
refs/heads/master
| 2021-01-18T08:29:35.207840
| 2015-03-10T23:21:00
| 2015-03-10T23:21:00
| 31,931,926
| 1
| 0
| null | 2015-03-10T01:28:16
| 2015-03-10T01:28:16
| null |
UTF-8
|
R
| false
| false
| 1,883
|
r
|
rearray.R
|
# x = the jags object
# param = the exact name of the parameter to be converted back to an array
#         (can be a vector of param names, in which case a list of arrays will
#         be returned; can also be 'all', in which case all parameters with
#         dimensions will be returned)
# fields = the names of jags summary columns to include as array slices (last index)
# Returns a named list with one re-assembled array per matched parameter.
rearray <- function(x, param='all', fields='all') {
  if(identical(param, 'all')) param <- '.*'
  # Match only multi-dimensional nodes, i.e. names like "theta[i,j,...]".
  results <- jagsresults(x, paste(paste(param, '\\[[0-9]+(,[0-9]+)+\\]', sep=''),
                                  collapse='|'), regex=TRUE)
  if(!length(results)) stop(sprintf('No arrays found in object %s', deparse(substitute(x))))
  if(identical(fields, 'all')) fields <- colnames(results)
  if(!(all(fields %in% colnames(results))) & !(all(fields %in% seq_len(ncol(results))))) {
    stop(sprintf("fields must be either 'all', or a character vector of jags summary column names, which may include: %s",
                 toString(colnames(results))))
  }
  results <- results[, fields, drop=FALSE]
  # Group the summary rows by bare parameter name (text before the first '[').
  splt_results <- split(as.data.frame(results), sapply(strsplit(row.names(results), '\\[|,|\\]'), function(x) x[1]))
  # row.matches adapted from https://stat.ethz.ch/pipermail/r-help/2000-July/007393.html
  # Returns the indices of the rows of X that equal the vector y.
  row.matches <- function(y, X) {
    i <- seq(nrow(X))
    j <- 0
    while(length(i) && (j <- j + 1) <= ncol(X))
      i <- i[X[i, j] == y[j]]
    i
  }
  lapply(splt_results, function(x) {
    # Numeric index matrix parsed from row names like "theta[2,3]".
    ind <- do.call(rbind, strsplit(row.names(x), '\\[|,|\\]'))[, -1, drop=FALSE]
    ind <- apply(ind, 2, as.numeric)
    # Target array: one slice per summary column along the last dimension.
    a <- array(dim=c(apply(ind, 2, max), ncol(x)))
    arrind <- arrayInd(seq_along(a), dim(a))
    # IMPROVED: a plain loop with local assignment replaces the original
    # invisible(lapply(...)) + `<<-` side-effect idiom; 1:nrow() -> seq_len();
    # drop = FALSE guards the matrix shape expected by row.matches().
    for (i in seq_len(nrow(ind))) {
      a[row.matches(ind[i, ], arrind[, -ncol(arrind), drop = FALSE])] <- unlist(x[i, ])
    }
    dimnames(a)[[length(dim(a))]] <- as.list(colnames(x))
    drop(a)
  })
}
|
0661820a3193df122bdcef47c120f1ca618d4fa3
|
e06d96c26b6faa3f85e39c3e0f79be9678effa7d
|
/Archive_July/code/code_paper/plot_R_r_akd_FULL.R
|
663b9b9dcc9e7734f61b3d5ec495e06139fd32aa
|
[] |
no_license
|
juliachenc/covid19
|
9d2a3653032fd311582a7a7408f0a2cd76960f04
|
0d329b968876fd96ba390baee30e015778ea02f8
|
refs/heads/main
| 2023-06-04T15:56:16.962225
| 2021-06-23T18:34:32
| 2021-06-23T18:34:32
| 343,538,883
| 1
| 1
| null | 2021-04-05T19:19:08
| 2021-03-01T19:53:08
|
HTML
|
UTF-8
|
R
| false
| false
| 17,617
|
r
|
plot_R_r_akd_FULL.R
|
#############################################
## PLOT CODE
## Creates plots for time varying parameters:
## R(t)
## r(t)
## Alpha(t), Kappa(t), Delta(t)
##
## Arguments:
##   traj.CI            long-format data frame of trajectory quantiles; must
##                      have columns state.name, date, median, low_95/up_95,
##                      low_50/up_50
##   data.in            optional observed-data frame (columns matched against
##                      vars.to.plot); NULL to plot model output only
##   init.date.data     first date of the data ("2020-03-01" if NULL)
##   date.offset.4plot  days to extend the plot window before init.date (0 if NULL)
##   time.steps.4plot   number of days shown on the x axis
##   vars.to.plot       state.name codes to keep (see `longnames` below)
##   y.lab.in, y.max.in y-axis label and upper limit
##   chart.title        facet strip title
## Returns a ggplot object (median line + 50% and 95% ribbons per variable).
plot.together <- function(traj.CI=traj.CI, data.in=data.in, init.date.data=NULL, date.offset.4plot=NULL, time.steps.4plot, vars.to.plot, y.lab.in, y.max.in, chart.title) {
# Fill in defaults for the optional date arguments.
if(is.null(init.date.data)) {
init.date.data <- "2020-03-01"}
if(is.null(date.offset.4plot)){
date.offset.4plot=0}
###########
### traj.CI
###########
## Filter only to variable of interest
traj.CI <- traj.CI %>% dplyr::filter(state.name %in% vars.to.plot)
## Select only more recent dates
init.date <- init.date.data
init.date <- as.Date(init.date) #as.Date(lubridate::ydm(init.date))
startDatePlot <- init.date - date.offset.4plot -1 #15
endDatePlot <- startDatePlot + time.steps.4plot #- 40 # the constant 40 because the traj are not aligned to start date
traj.CI <- traj.CI %>% dplyr::filter(date >= startDatePlot) %>% dplyr::filter(date < endDatePlot)
## Add title
traj.CI$title <- chart.title
###########
### data.in
###########
## Data in -- plot only for the selected variable
## NOTE(review): `data` built here is only consumed by the geom_point layer
## that is commented out below, so observed points are currently never drawn.
if(!is.null(data.in)){
if(any(vars.to.plot %in% colnames(data.in))) { # FIX LATER -- REMOVE TMP
## Filter only to variable of interest
vars.to.extract <- vars.to.plot[vars.to.plot %in% colnames(data.in) ]
data.in<- data.in %>% dplyr::select(vars.to.extract)
## ALIGN DATES: DATA
## Observed data rows are assumed to start at init.date, one row per day.
no_obs <- nrow(data.in)
step <- 0:(no_obs-1)
date <- init.date + step
data.date <- cbind(date,data.in)
rownames(data.date) <- date
#data.date$date <- NULL
## Select only more recent dates
data.date <- data.date %>% dplyr::filter(date > startDatePlot)
data <- reshape2::melt(data.date, measure.vars = c(2:ncol(data.date)), variable.name = "state.name")
}
else {data = NULL}
}
#####################
### colors and names
#####################
## Human-readable legend labels, keyed below by compartment short code.
longnames <- c("Susceptible",
"New Obs. Infected",
"Current Obs. Infected",
"Cum. Obs. Infected",
"Current Tot. Infected",
"Cum. Tot. Infected",
"New in Hospital",
"Current in Hospital",
"Cum. in Hospital",
"Current in ICU",
"Cum. in ICU",
"Current Ventilation",
"Cum. Ventilation",
"New Deaths",
"Cum. Deaths",
"Recovered",
"R0(t)",
"Alpha(t)",
"Kappa(t)",
"Delta(t)",
"r(t)",
"CFR",
"IFR"
)
names(longnames) <- c(
"S",
"I_detect_new",
"I",
"Idetectcum",
"Itot",
"Itotcum",
"H_new",
"Htot",
"Htotcum",
"Q",
"Qcum",
"V",
"Vcum",
"D_new",
"D",
"R",
"Rt",
"Alpha_t",
"Kappa_t",
"Delta_t",
"r_t",
"CFR",
"IFR"
)
## Colors
## One fill/line color per short code, in the same order as longnames.
cols.list <- c(
"salmon",
"sandybrown",
"navajowhite3",
"olivedrab4",
"olivedrab2",
"mediumseagreen",
"mediumaquamarine",
"mediumturquoise",
"cyan2",
"lightskyblue",
"steelblue2",
"mediumpurple",
"mediumorchid",
"plum1",
"violetred1",
"deeppink4",
"grey50",
"mediumturquoise",
"lightskyblue",
"violetred1",
"grey50",
"grey50",
"grey50"
)
names(cols.list) <- names(longnames)
color.this.var <- as.character(cols.list[vars.to.plot])
##################
### CREATE PLOT
##################
## Base layer: 95% CI ribbon bounds mapped from the quantile columns.
p <- ggplot(data = traj.CI,
aes(x = date,
y = median, ymin = low_95, ymax = up_95,
color = state.name,
fill = state.name,
group = state.name))
## Inner (50% CI) ribbon drawn on top of the 95% ribbon.
p <- p + geom_ribbon(data = traj.CI,
aes(x = date,
y = median, ymin = low_50, ymax = up_50,
color = state.name,
fill = state.name,
group = state.name),alpha = .5, inherit.aes=TRUE, color=FALSE)
p <- p + scale_fill_manual(values = c(color.this.var),labels = longnames) + scale_color_manual(values = c(color.this.var), labels = longnames)
## Median line plus the (outer) 95% ribbon.
p <- p + geom_line() + geom_ribbon(alpha = 0.2, color = FALSE)
# if(!is.null(data)){
#   p <- p + geom_point(data = data,
#                       aes(x = date, y = value,
#                           color = state.name),
#                       alpha = 0.7,
#                       inherit.aes = FALSE)
# }
## Axis formatting: fixed date window, 10 evenly spaced y breaks up to y.max.in.
p <- p + theme_bw() + theme(legend.title = element_blank())
p <- p + scale_x_date(limits = as.Date(c(startDatePlot,endDatePlot)), date_breaks = "2 weeks" , date_labels = "%d-%b-%y")
p <- p + scale_y_continuous(limits = c(0,y.max.in), breaks = seq(from = 0, to = y.max.in, by = y.max.in/10))
p <- p + theme(axis.text.x = element_text(angle = 90),
strip.text.x = element_text(size = 12, face = "bold"))
# p <- p + ylab(paste0("Number ", as.character(longnames[var.to.plot]))) + xlab(NULL)
#p <- p + ylab("Probability") + xlab(NULL)
p <- p + ylab(y.lab.in) + xlab(NULL)
#p <- p + labs(title = title.input)
#p<-p+theme(plot.title = element_text(size = 12, hjust = 0.5, face="bold"))
## The chart title is rendered as a single facet strip.
p <- p + facet_grid(. ~ title)
p
}
plot.param.t <- function(ABC_out){
ABC.par <- ABC_out$param
#################################
## Plot fn with CI
# plot.together <- function(traj.CI=traj.CI, data.in=data.in, init.date.data=NULL, date.offset.4plot=NULL, time.steps.4plot, vars.to.plot, y.lab.in, y.max.in, chart.title) {
#
# if(is.null(init.date.data)) {
# init.date.data <- "2020-03-01"}
# if(is.null(date.offset.4plot)){
# date.offset.4plot=0}
#
# ###########
# ### traj.CI
# ###########
#
# ## Filter only to variable of interest
# traj.CI <- traj.CI %>% dplyr::filter(state.name %in% vars.to.plot)
#
# ## Select only more recent dates
# init.date <- init.date.data
# init.date <- as.Date(init.date) #as.Date(lubridate::ydm(init.date))
# startDatePlot <- init.date - date.offset.4plot -1 #15
# endDatePlot <- startDatePlot + time.steps.4plot #- 40 # the constant 40 because the traj are not aligned to start date
# traj.CI <- traj.CI %>% dplyr::filter(date >= startDatePlot) %>% dplyr::filter(date < endDatePlot)
#
# ## Add title
# traj.CI$title <- chart.title
#
# ###########
# ### data.in
# ###########
#
# ## Data in -- plot only for the selected variable
# if(!is.null(data.in)){
#
# if(any(vars.to.plot %in% colnames(data.in))) { # FIX LATER -- REMOVE TMP
#
# ## Filter only to variable of interest
# vars.to.extract <- vars.to.plot[vars.to.plot %in% colnames(data.in) ]
#
# data.in<- data.in %>% dplyr::select(vars.to.extract)
#
# ## ALIGN DATES: DATA
# no_obs <- nrow(data.in)
# step <- 0:(no_obs-1)
# date <- init.date + step
# data.date <- cbind(date,data.in)
# rownames(data.date) <- date
# #data.date$date <- NULL
#
# ## Select only more recent dates
# data.date <- data.date %>% dplyr::filter(date > startDatePlot)
# data <- reshape2::melt(data.date, measure.vars = c(2:ncol(data.date)), variable.name = "state.name")
# }
#
# else {data = NULL}
# }
#
# #####################
# ### colors and names
# #####################
#
# longnames <- c("Susceptible",
# "New Obs. Infected",
# "Current Obs. Infected",
# "Cum. Obs. Infected",
# "Current Tot. Infected",
# "Cum. Tot. Infected",
# "New in Hospital",
# "Current in Hospital",
# "Cum. in Hospital",
# "Current in ICU",
# "Cum. in ICU",
# "Current Ventilation",
# "Cum. Ventilation",
# "New Deaths",
# "Cum. Deaths",
# "Recovered",
# "R0(t)",
# "Alpha(t)",
# "Kappa(t)",
# "Delta(t)",
# "r(t)",
# "CFR",
# "IFR"
# )
#
# names(longnames) <- c(
# "S",
# "I_detect_new",
# "I",
# "Idetectcum",
# "Itot",
# "Itotcum",
# "H_new",
# "Htot",
# "Htotcum",
# "Q",
# "Qcum",
# "V",
# "Vcum",
# "D_new",
# "D",
# "R",
# "Rt",
# "Alpha_t",
# "Kappa_t",
# "Delta_t",
# "r_t",
# "CFR",
# "IFR"
# )
#
# ## Colors
#
# cols.list <- c(
# "salmon",
# "sandybrown",
# "navajowhite3",
# "olivedrab4",
# "olivedrab2",
# "mediumseagreen",
# "mediumaquamarine",
# "mediumturquoise",
# "cyan2",
# "lightskyblue",
# "steelblue2",
# "mediumpurple",
# "mediumorchid",
# "plum1",
# "violetred1",
# "deeppink4",
# "grey50",
# "mediumturquoise",
# "lightskyblue",
# "violetred1",
# "grey50",
# "grey50",
# "grey50"
# )
# names(cols.list) <- names(longnames)
# color.this.var <- as.character(cols.list[vars.to.plot])
#
# ##################
# ### CREATE PLOT
# ##################
#
# p <- ggplot(data = traj.CI,
# aes(x = date,
# y = median, ymin = low_95, ymax = up_95,
# color = state.name,
# fill = state.name,
# group = state.name))
#
# p <- p + geom_ribbon(data = traj.CI,
# aes(x = date,
# y = median, ymin = low_50, ymax = up_50,
# color = state.name,
# fill = state.name,
# group = state.name),alpha = .5, inherit.aes=TRUE, color=FALSE)
#
# p <- p + scale_fill_manual(values = c(color.this.var),labels = longnames) + scale_color_manual(values = c(color.this.var), labels = longnames)
# p <- p + geom_line() + geom_ribbon(alpha = 0.2, color = FALSE)
#
# # if(!is.null(data)){
# # p <- p + geom_point(data = data,
# # aes(x = date, y = value,
# # color = state.name),
# # alpha = 0.7,
# # inherit.aes = FALSE)
# # }
#
# p <- p + theme_bw() + theme(legend.title = element_blank())
# p <- p + scale_x_date(limits = as.Date(c(startDatePlot,endDatePlot)), date_breaks = "2 weeks" , date_labels = "%d-%b-%y")
# p <- p + scale_y_continuous(limits = c(0,y.max.in), breaks = seq(from = 0, to = y.max.in, by = y.max.in/10))
# p <- p + theme(axis.text.x = element_text(angle = 90),
# strip.text.x = element_text(size = 12, face = "bold"))
# # p <- p + ylab(paste0("Number ", as.character(longnames[var.to.plot]))) + xlab(NULL)
# #p <- p + ylab("Probability") + xlab(NULL)
# p <- p + ylab(y.lab.in) + xlab(NULL)
# #p <- p + labs(title = title.input)
# #p<-p+theme(plot.title = element_text(size = 12, hjust = 0.5, face="bold"))
# p <- p + facet_grid(. ~ title)
#
# p
#
# }
#################################
## Confidence intervals
posterior.CI <- function(posterior.var, round.by=4){
  # Summarize a vector of posterior draws as a one-row data frame of
  # quantiles: the 95% CI bounds, the 50% (interquartile) CI bounds, and
  # the median, each rounded to `round.by` digits.
  #
  # posterior.var: numeric vector of posterior samples (NAs are ignored).
  # round.by:      number of decimal digits to keep (default 4).
  # Returns: data.frame with columns low_95, low_50, median, up_50, up_95.
  #
  # FIX: a `mean` was previously computed here but never included in the
  # returned frame (and, unlike the quantiles, without na.rm=TRUE); the
  # dead assignment has been removed. Returned columns are unchanged.
  median <- quantile(posterior.var, c(.5), na.rm=TRUE)
  low_95 <- quantile(posterior.var, c(.025), na.rm=TRUE)
  low_50 <- quantile(posterior.var, c(.25), na.rm=TRUE)
  up_50  <- quantile(posterior.var, c(.75), na.rm=TRUE)
  up_95  <- quantile(posterior.var, c(.975), na.rm=TRUE)
  posterior.CI <- as.data.frame(cbind(low_95,low_50,median,up_50,up_95))
  posterior.CI <- round(posterior.CI, digits=round.by)
  return(posterior.CI)
}
#################################
## Put fn(t) in format for plots
format.4.plot <- function(fn_t, fn_y_chr, fn.posterior.CI, fn.name){
  # Assemble a plottable data frame for a time-varying quantity fn(t):
  # one CI row per time point in `fn_t`, looked up by name from `fn_y_chr`
  # (each entry names a posterior-CI object in the enclosing scope), plus
  # `date` and `state.name` columns for downstream ggplot faceting.
  #
  # fn_t:            vector of dates (one per step of the function).
  # fn_y_chr:        character vector of object names holding the CI rows.
  # fn.posterior.CI: a reference CI frame supplying the column layout.
  # fn.name:         label written into every row's state.name column.
  n_steps <- length(fn_t)
  ci_tab <- as.data.frame(matrix(nrow = n_steps, ncol = ncol(fn.posterior.CI)))
  for (step in seq_len(n_steps)) {
    # get() resolves the named CI object lexically (enclosing scope).
    ci_tab[step, ] <- get(fn_y_chr[step])
  }
  colnames(ci_tab) <- colnames(fn.posterior.CI)
  fn_plot <- as.vector(ci_tab)
  rownames(fn_plot) <- seq_len(n_steps)
  fn_plot$date <- fn_t
  fn_plot$state.name <- rep(fn.name, n_steps)
  return(fn_plot)
}
#################################
# GET VARIABLES AND APPLY CI
ABC.par.CI <- apply(ABC_out$param, MARGIN=2, FUN=posterior.CI)
#############################################
## Alpha(t) Kappa(t) Delta(t)
Alpha1.CI <- ABC.par.CI[[6]]
Alpha2.CI <- ABC.par.CI[[11]]
Kappa1.CI <- ABC.par.CI[[7]]
Kappa2.CI <- ABC.par.CI[[12]]
Delta1.CI <- ABC.par.CI[[5]]
Delta2.CI <- ABC.par.CI[[10]]
# GET ORDER OF VALUES
start_time = round(mean(ABC.par[,3]))
alpha_t_readin_path <- path(data.dir, "alpha_t_readin.csv")
alpha_t_readin = as.data.frame(read.csv(alpha_t_readin_path, sep=",",stringsAsFactors = FALSE))
Alpha_t_dates <- as.Date(alpha_t_readin$Alpha_t)
Alpha_t_dates[1] <- Alpha_t_dates[1]-start_time
Alpha.t <- Alpha_t_dates
Alpha.t[length(Alpha.t)] <- Sys.Date()
# ALPHA
Alpha.chr <- alpha_t_readin$Alpha_y
assign("Alpha1",Alpha1.CI)
assign("Alpha2", Alpha2.CI)
Alpha_plot <- format.4.plot(fn_t = Alpha.t, fn_y_chr = Alpha.chr, fn.posterior.CI=Alpha1.CI, fn.name="Alpha_t" )
# KAPPA
Kappa.chr <- alpha_t_readin$Kappa_y
assign("Kappa1",Kappa1.CI)
assign("Kappa2", Kappa2.CI)
Kappa_plot <- format.4.plot(fn_t = Alpha.t, fn_y_chr = Kappa.chr, fn.posterior.CI=Kappa1.CI, fn.name="Kappa_t" )
# DELTA
Delta.chr <- alpha_t_readin$Delta_y
assign("Delta1",Delta1.CI)
assign("Delta2", Delta2.CI)
Delta_plot <- format.4.plot(fn_t = Alpha.t, fn_y_chr = Delta.chr, fn.posterior.CI=Delta1.CI, fn.name="Delta_t" )
# PLOTTING AKD
traj.CI <- rbind(Alpha_plot, Kappa_plot, Delta_plot)
vars.to.plot <- c("Alpha_t","Kappa_t","Delta_t")
data.in <- NULL
y.max.in <- .8
y.lab.in <- "Probability"
chart.title <- "Population-Average Probabilities of Severe Illness"
time.steps.4plot <- 300
AKD_t_plot <- plot.together(traj.CI=traj.CI, data.in=data.in, date.offset.4plot=start_time, time.steps.4plot = time.steps.4plot, vars.to.plot = vars.to.plot, y.lab.in=y.lab.in, y.max.in=y.max.in, chart.title=chart.title)
#AKD_t_plot
#############################################
## R(t)
# GET ORDER OF VALUES
out_R0 <- ABC.par[,1]
out_R0redux1<- ABC.par[,4]
out_R0redux2<- ABC.par[,9]
R0_x_redux1 <- out_R0*out_R0redux1
R0_x_redux2 <- out_R0*out_R0redux2
R0_x_redux3 <- R0_x_redux1
# GET QUANTILES FOR VARIABLE
R0.CI <- posterior.CI(out_R0,4)
R0.redux1.CI <- posterior.CI(R0_x_redux1,4)
R0.redux2.CI <- posterior.CI(R0_x_redux2,4)
R0.redux3.CI <- posterior.CI(R0_x_redux3,4)
# GET ORDER OF VALUES
fn_t_readin_path <- path(data.dir, "fn_t_readin.csv")
fn_t_readin = as.data.frame(read.csv(fn_t_readin_path, sep=",",stringsAsFactors = FALSE))
Beta_t_dates <- as.Date(fn_t_readin$Beta_t)
Beta_t_dates[1] <- Beta_t_dates[1]-start_time
Rt.t <- Beta_t_dates
Rt.t[length(Rt.t)] <- Sys.Date()
Rt.chr <- fn_t_readin$Beta_y
assign("mu.0",R0.CI)
assign("mu.1", R0.redux1.CI)
assign("mu.2", R0.redux2.CI)
assign("mu.3",R0.redux2.CI)
# PUT IN FORMAT FOR PLOTTING
Rt_plot <- format.4.plot(fn_t = Rt.t, fn_y_chr = Rt.chr, fn.posterior.CI=R0.CI, fn.name="Rt" )
# PLOTTING R(t)
traj.CI <- Rt_plot
vars.to.plot <- "Rt"
data.in <- NULL
y.max.in <- 4
y.lab.in <- "R(t)"
chart.title <- "Time-varying Reproductive Number R(t)"
time.steps.4plot <- 300
R_t_plot <- plot.together(traj.CI=traj.CI, data.in=data.in, date.offset.4plot=start_time, time.steps.4plot = time.steps.4plot, vars.to.plot = vars.to.plot, y.lab.in=y.lab.in, y.max.in=y.max.in, chart.title=chart.title)
#R_t_plot
#############################################
## r(t)
# GET ORDER OF VALUES
r1.CI <- ABC.par.CI[[2]]
r2.CI <- ABC.par.CI[[13]]
# GET ORDER OF VALUES
fn_t_readin_path <- path(data.dir, "fn_t_readin.csv")
fn_t_readin = as.data.frame(read.csv(fn_t_readin_path, sep=",",stringsAsFactors = FALSE))
r_t_dates <- as.Date(fn_t_readin$r_t)
r_t_dates[1] <- r_t_dates[1]-start_time
r_t_dates <- na.omit(r_t_dates)
r.t <- r_t_dates
r.t[length(r.t)] <- Sys.Date()
r.chr <- fn_t_readin$r_y
assign("r1",r1.CI)
assign("r2", r2.CI)
# PUT IN FORMAT FOR PLOTTING
r_plot <- format.4.plot(fn_t = r.t, fn_y_chr = r.chr, fn.posterior.CI=r1.CI, fn.name="r_t" )
# PLOTTING r(t)
traj.CI <- r_plot
vars.to.plot <- "r_t"
data.in <- NULL
y.max.in <- 1
y.lab.in <- "r(t)"
chart.title <- "Fraction of observed infections r(t)"
time.steps.4plot <- 300
r_t_plot <- plot.together(traj.CI=traj.CI, data.in=data.in, date.offset.4plot=start_time, time.steps.4plot = time.steps.4plot, vars.to.plot = vars.to.plot, y.lab.in=y.lab.in, y.max.in=y.max.in, chart.title=chart.title)
#r_t_plot
#############################################
## PUT TOGETHER
#AKD_t_plot + R_t_plot + r_t_plot
plot.out <- vector(mode="list", length=3)
plot.out[[1]] <- R_t_plot
plot.out[[2]] <- r_t_plot
plot.out[[3]] <- AKD_t_plot
return(plot.out)
}
|
6355f637d3207f658d52a634548bda96f67bef6c
|
ed7240eaeb54b899882755bce3f68480edac2def
|
/helper_functions/similarityFunctions.R
|
004039053f292f9bb7bafe81d12e51ed394036a1
|
[] |
no_license
|
asRodelgo/NBA
|
09eb2866d90583a22edc75b08c448843cbdac5c7
|
763496e7d913361556945004ca8a040e65771877
|
refs/heads/master
| 2020-05-22T01:31:50.932824
| 2017-12-10T20:58:25
| 2017-12-10T20:58:25
| 60,845,763
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,833
|
r
|
similarityFunctions.R
|
# Find similar players ------------------------------
#
# Using t-sne algorithm, find players that have similar characteristics to a given player.
# The objective is to predict his performance in a given year based on the historical performance
# of similar players (see: Nate Silver's CARMELO or PECOTA systems)
#
# Ex: If I want to predict Pau Gasol numbers for the season he will turn 36, I will start
# with his numbers in the previous seasons and I will adjust according to the average
# evolution of similar players when they turned 36.
#
# Ex: To be able to assign predicted characteristics to a rookie player, I will do a
# similar approach. See functions related to rookies and draft
#
# Build a per-player efficiency feature table from an arbitrary input table
# of raw player-season counting stats (same layout as `playersHist`).
#
# inputPlayers: data frame of player-season rows with raw counting stats
#               (MP, FG, FGA, X3P, ..., PTS) plus Player/Pos/Season/Age/Tm.
# Returns: plain data.frame of per-minute "eff*" rates and shooting
#          percentages, one row per player, with NAs imputed to 0.
.tSNE_prepareSelected <- function(inputPlayers){
  # Players that changed teams in the season have a column Tm == "TOT" with their total stats
  # and because I don't care about the team, this should be enough filter
  # playerAge <- 34
  # num_iter <- 300
  # max_num_neighbors <- 20
  # playerName <- "Pau Gasol"
  data_prepared <- inputPlayers %>%
    group_by(Player) %>%
    # keep == 1 flags players with multiple rows (i.e. a "TOT" row exists).
    # 3936 is presumably 48 min x 82 games = total available minutes -- confirm.
    mutate(keep = ifelse(n() > 1, 1, 0), effMin = MP/3936, effFG = FG/(3936*effMin),
           effFGA = FGA/(3936*effMin),eff3PM = X3P/(3936*effMin),eff3PA = X3PA/(3936*effMin),
           eff2PM = X2P/(3936*effMin),eff2PA = X2PA/(3936*effMin),
           effFTM = FT/(3936*effMin),effFTA = FTA/(3936*effMin),
           effORB = ORB/(3936*effMin),effDRB = DRB/(3936*effMin),
           effTRB = TRB/(3936*effMin),effAST = AST/(3936*effMin),
           effSTL = STL/(3936*effMin),effBLK = BLK/(3936*effMin),
           effTOV = TOV/(3936*effMin),effPF = PF/(3936*effMin),
           effPTS = PTS/(3936*effMin)) %>%
    filter(keep == 0 | Tm == "TOT") %>%
    filter(effMin*G >= .15) %>% # Played at least 15% of total available minutes
    # NOTE(review): the bare "FG" entries (twice, without a leading "-") in
    # this mostly-negative selection look like typos for "-FG" -- confirm.
    dplyr::select(Player,Pos,Season,Age,FGPer = FG.,FG3Per = X3P., FG2Per = X2P., effFGPer = eFG.,
                  FTPer = FT., starts_with("eff"),
                  -Tm,-keep,-G,-GS,-MP,FG,-FGA,-X3P,-X3PA,-X2P,-X2PA,-FG,-FTA,-ORB,-DRB,-TRB,-AST,
                  -BLK,-TOV,-PF,-FT,-STL,-PTS)
  # some players can be the same age during 2 seasons. Pick the one with the most minutes played
  data_prepared <- data_prepared %>%
    group_by(Player) %>%
    filter(effMin >= max(effMin)-.0001)  # small tolerance for float comparison
  # t-sne doesn't like NAs. Impute by assigning 0. If NA means no shot attempted, ie,
  # either the player didn't play enough time or is really bad at this particular type of shot.
  for (i in 4:(ncol(data_prepared)-1)){
    data_prepared[is.na(data_prepared[,i]),i] <- 0
  }
  data_prepared <- as.data.frame(data_prepared)
  return(data_prepared)
}
# Build the per-player efficiency feature table used as t-SNE input for a
# single age cohort.
#
# playerAge: keep only seasons in which the player was exactly this age.
# per_Min:   minimum fraction of total available minutes actually played.
# Reads `playersHist` from the enclosing/global environment.
# Returns: plain data.frame of "eff*" rates and shooting percentages,
#          one row per player, NAs imputed to 0.
.tSNE_prepare <- function(playerAge,per_Min){
  # Players that changed teams in the season have a column Tm == "TOT" with their total stats
  # and because I don't care about the team, this should be enough filter
  # playerAge <- 34
  # num_iter <- 300
  # max_num_neighbors <- 20
  # playerName <- "Pau Gasol"
  data_tsne <- playersHist %>%
    group_by(Player,Season) %>%
    # keep == 1 flags players with multiple rows (i.e. a "TOT" row exists).
    # 3936 is presumably 48 min x 82 games = total available minutes -- confirm.
    mutate(keep = ifelse(n() > 1, 1, 0), effMin = MP/3936, effFG = FG/(3936*effMin),
           effFGA = FGA/(3936*effMin),eff3PM = X3P/(3936*effMin),eff3PA = X3PA/(3936*effMin),
           eff2PM = X2P/(3936*effMin),eff2PA = X2PA/(3936*effMin),
           effFTM = FT/(3936*effMin),effFTA = FTA/(3936*effMin),
           effORB = ORB/(3936*effMin),effDRB = DRB/(3936*effMin),
           effTRB = TRB/(3936*effMin),effAST = AST/(3936*effMin),
           effSTL = STL/(3936*effMin),effBLK = BLK/(3936*effMin),
           effTOV = TOV/(3936*effMin),effPF = PF/(3936*effMin),
           effPTS = PTS/(3936*effMin)) %>%
    filter(keep == 0 | Tm == "TOT") %>%
    filter(effMin*G >= per_Min) %>% # Played at least X% of total available minutes
    # NOTE(review): the bare "FG" entries (twice, without a leading "-") in
    # this mostly-negative selection look like typos for "-FG" -- confirm.
    dplyr::select(Player,Pos,Season,Age,FGPer = FG.,FG3Per = X3P., FG2Per = X2P., effFGPer = eFG.,
                  FTPer = FT., starts_with("eff"),
                  -Tm,-keep,-G,-GS,-MP,FG,-FGA,-X3P,-X3PA,-X2P,-X2PA,-FG,-FTA,-ORB,-DRB,-TRB,-AST,
                  -BLK,-TOV,-PF,-FT,-STL,-PTS)
  # Filter by selected age
  data_tsne <- data_tsne %>%
    filter(Age == playerAge) %>%
    dplyr::select(-Age) # redundant column, same value (playerAge) for all observations
  # some players can be the same age during 2 seasons. Pick the one with the most minutes played
  data_tsne <- data_tsne %>%
    group_by(Player) %>%
    filter(effMin >= max(effMin)-.0001)  # small tolerance for float comparison
  # t-sne doesn't like NAs. Impute by assigning 0. If NA means no shot attempted, ie,
  # either the player didn't play enough time or is really bad at this particular type of shot.
  for (i in 4:(ncol(data_tsne)-1)){
    data_tsne[is.na(data_tsne[,i]),i] <- 0
  }
  data_tsne <- as.data.frame(data_tsne)
  return(data_tsne)
}
# Use this for the write_tsne_data_All
# Use this for the write_tsne_data_All
# Variant of .tSNE_prepare covering ALL ages/seasons at once (no age filter,
# keeps Age/Tm columns). Reads `playersHist` from the enclosing/global
# environment; returns a plain data.frame with NAs imputed to 0.
#
# NOTE(review): effMin here is MP*82/3936 (treating MP as a per-game figure?),
# whereas .tSNE_prepare uses MP/3936 on raw totals -- confirm which convention
# matches the input data.
.tSNE_prepare_All <- function(){
  # Players that changed teams in the season have a column Tm == "TOT" with their total stats
  # and because I don't care about the team, this should be enough filter
  # , effFG = FG/(3936*effMin),effFGA = FGA/(3936*effMin),eff3PM = X3P/(3936*effMin),
  # eff2PM = X2P/(3936*effMin),effFTM = FT/(3936*effMin),
  data_tsne <- playersHist %>%
    group_by(Player,Season) %>%
    # keep == 1 flags players with multiple rows (i.e. a "TOT" row exists).
    mutate(keep = ifelse(n() > 1, 1, 0), effMin = MP*82/3936,
           eff3PA = X3PA/(3936*effMin),eff3PM = X3P/(3936*effMin),
           eff2PA = X2PA/(3936*effMin),eff2PM = X2P/(3936*effMin),
           effFTA = FTA/(3936*effMin),effFTM = FT/(3936*effMin),
           effORB = ORB/(3936*effMin),effDRB = DRB/(3936*effMin),
           effAST = AST/(3936*effMin),
           effSTL = STL/(3936*effMin),effBLK = BLK/(3936*effMin),
           effTOV = TOV/(3936*effMin),effPF = PF/(3936*effMin),
           effPTS = PTS/(3936*effMin)) %>%
    filter(keep == 0 | Tm == "TOT") %>%
    filter(effMin >= .15) %>% # Played at least 15% of total available minutes
    dplyr::select(Player,Pos,Season,Age,Tm, starts_with("eff"))
  #P2Per = X2P., P3Per = X3P., FTPer = FT.
  # t-sne doesn't like NAs. Impute by assigning 0. If NA means no shot attempted, ie,
  # either the player didn't play enough time or is really bad at this particular type of shot.
  # (columns 1:5 are the id columns Player/Pos/Season/Age/Tm, hence the 6.)
  for (i in 6:(ncol(data_tsne)-1)){
    data_tsne[is.na(data_tsne[,i]),i] <- 0
  }
  # exponential transformation to improve tsne layout
  # for (i in 6:(ncol(data_tsne)-1)){
  #   data_tsne[,i] <- expm1(2*data_tsne[,i])
  # }
  # Try scaling to [0,1] to improve tsne final shape
  # maxs <- apply(data_tsne[,-c(1:5)], 2, max)
  # mins <- apply(data_tsne[,-c(1:5)], 2, min)
  # data_tsne[,-c(1:5)] <- as.data.frame(scale(data_tsne[,-c(1:5)], center = mins, scale = maxs - mins))
  data_tsne <- as.data.frame(data_tsne)
  return(data_tsne)
}
.tSNE_compute <- function(num_iter, max_num_neighbors, playerAge){
  # Reduce the prepared per-player feature table for `playerAge` to 2-D
  # t-SNE coordinates.
  #
  # num_iter:          number of t-SNE iterations (also used as epoch size).
  # max_num_neighbors: perplexity passed to tsne().
  # playerAge:         age cohort to embed.
  # Returns: matrix of 2-D coordinates, or an empty vector when no players
  #          qualify at that age.
  feature_tab <- .tSNE_prepare(playerAge, per_Min = .15)
  if (nrow(feature_tab) == 0) {
    return(c())
  }
  set.seed(456)  # fixed seed so the layout is reproducible
  # Columns 1:3 are the id columns (Player/Pos/Season); embed the rest.
  tsne(feature_tab[, -c(1:3)],
       max_iter = as.numeric(num_iter),
       perplexity = as.numeric(max_num_neighbors),
       epoch = num_iter)
}
# compute colors for regions
# compute colors for regions
.getColors <- function(num_iter, max_num_neighbors,playerAge,colVar){
  # Build one rainbow color per level of the chosen grouping variable for
  # the `playerAge` cohort: "Season" colors by season, anything else colors
  # by position (Pos). Returns a named character vector of colors.
  # (num_iter and max_num_neighbors are accepted for interface parity but
  # are not used here, as in the original.)
  cohort <- .tSNE_prepare(playerAge, per_Min = .15)
  grouping <- if (colVar == "Season") cohort$Season else cohort$Pos
  levels_found <- unique(grouping)
  colors <- setNames(rainbow(length(levels_found)), levels_found)
  return(colors)
}
# tsne chart ---------------------------------------------------------
.tSNE_plot <- function(playerName, num_iter, max_num_neighbors, playerAge, colVar){
  # Draw the 2-D t-SNE map for all players of age `playerAge`, labelling
  # each point with the player's name, colored according to `colVar`
  # ("Season" or position). Falls back to a "Not enough data" placeholder
  # when no precomputed layout exists for that age.
  #
  # BUG FIX: `data_tsne` was referenced below but never defined in this
  # function (only the commented-out .tSNE_compute path had one in scope),
  # so the labels silently depended on a leftover object from elsewhere.
  # Recompute it here exactly as the sibling .tSNE_dist does, which also
  # keeps labels aligned with the precomputed tsneBlock layout.
  data_tsne <- .tSNE_prepare(playerAge, per_Min = .15)
  #tsne_points <- .tSNE_compute(num_iter, max_num_neighbors, playerAge)
  tsne_points <- tsneBlock[[playerAge]]
  if (length(tsne_points)>0){
    par(mar=c(0,0,0,0))
    plot(tsne_points,t='n', axes=FALSE, frame.plot = FALSE, xlab = "",ylab = "")
    # NOTE(review): .getColors returns one color per level (not per point),
    # so colors are recycled over rows here -- confirm this is intended.
    graphics::text(tsne_points,labels=as.character(data_tsne$Player), col=.getColors(num_iter, max_num_neighbors,playerAge,colVar))
  } else {
    plot(c(1,1),type="n", frame.plot = FALSE, axes=FALSE, ann=FALSE)
    graphics::text(1.5, 1,"Not enough data", col="red", cex=2)
  }
}
# tsne dist ---------------------------------------------------------
# tsne dist ---------------------------------------------------------
# Rank every player of age `playerAge` by Euclidean distance to `playerName`
# in the precomputed 2-D t-SNE layout (`tsneBlock[[playerAge]]`).
#
# firstSeason: earliest season the target player must appear in; defaults to
#              "1979-1980" (effectively unrestricted).
# Returns: data frame with columns Player and "Euclid. distance", sorted
#          ascending (the target player appears first at distance 0), or an
#          empty tibble when no layout exists / the player is absent.
# Assumes rows of tsneBlock[[playerAge]] align 1:1 with the rows of
# .tSNE_prepare(playerAge, .15) -- TODO confirm.
.tSNE_dist <- function(playerName, num_iter, max_num_neighbors, playerAge, firstSeason = NULL){
  if(is.null(firstSeason)) firstSeason <- "1979-1980"
  data_tsne <- .tSNE_prepare(playerAge,per_Min=.15)
  #tsne_points <- .tSNE_compute(num_iter, max_num_neighbors, playerAge)
  tsne_points <- tsneBlock[[playerAge]]
  # NOTE(review): vectorized `&` on scalar conditions; `&&` would be the
  # conventional choice in an if().
  if (length(tsne_points)>0 & nrow(filter(data_tsne, Player == playerName, Season >= firstSeason))>0){
    # calculate the euclidean distance between the selected player and the rest
    # (column 3 holds the player name after the cbind below).
    dist_mat <- cbind(tsne_points,as.character(data_tsne$Player))
    dist_mat <- as.data.frame(dist_mat, stringsAsFactors=FALSE)
    dist_mat$V1 <- as.numeric(dist_mat$V1)
    dist_mat$V2 <- as.numeric(dist_mat$V2)
    distCou1 <- dist_mat[dist_mat[,3]==playerName,1]
    distCou2 <- dist_mat[dist_mat[,3]==playerName,2]
    dist_mat <- mutate(dist_mat, dist = sqrt((V1-distCou1)^2+(V2-distCou2)^2))
    # order by closest distance to selected player
    dist_mat <- arrange(dist_mat, dist)[,c(3,4)]
    names(dist_mat) <- c("Player","Euclid. distance")
  } else {
    # NOTE(review): data_frame() is deprecated in modern dplyr (use tibble()).
    dist_mat <- data_frame()
  }
  return(dist_mat)
}
#similarPlayers <- .tSNE_dist("Russell Westbrook",300,20,27)
#head(similarPlayers,20)
# return similar players based on last 5 years performances
# For retired players this will return similar players according to their last 5 seasons
# as NBA player. Unless pickAge is explicitly entered
.similarPlayers <- function(playerName,numberPlayersToCompare, pickAge){
  # Aggregate t-SNE similarity for `playerName` over up to the 5 ages
  # preceding (and including) the resolved target age: for each age, take
  # the `numberPlayersToCompare` nearest neighbours from .tSNE_dist, then
  # rank candidates by how many ages they recur in (numYears, descending)
  # and by mean distance across those ages (rank5years, ascending).
  # Returns NULL when the player is unknown or never met the minutes cutoff.
  # Reads `playersHist` and `thisYear` from the enclosing/global environment.
  thisAgeFrame <- filter(playersHist, Player == playerName, Season >= paste0(as.numeric(thisYear)-pickAge+18,"-",as.numeric(thisYear)-pickAge+19))
  if (nrow(thisAgeFrame) > 0){
    #thisAge <- filter(thisAgeFrame, Season == max(as.character(Season)))$Age
    minAge <- min(filter(thisAgeFrame, Player == playerName)$Age)
    maxAge <- max(filter(thisAgeFrame, Player == playerName)$Age)
    # Clamp the requested age into the span of ages actually observed.
    if (pickAge >= minAge & pickAge <= maxAge){
      thisAge <- pickAge
    } else{
      thisAge <- maxAge
    }
    simPlayers <- data.frame()
    t <- thisAge-5
    # Walk ages thisAge-5 .. thisAge, collecting nearest neighbours per age.
    while (t <= thisAge){
      if (t >= minAge){
        thisSimilar <- .tSNE_dist(playerName,300,20,t)
        if (nrow(thisSimilar)>0){
          thisSimilar <- head(thisSimilar,numberPlayersToCompare)
          thisSimilar$Age <- t
          if (nrow(simPlayers)>0){
            simPlayers <- bind_rows(simPlayers,thisSimilar)
          } else {
            simPlayers <- thisSimilar
          }
          t <- t + 1
        } else {
          t <- t + 1
        }
      } else {
        t <- t + 1
      }
    }
    if (nrow(simPlayers)>0){
      # Drop the player himself, then score candidates across ages.
      simPlayers_5years <- simPlayers %>%
        filter(!(Player == playerName)) %>%
        group_by(Player) %>%
        mutate(numYears = n(),rank5years = mean(`Euclid. distance`)) %>%
        distinct(Player, numYears, rank5years) %>%
        arrange(desc(numYears),rank5years)
      return(simPlayers_5years)
    } else { # Player didn't play enough minutes during the period considered
      return()
    }
  } else { # Player doesn't exist
    return()
  }
}
.predictPlayer <- function(playerName, numberPlayersToCompare,pickAge,numberTeamsForVariation){
  # Project `playerName`'s stat line one season ahead: take the most-similar
  # players (via .similarPlayers), measure the median relative change of each
  # stat as those players went from the resolved age to age + 1, and apply
  # those per-stat changes to the target player's current line.
  # Reads `playersHist`, `thisYear`, `thisSeason` from the enclosing scope.
  # Returns: one-row data frame with the predicted next-season stat line.
  #
  # Top 10 more similar to selected player for past 5 years
  top10_similar <- head(.similarPlayers(playerName,numberPlayersToCompare,pickAge),numberTeamsForVariation)$Player
  thisAgeFrame <- filter(playersHist, Player == playerName, Season >= paste0(as.numeric(thisYear)-pickAge+18,"-",as.numeric(thisYear)-pickAge+19))
  if (nrow(thisAgeFrame)>0){
    thisAge <- max(filter(thisAgeFrame, Player == playerName)$Age)
  } else { # this player has been out of the league for way too long
    lastSeasonPlayed <- filter(playersHist, Player == playerName) %>%
      arrange(desc(Season)) %>%
      head(1)
    thisAge <- max(pickAge, pickAge + as.numeric(substr(thisSeason,1,4)) - (as.numeric(substr(lastSeasonPlayed$Season,1,4))+1))
  }
  # Now calculate average variation in their stats when they went from current age to age + 1
  # (per_Min is nearly 0 here so almost every player-season is included).
  thisAgeData <- .tSNE_prepare(thisAge,per_Min=.001)
  #thisAgeData <- read.csv(paste0("data/tsneBlock_",thisAge,".csv"))
  namesKeep <- names(thisAgeData)
  # Suffix columns with the age so the merge below keeps both age snapshots.
  names(thisAgeData)[2:ncol(thisAgeData)] <- sapply(names(thisAgeData)[2:ncol(thisAgeData)],
                                                    function(x) paste0(x,"_",thisAge))
  #thisAgeData$Age <- thisAge
  nextAgeData <- .tSNE_prepare(thisAge+1,per_Min=.001)
  #nextAgeData$Age <- thisAge + 1
  names(nextAgeData)[2:ncol(nextAgeData)] <- sapply(names(nextAgeData)[2:ncol(nextAgeData)],
                                                    function(x) paste0(x,"_",thisAge+1))
  ageData <- merge(thisAgeData,nextAgeData, by="Player")
  top10 <- ageData %>%
    filter(Player %in% top10_similar)
  top10_var <- data.frame()
  numCols <- ncol(thisAgeData)
  # Relative change per stat: columns j and j+numCols-1 are the same stat at
  # thisAge and thisAge+1 in the merged frame; 0 when the base value is 0.
  for (i in 1:nrow(top10)){
    top10_var[i,1] <- top10$Player[i]
    for (j in 4:numCols){
      top10_var[i,j-2] <- ifelse(top10[i,j]==0,0,(top10[i,j+numCols-1]-top10[i,j])/top10[i,j])
    }
  }
  names(top10_var) <- namesKeep[c(1,4:length(namesKeep))]
  # Median variations for top 10 most similar players
  #top10_var <- summarise_each(top10_var, funs(median(.)),-Player)
  top10_var <- mutate_if(top10_var,is.logical, as.numeric) %>%
    summarise_if(is.numeric, median)
  # Apply this variation to predict stats for this player for next season
  ## ### NOTE: This may fail when player didn't play much at this age. Think about alternatives
  predAgeData <- filter(thisAgeData, Player == playerName)
  # NOTE(review): nrow(predAgeData[1]) equals nrow(predAgeData); the [1]
  # (first column as a data frame) is harmless but confusing.
  if (nrow(predAgeData[1])>0){
    for (i in 1:ncol(top10_var)){
      predAgeData[i+3] <- predAgeData[i+3]*(1+top10_var[i])
    }
    names(predAgeData) <- namesKeep
    # Update the Season and Age of the player
    predAgeData$Season <- paste0(as.numeric(substr(predAgeData$Season,1,4))+1,"-",
                                 as.numeric(substr(predAgeData$Season,1,4))+2)
    predAgeData$Age <- thisAge + 1
  } else {
    names(predAgeData) <- namesKeep
    predAgeData <- mutate(predAgeData, Age = NA, effPTS = NA)
  }
  return(predAgeData)
}
# Calculate centroid and other measures for selected cluster of points from tSNE
# Compute summary geometry (centroid and per-axis spread) for a selected
# cluster of t-SNE points.
# NOTE(review): `.tSNE_plot_filter` is not defined anywhere in this file --
# confirm it exists elsewhere in the project.
# NOTE(review): `centroid` is computed but unused; the function implicitly
# returns the value of its last assignment (`dispersion`, the per-axis SDs).
# The "other measures" in the original comment appear unfinished.
.clusterMath <- function(colTeam,colSeason,colPlayer,colAge,colSkill){
  points <- .tSNE_plot_filter(colTeam,colSeason,colPlayer,colAge,colSkill)
  centroid <- c(mean(points$x),mean(points$y))
  dispersion <- c(sd(points$x),sd(points$y))
}
|
5014238cad5095a01844c840fdd682b949a55e71
|
277dbb992966a549176e2b7f526715574b421440
|
/R_training/실습제출/전나영/191104/dplyr_lab2.R
|
4953bb41c0314fcca610c42ee3299d00ca478aca
|
[] |
no_license
|
BaeYS-marketing/R
|
58bc7f448d7486510218035a3e09d1dd562bca4b
|
03b500cb428eded36d7c65bd8b2ee3437a7f5ef1
|
refs/heads/master
| 2020-12-11T04:30:28.034460
| 2020-01-17T08:47:38
| 2020-01-17T08:47:38
| 227,819,378
| 0
| 0
| null | 2019-12-13T12:06:33
| 2019-12-13T10:56:18
|
C++
|
UTF-8
|
R
| false
| false
| 1,744
|
r
|
dplyr_lab2.R
|
# dplyr practice lab (lab 2): exploring ggplot2's `mpg` and `midwest` data
# sets with dplyr verbs. Comments translated from Korean.
# NOTE(review): the install.packages() calls below re-install the packages on
# every run; they are normally commented out after the first execution.
# Problem 1
install.packages("ggplot2")
library(ggplot2)
str(mpg)
mpg <- as.data.frame(mpg)
# 1-1: number of rows / columns
mpg %>% nrow()
mpg %>% ncol()
# 1-2: first 10 rows
install.packages("dplyr")
library(dplyr)
mpg %>% head(10)
# 1-3: last 10 rows
mpg %>% tail(10)
# 1-4: open the data viewer
mpg %>% View()
# 1-5: summary statistics
summary(mpg)
# 1-6: structure
str(mpg)
# Problem 2
# 2-1: rename cty/hwy to city/highway
mpg <- mpg %>% rename(city = cty,
                      highway = hwy)
# 2-2
mpg %>% head
# Problem 3
# 3-1
library(ggplot2)
midwest <- as.data.frame(midwest)
str(midwest)
# 3-2: rename poptotal/popasian to total/asian
midwest <- midwest %>% rename(total = poptotal,
                              asian = popasian)
# 3-3: percent of Asian population per county
midwest <- midwest %>% mutate(percent = asian / total * 100)
# 3-4: flag counties below/above the mean percentage
midwest %>% mutate(p = ifelse(mean(percent) > percent, "large", "small"))
# Problem 4
# 4-1: compare mean highway mpg for displ 4 vs 5
mpg
mpg %>%
  filter(displ == 4 | displ == 5) %>%
  group_by(displ) %>%
  summarise(mean_hwy = mean(highway))
## Cars with displ (engine displacement) of 4 have the higher average highway (highway fuel economy).
# 4-2: compare mean city mpg for audi vs toyota
mpg %>%
  filter(manufacturer == "audi" | manufacturer == "toyota") %>%
  group_by(manufacturer) %>%
  summarise(mean_cty = mean(city))
## Toyota has the higher average city (city fuel economy).
# 4-3: mean highway mpg for chevrolet, ford, honda
mpg %>%
  filter(manufacturer == "chevrolet" | manufacturer == "ford" | manufacturer == "honda") %>%
  group_by(manufacturer) %>%
  summarise(mean_hwy = mean(highway))
# Problem 5
# 5-1: keep only class and city columns
new_mpg <- mpg %>% select(class, city)
new_mpg %>% head
# 5-2: compare mean city mpg for suv vs compact
new_mpg %>%
  filter(class == "suv" | class == "compact") %>%
  group_by(class) %>%
  summarise(mean_cty = mean(city))
## Cars whose class (vehicle type) is "compact" have the higher city (city fuel economy).
# Problem 6: top 5 audi models by highway mpg
mpg %>%
  filter(manufacturer == "audi") %>%
  select(model, highway) %>%
  arrange(desc(highway)) %>%
  head(5)
|
9b3ca29f4757323df2219b91d8a9ed893325c20e
|
ff9eb712be2af2fa24b28ecc75341b741d5e0b01
|
/man/stat_n_text.Rd
|
35d8ec5050f26a387a777b5b559f099263bffc2b
|
[] |
no_license
|
alexkowa/EnvStats
|
715c35c196832480ee304af1034ce286e40e46c2
|
166e5445d252aa77e50b2b0316f79dee6d070d14
|
refs/heads/master
| 2023-06-26T19:27:24.446592
| 2023-06-14T05:48:07
| 2023-06-14T05:48:07
| 140,378,542
| 21
| 6
| null | 2023-05-10T10:27:08
| 2018-07-10T04:49:22
|
R
|
UTF-8
|
R
| false
| false
| 6,787
|
rd
|
stat_n_text.Rd
|
\name{stat_n_text}
\alias{stat_n_text}
\title{
Add Text Indicating the Sample Size to a ggplot2 Plot
}
\description{
For a strip plot or scatterplot produced using the package \link[ggplot2]{ggplot2}
(e.g., with \code{\link[ggplot2]{geom_point}}),
for each value on the \eqn{x}-axis, add text indicating the
number of \eqn{y}-values for that particular \eqn{x}-value.
}
\usage{
stat_n_text(mapping = NULL, data = NULL,
geom = ifelse(text.box, "label", "text"),
position = "identity", na.rm = FALSE, show.legend = NA,
inherit.aes = TRUE, y.pos = NULL, y.expand.factor = 0.1,
text.box = FALSE, alpha = 1, angle = 0, color = "black",
family = "", fontface = "plain", hjust = 0.5,
label.padding = ggplot2::unit(0.25, "lines"),
label.r = ggplot2::unit(0.15, "lines"), label.size = 0.25,
lineheight = 1.2, size = 4, vjust = 0.5, ...)
}
\arguments{
\item{mapping, data, position, na.rm, show.legend, inherit.aes}{
See the help file for \code{\link[ggplot2]{geom_text}}.
}
\item{geom}{
Character string indicating which \code{geom} to use to display the text.
Setting \code{geom="text"} will use \code{\link[ggplot2]{geom_text}} to display the text, and
setting \code{geom="label"} will use \code{\link[ggplot2]{geom_label}} to display the text.
The default value is \code{geom="text"} unless the user sets \code{text.box=TRUE}.
}
\item{y.pos}{
Numeric scalar indicating the \eqn{y}-position of the text (i.e., the value of the
argument \code{y} that will be used in the call to \code{\link[ggplot2]{geom_text}} or
\code{\link[ggplot2]{geom_label}}). The default value is \code{y.pos=NULL}, in which
case \code{y.pos} is set to the minimum value of all \eqn{y}-values minus
a proportion of the range of all \eqn{y}-values, where the proportion is
determined by the argument \code{y.expand.factor} (see below).
}
\item{y.expand.factor}{
For the case when \code{y.pos=NULL}, a numeric scalar indicating the proportion
by which the range of all \eqn{y}-values should be multiplied by before subtracting
this value from the minimum value of all \eqn{y}-values in order to compute the
value of the argument \code{y.pos} (see above).
The default value is \code{y.expand.factor=0.1}.
}
\item{text.box}{
Logical scalar indicating whether to surround the text with a text box (i.e.,
whether to use \code{\link[ggplot2]{geom_label}} instead of
\code{\link[ggplot2]{geom_text}}). This argument can be overridden by simply
specifying the argument \code{geom}.
}
\item{alpha, angle, color, family, fontface, hjust, vjust, lineheight, size}{
See the help file for \code{\link[ggplot2]{geom_text}} and
the vignette \bold{Aesthetic specifications} at
\url{https://cran.r-project.org/package=ggplot2/vignettes/ggplot2-specs.html}.
}
\item{label.padding, label.r, label.size}{
See the help file for \code{\link[ggplot2]{geom_text}}.
}
\item{\dots}{
Other arguments passed on to \code{\link[ggplot2]{layer}}.
}
}
\details{
See the help file for \code{\link[ggplot2]{geom_text}} for details about how
\code{\link[ggplot2]{geom_text}} and \code{\link[ggplot2]{geom_label}} work.
See the vignette \bold{Extending ggplot2} at
\url{https://cran.r-project.org/package=ggplot2/vignettes/extending-ggplot2.html}
for information on how to create a new stat.
}
\references{
Wickham, H. (2016). \emph{ggplot2: Elegant Graphics for Data Analysis (Use R!)}.
Second Edition. Springer.
}
\author{
Steven P. Millard (\email{EnvStats@ProbStatInfo.com})
}
\note{
The function \code{stat_n_text} is called by the function \code{\link{geom_stripchart}}.
}
\seealso{
\code{\link{geom_stripchart}}, \code{\link{stat_mean_sd_text}},
\code{\link{stat_median_iqr_text}}, \code{\link{stat_test_text}},
\code{\link[ggplot2]{geom_text}}, \code{\link[ggplot2]{geom_label}}.
}
\examples{
# First, load and attach the ggplot2 package.
#--------------------------------------------
library(ggplot2)
#====================
# Example 1:
# Using the built-in data frame mtcars,
# plot miles per gallon vs. number of cylinders
# using different colors for each level of the number of cylinders.
#------------------------------------------------------------------
p <- ggplot(mtcars, aes(x = factor(cyl), y = mpg, color = factor(cyl))) +
theme(legend.position = "none")
p + geom_point() +
labs(x = "Number of Cylinders", y = "Miles per Gallon")
# Now add the sample size for each level of cylinder.
#----------------------------------------------------
dev.new()
p + geom_point() +
stat_n_text() +
labs(x = "Number of Cylinders", y = "Miles per Gallon")
#==========
# Example 2:
# Repeat Example 1, but:
# 1) facet by transmission type,
# 2) make the size of the text smaller.
#--------------------------------------
dev.new()
p + geom_point() +
stat_n_text(size = 3) +
facet_wrap(~ am, labeller = label_both) +
labs(x = "Number of Cylinders", y = "Miles per Gallon")
#==========
# Example 3:
# Repeat Example 1, but specify the y-position for the text.
#-----------------------------------------------------------
dev.new()
p + geom_point() +
stat_n_text(y.pos = 5) +
labs(x = "Number of Cylinders", y = "Miles per Gallon")
#==========
# Example 4:
# Repeat Example 1, but show the sample size in a text box.
#----------------------------------------------------------
dev.new()
p + geom_point() +
stat_n_text(text.box = TRUE) +
labs(x = "Number of Cylinders", y = "Miles per Gallon")
#==========
# Example 5:
# Repeat Example 1, but use the color brown for the text.
#--------------------------------------------------------
dev.new()
p + geom_point() +
stat_n_text(color = "brown") +
labs(x = "Number of Cylinders", y = "Miles per Gallon")
#==========
# Example 6:
# Repeat Example 1, but:
# 1) use the same colors for the text that are used for each group,
# 2) use the bold monospaced font.
#------------------------------------------------------------------
mat <- ggplot_build(p)$data[[1]]
group <- mat[, "group"]
colors <- mat[match(1:max(group), group), "colour"]
dev.new()
p + geom_point() +
stat_n_text(color = colors, size = 5,
family = "mono", fontface = "bold") +
labs(x = "Number of Cylinders", y = "Miles per Gallon")
#==========
# Clean up
#---------
graphics.off()
rm(p, mat, group, colors)
}
\keyword{aplot}
|
93fa4a5c1505577bc85d7a2067f011526771ff51
|
39e6b4b0a85bab8f160f5b0d06f07a67ef0c3ae4
|
/cachematrix.R
|
e576fdeaeebd0acf03121e45a972e1cacdc62e1f
|
[] |
no_license
|
monty111191/ProgrammingAssignment2
|
d1eb1c317f6c1a48c3f78daaa6b28c1c2220f896
|
3aa828bb56841e11b34e14a7685e99785a5cf9f0
|
refs/heads/master
| 2021-01-24T02:52:34.692860
| 2016-01-24T03:55:15
| 2016-01-24T03:55:15
| 50,161,881
| 0
| 0
| null | 2016-01-22T06:15:42
| 2016-01-22T06:15:42
| null |
UTF-8
|
R
| false
| false
| 1,671
|
r
|
cachematrix.R
|
## Taking the inverse of a matrix is usually quick for a single matrix.
## However, repeatedly inverting matrices can be time-consuming over a
## large data set. Therefore, it can be useful to cache the inverse of a
## non-changing matrix and retrieve it later rather than continually
## recomputing it.
## The first function, `makeCacheMatrix` creates a special "matrix" and allows
## the inverse of the matrix to be cached. The function...
## 1. sets the value of the matrix
## 2. gets the value of the matrix
## 3. sets the value of the inverse matrix
## 4. gets the value of the inverse matrix
makeCacheMatrix <- function(x = matrix()) {
  # Create a special "matrix": a list of accessors around a matrix `x` and a
  # cached copy of its inverse. The cache is invalidated whenever the matrix
  # is replaced via set().
  #
  # Returns a list with elements:
  #   set(y)     -- replace the stored matrix and clear the cached inverse
  #   get()      -- return the stored matrix
  #   setinv(v)  -- store a computed inverse in the cache
  #   getinv()   -- return the cached inverse (NULL when not yet computed)
  cached_inv <- NULL
  list(
    set = function(y) {
      # `<<-` writes into this closure's environment, shared by all accessors.
      x <<- y
      cached_inv <<- NULL
    },
    get = function() x,
    setinv = function(value) cached_inv <<- value,
    getinv = function() cached_inv
  )
}
## The following function calculates the inverse matrix of the special
## "matrix" created with the above function. However, it first checks to
## see if the inverse matrix has already been calculated. If so, it
## `get`s the inverse matrix from the cache and skips the computation.
## Otherwise, it calculates the inverse of the matrix and sets the value
## of the mean in the cache via the `setinverse` function.
cacheSolve <- function(x, ...) {
  # Return the inverse of the special "matrix" `x` (as built by
  # makeCacheMatrix), using the cached value when available; otherwise
  # compute it with solve(), store it in the cache, and return it.
  #
  # x:   list exposing get/set/getinv/setinv accessors.
  # ...: further arguments forwarded to solve().
  #
  # BUG FIX: this function previously called x$getinverse()/x$setinverse(),
  # but makeCacheMatrix names its accessors `getinv`/`setinv`. `$` does not
  # partially match a LONGER name against a shorter element name, so
  # x$getinverse was NULL and x$setinverse(inv) failed on the first cache
  # miss with "attempt to apply non-function".
  inv <- x$getinv()
  if (!is.null(inv)) {
    message("getting cached data")
    return(inv)
  }
  data <- x$get()
  inv <- solve(data, ...)
  x$setinv(inv)
  inv
}
|
cdc0c8a0284ef9709f4f969262d21682697a04e3
|
d0f777a502a7d8483096e371d14223b414f6720d
|
/ECFGenomeRcpp_v3.30/R/annotate_group_v6.255.R
|
173437633365f733ef6a4d0fd255b306bcc65d1c
|
[] |
no_license
|
horiatodor/ECFGenome-Rcpp
|
86fb14ca2948ba9ba2568a285e6f106b6c806ee5
|
7feee7f81a91f58c40f731987312a741514fa48e
|
refs/heads/master
| 2022-11-15T11:08:53.089067
| 2020-07-09T02:15:58
| 2020-07-09T02:15:58
| 278,220,782
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,951
|
r
|
annotate_group_v6.255.R
|
#function that takes in a list of pwm_scans, annotates them, and then does the cog thing...
#cuttoff is how far upstream of the start site we want to look. -1000 is there to keep back compatbiliity
#operon tells us how to handle (if at all) operon structures
#none means no operon structures are taken into consideration
#first only means that operons are taken into consideration only if the hit is in the first gene
#all means the entire operon is taken into consideration regardless of where the gene hit is
#an operon is defined as:
#version 4.1 changes
#changed summarize_hits to discard cutoff first, then do the rest of the concatenation
#added class name
#self -21 where no score (by definition)
#changed some code to take advantage of the fact that the new version of summarize hits returns only 1 of each NOG by default
#preinitialized matrices where possible (unlcear if it helped :/)
#skip the first thing
#added option to skip the other
#version 5 huge changes
#completley rewritten to use scan_pwm3, annotate hits 5.2
#use match and removes a loop from every operoation
#use readr file reads
#version 5.11
#can deal with first ecf not having any hits
#version 5.2
#uses scan and annotate
#requieres truncated fasta v3
#version 5.3
#is able to take as an input a list of truncated fasta
#uses scan_and_annotate 2.2
#version 5.4
#represents the merge point for annotate_group and annotate_group_fast
#is able to take as an input a list operons
#uses scan_and_annotate 2.4 (which has better speed for operons)
#eliminated go_fast
#precomputes all the hits
#v6
#uses scan_and_annotate_v2.5, which depends on truncate_fasta_v5
#many fewer inputs, since theyre all included in other things
#v6.2
#gains the ability to do multiple scan and annotate shuffles at the same time
#as implemented in scan and annotate 2.7
#additional input, which is the shuffled order
#remove annot_hit_summary. we can just use the appropriate call!
#v6.21
#this version calculates and returns
#1. distance from the ECF
#2. spacers that give optimal score
#also, remove annotation from hits location...
#return of additional_information??
#version6.22
#got rid of some checks, ensure compatibility with data_frame output
#v6.23
#can deal with list_of_pwm_10 == NULL
#v6.24
#can do multiple single pwms at a time
#v6.25
#has functionality to take in set of all nogs, etc.
#v6.251
#adds the ability to return information about the strand of of the ECF and the genes
#v6.252
#removed additional_information flag, now uses first_three_cols p/a as flag
#will also return COG class for all genes
#v6.253
#will now also return as part of real hits (ie when first_three_cols is not present) the names of the genes in the organism
#that make up the results - this will be useful for assessing HGT and also looking up genes for people
#v6.254
#handles returning gene distances for each gene for the real hits
#v6.255
#handles returning spacer lengths as well
#slightly changes the way that median distance to the ECF is calculated
#
## annotate_group_2: scan each genome with its (optionally shuffled) -35/-10
## PWM pair via scan_and_annotate(), then merge the per-genome hit tables
## into summary matrices aligned on the identifier in hit column 9 (one row
## per unique id, one score column per genome/PWM).
##
## Arguments:
##   list_of_pwm_35, list_of_pwm_10: per-genome -35 and -10 PWMs
##     (list_of_pwm_10 may be NULL; NA placeholders are substituted).
##   list_of_truncated_fasta: per-genome truncated-fasta objects.
##   operon: scalar or per-genome list, forwarded to scan_and_annotate().
##   spacers: forwarded as the `spacer` argument.
##   shuffled_order_35 / shuffled_order_10: matrices of PWM column orders,
##     one row per shuffle replicate; NA means identity order, 1 replicate.
##   cores_to_use: cores for parallel::mclapply().
##   first_three_cols_and_annot: pre-built id/annotation columns. When
##     supplied (shuffled / null runs) only scores and the in-operon
##     fraction are accumulated and returned.
##
## Returns, per replicate:
##   real run: list(scores + annotation, in-operon fraction, median distance
##             to the ECF, COG classes, gene names, hit locations, spacers)
##   null run: list(scores, in-operon fraction)
## When no shuffled orders were supplied, the single replicate is returned
## unwrapped rather than as a one-element list.
##
## NOTE(review): the column indices used on the scan_and_annotate() output
## (3, 4, 7, 8, 9, 10, 12, 13, 14) are assumed from usage here -- confirm
## against the scan_and_annotate v2.7 output layout.
annotate_group_2 <- function(list_of_pwm_35, list_of_pwm_10, list_of_truncated_fasta, operon = "yes", spacers,
                             shuffled_order_35 = NA, shuffled_order_10 = NA, cores_to_use = 1,
                             first_three_cols_and_annot = NULL){
  #############################################################################################################################
  #if the operon is not a list, recycle it into a per-genome list
  if (class(operon) != "list"){operon <- as.list(rep(operon, length = length(list_of_pwm_35)))}
  #############################################################################################################################
  #we dont have a shuffled order: fall back to the identity order (one replicate)
  was_na <- is.na(shuffled_order_35[1])
  if (was_na){
    #for pwm35
    shuffled_order_35 <- matrix(1:dim(list_of_pwm_35[[1]])[2], 1, dim(list_of_pwm_35[[1]])[2])
    #for pwm10, if it exists
    if (!is.null(list_of_pwm_10)){
      shuffled_order_10 <- matrix(1:dim(list_of_pwm_10[[1]])[2], 1, dim(list_of_pwm_10[[1]])[2])
    }
  }
  #############################################################################################################################
  #precompute the shuffled ECF lists as a list of lists (genome -> replicate)
  #for pwm35
  list_of_shuffled_pwm_35 <- list()
  for (i in 1:length(list_of_pwm_35)){
    list_of_shuffled_pwm_35[[i]] <- lapply(1:dim(shuffled_order_35)[1], function(j) list_of_pwm_35[[i]][,shuffled_order_35[j,]])
  }
  #for pwm10, if it exists
  if (!is.null(list_of_pwm_10)){
    #make the list
    list_of_shuffled_pwm_10 <- list()
    #
    for (i in 1:length(list_of_pwm_10)){
      list_of_shuffled_pwm_10[[i]] <- lapply(1:dim(shuffled_order_10)[1], function(j) list_of_pwm_10[[i]][,shuffled_order_10[j,]])
    }
  } else {
    #make a list of NA (we cannot pass a list of NULL)
    list_of_shuffled_pwm_10 <- rep(NA, length(list_of_pwm_35))
  }
  #############################################################################################################################
  #precompute all of the hits, one genome per worker
  list_of_hits <- parallel::mclapply(1:length(list_of_pwm_35), function (i) scan_and_annotate(list_of_shuffled_pwm_35[[i]],
                                                                                             list_of_shuffled_pwm_10[[i]],
                                                                                             list_of_truncated_fasta[[i]],
                                                                                             spacer = spacers, operon[[i]]),
                                     mc.cores = cores_to_use, mc.silent = TRUE)
  #############################################################################################################################
  #now lets annotate each of the things one by one
  to_return <- vector("list", length(list_of_hits[[1]]))
  #for the number of replicates
  for (a in 1:length(list_of_hits[[1]])){
    #start here
    start_ecf <- 1
    #if we have those first three cols (null / shuffled run): only score and
    #location matrices are needed, and all genomes are merged into them
    if (!is.null(first_three_cols_and_annot)){
      #set up the matrices and get on with it
      hits_scores <- cbind(first_three_cols_and_annot,
                           matrix(NA, dim(first_three_cols_and_annot)[1], length(list_of_pwm_35)))
      hit_location <- matrix(NA, dim(first_three_cols_and_annot)[1], length(list_of_pwm_35))
    }
    #if we dont have the first_three_cols_and_annot, then we need to do the initialization as per usual,
    #seeding every accumulator from the first genome's hits
    if (is.null(first_three_cols_and_annot)){
      #initial thing
      #preallocate the correct number of columns
      hits_annot <- list_of_hits[[start_ecf]][[a]][,13]
      hits_cogs <- list_of_hits[[start_ecf]][[a]][,12]
      hits_scores <- cbind(list_of_hits[[start_ecf]][[a]][,c(9,4,14)],matrix(NA, dim(list_of_hits[[start_ecf]][[a]])[1], (start_ecf-1)),
                           list_of_hits[[start_ecf]][[a]][,3], matrix(NA, dim(list_of_hits[[start_ecf]][[a]])[1], (length(list_of_pwm_35[-1])-start_ecf+1)))
      hit_location <- cbind(matrix(NA, dim(list_of_hits[[start_ecf]][[a]])[1], (start_ecf-1)),
                            list_of_hits[[start_ecf]][[a]][,7],matrix(NA, dim(list_of_hits[[start_ecf]][[a]])[1], (length(list_of_pwm_35[-1])-start_ecf+1)))
      hit_spacer <- cbind(matrix(NA, dim(list_of_hits[[start_ecf]][[a]])[1], (start_ecf-1)),
                          list_of_hits[[start_ecf]][[a]][,4],matrix(NA, dim(list_of_hits[[start_ecf]][[a]])[1], (length(list_of_pwm_35[-1])-start_ecf+1)))
      hit_distance <- cbind(matrix(NA, dim(list_of_hits[[start_ecf]][[a]])[1], (start_ecf-1)),
                            list_of_hits[[start_ecf]][[a]][,10],matrix(NA, dim(list_of_hits[[start_ecf]][[a]])[1], (length(list_of_pwm_35[-1])-start_ecf+1)))
      hit_gene <- cbind(matrix(NA, dim(list_of_hits[[start_ecf]][[a]])[1], (start_ecf-1)),
                        list_of_hits[[start_ecf]][[a]][,8],matrix(NA, dim(list_of_hits[[start_ecf]][[a]])[1], (length(list_of_pwm_35[-1])-start_ecf+1)))
      start_ecf <- 2
    }
    #for each remaining genome, merge its hits into the accumulators
    for (i in start_ecf:length(list_of_pwm_35)){
      #get the correspondance between matched and unmatched ids (column 9)
      matched_cogs <- match(list_of_hits[[i]][[a]][,9], hits_scores[,1])
      #for all of the hits that match whats already in the thing
      if (length(which(!is.na(matched_cogs))) > 0){
        #here we need both the hits we are working with and where in the hits_scores they
        #should go
        hits_of_int <- which(!is.na(matched_cogs))
        positons_of_int <- matched_cogs[hits_of_int]
        #since the matrix column number is preallocated, just fill column i
        hits_scores[positons_of_int,(3+i)] <- list_of_hits[[i]][[a]][hits_of_int,3]
        hit_location[positons_of_int,i] <- list_of_hits[[i]][[a]][hits_of_int,7]
        #we only do this for the real, when we dont have F3andA
        if (is.null(first_three_cols_and_annot)){
          hit_spacer[positons_of_int,i] <- list_of_hits[[i]][[a]][hits_of_int,4]
          hit_distance[positons_of_int,i] <- list_of_hits[[i]][[a]][hits_of_int,10]
          hit_gene[positons_of_int,i] <- list_of_hits[[i]][[a]][hits_of_int,8]
        }
      }
      #for all of the hits that dont match whats already in the thing: append new rows
      if (length(which(is.na(matched_cogs))) > 0){
        #here we only need the positions in annot_hits_summary
        hits_of_int <- which(is.na(matched_cogs))
        #rbind an appropriately sized matrix to the bottom of hits summary
        hits_scores <- rbind(hits_scores, setNames(cbind(list_of_hits[[i]][[a]][hits_of_int, c(9,4,14)],
                                                         matrix(NA, length(hits_of_int), (i-1)),
                                                         list_of_hits[[i]][[a]][hits_of_int,3],
                                                         matrix(NA, length(hits_of_int), (length(list_of_pwm_35) - i))), names(hits_scores)))
        hit_location <- rbind(hit_location, setNames(cbind(matrix(NA, length(hits_of_int), (i-1)),
                                                           list_of_hits[[i]][[a]][hits_of_int,7],
                                                           matrix(NA, length(hits_of_int), (length(list_of_pwm_35) - i))), names(hit_location)))
        #we only do this for the real, when we dont have F3andA
        if (is.null(first_three_cols_and_annot)){
          hit_spacer <- rbind(hit_spacer, setNames(cbind(matrix(NA, length(hits_of_int), (i-1)),
                                                         list_of_hits[[i]][[a]][hits_of_int,4],
                                                         matrix(NA, length(hits_of_int), (length(list_of_pwm_35) - i))), names(hit_spacer)))
          hit_distance <- rbind(hit_distance, setNames(cbind(matrix(NA, length(hits_of_int), (i-1)),
                                                             list_of_hits[[i]][[a]][hits_of_int,10],
                                                             matrix(NA, length(hits_of_int), (length(list_of_pwm_35) - i))), names(hit_distance)))
          hit_gene <- rbind(hit_gene, setNames(cbind(matrix(NA, length(hits_of_int), (i-1)),
                                                     list_of_hits[[i]][[a]][hits_of_int,8],
                                                     matrix(NA, length(hits_of_int), (length(list_of_pwm_35) - i))), names(hit_gene)))
          #and the annotation
          hits_annot <- c(hits_annot, list_of_hits[[i]][[a]][hits_of_int,13])
          hits_cogs <- c(hits_cogs, list_of_hits[[i]][[a]][hits_of_int,12])
        }
      }
    }
    ###############################
    ## Post-processing (real runs only): spacer mode, median distance, strand
    ###############################
    if (is.null(first_three_cols_and_annot)){
      #here we get the spacer mode and percentage that are mode
      which.is.not.na <- lapply(1:dim(hit_spacer)[1], function (i) which(!is.na(hit_spacer[i,])))
      spacer.mode <- unlist(lapply(1:dim(hit_spacer)[1], function (i) Mode(hit_spacer[i,which.is.not.na[[i]]])))
      percentage.mode <- rowMeans(Reduce("cbind", lapply(1:dim(hit_spacer)[2], function (i) hit_spacer[,i] == spacer.mode)), na.rm=TRUE)
      #here we get the median distance to the ECF start site.
      #10000000 is the sentinel for "different contig" and is excluded.
      median_minus <- function(a_numeric_vector){
        #which are on the same contig
        same_contig <- which(a_numeric_vector != 10000000)
        normal_ones <- intersect(which(!is.na(a_numeric_vector)), same_contig)
        #
        return(ifelse(length(normal_ones) == 0, 10000000, median(abs(a_numeric_vector[normal_ones]))))
      }
      median.dist.to.ecf <- unlist(lapply(1:dim(hit_distance)[1], function (i) median_minus(as.numeric(hit_distance[i,]))))
      #here we get whether the gene and the ecf are on the same strand
      #(negative distance encodes opposite strand; sign flips the median)
      fraction.different.strand <- unlist(lapply(1:dim(hit_distance)[1], function (i) length(which(hit_distance[i,] < 0))))/
        unlist(lapply(1:dim(hit_distance)[1], function (i) length(which(hit_distance[i,] < 9999999))))
      median.dist.to.ecf <- median.dist.to.ecf*ifelse(fraction.different.strand >= 0.5, -1, 1)
      #put the spacer mode (and its prevalence) in hits_scores column 2
      hits_scores[,2] <- paste(spacer.mode, round(percentage.mode,2))
    }
    #here we get the percentage of things with operon at every position and return it as part of the list
    #get the number that are in operons per row (-1000 encodes "not in an operon")
    #apply a rule (here we do fraction which are operons)
    is.in.operon <- 1-rowMeans(do.call("cbind", lapply(1:dim(hit_location)[2], function (i) hit_location[,i] == -1000)), na.rm=TRUE)
    if (is.null(first_three_cols_and_annot)){
      to_return[[a]] <- list(cbind(hits_scores,hits_annot),is.in.operon, median.dist.to.ecf, hits_cogs, hit_gene, hit_location, hit_spacer)
    }
    #if we are just doing shuffled
    if (!is.null(first_three_cols_and_annot)){
      to_return[[a]] <- list(hits_scores, is.in.operon)
    }
  }
  #the return statements! (single replicate is unwrapped)
  if (!was_na){return(to_return)}
  if (was_na){return(to_return[[1]])}
}
#mode function just in case
## Statistical mode: the most frequent value in x.
## Ties are broken by first appearance among unique(x).
Mode <- function(x) {
  vals <- unique(x)
  counts <- tabulate(match(x, vals))
  vals[which.max(counts)]
}
|
712b1b2018f492493b2ef18ab697f1b058ef8de2
|
4e8f1eb4fbd4a65cd8c3930f3ce3dbdd6ccd45d7
|
/R/utils.R
|
41594eacd542a4189c21894fe2194e4f8e667754
|
[] |
no_license
|
dreamRs/shinylogs
|
cbcb2d7582f4a2a3950d34c30c370df080ffa38f
|
0195ac0a1f85d213c82143cfee712c9baddd1963
|
refs/heads/master
| 2023-03-19T01:12:52.425499
| 2022-04-18T16:02:24
| 2022-04-18T16:02:24
| 161,770,136
| 94
| 14
| null | 2023-03-14T17:58:50
| 2018-12-14T10:36:28
|
R
|
UTF-8
|
R
| false
| false
| 792
|
r
|
utils.R
|
## Remove NULL elements from a list, preserving names and order.
dropNulls <- function(x) {
  is_null <- vapply(x, is.null, FUN.VALUE = logical(1))
  x[!is_null]
}
## Format a POSIXct time (defaulting to "now") with millisecond precision
## and a numeric timezone offset, e.g. "2020-03-01 08:30:00.000+0000".
get_timestamp <- function(time = NULL) {
  stamp <- if (is.null(time)) Sys.time() else time
  format(stamp, format = "%Y-%m-%d %H:%M:%OS3%z")
}
## TRUE when `path` is a character value ending in ".sqlite".
is_sqlite <- function(path) {
  is.character(path) && grepl("\\.sqlite$", path)
}
## Resolve the current user name, in order of preference:
## 1) the authenticated Shiny session user,
## 2) the ShinyProxy user (SHINYPROXY_USERNAME env var),
## 3) the "shinylogs.default_user" option, falling back to the OS login.
get_user_ <- function(session) {
  if (!is.null(session$user)) {
    return(session$user)
  }
  proxy_user <- Sys.getenv("SHINYPROXY_USERNAME")
  if (nzchar(proxy_user)) {
    return(proxy_user)
  }
  getOption("shinylogs.default_user", default = Sys.info()[["user"]])
}
#' Print an object to the console as prettified JSON.
#'
#' Does nothing when `obj` is `NULL`. Extra arguments in `...` are
#' concatenated onto `obj` before serialisation.
#' @importFrom jsonlite toJSON
to_console <- function(obj, ...) {
  if (is.null(obj)) {
    return(invisible(NULL))
  }
  payload <- c(obj, ...)
  print(jsonlite::toJSON(x = payload, pretty = TRUE, auto_unbox = TRUE))
}
|
d696ef2d3a51d3633eb42b2e41add2be1a3d4fc5
|
ae8a72dd35911a3a9d6b472b152e22a382d67d3b
|
/varTest/results/varMod/varModFit.R
|
8f6e2513bfa097344dcfa9f65798eae56630fe7a
|
[] |
no_license
|
inspktrgadget/atlantis
|
883a1555c3c930007ebc475dc3dd5fca14e2d717
|
3a324ea7194f2a93ad54f6f7ce0f4e55dc2419e6
|
refs/heads/master
| 2021-09-10T23:03:37.983689
| 2018-04-03T20:45:54
| 2018-04-03T20:45:54
| 116,151,163
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 771
|
r
|
varModFit.R
|
## Batch-fit a set of Gadget "variance" candidate models and capture the
## fit chatter in a log file.
## Side effects: changes the working directory, creates the file
## "gadgetFitOutput", and redirects console output into it via sink().
library(plyr)
library(tidyverse)
library(parallel)
library(Rgadget)
# Root directory holding one subdirectory per candidate model.
homeDir <- "~/gadget/models/atlantis/varTest/varModels"
setwd(homeDir)
# Log file that will collect gadget.fit() console output.
file.create("gadgetFitOutput")
#mod_dir <- dir("varModels")
# One model directory per candidate variance value.
mod_dir <- sprintf("varModel_%s", c(0.269, 0.276, 0.282, 0.288, 0.294, 0.3))
# NOTE(review): the mclapply() result is effectively discarded -- each
# worker's last expression is sink(), so null_list holds NULLs; presumably
# gadget.fit() persists its results under each model's WGTS directory.
# Confirm before relying on null_list. Also note sink() from parallel
# workers appends to the same file concurrently.
null_list <-
  mclapply(mod_dir, function(x) {
    cat("Fitting ", x, "\n")
    sink(file = "gadgetFitOutput", append = TRUE)
    tmp_fit <- gadget.fit(wgts = sprintf("varModels/%s/WGTS", x),
                          main.file = sprintf("varModels/%s/WGTS/main.final", x),
                          printfile.printatstart = 0,
                          printfile.steps = "all",
                          rec.len.param = TRUE)
    sink()
  }, mc.cores = 4)
|
84355bbacec87317a4acd2ed2c4c4c5a00ab6345
|
62fd8b80332420d977bc7da5330c8df821936aef
|
/week 23.R
|
45d594e0e8c186e239b1207e55dd2acba99709e6
|
[] |
no_license
|
NdiranguMartin/TidyTuesday
|
a7dfea863d2add9b544bceae9594508923ef21f2
|
7100d27daf6ee566e9c73c7f384e112aa052eff7
|
refs/heads/master
| 2020-07-08T03:51:05.040442
| 2020-05-31T16:42:52
| 2020-05-31T16:42:52
| 203,556,450
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 34
|
r
|
week 23.R
|
# I will have my week 23 code here
|
f3672c91490ecb9b669946ad9f66c6ff13f45c5a
|
a828726d268e86fd86aec54247fc8bb8211eec43
|
/man/save_brush_history.Rd
|
7f3c3782092c3dc856f6f95278c6c977ae6bedfd
|
[] |
no_license
|
XuChongBo/cranvas
|
b6536c8f43ccaac2ac9f18c1953fe13e9fc94155
|
f5a37363044bdbf946011f2c8ab019cefbefbe39
|
refs/heads/master
| 2020-05-29T11:49:04.840631
| 2013-11-22T16:49:13
| 2013-11-22T16:49:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,906
|
rd
|
save_brush_history.Rd
|
\name{save_brush_history}
\alias{save_brush_history}
\title{Create the brush history}
\usage{
save_brush_history(data, index = selected(data))
}
\arguments{
\item{data}{the mutaframe created by \code{\link{qdata}}}
\item{index}{the indices of rows to be stored in history;
an integer vector or a logical vector (will be coerced to
integers by \code{\link[base]{which}}); by default it is
\code{selected(data)}, i.e., the logical vector
indicating which rows are brushed}
}
\value{
the \code{data} argument will be returned and other
changes occur as side effects
}
\description{
Given the indices of the brushed elements, this function
stores these indices in the \code{\link{brush}} object
and changes the colors of graphical elements permanently
(via changing the \code{.color} column in the data) if in
the persistent brushing mode.
}
\details{
For the transient brushing: the given indices are stored
in the \code{history.list} component of the brush object.
The length of the list of indices is restricted by the
\code{history.size} component of the brush, i.e., old
histories may be removed due to this size restriction.
For the persistent brushing: the given indices of brushed
elements are stored in the \code{persistent.list}
component, and the current brushing color is also saved
to the \code{persistent.color} component. The colors of
brushed elements will be changed permanently. Finally,
the length of the list of indices is also restricted by
the \code{history.size} component of the brush.
We can use this stored information to redraw the brushed
elements later. See \code{\link{brush}} for detailed
explanation of these components.
}
\note{
The changes occur only if the \code{index} argument is
not empty, or when the \code{data} argument is in the
persistent brushing mode, i.e., when \code{brush(data,
'persistent')} is \code{TRUE}. In this case, the returned
\code{data} will be different with the one passed in,
because the brush object attached on it has been changed.
}
\examples{
library(cranvas)
qnrc <- qdata(nrcstat)
selected(qnrc) # all FALSE by default
selected(qnrc)[1:5] <- TRUE # brush first 5 rows
b <- brush(qnrc) # the brush object
b$history.list # this list should be empty by default
save_brush_history(qnrc) # store currently brushed row indices in history
save_brush_history(qnrc, c(6, 7, 10)) # another history
b$history.list # what happened to the brush object?
b$persistent <- TRUE # turn on persistent brushing
b$persistent.list # this list should be empty by default too
save_brush_history(qnrc, c(3, 4, 6, 9)) # permanently brush other 4 rows
b$persistent.list # what happened to the brush object?
b$persistent.color
b$color
b$history.list
}
\author{
Yihui Xie <\url{http://yihui.name}>
}
\seealso{
\code{\link{brush}}, \code{\link{qdata}},
\code{\link{selected}}
}
|
74c7b326e7d6d96637567c3afcda579094f4feb0
|
eea741791ea776e38cde479ef0f201defaae9a4f
|
/cholla_climate_IPM_SOURCE.R
|
7bdbe9c236586043d8505198c4982c9ced6921ae
|
[] |
no_license
|
texmiller/cholla_climate_IPM
|
28f541d9266ad0a3a182833fb713f0852a7bfb85
|
5c16d93001b9941fb00115df6d008e08e140c236
|
refs/heads/master
| 2021-06-05T10:01:51.402460
| 2021-05-06T18:28:11
| 2021-05-06T18:28:11
| 142,463,901
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,037
|
r
|
cholla_climate_IPM_SOURCE.R
|
### Purpose: build IPM using the climate-dependent vital rates that were fit elsewhere
# misc functions -------------------------------------------------------------------
## Cone volume from plant height `h` and two perpendicular widths `w` and
## `p` (their mean, halved, is the radius).
volume <- function(h, w, p){
  radius <- ((w + p)/2)/2
  (1/3)*pi*h*radius^2
}
## Inverse logit (logistic) transform: maps the real line onto (0, 1).
invlogit <- function(x){
  odds <- exp(x)
  odds/(1 + odds)
}
## Most frequent value in v; ties broken by first appearance.
## (Duplicate of Mode() defined elsewhere in this file.)
getmode <- function(v) {
  distinct <- unique(v)
  distinct[which.max(tabulate(match(v, distinct)))]
}
# VITAL RATE FUNCTIONS ----------------------------------------------------
## Note: I am writing these functions specific to the "selected" vital rate
## models following SVS. If I update the variable selection, or if the results
## change once 2017-18 data can be included, then these functions will need
## to be revised
## GROWTH
## Growth kernel: density of next-year size y given current size x.
## Size is clamped to the fitted range; rfx[1] is the growth year random
## effect. Returns dnorm(y; x + expected increment, residual sd).
gxy <- function(x, y, params, rfx) {
  size_now <- pmin(pmax(x, params$min.size), params$max.size)
  increment <- params$grow.mu + params$grow.bsize * size_now + rfx[1]
  dnorm(y, mean = size_now + increment, sd = params$grow.sigma.eps)
}
#x_size <- seq(mean_params$min.size,mean_params$max.size,0.1)
#plot(x_size,gxy(x=0,y=x_size,params=mean_params,rfx=0),type="l")
## SURVIVAL
## Annual survival probability for a plant of size x.
## Size is clamped to the fitted range. Each climate PC uses its *second*
## element -- the later year of the (t-1, t) pair passed by callers such as
## lambdaSim() -- and, when extrap is not TRUE, is clamped to the fitted PC
## range (params$PC*L / PC*U). rfx[2] is the survival year random effect;
## the linear PC1-PC3 effects come from row 1 of params$surv.bclim.
sx<-function(x,params,rfx,PC1,PC2,PC3,extrap=T){
  xb=pmin(pmax(x,params$min.size),params$max.size)
  pc1=ifelse(extrap==T,PC1[2],pmin(pmax(PC1[2],params$PC1L),params$PC1U))
  pc2=ifelse(extrap==T,PC2[2],pmin(pmax(PC2[2],params$PC2L),params$PC2U))
  pc3=ifelse(extrap==T,PC3[2],pmin(pmax(PC3[2],params$PC3L),params$PC3U))
  p.surv<-params$surv.mu + params$surv.bsize*xb + rfx[2] +
    unlist(params$surv.bclim[1,1])*pc1 +
    unlist(params$surv.bclim[1,2])*pc2 +
    unlist(params$surv.bclim[1,3])*pc3
  return(invlogit(p.surv))
}
## COMBINED GROWTH_SURVIVAL
## Survival-growth kernel: survive at size x, then grow from x to y.
pxy <- function(x,y,params,rfx,PC1,PC2,PC3,extrap=T){
  survival <- sx(x, params, rfx, PC1, PC2, PC3, extrap)
  growth <- gxy(x, y, params, rfx)
  survival * growth
}
#PRODUCTION OF 1-YO SEEDS IN THE SEED BANK FROM X-SIZED MOMS
## Probability that a plant of size x flowers.
## Climate PCs use their *first* element (the earlier year of the pair)
## and are clamped to the fitted range unless extrap is TRUE. rfx[3] is
## the flowering year random effect. Row 1 of params$flow.bclim holds the
## PC main effects; row 3 holds the size-by-PC interactions (PC2, PC3).
flow.x <- function(x,params,rfx,PC1,PC2,PC3,extrap=T){
  xb=pmin(pmax(x,params$min.size),params$max.size)
  pc1=ifelse(extrap==T,PC1[1],pmin(pmax(PC1[1],params$PC1L),params$PC1U))
  pc2=ifelse(extrap==T,PC2[1],pmin(pmax(PC2[1],params$PC2L),params$PC2U))
  pc3=ifelse(extrap==T,PC3[1],pmin(pmax(PC3[1],params$PC3L),params$PC3U))
  p.flow<-params$flow.mu + rfx[3] + params$flow.bsize*xb +
    unlist(params$flow.bclim[1,1])*pc1 +
    unlist(params$flow.bclim[1,2])*pc2 +
    unlist(params$flow.bclim[3,2])*xb*pc2 +
    unlist(params$flow.bclim[1,3])*pc3 +
    unlist(params$flow.bclim[3,3])*xb*pc3
  return(invlogit(p.flow))
}
## Expected number of fruits for a plant of size x (log link).
## Uses PC2 and PC3 (first element of each pair), with a size-by-PC2
## interaction; rfx[4] is the fertility year random effect.
## NOTE(review): PC1 is accepted for a uniform signature but unused here.
fert.x <- function(x,params,rfx,PC1,PC2,PC3,extrap=T){
  xb=pmin(pmax(x,params$min.size),params$max.size)
  pc2=ifelse(extrap==T,PC2[1],pmin(pmax(PC2[1],params$PC2L),params$PC2U))
  pc3=ifelse(extrap==T,PC3[1],pmin(pmax(PC3[1],params$PC3L),params$PC3U))
  nfruits<-params$fert.mu + rfx[4] + params$fert.bsize*xb +
    unlist(params$fert.bclim[1,2])*pc2 +
    unlist(params$fert.bclim[3,2])*pc2*xb +
    unlist(params$fert.bclim[1,3])*pc3
  return(exp(nfruits))
}
## Seeds entering the 1-yr-old seed bank from a size-x plant:
## pr(flowering) * fruit count * seeds per fruit * seed survival.
fx<-function(x,params,rfx,PC1,PC2,PC3,extrap=T){
  flowering <- flow.x(x, params, rfx, PC1, PC2, PC3, extrap)
  fruits <- fert.x(x, params, rfx, PC1, PC2, PC3, extrap)
  flowering * fruits * params$mu_spf * params$seedsurv
}
#SIZE DISTRIBUTION OF RECRUITS
## Gaussian size distribution of new recruits, evaluated at sizes y.
recruit.size <- function(y, params){
  dnorm(y, params$mu_sdlgsize, params$sigma_sdlgsize)
}
# BIGMATRIX ---------------------------------------------------------------
## Build the full IPM kernel as an (mat.size+2) x (mat.size+2) matrix.
## State vector layout: [1] 1-yr-old seed bank, [2] 2-yr-old seed bank,
## [3..n+2] midpoints of the continuous size domain. PC1-PC3 are (t-1, t)
## climate PC pairs forwarded to the vital-rate functions defined above
## (fx, pxy, recruit.size). If random = T, the four year random effects
## are drawn (reproducibly via rand.seed) instead of using `rfx`.
## Returns list(IPMmat, Fmat, Tmat, meshpts).
bigmatrix<-function(params,
                    PC1, ## mean-zero PC values
                    PC2,
                    PC3,
                    random = F, ## If TRUE, the model includes random year deviates
                    lower.extension = 0, ## extend integration limits beyond true size limits
                    upper.extension = 0,
                    rand.seed = NULL, ## random seed for stochastic model runs
                    mat.size, ## matrix dimensions
                    rfx = c(0,0,0,0), ## default is no random year effects (grow, surv, flow, fert)
                    extrap=T){
  n<-mat.size
  L<-params$min.size + lower.extension
  U<-params$max.size + upper.extension
  #these are the upper and lower integration limits
  h<-(U-L)/n #Bin size
  b<-L+c(0:n)*h; #Lower boundaries of bins
  y<-0.5*(b[1:n]+b[2:(n+1)]); #Bins' midpoints
  #these are the boundary points (b) and mesh points (y)
  #Set year random effect to 0 by default, modify if random=T
  if(random==T){
    set.seed(rand.seed)
    rfx = rnorm(n=4, mean=0, sd=c(params$grow.sigma.year,
                                  params$surv.sigma.year,
                                  params$flow.sigma.year,
                                  params$fert.sigma.year))
  }
  # Fertility matrix
  Fmat<-matrix(0,(n+2),(n+2))
  # 1-yo banked seeds go in top row
  Fmat[1,3:(n+2)]<-fx(y,params,rfx,PC1,PC2,PC3,extrap)
  # Growth/survival transition matrix
  Tmat<-matrix(0,(n+2),(n+2))
  # Graduation to 2-yo seed bank = pr(not germinating as 1-yo)
  Tmat[2,1]<-(1-params$germ1)
  # Graduation from 1-yo bank to cts size = germination * size distn * pre-census survival
  Tmat[3:(n+2),1]<- params$germ1 * params$precenus_surv * recruit.size(y,params) * h
  # Graduation from 2-yo bank to cts size = germination * size distn * pre-census survival
  Tmat[3:(n+2),2]<- params$germ2 * params$precenus_surv * recruit.size(y,params) * h
  # Growth/survival transitions among cts sizes (midpoint rule, hence * h)
  Tmat[3:(n+2),3:(n+2)]<-t(outer(y,y,pxy,params=params,rfx=rfx,PC1=PC1,PC2=PC2,PC3=PC3,extrap=extrap)) * h
  # Put it all together
  IPMmat<-Fmat+Tmat #Full kernel is simply a summation of fertility
  #and transition matrix
  return(list(IPMmat=IPMmat,Fmat=Fmat,Tmat=Tmat,meshpts=y))
}
# lambdaS Simulations##########################################################
## Estimate the stochastic population growth rate (lambda_S) by iterating
## the IPM for max_yrs steps with climate drawn at random from
## `climate_window` (a subset of the PCclim data frame with columns
## PC1..PC3). Each step passes an adjacent (t-1, t) pair of PC values to
## bigmatrix(); the first 10% of log growth rates are discarded as
## burn-in. Returns exp(mean(log annual growth rate)).
## NOTE(review): sample(2:nrow(climate_window)) misbehaves when the window
## has exactly 2 rows -- sample(2) draws from 1:2, which can yield
## clim_yr = 1 and an out-of-range clim_yr - 1. Confirm callers always
## supply windows with >= 3 rows.
lambdaSim=function(params,climate_window,random=F,##climate_window is a subset of the PCclim data frame
                   max_yrs,mat_size,lower.extension,upper.extension,extrap=T){
  matdim<-mat_size+2
  K_t <- matrix(0,matdim,matdim)
  rtracker <- rep(0,max_yrs)
  n0 <- rep(1/matdim,matdim)
  for(t in 1:max_yrs){ #Start loop
    ## sample a climate year from the window provided (actually an adjacent pair of climate years)
    clim_yr <- sample(2:nrow(climate_window),size=1)## sample one of the climate years in the window
    #Store matrix
    K_t[,]<-bigmatrix(params=params,
                      PC1=c(climate_window$PC1[clim_yr-1],climate_window$PC1[clim_yr]), ## mean-zero PC values
                      PC2=c(climate_window$PC2[clim_yr-1],climate_window$PC2[clim_yr]),
                      PC3=c(climate_window$PC3[clim_yr-1],climate_window$PC3[clim_yr]),
                      lower.extension = lower.extension, ## extend limits beyond true size limits
                      upper.extension = upper.extension,
                      mat.size=mat_size,
                      random=random,
                      extrap=extrap)$IPMmat
    n0 <- K_t[,] %*% n0
    N <- sum(n0)
    rtracker[t]<-log(N)   # log annual growth rate (n0 is renormalized below)
    n0 <-n0/N
  }
  #discard initial values (to get rid of transient)
  burnin <- round(max_yrs*0.1)
  rtracker <- rtracker[-c(1:burnin)]
  #Finish and return
  #print(proc.time() - ptm)
  lambdaS<-exp(mean(rtracker))
  return(lambdaS)
}
## Arrange multiple ggplot objects on one page (the classic "multiplot"
## helper). Plots may be passed individually via ... or as a list in
## `plotlist`; `cols` sets the number of columns unless an explicit
## `layout` matrix (of plot indices) is supplied.
## NOTE(review): the `file` argument is accepted but never used.
multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) {
  library(grid)
  # Make a list from the ... arguments and plotlist
  plots <- c(list(...), plotlist)
  numPlots = length(plots)
  # If layout is NULL, then use 'cols' to determine layout
  if (is.null(layout)) {
    # Make the panel
    # ncol: Number of columns of plots
    # nrow: Number of rows needed, calculated from # of cols
    layout <- matrix(seq(1, cols * ceiling(numPlots/cols)),
                     ncol = cols, nrow = ceiling(numPlots/cols))
  }
  if (numPlots==1) {
    print(plots[[1]])
  } else {
    # Set up the page
    grid.newpage()
    pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
    # Make each plot, in the correct location
    for (i in 1:numPlots) {
      # Get the i,j matrix positions of the regions that contain this subplot
      matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
      print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
                                      layout.pos.col = matchidx$col))
    }
  }
}
# vital rates for SEV climate data ----------------------------------------
## GROWTH--nothing changes (no climate dependence)
## SURVIVAL
## Survival probability for the SEV-climate version of the model.
## Unlike sx(), the linear predictor uses only PC3 (linear + quadratic,
## from column 3 of params$surv.bclim). PC1/PC2 are accepted (and
## clamped into pc1/pc2) for a uniform call signature, but those values
## are never used in the predictor.
sx_SEV<-function(x,params,rfx,PC1,PC2,PC3,extrap=T){
  xb=pmin(pmax(x,params$min.size),params$max.size)
  pc1=ifelse(extrap==T,PC1[2],pmin(pmax(PC1[2],params$PC1L),params$PC1U))
  pc2=ifelse(extrap==T,PC2[2],pmin(pmax(PC2[2],params$PC2L),params$PC2U))
  pc3=ifelse(extrap==T,PC3[2],pmin(pmax(PC3[2],params$PC3L),params$PC3U))
  p.surv<-params$surv.mu + params$surv.bsize*xb + rfx[2] +
    unlist(params$surv.bclim[1,3])*pc3 +
    unlist(params$surv.bclim[2,3])*pc3*pc3
  return(invlogit(p.surv))
}
## COMBINED GROWTH_SURVIVAL
## SEV-climate survival-growth kernel: survive at size x, then grow to y.
pxy_SEV <- function(x,y,params,rfx,PC1,PC2,PC3,extrap=T){
  survival <- sx_SEV(x, params, rfx, PC1, PC2, PC3, extrap)
  growth <- gxy(x, y, params, rfx)
  survival * growth
}
#PRODUCTION OF 1-YO SEEDS IN THE SEED BANK FROM X-SIZED MOMS
## Flowering probability for the SEV-climate version of the model.
## Uses a size-by-PC1 interaction (flow.bclim[3,1]), a PC2 main effect
## (flow.bclim[1,2]) and a size-by-PC3 interaction (flow.bclim[3,3]);
## rfx[3] is the flowering year random effect. PCs use the first element
## of each (t-1, t) pair, clamped unless extrap is TRUE.
flow.x_SEV <- function(x,params,rfx,PC1,PC2,PC3,extrap=T){
  xb=pmin(pmax(x,params$min.size),params$max.size)
  pc1=ifelse(extrap==T,PC1[1],pmin(pmax(PC1[1],params$PC1L),params$PC1U))
  pc2=ifelse(extrap==T,PC2[1],pmin(pmax(PC2[1],params$PC2L),params$PC2U))
  pc3=ifelse(extrap==T,PC3[1],pmin(pmax(PC3[1],params$PC3L),params$PC3U))
  p.flow<-params$flow.mu + rfx[3] + params$flow.bsize*xb +
    unlist(params$flow.bclim[3,1])*xb*pc1 +
    unlist(params$flow.bclim[1,2])*pc2 +
    unlist(params$flow.bclim[3,3])*xb*pc3
  return(invlogit(p.flow))
}
## Expected fruit count for the SEV-climate version of the model (log link).
## Only PC1 enters the predictor; pc2/pc3 are computed but unused.
## NOTE(review): the coefficient used is fert.bclim[3,1] -- the slot that
## holds size-by-PC interactions in the other vital-rate functions -- yet
## it multiplies pc1 alone here. Confirm this is intended.
fert.x_SEV <- function(x,params,rfx,PC1,PC2,PC3,extrap=T){
  xb=pmin(pmax(x,params$min.size),params$max.size)
  pc1=ifelse(extrap==T,PC1[1],pmin(pmax(PC1[1],params$PC1L),params$PC1U))
  pc2=ifelse(extrap==T,PC2[1],pmin(pmax(PC2[1],params$PC2L),params$PC2U))
  pc3=ifelse(extrap==T,PC3[1],pmin(pmax(PC3[1],params$PC3L),params$PC3U))
  nfruits<-params$fert.mu + rfx[4] + params$fert.bsize*xb +
    unlist(params$fert.bclim[3,1])*pc1
  return(exp(nfruits))
}
## SEV-climate seed production into the 1-yr-old bank:
## pr(flowering) * fruit count * seeds per fruit * seed survival.
fx_SEV<-function(x,params,rfx,PC1,PC2,PC3,extrap=T){
  flowering <- flow.x_SEV(x, params, rfx, PC1, PC2, PC3, extrap)
  fruits <- fert.x_SEV(x, params, rfx, PC1, PC2, PC3, extrap)
  flowering * fruits * params$mu_spf * params$seedsurv
}
# BIGMATRIX ---------------------------------------------------------------
## SEV-climate counterpart of bigmatrix(): identical structure, but built
## from the *_SEV vital rates (fx_SEV, pxy_SEV). State layout: [1] 1-yr-old
## seed bank, [2] 2-yr-old seed bank, [3..n+2] continuous-size midpoints.
## Returns list(IPMmat, Fmat, Tmat, meshpts).
bigmatrix_SEV<-function(params,
                        PC1, ## mean-zero PC values
                        PC2,
                        PC3,
                        random = F, ## If TRUE, the model includes random year deviates
                        lower.extension = 0, ## extend integration limits beyond true size limits
                        upper.extension = 0,
                        rand.seed = NULL, ## random seed for stochastic model runs
                        mat.size, ## matrix dimensions
                        rfx = c(0,0,0,0), ## default is no random year effects (grow, surv, flow, fert)
                        extrap=T){
  n<-mat.size
  L<-params$min.size + lower.extension
  U<-params$max.size + upper.extension
  #these are the upper and lower integration limits
  h<-(U-L)/n #Bin size
  b<-L+c(0:n)*h; #Lower boundaries of bins
  y<-0.5*(b[1:n]+b[2:(n+1)]); #Bins' midpoints
  #these are the boundary points (b) and mesh points (y)
  #Set year random effect to 0 by default, modify if random=T
  if(random==T){
    set.seed(rand.seed)
    rfx = rnorm(n=4, mean=0, sd=c(params$grow.sigma.year,
                                  params$surv.sigma.year,
                                  params$flow.sigma.year,
                                  params$fert.sigma.year))
  }
  # Fertility matrix
  Fmat<-matrix(0,(n+2),(n+2))
  # 1-yo banked seeds go in top row
  Fmat[1,3:(n+2)]<-fx_SEV(y,params,rfx,PC1,PC2,PC3,extrap)
  # Growth/survival transition matrix
  Tmat<-matrix(0,(n+2),(n+2))
  # Graduation to 2-yo seed bank = pr(not germinating as 1-yo)
  Tmat[2,1]<-(1-params$germ1)
  # Graduation from 1-yo bank to cts size = germination * size distn * pre-census survival
  Tmat[3:(n+2),1]<- params$germ1 * params$precenus_surv * recruit.size(y,params) * h
  # Graduation from 2-yo bank to cts size = germination * size distn * pre-census survival
  Tmat[3:(n+2),2]<- params$germ2 * params$precenus_surv * recruit.size(y,params) * h
  # Growth/survival transitions among cts sizes (midpoint rule, hence * h)
  Tmat[3:(n+2),3:(n+2)]<-t(outer(y,y,pxy_SEV,params=params,rfx=rfx,PC1=PC1,PC2=PC2,PC3=PC3,extrap=extrap)) * h
  # Put it all together
  IPMmat<-Fmat+Tmat #Full kernel is simply a summation of fertility
  #and transition matrix
  return(list(IPMmat=IPMmat,Fmat=Fmat,Tmat=Tmat,meshpts=y))
}
|
49bdfda9a87f4e203cdba0e802a795b91d76585c
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/Ecfun/examples/missing0.Rd.R
|
dd83adebc22461f5e9f1e867d9cfdad02ba7cc7c
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 535
|
r
|
missing0.Rd.R
|
# Auto-generated example script for Ecfun::missing0(), which returns TRUE when
# its argument is either missing or has length zero.
library(Ecfun)
### Name: missing0
### Title: Missing or length 0
### Aliases: missing0
### Keywords: manip
### ** Examples
# Wrapper so we can probe how missing0() treats a formal argument.
tstFn <- function(x)missing0(x)
# missing
## Don't show:
stopifnot(
## End(Don't show)
# Argument not supplied at all -> TRUE
all.equal(tstFn(), TRUE)
## Don't show:
)
## End(Don't show)
# length 0
## Don't show:
stopifnot(
## End(Don't show)
# Zero-length vector supplied -> TRUE
all.equal(tstFn(logical()), TRUE)
## Don't show:
)
## End(Don't show)
# supplied
## Don't show:
stopifnot(
## End(Don't show)
# Ordinary length-1 argument -> FALSE
all.equal(tstFn(1), FALSE)
## Don't show:
)
## End(Don't show)
|
db23262e9676ef4e7861ad141bf2f26fd9d4fcb5
|
5d447149434c27efd1c05eeb2dfa05e033ee1f20
|
/03_aus_testing.R
|
10965f0f6d942275bf9ed7cbb9a7947df209ec72
|
[] |
no_license
|
rkodwyer/covid-19
|
84dd5cee68de540f54c0d8239615043d93b5329b
|
d46c586cd56925e0948200cc26a341883d88d979
|
refs/heads/master
| 2022-10-23T11:06:30.397882
| 2020-06-06T04:06:54
| 2020-06-06T04:06:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,878
|
r
|
03_aus_testing.R
|
### IMPORT DATA ---------------------------------------------------------------
# From Guardian Australia JSON feed
# Guardian Australia https://www.theguardian.com/au
# NOTE(review): relies on jsonlite::fromJSON, dplyr, tidyr, forcats, lubridate
# being attached by an earlier script in this project -- confirm load order
# before running this file standalone.
aus_guardian_html <- "https://interactive.guim.co.uk/docsdata/1q5gdePANXci8enuiS4oHUJxcxC13d6bjMRSicakychE.json"
aus_org <- fromJSON(aus_guardian_html, flatten = T)
aus_df <- aus_org$sheets$updates
# Replace blanks with NA, remove commas in numbers (all fields arrive as text)
aus_df_clean <- aus_df %>%
  mutate_all(~na_if(., "")) %>%
  mutate_all(~(gsub(",", "", .)))
### CLEAN DATA ----------------------------------------------------------------
# Convert data type, create date_time variable, rename.
# suppressWarnings(): coercing the free-text count columns to numeric produces
# "NAs introduced by coercion" warnings by design.
aus_clean <- suppressWarnings(aus_df_clean %>%
  mutate(State = fct_explicit_na(State, na_level = "Missing")) %>%
  mutate(Date = dmy(Date)) %>%
  # Missing times are treated as midnight so every row gets a valid date_time
  mutate(Time = ifelse(!is.na(Time), paste0(Time, ":00"), "00:00:00")) %>%
  mutate(date_time = as.POSIXct(Date + lubridate:: hms(Time))) %>%
  select(-`Update Source`,-`Notes` ) %>%
  mutate_if(is_character, as.numeric) %>%
  select(state = State,
         date = Date,
         date_time = date_time,
         cases = `Cumulative case count`,
         deaths = `Cumulative deaths`,
         tests =`Tests conducted (total)`,
         neg = `Tests conducted (negative)`,
         icu = `Intensive care (count)`,
         hospital = `Hospitalisations (count)`,
         recovered = `Recovered (cumulative)`)
)
# Interactive sanity check of the most recent NSW rows (prints via glimpse)
aus_clean %>%
  filter(state == "NSW") %>%
  arrange(desc(date)) %>%
  glimpse()
# Create dense dataframe
# For each day, select maximum value for all numeric variables
# If neg and cases exist but tests doesn't, tests = neg+cases
# Fill NA with previous value (and backwards for leading gaps)
aus_dense <- aus_clean %>%
  # Temporarily encode NA as 0 so max() works, then turn 0 back into NA below
  mutate_if(is.numeric, ~(if_else(is.na(.), 0, .))) %>%
  group_by(state, date) %>%
  arrange(date) %>%
  summarise_if(is.numeric, max) %>%
  mutate_all(~na_if(., 0)) %>%
  mutate(tests = if_else(is.na(tests),
                         if_else(!is.na(neg), neg + cases, tests),
                         tests)) %>%
  fill(cases, deaths, tests, icu, hospital, recovered) %>%
  fill(cases, deaths, tests, icu, hospital, recovered, .direction = "up") %>%
  select(-neg) %>%
  ungroup()
# Add population stats (hard-coded state population estimates)
aus_pop <- tribble(~"province_state", ~"state", ~"population",
                   "New South Wales", "NSW", 8118000,
                   "Victoria", "VIC", 6229900,
                   "Queensland", "QLD", 5115500,
                   "Western Australia", "WA", 2630600,
                   "South Australia", "SA", 1756500,
                   "Tasmania", "TAS", 535500,
                   "Australian Capital Territory", "ACT", 428100,
                   "Northern Territory", "NT", 245600) %>%
  mutate(state = as.factor(state))
aus_with_pop <- aus_dense %>%
  left_join(aus_pop, by = "state")
# Add new cases and per capita variables (per-capita rates scaled to per million)
aus_test <- aus_with_pop %>%
  group_by(state) %>%
  mutate(prev_cases = lag(cases, 1),
         new_cases = cases - prev_cases,
         cases_per_cap = cases/population*10^6,
         new_cases_per_cap = new_cases/population*10^6)%>%
  mutate(prev_tests= lag(tests, 1),
         new_tests = tests - prev_tests)%>%
  ungroup() %>%
  mutate(pos_test_ratio = cases/tests,
         test_per_cap = tests/population * 10^6)
# Keep only each state's most recent day's stats for plotting
aus_test_current <- aus_test %>%
  group_by(state) %>%
  filter(date == max(date)) %>%
  select(state, date, cases, tests, cases_per_cap, pos_test_ratio, test_per_cap) %>%
  glimpse()
### PLOT PREP -----------------------------------------------------------------
# Shared ggplot theme applied to all four panels below
aus_plot_theme <- theme_minimal() +
  theme(axis.title.x = element_text(size = 12, family = "sans"),
        axis.title.y = element_text(size = 12, family = "sans"),
        axis.text = element_text(size = 12, family = "sans"),
        plot.title = element_text(size = 16, face = "bold", family = "sans"),
        plot.subtitle = element_text(size = 12, face = "bold", family = "sans"),
        plot.caption = element_text(size = 12, face = "italic", hjust = 0, family = "sans"),
        legend.position = "None",
        panel.grid.minor.x = element_blank(),
        panel.grid.minor.y = element_blank())
label_size <- 4
# Title uses the latest data date, e.g. "Australian COVID-19 testing to 01 June 2020"
aus_date <- format(max(aus_test_current$date), "%d %B %Y")
aus_plot_title <- glue("Australian COVID-19 testing to ", aus_date)
# Create colour palette (one colour per state; same "Dark2" palette in each panel)
aus_pal <- brewer.pal(n = 8, name = "Dark2")
# 1. Plot of total cases (lollipop chart, states ordered by case count)
total_cases_plot <- aus_test_current %>%
  ggplot(aes(x = reorder(state, cases), y = cases)) +
  geom_point(aes(colour = reorder(state, cases))) +
  geom_segment(aes(yend = 0, xend = state, colour = reorder(state, cases))) +
  geom_text(aes(y = cases + 20, label = cases,
                colour = reorder(state, cases)), hjust = "left", size = label_size) +
  labs(title = aus_plot_title,
       subtitle = "Confirmed cases",
       x = "",
       y = "") +
  coord_flip() +
  # NOTE(review): axis limits are hard-coded; values beyond 3000 would be dropped
  scale_y_continuous(limits = c(0, 3000), breaks = c(0, 500, 1000, 1500, 2000, 2500, 3000)) +
  aus_plot_theme +
  scale_color_brewer(palette = "Dark2")
# 2. Plot of total tests (thousands)
total_tests_plot <- aus_test_current %>%
  ggplot(aes(x = reorder(state, cases), y = tests/10^3)) +
  geom_point(aes(colour = reorder(state, cases))) +
  geom_segment(aes(yend = 0, xend = state, colour = reorder(state, cases))) +
  geom_text(aes(y = tests/10^3 + 1, label = scales::comma(round(tests/10^3, digits = 1), suffix = "k"),
                colour = reorder(state, cases)), hjust = "left", size = label_size) +
  labs(subtitle = "Tests administered",
       x = "",
       y = "") +
  coord_flip() +
  scale_y_continuous(limits = c(0, 200), breaks = c(0, 50, 100, 150, 200),
                     labels = scales::comma_format(suffix = "k")) +
  aus_plot_theme +
  scale_color_brewer(palette = "Dark2")
# 3. Plot of tests per million people (shown per thousand on the axis)
test_per_cap_plot <- aus_test_current %>%
  ggplot(aes(x = reorder(state, cases), y = test_per_cap/10^3)) +
  geom_point(aes(colour = reorder(state, cases))) +
  geom_segment(aes(yend = 0, xend = state, colour = reorder(state, cases))) +
  geom_text(aes(y = test_per_cap/10^3 + 0.2, label = round(test_per_cap/10^3, digits = 1),
                colour = reorder(state, cases)),
            hjust = "left", size = label_size) +
  labs(subtitle = "Tests per thousand people",
       x = "",
       y = "") +
  coord_flip() +
  aus_plot_theme +
  scale_y_continuous(limits = c(0, 20), breaks = c(0, 5, 10, 15, 20)) +
  scale_color_brewer(palette = "Dark2")
# 4. Plot of positive case ratio (cases / tests)
pos_test_plot <- aus_test_current %>%
  ggplot(aes(x = reorder(state, cases), y = pos_test_ratio)) +
  geom_point(aes(colour = reorder(state, cases))) +
  geom_segment(aes(yend = 0, xend = state, colour = reorder(state, cases))) +
  geom_text(aes(y = pos_test_ratio + 0.0025,
                label = percent(pos_test_ratio, accuracy = 0.1), colour = reorder(state, cases)),
            hjust = "left", size = label_size) +
  labs(subtitle = "% Tests positive",
       x = "",
       y = "") +
  coord_flip() +
  aus_plot_theme +
  scale_y_continuous(labels = percent_format(0.1), limits = c(0, 0.05)) +
  scale_color_brewer(palette = "Dark2")
# Combine plots using {patchwork} to form 2x2
patch_1 <- total_cases_plot + total_tests_plot
patch_2 <- test_per_cap_plot + pos_test_plot
patch <- patch_1 / patch_2 # sent to Shiny App
|
8e7788c9a144999ddac9a31ff52c64dc78b40dd9
|
73640cd1b41aac73971607aa9921a22ca9f1f4a9
|
/man/hive_to_hdfs_txt.Rd
|
9b4ed4eab926b2f0dc3b5230382d849c5aca0b7e
|
[] |
no_license
|
mndrake/honeycomb
|
9f2d8be43c5a96634a5f98b12bce8207af2cc8ef
|
6fe6e84cf770f4501ed4883ff4e60717cb27dd9c
|
refs/heads/master
| 2022-12-17T08:22:38.588249
| 2018-02-07T19:37:29
| 2018-02-07T19:37:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 537
|
rd
|
hive_to_hdfs_txt.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tbl_Hive.R
\name{hive_to_hdfs_txt}
\alias{hive_to_hdfs_txt}
\title{Export a Hive table to a txt file in HDFS}
\usage{
hive_to_hdfs_txt(hive_tbl, hdfs_path, delim = "\\t")
}
\arguments{
\item{hive_tbl}{A \code{tbl_Hive} object}
\item{hdfs_path}{HDFS path for the resulting file}
\item{delim}{Delimiter for the exported text file}
}
\description{
\code{hive_to_hdfs_txt} will export a Hive table to a delimited text file
in HDFS (tab-delimited by default)
}
|
8e0650fad351b6aa3fda9bc1eda8ecb881df483a
|
3e1f6dfde5c940f7acde208d098e56a54550945f
|
/dash_docs/chapters/sharing_data/examples/scoping_wrong.R
|
2c189d1b1200fe455c5606e89794b610ffea1841
|
[
"MIT"
] |
permissive
|
plotly/dash-docs
|
a4d1b9e450aa19e811f8ae043fd56de330cce63a
|
f494e987701be1085ba9fb7b29bd875ee2146d5b
|
refs/heads/master
| 2023-08-03T02:18:16.257115
| 2021-12-14T18:51:52
| 2021-12-14T18:51:52
| 84,095,619
| 396
| 210
|
MIT
| 2023-01-18T20:29:56
| 2017-03-06T16:30:08
|
Python
|
UTF-8
|
R
| false
| false
| 1,050
|
r
|
scoping_wrong.R
|
# DELIBERATELY-WRONG teaching example from the Dash documentation ("Sharing
# Data Between Callbacks"). It demonstrates the anti-pattern of mutating a
# variable defined outside a callback; the `<<-` assignment below is the bug
# being illustrated and must NOT be "fixed" here.
library(dash)
library(dashCoreComponents)
library(dashHtmlComponents)
app <- Dash$new()
# Global data frame shared (unsafely) with the callback below
df <- data.frame(
  a = c(1,2,3),
  b = c(4,1,4),
  c = c('x', 'y', 'z'),
  stringsAsFactors=FALSE
)
# Layout: a dropdown over the values of column `c`, plus an output div
app$layout(
  htmlDiv(
    list(
      dccDropdown(
        id = 'dropdown',
        options = list(
          list(label = 'x', value = 'x'),
          list(label = 'y', value = 'y'),
          list(label = 'z', value = 'z')
        ),
        value = 'x'
      ),
      htmlDiv(id='output')
    )
  )
)
app$callback(output('output', 'children'),
             list(input('dropdown', 'value')),
             function(val) {
               # Here, `df` is an example of a variable that is
               # 'outside the scope of this function'.
               # It is not safe to modify or reassign this variable
               # inside this callback.
               # do not do this, this is not safe!
               # (Callbacks may run concurrently / per-user; mutating global
               # state corrupts the data for every other session.)
               df <<- lapply(df, `[[`, which(df$c == val))
               sprintf(paste(c('the output is', unlist(df))))
             })
app$run_server()
|
fbc930341a4b66b83326d83c05f63059e7b86428
|
40f4cb44ab742a168ca3f82d36a3e38dcaa6f844
|
/man/loadIsAssociatedTo.Rd
|
2a9a834516671e5894924e6f5d69b143d23e9a21
|
[] |
no_license
|
sankleta/BED
|
34e3f91fceffbb1164e65ab8a4cb24e6431b898b
|
85c5c5ba4bbc927155d454dc6612512c7b197805
|
refs/heads/master
| 2021-04-30T05:55:28.535605
| 2018-02-06T11:18:59
| 2018-02-06T11:18:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 665
|
rd
|
loadIsAssociatedTo.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/loadIsAssociatedTo.R
\name{loadIsAssociatedTo}
\alias{loadIsAssociatedTo}
\title{Feeding BED: Load BE ID associations}
\usage{
loadIsAssociatedTo(d, db1, db2, be = "Gene")
}
\arguments{
\item{d}{a data.frame with information about the associations
to be loaded. It should contain the following fields: "id1" and "id2".
At the end id1 is associated to id2 (this way and not the other).}
\item{db1}{the DB of id1}
\item{db2}{the DB of id2}
\item{be}{a character corresponding to the BE type (default: "Gene")}
}
\description{
Not exported to avoid unintended modifications of the DB.
}
|
38a600b0aca0b7b10208145dc903b44a7704e30e
|
651dc73b7660d3ace5aa640ac9c6f173374756c6
|
/Report Creation.R
|
1233c2648d53ca7ca4b698d4c312ebee54d46481
|
[] |
no_license
|
abhijithasok/Report-management-automation-using-VBA-R-Python
|
28788c47c2325f6868c03f25aee2757babb3b603
|
bd6df1762aba12bffc44038db4da1fd3bb652c22
|
refs/heads/master
| 2021-01-13T04:21:26.278318
| 2016-12-28T05:24:48
| 2016-12-28T05:24:48
| 77,451,982
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,636
|
r
|
Report Creation.R
|
# Many of the packages below meant for enabling R to handle xls and xlsx serve the same-
# -purpose. All are loaded just for options
library(colorspace)
library(ggplot2)
library(ggrepel)
library(devtools)
library(readxl)
library(XLConnect)
library(installr)
library(rJava)
library(xlsx)
library(gdata)
library(gtools)
library(dplyr)
library(openxlsx)
# NOTE(review): 'rtools' is not an installable R package (Rtools is the Windows
# build toolchain); this call likely errors at startup -- confirm and remove.
library(rtools)
############################ Raw Data Input #####################################
rawname <- paste0("C:/Users/abhijithasok/Documents/Purchase Daily Dashboards/Raw Data/OMX_PO_ITEM_RATE_ALT_",toupper(format(Sys.Date(),"%d-%b-%Y")),".csv") #The date format flexibility in the name ensures that the particular day's data is correctly picked up
rawdata <- read.csv(file = rawname,header=T,stringsAsFactors = F)
############################# Creating Inc/Dec ##################################
pastdata <- rawdata[rawdata$PO.date != format(Sys.Date()-1,"%d-%b-%y"),] #extracting all purchases from 2 days before and back, from the present date
presentdata <- rawdata[rawdata$PO.date == format(Sys.Date()-1,"%d-%b-%y"),] #extracting all purchases from 1 day before, from the present date
#From this point, 'past' refers to an element(s) in 'pastdata' and 'present' refers to an element(s) in 'presentdata'
uomcon <- read.csv("C:/Users/abhijithasok/Documents/Purchase Daily Dashboards/UOM Conversion list.csv",header=T) #List of unit combos between past & present and their conversion factors
uomuncon <- read.csv("C:/Users/abhijithasok/Documents/Purchase Daily Dashboards/UOM Unconversion.csv",header=T) #List of unit combos that are to be treated as different items, in spite of other parameters being same
### Major operations on pastdata ###
pastdata$pastID <- ""
pastdata$pastID <- 1 : nrow(pastdata) #Unique ID for past instances
presentdata$presentID <- ""
presentdata$presentID <- 1 : nrow(presentdata) #Unique ID for present instances
m <- merge(pastdata,presentdata[,c("Item.desc","Extra.Desc","UOM")],by = c("Item.desc","Extra.Desc"),all.x=T) #Matching every purchase entry in the past to the unit of purchase in the present(if it exists), based on descriptions of the item of purchase
m <- m[!duplicated(m),]
# After the merge, UOM.x is the past unit and UOM.y the present unit
colnames(m)[which(names(m) == "UOM.x")] <- "UOM"
colnames(m)[which(names(m) == "UOM.y")] <- "Present.UOM"
m$pastID<-NULL
m$Different.UOM<-""
m$Different.UOM <- ifelse(m$UOM == m$Present.UOM,0,1) #Checking the rows where past and present units are differing
m$Unconversion.Flag <- ""
# NOTE(review): hard-coded c(1,1) assumes the unconversion CSV has exactly 2 rows
uomuncon$Unconversion.Flag <- c(1,1)
m <- merge(m,uomuncon,by.x=c("UOM","Present.UOM"),by.y=c("Unconversion.Past","Unconversion.Present"),all.x=TRUE) #Matching every purchase entry in the past with a flag that indicates whether the entry should be left as is, in spite of past and present units differing
m$Unconversion.Flag.x<-NULL
colnames(m)[which(names(m) == "Unconversion.Flag.y")] <- "Unconversion.Flag"
m$Unit.Price.in.Present.Unit<-""
m <- merge(m,uomcon,by.x=c("UOM","Present.UOM"),by.y=c("UOM.Original","UOM.Convert"),all.x=TRUE)
m$Unit.Price.in.Present.Unit <- ifelse(m$Different.UOM==1 & m$Unconversion.Flag==0,m$Unit.Price/m$Dividing.factor,m$Unit.Price) #Using dividing factor from conversion table to alter unit price if the units are different, and carrying the same value over otherwise
m$Converted.UOM <- ""
m$Converted.UOM <- ifelse(m$UOM == m$Present.UOM,m$UOM,ifelse(m$Unconversion.Flag == 0,m$Present.UOM,m$UOM)) #Final units after all conversions
### Major operations on presentdata ###
presentdata$presentID <-NULL
n <- presentdata
aggmin <- aggregate(Unit.Price.in.Present.Unit~Item.desc+Extra.Desc+Converted.UOM,m,function(x)min(x)) #Computing minimum unit price by item descriptions from the past
aggmax <- aggregate(Unit.Price.in.Present.Unit~Item.desc+Extra.Desc+Converted.UOM,m,function(x)max(x)) #Computing maximum unit price by item descriptions from the past
minmerge <- merge(n,aggmin,by.x=c("Item.desc","Extra.Desc","UOM"),by.y=c("Item.desc","Extra.Desc","Converted.UOM"),all.x=T)
maxmerge <- merge(minmerge,aggmax,by.x=c("Item.desc","Extra.Desc","UOM"),by.y=c("Item.desc","Extra.Desc","Converted.UOM"),all.x=T) #Matching minimum and maximum prices between past and present across item descriptions and units
n <- maxmerge
colnames(n)[which(names(n) == "Unit.Price.in.Present.Unit.x")] <- "Past.Min.Price"
colnames(n)[which(names(n) == "Unit.Price.in.Present.Unit.y")] <- "Past.Max.Price"
n$Past.Avg.Price <- ""
# "Average" here is the midpoint of past min and max, not a true mean
n$Past.Avg.Price <- (n$Past.Min.Price + n$Past.Max.Price)/2
n$Inc.Dec.Prev.Avg <- ""
n$Inc.Dec.Prev.Avg <- ifelse(n$Past.Avg.Price != 0,ifelse(n$Unit.Price>n$Past.Avg.Price,"INCREASE",
                                                          ifelse(n$Unit.Price<n$Past.Avg.Price,"DECREASE",
                                                                 ifelse(n$Unit.Price == n$Past.Avg.Price,"NO CHANGE","NEW ITEM"))),"NEW ITEM") #Marking whether unit price increased, decreased or stayed constant compared to past average or whether the item is being purchased for the first time
n$Change.Amount <- ""
n$Change.Amount <- abs(n$Unit.Price - n$Past.Avg.Price) #Change amount
n$Change.Percentage <- ""
n$Change.Percentage <- ifelse(!is.na((n$Change.Amount/n$Past.Avg.Price)),paste0(round(((n$Change.Amount/n$Past.Avg.Price)*100),digits=2),"%"),"NA") #Change amount in %
n <- n[,c(4:12,1:3,13:27)] #Rearranging columns to match original data variable order
####### Creating Increase and Decrease lists from overall purchased items ########
inclist <- na.omit(n[n$Inc.Dec.Prev.Avg == "INCREASE",])
declist <- na.omit(n[n$Inc.Dec.Prev.Avg == "DECREASE",])
#### Creating workbook with increase and decrease lists (Flexible naming) #######
write.xlsx2(inclist,file = paste0("C:/Users/abhijithasok/Documents/Purchase Daily Dashboards/Inc_Dec/Increase_Decrease workbook - ",format(Sys.Date(),"%b %d, %Y"),".xlsx"),sheetName = "Increase in Price from Past Avg", append = FALSE, row.names = FALSE)
write.xlsx2(declist,file = paste0("C:/Users/abhijithasok/Documents/Purchase Daily Dashboards/Inc_Dec/Increase_Decrease workbook - ",format(Sys.Date(),"%b %d, %Y"),".xlsx"),sheetName = "Decrease in Price from Past Avg", append=TRUE, row.names = FALSE)
####################### Folder preparation for storing generated plots #######################
mainDir <- "C:/Users/abhijithasok/Documents/Purchase Daily Dashboards/Figures"
subDir <- paste0(format(Sys.Date(),"%b %d")," generated figures")
dir.create(file.path(mainDir, subDir))
setwd(file.path(mainDir, subDir))
subDir1 <- paste0("Increase - ",format(Sys.Date(),"%b %d"))
subDir2 <- paste0("Decrease - ",format(Sys.Date(),"%b %d"))
dir.create(file.path(mainDir, subDir, subDir1))
dir.create(file.path(mainDir, subDir, subDir2))
datainc <- inclist
datadec <- declist
# Generate one unit-price-over-time figure per item whose price changed.
#
# The Increase and Decrease loops were previously duplicated verbatim; they are
# unified here into a single helper parameterised by the price direction, which
# only affects the destination sub-folder name. (The only other difference was
# an as.character() call around PO.date in the Decrease loop; it is a no-op for
# the character column read with stringsAsFactors = FALSE, so it is kept in the
# unified version as the safer form.)
#
# Args:
#   change_data: data frame laid out like inclist/declist (col 10 = Item.desc,
#                col 11 = Extra.Desc)
#   direction:   "Increase" or "Decrease" -- selects the output folder
# Side effects: writes one .jpg per row under the day's figures folder; plot
#               failures are caught and reported, not fatal.
save.price.change.figures <- function(change_data, direction)
{
  for (i in seq_len(nrow(change_data)))  # seq_len() is safe when nrow == 0
  {
    itemdesc <- change_data[i, 10]
    extradesc <- change_data[i, 11]
    tsdata <- rawdata[which(rawdata$Item.desc == itemdesc & rawdata$Extra.Desc == extradesc), ]
    tsdata$PO.date <- as.Date(as.character(tsdata$PO.date), "%d-%b-%y")
    # "/" would break the file path, so replace it in the descriptions
    tsdata$Item.desc <- gsub("/", "-", tsdata$Item.desc)
    tsdata$Extra.Desc <- gsub("/", "-", tsdata$Extra.Desc)
    # Plot save destination (flexible naming based on current date, item
    # description, extra description)
    name <- paste0("C:/Users/abhijithasok/Documents/Purchase Daily Dashboards/Figures/",
                   substring(format(Sys.Date(), "%b %d, %Y"), 1, 6), " generated figures/",
                   direction, " - ", substring(format(Sys.Date(), "%b %d, %Y"), 1, 6), "/",
                   i, " - ", gsub(" .*$", "", itemdesc), gsub(" .*$", "", extradesc), ".jpg")
    tryCatch({
      p <- ggplot(tsdata, aes(y = tsdata$Unit.Price, x = tsdata$PO.date,
                              color = gsub(" .*$", "", tsdata$Company.Name)),
                  type = "n", xlab = "Date", ylab = "Unit Price") +
        geom_point() + geom_line() +
        geom_text_repel(aes(label = tsdata$Unit.Price), size = 3) +
        ggtitle(paste(itemdesc, extradesc, sep = " ")) +
        labs(x = "Date", y = "Unit Price") +
        scale_colour_discrete(name = "Company Name")
      ggsave(filename = name, plot = p, width = 25, height = 10, units = "cm") # saving the generated plot
    }, error = function(e) { cat("ERROR :", conditionMessage(e), "\n") })
  }
}
################# Increase Figures ###################
save.price.change.figures(datainc, "Increase")
################# Decrease Figures ###################
save.price.change.figures(datadec, "Decrease")
|
112b0777204e8d444d6ab264c2884be23229c0e5
|
1fbd3028d66ff1bc14aab8cf415afbe6841b4679
|
/scripts/catalog_mapping_check.R
|
c29ee9f49a0060ddedbb4308b8e3cb9e73ac6208
|
[] |
no_license
|
QingxiaCindyChen/survival_gwas
|
21386cb2ab3cb93ac0bb80d081b0dc68a3c9e705
|
db8cf6ce3a53a88e2af51478d9bf86706630b05e
|
refs/heads/master
| 2023-04-11T10:26:19.435541
| 2021-04-19T18:56:55
| 2021-04-19T18:56:55
| 359,566,061
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,122
|
r
|
catalog_mapping_check.R
|
# Cross-check the GWAS-catalog-study -> phecode mapping: join the mapping to
# the catalog study table and the phecode descriptions, then write out the
# list of mapped phecodes.
library('data.table')
library('readr')
procDir = 'processed'
studyFilename = 'gwas_catalog_v1.0.2-studies_r2018-08-28.tsv'
studyData = read_tsv(file.path(procDir, 'gwas_catalog', studyFilename))
setDT(studyData)
# Normalize headers: lower-case and replace /, [, ], whitespace with "_"
colnames(studyData) = gsub('/|\\[|\\]|\\s', '_', tolower(colnames(studyData)))
phecodeData = read_csv(file.path(procDir, 'phecode_data.csv.gz'),
                       col_types = 'ccc??????')
setDT(phecodeData)
studyPhecodeMapping = read_csv(file.path(procDir, 'gwas_catalog',
                                         'catalog_study_phecode.csv'), col_types = 'cc')
setDT(studyPhecodeMapping)
# Attach phecode descriptions, then catalog study details, to each mapping row
d = merge(studyPhecodeMapping, phecodeData[, .(phecode, phenotype)],
          by = 'phecode')
d = merge(d, studyData[, .(study_accession, disease_trait, initial_sample_size, study)],
          by = 'study_accession')
# Studies per (phecode, description, trait) combination.
# NOTE(review): dSummary is computed but never written/printed here -- confirm
# whether it is used downstream or was meant for interactive inspection.
dSummary = d[, .(nStudies = .N), by = .(phecode, phenotype, disease_trait)]
setnames(dSummary, 'phenotype', 'phecode_description')
# Distinct, sorted phecodes with at least one mapped catalog study
dPhecode = unique(d[order(phecode), .(phecode)])
write_tsv(dPhecode, file.path(procDir, 'gwas_catalog', 'phecodes_catalog50.tsv'),
          col_names = FALSE)
|
c294a0780b29f78e9dc2caf971ece377fb913764
|
55c414b82fa630447793f152a0fa8d0803c06fc3
|
/pet_demo/demo.R
|
6b6b255239b357fdb88ed3f6c9d73188bcf4f739
|
[] |
no_license
|
LabNeuroCogDevel/data_parsing_scripts
|
f102215a51f4020a923c593e7f501a6559513fa3
|
bbfd0c899621f24dd818a64ef9b4708cb0d9927a
|
refs/heads/master
| 2021-01-21T10:37:35.827295
| 2017-03-08T20:59:46
| 2017-03-08T20:59:46
| 83,460,779
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,408
|
r
|
demo.R
|
library(dplyr)
library(tidyr)
library(xlsx)
library(curl)
# Download the PET demographics workbook (published Google Sheet) and build an
# ID/ethnicity/sex table, then summary counts by ethnicity and sex.
gsurl <- 'https://docs.google.com/spreadsheets/d/1Sk5I09hybI-4VJKd_6ST1d_J76fVd1D8zVjZ9vBLYd0/pub?output=xlsx'
# BUGFIX: the workbook was previously saved as 'pet.xslx' (typo) but read back
# below as 'pet.xlsx', so the reads used a stale or absent file.
curl_download(gsurl, 'pet.xlsx')
sex <- read.xlsx('pet.xlsx', sheetName = "PreTest")       # sheet 3
demo <- read.xlsx('pet.xlsx', sheetName = "Demographic")  # sheet 7
#demo <- read.table('demo.txt',sep="\t",quote=NULL,header=T)
#sex <- read.table('sex.txt',sep="\t",quote=NULL,header=T)
# Collapse the one-column-per-ethnicity indicator columns into a single
# comma-separated 'eth' string per participant.
eth <-
  demo %>%
  select(PETROWID, hispanic, american_indian, asian, black, hawaiian, white) %>%
  gather('eth', 'ethbool', -PETROWID) %>%
  filter(ethbool == T) %>%
  group_by(PETROWID) %>%
  filter(!PETROWID == 1) %>% # remove redundant rows 1 and 100
  summarise(eth = paste(collapse = ",", sort(eth)))
# merge the two together and save file
ethsex <- merge(eth, sex[, c('ID', 'PETROWID', 'sex')], by = 'PETROWID') %>%
  select(ID, eth, sex, PETROWID)
write.table(ethsex, file = "eth_sex.csv", sep = ',', quote = F, row.names = F)
# # subset just what nathian wants
# nr.list<-c('11482','11484','11228','11486','11468','11487','11489','11275','11490','11491','11425','11492','11493','11248','11495','11496','11497','11370','11498','11393','11499','11501','11502','11503','11504','10195','11506','11507','11488','11509','11510','11512','11395','11513','11508','11514','11515','11516','10985','11517','11518','11519','11520','11338','11521','11522','11524','11434','11526','11527','11528','11529','11530','11531','11533','11535','11536','11537','11538','11540','11541','11542','11543','11544','11546','10880','11547','11048','11548','11549','11550','11551','11554','11555','11557','11558','11560','11561','11562','11564','11565','11270','11568','11570','11571','11573','11574','11575','11576','11577','11578','11579','10982','11581','11582','11589')
# ethsex.sub <- ethsex %>% filter(ID %in% nr.list)
# nr.list [ ! nr.list %in% ethsex.sub$ID ]
# write.table(ethsex.sub,file="eth_sex_sub.csv",sep=',',quote=T,row.names=F)
# check no lunaid repeated
# BUGFIX: 'group <- by(ID)' was a corrupted 'group_by(ID)' and errored at runtime.
repeatIDs <- ethsex %>% group_by(ID) %>% summarise(n = n()) %>% filter(n > 1)
if (nrow(repeatIDs) > 0L) stop('have repeat IDs! -- not 1-to-1 PETROWID-lunaid')
# count by eth+gender (wide table: one column per sex)
count <-
  ethsex %>%
  select(-ID) %>%
  group_by(eth, sex) %>%
  summarise(n = n()) %>%
  spread(sex, n)
# Same counts collapsed to white-only vs non-white
count.nonwhite <-
  count %>%
  ungroup %>%
  mutate(eth = ifelse(eth == "white", "white-only", "non-white")) %>%
  group_by(eth) %>%
  summarise_each(funs(sum(., na.rm = T)))
|
ca4f15bf41f98da53a3554fe9f62b4c8b9bf75ed
|
2d8409a80bdf7b6bd03d14cbade1659427198d51
|
/code/covid/make_covid_figures_and_tables.R
|
52af16c77c20dd8f8be510b96f7b6e6ed9b30fe6
|
[] |
no_license
|
joeflack4/ending_hiv
|
1ba6d2d8d4494d10528b97d1a850a44b2cd41fe2
|
7b337e6f978ea0cf6eb17d1a5c55267106952008
|
refs/heads/master
| 2023-07-25T03:07:46.399438
| 2021-08-24T17:20:38
| 2021-08-24T17:20:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,013
|
r
|
make_covid_figures_and_tables.R
|
# Manually-run scratch block: the `1==2` guard keeps it from executing when the
# file is source()d; run the body interactively (with `df` in scope) to
# regenerate the 4-city summary CSV.
if (1==2)
{
    tab = make.covid.summary.table(df)
    write.csv(tab, 'code/covid/summaries/jheem_covid_summary_4_cities.csv')
}
library(ggplot2)
library(scales)
# Human-readable axis/table labels for each model outcome key
OUTCOME.LABELS = c(incidence="Incident Cases (n)",
                   new='Reported Cases (n)',
                   prevalence="Prevalent Cases (n)",
                   incidence.rate="Incidence (per 100,000)",
                   new.rate='Reported Diagnoses (per 100,000)',
                   prevalence.rate="Prevalence (per 100,000)",
                   diagnosed='Serostatus-Aware (%)',
                   suppression='Virally Suppressed (%)')
# Labels for the COVID-affected model input variables
VARIABLE.LABELS = c(sexual.transmission='Sexual Transmission',
                    testing='HIV Testing',
                    prep='PrEP Uptake',
                    suppression='Viral Suppression')
# Axis-label templates; "XXX" is replaced with the variable label at plot time
VARIABLE.CATEGORY.LABELS = c(reduction='Maximal Reduction in XXX due to COVID (%)',
                             time.averaged.reduction='Time-Averaged Reduction in XXX due to COVID (%)',
                             start.normalize.time="Time When XXX Begins Returning to pre-COVID Levels",
                             increase='Rebound Increase in XXX as Pandemic Recedes (%)')
# Plot-axis ranges per variable.
# NOTE(review): only sexual.transmission is defined and this constant is not
# referenced in the visible functions -- confirm whether it is still used.
VARIABLE.RANGES = rbind(
    sexual.transmission = c(lower=0, upper=0.5)
)
##------------##
##-- TABLES --##
##------------##
# Build a formatted summary table (one row per MSA x scenario) of COVID-impact
# simulation outcomes.
#
# For each MSA a blank "header" row holding the MSA name is emitted first,
# followed by one row per scenario ('baseline' is always prepended to the
# requested scenarios). Each cell is the summary statistic across simulations,
# formatted with thousands separators, optionally followed by a
# [lower-upper] credible interval and a '%' suffix for percentage outcomes.
#
# Args:
#   df: wide simulation data frame; outcome columns are named
#       "<scenario>_<outcome>_<year>" ("COVID." prefix for non-baseline)
#   years, outcomes, scenarios, msas: which cells to tabulate
#   duplicate.msa.name: retained for interface compatibility (currently unused
#       in the active code path; see commented-out block below)
#   summary.stat: statistic applied across simulations (default mean)
#   include.ci: append a quantile-based interval to each cell
#   ci.coverage: CI coverage probability (default 95%)
#
# Returns: data.frame of character cells ready for write.csv.
# NOTE(review): depends on globals OUTCOME.LABELS and COVID.SCENARIO.NAMES;
# the latter is defined elsewhere in this project -- confirm it is loaded.
make.covid.summary.table <- function(df,
                                     years=c(2021, 2030),
                                     outcomes=c('incidence','incidence.rate','new','new.rate', 'prevalence', 'prevalence.rate', 'diagnosed','suppression'),
                                     scenarios=c('base','delayed.hiv.care','rebound.sexual.transmission','rebound.sex.delayed.hiv.care'),
                                     msas=unique(df$msa),
                                     duplicate.msa.name=F,
                                     summary.stat = mean,
                                     include.ci=T,
                                     ci.coverage=.95)
{
    rv = NULL
    scenarios=c('baseline',scenarios)
    ci.alpha = (1-ci.coverage)/2  # tail probability on each side of the CI
    for (msa in msas)
    {
        for (scenario in scenarios)
        {
            col.values = character()
            for (outcome in outcomes)
            {
                for (year in years)
                {
                    # Baseline columns have no "COVID." prefix in df
                    col.name = paste0(ifelse(scenario=='baseline','','COVID.'), scenario,
                                      "_", outcome, "_", year)
                    values = df[df$msa==msa, col.name]
                    # Proportion outcomes are scaled to percent and suffixed '%'
                    is.pct.outcome = outcome=='diagnosed' || outcome=='suppression'
                    if (is.pct.outcome)
                        values = 100*values
                    one.col.value = format(round(summary.stat(values)), big.mark=',')
                    if (is.pct.outcome)
                        one.col.value = paste0(one.col.value, '%')
                    if (include.ci)
                        one.col.value = paste0(one.col.value, " [",
                                               format(round(quantile(values, probs=ci.alpha)), big.mark=','),
                                               "-",
                                               format(round(quantile(values, probs=1-ci.alpha)), big.mark=','),
                                               "]"
                        )
                    names(one.col.value) = paste0(OUTCOME.LABELS[outcome], ", ", year)
                    col.values = c(col.values, one.col.value)
                }
            }
            # col.values = c(MSA=ifelse(scenario=='baseline' || duplicate.msa.name,
            #                           as.character(df$msa.name[df$msa==msa][1]), ''),
            #                Scenario=as.character(COVID.SCENARIO.NAMES[scenario]),
            #                col.values)
            scenario.value = as.character(COVID.SCENARIO.NAMES[scenario])
            names(scenario.value) = 'MSA/Scenario'
            col.values = c(scenario.value, col.values)
            # Before the first (baseline) scenario row, emit a blank header row
            # carrying only the MSA display name
            if (scenario=='baseline')
            {
                header.row = col.values
                header.row[] = ''
                header.row[1] = as.character(df$msa.name[df$msa==msa][1])
                rv = rbind(rv,
                           as.data.frame(matrix(header.row, nrow=1, dimnames=list(NULL, names(col.values)))))
            }
            rv = rbind(rv,
                       as.data.frame(matrix(col.values, nrow=1, dimnames=list(NULL, names(col.values)))))
        }
    }
    rv
}
##-------------##
##-- FIGURES --##
##-------------##
# Scatter-plot the change in an HIV outcome (COVID scenario minus baseline)
# against one COVID input variable, optionally faceted by MSA, with an
# optional smoother and Spearman's rho label per facet.
#
# Args:
#   variable, variable.category: which df column ("<variable>.<category>") to
#       put on the x axis
#   outcome, scenario, years: passed to get.delta.outcome() to compute the
#       y-axis change
#   df: simulation results; msa restricts to a single MSA if non-NULL
#   outcome.as.percent: express the outcome change as a percent of baseline
#   point.size, point.shape, alpha: point aesthetics
#   add.smoother: overlay geom_smooth
#   label.rho: print Spearman's rho in the top-right of each facet
#
# Returns: a ggplot object.
# NOTE(review): depends on get.delta.outcome(), OUTCOME.LABELS,
# VARIABLE.LABELS and VARIABLE.CATEGORY.LABELS defined elsewhere in the file.
make.variable.vs.outcome.figure <- function(variable=c('sexual.transmission','testing','prep','suppression')[1],
                                            outcome=c('incidence','new','prevalence','diagnosed')[1],
                                            variable.category=c('reduction','time.averaged.reduction','start.normalize.time','increase')[1],
                                            scenario=c('base','delayed.hiv.care','increased.sexual.transmission')[1],
                                            df,
                                            msa=NULL,
                                            years=2020:2030,
                                            outcome.as.percent=T,
                                            point.size=3,
                                            point.shape=19,
                                            alpha=0.8,
                                            add.smoother = T,
                                            label.rho = T)
{
    if (!is.null(msa))
        df = df[df$msa==msa,]
    plot.df = data.frame(
        delta=get.delta.outcome(df, outcome=outcome, scenario=scenario, years=years, as.percent=outcome.as.percent),
        variable = df[,paste0(variable, '.', variable.category)],
        msa=df$msa.name
    )
    # Substitute the variable's display name into the category label template
    x.label = gsub("XXX", VARIABLE.LABELS[variable], VARIABLE.CATEGORY.LABELS[variable.category])
    y.label = paste0("Change in ", OUTCOME.LABELS[outcome],
                     ifelse(outcome.as.percent, " (%)", ""),
                     "\n (COVID - baseline)")
    rv = ggplot(plot.df) +
        geom_point(aes(x=variable, y=delta), size=point.size, shape=point.shape, alpha=alpha) +
        xlab(x.label) +
        ylab(y.label)
    if (outcome.as.percent)
        rv = rv + scale_y_continuous(labels = percent)
    # Reduction-type x variables are proportions, so format as percent
    if (variable.category=='reduction' || variable.category=='time.averaged.reduction')
        rv = rv + scale_x_continuous(labels = percent)
    if (length(unique(plot.df$msa))>1)
        rv = rv + facet_wrap(~msa)
    if (add.smoother)
        rv = rv + geom_smooth(data=plot.df, aes(x=variable, y=delta))
    if (label.rho)
    {
        # One rho per MSA, anchored to the top-right corner of its facet
        msas = unique(plot.df$msa)
        rho.df = data.frame(msa=msas,
                            rho=sapply(msas, function(msa){
                                mask = plot.df$msa == msa
                                cor(plot.df$variable[mask], plot.df$delta[mask], method='spearman')
                            }),
                            x = max(plot.df$variable),
                            y = max(plot.df$delta)
        )
        rho.df$label = paste0("Spearman's rho = ", round(rho.df$rho, 2))
        rv = rv + geom_label(data=rho.df, aes(x,y,label=label), vjust='top', hjust='right')
    }
    rv
}
# Plot the change in an outcome (COVID - baseline, color scale) against one
# variable's normalization time (x) and maximal reduction (y).
#
# Args mirror make.variable.vs.outcome.figure; additionally:
#   averted.color / excess.color / no.change.color: diverging color endpoints
#   use.tiles: if TRUE, draw binned tiles instead of points.
#
# Returns: a ggplot object.
# NOTE(review): the use.tiles=T branch (the DEFAULT) calls make.tile.df(),
# which is not defined anywhere in this file, and maps x.bin/y.bin columns
# that plot.df does not contain -- this branch appears unfinished and will
# error at render time. Call with use.tiles=FALSE, or confirm make.tile.df()
# exists elsewhere before relying on the default.
make.one.variable.reduction.vs.time.figure <- function(variable=c('sexual.transmission','testing','prep','suppression')[1],
                                                       outcome=c('incidence','new','prevalence','diagnosed')[1],
                                                       df,
                                                       msa=NULL,
                                                       years=2020:2030,
                                                       outcome.as.percent=T,
                                                       averted.color = 'green4',
                                                       excess.color = 'red2',
                                                       no.change.color = 'yellow',
                                                       point.size=4,
                                                       point.shape=15,
                                                       alpha=0.6,
                                                       use.tiles=T)
{
    if (!is.null(msa))
        df = df[df$msa==msa,]
    plot.df = data.frame(
        delta=get.delta.outcome(df, outcome=outcome, years=years, as.percent=outcome.as.percent),
        start.normalize.time = df[,paste0(variable, '.start.normalize.time')],
        reduction = df[,paste0(variable, '.reduction')],
        msa = df$msa.name
    )
    color.scale.title = paste0("Change in ", OUTCOME.LABELS[outcome],
                               ifelse(outcome.as.percent, " (%)", ""),
                               "\n (COVID - baseline)")
    if (use.tiles)
    {
        # See NOTE(review) above: make.tile.df() / x.bin / y.bin are undefined here
        tile.df = make.tile.df()
        rv = ggplot(plot.df) +
            geom_tile(aes(x=x.bin, y=y.bin, color=delta))
    }
    else
    {
        rv = ggplot(plot.df) +
            geom_point(aes(x=start.normalize.time, y=reduction, color=delta), shape=point.shape, size=point.size, alpha=alpha) +
            scale_y_continuous(labels = percent)
    }
    rv = rv +
        xlab(paste0("Time (year) When ", VARIABLE.LABELS[variable]," Begins to Normalize")) +
        ylab(paste0("Reduction (%) in ", VARIABLE.LABELS[variable]," Due to COVID"))
    # Diverging scale: averted cases (negative delta) vs excess cases (positive)
    if (outcome.as.percent)
        rv = rv + scale_color_gradient2(name=color.scale.title,
                                        labels = percent,
                                        low=averted.color, mid=no.change.color, high=excess.color)
    else
        rv = rv + scale_color_gradient2(name=color.scale.title,
                                        low=averted.color, mid=no.change.color, high=excess.color)
    if (length(unique(plot.df$msa))>1)
        rv = rv + facet_wrap(~msa)
    rv
}
# Plots change in an outcome (COVID - baseline) as a function of the
# reductions in TWO intervention variables, either as a binned tile plot
# (use.tiles=TRUE) or as a colored scatter plot.
#
# Key arguments:
#   variable1/variable2 with variable1.category/variable2.category -
#       select the df columns to plot (paste0(variable, '.', category))
#   scenario          - COVID scenario passed to get.delta.outcome()
#   outcome.tile.stat - summary statistic applied within each tile
#   bin.width         - tile bin width (a fraction, e.g. 0.05 = 5%)
#   min.change/max.change - delta is clamped to this range before plotting
#       so a few extreme values do not dominate the color scale
# Returns a ggplot object.
make.two.variable.reduction.figure <- function(variable1=c('sexual.transmission','testing','prep','suppression')[1],
                                               variable2=c('sexual.transmission','testing','prep','suppression')[4],
                                               variable1.category=c('reduction','time.averaged.reduction','start.normalize.time','increase')[1],
                                               variable2.category=c('reduction','time.averaged.reduction','start.normalize.time','increase')[1],
                                               scenario=c('base','delayed.hiv.care','increased.sexual.transmission')[1],
                                               outcome=c('incidence','new','prevalence','diagnosed')[1],
                                               df,
                                               msa=NULL,
                                               years=2020:2025,
                                               outcome.as.percent=T,
                                               averted.color = 'green4',
                                               excess.color = 'red2',
                                               no.change.color = 'white',
                                               point.size=4,
                                               point.shape=15,
                                               alpha=0.6,
                                               use.tiles=T,
                                               show.tile.n=F,
                                               outcome.tile.stat=mean,
                                               bin.width=0.05,
                                               min.change=-0.5,
                                               max.change=0.5)
{
    # Optionally restrict to a single MSA
    if (!is.null(msa))
        df = df[df$msa==msa,]

    var1.name = paste0(variable1, ".", variable1.category)
    var2.name = paste0(variable2, ".", variable2.category)

    plot.df = data.frame(
        delta=get.delta.outcome(df, outcome=outcome, scenario=scenario, years=years, as.percent=outcome.as.percent),
        reduction1 = df[,var1.name],
        reduction2 = df[,var2.name],
        msa=df$msa.name
    )

    color.scale.title = paste0("Change in ", OUTCOME.LABELS[outcome],
                               ifelse(outcome.as.percent, " (%)", ""),
                               "\n (COVID - baseline)")

    if (use.tiles)
    {
        # Summarize delta within 2-D bins of the two reductions
        tile.df = make.tile.df(plot.df,
                               x.name = 'reduction1',
                               y.name = 'reduction2',
                               outcome = 'delta',
                               outcome.stat = outcome.tile.stat,
                               separate.by = 'msa',
                               x.binwidth = bin.width,
                               y.binwidth = bin.width,
                               x.is.percent = T,
                               y.is.percent = T)

        # Diagnostic: raw (pre-clamp) range of the tile deltas
        print(round(range(tile.df$delta),2))

        # Clamp so extreme tiles do not wash out the color scale
        tile.df$delta = pmax(min.change, pmin(max.change, tile.df$delta))

        rv = ggplot(tile.df) +
            geom_tile(aes(x.bin, y.bin, fill=delta))

        if (show.tile.n)
            rv = rv + geom_text(aes(x.bin, y.bin, label=n))

        # Label the integer bin indices with their value ranges
        x.bin.names = attr(tile.df, 'x.bin.names')
        y.bin.names = attr(tile.df, 'y.bin.names')
        rv = rv +
            scale_x_continuous(breaks=1:length(x.bin.names), labels=x.bin.names) +
            scale_y_continuous(breaks=1:length(y.bin.names), labels=y.bin.names) +
            theme(axis.text.x=element_text(angle = 45, hjust = 1))
    }
    else
    {
        plot.df$delta = pmax(min.change, pmin(max.change, plot.df$delta))
        rv = ggplot(plot.df) +
            geom_point(aes(x=reduction1, y=reduction2, color=delta), shape=point.shape, size=point.size, alpha=alpha) +
            scale_y_continuous(labels = percent) + scale_x_continuous(labels = percent)
    }

    # Axis labels: substitute the variable name into the category template
    x.label = gsub("XXX", VARIABLE.LABELS[variable1], VARIABLE.CATEGORY.LABELS[variable1.category])
    y.label = gsub("XXX", VARIABLE.LABELS[variable2], VARIABLE.CATEGORY.LABELS[variable2.category])

    rv = rv +
        xlab(x.label) +
        ylab(y.label) +
        theme(panel.background = element_blank())

    # Tiles use the fill aesthetic, points use color
    if (use.tiles)
        scale.color.fn = scale_fill_gradient2
    else
        scale.color.fn = scale_color_gradient2

    if (outcome.as.percent)
        rv = rv + scale.color.fn(name=color.scale.title,
                                 labels = percent,
                                 limits=c(min.change,max.change),#c(-1,MAX.FOLD.INCREASE-1),
                                 midpoint=0,
                                 low=averted.color, mid=no.change.color, high=excess.color)
    else
        rv = rv + scale.color.fn(name=color.scale.title,
                                 limits=c(min.change, max.change), midpoint=0,
                                 low=averted.color, mid=no.change.color, high=excess.color)

    if (length(unique(plot.df$msa))>1)
        rv = rv + facet_wrap(~msa)

    rv = rv + theme(legend.position = 'bottom')
    rv
}
##-- HELPER FUNCTIONS --##
# Computes the change in an outcome between a COVID scenario and the
# baseline, aggregated over the requested years.
#
# Arguments:
#   df         - data frame with columns named
#                'baseline_<outcome>_<year>' and 'COVID.<scenario>_<outcome>_<year>'
#   outcome    - outcome name embedded in the column names (e.g. 'incidence')
#   scenario   - COVID scenario suffix; defaults to 'base' so callers that
#                predate this argument (e.g.
#                make.one.variable.reduction.vs.time.figure) keep working
#   years      - year(s) to aggregate over
#   as.percent - if TRUE, return the change as a fraction of baseline
#
# Returns a numeric vector with one element per row of df.
get.delta.outcome <- function(df,
                              outcome,
                              scenario='base',
                              years,
                              as.percent)
{
    # Build the per-year column names for both arms
    baseline.colnames = paste0('baseline_', outcome, '_', years)
    covid.colnames = paste0('COVID.', scenario, '_', outcome, '_', years)

    if (length(years)==1)
    {
        baseline = df[,baseline.colnames]
        covid = df[,covid.colnames]
    }
    else
    {
        # Multiple years: sum each row's outcome across years
        baseline = rowSums(df[,baseline.colnames])
        covid = rowSums(df[,covid.colnames])
    }

    rv = covid - baseline
    if (as.percent)
        rv = rv / baseline# fraction of baseline (intentionally not * 100)
    rv
}
##------------------##
##-- PLOT HELPERS --##
##------------------##
# Bins two numeric columns of a data frame into a 2-D grid and summarizes an
# outcome within each occupied cell, separately for each level of
# 'separate.by' (e.g. MSA). Used to build geom_tile() summaries.
#
# Arguments:
#   df             - data frame containing x.name, y.name, outcome, separate.by
#   x.name, y.name - names of the numeric columns to bin
#   outcome        - name of the column to summarize within each cell
#   outcome.stat   - summary function applied within each cell (default mean)
#   x.binwidth, y.binwidth - bin widths
#   separate.by    - grouping column; one set of cells per level
#   x.is.percent, y.is.percent - if TRUE, bin labels are formatted as percents
#
# Returns a data.frame with one row per occupied (group, x.bin, y.bin) cell
# and columns <separate.by>, x.bin, y.bin, <outcome>, n. Bin edges and display
# names are attached as attributes (x/y.bin.lowers, x/y.bin.uppers,
# x/y.bin.names).
#
# NOTE(review): bin lower edges are anchored at 0 (x.binwidth * 0, 1, 2, ...),
# not at x.range[1]; negative values, or data far from 0 (e.g. calendar
# years), will not fall into any bin and yield NA -- confirm inputs are
# offsets/fractions before reusing this for other variables.
make.tile.df <- function(df,
                         x.name,
                         y.name,
                         outcome,
                         outcome.stat=mean,
                         x.binwidth=0.05,
                         y.binwidth=0.05,
                         separate.by='msa',
                         x.is.percent=T,
                         y.is.percent=T)
{
    # Expand the data range outward to whole multiples of the bin width
    x.range = c(x.binwidth * floor(min(df[,x.name]) / x.binwidth),
                x.binwidth * ceiling(max(df[,x.name]) / x.binwidth))
    y.range = c(y.binwidth * floor(min(df[,y.name]) / y.binwidth),
                y.binwidth * ceiling(max(df[,y.name]) / y.binwidth))

    x.n.bins = (x.range[2] - x.range[1]) / x.binwidth
    y.n.bins = (y.range[2] - y.range[1]) / y.binwidth

    # Bin edges; the top bin is open-ended so the max value is included.
    # orig.*.uppers keep the finite edge for the display names below.
    # (A duplicated recomputation of x.uppers/x.lowers was removed here.)
    orig.x.uppers = x.uppers = x.binwidth * (1:x.n.bins)
    x.lowers = x.uppers - x.binwidth
    x.uppers[x.n.bins] = Inf

    orig.y.uppers = y.uppers = y.binwidth * (1:y.n.bins)
    y.lowers = y.uppers - y.binwidth
    y.uppers[y.n.bins] = Inf

    # Index of the bin containing each value (NA if no bin matches)
    df$x.bin = sapply(df[,x.name], function(x){
        (1:x.n.bins)[x>=x.lowers & x<x.uppers][1]
    })
    df$y.bin = sapply(df[,y.name], function(y){
        (1:y.n.bins)[y>=y.lowers & y<y.uppers][1]
    })

    # One row per occupied (group, x.bin, y.bin) cell.
    # (rbind-in-a-loop is quadratic, but cell counts are small here.)
    rv = NULL
    unique.splits = unique(df[,separate.by])
    for (split in unique.splits)
    {
        for (x.bin in 1:x.n.bins)
        {
            for (y.bin in 1:y.n.bins)
            {
                mask = df[,separate.by] == split & df$x.bin == x.bin & df$y.bin == y.bin
                if (any(mask))
                {
                    rv = rbind(rv,
                               data.frame(split=split,
                                          x.bin=as.integer(x.bin),
                                          y.bin=as.integer(y.bin),
                                          outcome=outcome.stat(df[mask,outcome]),
                                          n=sum(mask)
                               ))
                }
            }
        }
    }

    # Rename the generic columns to the caller's names
    names(rv)[names(rv)=='outcome'] = outcome
    names(rv)[names(rv)=='split'] = separate.by

    attr(rv, 'x.bin.lowers') = x.lowers
    attr(rv, 'x.bin.uppers') = x.uppers
    attr(rv, 'y.bin.lowers') = y.lowers
    attr(rv, 'y.bin.uppers') = y.uppers

    # Human-readable bin labels, e.g. "5% to 10%" (finite edges only)
    if (x.is.percent)
    {
        x.lowers = paste0(100*x.lowers, '%')
        x.uppers = paste0(100*orig.x.uppers, '%')
    }
    if (y.is.percent)
    {
        y.lowers = paste0(100*y.lowers, '%')
        y.uppers = paste0(100*orig.y.uppers, '%')
    }
    attr(rv, 'x.bin.names') = paste0(x.lowers, ' to ', x.uppers)
    attr(rv, 'y.bin.names') = paste0(y.lowers, ' to ', y.uppers)

    rv
}
|
618ec1de180387d0266ad6724fb55793cbc0c736
|
0f062674ed0146a2dc8e9a3c8b965679bef8d9dd
|
/plot2.R
|
94098b9c88674b92bb52b77f02d2468eb371611d
|
[] |
no_license
|
maybetoffee/explorator_data_analysis
|
5a93a94aef7406b9e3982da346634ddcd2e57e65
|
5909541b9bb50e0477a62295dc4d01ef004a3eb8
|
refs/heads/master
| 2021-01-01T17:14:51.754501
| 2017-07-22T13:41:38
| 2017-07-22T13:41:38
| 98,033,365
| 0
| 0
| null | null | null | null |
GB18030
|
R
| false
| false
| 739
|
r
|
plot2.R
|
# Exploratory Data Analysis, plot 2: Global Active Power over time for
# 2007-02-01 and 2007-02-02 from the UCI household power consumption data.
getwd()
setwd("D:/coursera_R")  # NOTE(review): hard-coded path; change for your machine

# Load the data; '?' marks missing values in this dataset.
# (fix: the argument is spelled na.strings, not na.string)
data <- read.table("./household_power_consumption.txt", header = T, sep = ";", na.strings = "?")

# Convert the Date variable to Date class and keep only the two target days
data$Date <- as.Date(data$Date, format = "%d/%m/%Y")
data <- subset(data, subset = (Date >= "2007-02-01" & Date <= "2007-02-02"))

# Combine date and time into a single timestamp
data$datetime <- strptime(paste(data$Date, data$Time), "%Y-%m-%d %H:%M:%S")

# plot2
# POSIXct stores the timestamp as a signed count of seconds, which plots cleanly
data$datetime <- as.POSIXct(data$datetime)

# Use the data= argument instead of attach()/detach(), which pollute the search path
plot(Global_active_power ~ datetime, data = data, type = "l",
     ylab = "Global Active Power (kilowatts)", xlab = "")

# save the image and close the device
dev.copy(png, file = "plot2.png", height = 480, width = 480)
dev.off()
|
e80347710c762e4e4bbc3c928e0de58563f15e44
|
5adb020e37747a3fbb15fc8d33c94eca682c003d
|
/scripts/07.anova.R
|
ac282cff6e88e2195d613e1504a906a5107bed73
|
[] |
no_license
|
andrew-hipp/oak-morph-2020
|
a28c73a32d3a8dce3b381b3c51cb69ef30b30132
|
f7acc4bbac1536ab422a5ec8ec80c783a58c16ea
|
refs/heads/master
| 2023-04-12T09:37:17.335773
| 2022-08-05T19:37:32
| 2022-08-05T19:37:32
| 140,023,087
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 401
|
r
|
07.anova.R
|
# ANOVA for blade length and SLA (specific leaf area = Area.Mass), modeling
# each trait as a function of site and tree.
# NOTE(review): oak.dat and the PCA fit `temp` are created by earlier scripts
# in this pipeline; this file does not define them.
#ANOVA FOR BLADE LENGTH AND SLA
anova.1 <- lm(bladeL ~ site + tree, data = oak.dat)
anova(anova.1)
anova.2 <- lm(Area.Mass ~ site + tree, data = oak.dat)
anova(anova.2)

# Project the data onto the fitted PCA, append the scores as new columns,
# then run the same site + tree ANOVA on the first two principal components.
#ANOVA USING PCA SCORES
PC <- predict(temp, newdata = oak.dat)
oak.dat <- cbind(oak.dat, PC)
anova.3 <- lm(PC1 ~ site + tree, data = oak.dat)
anova(anova.3)
anova.4 <- lm(PC2 ~ site + tree, data = oak.dat)
anova(anova.4)
|
c9bd665cca93dd9087d68b073ed6f380c19d161e
|
3ee83c8a4c66054ce26684a4c032ca5e6f98bd9b
|
/plot2.R
|
85643b3869e3a3d5dad7a08e1b3501f9ca31b0bb
|
[] |
no_license
|
tuomiel1/ExData_Plotting1
|
430208d5a45a295c77c9a07f06f5b0202db52ab9
|
c96dee46130ba0e92931dbb64f67b4fa0fefd86d
|
refs/heads/master
| 2021-01-17T22:30:24.847060
| 2015-12-13T23:51:29
| 2015-12-13T23:59:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,244
|
r
|
plot2.R
|
#The following script implements submission 1/part 2 of Exploratory Data Analysis course in Coursera
#This script depends on:
##A working outside connection to the web: for downloading the data
##Access to data.table and ggplot2 libraries for easier plotting and data handling

##LOAD PACKAGES
# NOTE(review): require() silently returns FALSE if a package is missing;
# library() would fail fast instead.
require(data.table)
require(ggplot2)
library(scales)

#set locale so weekday abbreviations print in English
Sys.setenv(LANGUAGE="en")
Sys.setlocale("LC_TIME", "English")

##LOAD DATA & MANGLE
#directories:
#define where the zip file is downloaded
temp <- paste(getwd(), "data.zip", sep = "/")
#define the url of the file
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"

#download:
#download the file as binary (network side effect)
download.file(url = url,temp, mode = "wb")
#unzip the loaded file to the working directory then load a data.table with fread;
#everything is read as character and coerced explicitly below
dt <- fread(unzip(temp), verbose = TRUE, colClasses = "character" )
#remove the downloaded zip; the unzipped file remains in the working directory
unlink(temp)

#mangle:
#setkey for faster indexing on Date
setkey(dt, Date)
#only observations from 2007-02-01 and 2007-02-02 are needed (dates are
#stored as d/m/yyyy strings at this point). Subset the data.
dt <- dt[Date %in% "1/2/2007" | Date %in% "2/2/2007",]
#set variable names to lowercase
setnames(dt, names(dt), tolower(names(dt)))
#build a POSIXct timestamp from the date and time columns (by-reference := )
dt[,datetime:=paste(date,time,sep = " ")]
dt[,datetime:=as.POSIXct(strptime(dt$datetime, format = "%d/%m/%Y %H:%M:%S"))]
#coerce the measurement columns from character to numeric
dt[,global_active_power:=as.numeric(global_active_power)]
dt[,global_reactive_power :=as.numeric(global_reactive_power)]
dt[,global_intensity :=as.numeric(global_intensity)]
dt[,sub_metering_1 :=as.numeric(sub_metering_1)]
dt[,sub_metering_2 :=as.numeric(sub_metering_2)]
dt[,sub_metering_3 :=as.numeric(sub_metering_3)]
dt[,voltage :=as.numeric(voltage)]
dt[,date:=as.Date(date, format= "%d/%m/%Y")]

#PLOT 2: line plot of global active power over the two days,
#x axis labeled with weekday abbreviations at one-day breaks
plot2 <- ggplot() + geom_line(data=dt, aes(x=datetime,y=global_active_power)) +
    scale_x_datetime(labels=date_format("%a"), breaks=date_breaks("1 days")) +
    ylab("Global Active Power (kilowats)") +
    xlab("") +
    theme_classic()

#save plot (480x480 px = 6.4in x 6.4in at 75 dpi)
ggsave(filename = paste(getwd(),"plot2.png",sep = "/"), plot = plot2, width =6.4,height = 6.4, units = "in", dpi = 75)
|
d4187cb2fa9e36bffaff8cdf5e70e8bf2e939324
|
2da2406aff1f6318cba7453db555c7ed4d2ea0d3
|
/inst/snippet/as-matrix.R
|
623b365d041301d611f215539398b168960103c3
|
[] |
no_license
|
rpruim/fastR2
|
4efe9742f56fe7fcee0ede1c1ec1203abb312f34
|
d0fe0464ea6a6258b2414e4fcd59166eaf3103f8
|
refs/heads/main
| 2022-05-05T23:24:55.024994
| 2022-03-15T23:06:08
| 2022-03-15T23:06:08
| 3,821,177
| 11
| 8
| null | null | null | null |
UTF-8
|
R
| false
| false
| 117
|
r
|
as-matrix.R
|
# Matrix-vector multiplication demo; `A` is a matrix defined earlier in the
# surrounding document (it must have 3 columns to conform with x).
x <- 1:3
A %*% x # vector x treated as a column matrix
as.matrix(x) # explicit conversion to a column matrix
|
6e66f7b847792133d985cf557132983bdecfa05f
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/rgeos/examples/topo-unary-gSimplify.Rd.R
|
2ef15ea1d53be9a6dd23686e4281c9cae3b0c6ab
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 679
|
r
|
topo-unary-gSimplify.Rd.R
|
library(rgeos)
### Name: gSimplify
### Title: Simplify Geometry
### Aliases: gSimplify
### Keywords: spatial

### ** Examples

# Build example geometries from well-known text (WKT):
# a star-shaped polygon and a zig-zag line.
p = readWKT(paste("POLYGON((0 40,10 50,0 60,40 60,40 100,50 90,60 100,60",
                  "60,100 60,90 50,100 40,60 40,60 0,50 10,40 0,40 40,0 40))"))
l = readWKT("LINESTRING(0 7,1 6,2 1,3 4,4 1,5 7,6 6,7 4,8 6,9 4)")

# 2x4 grid: each original shape alongside simplifications at
# increasing tolerances (larger tol = coarser geometry).
par(mfrow=c(2,4))
plot(p);title("Original")
plot(gSimplify(p,tol=10));title("tol: 10")
plot(gSimplify(p,tol=20));title("tol: 20")
plot(gSimplify(p,tol=25));title("tol: 25")

plot(l);title("Original")
plot(gSimplify(l,tol=3));title("tol: 3")
plot(gSimplify(l,tol=5));title("tol: 5")
plot(gSimplify(l,tol=7));title("tol: 7")
# restore the default single-panel layout
par(mfrow=c(1,1))
|
91bd9a22729cb5d6d4c94040945e7f08c37858aa
|
dae6befcea92b6171d6e592d58ecb7c499a2ae9a
|
/tests/testthat/test-set-pars.R
|
d9403a696ff5ac5db2111b9c68389b1a7230a91d
|
[
"MIT"
] |
permissive
|
krlmlr/term
|
7b85ba675bbdff76e28e89d3c20c6726bd253303
|
f46b5b47455330ce3130ad858ac36055663dfa3d
|
refs/heads/master
| 2020-12-27T08:22:42.952071
| 2020-02-01T23:46:58
| 2020-02-01T23:46:58
| 237,830,909
| 0
| 0
|
NOASSERTION
| 2020-02-02T20:20:03
| 2020-02-02T20:20:02
| null |
UTF-8
|
R
| false
| false
| 1,982
|
r
|
test-set-pars.R
|
context("set-pars")

# Unit tests for set_pars(): replacing parameter names in term vectors.
test_that("set_pars", {
  expect_identical(set_pars(as.term("a"), "b"), as.term("b"))
  # value must have one element per unique parameter in x
  expect_error(
    set_pars(as.term("a"), c("b", "a")),
    "^`value` must be length 1, not 2[.]$", class = "chk_error"
  )
  expect_error(
    set_pars(as.term(c("a", "a")), c("b", "a", "c")),
    "^`value` must be length 1, not 3[.]$", class = "chk_error"
  )
  # replacement names must be syntactically valid (non-empty, not numeric)
  expect_error(
    set_pars(as.term("a"), ""),
    "^`value` must match regular expression", class = "chk_error"
  )
  expect_error(
    set_pars(as.term("a"), "1"),
    "^`value` must match regular expression", class = "chk_error"
  )
  expect_identical(set_pars(as.term(c("a", "b")), c("b", "a")), as.term(c("b", "a")))
  expect_identical(set_pars(as.term(c("a", "b")), c("b", "d")), as.term(c("b", "d")))
  # index suffixes such as "[ 1]" are preserved when renaming
  expect_identical(set_pars(as.term(c("a [ 1]", "b")), c("b", "d")), as.term(c("b [ 1]", "d")))
  expect_error(
    set_pars(as.term(rep("a", 7)), value = c("gamma", "theta", "rho")),
    "^`value` must be length 1, not 3[.]$", class = "chk_error"
  )
})

test_that("set_pars missing values", {
  # missing replacement values are rejected
  expect_error(set_pars(as.term(c("a [ 1]", "b")), c("b", NA)),
    "^`value` must not have any missing values[.]$",
    class = "chk_error"
  )
  # missing terms are rejected
  expect_error(set_pars(NA_term_, "a"), "^`x` must not have any missing values[.]$",
    class = "chk_error"
  )
  # invalid term vectors are rejected
  expect_error(
    set_pars(as.term(c("c c", "b")), "a"),
    "^All elements of term vector `x` must be valid[.]$", class = "chk_error"
  )
})

test_that("set_pars no values", {
  # a zero-length term vector requires a zero-length value
  term <- as.term(character(0))
  expect_identical(set_pars(term, character(0)), term)
  expect_error(set_pars(term, "c"),
    "^`value` must be length 0, not 1[.]$",
    class = "chk_error")
})
# This block tests the pars<- replacement function; its description was
# previously "set_pars missing values", duplicating the name of an earlier
# test block and misdescribing what it checks.
test_that("pars<- replaces parameter names", {
  term <- as.term(c("a [1]", "a[3,2]", "b", "bb"))
  pars(term) <- c("c", "d", "cd")
  expect_identical(
    term,
    structure(c("c [1]", "c[3,2]", "d", "cd"),
      class = c("term", "character")
    )
  )
})
|
ea28d57c7343809e2b8d7fe803c4671d809e8a2f
|
9244358cbe08a51cb2472625b0518671a35e43dd
|
/R/get_overview_options.R
|
801593430f261d92959d5231edc5fb002f8cb3bf
|
[
"MIT"
] |
permissive
|
KWB-R/wasserportal
|
b65461be84b05e62aa32fa75cefc4ac9cc4725f0
|
7ef43fe6ff55a4dfcd8e7ac92cc9a675e65c5c99
|
refs/heads/master
| 2023-09-03T15:39:15.593616
| 2023-02-19T21:37:04
| 2023-02-19T21:37:04
| 344,412,257
| 0
| 0
|
MIT
| 2023-02-19T21:37:06
| 2021-03-04T09:03:26
|
R
|
UTF-8
|
R
| false
| false
| 597
|
r
|
get_overview_options.R
|
#' Wasserportal Berlin: get overview options for stations
#'
#' @return list with shortcuts to station overview tables
#' (`wasserportal.berlin.de/messwerte.php?anzeige=tabelle&thema=<shortcut>`)
#' @export
#'
#' @examples
#' get_overview_options()
#'
get_overview_options <- function()
{
  # Shortcuts for surface-water measurement tables
  surface_water <- list(
    water_level = "ows",
    flow = "odf",
    temperature = "owt",
    conductivity = "olf",
    ph = "oph",
    oxygen_concentration = "oog",
    oxygen_saturation = "oos"
  )

  # Shortcuts for groundwater measurement tables
  groundwater <- list(
    level = "gws",
    quality = "gwq"
  )

  list(
    surface_water = surface_water,
    groundwater = groundwater
  )
}
|
482bcb1274ff7ae64524082e0066960f9877a117
|
c0eecbccaaa2663b670fc5298f793e62017821b5
|
/tests/testthat/test-user.level.functions.R
|
cfcf2c24adecefcd32a73111b1357164f5c21cc4
|
[] |
no_license
|
datapplab/SBGNview
|
6ce7de127da865dfa2306a155b928bbdc9801213
|
bbaeea8a37a23faca63377ee7094dfc4b920a387
|
refs/heads/master
| 2023-04-07T13:55:02.661178
| 2022-06-11T22:25:05
| 2022-06-11T22:25:05
| 189,049,462
| 20
| 7
| null | 2023-03-16T10:32:40
| 2019-05-28T14:55:44
|
R
|
UTF-8
|
R
| false
| false
| 1,908
|
r
|
test-user.level.functions.R
|
library(testthat)

###################################################
# changeDataId: mapping simulated compound data from KEGG COMPOUND
# accessions to pathwayCommons IDs should yield a non-empty result with
# the same number of experiment columns as the input.
test_that("changeDataId for compound", {
  cpd.sim.data <- sim.mol.data(mol.type = "cpd",
                               id.type = "KEGG COMPOUND accession",
                               nmol = 50000,
                               nexp = 2)

  change.cpd.id <- changeDataId(data.input.id = cpd.sim.data,
                                input.type = "kegg.ligand",
                                output.type = "pathwayCommons",
                                mol.type = "cpd",
                                sum.method = "sum")

  expect_true(nrow(change.cpd.id) > 0)
  expect_true(ncol(cpd.sim.data) == ncol(change.cpd.id))
})

###################################################
# downloadSbgnFile: the returned file names should match the .sbgn files
# that appear in the working directory (network + file-system side effects).
test_that("downloadSbgn", {
  data("sbgn.xmls")
  files <- downloadSbgnFile(pathway.id = c("P00001", "P00002"))
  files <- gsub(".//", "", files)
  get.files <- list.files(path = ".", pattern = "*.sbgn")
  expect_identical(files, get.files)
})

###################################################
# sbgn.gsets: gene-set extraction from the metacrop database should
# return a list (possibly empty -- the assertion is length >= 0).
test_that("sbgn.gsets", {
  mol.list <- sbgn.gsets(database = "metacrop",
                         id.type = "ENZYME",
                         species = "ath",
                         output.pathway.name = FALSE,
                         truncate.name.length = 50)
  expect_gte(length(mol.list), 0)
})

###################################################
# changeIds test kept for reference but disabled (presumably slow or
# network-bound -- TODO confirm before re-enabling):
# test_that("changeIds", {
#
#   gdata.bta <- sim.mol.data(mol.type = "gene", id.type = "ENSEMBLPROT",
#                             species = "bta", nmol = 2000)
#   ci.bta <- changeIds(input.ids = names(gdata.bta),
#                       input.type = "ENSEMBLPROT",
#                       output.type = "KO",
#                       mol.type = "gene",
#                       org = "bta")
#
#   expect_true(class(ci.bta) == "list")
#   expect_true(length(ci.bta) == 2000)
# })
|
b1e0b0f0330dacf88e6a00ec55472536de4d45e1
|
3ced7fc9cbc72d3d3bc7492c03053581a2edc901
|
/4.4.duration_deregistered.R
|
e72054f695086e582843d6c756afea9a5c5f3dfa
|
[] |
no_license
|
edugonzaloalmorox/survival-quality
|
4553bde947f825cf7e38b768fbea8136792212c1
|
239ef37995fec47d35c348e13ee2da5b83d71c38
|
refs/heads/master
| 2021-04-30T15:27:58.251502
| 2018-02-12T12:04:01
| 2018-02-12T12:04:01
| 121,241,206
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,882
|
r
|
4.4.duration_deregistered.R
|
#################################################
# Fill information regarding those de-registered and those registered
# 1 - September 2017
# @ Edu Gonzalo Almorox
###################################################
library(rio)
library(dplyr)
library(forcats)
library(tibble)
library(ggplot2)
library(lubridate)
library(readxl)
library(janitor)
library(stringr)
# Inputs: processed survival data and the CQC registry of de-activated
# (closed) care-home locations.
ds = import("/Users/Personas/Dropbox/PhD/ch2/paper/quality/agglomeration/data/processed/data_survival.csv")
deactivated = read_xlsx("~/Dropbox/PhD/ch2/paper/quality/agglomeration/data/raw/20170801 De-activated locations.xlsx", sheet = "Sheet1")

# idea: add missing information for those care homes that are deregistered before June 2017 - those that have NA information.
# ------------------------------------------------------------------------------------------------------------------------------

# IDs that have been rated and are registered as deactivated (closed before June 2017)
deact_june = deactivated %>% mutate_at(vars(ends_with("Date")), funs(as.Date)) %>% filter(`Location HSCA End Date` < "2017-06-01")
deact = intersect(unique(deact_june$`Location ID`), unique(ds$`Location ID`)) #those that registered as "deactive"

# datasets to work with
insp_deact = ds %>% filter (`Location ID` %in% deact) # this is the original dataset that I fill
deact.inspected = deact_june %>% filter (`Location ID` %in% deact) # this is the dataset where I get the information from

# Select information to fill in
# note: there are variables that have the same name and variables that have a different name;
# split the columns of the registry into those two groups
common.variables.inactive = intersect(names(insp_deact), names(deact.inspected))
diff.variables.inactive = setdiff(names(insp_deact), names(deact.inspected))

test_common = deact.inspected %>% select(one_of(common.variables.inactive)) # select common variables
# rename the registry's differently-named columns to match the survival data
test_diff = deact.inspected %>% select(`Location ID`, `Location HSCA End Date`,
                                       `Care homes beds` = `Care homes beds at point location de-activated`,
                                       `Location CCG Code` = `Location Commissioning CCG Code`,
                                       `Location CCG` = `Location Commissioning CCG`, `Provider County` = `Provider - County`)

# dataset with information about those that are inspected but inactive
test_inactive = left_join(test_common, test_diff, by = "Location ID")

# clean the names and link information
# 1-select variables that come from the CQC
insp_deact = insp_deact %>%
  clean_names() %>%
  mutate_at(vars(ends_with("date")), funs(as.Date))

insp_deact_cqc = insp_deact %>% select(location_id, location_name:provider_parliamentary_constituency, -category, -region, -la)

insp_deact_cqc_clean = insp_deact_cqc %>%
  clean_names() %>%
  mutate_at(vars(ends_with("date")), funs(as.Date))

test_inactive_clean = test_inactive %>%
  clean_names() %>%
  mutate_at(vars(ends_with("date")), funs(as.Date))

# Link information to ins_deact (those that are inspected but deregistered)
test_cqc = left_join(insp_deact_cqc_clean, test_inactive_clean, by ="location_id")

# clean up the names: keep the registry (.y) versions and strip the suffix
# NOTE(review): gsub("\\.y", ...) removes any ".y" substring, not only a
# trailing join suffix -- verify no column name contains ".y" elsewhere.
test_clean_cqc = test_cqc %>% select(location_id, publication_date:provider_county.y)
names.clean = names(test_clean_cqc)
names.clean = gsub("\\.y", "", names.clean)
names(test_clean_cqc) <- names.clean

# insert information that is missing (columns present in the survival data
# but not yet in the cleaned registry join)
common.names.inactive = intersect(names(insp_deact), names(test_clean_cqc))
diff.names.inactive = setdiff(names(insp_deact), names(test_clean_cqc))
test_diff_names = insp_deact %>% select(location_id, one_of(diff.names.inactive))
test_clean_cqc = left_join(test_clean_cqc, test_diff_names, by = "location_id")
# note: there is an additional variable ---> date of exit from the market

# check duplicates
test_clean_cqc = test_clean_cqc %>% group_by(location_id) %>% unique()

write.csv(test_clean_cqc, "/Users/Personas/Dropbox/PhD/ch2/paper/quality/agglomeration/data/processed/data_inspected_inactive.csv", row.names = FALSE)
# -----------------------------------
# Complete dataset with duration data
# load data_surv and inspected but deregistered
# -----------------------------------
library(rio)
library(dplyr)
library(forcats)
library(tibble)
library(ggplot2)
library(lubridate)
library(readxl)
library(janitor)
library(stringr)
# Load data
# Load data: full survival dataset plus the inspected-but-deregistered
# records produced by the previous section.
data.survival = import("/Users/Personas/Dropbox/PhD/ch2/paper/quality/agglomeration/data/processed/data_survival.csv")
data.survival.deregistered = import("/Users/Personas/Dropbox/PhD/ch2/paper/quality/agglomeration/data/processed/data_inspected_inactive.csv")

# homogenize names of variables and reorder
names.survival = data.survival %>% clean_names()
names.survival = names(names.survival)
names(data.survival) <- names.survival
# (printed diagnostic) columns present only in the deregistered data
setdiff(names(data.survival.deregistered), names(data.survival))
data.survival.deregistered = data.survival.deregistered %>% select(one_of(names.survival), location_hsca_end_date) %>% mutate(status = "inactive")

# check -----------------------------------------------------------------------
dereg = data.survival %>% filter(is.na(location_hsca_start_date))
setdiff(unique(dereg$location_id), unique(data.survival.deregistered$location_id))
# "1-155283083" "RXXY4" --> they do not appear in the registry of de-registered care homes

# split the data.frame into registered and deregistered, flag status, and stack
reg = data.survival %>% filter(!is.na(location_hsca_start_date)) %>% mutate(location_hsca_end_date = NA, status = "active")
reg_dereg = rbind(data.survival.deregistered, reg) %>% arrange(location_id, time)

# ------------------------------------------
# Complete with geographical information
# ------------------------------------------

# ONS postcode directory: LA code, coordinates, LSOA/MSOA per postcode
geo = import("/Users/Personas/Dropbox/PhD/ch2/paper/quality/agglomeration/data/raw/ONSPD_FEB_2017_UK/Data/ONSPD_FEB_2017_UK.csv")
geo_selection = geo %>% select(pcd,oslaua, lat, long, lsoa11, msoa11) %>% mutate(postcode = gsub("[[:blank:]]", "", pcd))

# select data frame with the postcodes of the care homes (whitespace removed
# so postcodes match the ONS formatting)
reg_dereg = reg_dereg %>% mutate(postcode = gsub("[[:blank:]]", "", location_postal_code))
reg_post = unique(reg_dereg$postcode)
geoposts = geo_selection %>% filter(postcode %in% reg_post)

# join geography, dropping the pre-existing (.x) geography columns in favor
# of the fresh ONS (.y) values
test = left_join(reg_dereg, geoposts, by = "postcode") %>% select(-oslaua.x, -lat.x, -long.x, -lsoa11.x, -msoa11.x)

# clean names: strip the ".y" join suffix
names.clean = names(test)
names.clean = gsub("\\.y", "", names.clean)
names(test) <- names.clean

# (printed diagnostic) columns added relative to reg_dereg
names.regdereg = names(reg_dereg)
names.test = names(test)
setdiff(names.test, names.regdereg)
# -------------------------------------------------------------

# keep only the original column set and overwrite the processed file
test = test %>% select(-pcd)
test1 = test %>% select(one_of(names(reg_dereg)))
write.csv(test1, "/Users/Personas/Dropbox/PhD/ch2/paper/quality/agglomeration/data/processed/data_survival.csv", row.names = FALSE)
|
04df8ce21af29541f71fdf27e57450826acd126f
|
cf5998744c0c76ef67647473da3c1b79d07fbff7
|
/man/read.starbeast.Rd
|
7356b6ec73a49fe95b8788bb01c87eb1cad49dc2
|
[] |
no_license
|
fmichonneau/starbeastPPS
|
00a3797e72ce882475669ce556628d08ec61db5f
|
3c07296a35d327ce840bc25d81a7a183259d9c19
|
refs/heads/master
| 2020-12-24T15:23:36.039533
| 2014-10-31T21:07:18
| 2014-10-31T21:07:18
| 18,690,628
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,013
|
rd
|
read.starbeast.Rd
|
\name{read.starbeast}
\alias{read.starbeast}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Read in the results of a *BEAST phylogenetic analysis}
\description{
Reads the data (via the xml file), the posterior distributions of gene trees, species trees and model parameters (via the tree and log files) from a *BEAST analysis into an object of class "starbeast.data".
}
\usage{
read.starbeast(beast.xml, combinedfiledirectory, logfile = "combined.log")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{beast.xml}{
The path and name of the xml file for the *BEAST analysis to be checked.}
\item{combinedfiledirectory}{
The location of the species and gene tree files and the log files generated by *BEAST.
}
\item{logfile}{
the name of the logfile.
}
}
\details{
This function reads in the data (via an xml file), the posterior distribution of gene trees, species trees and model parameters (via the log file). It assumes that the tree files have the name mylocus.combined.trees and the log file is called combined.log. This is because it assumes that the files have had the burn-in removed and been thinned (and possibly multiple runs combined) using a perl script "beast.tree.log.combiner.pl". Running that script is recommended because it's a lot easier than doing all that stuff by hand.
This script takes way too long, primarily because the way that it extracts the among-branch rate variation parameters and the species tree effective population sizes is slow. If anyone has a better way, let Noah Reid know and he'll implement it!
}
\value{
The function returns an object of class "starbeast.data". It is a list that contains several objects:
associations: maps alleles to species
alignments: contains DNA sequence alignments
gene.trees: A list of ape multiphylo objects containing posterior samples of coalescent genealogies for each locus in the analysis. branch rate variation is stored in a vector named "rate" appended to each phylo object.
species.trees: An ape multiphylo object containing posterior samples of the species tree. branch effective population sizes are stored in a vector named "dmv" appended to each phylo object.
log: *BEAST log file values for each sample from the posterior.
genes: A vector of the names for each locus.
genes.to.partitions: Maps sequence data partitions to their respective loci.
ploidy: A vector of numbers assigning ploidy to each locus. All values are 1 if each locus has the same ploidy. If ploidy varies, organellar DNA is 0.5, nuclear DNA is 2.0.
}
\references{
Reid et al. 2012
Heled and Drummond 2010
}
\author{
Noah Reid
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
9a80d53f36f82e223ff0ad34915cd1c09351aa05
|
bf7fcd367258cb8f02540ae11c7cabca55c08250
|
/R/ThorntonHIVRep.R
|
cbd1393cd1f354468c3a16bc2c96ca6b384d2679
|
[] |
no_license
|
zachary-chance1/CI-Assignment-3
|
9102b14739d39b47933b1f8dba798bebc31c9440
|
50ce9aeca929f46b96f527eaa0fc99eb26c82939
|
refs/heads/master
| 2022-07-20T03:14:45.317310
| 2020-05-20T17:12:51
| 2020-05-20T17:12:51
| 265,462,797
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,424
|
r
|
ThorntonHIVRep.R
|
library(tidyverse)
library(haven)
# Download a Stata dataset from the Mixtape data repository and read it.
#
# Arguments:
#   df - file name of the .dta file within the repository
# Returns the dataset as read by haven::read_dta().
read_data <- function(df)
{
  base_url <- "https://raw.github.com/scunning1975/mixtape/master/"
  read_dta(paste0(base_url, df))
}
# Thornton HIV testing-incentive data (downloaded from the Mixtape repo)
hiv <- read_data("thornton_hiv.dta")

# creating the permutations
# NOTE(review): this top-level tb is never read -- permuteHIV() below
# creates its own local tb, so this assignment appears to be dead code.
tb <- NULL
# One draw of the permutation distribution for the effect of 'any'
# (treatment assignment) on 'got' (outcome).
#
# Arguments:
#   df     - data frame with columns got, any, male
#   random - if TRUE, reshuffle the rows and reassign 'any' (first half 1s,
#            second half 0s); if FALSE, keep the observed assignment, which
#            yields the true (unpermuted) estimate
#
# Returns the OLS coefficient on 'any' from lm(got ~ any + male).
permuteHIV <- function(df, random = TRUE){
  tb <- df
  first_half <- ceiling(nrow(tb)/2)
  second_half <- nrow(tb) - first_half

  if(random == TRUE){
    # randomly permute the rows, then assign treatment to the first half
    tb <- tb %>%
      sample_frac(1) %>%
      mutate(any = c(rep(1, first_half), rep(0, second_half)))
  }

  # named 'fit' so the model object does not shadow stats::lm
  fit <- lm(got ~ any + male, data = tb)
  deltacoef <- fit$coefficients[2]  # coefficient on 'any'
  return(deltacoef)
}
# Observed (unpermuted) estimate of the treatment effect
true_effect = permuteHIV(hiv, random = FALSE)

# Permutation inference with 100 draws: the first iteration is the observed
# assignment, the remaining draws randomly reassign treatment.
# NOTE(review): the three blocks below repeat the same logic with different
# iteration counts; a helper function would remove the duplication.
iterations <- 100
permutation <- tibble(
  iteration = c(seq(iterations)),
  delta = as.numeric(
    c(permuteHIV(hiv, random = FALSE), map(seq(iterations-1), ~permuteHIV(hiv, random = TRUE)))
  )
)

# calculating the p-value: rank of the observed estimate among all draws
permutation <- permutation %>%
  arrange(-delta) %>%
  mutate(rank = seq(iterations))

p_value_100 <- permutation %>%
  filter(iteration == 1) %>%
  pull(rank)/iterations

# Same procedure with 1,000 draws
iterations <- 1000
permutation <- tibble(
  iteration = c(seq(iterations)),
  delta = as.numeric(
    c(permuteHIV(hiv, random = FALSE), map(seq(iterations-1), ~permuteHIV(hiv, random = TRUE)))
  )
)

# calculating the p-value
permutation <- permutation %>%
  arrange(-delta) %>%
  mutate(rank = seq(iterations))

p_value_1000 <- permutation %>%
  filter(iteration == 1) %>%
  pull(rank)/iterations

# Same procedure with 10,000 draws
iterations <- 10000
permutation <- tibble(
  iteration = c(seq(iterations)),
  delta = as.numeric(
    c(permuteHIV(hiv, random = FALSE), map(seq(iterations-1), ~permuteHIV(hiv, random = TRUE)))
  )
)

# calculating the p-value
permutation <- permutation %>%
  arrange(-delta) %>%
  mutate(rank = seq(iterations))

p_value_10000 <- permutation %>%
  filter(iteration == 1) %>%
  pull(rank)/iterations

# Placebo (permutation) distribution from the last run, with the observed
# effect marked by a vertical line
hist(permutation$delta, freq = FALSE, breaks = 200, main = "Placebo Distribution and True Effect", xlab = "Coefficient on ANY")
abline(v = true_effect)
text(0.4, 10, "True Effect")
text(0.4, 9, as.character(round(true_effect, digits = 4)))
|
9de20b21feccd302d25de4352faf64f7ad2fd633
|
fe268b6c71d1026785606bebe233e2fad88b492c
|
/R/dat_proc.R
|
4b123e7863f70dfad1fc92d3a2b50fc19b01a664
|
[] |
no_license
|
SCCWRP/ClearLakeRisk
|
b8ff1bdc1a0c84d84eb8087df6b233600dddaa6a
|
25e5a677d4b72e1bd10cd9d77520d189dfb04fcb
|
refs/heads/master
| 2020-07-27T04:05:15.512429
| 2019-09-20T21:02:22
| 2019-09-20T21:02:22
| 208,861,587
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,020
|
r
|
dat_proc.R
|
# setup -------------------------------------------------------------------
library(tidyverse)
library(readxl)
library(lubridate)
library(here)
# wrangle bio -------------------------------------------------------------

biodatraw <- read_excel(here('data/raw', 'Refined Biological Results.xlsx'))

# selecting station, lat, lon, date, time, parameter, value, unit;
# taking unique values (making some assumptions)
biodat <- biodatraw %>%
  select(
    station = MonitoringLocationIdentifier,
    latitude = `ActivityLocation/LatitudeMeasure`,
    longitude = `ActivityLocation/LongitudeMeasure`,
    date = ActivityStartDate,
    time = `ActivityStartTime/Time`,
    param = `CharacteristicName`,
    val = ResultMeasureValue,
    unit = `ResultMeasure/MeasureUnitCode`
  ) %>%
  unique

# keep parameters with > 1000 observations, always keeping microcystin and
# chlorophyll regardless of count
biodat <- biodat %>%
  group_by(param) %>%
  mutate(
    n = n()
  ) %>%
  ungroup %>%
  filter(n > 1000 | grepl('^Microcystin|^Chloroph', param)) %>%
  select(-n)

table(biodat[, c('param', 'unit')])

# standardizing names for units
biodat <- biodat %>%
  mutate(
    unit = case_when(
      unit == '%' ~ '% saturatn',
      unit == 'ft/sec' ~ 'ft',
      unit == 'PSS' ~ 'ppt', # these are equivalent
      unit == 'std units' ~ 'None', # for ph
      unit == 'uS/cm @25C' ~ 'uS/cm',
      grepl('Microcystin', param) & unit == 'ppb' ~ 'ug/l',
      TRUE ~ unit
    )
  )

# remove turbidity as FTU, JTU (unknown units)
# remove do sat as g/l
# remove conductance as deg C
biodat <- biodat %>%
  filter(!(unit %in% c('FTU', 'JTU') & param %in% 'Turbidity')) %>%
  filter(!(unit %in% 'g/l' & param %in% 'Dissolved oxygen saturation')) %>%
  filter(!(unit %in% 'deg C' & param %in% 'Specific conductance'))

# standardizing values for different units; non-detects/trace set to zero
biodat <- biodat %>%
  mutate(
    val = case_when(
      val %in% c('ND', 'TRACE') ~ 0,
      TRUE ~ as.numeric(val)
    ),
    val = case_when(
      param == 'Depth, Secchi disk depth' & unit == 'ft' ~ val * 0.3048, # to m
      param == 'Iron' & unit == 'ug/l' ~ val * 0.001, # to mg/l
      param == 'Specific conductance' & unit == 'uS/cm' ~ val * 0.001, # to ms/cm
      # BUG FIX: was `param == 'deg F'`, which can never match because
      # 'deg F' is a unit (see the unit case_when below), so Fahrenheit
      # values were relabelled deg C without being converted
      unit == 'deg F' ~ (val - 32) * 5 / 9 , # to deg C
      param == 'Total dissolved solids' & unit == 'tons/ac ft' ~ val * 0.735468, # to g/l
      param == 'Total dissolved solids' & unit == 'mg/l' ~ val * 0.001, # to g/l
      TRUE ~ val
    ),
    unit = case_when(
      unit == 'ft' ~ 'm',
      param == 'Iron' & unit == 'ug/l' ~ 'mg/l',
      param == 'Specific conductance' & unit == 'uS/cm' ~ 'mS/cm',
      unit == 'deg F' ~ 'deg C',
      unit == 'tons/ac ft' ~ 'g/l',
      param == 'Total dissolved solids' & unit == 'mg/l' ~ 'g/l',
      TRUE ~ unit
    )
  )

# final minor edits: harmonize param names, then take the daily average
# where a station/date/param has multiple readings
biodat <- biodat %>%
  mutate(
    param = case_when(
      param == 'Chlorophyll a (probe)' ~ 'Chlorophyll a',
      TRUE ~ param
    ),
    date = ymd(date)
  ) %>%
  select(-time) %>%
  group_by(station, date, param, unit) %>%
  summarise_all(mean, na.rm = TRUE) %>%
  filter(!is.na(val))
# wrangle phy -------------------------------------------------------------

phydatraw <- read_excel(here('data/raw', 'Refined Physical Result.xlsx'))

# selecting station, date, time, parameter, value, unit;
# taking unique values (making some assumptions)
phydat <- phydatraw %>%
  select(
    station = MonitoringLocationIdentifier,
    date = ActivityStartDate,
    time = `ActivityStartTime/Time`,
    param = `CharacteristicName`,
    val = ResultMeasureValue,
    unit = `ResultMeasure/MeasureUnitCode`
  ) %>%
  unique

# keep parameters with > 1000 observations, always keeping microcystin and
# chlorophyll regardless of count
phydat <- phydat %>%
  group_by(param) %>%
  mutate(
    n = n()
  ) %>%
  ungroup %>%
  filter(n > 1000 | grepl('^Microcystin|^Chloroph', param)) %>%
  select(-n)

# standardizing names for units
phydat <- phydat %>%
  mutate(
    unit = case_when(
      unit == '%' ~ '% saturatn',
      unit == 'ft/sec' ~ 'ft',
      unit == 'PSS' ~ 'ppt', # these are equivalent
      unit == 'std units' ~ 'None', # for ph
      unit == 'uS/cm @25C' ~ 'uS/cm',
      grepl('Microcystin', param) & unit == 'ppb' ~ 'ug/l',
      TRUE ~ unit
    )
  )

# remove turbidity as FTU, JTU (unknown units)
# remove do sat as g/l
# remove conductance as deg C
phydat <- phydat %>%
  filter(!(unit %in% c('FTU', 'JTU') & param %in% 'Turbidity')) %>%
  filter(!(unit %in% 'g/l' & param %in% 'Dissolved oxygen saturation')) %>%
  filter(!(unit %in% 'deg C' & param %in% 'Specific conductance'))

# standardizing values for different units; non-detects/trace set to zero
phydat <- phydat %>%
  mutate(
    val = case_when(
      val %in% c('ND', 'TRACE') ~ 0,
      TRUE ~ as.numeric(val)
    ),
    val = case_when(
      param == 'Depth, Secchi disk depth' & unit == 'ft' ~ val * 0.3048, # to m
      param == 'Iron' & unit == 'ug/l' ~ val * 0.001, # to mg/l
      param == 'Specific conductance' & unit == 'uS/cm' ~ val * 0.001, # to ms/cm
      # BUG FIX: was `param == 'deg F'` (never matches; 'deg F' is a unit),
      # mirroring the fix in the bio section above
      unit == 'deg F' ~ (val - 32) * 5 / 9 , # to deg C
      param == 'Total dissolved solids' & unit == 'tons/ac ft' ~ val * 0.735468, # to g/l
      param == 'Total dissolved solids' & unit == 'mg/l' ~ val * 0.001, # to g/l
      TRUE ~ val
    ),
    unit = case_when(
      unit == 'ft' ~ 'm',
      param == 'Iron' & unit == 'ug/l' ~ 'mg/l',
      param == 'Specific conductance' & unit == 'uS/cm' ~ 'mS/cm',
      unit == 'deg F' ~ 'deg C',
      unit == 'tons/ac ft' ~ 'g/l',
      param == 'Total dissolved solids' & unit == 'mg/l' ~ 'g/l',
      TRUE ~ unit
    )
  )

# final minor edits: harmonize param names, then take the daily average
# where a station/date/param has multiple readings
phydat <- phydat %>%
  mutate(
    param = case_when(
      param == 'Chlorophyll a (probe)' ~ 'Chlorophyll a',
      param == 'Microcystins' ~ 'Microcystin',
      TRUE ~ param
    ),
    date = ymd(date)
  ) %>%
  select(-time) %>%
  group_by(station, date, param, unit) %>%
  summarise_all(mean, na.rm = TRUE) %>%
  filter(!is.na(val))

# join phydat with biodat -------------------------------------------------

# these seem to have the same data
# join both
alldat <- full_join(biodat, phydat, c('station', 'date', 'param', 'unit'))
save(alldat, file = 'data/alldat.RData', compress = 'xz')
write.csv(alldat, file = here('data', 'alldat.csv'), row.names = F)
# lake discharge ----------------------------------------------------------

# Daily mean lake discharge: parse the date column, keep date and discharge
# only, then average the readings within each day.
disdatraw <- read_excel(here('data/raw', 'Lake discharge.xlsx'), skip = 11)

disdat <- disdatraw %>%
  mutate(date = as.Date(Date)) %>%
  select(date, discharge_cfs = `Dischage (cubic feet per second)`) %>%
  group_by(date) %>%
  summarise(discharge_cfs = mean(discharge_cfs, na.rm = TRUE))

save(disdat, file = 'data/disdat.RData', compress = 'xz')
write.csv(disdat, file = here('data', 'disdat.csv'), row.names = F)
|
971dcd6748ce2f31f98eb0c692c7f8ca968700cb
|
06590105205560d7b2d32ef25ed01c41de511e0b
|
/Code/Base/menuTable/module/exploreTable1TabModuleUI.R
|
c01add7a059dcdc94387c7c08436dfef61ecab2f
|
[] |
no_license
|
ai4ir/SmartSolutionShiny
|
c10009dd911f943432ab06dc305e469adb894aa3
|
1a8be48a0cb747ebc8d05cbd502b5d5b3c106962
|
refs/heads/master
| 2023-06-07T22:40:28.446335
| 2021-06-21T04:47:38
| 2021-06-21T04:47:38
| 305,106,080
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,158
|
r
|
exploreTable1TabModuleUI.R
|
# Shiny UI for the "Table 1" exploration tab: a toolbar row whose first
# column holds the row-variable picker button (label is Korean for
# "select row variable"); the remaining columns are layout spacers or
# currently-disabled controls (commented out), above a DT data table
# rendered by the matching server module.
exploreTable1TabModuleUI <- function(Id) {
  ns <- NS(Id)
  fluidPage(
    fluidRow(
      column(1, actionButton(ns("rowVar"), label="행 변수 선정")
      ),
      # spacer / disabled columns below keep the 12-unit grid layout
      column(1 #, actionButton(ns("colVar"), label="열 변수 선정")
      ),
      column(1
      ),
      column(1
      ),
      column(1
      ),
      column(2
      ),
      column(2
      ),
      column(2
      ),
      column(1 #, actionButton(ns("tableUpdate"), label="Table 갱신")
      )
    ),
    # table output filled by exploreTable1TabModule's output$table
    DT::dataTableOutput(ns("table"))
  )
}
# Server half of the "Table 1" exploration tab.  Lets the user pick a
# categorical row variable via a modal, then cross-tabulates it against the
# binary `bHOT` column of the (global) curSampleExplore data frame and
# shows per-level inside/outside percentages in a DT table.
# NOTE(review): relies on globals curSampleExplore, ModalRadioButtons and
# extractCatVarName defined elsewhere in the app — verify they are loaded.
exploreTable1TabModule <- function(input, output, session) {
  ns <- session$ns

  # open a modal listing the categorical variables to choose from
  observeEvent(input$rowVar, {
    catVar <- extractCatVarName(curSampleExplore)
    showModal(ModalRadioButtons(choiceNames=catVar, choiceValues=catVar,
                                okButtonName=ns("okRowVar"), labelStr="행변수 선정",
                                strExplain="행변수를 선정하세요",
                                modalRadioButtonsID=ns("selModalRowVar"),
                                failed = FALSE))
  })

  observeEvent(input$okRowVar, {
    rowVar <- input$selModalRowVar
    colVar <- "bHOT"
    removeModal()
    # bHOT must have two levels (Hot/Normal) for the percentages below
    if(length(unique(curSampleExplore[,colVar]))==1) {
      alert("양분화가 안되었습니다. 양분화 후 실행하세요!!!")
      return()
    }
    # BUG FIX: the original wrapped DT::renderDataTable inside renderPrint,
    # re-registering output$table from within another renderer on every
    # flush.  Register a single DT renderer that builds the table directly.
    output$table <- DT::renderDataTable({
      tab <- table(curSampleExplore[,rowVar], curSampleExplore[,colVar])
      dfSpread <- tab %>% as.data.frame() %>%
        spread(key=Var2, value=Freq)
      colnames(dfSpread)[1] <- rowVar
      # inSide/outSide: percentage of each level falling in Hot vs Normal
      dfSpread <- dfSpread %>% mutate(inSide=round(100* Hot/(Hot+Normal),2),
                                      outSide=round(100*Normal/(Hot+Normal),2))
      dfSpread <- dfSpread[order(dfSpread$outSide),]
      DT::datatable(dfSpread)
    })
  })
}
|
6b5adbded7ca7c3b555cd38b0a10394f5943ed8a
|
6cd917b5e4e86779b7eed51c56a4e0918de95f58
|
/R Programming/chaid 2.r
|
9a15e23a7b992f2b4ac18c7eaf4128bba97212f3
|
[] |
no_license
|
prabanch/Analytics
|
007c57c0c4e140fded113700d8506766910c04f9
|
829fd616b510893769df3b9308c07ee4c862dfa0
|
refs/heads/master
| 2020-05-27T01:37:18.984246
| 2017-11-19T19:10:31
| 2017-11-19T19:10:31
| 82,520,969
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,351
|
r
|
chaid 2.r
|
#Read the data (NOTE(review): machine-specific absolute path)
hr <- read.csv('C:/Users/prabanch/Desktop/PGBA/dm/HR.csv')
#Find the structure
str(hr)
# NOTE(review): attach() is discouraged — the bare Age/Attrition below
# resolve through the attached frame; hr$Age etc. would be clearer.
attach(hr)
View(hr)
#Summarise the data
summary(hr)
head(hr)
# logical vector marking which columns are factors; which() lists positions
f=sapply(hr, is.factor)
which(f)
# chi-square of Age vs Attrition (columns found via attach() above)
chisq.test(Age, Attrition)
# NOTE(review): chisq.test() on the whole data frame treats it as a
# contingency table — likely not the intended analysis; verify.
chisq.test(hr)
hr$Age
View(hr)
# Discretize every non-factor column of hr into 10 bins.
# FIXES vs the original while-loop:
#   - it started at i = 0 (hr[, 0] selects zero columns and errors) and
#     its bound `i <= length(names(hr))` overshot by one;
#   - it abused ifelse() for a side effect instead of a plain if;
#   - it passed seq(min, max) as cut()'s labels argument, whose length
#     rarely equals the 10 bins, which makes cut() error.
for (i in seq_along(hr)) {
  if (!is.factor(hr[, i])) {
    hr[, i] <- cut(hr[, i], 10, include.lowest = TRUE)
  }
}

# NOTE(review): the lines below assume Attrition and Age are still numeric
# at this point — verify against the loop above and the CSV's types.
hr$Attrition <- cut(hr$Attrition, 10, seq(from = min(hr$Attrition), to = max(hr$Attrition)), include.lowest = TRUE)
hr$Age <- cut(hr$Age, 5, include.lowest = TRUE)
hr$Attrition <- as.numeric(hr$Attrition)
hr$Age <- as.factor(hr$Age)
names(hr)

# Chi-square test of each of the first six columns against Attrition.
# FIX: the original started at i = 0 (hr[, 0] errors) and discarded each
# result; print() is required for output inside a loop.
for (i in 1:6) {
  print(chisq.test(hr[, i], hr$Attrition))
}
# Column-by-column chi-square tests against Attrition (run interactively so
# each result auto-prints).  Lines commented out below were skipped —
# presumably constant columns (EmployeeCount, StandardHours) where the test
# is undefined; verify against the data.
chisq.test(hr$Age, hr$Attrition)
chisq.test(hr$BusinessTravel, hr$Attrition)
chisq.test(hr$DailyRate, hr$Attrition)
chisq.test(hr$Department, hr$Attrition)
chisq.test(hr$DistanceFromHome, hr$Attrition)
chisq.test(hr$Education, hr$Attrition)
chisq.test(hr$EducationField, hr$Attrition)
#chisq.test(hr$EmployeeCount, hr$Attrition)
chisq.test(hr$EnvironmentSatisfaction, hr$Attrition)
chisq.test(hr$Gender, hr$Attrition)
chisq.test(hr$HourlyRate, hr$Attrition)
chisq.test(hr$JobInvolvement, hr$Attrition)
chisq.test(hr$MaritalStatus, hr$Attrition)
chisq.test(hr$MonthlyIncome, hr$Attrition)
chisq.test(hr$MonthlyRate, hr$Attrition)
chisq.test(hr$NumCompaniesWorked, hr$Attrition)
chisq.test(hr$Over18, hr$Attrition)
chisq.test(hr$OverTime, hr$Attrition)
chisq.test(hr$PercentSalaryHike, hr$Attrition)
chisq.test(hr$PerformanceRating, hr$Attrition)
chisq.test(hr$RelationshipSatisfaction, hr$Attrition)
#chisq.test(hr$StandardHours, hr$Attrition)
chisq.test(hr$StockOptionLevel, hr$Attrition)
chisq.test(hr$TotalWorkingYears, hr$Attrition)
chisq.test(hr$TrainingTimesLastYear, hr$Attrition)
chisq.test(hr$WorkLifeBalance, hr$Attrition)
chisq.test(hr$YearsAtCompany, hr$Attrition)
chisq.test(hr$YearsInCurrentRole, hr$Attrition)
chisq.test(hr$YearsSinceLastPromotion, hr$Attrition)
chisq.test(hr$YearsWithCurrManager, hr$Attrition)
#install.packages("partykit")
#install.packages("CHAID", repos="http://R-Forge.R-project.org")
library(partykit)
library(CHAID)
library(tree)

set.seed(10)
# CHAID tree over (nearly) all predictors
ctrl <- chaid_control(minbucket = 100, minsplit = 100, alpha2=.05, alpha4 = .05)
chaid.tree <-chaid(Attrition~Age+BusinessTravel+DailyRate+Department +
                     DistanceFromHome+EducationField+EmployeeCount+EnvironmentSatisfaction+HourlyRate+
                     JobInvolvement+JobLevel+JobRole+JobSatisfaction+MaritalStatus+
                     MonthlyIncome+NumCompaniesWorked+OverTime+
                     RelationshipSatisfaction+StandardHours+
                     StockOptionLevel+TotalWorkingYears+
                     TrainingTimesLastYear+WorkLifeBalance+
                     YearsAtCompany+YearsInCurrentRole+
                     YearsSinceLastPromotion+YearsWithCurrManager,
                   data=hr, control = ctrl)

# NOTE(review): this Age-only tree OVERWRITES the full-model tree above
# before it is ever printed or plotted — the print/plot below show the
# Age-only model.  Verify that is intended.
ctrl <- chaid_control(minbucket = 100, minsplit = 100, alpha2=.05, alpha4 = .05)
chaid.tree <-chaid(Attrition~Age ,data=hr, control = ctrl)
print(chaid.tree)
plot(chaid.tree, gp = gpar(fontsize=6))
text(chaid.tree, pretty=0)

## loading the library
library(rpart)
library(rpart.plot)
library(rattle)
library(RColorBrewer)

## setting the control parameter inputs for rpart
r.ctrl = rpart.control(minsplit=100, minbucket = 10, cp = 0, xval = 10)

## calling the rpart function to build the tree
m1 <- rpart(Attrition~Age+BusinessTravel+DailyRate+Department +
              DistanceFromHome+EducationField+EmployeeCount+EnvironmentSatisfaction+HourlyRate+
              JobInvolvement+JobLevel+JobRole+JobSatisfaction+MaritalStatus+
              MonthlyIncome+NumCompaniesWorked+OverTime+
              RelationshipSatisfaction+StandardHours+
              StockOptionLevel+TotalWorkingYears+
              TrainingTimesLastYear+WorkLifeBalance+
              YearsAtCompany+YearsInCurrentRole+
              YearsSinceLastPromotion+YearsWithCurrManager ,data=hr, method = "class", control = r.ctrl)

# NOTE(review): this third chaid.tree is computed but never printed or
# plotted below — only the rpart model m1 is shown.
ctrl <- chaid_control(minbucket = 100, minsplit = 100, alpha2=.05, alpha4 = .05)
chaid.tree <-chaid(Attrition~Age + OverTime + Over18 + TotalWorkingYears ,data=hr, control = ctrl)

print(m1)
fancyRpartPlot(m1)
|
517344a11f48efeb1f5aac290051c96cae46b396
|
fa853333757506c8415434340c0547347086e02c
|
/man/func8.Rd
|
d44f9ed51afc5c61953e7a84378c352b9b70de30
|
[] |
no_license
|
Xinyu-Jiang/XinyuJiangTools
|
efec2a5ea270a22f48a6adb1b7a80e15708a539c
|
ccd57b2b229c2f17c6c0f0d94b290d0842c542cc
|
refs/heads/master
| 2021-01-25T10:00:59.108334
| 2018-03-10T01:34:14
| 2018-03-10T01:34:14
| 123,335,145
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 282
|
rd
|
func8.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/XinyuJiangToolsRfunctions.R
\name{func8}
\alias{func8}
\title{Quiz 2 - 1}
\usage{
func8(a, x)
}
\arguments{
\item{a}{matrix}
\item{x}{vector}
}
\value{
object
}
\description{
calculates $x^T A^{-1} x$
}
|
486b8b9e690f70cb39418244e14231cca3bd4810
|
539f352d0959cd134a2846728281d43c5266e121
|
/src/example_analysis_config.R
|
5bfec8184de7b004803878df8c68e9b71a6ef550
|
[
"MIT"
] |
permissive
|
chendaniely/multidisciplinary-diffusion-model-experiments
|
56420f065de42f4fe080bc77988fcfe9592182c1
|
04edf28bb1bfadaff7baf82b8e3af02a3f34bf6d
|
refs/heads/master
| 2016-09-06T10:03:27.798846
| 2016-02-28T19:01:26
| 2016-02-28T19:01:26
| 24,729,632
| 2
| 0
| null | 2015-11-24T17:04:32
| 2014-10-02T17:46:19
|
R
|
UTF-8
|
R
| false
| false
| 1,798
|
r
|
example_analysis_config.R
|
library(testthat)

###############################################################################
# USER CONFIGURATIONS
###############################################################################
# Name of the batch output folder to analyse (edit to switch batches).
config_name_batch_simulation_output_folder <-
    '02-lens_batch_2014-12-23_03:41:22_sm_partial'
    # 'bkup_02-lens_batch_2014-12-23_03:41:22

###############################################################################
# DEFAULT CONFIGURATIONS
###############################################################################
config_simulation_results_folder <- '../results/simulations/'
config_save_df_list <- TRUE
# NOTE(review): get_num_cores_to_use() is defined elsewhere — this config
# file presumably gets sourced after the helper definitions; verify.
config_num_cores <- get_num_cores_to_use()

###############################################################################
# READ CONFIG FILE
###############################################################################
# Full path to the chosen batch folder.
config_batch_folder_path <- paste(config_simulation_results_folder,
                                  config_name_batch_simulation_output_folder,
                                  sep='')

###############################################################################
# Parameters from config file
###############################################################################
config_num_processing_units <- 20
config_num_sims_per_sim_set <- 5
config_num_agents <- 10
config_num_ticks <- 10000
config_num_parameter_sets_no_a <- 30 # num parameter sets w/out num agents
config_num_delta_values <- 5
config_num_epsilon_values <- 6
# sanity check: the delta x epsilon grid must equal the declared set count
expect_equal(config_num_delta_values * config_num_epsilon_values,
             config_num_parameter_sets_no_a)

# NOTE(review): calculate_*_value_columns() are defined elsewhere — verify
# they are in scope when this file is sourced.
config_activation_value_columns <-
    calculate_activation_value_columns(config_num_processing_units)
config_prototype_value_columns <-
    calculate_prototype_value_columns(config_num_processing_units)
|
4d409641aeca54915bf0504924fbc0bc57e0aece
|
d84a3f8b27940f2ac851d633b6ed2a47edb029b5
|
/code_analysis/RNAseq20_twoaxes_effects_allgenes.R
|
ec72796eb83eabd05f5f426bc9ed79ed5bc6de82
|
[] |
no_license
|
jalapic/mouse_socialhierarchy_immune
|
4621fc7ab34460f373033c0bf35fc8faee7fd9b7
|
38579026acf5262acb327dc3ff3bd08c1d7dc4bf
|
refs/heads/main
| 2023-08-15T10:30:23.494090
| 2021-08-23T03:36:54
| 2021-08-23T03:36:54
| 398,898,170
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,634
|
r
|
RNAseq20_twoaxes_effects_allgenes.R
|
# Tissue to analyse; the second assignment wins, so this run uses "Spleen".
# Comment one line out to switch tissues.
my_tissue <- "Liver"
my_tissue <- "Spleen"

# FIX: both branches of the original if/else read exactly the same file,
# so the conditional was redundant — collapsed to a single readRDS().
limma_list <- readRDS(glue("results_RNAseqRDS/limma_{my_tissue}_second.RDS"))

# status contrast: gene symbol, log fold-change, p-value
limma_list$status %>%
  select(symbol, logFC, P.Value) %>%
  rename(logFC_status = logFC,
         pval_status = P.Value) -> status_df

# CORT contrast: same columns
limma_list$cort %>%
  select(symbol, logFC, P.Value) %>%
  rename(logFC_cort = logFC,
         pval_cort = P.Value) -> cort_df

# one row per gene with both contrasts; classify the significance pattern
# at p < 0.05 into Both / Status-specific / CORT-specific / None
df <- full_join(status_df, cort_df) %>%
  mutate(Sig = ifelse(pval_status >= 0.05 & pval_cort >= 0.05, "None",
                      ifelse(pval_status < 0.05 & pval_cort < 0.05,"Both",
                             ifelse(pval_status < 0.05,"Status-specific","CORT-specific")))) %>%
  mutate(Sig = factor(Sig, levels =c("Both","Status-specific","CORT-specific","None")))
# axis limit shared by both logFC axes
lim = 1.5

# Plot 1 (not saved — its png()/dev.off() calls are commented out):
# all genes, uncolored, status effect vs CORT effect.
# png(filename = glue("results_figures/twoaxis_{my_tissue}_allgene.png"),
#     width = 7, height = 7.6, units = "cm", res = 600)
df %>%
  ggplot(aes(logFC_status, logFC_cort))+
  geom_abline(slope = 1, intercept = 0, color ='grey', linetype = 'dashed')+
  geom_abline(slope = -1, intercept = 0, color ='grey', linetype = 'dashed')+
  geom_hline(yintercept = 0, color ='grey', linetype = 'dashed')+
  geom_vline(xintercept = 0, color ='grey', linetype = 'dashed')+
  geom_point(shape = 21, size = 0.3)+
  labs(x = "Status effect (Sub <-> Dom)",
       y = "CORT effect (Low <-> High)",
       color = "",
       title = glue("{my_tissue}"))+
  theme_bw(base_size = 7)+
  theme(legend.position = "top")+
  scale_x_continuous(limits = c(-lim,lim), expand=expansion(mult=c(0,0.0)))+
  scale_y_continuous(limits = c(-lim,lim), expand=expansion(mult=c(0,0.0)))
# invisible(dev.off())

# Plot 2 (saved to PNG): same scatter, layered by significance class so
# "Both" genes draw on top; colors are hard-coded per class.
png(filename = glue("results_figures/twoaxis_{my_tissue}_allgene2z.png"),
    width = 7, height = 7.6, units = "cm", res = 600)
ggplot(df,aes(color = Sig))+
  geom_abline(slope = 1, intercept = 0, color ='grey', linetype = 'dashed')+
  geom_abline(slope = -1, intercept = 0, color ='grey', linetype = 'dashed')+
  geom_hline(yintercept = 0, color ='grey', linetype = 'dashed')+
  geom_vline(xintercept = 0, color ='grey', linetype = 'dashed')+
  geom_point(data = df %>%
               filter(Sig == "None"),
             aes(logFC_status, logFC_cort),
             color = "grey", #fill = "grey",
             shape = 21, size = 0.3)+
  geom_point(data = df %>%
               filter(Sig == "Status-specific"),
             aes(logFC_status, logFC_cort),
             color = "#E7B800",#fill = "#d8b365",
             shape = 21, size = 0.3)+
  geom_point(data = df %>%
               filter(Sig == "CORT-specific"),
             aes(logFC_status, logFC_cort),
             color = "#00AFBB" ,#fill = "#5ab4ac",
             shape = 21, size = 0.3)+
  geom_point(data = df %>%
               filter(Sig == "Both"),
             aes(logFC_status, logFC_cort),
             color = "#FC4E07",#fill = "#fdc086",
             shape = 21, size = 0.3)+
  labs(x = "Status effect (Sub <-> Dom)",
       y = "CORT effect (Low <-> High)",
       color = "",
       title = glue("{my_tissue}"))+
  theme_bw(base_size = 7)+
  theme(legend.position = "top")+
  scale_x_continuous(limits = c(-lim,lim), expand=expansion(mult=c(0,0.0)))+
  scale_y_continuous(limits = c(-lim,lim), expand=expansion(mult=c(0,0.0)))
invisible(dev.off())

# Plot 3 (not saved): colored version with a legend; kept only so its
# legend can be extracted and written out below.
# png(filename = glue("results_figures/twoaxis_{my_tissue}_allgene3.png"),
#     width = 7, height = 7.6, units = "cm", res = 600)
df %>%
  ggplot(aes(logFC_status, logFC_cort, color = Sig))+
  geom_abline(slope = 1, intercept = 0, color ='grey', linetype = 'dashed')+
  geom_abline(slope = -1, intercept = 0, color ='grey', linetype = 'dashed')+
  geom_hline(yintercept = 0, color ='grey', linetype = 'dashed')+
  geom_vline(xintercept = 0, color ='grey', linetype = 'dashed')+
  geom_point(shape = 21, size = 3.3)+
  labs(x = "Status effect (Sub <-> Dom)",
       y = "CORT effect (Low <-> High)",
       color = "P-value < 0.05",
       fill = "P-value < 0.05",
       title = glue("{my_tissue}"))+
  theme_bw(base_size = 7)+
  theme(legend.position = c(0.8,0.2),
        legend.key.height = unit(0,"mm"))+
  scale_x_continuous(limits = c(-lim,lim), expand=expansion(mult=c(0,0.0)))+
  scale_y_continuous(limits = c(-lim,lim), expand=expansion(mult=c(0,0.0)))+
  scale_color_manual(values = c("#FC4E07","#E7B800","#00AFBB" ,"grey"))-> ppp
# dev.off()

# Write the extracted legend of plot 3 to its own PNG.
# NOTE(review): get_legend()/grid.arrange() come from cowplot/gridExtra —
# presumably loaded earlier in the project; verify.
png(filename = glue("results_figures/legend_allgene.png"),
    width = 7, height = 7.6, units = "cm", res = 600)
leg <- get_legend(ppp)
grid.arrange(leg)
dev.off()
|
f27c1940690b0aa644892d6553aa54e70a19b6eb
|
506865b72bc04160b1a965e7d880490800085f17
|
/bauhaus/scripts/R/ccsMappingPlots.R
|
06bf97af6d5b87d722abd40a58c58c2286934a82
|
[] |
no_license
|
nlhepler/bauhaus
|
aec1e306f92fe824ea60cd8f81195c4a8024400a
|
7d2e8453a9028a84baf5c9b1f52fd5d63337f1d7
|
refs/heads/master
| 2020-04-03T13:02:39.138674
| 2016-08-20T18:45:47
| 2016-08-20T18:45:47
| 66,022,485
| 0
| 0
| null | 2016-08-20T18:45:48
| 2016-08-18T18:48:49
|
Python
|
UTF-8
|
R
| false
| false
| 5,696
|
r
|
ccsMappingPlots.R
|
library(pbbamr)
library(dplyr)
library(ggplot2)
library(xml2)
library(stringr)
library(feather)
# Convert an accuracy (fraction of bases correct) to a Phred-scaled
# quality value, capped at `maximum` QV by flooring the error rate at
# 10^(-maximum/10).  Vectorized over `acc`.
toPhred <- function(acc, maximum = 60) {
    floor_err <- 10^(-maximum / 10)
    error_rate <- pmax(1 - acc, floor_err)
    -10 * log10(error_rate)
}
# Read the condition table CSV located at the root of a workflow output tree.
getConditionTable <- function(wfOutputRoot)
{
    csv_path <- file.path(wfOutputRoot, "condition-table.csv")
    read.csv(csv_path)
}
## This is not a good idea at all really---the dataset could contain
## "filter" operations that we are ignoring via this mechanism. We
## need a better solution in pbbamr, to somehow provide access to a
## virtual pbi.
## Returns the unique BAM resource paths referenced by a dataset XML file.
listDatasetContents <- function(datasetXmlFile)
{
    doc <- read_xml(datasetXmlFile)
    namespaces <- xml_ns(doc)
    resourceNodes <- xml_find_all(doc, ".//pbbase:ExternalResource/@ResourceId", namespaces)
    resourceFiles <- sapply(resourceNodes, xml_text)
    bamFiles <- resourceFiles[str_detect(resourceFiles, ".*.bam$")]
    unique(bamFiles)
}
# Load the PBI indices of (a sampled subset of) the BAMs in one dataset XML
# and return a tidy per-read data frame tagged with the condition name.
# sampleFraction < 1 subsamples at the BAM-file level, not the read level.
makeCCSDataFrame1 <- function(datasetXmlFile, conditionName, sampleFraction=1.0)
{
    print(datasetXmlFile)
    ## Do subsampling at the BAM level
    allBams <- listDatasetContents(datasetXmlFile)
    if (sampleFraction < 1) {
        # at least one BAM is always kept
        set.seed(42) # Do we want to do this globally instead?
        n <- max(1, floor(length(allBams)*sampleFraction))
        sampledBams <- as.character(sample_n(data.frame(fname=allBams), n)$fname)
    } else {
        sampledBams <- allBams
    }
    pbis <- lapply(sampledBams, pbbamr::loadPBI,
                   loadSNR = TRUE, loadNumPasses = TRUE, loadRQ = TRUE)
    ## This would be more efficient, but it crashes!
    ##do.call(bind_rows, sampledBams)
    combinedPbi <- do.call(rbind, pbis)
    ## TODO: moviename??
    ## TODO: readlength not yet available, unfortunately, due to the
    ## qstart/qend convention for CCS reads.
    # one row per read: identity computed against the aligned template span;
    # Phred-scaled columns use toPhred() (capped at QV60 by default)
    with(combinedPbi,
         tbl_df(data.frame(
             Condition=conditionName,
             NumPasses = np,
             HoleNumber = hole,
             ReadQuality = qual,
             ReadQualityPhred = toPhred(qual),
             Identity = matches/(tend-tstart),
             IdentityPhred = toPhred(matches/(tend-tstart)),
             NumErrors=(mismatches+inserts+dels),
             TemplateSpan=(tend-tstart),
             ReadLength=(aend-astart), ## <-- this is a lie, see above!
             SnrA = snrA,
             SnrC = snrC,
             SnrG = snrG,
             SnrT = snrT)))
}
# Build one combined per-read CCS data frame across every condition listed
# in the workflow's condition table.  sampleFraction is forwarded to
# makeCCSDataFrame1() (BAM-level subsampling).
makeCCSDataFrame <- function(wfOutputRoot, sampleFraction=1.0)
{
    ct <- getConditionTable(wfOutputRoot)
    conditions <- unique(ct$Condition)
    # FIX: vapply is type-stable (sapply can simplify unexpectedly, e.g.
    # to list() on zero conditions)
    dsetXmls <- vapply(conditions,
                       function(condition) file.path(wfOutputRoot, condition, "ccs_mapping/all_movies.consensusalignments.xml"),
                       character(1))
    # Map() is the idiomatic spelling of mapply(..., SIMPLIFY = FALSE);
    # the scalar sampleFraction is recycled across conditions as before
    dfs <- Map(makeCCSDataFrame1, dsetXmls, conditions, sampleFraction = sampleFraction)
    tbl_df(do.call(rbind, dfs))
}
# Two cumulative-yield plots per condition: read count and base-yield
# fraction as a function of the CCS accuracy (Phred) cutoff.
doCCSCumulativeYieldPlots <- function(ccsDf)
{
    # order reads from most to least accurate, accumulate counts/bases,
    # then thin to every 10th row to keep the plot light
    cumByCut <- function(x) {
        qvOrder <- order(x$IdentityPhred, decreasing=TRUE)
        xo <- x[qvOrder,]
        xo$NumReads <- seq(1, nrow(xo))
        xo$YieldFraction <- cumsum(xo$ReadLength) / sum(xo$ReadLength)
        xo[seq(1,nrow(xo), by=10),]
    }
    ## yield <- ddply(ccsDf, "Condition", cumByCut)
    yield <- ccsDf %>% group_by(Condition) %>% do(cumByCut(.))
    ## NumReads on y-axis
    p <- qplot(IdentityPhred, NumReads, colour=Condition, data=yield, main="Yield of reads by CCS accuracy")
    print(p)
    ## Fraction of reads on y-axis
    p <- qplot(IdentityPhred, YieldFraction, colour=Condition, data=yield, main="Fractional yield by CCS accuracy")
    print(p)
}
# Density plot of the NumPasses distribution, one curve per condition.
doCCSNumPassesHistogram <- function(ccsDf)
{
    plt <- ggplot(ccsDf, aes(NumPasses, colour = Condition)) +
        geom_density() +
        ggtitle("NumPasses distribution (density)")
    print(plt)
}
# Empirical CDF of the NumPasses distribution, one step curve per condition.
doCCSNumPassesCDF <- function(ccsDf)
{
    plt <- ggplot(ccsDf, aes(x = NumPasses, color = Condition)) +
        stat_ecdf(geom = "step") +
        ggtitle("NumPasses distribution (ECDF)")
    print(plt)
}
## calibration plot...
# Scatter of predicted read quality vs empirical identity (raw and Phred
# scale), one facet per condition, with the y = x calibration line in red.
doCCSReadQualityCalibrationPlots <- function(ccsDf)
{
    # NOTE(review): sample_n without a seed makes the plotted subset
    # non-reproducible run to run — confirm that is acceptable
    ccsDf <- sample_n(ccsDf, 5000)
    p <- qplot(ReadQuality, Identity, alpha=I(0.1), data=ccsDf) + facet_grid(.~Condition) +
        geom_abline(slope=1, color="red") +
        ggtitle("Read quality versus empirical accuracy")
    print(p)
    p <- qplot(ReadQualityPhred, IdentityPhred, alpha=I(0.1), data=ccsDf) + facet_grid(.~Condition) +
        geom_abline(slope=1, color="red") +
        ggtitle("Read quality versus empirical accuracy (Phred scale)")
    print(p)
}
# Mean accuracy (Phred) as a function of NumPasses, per condition, for
# NumPasses < 20; point size/weight reflect total bases at that NumPasses.
doCCSTitrationPlots <- function(ccsDf)
{
    # max(1, sum(NumErrors)) avoids a zero error count producing QV Inf
    accVsNp <- ccsDf %>% group_by(Condition, NumPasses) %>% summarize(
        MeanIdentity=1-(max(1, sum(NumErrors))/sum(ReadLength)),
        TotalBases=sum(ReadLength)) %>% mutate(
        MeanIdentityPhred=toPhred(MeanIdentity))
    p <- qplot(NumPasses, MeanIdentityPhred, size=TotalBases, weight=TotalBases, data=filter(accVsNp, NumPasses<20)) +
        facet_grid(.~Condition) + geom_smooth()
    print(p)
}
# Render the full CCS report in order: titration, NumPasses density and
# ECDF, read-quality calibration, and cumulative yield.
doAllCCSPlots <- function(ccsDf)
{
    doCCSTitrationPlots(ccsDf)
    doCCSNumPassesHistogram(ccsDf)
    doCCSNumPassesCDF(ccsDf)
    doCCSReadQualityCalibrationPlots(ccsDf)
    doCCSCumulativeYieldPlots(ccsDf)
}
## Main, when run as a script.
# Usage: Rscript ccsMappingPlots.R <workflow-output-root>
# Writes ccs-mapping.feather and a multi-page ccs-mapping.pdf to the CWD.
if (!interactive())
{
    args <- commandArgs(TRUE)
    wfRootDir <- args[1]
    ccsDf <- makeCCSDataFrame(wfRootDir)
    ##write.csv(ccsDf, "ccs-mapping.csv") ## TOO BIG, TOO SLOW
    write_feather(ccsDf, "ccs-mapping.feather")
    pdf("ccs-mapping.pdf", 11, 8.5)
    doAllCCSPlots(ccsDf)
    dev.off()
}

# Disabled scratch block (if (0) never runs) kept for interactive use.
if (0) {
    ##wfRoot = "/home/UNIXHOME/dalexander/Projects/Analysis/EchidnaConsensus/2kLambda_4hr_postTrain_CCS/"
    wfRoot <- "/home/UNIXHOME/ayang/projects/bauhaus/Echidna_PerfVer/EchidnaVer_CCS_postTrain"
    df <- makeCCSDataFrame(wfRoot, 1.0)
}
|
85b60ff2e83482aada15493564b9fc381d4168a6
|
2c570af4ad5f6015c5d144a1e58dbc76492abea2
|
/R/extract.R
|
42d81690d68e3c7d34ddcfb056147a455deaeb93
|
[] |
no_license
|
jhuovari/statfitools
|
1fdcf8a1e1b287302f060702903c4a97d7df6a6d
|
4fd48239419c95a81cbe6368cd36425328ca7d7b
|
refs/heads/master
| 2020-12-24T11:46:34.144165
| 2016-11-07T06:54:28
| 2016-11-07T06:54:28
| 73,012,116
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,444
|
r
|
extract.R
|
#' Extract a code component
#'
#' Uses a regular expression to take everything before the first space,
#' e.g. the municipality code in "508 Mantta-Vilppula". Useful to extract
#' codes from code-name variables. Factors are handled by recoding levels.
#'
#' @param x A character vector (or a factor).
#' @param numbers_as_numeric A logical. If TRUE and every extracted code
#'   parses as a number, the result is returned as numeric.
#' @return A character or numeric vector (or a factor).
#' @export
#' @seealso \code{\link{extract_name}}
#'
#' @examples
#' extract_code("508 Mantta-Vilppula")
#' extract_code("508 Mantta-Vilppula", numbers_as_numeric = FALSE)
extract_code <- function (x, numbers_as_numeric = TRUE) {
  # factors: recode the levels, keep the factor structure
  if (is.factor(x)) {
    levels(x) <- extract_code(levels(x), numbers_as_numeric = numbers_as_numeric)
    return(x)
  }
  # drop the first space and everything after it
  codes <- sub(" .*", "", x)
  if (!numbers_as_numeric) {
    return(codes)
  }
  # convert only when every code parses; otherwise keep characters
  parsed <- suppressWarnings(as.numeric(codes))
  if (any(is.na(parsed))) codes else parsed
}
#' Extract only a non-code component
#'
#' Uses a regular expression to strip a leading code (a run of
#' alphanumeric/punctuation characters plus the following spaces) from a
#' character string, e.g. "S1311 Valtionhallinto" becomes
#' "Valtionhallinto". Factors are handled by recoding their levels.
#'
#' @param x A character vector (or a factor).
#' @return A character vector (or a factor).
#' @export
#' @seealso \code{\link{extract_code}}
#'
#' @examples
#' extract_name("S1311 Valtionhallinto")
extract_name <- function (x) {
  # factors: recode the levels, keep the factor structure
  if (is.factor(x)) {
    levels(x) <- extract_name(levels(x))
    return(x)
  }
  # anchored pattern matches at most once, so sub() suffices
  sub("^[[:alnum:][:punct:]]+ +", "", x)
}
|
27f2f54f4d6aaefbe6dbe8e9021ab3b0a64f1416
|
72d9009d19e92b721d5cc0e8f8045e1145921130
|
/acc/R/readRaw.R
|
93ca046a81b01e22a0c3a3e7d398710683f8f17b
|
[] |
no_license
|
akhikolla/TestedPackages-NoIssues
|
be46c49c0836b3f0cf60e247087089868adf7a62
|
eb8d498cc132def615c090941bc172e17fdce267
|
refs/heads/master
| 2023-03-01T09:10:17.227119
| 2021-01-25T19:44:44
| 2021-01-25T19:44:44
| 332,027,727
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,681
|
r
|
readRaw.R
|
#' Read raw ActiGraph accelerometer data
#'
#' Parses a raw ActiGraph CSV export line by line (through an iterator, so the
#' file is never held in memory at once) and summarizes it as MAD, AI, or
#' resting-state standard deviations, depending on \code{type}.
#'
#' @param filepath Path to the raw ActiGraph CSV file. The first 11 lines are
#'   expected to be the ActiGraph header (device/sampling rate, serial number,
#'   start time, start date, ...), followed by one x,y,z sample per line.
#' @param type Summary to compute: "mad" (mean amplitude deviation over
#'   6-second windows), "ai" (activity index over 1-second windows; requires
#'   \code{resting}), or "resting" (per-axis resting-state SDs). A few case
#'   variants ("MAD", "Mad", ...) are accepted.
#' @param resting Used only for \code{type = "ai"}: resting-state standard
#'   deviations, either a length-3 numeric vector (one per axis) or a single
#'   value recycled to all three axes.
#' @return For "mad": a data frame with Time, MAD and pa.category. For "ai":
#'   a data frame with Time and AI. For "resting": a length-3 numeric vector
#'   of mean per-axis standard deviations.
#' @export
#' @importFrom utils head tail
#' @importFrom R.utils countLines
#' @importFrom iterators ireadLines nextElem
#' @importFrom Rcpp evalCpp
#' @useDynLib acc
readRaw <- function(filepath,type,resting=NULL){
  filelength <- countLines(filepath)
  fname <- filepath
  # Line iterator over the raw file; every nextElem() consumes one line,
  # so the order of the calls below is load-bearing.
  it <- ireadLines(con=fname,warn=FALSE)
  # NOTE(review): the connection opened by ireadLines is never closed.
  #on.exit(close(file(fname)))
  # Header line 1: device model, sampling rate ("... at NN Hz ...") and
  # date format. If the expected markers are absent, myhertz/device stay
  # undefined and the cat()/windowSize code below will fail.
  line1 <- nextElem(it)
  if(grepl("at", line1) == TRUE & grepl("Hz", line1) == TRUE){
    hertzPre <- sub(".*at ", "", line1)
    myhertz <- strsplit(hertzPre, " ")[[1]][1]
    devicePre <- sub(".*ActiGraph ", "", line1)
    device <- strsplit(devicePre, " ")[[1]][1]
    dateformatPre <- sub(".*date format ", "", line1)
    dateformat <- strsplit(dateformatPre, " ")[[1]][1]
  }
  # Header line 2: serial number (trailing ",," stripped).
  line2 <- nextElem(it)
  serialNumberPre <- sub(".*Serial Number: ", "", line2)
  serialNumberPre2 <- strsplit(serialNumberPre, " ")[[1]][1]
  serialNumber <- gsub(',,','',serialNumberPre2)
  # Header line 3: recording start time (HH:MM:SS).
  line3 <- nextElem(it)
  startTimePre <- sub(".*Start Time ", "", line3)
  startTimePre2 <- strsplit(startTimePre, " ")[[1]][1]
  startTime <- gsub(',,','',startTimePre2)
  # Header line 4: recording start date; the ISOdate() calls below assume
  # MM/DD/YYYY -- presumably matching `dateformat`, but this is not checked.
  line4 <- nextElem(it)
  startDatePre <- sub(".*Start Date ", "", line4)
  startDatePre2 <- strsplit(startDatePre, " ")[[1]][1]
  startDate <- gsub(',,','',startDatePre2)
  cat(noquote(paste("Raw data read for ", device, " device.", sep = "")))
  cat("\n")
  cat(noquote(paste("Start date is ", startDate, " and sampling rate is ",
                    myhertz, " Hz.", sep = "")))
  cat("\n")
  # Skip the remaining 7 header lines (11 header lines total -- matching the
  # `filelength - 11` window counts below) so the iterator sits on the first
  # x,y,z sample.
  invisible(nextElem(it));
  invisible(nextElem(it));
  invisible(nextElem(it));
  invisible(nextElem(it));
  invisible(nextElem(it));
  invisible(nextElem(it));
  invisible(nextElem(it));
  # MAD: mean amplitude deviation of the vector magnitude per 6-s window.
  if(type == "mad" | type == "MAD" | type == "Mad"){
    windowSize <- 6*as.numeric(as.character(myhertz))
    numit <- floor((filelength[1]-11)/windowSize)
    mad <- rep(NA, numit)
    for(j in 1:numit){
      # A six second window
      mywindow <- matrix(rep(NA, windowSize*3),ncol=3)
      for(i in 1:windowSize){
        myline <- strsplit(nextElem(it), ",")[[1]]
        mywindow[i,] <- c(as.numeric(myline[1]),
                          as.numeric(myline[2]),
                          as.numeric(myline[3]))
      }
      # Triaxial vector magnitude, then its mean absolute deviation from
      # the window mean (the MAD statistic).
      vm1 <- sqrt(mywindow[,1]^2 + mywindow[,2]^2 + mywindow[,3]^2)
      vm1.mean <- mean(vm1,na.rm=TRUE)
      mad[j] <- mean(abs(vm1-vm1.mean),na.rm=TRUE)
    }
    # One timestamp per 6-s window, anchored at the recorded start date/time.
    timeseq <- seq(ISOdate(strsplit(startDate, "/")[[1]][3],
                           strsplit(startDate, "/")[[1]][1],
                           strsplit(startDate, "/")[[1]][2],
                           hour = strsplit(startTime, ":")[[1]][1],
                           min = strsplit(startTime, ":")[[1]][2],
                           sec = strsplit(startTime, ":")[[1]][3], tz = "GMT"),
                   by = "6 sec", length.out=length(mad))
    # Intensity categories. NOTE(review): the 0.9 / 4.14 cutpoints are
    # hard-coded -- confirm they match the intended MAD intensity thresholds
    # and units for this device.
    madCat <- mad
    madCat[mad < .9] <- "Below Moderate"
    madCat[mad >= .9 & mad < 4.14] <- "Moderate"
    madCat[mad >= 4.14] <- "Vigorous"
    mydata <- data.frame(Time = timeseq,
                         MAD = mad,
                         pa.category = madCat)
  }
  # AI: activity index per 1-s window, relative to resting-state SDs.
  if(type == "ai" | type == "AI" | type == "Ai"){
    # Accept either per-axis resting SDs (length 3) or one value for all
    # axes. NOTE(review): any other `resting` input leaves rsd undefined.
    if((is.numeric(resting)==TRUE) & (length(resting)==3)){rsd <- resting}
    if((is.numeric(resting)==TRUE) & (length(resting)==1)){rsd <- rep(resting,3)}
    windowSize <- as.numeric(as.character(myhertz))
    numit <- floor((filelength[1]-11)/windowSize)
    ai <- rep(NA, numit)
    for(j in 1:numit){
      # A one second window
      mywindow <- matrix(rep(NA, windowSize*3),ncol=3)
      for(i in 1:windowSize){
        myline <- strsplit(nextElem(it), ",")[[1]]
        mywindow[i,] <- c(as.numeric(myline[1]),
                          as.numeric(myline[2]),
                          as.numeric(myline[3]))
      }
      # Sum over axes of the relative SD increase above resting, floored
      # at zero so quiescent windows score 0.
      sd11 <- sd(mywindow[,1],na.rm=TRUE)
      sd12 <- sd(mywindow[,2],na.rm=TRUE)
      sd13 <- sd(mywindow[,3],na.rm=TRUE)
      ai[j] <-max((((sd11-rsd[1])/rsd[1]) + ((sd12-rsd[2])/rsd[2]) + ((sd13-rsd[3])/rsd[3])),0)
    }
    # One timestamp per 1-s window.
    timeseq <- seq(ISOdate(strsplit(startDate, "/")[[1]][3],
                           strsplit(startDate, "/")[[1]][1],
                           strsplit(startDate, "/")[[1]][2],
                           hour = strsplit(startTime, ":")[[1]][1],
                           min = strsplit(startTime, ":")[[1]][2],
                           sec = strsplit(startTime, ":")[[1]][3], tz = "GMT"),
                   by = "1 sec", length.out=length(ai))
    mydata <- data.frame(Time = timeseq, AI = ai)
  }
  # Resting state: mean per-axis SD over all 1-s windows. The output is
  # suitable as the `resting` argument for a later type = "ai" call.
  if(type == "resting" | type == "Resting" | type == "RESTING"){
    windowSize <- as.numeric(as.character(myhertz))
    numit <- floor((filelength[1]-11)/windowSize)
    resting <- matrix(rep(NA,numit*3),ncol=3)
    for(j in 1:numit){
      # A one second window
      mywindow <- matrix(rep(NA, windowSize*3),ncol=3)
      for(i in 1:windowSize){
        myline <- strsplit(nextElem(it), ",")[[1]]
        mywindow[i,] <- c(as.numeric(myline[1]),
                          as.numeric(myline[2]),
                          as.numeric(myline[3]))
      }
      sd11 <- sd(mywindow[,1],na.rm=TRUE)
      sd12 <- sd(mywindow[,2],na.rm=TRUE)
      sd13 <- sd(mywindow[,3],na.rm=TRUE)
      resting[j,] <- c(sd11, sd12, sd13)
    }
    mydata <- c(mean(resting[,1],na.rm=TRUE),mean(resting[,2],na.rm=TRUE),mean(resting[,3],na.rm=TRUE))
  }
  mydata
}
|
2ad96dfd017a010605e933fe73f65f2aa5330636
|
fd238af8ac37e4080e533e27c3702c8d99dd3d1b
|
/R/opt_paramsNLS.R
|
9920218d4de84a828427978f06b8783bcc41d212
|
[
"MIT"
] |
permissive
|
SharpRT/NFRR_Philippines
|
4f0fd1a48816c2dbe6622ca19d8c025842c00fe8
|
258958f19be3df9de9135ba4f0e464c2b21c5733
|
refs/heads/main
| 2023-04-09T11:58:10.359390
| 2021-12-16T15:19:36
| 2021-12-16T15:19:36
| 410,866,493
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,149
|
r
|
opt_paramsNLS.R
|
#' @importFrom stats nls
NULL
#' Performs non-linear least squares parameter optimisation.
#'
#' Performs non-linear least squares parameter optimisation. Residuals come
#' from \code{opt_residuals()}; the nls formula is assembled as a string at
#' run time so the same routine works with any named parameter set.
#' @param initParams initial guess parameter values for optimiser to improve upon
#' @param plotSum target experimental data
#' @param tColStr name of matrix's column to be used as measure of time, e.g. "DAT"
#' @param lower lower bound of parameter values for optimiser to search
#' @param upper upper bound of parameter values for optimiser to search
#' @param modelFunc function containing a single (variety-agnostic) model to
#'   be solved; used when neither modelFuncRes nor modelFuncSus is given
#' @param modelFuncRes function containing the resistant variety model to be solved
#' @param modelFuncSus function containing the susceptible variety model to be solved
#' @param control nls control object
#' @param algorithm algorithm passed on to \code{nls()}; the default "port"
#'   is the one that honours the \code{lower}/\code{upper} bounds
#' @param ... unmodified parameters (incl. isNonDim, N0, args(optional), plotArea and residFunc) to be passed to \code{opt_residuals()}
#' @return non-linear least squares (nls) object
#' @export
opt_paramsNLS = function(
  initParams, lower=NULL, upper=NULL,
  plotSum, tColStr,
  modelFunc=NULL,
  modelFuncRes=NULL, modelFuncSus=NULL, control=nls.control(), algorithm="port",
  ...
){
  # NOTE(review): looks like leftover debug output -- consider removing.
  print(control)
  #split the data according to plot type.
  # When exactly one variety-specific model is supplied, keep only the rows
  # for that plot type ("res" or "sus" substring in Plot_Type).
  if(!is.null(modelFuncRes) && is.null(modelFuncSus)){
    plotSum = plotSum[grepl("res", plotSum$Plot_Type),]
  } else if(!is.null(modelFuncSus) && is.null(modelFuncRes)){
    plotSum = plotSum[grepl("sus", plotSum$Plot_Type),]
  }
  # Calculate Sum of Squares (SS) of model produced with initParams.
  residuals = opt_residuals(
    params=initParams, plotSum=plotSum, tColStr=tColStr,
    modelFuncRes=modelFuncRes, modelFuncSus=modelFuncSus, modelFunc=modelFunc,
    ...
  )
  # fitFunc=fitFunc,
  # lower=lower,upper=upper,
  # control=control,
  # Baseline SS for the "improvement" report at the end.
  initSS = sum((residuals)^2,na.rm = T)
  #Wrapper to make nls call compatible with residFunc.
  # nls passes the data columns positionally by name; the wrapper rebuilds a
  # plotSum-shaped data frame and forwards the optimiser's parameter vector.
  residWrapper = function(numTillers, DAT, Plot_Type, Plot_f, site, Line, params){
    return(opt_residuals(
      plotSum=data.frame(numTillers, DAT, Plot_Type, Plot_f, site, Line),
      params=params, tColStr=tColStr,
      modelFuncRes=modelFuncRes, modelFuncSus=modelFuncSus, modelFunc=modelFunc,
      ...
    ))
  }
  #To generalise nls code to accept different parameter sets.
  # The parameter names from initParams are spliced into a cbind() call so
  # nls can vary them; "[1,]" collapses the recycled cbind back to one row.
  formulaString=paste0(
    "~residWrapper(",
    "numTillers=numTillers,",
    "DAT=", tColStr, ",",
    "Plot_Type=Plot_Type,",
    "Plot_f=Plot_f,",
    "site=site,",
    "Line=Line,",
    "params=cbind(",
    paste0(names(initParams),collapse=", "),
    ")[1,])"
  )
  fitval = nls(
    formula=as.formula(formulaString),
    start=initParams,
    lower=lower,
    upper=upper,
    data = plotSum,
    control = control,
    trace=T
    , algorithm = algorithm
  )
  # Determine relative improvement compared to initParams (finalSS/initSS<1 = improvement)
  finalSS = fitval$m$deviance()
  print(paste0("SE = ", summary(fitval)$sigma,"; SS = ",finalSS,"; finalSS/initSS = ",finalSS/initSS))
  # Return the results of the fitting routine.
  return(fitval)
}
|
8096333b23bfd8883bae701dae06c6b51751220c
|
3ebbd6220109f68462519f33460cae1c1c5de352
|
/man/etm_model.Rd
|
7013c7cb3dcb637198ab3dadac77cedaeba5340f
|
[
"MIT"
] |
permissive
|
adjidieng/ETM-R
|
8a69fc79a32c825cf77b73a25e6f13dd6e644e72
|
65d089be64a882a80238174ebe21784f7bf1acd8
|
refs/heads/main
| 2023-05-14T14:16:56.862479
| 2021-06-07T17:27:59
| 2021-06-07T17:27:59
| 349,517,189
| 5
| 1
|
MIT
| 2021-06-07T17:28:00
| 2021-03-19T18:18:03
|
Python
|
UTF-8
|
R
| false
| true
| 2,397
|
rd
|
etm_model.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/etm_model.R
\name{etm_model}
\alias{etm_model}
\title{Create Word Embeddings and Run the Embedded Topic Model on a Corpus}
\usage{
etm_model(dataset_name, data_path, num_topics = 50, epochs = 1000,
save_path, use_embed, embed_path)
}
\arguments{
\item{dataset_name}{The folder name of the preprocessed data (e.g. "20ng"). Remember to run `etm_preprocess`
to produce this folder of bag-of-words representations.}
\item{data_path}{The directory or location to the preprocessed data (e.g. "~/Desktop/20ng").}
\item{num_topics}{The number of topics to learn. The default number of topics is 50.}
\item{epochs}{Number of times the dataset is fed into the algorithm for
learning. The default number of epochs is 1000. Minimum number of epochs is 1.}
\item{save_path}{The directory for the output. The default is "./results".}
\item{use_embed}{Boolean entry (TRUE or FALSE) to skip creating word embeddings
and use pre-fitted embeddings.}
\item{embed_path}{The path to the pre-fitted word embeddings file, if `use_embed` is TRUE. Otherwise,
leave blank.}
}
\value{
A text file, known as the checkpoint file, which contains the trained embeddings and topics. This
file is saved at the designated save path.
}
\description{
`etm_model` combines two steps into one: first, it creates word embeddings on a
corpus, and second, it then runs the embedded topic model. Before using `etm_model`,
use `etm_preprocess` to process the corpus into a bag-of-words representation (
tokens and counts). An optional step before using `etm_model` is to use `etm_embed` (or another means)
to pre-fit word embeddings on the corpus, and hence, skip the first step of `etm_model`.
}
\examples{
\dontrun{
# most minimal example
etm_model(dataset_name = "bow_output", data_path = "Desktop/bow_output", num_topics = 2,
epochs = 2, save_path = "Desktop/", use_embed = FALSE)
# create word embeddings and then run the embedded topic model
etm_model(dataset_name = "bow_output", data_path = "Desktop/bow_output", num_topics = 10,
epochs = 1000, save_path = "Desktop/", use_embed = FALSE)
# use pre-fitted word embeddings, and then run the embedded topic model
etm_model(dataset_name = "prefitted_data", data_path = "Desktop/", num_topics = 10,
epochs = 1000, save_path = "Desktop/",
use_embed = TRUE, embed_path = "Desktop/embeddings/")
}
}
|
54bed228720f4c743e9af1a5629ce9be53003027
|
3f312cabe37e69f3a2a8c2c96b53e4c5b7700f82
|
/ver_devel/bio3d/man/pca.Rd
|
66103be71bc3f16b1c0b3c296ff25bd51c99598c
|
[] |
no_license
|
Grantlab/bio3d
|
41aa8252dd1c86d1ee0aec2b4a93929ba9fbc3bf
|
9686c49cf36d6639b51708d18c378c8ed2ca3c3e
|
refs/heads/master
| 2023-05-29T10:56:22.958679
| 2023-04-30T23:17:59
| 2023-04-30T23:17:59
| 31,440,847
| 16
| 8
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,835
|
rd
|
pca.Rd
|
\name{pca}
\alias{pca}
\title{ Principal Component Analysis }
\description{
Performs principal components analysis (PCA) on biomolecular structure data.
}
\usage{
pca(...)
}
\arguments{
\item{\dots}{ arguments passed to the methods \code{pca.xyz},
\code{pca.pdbs}, etc. Typically this includes either a numeric
matrix of Cartesian coordinates with a row per structure/frame (function
\code{pca.xyz()}), or an object of class \code{pdbs} as obtained from
function \code{pdbaln} or \code{read.fasta.pdb} (function
\code{pca.pdbs()}). }
}
\details{
Principal component analysis can be performed on any structure dataset of equal or unequal sequence composition to capture and characterize inter-conformer relationships.
This generic \code{pca} function calls the corresponding methods function for actual calculation, which is determined by the class of the input argument \code{x}. Use
\code{methods("pca")} to list all the current methods for \code{pca}
generic. These will include:
\code{\link{pca.xyz}}, which will be used when \code{x} is a numeric matrix
containing Cartesian coordinates (e.g. trajectory data).
\code{\link{pca.pdbs}}, which will perform PCA on the
Cartesian coordinates of a input \code{pdbs} object (as obtained from
the \sQuote{read.fasta.pdb} or \sQuote{pdbaln} functions).
Currently, function \code{\link{pca.tor}} should be called explicitly as there
are currently no defined \sQuote{tor} object classes.
See the documentation and examples for each individual function for
more details and worked examples.
}
\references{
Grant, B.J. et al. (2006) \emph{Bioinformatics} \bold{22}, 2695--2696.
}
\author{ Barry Grant, Lars Skjaerven }
\seealso{
\code{\link{pca.xyz}}, \code{\link{pca.pdbs}},
\code{\link{pdbaln}}. }
\keyword{ utilities }
|
f887920de26c306333b084b969d19cb899df4529
|
544509f8706dcea9e791d5828b89a3282cdcd673
|
/DSSA-5101-DATA-EXPLORATION/Project5/walshCode.r
|
76e47bbbe818071b361b6b13c6bd9125235708cf
|
[] |
no_license
|
walshg3/DataScience
|
0474b88195199b7275d82e589d8cd7018a1a2050
|
7198212ea7b7a19832062b370d25650201262626
|
refs/heads/master
| 2021-08-07T06:25:54.340050
| 2020-08-11T06:23:30
| 2020-08-11T06:23:30
| 208,895,573
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,282
|
r
|
walshCode.r
|
library(tidyverse)
library(ggplot2)
### Function Calls ###
# Merge one print server's job log with the matching demographic extract.
#
# source_csv: PaperCut-style job log with a Date column in m/d/Y format.
# demographic_csv: registrar extract keyed on STUDENT_UNAME + PRINT_DATE.
# Returns a sanitized, chronologically ordered data frame with one row per
# (job, major, minor) combination; colon-separated major/minor lists are
# exploded into separate rows and flagged.
acprint_merge <- function(source_csv, demographic_csv) {
  # Printer job log, with its Date column parsed up front.
  printer_log <- read_csv(
    source_csv,
    col_types = cols(
      Date = col_date("%m/%d/%Y")
    )
  )
  # Demographic extract: parse PRINT_DATE and keep a single row per
  # user/date so the join below cannot fan out on duplicates.
  demographics <- read_csv(
    demographic_csv,
    col_types = cols(
      PRINT_DATE = col_date("%m/%d/%Y")
    )
  ) %>%
    distinct(STUDENT_UNAME, PRINT_DATE, .keep_all = TRUE)
  # Left-join demographics onto the log (every print job is kept, matched
  # or not), keep only the sanitized column set, and order chronologically.
  combined <- merge(
    printer_log, demographics,
    by.x = c("User", "Date"),
    by.y = c("STUDENT_UNAME", "PRINT_DATE"),
    all.x = TRUE
  ) %>%
    select(c("User", "Document", "Printer", "Date", "Time", "Computer",
             "Pages", "Cost", "STVTERM_CODE", "AGE", "SPBPERS_SEX",
             "MAJR_CODE_LIST", "MINR_CODE_LIST", "CLASS_CODE", "RESIDENT")) %>%
    arrange(Date)
  # Jobs with no demographic match have NA residency; treat as non-resident.
  combined <- replace_na(combined, list(RESIDENT = "N"))
  # Explode colon-separated major lists into one row per major, flagging
  # rows that originated from a multi-major record.
  combined <- combined %>%
    mutate(multiple_majors = ifelse(str_detect(MAJR_CODE_LIST, ":"), "Y", "N")) %>%
    separate_rows(MAJR_CODE_LIST, sep = ":")
  # Same treatment for the minor lists.
  combined <- combined %>%
    mutate(multiple_minors = ifelse(str_detect(MINR_CODE_LIST, ":"), "Y", "N")) %>%
    separate_rows(MINR_CODE_LIST, sep = ":")
  combined
}
# Function that takes a Banner-style term code (e.g. "201980") and converts
# it to a friendly string such as "Fall 2019".
#
# The first four characters are the year; characters five and six encode the
# semester ("80" = Fall, "50" = Summer, "20" = Spring). Unrecognized semester
# codes are passed through unchanged, matching dplyr::recode's keep-as-is
# behaviour for unmatched character values (the previous implementation).
# Vectorized: accepts a vector of term codes and returns one label each.
term_tostring <- function(term) {
  year <- substr(term, 1, 4)  # was substr(term, 0, 4); 0 was silently clamped to 1
  code <- substr(term, 5, 6)
  # Base named-vector lookup replaces dplyr::recode -- same result for the
  # known codes, no dplyr dependency.
  semester <- unname(c("80" = "Fall", "50" = "Summer", "20" = "Spring")[code])
  # Unknown codes come back NA from the lookup; keep them as-is instead.
  semester[is.na(semester)] <- code[is.na(semester)]
  return(paste(semester, year))
}
# Total pages printed per major, limited to the ten highest totals.
# df: merged print log with Pages and MAJR_CODE_LIST columns.
# Returns a data frame with columns Category (major code) and x (page total),
# sorted from most to least pages.
sort_printer_data <- function(df) {
  totals <- aggregate(df$Pages, by = list(Category = df$MAJR_CODE_LIST), FUN = sum)
  totals %>%
    arrange(desc(x)) %>%
    head(10)
}
# Total printing cost per major, limited to the ten highest totals.
# Same shape as sort_printer_data() but aggregates the Cost column instead
# of Pages (used for the color/acprint6 data).
# Returns a data frame with columns Category (major code) and x (cost total),
# sorted from most to least cost.
sort_printer_data_color <- function(df) {
  totals <- aggregate(df$Cost, by = list(Category = df$MAJR_CODE_LIST), FUN = sum)
  totals %>%
    arrange(desc(x)) %>%
    head(10)
}
# Bar chart of printer-point totals by program for one academic year.
#
# df: output of sort_printer_data()/sort_printer_data_color() -- a data frame
#     with columns Category (major code) and x (total; plotted in thousands).
# year: character label spliced into the chart title.
# NOTE(review): aes() references df$Category / df$x directly instead of bare
# column names; this works because df is also the plot data, but it is
# fragile (e.g. under faceting) and worth tidying.
print_data_yearly <- function(df, year){
  ggplot(data.frame(df), aes(reorder(x=df$Category, -df$x), y=df$x / 1000)) +
    geom_bar(stat = "identity") + labs( x = NULL,
                                        y = "Printer Points (By Thousands)",
                                        title = paste("Printer Points Usage By Program For", year )) +
    theme(axis.text.x=element_text(angle=60, hjust=1), text=element_text(size=18)) ## Include for text labels + geom_text(label = df$x)
}
### Begin Data Merge ###
# Build the merged printer-log + demographics tables for both print servers.
# Merge acprint4
acprint4_merge <- acprint_merge("acprint4.csv", "EVAN_DSSA_ACPRINT4_3.csv")
# Merge acprint6
acprint6_merge <- acprint_merge("acprint6.csv", "EVAN_DSSA_ACPRINT6_3.csv")
# Whitelist of known enrollment major codes ("CERT" appears once per school);
# rows with codes outside this list are filtered out of the analysis later.
enrollment_majors <- c("AFST","ARTS","ARTV","COMM","HIST","LCST","LITT","MAAS","PHIL","BSNS","CMPT","CSCI","CSIS","HTMS","INSY","MBA","CERT","EDOL","MAED","MAIT","TEDU","CERT","LIBA","MAHG","CERT","DNP","DPT","EXSC","HLSC","MSCD","MSN","MSOT","NRS4","NURS","PUBH","SPAD","BCMB","BIOL","CHEM","CPLS","DSSA","ENVL","GEOL","MARS","MATH","MSCP","PHYS","PSM","SSTB","CERT","COUN","CRIM","ECON","MACJ","MSW","POLS","PSYC","SOCY","SOWK","NMAT","UNDC")
# Inspection only: print the major codes that are NOT in the whitelist.
# Results are not assigned, so these two lines have no lasting effect.
acprint6_merge %>% filter(!MAJR_CODE_LIST %in% enrollment_majors) %>% select(MAJR_CODE_LIST) %>% distinct()
acprint4_merge %>% filter(!MAJR_CODE_LIST %in% enrollment_majors) %>% select(MAJR_CODE_LIST) %>% distinct()
# Disabled exploration: rows whose Document field is missing.
#acprint6_merge <- acprint6_merge[is.na(acprint6_merge$Document),]
### Begin Data Cleaning ###
## Remove IT admin usernames from the acprint6 log.
# NOTE(review): these comparisons test the Printer column against
# "AC\\<username>" values -- confirm the admin account names really land in
# the Printer column rather than User.
acprint6_merge <- acprint6_merge[!acprint6_merge$Printer == "AC\\gallag99"
                                 & !acprint6_merge$Printer == "AC\\koppt"
                                 & !acprint6_merge$Printer == "AC\\roubosd"
                                 & !acprint6_merge$Printer == "AC\\yeunge"
                                 & !acprint6_merge$Printer == "AC\\admin-kapmat"
                                 & !acprint6_merge$Printer == "AC\\pestritm"
                                 & !acprint6_merge$Printer == "AC\\Dan"
                                 & !acprint6_merge$Printer == "AC\\mcadmin"
                                 , ]
## Remove IT admin usernames from the acprint4 log.
# BUGFIX: the final condition previously read acprint6_merge$Printer, so
# acprint4_merge rows were selected with a logical vector built from a
# different (differently sized) data frame -- recycled/misaligned selection.
# NOTE(review): the comparisons test the Printer column against
# "AC\\<username>" values -- confirm the admin names appear in Printer
# rather than User.
acprint4_merge <- acprint4_merge[!acprint4_merge$Printer == "AC\\gallag99"
                                 & !acprint4_merge$Printer == "AC\\koppt"
                                 & !acprint4_merge$Printer == "AC\\roubosd"
                                 & !acprint4_merge$Printer == "AC\\yeunge"
                                 & !acprint4_merge$Printer == "AC\\admin-kapmat"
                                 & !acprint4_merge$Printer == "AC\\pestritm"
                                 & !acprint4_merge$Printer == "AC\\Dan"
                                 & !acprint4_merge$Printer == "AC\\mcadmin"
                                 , ]
## Remove the Documents Col (column 2) from the acprint6 table.
acprint6_merge = acprint6_merge[-c(2)]
## Things to Look For
## What Major Prints the most
## Pages X Cost
## Acprint4
## Need to do this for AC Print 6 as well
## Acprint4: black-and-white prints, measured by the Pages column.
# Keep only real print jobs whose major is in the enrollment whitelist.
df1 <- filter(acprint4_merge, Pages > 0 )
df1 <- df1 %>% filter(MAJR_CODE_LIST %in% enrollment_majors)
## Semester FA 19 + SP 20 + SU 20
## 2016/04/
#Fall 2019 September 1 - Dec 20
#Summer 19 May 10 - Aug 10th
#Spring 19 Jan 12 - May 6
df1 <- df1[!is.na(df1$MAJR_CODE_LIST),]
# Academic-year subsets (Sep 1 through Aug 10 of the following year).
# NOTE(review): format(as.Date(Date), "%Y/%M/%D") uses %M (minutes!) and %D
# (%m/%d/%y), so the first comparison pits mismatched string formats against
# "YYYY-MM-DD"; the second condition (Date <= ...) does the effective
# filtering via Date/character coercion. The format() condition is almost
# certainly a bug and should probably read df1$Date >= "2016-09-01" etc.
subset2k16 <- subset(df1, format(as.Date(df1$Date),"%Y/%M/%D") >= "2016-09-01" & df1$Date <= "2017-08-10")
subset2k17 <- subset(df1, format(as.Date(df1$Date),"%Y/%M/%D") >= "2017-09-01" & df1$Date <= "2018-08-10")
subset2k18 <- subset(df1, format(as.Date(df1$Date),"%Y/%M/%D") >= "2018-09-01" & df1$Date <= "2019-08-10")
# 2018-19 is split at Jan 1 (cost pieces are computed separately below).
cost2k18 <- subset(df1, format(as.Date(df1$Date),"%Y/%M/%D") >= "2018-09-01" & df1$Date <= "2019-01-01")
cost2k19 <- subset(df1, format(as.Date(df1$Date),"%Y/%M/%D") >= "2019-01-02" & df1$Date <= "2019-08-10")
# Total pages per academic year.
sumsubset2k16 <- sum(subset2k16$Pages)
sumsubset2k17 <- sum(subset2k17$Pages)
sumsubset2k18 <- sum(subset2k18$Pages)
# Hard-coded per-page rates: 0.004598 for 2018-19, 0.00542 for earlier years.
# NOTE(review): confirm these rates; they are magic numbers in the script.
sum2k18 <- sum(cost2k18$Pages) * 0.004598
sum2k19 <- sum(cost2k19$Pages) * 0.004598
total <- sum2k18 + sum2k19
pagecost2k17 <- sumsubset2k17 * 0.00542
pagecost2k16 <- sumsubset2k16 * 0.00542
cost_sums <- data.frame(c(pagecost2k16, pagecost2k17, total), c("Fall 2016 - Summer 2017","Fall 2017 - Summer 2018","Fall 2018 - Summer 2019" ))
# Cost of printing per academic year (column names below are the mangled
# data.frame() auto-names of the unnamed constructor arguments).
ggplot(data.frame(cost_sums), aes(y=cost_sums$c.pagecost2k16..pagecost2k17..total. , x=cost_sums$c..Fall.2016...Summer.2017....Fall.2017...Summer.2018....Fall.2018...Summer.2019..)) +
  geom_bar(stat = "identity") +
  labs(x = "Academic Year",
       y = "Cost $",
       title = "Cost Of Printing Per Academic Year")
print_sums <- data.frame(c(sumsubset2k16, sumsubset2k17, sumsubset2k18 ), c("Fall 2016 - Summer 2017","Fall 2017 - Summer 2018","Fall 2018 - Summer 2019" ))
# Total prints per academic year, in millions.
ggplot(data.frame(print_sums), aes(y=print_sums$c.sumsubset2k16..sumsubset2k17..sumsubset2k18. / 1000000, x=print_sums$c..Fall.2016...Summer.2017....Fall.2017...Summer.2018....Fall.2018...Summer.2019..)) +
  geom_bar(stat = "identity") +
  labs(x = "Academic Year",
       y = "Prints (By Millions)",
       title = "Total Prints Per Academic Year")
# Top-ten majors by pages, one chart per academic year.
print_data_yearly(sort_printer_data(subset2k16), "Fall 2016 - Summer 2017")
print_data_yearly(sort_printer_data(subset2k17), "Fall 2017 - Summer 2018")
print_data_yearly(sort_printer_data(subset2k18), "Fall 2018 - Summer 2019")
# NOTE(review): `top10` is never defined in this script -- this plot only
# works if a top10 object is left over in the workspace from another session.
ggplot(data.frame(top10), aes(reorder(x=top10$Category, -top10$x), y=top10$x)) +
  geom_bar(stat = "identity")
## Are people spending more to print over the years
## Are the majors changing over the years
# Daily printer-point usage for April 2016.
ggplot(data.frame(acprint6_merge), aes(x=acprint6_merge$Date, y=acprint6_merge$Cost)) +
  scale_x_date(limits = as.Date(c("2016-04-01","2016-04-29")), date_breaks = "1 day") +
  geom_bar(stat = "identity") + labs(x = "Date",
                                     y = "Printer Points",
                                     title = "Printer Points Usage") +
  theme(axis.text.x=element_text(angle=60, hjust=1))
# Weekly printer-point usage for Fall 2018.
ggplot(data.frame(acprint6_merge), aes(x=acprint6_merge$Date, y=acprint6_merge$Cost)) +
  scale_x_date(limits = as.Date(c("2018-09-01","2018-12-31")), date_breaks = "1 weeks") +
  geom_bar(stat = "identity") + labs(x = "Date",
                                     y = "Printer Points",
                                     title = "Printer Points Usage") +
  theme(axis.text.x=element_text(angle=60, hjust=1))
## ACPRINT6: color prints, measured by the Cost (printer points) column.
df2 <- filter(acprint6_merge, Cost > 0 )
df2 <- df2 %>% filter(MAJR_CODE_LIST %in% enrollment_majors)
df2 <- df2[!is.na(df2$MAJR_CODE_LIST),]
# Academic-year subsets, same date logic as the acprint4 section.
# NOTE(review): the format(..., "%Y/%M/%D") comparison is format-mismatched
# (%M is minutes); only the Date <= condition filters effectively.
subset2k16color <- subset(df2, format(as.Date(df2$Date),"%Y/%M/%D") >= "2016-09-01" & df2$Date <= "2017-08-10")
subset2k17color <- subset(df2, format(as.Date(df2$Date),"%Y/%M/%D") >= "2017-09-01" & df2$Date <= "2018-08-10")
subset2k18color <- subset(df2, format(as.Date(df2$Date),"%Y/%M/%D") >= "2018-09-01" & df2$Date <= "2019-08-10")
cost2k18color <- subset(df2, format(as.Date(df2$Date),"%Y/%M/%D") >= "2018-09-01" & df2$Date <= "2019-01-01")
cost2k19color <- subset(df2, format(as.Date(df2$Date),"%Y/%M/%D") >= "2019-01-02" & df2$Date <= "2019-08-10")
#subset2k19 <- subset(df1, format(as.Date(df1$Date),"%Y/%M/%D") >= "2016-09-01" & df1$Date <= "2017-08-10")
# Total printer points per academic year.
sumsubset2k16color <- sum(subset2k16color$Cost)
sumsubset2k17color <- sum(subset2k17color$Cost)
sumsubset2k18color <- sum(subset2k18color$Cost)
# NOTE(review): Cost already looks like a points/cost figure, yet the same
# per-page multipliers used for Pages above are applied here -- confirm the
# intended units before trusting these totals.
sum2k18color <- sum(cost2k18color$Cost) * 0.004598
sum2k19color <- sum(cost2k19color$Cost) * 0.004598
totalcolor <- sum2k18color + sum2k19color
pagecost2k17color <- sumsubset2k17color * 0.00542
pagecost2k16color <- sumsubset2k16color * 0.00542
cost_sumscolor <- data.frame(c(pagecost2k16color, pagecost2k17color, totalcolor), c("Fall 2016 - Summer 2017","Fall 2017 - Summer 2018","Fall 2018 - Summer 2019" ))
# NOTE(review): the data argument is cost_sums (the b/w table) while aes()
# pulls columns from cost_sumscolor; this only works because aes() uses
# absolute references -- data should presumably be cost_sumscolor.
ggplot(data.frame(cost_sums), aes(y=cost_sumscolor$c.pagecost2k16color..pagecost2k17color..totalcolor., x=cost_sumscolor$c..Fall.2016...Summer.2017....Fall.2017...Summer.2018....Fall.2018...Summer.2019..)) +
  geom_bar(stat = "identity") +
  labs(x = "Academic Year",
       y = "Cost $",
       title = "Cost Of Printing Per Academic Year")
print_sumscolor <- data.frame(c(sumsubset2k16color, sumsubset2k17color, sumsubset2k18color), c("Fall 2016 - Summer 2017","Fall 2017 - Summer 2018","Fall 2018 - Summer 2019" ))
# Total color prints per academic year, in thousands.
ggplot(data.frame(print_sumscolor), aes(y=print_sumscolor$c.sumsubset2k16color..sumsubset2k17color..sumsubset2k18color. / 1000, x=print_sumscolor$c..Fall.2016...Summer.2017....Fall.2017...Summer.2018....Fall.2018...Summer.2019..)) +
  geom_bar(stat = "identity") +
  labs(x = "Academic Year",
       y = "Prints (By Thousands)",
       title = "Total Prints Per Academic Year")
# Top-ten majors by printer points, one chart per academic year.
print_data_yearly(sort_printer_data_color(subset2k16color), "Fall 2016 - Summer 2017")
print_data_yearly(sort_printer_data_color(subset2k17color), "Fall 2017 - Summer 2018")
print_data_yearly(sort_printer_data_color(subset2k18color), "Fall 2018 - Summer 2019")
# Echo yearly cost totals to the console.
sum(subset2k16color$Cost)
sum(subset2k17color$Cost)
sum(subset2k18color$Cost)
# NOTE(review): `top10` is never defined in this script (same issue as the
# acprint4 section).
ggplot(data.frame(top10), aes(reorder(x=top10$Category, -top10$x), y=top10$x)) +
  geom_bar(stat = "identity")
## Are people spending more to print over the years
# Daily printer-point usage for April 2016 (duplicate of the earlier plot).
ggplot(data.frame(acprint6_merge), aes(x=acprint6_merge$Date, y=acprint6_merge$Cost)) +
  scale_x_date(limits = as.Date(c("2016-04-01","2016-04-29")), date_breaks = "1 day") +
  geom_bar(stat = "identity") + labs(x = "Date",
                                     y = "Printer Points",
                                     title = "Printer Points Usage") +
  theme(axis.text.x=element_text(angle=60, hjust=1))
# Weekly printer-point usage for Fall 2018 (duplicate of the earlier plot).
ggplot(data.frame(acprint6_merge), aes(x=acprint6_merge$Date, y=acprint6_merge$Cost)) +
  scale_x_date(limits = as.Date(c("2018-09-01","2018-12-31")), date_breaks = "1 weeks") +
  geom_bar(stat = "identity") + labs(x = "Date",
                                     y = "Printer Points",
                                     title = "Printer Points Usage") +
  theme(axis.text.x=element_text(angle=60, hjust=1))
|
df9e66a2ccc3a47c39d497ff790b73d8666be24e
|
a2718fd2bab9eb1b86b77b8c9b0d776973d11315
|
/man/createSeasonalityCovariateSettings.Rd
|
cb2c33ace59f71114480e626015beec4f95ebd02
|
[
"Apache-2.0"
] |
permissive
|
OHDSI/SelfControlledCaseSeries
|
df61f7622fa19bb7b24a909a8da4f18c626cf33c
|
28cd3a6cb6f67be5c6dfba0682b142ff3853e94c
|
refs/heads/main
| 2023-08-31T02:00:48.909626
| 2023-04-13T11:42:06
| 2023-04-13T11:42:06
| 20,701,289
| 13
| 12
| null | 2023-09-07T04:50:44
| 2014-06-10T20:53:47
|
R
|
UTF-8
|
R
| false
| true
| 1,498
|
rd
|
createSeasonalityCovariateSettings.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CovariateSettings.R
\name{createSeasonalityCovariateSettings}
\alias{createSeasonalityCovariateSettings}
\title{Create seasonality settings}
\usage{
createSeasonalityCovariateSettings(
seasonKnots = 5,
allowRegularization = FALSE,
computeConfidenceIntervals = FALSE
)
}
\arguments{
\item{seasonKnots}{If a single number is provided this is assumed to indicate the
number of knots to use for the spline, and the knots are
automatically equally spaced across the year. If more than one
number is provided these are assumed to be the exact location of
the knots in days relative to the start of the year.}
\item{allowRegularization}{When fitting the model, should the covariates defined here be
allowed to be regularized?}
\item{computeConfidenceIntervals}{Should confidence intervals be computed for the covariates
defined here? Setting this to FALSE might save computing time
when fitting the model. Will be turned to FALSE automatically
when \code{allowRegularization = TRUE}.}
}
\value{
An object of type \code{seasonalitySettings}.
}
\description{
Create seasonality settings
}
\details{
Create an object specifying whether and how seasonality should be included in the model.
Seasonality can be included by splitting patient time into calendar months. During a month, the
relative risk attributed to season is assumed to be constant, and the risk from month to month is
modeled using a cyclic cubic spline.
}
|
31008648d3ae3628ed16b96b29d094484d46e8f2
|
a1738539620913a8cf50d51517fcd9df5c79fbd6
|
/R/DLMextra.R
|
40da8cc652c2f353dee1c35a6f7b25f434b833fc
|
[] |
no_license
|
Lijiuqi/DLMtool
|
aa5ec72fd83436ebd95ebb6e80f61053603c901c
|
bfafc37c100680b5757f03ae22c5d4062829258b
|
refs/heads/master
| 2021-05-05T22:07:48.346019
| 2018-01-02T22:22:45
| 2018-01-02T22:22:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 959
|
r
|
DLMextra.R
|
#' Load more data from DLMextra package
#'
#' Downloads the DLMextra package from GitHub and tells the user how to load
#' it. Called for its side effects (installation and console messages).
#' @param silent Logical. Should messages be printed?
#' @export
#'
#' @importFrom devtools install_github
DLMextra <- function(silent=FALSE) {
  if (!silent) message("\nDownloading 'DLMextra' from GitHub")
  tt <- devtools::install_github("DLMtool/DLMextra", quiet=TRUE)
  # NOTE(review): the branch below assumes install_github() returns a truthy
  # value when a fresh install happened and a falsy one when the package was
  # already up to date -- confirm this holds for the devtools version in use.
  if (tt) {
    if (!silent) message("Use 'library(DLMextra)' to load additional data into workspace")
  } else {
    if (!silent) message("Package 'DLMextra' already up to date\n Use 'library(DLMextra)' to load additional data into workspace")
  }
  # Dead code kept for reference: previously copied DLMdata datasets into
  # the calling environment one object at a time.
  # d <- data(package = "DLMdata")
  # DataObjs <- d$results[,3]
  # for (X in 1:length(DataObjs)) {
  #   dat <- eval(parse(text=paste0("DLMdata::",(DataObjs[X]))))
  #   AAname <- DataObjs[X]
  #   assign(AAname, dat)
  #   # environment(AAname) <- asNamespace('DLMtool')
  #   # environment(AAname) <- as.environment("package:DLMtool")
  # }
}
|
2c98b1ca7fb376e605c00747b0d5c9ff5155d193
|
753e3ba2b9c0cf41ed6fc6fb1c6d583af7b017ed
|
/service/paws.iotanalytics/man/cancel_pipeline_reprocessing.Rd
|
bbdcb84b3ccf41f21dfedf517af120ffeb818e89
|
[
"Apache-2.0"
] |
permissive
|
CR-Mercado/paws
|
9b3902370f752fe84d818c1cda9f4344d9e06a48
|
cabc7c3ab02a7a75fe1ac91f6fa256ce13d14983
|
refs/heads/master
| 2020-04-24T06:52:44.839393
| 2019-02-17T18:18:20
| 2019-02-17T18:18:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 739
|
rd
|
cancel_pipeline_reprocessing.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.iotanalytics_operations.R
\name{cancel_pipeline_reprocessing}
\alias{cancel_pipeline_reprocessing}
\title{Cancels the reprocessing of data through the pipeline}
\usage{
cancel_pipeline_reprocessing(pipelineName, reprocessingId)
}
\arguments{
\item{pipelineName}{[required] The name of pipeline for which data reprocessing is canceled.}
\item{reprocessingId}{[required] The ID of the reprocessing task (returned by "StartPipelineReprocessing").}
}
\description{
Cancels the reprocessing of data through the pipeline.
}
\section{Accepted Parameters}{
\preformatted{cancel_pipeline_reprocessing(
pipelineName = "string",
reprocessingId = "string"
)
}
}
|
1fec1ad3b645f38739b979c13bac9a7d36af6986
|
bbfc445d3b3c4ebe5ac9f2e3ad6a27bf0f049319
|
/moeny.R
|
27e68c7cfca3d338a1896c1ff1636d4b602fbda5
|
[] |
no_license
|
sckingsley/Money
|
2887a082b1dcfd0ce6b6dea7c6aca8b71eb7417b
|
b7d038c6672596a3e65571f28cea48e95bdd47c3
|
refs/heads/master
| 2016-09-06T01:34:40.264081
| 2014-03-22T00:53:21
| 2014-03-22T00:53:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,339
|
r
|
moeny.R
|
View(Money)
setwd("~/Money")
View(Money)
View(Money)
Money <- read.delim("~/Dropbox/Metrics Data Files/Money.txt")
View(Money)
View(Money)
FILE <- "~/Dropbox/Metrics Data Files/Money.txt"
setwd("~/Money")
FILE <- "~/Dropbox/Metrics Data Files/Money.txt"
setwd("~/Money")
Money <- read.delim("~/Money/Money.txt")
View(Money)
View(Money)
View(Money)
load("~/Money/Money.txt")
View(Money)
FILE <- "~/Money/Money.txt"
dat <- read.table(file = FILE, header = TRUE)
summary (dat)
v <- c("M2", "interest", "GDP", "Year")
print (v)
f <- c("Year$M2", "Year$interest", "Year$GDP")
print(f)
lm (formula = M2 ~ interest + GDP, data=dat)
m <- lm(formula = M2 ~ interest + GDP, data=dat, TRUE)
summary(m)
//parameter estimates//
coef(m)
anova.lm(m)
anova(m)
coefficients(m)
fitted(m)
residuals(m)
summary(m)
deviance(m)
anova.lmlist(m)
load("~/Money/Money.txt")
View(Money)
FILE <- "~/Money/Money.txt"
dat <- read.table(file = FILE, header = TRUE)
summary (dat)
v <- c("M2", "interest", "GDP", "Year")
print (v)
f <- c("Year$M2", "Year$interest", "Year$GDP")
print(f)
# Re-fit the M2 ~ interest + GDP regression and inspect the fit.
lm (formula = M2 ~ interest + GDP, data=dat)
m <- lm(formula = M2 ~ interest + GDP, data=dat, TRUE)
summary(m)
# parameter estimates  (was //parameter estimates//, a parse error: R comments use #)
coef(m)
anova.lm(m)  # NOTE(review): direct S3 method call; prefer anova(m)
anova(m)
coefficients(m)
residuals(m)
summary(m)
deviance(m)
anova.lmlist(m)  # NOTE(review): expects a list of fits; likely errors on a single lm
load("~/Money/Money.txt")
View(Money)
FILE <- "~/Money/Money.txt"
dat <- read.table(file = FILE, header = TRUE)
summary (dat)
v <- c("M2", "interest", "GDP", "Year")
print (v)
f <- c("Year$M2", "Year$interest", "Year$GDP")
print(f)
lm (formula = M2 ~ interest + GDP, data=dat)
m <- lm(formula = M2 ~ interest + GDP, data=dat, TRUE)
summary(m)
# Load the money-supply data set from a whitespace-delimited text file.
FILE <- "~/Dropbox/Metrics Data Files/Money.txt"
# dat <_ read.table(file = FILE, header = TRUE)  # console typo ("<_" is not an
# operator); the corrected command follows on the next line
dat <- read.table(file = FILE, header = TRUE)
summary(dat)
v <- c("M2", "interest", "GDP", "Year")
print(v)
summary(dat)
f <- c("Year$M2", "Year$interest", "Year$GDP")
print(f)
summary(dat)
lm(formula = M2 ~ interest + GDP, data=dat)
lm(formula = M2*f ~ interest + GDP, data=dat)
lm(formula = M2(f) ~ interest + GDP, data=dat)
lm(formula = M2$f ~ interest + GDP, data=dat)
m <- lm(formula = M2 ~ interest + GDP, data=dat)
summary(m)
anova(m)
plot (m)
?plot
xlab("Years")
?ggplot
??ggplot
FILE <- "~/Dropbox/Metrics Data Files/Money.txt"
dat <- read.table(file = FILE, header = TRUE)
v <- c("M2", "interest", "GDP", "Year")
autoplot(m)
autoplot(Year, M2, GDP)
ggplot(Year, M2, GDP)
library("AER", lib.loc="/Library/Frameworks/R.framework/Versions/3.0/Resources/library")
library("boot", lib.loc="/Library/Frameworks/R.framework/Versions/3.0/Resources/library")
ggplot(Year, M2, GDP)
library("dynlm", lib.loc="/Library/Frameworks/R.framework/Versions/3.0/Resources/library")
ggplot(Year, M2, GDP)
library("ggplot2", lib.loc="/Library/Frameworks/R.framework/Versions/3.0/Resources/library")
ggplot(Year, M2, GDP)
ggplot(M2, GDP)
ggplot("M2", "GDP")
aes_string <- ggplot("M2", "GDP")
ggplot(m)
library("evaluate", lib.loc="/Library/Frameworks/R.framework/Versions/3.0/Resources/library")
library("Formula", lib.loc="/Library/Frameworks/R.framework/Versions/3.0/Resources/library")
library("grid", lib.loc="/Library/Frameworks/R.framework/Versions/3.0/Resources/library")
library("labeling", lib.loc="/Library/Frameworks/R.framework/Versions/3.0/Resources/library")
library("manipulate", lib.loc="/Library/Frameworks/R.framework/Versions/3.0/Resources/library")
library("multcomp", lib.loc="/Library/Frameworks/R.framework/Versions/3.0/Resources/library")
library("nlme", lib.loc="/Library/Frameworks/R.framework/Versions/3.0/Resources/library")
library("plyr", lib.loc="/Library/Frameworks/R.framework/Versions/3.0/Resources/library")
library("rstudio", lib.loc="/Library/Frameworks/R.framework/Versions/3.0/Resources/library")
library("scales", lib.loc="/Library/Frameworks/R.framework/Versions/3.0/Resources/library")
library("SparseM", lib.loc="/Library/Frameworks/R.framework/Versions/3.0/Resources/library")
detach("package:survival", unload=TRUE)
ggplot(m)
ggplot(m, M2)
?ggplot
ggplot(2, aes(x, y, "Years"))
ggplot(df, aes(x, y, "Years"))
ggplot(df, aes(x, y))
ggplot(df, aes(m, f))
ggplot(aes(m, f))
ggplot(df)
ggplot(dat)
ggplot(dat, m)
ggplot(dat, aes(m))
ggplot(dat, aes(m, f))
ggplot(dat, aes(x = Years, y = m))
ggplot(data=dat, aes(x = Years, y = m))
ggplot(df, aes(x = Years, y = m))
?plot
install.packages("ggplot2")
install.packages("ggplot2")
install.packages("gcookbook")
library(ggplot2)
library(gcookbook)
plot(v, f, type="l")
View(Money)
setwd("~/Money")
View(Money)
View(Money)
Money <- read.delim("~/Dropbox/Metrics Data Files/Money.txt")
View(Money)
View(Money)
FILE <- "~/Dropbox/Metrics Data Files/Money.txt"
setwd("~/Money")
FILE <- "~/Dropbox/Metrics Data Files/Money.txt"
setwd("~/Money")
Money <- read.delim("~/Money/Money.txt")
View(Money)
View(Money)
View(Money)
load("~/Money/Money.txt")
View(Money)
FILE <- "~/Money/Money.txt"
dat <- read.table(file = FILE, header = TRUE)
summary (dat)
v <- c("M2", "interest", "GDP", "Year")
print (v)
f <- c("Year$M2", "Year$interest", "Year$GDP")
print(f)
# Fit M2 ~ interest + GDP again and print diagnostics.
lm (formula = M2 ~ interest + GDP, data=dat)
# NOTE(review): the trailing TRUE binds positionally to lm()'s `subset`
# argument (keeps all rows), so it changes nothing.
m <- lm(formula = M2 ~ interest + GDP, data=dat, TRUE)
summary(m)
# parameter estimates  (was //parameter estimates//, a parse error: R comments use #)
coef(m)
anova.lm(m)  # NOTE(review): direct S3 method call; prefer anova(m)
anova(m)
coefficients(m)
fitted(m)
residuals(m)
summary(m)
deviance(m)
anova.lmlist(m)  # NOTE(review): expects a list of fits; likely errors on a single lm
load("~/Money/Money.txt")
View(Money)
FILE <- "~/Money/Money.txt"
dat <- read.table(file = FILE, header = TRUE)
summary (dat)
v <- c("M2", "interest", "GDP", "Year")
print (v)
f <- c("Year$M2", "Year$interest", "Year$GDP")
print(f)
# Final repetition of the M2 ~ interest + GDP fit and its diagnostics.
lm (formula = M2 ~ interest + GDP, data=dat)
m <- lm(formula = M2 ~ interest + GDP, data=dat, TRUE)
summary(m)
# parameter estimates  (was //parameter estimates//, a parse error: R comments use #)
coef(m)
anova.lm(m)  # NOTE(review): direct S3 method call; prefer anova(m)
anova(m)
coefficients(m)
residuals(m)
summary(m)
deviance(m)
anova.lmlist(m)  # NOTE(review): expects a list of fits; likely errors on a single lm
confint(m)
##covariance of estimates##
vcov(m)
coef(m)
effects(m)
deviance(m)
m <- aov(v ~ f)
slope <- coef(m)
slope
print(slope)
plot(slope)
mode(m)
df.residual(m)
coefficients(m)
residuals(m)
effects(m)
rank(m)
fitted.values(m)
xlevels(m)
model(m)
coeftest(m)
confint(m)
fit <- lm(M2 ~ interest + GDP, data=dat)
summary(fit)
xlab="Years"
logit(m)
fit <- glm(M2 ~ interest + GDP, data=mydat, family=binomial(m))
print(fit)
summary(fit)
exp(confint(fit))
exp(coef(fit))
predict(fit, type="response")
residuals(fit, type="deviance")
anova(fit1,fit2, test="Chisq")
log(fit)
log(m)
loglm <- c(formula= log("M2") ~ log("interest") + log("GDP"))
summary(loglm)
x1 <- "interest"
x2 <- "GDP"
y <- "M2"
log(x1, base = exp(1))
log(x1)
log(interest)
lm(formula = log(M2) ~ log(interest) + log(GDP), data = dat)
s <- lm(formula = log(M2) ~ log(interest) + log(GDP), data=dat)
anova(s)
coefficients(s)
summary(s)
coef(s)
vcov(s)
##Difference##
FILE <- "~/Dropbox/Metrics Data Files/PS4diff.txt"
dat2 <- read.table(file = FILE, header = TRUE)
summary(dat2)
v <- c("dM2t", "dInt", "dGDP", "Year")
print (v)
f <- c("Year$dM2t", "Year$dInt", "Year$dGDP")
print(f)
lm (formula = dM2t ~ dInt + dGDP, data=dat2)
n <- lm(formula = dM2t ~ dInt + dGDP, data=dat2, TRUE)
print(n)
summary(n)
confint(n)
coef(n)
vcov(n)
anova(n)
##log difference model##
loglm(formula = dM2t ~ dInt + dGDP, data=dat2)
q <- loglm(formula = dM2t ~ dInt + dGDP, data=dat2)
summary(q)
coef(q)
anova(q)
resid(q)
deviance(q)
effects(q)
dat2$Year <- factor(dat2$Year)
glm(formula = dM2t ~ dInt + dGDP, data = dat2)
coef(log)
|
42fefce274ebd03a8356dd7df8ae5415ea3e0e55
|
bf5c82a303681312929b6488dd9d3cf65886a831
|
/R_script/README_r_script.R
|
87cd039c68e714fccdf61a8730981414f1523e6c
|
[] |
no_license
|
mrecos/Put_a_prior_on_it-Blog_post
|
247670c75295274c92ac1aba67580daca088f5f6
|
99efa2429fb15ba288a90761781eef74c9a8896e
|
refs/heads/master
| 2021-01-10T22:53:46.999967
| 2016-10-18T01:42:38
| 2016-10-18T01:42:38
| 70,365,270
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,498
|
r
|
README_r_script.R
|
## ----libraries, echo=TRUE, message=FALSE, warning=FALSE------------------
library("ggplot2")
library("grid")
library("dplyr")
library("tools")
library("tidyverse")
library("acs")
library("reshape2")
library("readr")
library("tidytext")
library("scales")
library("ggplot2")
library("ggrepel")
library("broom")
library("fitdistrplus")
library("rstan")
library("knitr")
library("rmarkdown")
## ----get_data, comment = '', cache=TRUE----------------------------------
#api.key.install("YOUR KEY HERE!")
## population data
stategeo <- geo.make(state = "*")
popfetch <- acs.fetch(geography = stategeo,
endyear = 2014,
span = 5,
table.number = "B01003",
col.names = "pretty")
## song lyrics
lyrics_url <- "https://raw.githubusercontent.com/walkerkq/musiclyrics/master/billboard_lyrics_1964-2015.csv"
save_lyrics_loc <- "~/Documents/R_Local/Put_a_prior_on_it-Blog_post/data/billboard_lyrics_1964-2015.csv"
# download.file(lyrics_url, save_lyrics_loc)
song_lyrics <- read_csv(save_lyrics_loc)
cities_dat_loc <- "~/Documents/R_Local/Put_a_prior_on_it-Blog_post/data/cities_over_100k_pop.csv"
state_abbrev_loc <- "~/Documents/R_Local/Put_a_prior_on_it-Blog_post/data/state_abbrev.csv"
state_abbrev <- read_csv(state_abbrev_loc)
## ----population_data,comment = ''----------------------------------------
# extract desired info from acs data
pop_df <- tbl_df(melt(estimate(popfetch))) %>%
mutate(name = as.character(Var1),
state_name = tolower(Var1),
pop2014 = value) %>%
dplyr::select(name, state_name, pop2014) %>%
filter(state_name != "puerto rico") %>%
left_join(state_abbrev)
# clean in city names
cities <- read_csv(cities_dat_loc) %>%
mutate(city = gsub("\x96", "-", city),
city_name = tolower(city),
state_name = tolower(state))
# extract and tidy lyrics from songs data
tidy_lyrics <- bind_rows(song_lyrics %>%
unnest_tokens(lyric, Lyrics),
song_lyrics %>%
unnest_tokens(lyric, Lyrics,
token = "ngrams", n = 2))
## ----data_summarize, comment=''------------------------------------------
# join and retain songs whether or not they are in the lyrics
tidy_lyrics_state_zeros <- right_join(tidy_lyrics, pop_df,
by = c("lyric" = "state_name")) %>%
distinct(Song, Artist, lyric, .keep_all = TRUE) %>%
mutate(cnt = ifelse(is.na(Source), 0, 1)) %>%
filter(lyric != "district of columbia") %>%
dplyr::rename(state_name = lyric)
tidy_lyrics_state <- filter(tidy_lyrics_state_zeros, cnt > 0)
## count the states up
zero_rate <- 0.0000001 # beta hates zeros
# group, summarise, and calculate rate per 100k population
state_counts_zeros <- tidy_lyrics_state_zeros %>%
group_by(state_name) %>%
dplyr::summarise(n = sum(cnt)) %>% # sum(cnt)
left_join(pop_df, by = c("state_name" = "state_name")) %>%
mutate(rate = (n / (pop2014 / 100000)) + zero_rate) %>%
arrange(desc(n))
# create another data set with
state_counts <- filter(state_counts_zeros, rate > zero_rate)
print(dplyr::select(state_counts, state_name, n ,pop2014, rate))
## ----cities, comment=''--------------------------------------------------
### Cities
## join cities together - inner_join b/c I don't care about cities with zero mentions (right_join otherwise)
tidy_lyrics_city <- inner_join(tidy_lyrics, cities,
by = c("lyric" = "city_name")) %>%
distinct(Song, Artist, lyric, .keep_all = TRUE) %>%
filter(!city %in% c("Surprise", "Hollywood",
"District of Columbia", "Jackson",
"Independence")) %>%
mutate(cnt = ifelse(is.na(Source), 0, 1)) %>%
dplyr::rename(city_name = lyric)
# count cities mentions. No need for a rate; not of use now
city_counts <- tidy_lyrics_city %>%
group_by(city_name) %>%
dplyr::summarise(n = sum(cnt)) %>%
arrange(desc(n))
print(city_counts)
# count of states that host the cities mentioned
city_state_counts <- tidy_lyrics_city %>%
group_by(state_name) %>%
dplyr::summarise(n = sum(cnt)) %>%
arrange(desc(n))
print(city_state_counts)
## ----city_state_join, comment=''-----------------------------------------
state_city_counts_zeros <- left_join(state_counts_zeros,
city_state_counts,
by = "state_name") %>%
dplyr::rename(n_state = n.x, n_city = n.y) %>%
mutate(n_city = ifelse(is.na(n_city), 0, n_city),
n_city_state = n_state + n_city,
city_state_rate = (n_city_state / (pop2014 / 100000)) + zero_rate)
# same as above, but no states with zero mentioned by city or state
state_city_counts <- filter(state_city_counts_zeros, n_city_state > zero_rate)
## ----fun_facts, comment=''-----------------------------------------------
# Boston = most mentioned city without its state
all_the_cities <- filter(state_city_counts, !state_name %in% state_counts$state_name) %>%
dplyr::select(name) %>%
mutate_if(is.factor, as.character) %>%
left_join(tidy_lyrics_city, by = c("name" = "state")) %>%
dplyr::select(name, Song, Artist, city)
kable(all_the_cities)
## ----fun_facts2, comment=''----------------------------------------------
n_states_mentioned <- tidy_lyrics_state %>%
group_by(Artist, Song) %>%
dplyr::summarise(n = n()) %>%
arrange(desc(n)) %>%
ungroup() %>%
top_n(5)
kable(n_states_mentioned)
# Top song is...
filter(tidy_lyrics_state, Song == as.character(n_states_mentioned[1,1])) %>%
dplyr::select(Song, Artist, Year, state_name)
## ----fun_facts3, comment=''----------------------------------------------
most_repeated_in_song <- right_join(tidy_lyrics, pop_df,
by = c("lyric" = "state_name")) %>%
group_by(Song, Artist, lyric) %>%
dplyr::summarise(n = n()) %>%
arrange(desc(n)) %>%
ungroup() %>%
filter(row_number() <= 10)
kable(most_repeated_in_song)
## ----fitdistr, comment=''------------------------------------------------
## Fit three candidate parametric families (beta, exponential, log-normal)
## to the observed state-mention rates so their fits can be compared via
## summary() output.
## beta boot for comparison
beta_fit <- fitdist(state_counts_zeros$rate,"beta") # best logLik
summary(beta_fit)
exp_fit <- fitdist(state_counts_zeros$rate,"exp") # exponential alternative
summary(exp_fit)
lnorm_fit <- fitdist(state_counts_zeros$rate,"lnorm") # log-normal alternative
summary(lnorm_fit)
## ----stan_optim, message=FALSE, warning=FALSE, comment='', include=TRUE, results="hide", cache = TRUE----
opt_chr1 <- "
data {
int<lower=0> N;
real x[N];
}
parameters {
real<lower = 0> alpha0;
real<lower = 0> beta0;
}
model {
alpha0 ~ normal(0, 1);
beta0 ~ normal(0, 10);
//target += beta_lpdf(x | alpha0, beta0); // same as below
x ~ beta(alpha0, beta0);
}
"
# initialize parameter values (based on knowledge or fitdist results)
init_list <- list(alpha0 = 0.1, beta0 = 1)
# compile model (~ 10 to 15 seconds)
opt_mod1 <- stan_model(model_code = opt_chr1)
# optimize data given model
opt1 <- optimizing(object = opt_mod1, as_vector = FALSE,
data = list(x = state_counts_zeros$rate,
N = length(state_counts_zeros$rate)),
hessian = TRUE,
draws = 2500)
## ----optim_results, comment=''-------------------------------------------
# view results
opt1$par
opt1$value #compare to LogLikelihood of summary(beta_fit)
## ----param_plot, comment='', fig.align="center"--------------------------
ggplot(data.frame(opt1$theta_tilde), aes(x = alpha0, y = beta0)) +
geom_point(color = "skyblue3", alpha = 0.35) +
geom_density2d(aes(colour =..level..)) +
scale_colour_gradient(low="gray80",high="firebrick") +
scale_x_continuous(breaks = seq(0,0.3,0.025)) +
scale_y_continuous(breaks = seq(0,7,0.5)) +
theme_bw() +
labs(x = "alpha0",
y = "beta0",
title = "Distribution of Alpha and Beta Shape Parameters",
subtitle = "2500 samples from MLE optimized beta model posterior") +
theme(
panel.border = element_rect(colour = "gray90"),
axis.text.x = element_text(size = 8, family = "Trebuchet MS"),
axis.text.y = element_text(size = 8, family = "Trebuchet MS"),
axis.title = element_text(size = 10, family = "Trebuchet MS"),
plot.caption = element_text(size = 7, hjust=0, margin=margin(t=5),
family = "Trebuchet MS"),
plot.title=element_text(family="TrebuchetMS-Bold"),
legend.position = "none",
panel.grid.minor = element_blank()
)
## ---- stan_fit, comment='', cache=TRUE-----------------------------------
model_string1_pred <- "
data {
int<lower=1> N;
vector[N] x;
int<lower=1> M;
vector[M] new_success;
vector[M] new_attempts;
}
parameters {
real<lower=0> alpha0;
real<lower=0> beta0;
}
model {
alpha0 ~ normal(0, 1);
beta0 ~ normal(0, 10);
x ~ beta(alpha0, beta0);
} generated quantities {
vector[M] x_tilde;
for (n in 1:M)
x_tilde[n] = beta_rng((new_success[n] + alpha0),
(new_attempts[n] - new_success[n] + beta0));
}
"
## ----sta_fit_data, message=FALSE, warning=FALSE, comment='', include=TRUE, results="hide"----
new_success = state_counts_zeros$n
new_attempts = (state_counts_zeros$pop2014)/100000
model_dat1_pred <- list(x = state_counts_zeros$rate,
N = length(state_counts_zeros$rate),
new_success = new_success,
new_attempts = new_attempts,
M = length(new_success))
fit1_pred <- stan(model_code = model_string1_pred,
data = model_dat1_pred,
iter = 10000, chains = 4, warmup=2500)
## ----print_dat1_model, comment=''----------------------------------------
fit1_pred_summary <- data.frame(summary(fit1_pred)[["summary"]]) %>%
rownames_to_column() %>%
mutate(Parameter = c("alpha0", "beta0",
as.character(state_counts_zeros$name), "lp__")) %>%
dplyr::select(Parameter, mean, sd, X2.5., X97.5., n_eff, Rhat) %>%
dplyr::rename(Mean = mean,
SD = sd,
`2.5%` = X2.5.,
`97.5%` = X97.5.)
kable(fit1_pred_summary, digits = 3)
## ----state_estimate, message=FALSE, warning=FALSE, comment='', fig.height=6, fig.width=6, fig.align="center"----
state_estimates <- rstan::extract(fit1_pred, pars = "x_tilde") %>%
data.frame() %>%
rename_(.dots=setNames(names(.),state_counts_zeros$state_name)) %>%
gather() %>%
dplyr::rename(state_name = key) %>%
group_by(state_name) %>%
dplyr::summarise(q025 = quantile(value, probs = 0.025),
q5 = quantile(value, probs = 0.5),
q975 = quantile(value, probs = 0.975),
mean = mean(value)) %>%
left_join(.,state_counts_zeros)
## ----post_pred_state_plot, comment=''------------------------------------
### could melt and add q025,q5,q975 by color/shape
### could also predict across range of rates and show areas
ggplot(state_estimates, aes(rate, mean)) +
geom_abline(intercept = 0, slope = 1, color = "gray70", linetype = 2) +
geom_point(size = 4, aes(color = n)) +
geom_text_repel(aes(label = abbrev), stat = "identity",
point.padding = unit(0.5, "lines"),
max.iter = 5000) +
scale_color_gradient(low = "midnightblue", high = "pink",
name="Number\nof songs") +
labs(title = "States in Song Lyrics with Empirical Bayes",
subtitle = "States like Montana and Hawaii (high rates, few mentions) are shifted the most",
x = "Measured rate of mentions per 100k population",
y = "Mean predicted rate per 100k population",
caption = "plot design by @juliasilge") +
theme_minimal(base_family = "Trebuchet MS") +
theme(plot.title=element_text(family="Trebuchet MS"))
## ----state_estim_plot, comment='', fig.height=8, fig.width=7, fig.align="center"----
state_estimates %>%
arrange(desc(mean)) %>%
mutate(state_name = factor(name, levels = rev(unique(name)))) %>%
dplyr::select(state_name, 'Measured rate' = rate,
'Bayesian estimate' = mean, q025, q975) %>%
gather(type, rate, `Measured rate`, `Bayesian estimate`) %>%
ggplot(aes(rate, state_name, color = type)) +
geom_errorbarh(aes(xmin = q025, xmax = q975), color = "gray50") +
geom_point(size = 3) +
xlim(0, NA) +
labs(x = "Rate of mentions per 100k population",
y = NULL, title = "Measured Rates, Bayesian Estimates (HMC), and 95% Credible Intervals",
subtitle = "Mention rate for states sorted by descending posterior mean",
caption = "plot design by @juliasilge") +
theme_minimal(base_family = "Trebuchet MS") +
theme(plot.title=element_text(family="Trebuchet MS", face = "bold")) +
theme(legend.title=element_blank())
## ----state_city_fit, comment=''------------------------------------------
new_success_SC = state_city_counts_zeros$n_city_state
new_attempts_SC = (state_city_counts_zeros$pop2014)/100000
model_SC_pred <- list(x = state_city_counts_zeros$city_state_rate,
N = length(state_city_counts_zeros$city_state_rate),
new_success = new_success_SC,
new_attempts = new_attempts_SC,
M = length(new_success_SC))
## ----fit_SC_model, echo=TRUE, message=FALSE, warning=FALSE, cache=TRUE, comment='', results="hide"----
fit_SC_pred <- stan(model_code = model_string1_pred,
data = model_SC_pred,
iter = 10000, chains = 4, warmup=2500)
## ----print_model_SC_pred, comment=''-------------------------------------
fit_SC_pred_summary <- data.frame(summary(fit_SC_pred)[["summary"]]) %>%
rownames_to_column() %>%
mutate(Parameter = c("alpha0", "beta0",
as.character(state_counts_zeros$name), "lp__")) %>%
dplyr::select(Parameter, mean, sd, X2.5., X97.5., n_eff, Rhat) %>%
dplyr::rename(Mean = mean,
SD = sd,
`2.5%` = X2.5.,
`97.5%` = X97.5.)
kable(fit_SC_pred_summary, digits = 3)
## ----state_city_estimates, comment=''------------------------------------
state_city_estimates <- rstan::extract(fit_SC_pred, pars = "x_tilde") %>%
data.frame() %>%
rename_(.dots=setNames(names(.),state_city_counts_zeros$state_name)) %>%
gather() %>%
dplyr::rename(state_name = key) %>%
group_by(state_name) %>%
dplyr::summarise(q025 = quantile(value, probs = 0.025),
q5 = quantile(value, probs = 0.5),
q975 = quantile(value, probs = 0.975),
mean = mean(value)) %>%
left_join(.,state_city_counts_zeros)
## ----state_city_dot_plot, comment='', fig.height=6, fig.width=6, fig.align="center"----
### could melt and add q025,q5,q975 by color/shape
### could also predict across range of rates and show areas
ggplot(state_city_estimates, aes(city_state_rate, mean)) +
geom_abline(intercept = 0, slope = 1, color = "gray70", linetype = 2) +
geom_point(size = 4, aes(color = n_city_state)) +
geom_text_repel(aes(label = abbrev), stat = "identity",
point.padding = unit(0.5, "lines"),
max.iter = 5000) +
scale_color_gradient(low = "midnightblue", high = "pink",
name="Number\nof songs") +
labs(title = "States & Cities in Song Lyrics Modeled with Bayes (HMC)",
subtitle = "States like Nebraska and Hawaii (high rates, few mentions) are shifted the most",
x = "Measured rate of mentions per 100k population",
y = "Mean predicted rate per 100k population",
caption = "plot design by @juliasilge") +
theme_minimal(base_family = "Trebuchet MS") +
theme(plot.title=element_text(family="Trebuchet MS"))
## ----state_city_range_plot, comment='', fig.height=8, fig.width=7, fig.align="center"----
### range estiamtes plot
state_city_estimates %>%
arrange(desc(mean)) %>%
mutate(state_name = factor(name, levels = rev(unique(name)))) %>%
dplyr::select(state_name, 'Measured rate' = city_state_rate,
'Bayesian estimate' = mean, q025, q975) %>%
gather(type, city_state_rate, `Measured rate`, `Bayesian estimate`) %>%
ggplot(aes(city_state_rate, state_name, color = type)) +
geom_errorbarh(aes(xmin = q025, xmax = q975), color = "gray50") +
geom_point(size = 3) +
xlim(0, NA) +
labs(x = "Rate of mentions per 100k population",
y = NULL, title = "Measured Rates, Bayesian Estimates (HMC), and 95% Credible Intervals",
subtitle = "Mention rate for states & cities sorted by descending posterior mean",
caption = "plot design by @juliasilge") +
theme_minimal(base_family = "Trebuchet MS") +
theme(plot.title=element_text(family="Trebuchet MS", face = "bold")) +
theme(legend.title=element_blank())
## ----model_RMSE, comment=''----------------------------------------------
city_state_error <- state_city_estimates %>%
mutate(rate_error = mean - city_state_rate,
pred_mentions = round(mean * (pop2014/100000),1),
mention_error = n_city_state - pred_mentions,
CI_width = q975 - q025) %>%
dplyr::summarise(RMSE_rate = sqrt(mean(rate_error^2)),
MAE_rate = mean(abs(rate_error)),
mean_CI = mean(CI_width),
RMSE_mentions = sqrt(mean(mention_error^2)),
MAE_mentions = mean(abs(mention_error))) %>%
as.numeric()
state_error <- state_estimates %>%
mutate(rate_error = mean - rate,
pred_mentions = round(mean * (pop2014/100000),1),
mention_error = n - pred_mentions,
CI_width = q975 - q025) %>%
dplyr::summarise(RMSE_rate = sqrt(mean(rate_error^2)),
MAE_rate = mean(abs(rate_error)),
median_CI = median(CI_width),
RMSE_mentions = sqrt(mean(mention_error^2)),
MAE_mentions = mean(abs(mention_error))) %>%
as.numeric()
#print
model_rmse <- data.frame(model = c("States Only", "City and States"),
RMSE_rate = c(state_error[1], city_state_error[1]),
MAE_rate = c(state_error[2], city_state_error[2]),
Median_CI = c(state_error[3], city_state_error[3]),
RMSE_mentions = c(state_error[4], city_state_error[4]),
MAE_mentions = c(state_error[5], city_state_error[5]))
kable(model_rmse, digits = 3)
## ----purl, comment=''----------------------------------------------------
rmd_loc <- "/Users/mattharris/Documents/R_Local/Put_a_prior_on_it-Blog_post/"
purl(input = paste0(rmd_loc, "README.Rmd"),
output = paste0(rmd_loc, "R_script/README_r_script.R"))
## ----session_info, comment=''--------------------------------------------
sessionInfo()
|
6e41b07d46e4bc4369cbc1e65a81bcd9436d03ee
|
6c03c37bacf9b3ffdda05d6a62f05a66fdfad1ec
|
/drake/functions/documents/my_generate.R
|
52e3518678df7663266658b8fde95fcc5fd925a7
|
[] |
no_license
|
GiuseppeTT/me812
|
dfe3c4d36265626bfad63840eb6b43e2c61d0223
|
b850a6c8a740a1de30d1877d7e87963953195321
|
refs/heads/main
| 2023-02-14T20:20:00.691766
| 2021-01-06T19:46:31
| 2021-01-06T19:46:31
| 307,148,408
| 0
| 0
| null | 2021-01-06T19:46:32
| 2020-10-25T17:02:02
|
R
|
UTF-8
|
R
| false
| false
| 347
|
r
|
my_generate.R
|
# Render a Sweave source file into a report document.
# Thin wrapper that forwards source/output paths, the parameter list, and any
# extra arguments unchanged to my_render_sweave().
my_generate_report <- function(source_path, output_path, parameters, ...) {
  my_render_sweave(source_path, output_path, parameters, ...)
}
# Render a Sweave source file into a presentation document.
# Same forwarding wrapper pattern as the report generator, except the
# my_render_sweave() call pins engine = "xelatex".
my_generate_presentation <- function(source_path, output_path, parameters, ...) {
  my_render_sweave(
    source_path,
    output_path,
    parameters,
    engine = "xelatex",
    ...
  )
}
|
c01c452fd198eba22d14568c1029da7454f59a9e
|
d9d00a88f700653b2c4af2b1845f82dd970d0915
|
/Dx bias results.R
|
361be4ec75552eaf737111af23c969e40b37a247
|
[] |
no_license
|
Mayeda-Research-Group/CancerAD-diagnosissims
|
f2c631255261634aa25cec11e09b9cde0e5ad475
|
cd6c5e5b14d096f96cadd1f3d88c87b47af79dd8
|
refs/heads/main
| 2023-03-17T04:04:05.758211
| 2023-03-09T21:14:41
| 2023-03-09T21:14:41
| 326,770,344
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,109
|
r
|
Dx bias results.R
|
#Examining simulation results
#---- Package Loading ----
# Bootstrap pacman: install it on first use, then attach it explicitly.
# require() returns FALSE when the package is absent and does NOT load a
# package that was only just installed, so without the library() call the
# p_load() below would fail on a fresh machine.
if (!require("pacman")){
  install.packages("pacman", repos = 'http://cran.us.r-project.org')
  library("pacman")
}
# Install (if needed) and attach all analysis dependencies in one step.
p_load("here", "tidyverse", "ggplot2", "magrittr", "foreign", "deSolve",
       "numDeriv", "pracma", "dplyr", "RColorBrewer")
# Load sim results
load(here("Output", "Allsims_dxbias.Rdata"))
results<-data.frame(t(data.frame(Allsims_dxbias)))
exampledata<-data.frame(results[results$cancertype=="all" &
results$Incr_contact==1 &
results$Decr_demdx==1 &
results$Mortmultfactor==0.5 &
results$Demdxwaittime==1.5,"outdata"])
colnames(exampledata)<-substr(colnames(exampledata),6,10000) #fixing col names
exampledata$C0D0<-exampledata$C0D0S0+exampledata$C0D0S1
exampledata$C0D1<-exampledata$C0D1S0+exampledata$C0D1S1
exampledata$C0D2<-exampledata$C0D2S0+exampledata$C0D2S1
exampledata$C1D0<-exampledata$C1D0S0+exampledata$C1D0S1
exampledata$C1D1<-exampledata$C1D1S0+exampledata$C1D1S1
exampledata$C1D2<-exampledata$C1D2S0+exampledata$C1D2S1
exampledata$D0<-exampledata$C0D0+exampledata$C1D0
exampledata$D1<-exampledata$C0D1+exampledata$C1D1
exampledata$D2<-exampledata$C0D2+exampledata$C1D2
test2<-c("time","DEMENTIA", "DEMENTIAdx","DEAD")
to.plot2<-gather(exampledata[,test2], "state","number",-time)
#plot cohort over time.
Cohort_buildfinal2<-ggplot(to.plot2,aes(time+65,number,
group=factor(state, levels=c("DEAD", "DEMENTIA", "DEMENTIAdx")),
color=factor(state, levels=c("DEAD", "DEMENTIA", "DEMENTIAdx"))))+
geom_line(size=2) + ylab("Cumulative proportion of cohort experiencing \ndementia, dementia diagnosis, and death")+theme_bw()+
scale_x_continuous(breaks=c(65, 75, 85, 95))+
scale_color_manual(name=NULL,values=c("darkred","lightblue", "blue"),
labels=c("Death",
"Dementia",
"Diagnosed dementia"))+
xlab("Age")+theme(legend.position = c(0.8,0.5),
legend.text = element_text(size=16),
axis.text.x = element_text(size=20),
axis.text.y = element_text(size=20),
axis.title.x = element_text(size=20, face="bold"),
axis.title.y = element_text(size=20, face="bold"))
Cohort_buildfinal2
#need to add save of this figure:
ggsave("C:/Users/ehlarson/MHL Dropbox/Eleanor Hayes-Larson/UCLA/Eleanor_ERM/Cancer_AD sims/Github repo/Output/Dx bias figures/Figure1.jpg",
device="jpg", plot=Cohort_buildfinal2, dpi="retina")
#Calculate cumulative proportion dead, dementia, and dementia dx at 30 years
exampledata[exampledata$time==30, c("DEMENTIA", "DEMENTIAdx", "DEAD")]
exampledata[exampledata$time==30,"DEMENTIAdx"]/exampledata[exampledata$time==30,"DEMENTIA"]
#Main results
results<-results[,1:26]
results$dxbias<-as.numeric(results$Incr_contact)*as.numeric(results$Decr_demdx)
#print main results
basecase_nondiff<-results[results$Incr_contact==1 &
results$Decr_demdx==1 & results$Demdxwaittime==1.5 &
results$Mortmultfactor==.5,]
print(basecase_nondiff[,c("cancertype", "Demdx_IRR_PTcalc")])
#Input Ording et al. data
ording_AD<-data.frame(cancertype=c("All", "Lung", "Breast","Prostate"),
Est = c(0.94, 0.84, 0.95, 0.96),
LCI = c(0.92,0.72, 0.91, 0.89),
UCI = c(0.96, 0.97, 1.00,1.03),
Data = rep("Ording et al. (AD only)",4)
)
ording_all<-data.frame(cancertype=c("All", "Lung", "Breast","Prostate"),
Est = c(0.96, 1.12, 1.00, 0.97),
LCI = c(0.95, 1.04, 0.97, 0.93),
UCI = c(0.97, 1.22, 1.03,1.01),
Data = rep("Ording et al. (All-cause dementia)",4)
)
#Need to check with Monica if I shoudl use All cancer types , or summary HR for all studies
ospina<-data.frame(cancertype=c("All", "Breast","Prostate"),
Est = c(0.81, 0.93, 0.99),
LCI = c(0.70, 0.87, 0.87),
UCI = c(0.94, 0.98, 1.13),
Data = rep("Ospina-Romero et al.",3)
)
#MSE to determine best-fit models
sim_res_dxbias<-results %>% filter(Demdxwaittime==1.5, Mortmultfactor==0.5) %>% mutate(cancertype=factor(str_to_sentence(cancertype)))
obs_data<-rbind(ording_AD, ording_all, ospina)
obs_data$cancertype <- factor(obs_data$cancertype)
sim_obs_merge<-full_join(sim_res_dxbias, obs_data, by = "cancertype")
sim_obs_merge$sim_obs_diffsq<-(as.numeric(sim_obs_merge$Demdx_IRR_PTcalc)-as.numeric(sim_obs_merge$Est))^2
bestfit<-sim_obs_merge %>% filter(cancertype!="Lung") %>% group_by(cancertype, dxbias) %>% summarize(MSE=weighted.mean(sim_obs_diffsq, c(0.5, 0.5, 1)))
bestfit2<-sim_obs_merge %>% filter(cancertype=="Lung") %>% group_by(cancertype, dxbias) %>% summarize(MSE=weighted.mean(sim_obs_diffsq, c(0.5, 0.5)))
bestfit3<-rbind(bestfit, bestfit2)
bestfit4<-bestfit3 %>% group_by(cancertype) %>% filter(MSE==min(MSE))
#Format results for plotting
Nondif_delay<-results %>% filter(Decr_demdx==1 & Incr_contact==1 &
Mortmultfactor==0.5 & Demdxwaittime == 1.5) %>%
mutate(cancertype=str_to_sentence(cancertype), Est=Demdx_IRR_PTcalc, LCI=NA, UCI=NA, Data = "Non-differential Delay simulation") %>%
select(cancertype, Est, LCI, UCI, Data)
Dif_delay<-left_join(bestfit4, sim_res_dxbias, by=c("cancertype", "dxbias"))
Dif_delay<-Dif_delay %>% mutate(cancertype=str_to_sentence(cancertype),
Est=Demdx_IRR_PTcalc, LCI=NA, UCI=NA,
Data = "Best-match Differential Delay simulation") %>%
select(cancertype, Est, LCI, UCI, Data)
Overlaydata<-rbind(obs_data, Nondif_delay,
Dif_delay)
Overlaydata$Data=factor(Overlaydata$Data, levels=c("Non-differential Delay simulation",
"Best-match Differential Delay simulation",
"Ospina-Romero et al.",
"Ording et al. (AD only)",
"Ording et al. (All-cause dementia)"))
#Plot
overlay<-ggplot(data=Overlaydata, aes(x=1,
group=Data, color=Data, shape=Data))+
geom_pointrange(aes(y=as.numeric(Est), ymin=LCI, ymax=UCI), size=1.5, position=position_dodge(.5))+
scale_shape_manual(name="",values=c(17, 15, 16, 16, 16))+
scale_color_manual(name="", values=c("dodgerblue3", "dodgerblue4", "orange", "tomato3", "tomato4"))+
scale_x_discrete(labels=c("All", "Breast", "Prostate", "Lung"))+
scale_y_continuous(breaks=c(0.7, 0.8, 0.9, 1.0, 1.1, 1.2))+
geom_hline(yintercept=1, colour="black", lwd=1) +
theme_bw()+facet_grid(~factor(unlist(cancertype),
levels=c("All", "Breast","Prostate", "Lung")))+
theme(legend.position = "bottom",
panel.grid.minor = element_blank(),
legend.text = element_text(size=16),
axis.text.x = element_text(size=20),
axis.text.y = element_text(size=20),
axis.title.x = element_text(size=20, face="bold"),
axis.title.y = element_text(size=20, face="bold"),
strip.text.x = element_text(size=16))+
ylab("Effect estimate")+xlab("Cancer type")+
guides(color=guide_legend(nrow=5), shape=guide_legend(nrow=5))
overlay
ggsave("C:/Users/ehlarson/MHL Dropbox/Eleanor Hayes-Larson/UCLA/Eleanor_ERM/Cancer_AD sims/Github repo/Output/Dx bias figures/Figure2.jpg",
device="jpg", plot=overlay, dpi="retina")
#Plot sensitivity analyses
# ---- Figure 5A: sensitivity without diagnostic bias ----
# Observed IRR vs. average dementia-diagnosis delay, one line per
# mortality-interaction factor; restricted to "all" cancers with no
# change in contact (Incr_contact==1) and no dx-rate decrease
# (Decr_demdx==1), i.e. no diagnostic bias.
nodxbias<-ggplot(data=results[results$cancertype=="all" & results$Incr_contact==1 &results$Decr_demdx==1,])+
  geom_point(aes(x=unlist(Demdxwaittime), y=unlist(Demdx_IRR_PTcalc),
                 group=factor(unlist(Mortmultfactor)),
                 color=factor(unlist(Mortmultfactor))),
             size=2)+
  geom_line(aes(x=unlist(Demdxwaittime), y=unlist(Demdx_IRR_PTcalc),
                group=factor(unlist(Mortmultfactor)),
                color=factor(unlist(Mortmultfactor))),
            size=2)+
  ylim(0.7,1.1)+
  scale_x_continuous(breaks=seq(0,2.1,0.5), limits=c(0,2.1))+
  xlab("Average dementia diagnosis delay (years)\n among those without cancer history")+ ylab("Observed IRR for dementia diagnosis \n(cancer history vs. no cancer history)")+
  geom_hline(yintercept=1, colour="black", lwd=1) +
  scale_color_manual(name="Interaction effect of cancer and dementia \non mortality rate (% of multiplicative)"
                     ,values=brewer.pal(6,"Blues")[2:6],
                     labels=c("40%", "50%", "60%", "70%", "80%"))+
  theme_bw()+
  theme(legend.position = c(0.2,0.2),
        axis.text.x = element_text(size=18),
        axis.text.y = element_text(size=18),
        axis.title.x = element_text(size=18, face="bold"),
        axis.title.y = element_text(size=18, face="bold"),
        legend.text = element_text(size=16)
  )
nodxbias
# NOTE(review): hard-coded absolute output path.
ggsave("C:/Users/ehlarson/MHL Dropbox/Eleanor Hayes-Larson/UCLA/Eleanor_ERM/Cancer_AD sims/Github repo/Output/Dx bias figures/Figure5A.jpg",
       device="jpg", plot=nodxbias, dpi="retina")
# ---- Figure 5B: same sensitivity plot, WITH diagnostic bias ----
# Identical to Figure 5A except the dementia-diagnosis rate is reduced
# by 20% among those with cancer history (Decr_demdx==0.8).
withdxbias<-ggplot(data=results[results$cancertype=="all" & results$Incr_contact==1 &results$Decr_demdx==0.8,])+
  geom_point(aes(x=unlist(Demdxwaittime), y=unlist(Demdx_IRR_PTcalc),
                 group=factor(unlist(Mortmultfactor)),
                 color=factor(unlist(Mortmultfactor))),
             size=2)+
  geom_line(aes(x=unlist(Demdxwaittime), y=unlist(Demdx_IRR_PTcalc),
                group=factor(unlist(Mortmultfactor)),
                color=factor(unlist(Mortmultfactor))),
            size=2)+
  ylim(0.7,1.1)+
  scale_x_continuous(breaks=seq(0,2.1,0.5), limits=c(0,2.1))+
  xlab("Average dementia diagnosis delay (years)\n among those without cancer history")+ ylab("Observed IRR for dementia diagnosis \n(cancer history vs. no cancer history)")+
  geom_hline(yintercept=1, colour="black", lwd=1) +
  scale_color_manual(name="Interaction effect of cancer and dementia \non mortality rate (% of multiplicative)"
                     ,values=brewer.pal(6,"Blues")[2:6],
                     labels=c("40%", "50%", "60%", "70%", "80%"))+
  theme_bw()+
  theme(legend.position = c(0.2,0.2),
        axis.text.x = element_text(size=18),
        axis.text.y = element_text(size=18),
        axis.title.x = element_text(size=18, face="bold"),
        axis.title.y = element_text(size=18, face="bold"),
        legend.text = element_text(size=16)
  )
withdxbias
# NOTE(review): hard-coded absolute output path.
ggsave("C:/Users/ehlarson/MHL Dropbox/Eleanor Hayes-Larson/UCLA/Eleanor_ERM/Cancer_AD sims/Github repo/Output/Dx bias figures/Figure5B.jpg",
       device="jpg", plot=withdxbias, dpi="retina")
#Sensitivity to dx bias parameters
# ---- Figure 4: observed IRR vs. relative dx rate, by cancer type ----
# Fix the mortality interaction (0.5) and baseline delay (1.5 y) and
# vary the diagnostic-bias parameter (dxbias) across cancer types.
results_dxbiasplot<-results[results$Mortmultfactor==0.5 & results$Demdxwaittime == 1.5,]
# Control the legend/line order of cancer types.
results_dxbiasplot$cancertype<-factor(results_dxbiasplot$cancertype, levels = c("breast", "prostate", "all", "lung"))
dxbiassens<-ggplot(data=results_dxbiasplot)+
  geom_point(aes(x=unlist(dxbias), y=unlist(Demdx_IRR_PTcalc),
                 group=factor(unlist(cancertype)), color=factor(unlist(cancertype))), size=2)+
  geom_line(aes(x=unlist(dxbias), y=unlist(Demdx_IRR_PTcalc),
                group=factor(unlist(cancertype)), color=factor(unlist(cancertype))), size=2)+
  #ylim(0.7,1)+
  xlab("Relative rate of dementia diagnosis in those with vs. without cancer history")+ ylab("Observed IRR for dementia diagnosis \n(cancer history vs. no cancer history)")+
  geom_hline(yintercept=1, colour="black", lwd=1) +
  geom_vline(xintercept=1, colour="black", lwd=1) +
  theme_bw()+
  labs(color="Cancer type")+scale_color_discrete(name = "Cancer type", labels=c("Breast", "Prostate", "All", "Lung"))+
  scale_x_continuous(breaks=c(0.5, .7, .9, 1.0, 1.1, 1.3, 1.5))+
  theme(axis.text.x = element_text(size=18),
        axis.text.y = element_text(size=18),
        axis.title.x = element_text(size=18, face="bold"),
        axis.title.y = element_text(size=18, face="bold"),
        legend.position = c(0.8, 0.2),
        legend.text = element_text(size=16),
        legend.title = element_text(size=20)
  )
dxbiassens
# NOTE(review): hard-coded absolute output path.
ggsave("C:/Users/ehlarson/MHL Dropbox/Eleanor Hayes-Larson/UCLA/Eleanor_ERM/Cancer_AD sims/Github repo/Output/Dx bias figures/Figure4.jpg",
       device="jpg", plot=dxbiassens, dpi="retina")
#Framed as extra average delay
# ---- Figure 4 (alt): x-axis re-expressed as months of extra delay ----
# Convert the relative dx rate into the implied difference (in months)
# in average diagnosis delay between cancer and non-cancer groups:
# delay_with_cancer = 1 / (dxbias / baseline_delay); difference * 12.
results_dxbiasplot$diff_delay<-((1/(results_dxbiasplot$dxbias*(1/as.numeric(results_dxbiasplot$Demdxwaittime))))-as.numeric(results_dxbiasplot$Demdxwaittime))*12
results_dxbiasplot$cancertype<-factor(results_dxbiasplot$cancertype, levels = c("breast", "prostate", "all", "lung"))
dxbiassens_v2<-ggplot(data=results_dxbiasplot)+
  geom_point(aes(x=unlist(diff_delay), y=unlist(Demdx_IRR_PTcalc),
                 group=factor(unlist(cancertype)), color=factor(unlist(cancertype))), size=2)+
  geom_line(aes(x=unlist(diff_delay), y=unlist(Demdx_IRR_PTcalc),
                group=factor(unlist(cancertype)), color=factor(unlist(cancertype))), size=2)+
  #ylim(0.7,1)+
  xlab("Difference in average dementia diagnosis delay between\n those with and without cancer (months)")+ ylab("Observed IRR for dementia diagnosis \n(cancer history vs. no cancer history)")+
  scale_x_continuous(breaks=seq(-5,15,5))+
  geom_hline(yintercept=1, colour="black", lwd=1) +
  geom_vline(xintercept=0, colour="black", lwd=1) +
  theme_bw()+
  labs(color="Cancer type")+scale_color_discrete(name = "Cancer type", labels=c("Breast", "Prostate", "All", "Lung"))+
  theme(axis.text.x = element_text(size=18),
        axis.text.y = element_text(size=18),
        axis.title.x = element_text(size=18, face="bold"),
        axis.title.y = element_text(size=18, face="bold"),
        legend.position = c(0.8, 0.7),
        legend.text = element_text(size=16),
        legend.title = element_text(size=20)
  )
dxbiassens_v2
# NOTE(review): hard-coded absolute output path.
ggsave("C:/Users/ehlarson/MHL Dropbox/Eleanor Hayes-Larson/UCLA/Eleanor_ERM/Cancer_AD sims/Github repo/Output/Dx bias figures/Figure4_v2.jpg",
       device="jpg", plot=dxbiassens_v2, dpi="retina")
|
6aae2616ea250366130a7f35f4399fbd08251675
|
375b5316501a3557b3984c83ca31efa9a3d34d96
|
/ksm/day04/한국복지패널.R
|
901187e9eacf0d2c9a831f88ee8a18735e6042ac
|
[] |
no_license
|
ssemto/ksm0487
|
58a00bde1d592cfaf34c1cf99e62cc87cd96a019
|
9f51a33e4560680ba8a0eda20c53b3edbc0eebe5
|
refs/heads/master
| 2020-04-24T21:57:05.235222
| 2019-03-03T08:59:29
| 2019-03-03T08:59:29
| 172,295,250
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,855
|
r
|
한국복지패널.R
|
# Korea Welfare Panel Study (Koweps, 2015) analysis: income by gender,
# age, age group, and occupation.
# NOTE(review): install.packages() at the top of a script re-installs on
# every run; kept from the original, but consider running once manually.
install.packages("foreign")
library(foreign)
library(dplyr)
library(ggplot2)
install.packages("readxl")
library(readxl)

# Load the SPSS file as a plain data.frame.
raw_welfare <- read.spss(file = "C:/ksm/day04/Koweps_hpc10_2015_beta1.sav", to.data.frame = T)
welfare <- raw_welfare
View(welfare)
head(welfare)
summary(welfare)

# Rename cryptic survey variable codes to readable names.
welfare <- rename(welfare,
                  gender = h10_g3,
                  birth = h10_g4,
                  marriage = h10_g10,
                  religion = h10_g11,
                  income = p1002_8aq1,
                  code_job=h10_eco9,
                  code_region=h10_reg7
)
head(welfare$gender)
head(welfare$marriage)
# (FIX: removed head(welfare$h_new) — there is no h_new column)
head(welfare$income)
head(welfare$birth)

## Gender --------------------------------------------------------------
count(welfare, gender)
welfare$gender
# Gender code 9 = "no answer" -> treat as missing.
welfare$gender <- ifelse(welfare$gender == 9, NA, welfare$gender)
table(is.na(welfare$gender))
# FIX: original called the non-existent function ifels().
welfare$gender <- ifelse(welfare$gender == 1, "남", "여")
table(welfare$gender)
qplot(welfare$gender)

## Income --------------------------------------------------------------
class(welfare$income)
summary(welfare$income)
qplot(welfare$income) + xlim(0,1000)
table(is.na(welfare$income))
# Income codes 0 and 9999 = not applicable / no answer -> missing.
welfare$income <- ifelse(welfare$income %in% c(0,9999),NA, welfare$income)
table(is.na(welfare$income))

# Mean income by gender:
# 1. drop NA before the analysis
# 2. group by gender (dplyr)
# 3. take the mean
gender_income <- welfare %>%
  filter(!is.na(income)) %>%
  group_by(gender) %>%
  summarise(mean_income = mean(income))
gender_income
ggplot(data=gender_income, aes(x=gender, y=mean_income)) + geom_col()

qplot(welfare$birth)

## Age -----------------------------------------------------------------
# Remove NA from income; mean income by age; visualize as a time series
# (geom_line()).
class(welfare$birth)
summary(welfare$birth)
qplot(welfare$birth)
# Birth-year code 9999 = "no answer" -> missing.
welfare$birth <- ifelse(welfare$birth == 9999, NA, welfare$birth)
table(is.na(welfare$birth))
# Korean-style age at survey year 2015.
welfare$age <- 2015 - welfare$birth + 1
summary(welfare$age)
qplot(welfare$age)

# FIX: original wrote group_by(mean_income = mean(income)) — grouped by
# the summary expression instead of grouping by age and summarising.
age_income <- welfare %>%
  filter(!is.na(income)) %>%
  group_by(age) %>%
  summarise(mean_income = mean(income))
head(age_income)
ggplot(data = age_income, aes(x=age, y=mean_income)) + geom_line()

## Age group (young / middle / old) ------------------------------------
# Mean income by age group.
welfare <- welfare %>%
  mutate(age2 = ifelse(age < 30, "young",
                       ifelse(age <= 59, "middle", "old")))
table(welfare$age) # frequency table
summary(welfare$age)
qplot(welfare$age)

# FIX: same group_by/summarise mistake as above.
age2_income <- welfare %>%
  filter(!is.na(income)) %>%
  group_by(age2) %>%
  summarise(mean_income3 = mean(income))
age2_income
# FIX: original used qqplot() (the base stats Q-Q plot, wrong signature)
# and the undefined object age_income2.
ggplot(data=age2_income, aes(x=age2, y=mean_income3)) + geom_col() +
  scale_x_discrete(limits = c("young", "middle", "old"))

## Age group + gender --------------------------------------------------
age3_income <- welfare %>%
  filter(!is.na(income)) %>%
  group_by(age2, gender) %>%
  summarise(mean_income4 = mean(income))
age3_income
# FIX: qqplot -> ggplot, and age3_income's column is mean_income4,
# not mean_income3.
ggplot(data=age3_income, aes(x=age2, y=mean_income4, fill=gender)) + geom_col() +
  scale_x_discrete(limits = c("young", "middle", "old"))

# Trend of mean income over age by gender (for a time series use "col="
# instead of the fill option).
# FIX: the data has no `sex` column; it was renamed to `gender` above.
sex_income <- welfare %>%
  filter(!is.na(income)) %>%
  group_by(age, gender) %>%
  summarise(mean_income = mean(income))
sex_income

age4_income <- welfare %>%
  filter(!is.na(income)) %>%
  group_by(age, gender) %>%
  summarise(mean_income5 = mean(income))
age4_income

## Occupation ----------------------------------------------------------
library(readxl)
list_job <- read_excel("C:/ksm/day04/Koweps_Codebook.xlsx", col_names = T, sheet = 2)
head(welfare$code_job)
dim(list_job)
# FIX: left_join's key argument is `by`, not `id` (the original silently
# fell back to a natural join on all common columns).
welfare <- left_join(welfare, list_job, by = "code_job")
head(welfare$job)

age5_income <- welfare %>%
  filter(!is.na(income) & !is.na(job)) %>%
  group_by(job) %>%
  summarise(mean_income6 = mean(income))
head(age5_income)

# Top 10 highest-paying occupations.
age5_income %>%
  arrange(desc(mean_income6)) %>%
  head(10)
|
fd28e7ac717c42eff34927cf25da5b63a5789b71
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/fitdistrplus/tests/ppcomp.R
|
d45e7be2fdb3b9463d509df084d1927991ca0176
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,557
|
r
|
ppcomp.R
|
# Test script exercising fitdistrplus::ppcomp() (P-P plot comparison of
# fitted distributions) under many argument combinations.
library(fitdistrplus)
# ?cdfcomp
# (1) Plot various distributions fitted to serving size data
#
data(groundbeef)
serving <- groundbeef$serving
fitW <- fitdist(serving,"weibull")
fitln <- fitdist(serving,"lnorm")
fitg <- fitdist(serving,"gamma")
#sanity checks
# Invalid inputs (a string; a list with a non-fitdist element) should
# raise errors, swallowed here by try().
try(ppcomp("list(fitW, fitln, fitg)"), silent=TRUE)
try(ppcomp(list(fitW, fitln, fitg, a=1)), silent=TRUE)
#real call
ppcomp(list(fitW, fitln, fitg))
# Exercise labels, limits, log scales and custom plotting characters.
ppcomp(list(fitW, fitln, fitg), legendtext=c("Weibull","lognormal","gamma"),
       main="ground beef fits", xlab="Theo.",
       ylab="serving sizes (g)", xlim = c(0, 1/2))
ppcomp(list(fitW, fitln, fitg), legendtext=c("Weibull","lognormal","gamma"),
       main="ground beef fits", xlab="Theo.",
       ylab="serving sizes (g)", xlogscale=TRUE, line01=FALSE)
ppcomp(list(fitW, fitln, fitg), legendtext=c("Weibull","lognormal","gamma"),
       main="ground beef fits", xlab="Theo.",
       ylab="serving sizes (g)", ylogscale=TRUE, line01=FALSE)
ppcomp(list(fitW, fitln, fitg), legendtext=c("Weibull","lognormal","gamma"),
       main="ground beef fits", ylim=c(1e-3, 1), xlim=c(1e-3, 1),
       fitpch=c("+", "-", "."))
# (2) Plot lognormal distributions fitted by
# maximum goodness-of-fit estimation
# using various distances (data plotted in log scale)
#
data(endosulfan)
ATV <-subset(endosulfan, group == "NonArthroInvert")$ATV
flnMGEKS <- fitdist(ATV,"lnorm",method="mge",gof="KS")
flnMGEAD <- fitdist(ATV,"lnorm",method="mge",gof="AD")
flnMGEADL <- fitdist(ATV,"lnorm",method="mge",gof="ADL")
flnMGEAD2L <- fitdist(ATV,"lnorm",method="mge",gof="AD2L")
llfit <- list(flnMGEKS, flnMGEAD, flnMGEADL, flnMGEAD2L)
ppcomp(list(flnMGEKS, flnMGEAD, flnMGEADL, flnMGEAD2L),
       main="fits of a lognormal dist. using various GOF dist.")
ppcomp(list(flnMGEKS, flnMGEAD, flnMGEADL, flnMGEAD2L), xlegend="topleft",
       xlogscale=TRUE, main="fits of a lognormal dist. using various GOF dist.",
       legendtext=c("MGE KS","MGE AD","MGE ADL","MGE AD2L"))
ppcomp(list(flnMGEKS, flnMGEAD, flnMGEADL, flnMGEAD2L), xlegend="topleft",
       xlogscale=TRUE, main="fits of a lognormal dist. using various GOF dist.",
       legendtext=c("MGE KS","MGE AD","MGE ADL","MGE AD2L"),
       fitcol=c("black", "darkgreen", "yellowgreen", "yellow2"))
ppcomp(list(flnMGEKS, flnMGEAD, flnMGEADL, flnMGEAD2L), ynoise=FALSE,
       xlogscale=TRUE, ylogscale=TRUE, xlim=c(1e-3,1), ylim=c(1e-3,1))
# Single fitdist object (not wrapped in a list) is also accepted.
ppcomp(flnMGEKS)
# (3) Plot lognormal distributions fitted by
# maximum goodness-of-fit estimation
# using various distances (data plotted in log scale)
#
# NOTE(review): this header looks copy-pasted from section (2); the code
# below actually fits normal, Gumbel and exponential distributions.
x1 <- c(6.4,13.3,4.1,1.3,14.1,10.6,9.9,9.6,15.3,22.1,13.4,
        13.2,8.4,6.3,8.9,5.2,10.9,14.4)
n1 <- length(x1)
# Gumbel CDF with location `a` and scale `b` (needed because base R has
# no Gumbel distribution; fitdist looks it up by name).
pgumbel <- function(q, a, b) {
  z <- (a - q) / b
  exp(-exp(z))
}
# Gumbel density with location `a` and scale `b`, written via the
# standardized variable z = (a - x) / b: f(x) = exp(z - exp(z)) / b.
dgumbel <- function(x, a, b) {
  z <- (a - x) / b
  exp(z - exp(z)) / b
}
# Fit normal, Gumbel and exponential by ML (mledist returns raw
# estimates, no fitdist object).
f1 <- mledist(x1,"norm")
f2 <- mledist(x1,"gumbel",start=list(a=10,b=5))
f3 <- mledist(x1, "exp")
# Hand-rolled P-P plot: theoretical CDF at the sorted data vs. empirical
# probabilities i/n.
plot(pnorm(sort(x1), f1$estimate[1], f1$estimate[2]), 1:n1/n1)
points(pgumbel(sort(x1), f2$estimate[1], f2$estimate[2]), 1:n1/n1, col="red")
points(pexp(sort(x1), f3$estimate[1]), 1:n1/n1, col="green")
# NOTE(review): legend colors (red/green/blue) do not match the plotted
# point colors (black/red/green) — likely a copy-paste slip.
legend("bottomright", lty=1, leg=c("Normal","Gumbel","Exp"), col=c("red","green","blue"))
# Same three fits via fitdist, compared with ppcomp.
f1 <- fitdist(x1,"norm")
f2 <- fitdist(x1,"gumbel",start=list(a=10,b=5))
f3 <- fitdist(x1, "exp")
ppcomp(list(f1, f2, f3), fitcol=c("red","green","blue"))
# (4) normal mixture
#
#mixture of two normal distributions
#density
# Density of a two-component normal mixture: weight `poid` on
# N(m1, s1) and weight 1 - poid on N(m2, s2).
dnorm2 <- function(x, poid, m1, s1, m2, s2) {
  component1 <- dnorm(x, mean = m1, sd = s1)
  component2 <- dnorm(x, mean = m2, sd = s2)
  poid * component1 + (1 - poid) * component2
}
# Numerical quantile function for the normal mixture: for each
# probability in `p`, minimise the squared gap between pnorm2() and that
# probability over a wide search interval.
qnorm2 <- function(p, poid, m1, s1, m2, s2)
{
  squared_gap <- function(x, prob) {
    (prob - pnorm2(x, poid, m1, s1, m2, s2))^2
  }
  sapply(p, function(pr) optimize(squared_gap, c(-1000, 1000), prob = pr)$minimum)
}
# CDF of the two-component normal mixture matching dnorm2(): weight
# `poid` on N(m1, s1), weight 1 - poid on N(m2, s2).
pnorm2 <- function(q, poid, m1, s1, m2, s2) {
  mix <- poid * pnorm(q, mean = m1, sd = s1)
  mix + (1 - poid) * pnorm(q, mean = m2, sd = s2)
}
#basic normal distribution
# Simulate a 50/50 mixture of N(5, 1) and N(10, 1); seed fixed for
# reproducibility.
set.seed(1234)
x2 <- c(rnorm(1000, 5), rnorm(1000, 10))
#MLE fit
# Fit the custom "norm2" mixture (d/p/q functions defined above) by
# three methods: ML, quantile matching, and MGE with the AD distance.
fit1 <- fitdist(x2, "norm2", "mle", start=list(poid=1/3, m1=4, s1=2, m2=8, s2=2),
                lower=c(0, 0, 0, 0, 0))
fit2 <- fitdist(x2, "norm2", "qme", probs=c(1/6, 1/4, 1/3, 1/2, 2/3),
                start=list(poid=1/3, m1=4, s1=2, m2=8, s2=2),
                lower=c(0, 0, 0, 0, 0), upper=c(1/2, Inf, Inf, Inf, Inf))
fit3 <- fitdist(x2, "norm2", "mge", gof="AD",
                start=list(poid=1/3, m1=4, s1=2, m2=8, s2=2),
                lower=c(0, 0, 0, 0, 0), upper=c(1/2, Inf, Inf, Inf, Inf))
ppcomp(list(fit1, fit2, fit3), fitpch=rep(".", 3),
       fitcol=c("green", "red", "blue"))
# (5) large data
#
# NOTE(review): the 2e4 value is immediately overwritten — only the
# n = 1e2 run is exercised (presumably to keep the test fast).
n <- 2e4
n <- 1e2
x <- rlnorm(n)
f1 <- fitdist(x, "lnorm")
f2 <- fitdist(x, "exp")
ppcomp(list(f1, f2), lty=2)
|
986dccd29679c0b925341f5b3636fc7cde5fcd3f
|
bcd0e4df4a224b149d561f2470c2d463a93c1bc5
|
/code_original/extraScript_lookAtOtherModels.R
|
0494c6383324b19f4aba6a56ccb217a123589bb7
|
[] |
no_license
|
marissalee/E8-NichePlots
|
de570190a9da6adca53a8822c0c06dda01452de4
|
020128d7b085a8f0cbc4236c43db2fb3d84f193a
|
refs/heads/master
| 2021-06-12T22:46:56.205264
| 2021-03-19T16:38:25
| 2021-03-19T16:38:25
| 22,583,913
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,597
|
r
|
extraScript_lookAtOtherModels.R
|
#extraScript_lookAtOtherModels.R
# Exploratory mixed models (lmer) relating soil nitrogen measures to
# tree, litter and Microstegium (mv) biomass covariates, fit separately
# for reference (_N suffix) and invaded (_I suffix) plot halves, with
# year as a random intercept throughout.

#reshape data.choice
# Keep topsoil ('T') rows, subset to the variables of interest, then
# cast long -> wide so each variable x invasion-status pair is a column.
data.T<-subset(data.choice, depth == 'T')
data.T1<-data.choice[data.choice$variable %in% c(allVars,'mv_g.m2'),
                     c('plotid','plothalfid1','inv','year','variable','value')]
data.T1$variable<-factor(data.T1$variable, levels=c(allVars,'mv_g.m2'))
data.T1.wide<-dcast(data.T1, plotid + year ~ variable + inv, value.var='value')
df<-data.T1.wide
#nitrate
#reference
# Nitrate (noi) on its hypothesized drivers, then each driver regressed
# on its own presumed upstream covariates in turn.
mod<-lmer(noi_T_N ~ nTrees_N + nitrifd_T_N + ph_T_N + soilmoi_T_N +
            (1|year), data=df)
summary(mod)
mod<-lmer(nitrifd_T_N ~ nTrees_N +
            (1|year), data=df)
summary(mod)
mod<-lmer(ph_T_N ~ nTrees_N +
            (1|year), data=df)
summary(mod)
mod<-lmer(soilmoi_T_N ~ nTrees_N + litter_g.m2_N +
            (1|year), data=df)
summary(mod)
mod<-lmer(litter_g.m2_N ~ nTrees_N +
            (1|year), data=df)
summary(mod)
#invaded
# Same model structure on invaded halves, adding mv biomass as a
# covariate; tree count is taken from the reference half (nTrees_N).
mod<-lmer(noi_T_I ~ nTrees_N + nitrifd_T_I + ph_T_I + soilmoi_T_I + mv_g.m2_I +
            (1|year), data=df)
summary(mod)
mod<-lmer(nitrifd_T_I ~ nTrees_N + mv_g.m2_I +
            (1|year), data=df)
summary(mod)
mod<-lmer(ph_T_I ~ nTrees_N + mv_g.m2_I +
            (1|year), data=df)
summary(mod)
mod<-lmer(soilmoi_T_I ~ nTrees_N + litter_g.m2_I + mv_g.m2_I +
            (1|year), data=df)
summary(mod)
mod<-lmer(litter_g.m2_I ~ nTrees_N + mv_g.m2_I +
            (1|year), data=df)
summary(mod)
## mv biomass
# Log-transform mv biomass (+0.01 to handle zeros) and model it on the
# reference-half covariates.
df$mv_g.m2_I_logt<-log(df$mv_g.m2_I+0.01)
mod<-lmer(mv_g.m2_I_logt ~ nTrees_N + noi_T_N + nitrifd_T_N + ph_T_N + soilmoi_T_N + litter_g.m2_N +
            (1|year), data=df)
summary(mod)
#ammonification
#reference
# Ammonification (ammonifd) on its hypothesized drivers in reference
# halves, then each driver on its upstream covariates.
mod<-lmer(ammonifd_T_N ~ nTrees_N + nhi_T_N + som_T_N + ph_T_N + soilmoi_T_N +
            (1|year), data=df)
summary(mod)
mod<-lmer(nhi_T_N ~ nTrees_N +
            (1|year), data=df)
summary(mod)
mod<-lmer(som_T_N ~ nTrees_N +
            (1|year), data=df)
summary(mod)
mod<-lmer(ph_T_N ~ nTrees_N +
            (1|year), data=df)
summary(mod)
mod<-lmer(soilmoi_T_N ~ nTrees_N + litter_g.m2_N +
            (1|year), data=df)
summary(mod)
mod<-lmer(litter_g.m2_N ~ nTrees_N +
            (1|year), data=df)
summary(mod)
#invaded
mod<-lmer(ammonifd_T_I ~ nTrees_N + nhi_T_I + som_T_I + ph_T_I + soilmoi_T_I + mv_g.m2_I +
            (1|year), data=df)
summary(mod)
# NOTE(review): the five models below sit under the "invaded" heading
# but use _N (reference-half) responses and litter — possibly a
# copy-paste slip for the _I columns; verify against the nitrate section.
mod<-lmer(nhi_T_N ~ nTrees_N +mv_g.m2_I +
            (1|year), data=df)
summary(mod)
mod<-lmer(som_T_N ~ nTrees_N +mv_g.m2_I +
            (1|year), data=df)
summary(mod)
mod<-lmer(ph_T_N ~ nTrees_N +mv_g.m2_I +
            (1|year), data=df)
summary(mod)
mod<-lmer(soilmoi_T_N ~ nTrees_N + litter_g.m2_N + mv_g.m2_I +
            (1|year), data=df)
summary(mod)
mod<-lmer(litter_g.m2_N ~ nTrees_N + mv_g.m2_I +
            (1|year), data=df)
summary(mod)
#Mv biomass
# Logged mv biomass on reference-half ammonification covariates.
mod<-lmer(mv_g.m2_I_logt ~ nTrees_N + ammonifd_T_N + nhi_T_N + som_T_N +
            soilmoi_T_N + litter_g.m2_N +
            (1|year), data=df)
summary(mod)
#nitrate
# ---- Interaction models on the per-plot data set data.q2 ----
# Nitrate (top depth) on logged mv biomass, trees and litter with
# mv x tree and mv x litter interactions.
modT<-lmer(noi_T ~ mv_g.m2_logt + nTrees + litter_g.m2 +
             mv_g.m2_logt:nTrees + mv_g.m2_logt:litter_g.m2 +
             (1|year), data=data.q2)
summary(modT)
# Visualize the mv x litter interaction by splitting litter at its mean.
meanVal<-mean(data.q2$litter_g.m2)
data.q2$litterBin<-'Low litter'
data.q2[data.q2$litter_g.m2>meanVal,'litterBin']<-'High litter'
data.q2$litterBin<-factor(data.q2$litterBin, levels=c('Low litter','High litter'))
ggplot(data.q2, aes(x=mv_g.m2_logt, y=noi_T)) +
  geom_point() + facet_grid(~litterBin)
# Same model for the bottom-depth nitrate response.
modB<-lmer(noi_B ~ mv_g.m2_logt + nTrees + litter_g.m2 +
             mv_g.m2_logt:nTrees + mv_g.m2_logt:litter_g.m2 +
             (1|year), data=data.q2)
summary(modB)
meanVal<-mean(data.q2$litter_g.m2)
data.q2$litterBin<-'Low litter'
data.q2[data.q2$litter_g.m2>meanVal,'litterBin']<-'High litter'
data.q2$litterBin<-factor(data.q2$litterBin, levels=c('Low litter','High litter'))
ggplot(data.q2, aes(x=mv_g.m2_logt, y=noi_B)) +
  geom_point() + facet_grid(~litterBin)
#ammonification
# Ammonification on mv biomass and soil moisture with their interaction.
modT<-lmer(ammonifd_T ~ mv_g.m2_logt + soilmoi_T +
             mv_g.m2_logt:soilmoi_T +
             (1|year), data=data.q2)
summary(modT)
# Visualize the mv x moisture interaction by splitting moisture at its mean.
meanVal<-mean(data.q2$soilmoi_T)
data.q2$soilmoiBin<-'Low moisture'
data.q2[data.q2$soilmoi_T>meanVal,'soilmoiBin']<-'High moisture'
data.q2$soilmoiBin<-factor(data.q2$soilmoiBin, levels=c('Low moisture','High moisture'))
ggplot(data.q2, aes(x=mv_g.m2_logt, y=ammonifd_T, size=soilmoi_T)) +
  geom_point() + facet_grid(~soilmoiBin) + geom_smooth(method='lm')
# Extended model adding AM-mycorrhizal basal area and its interactions.
modT<-lmer(ammonifd_T ~ mv_g.m2_logt + soilmoi_T + PercBA_AM +
             mv_g.m2_logt:soilmoi_T + PercBA_AM:soilmoi_T + mv_g.m2_logt:PercBA_AM +
             (1|year), data=data.q2)
summary(modT)
|
a787615eeb1d1b64c8a6eef9a88ab1a65a8dfcb3
|
44415fd86412a96b039d60a6ba83b8065bde6f1d
|
/man/lizards.Rd
|
6bf5e2e1d67fd4feb65414990074e9ef0b184e9a
|
[] |
no_license
|
cran/AICcmodavg
|
f9451566b4415350ff91d4e1fffc323ca6f6082e
|
69bf7930f2228ed6fb06683cd766a16b0bf5cdce
|
refs/heads/master
| 2023-04-08T21:23:38.333939
| 2023-03-20T15:20:02
| 2023-03-20T15:20:02
| 17,677,598
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,006
|
rd
|
lizards.Rd
|
\name{lizards}
\Rdversion{1.1}
\alias{lizards}
\docType{data}
\title{
Habitat Preference of Lizards
}
\description{
This data set describes the habitat preference of two species of
lizards, \emph{Anolis grahami} and \emph{A}. \emph{opalinus}, on the
island of Jamaica and is originally from Schoener (1970). McCullagh and
Nelder (1989) and Burnham and Anderson (2002) reanalyzed the data. Note
that a typo occurs in table 3.11 of Burnham and Anderson (2002).
}
\usage{data(lizards)}
\format{
A data frame with 48 rows and 6 variables.
\describe{
\item{\code{Insolation}}{position of perch, either \code{shaded} or
\code{sunny}.}
\item{\code{Diameter}}{diameter of the perch, either \code{< 2 in}
or \code{>= 2 in}.}
\item{\code{Height}}{perch height, either \code{< 5} or
\code{>= 5}.}
\item{\code{Time}}{time of day, either \code{morning},
\code{midday}, or \code{afternoon}.}
\item{\code{Species}}{species observed, either \code{grahami} or
\code{opalinus}.}
\item{\code{Counts}}{number of individuals observed.}
}
}
\details{
Burnham and Anderson (2002, p. 137) use this data set originally from
Schoener (1970) to illustrate model selection for log-linear models.
}
\source{
Burnham, K. P., Anderson, D. R. (2002) \emph{Model Selection and
Multimodel Inference: a practical information-theoretic
approach}. Second edition. Springer: New York.
McCullagh, P., Nelder, J. A. (1989) \emph{Generalized Linear
Models}. Second edition. Chapman and Hall: New York.
Schoener, T. W. (1970) Nonsynchronous spatial overlap of lizards in
patchy habitats. \emph{Ecology} \bold{51}, 408--418.
}
\examples{
data(lizards)
\dontrun{
##log-linear model as in Burnham and Anderson 2002, p. 137
##main effects
m1 <- glm(Counts ~ Insolation + Diameter + Height + Time + Species,
family = poisson, data = lizards)
##main effects and all second order interactions = base
m2 <- glm(Counts ~ Insolation + Diameter + Height + Time + Species +
Insolation:Diameter + Insolation:Height + Insolation:Time +
Insolation:Species + Diameter:Height + Diameter:Time +
Diameter:Species + Height:Time + Height:Species +
Time:Species, family = poisson, data = lizards)
##base - DT
m3 <- glm(Counts ~ Insolation + Diameter + Height + Time + Species +
Insolation:Diameter + Insolation:Height + Insolation:Time +
Insolation:Species + Diameter:Height + Diameter:Species +
Height:Time + Height:Species + Time:Species,
family = poisson, data = lizards)
##base + HDI + HDT + HDS
m4 <- glm(Counts ~ Insolation + Diameter + Height + Time + Species +
Insolation:Diameter + Insolation:Height + Insolation:Time +
Insolation:Species + Diameter:Height + Diameter:Time +
Diameter:Species + Height:Time + Height:Species +
Time:Species + Height:Diameter:Insolation +
Height:Diameter:Time + Height:Diameter:Species,
family = poisson, data = lizards)
##base + HDI + HDS + HIT + HIS + HTS + ITS
m5 <- glm(Counts ~ Insolation + Diameter + Height + Time + Species +
Insolation:Diameter + Insolation:Height + Insolation:Time +
Insolation:Species + Diameter:Height + Diameter:Time +
Diameter:Species + Height:Time + Height:Species +
Time:Species + Height:Diameter:Insolation +
Height:Diameter:Species + Height:Insolation:Time +
Height:Insolation:Species + Height:Time:Species +
Insolation:Time:Species, family = poisson, data = lizards)
##base + HIT + HIS + HTS + ITS
m6 <- glm(Counts ~ Insolation + Diameter + Height + Time + Species +
Insolation:Diameter + Insolation:Height + Insolation:Time +
Insolation:Species + Diameter:Height + Diameter:Time +
Diameter:Species + Height:Time + Height:Species +
Time:Species + Height:Insolation:Time +
Height:Insolation:Species + Height:Time:Species +
Insolation:Time:Species, family = poisson, data = lizards)
##base + HIS + HTS + ITS
m7 <- glm(Counts ~ Insolation + Diameter + Height + Time + Species +
Insolation:Diameter + Insolation:Height + Insolation:Time +
Insolation:Species + Diameter:Height + Diameter:Time +
Diameter:Species + Height:Time + Height:Species +
Time:Species + Height:Insolation:Species +
Height:Time:Species + Insolation:Time:Species,
family = poisson, data = lizards)
##base + HIT + HIS + HTS + ITS - DT
m8 <- glm(Counts ~ Insolation + Diameter + Height + Time + Species +
Insolation:Diameter + Insolation:Height + Insolation:Time +
Insolation:Species + Diameter:Height + Diameter:Species +
Height:Time + Height:Species + Time:Species +
Height:Insolation:Time + Height:Insolation:Species +
Height:Time:Species + Insolation:Time:Species,
family = poisson, data = lizards)
##base + HIT + HIS + ITS - DT
m9 <- glm(Counts ~ Insolation + Diameter + Height + Time + Species +
Insolation:Diameter + Insolation:Height + Insolation:Time +
Insolation:Species + Diameter:Height + Diameter:Species +
Height:Time + Height:Species + Time:Species +
Height:Insolation:Time + Height:Insolation:Species +
Insolation:Time:Species,
family = poisson, data = lizards)
##base + HIT + HIS - DT
m10 <- glm(Counts ~ Insolation + Diameter + Height + Time + Species +
Insolation:Diameter + Insolation:Height + Insolation:Time +
Insolation:Species + Diameter:Height + Diameter:Species +
Height:Time + Height:Species + Time:Species +
Height:Insolation:Time + Height:Insolation:Species,
family = poisson, data = lizards)
##set up in list
Cands <- list(m1, m2, m3, m4, m5, m6, m7, m8, m9, m10)
Modnames <- paste("m", 1:length(Cands), sep = "")
##model selection
library(AICcmodavg)
aictab(Cands, Modnames)
}
}
\keyword{datasets}
|
e9c8219caa12891ef299befcd6ded09e53cf6e36
|
5bde8725af216500ff7aa08c45fda093f6ab1706
|
/man/calc_ice_season.Rd
|
9ae97dbd4812f77bfd13f9049639af805b0a04f4
|
[] |
no_license
|
AustralianAntarcticDivision/aceecostats
|
37086bd2178a5abd68a9b71f9fe0b04dd68b6ecb
|
b789f6453a676a19aeb9d9f57b210a663378f2cb
|
refs/heads/master
| 2021-05-01T01:19:30.966700
| 2019-03-05T23:01:21
| 2019-03-05T23:01:21
| 72,392,976
| 2
| 2
| null | 2016-12-19T01:20:17
| 2016-10-31T02:52:30
|
HTML
|
UTF-8
|
R
| false
| true
| 284
|
rd
|
calc_ice_season.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calc-ice-season.R
\name{calc_ice_season}
\alias{calc_ice_season}
\title{actually calculate the ice season}
\usage{
calc_ice_season(yfile, threshval = 15)
}
\description{
actually calculate the ice season
}
|
27e22de5c367a9a10abc2ca1c08cb6667fbdd006
|
3e74b2d423d7b4d472ffce4ead1605621fb2d401
|
/variancePartition/R/plotStratifyBy.R
|
13666655f53f019e732d075f282e77b28025c711
|
[] |
no_license
|
jamesjcai/My_Code_Collection
|
954988ee24c7bd34139d35c880a2093b01cef8d1
|
99905cc5d063918cbe6c4126b5d7708a4ddffc90
|
refs/heads/master
| 2023-07-06T07:43:00.956813
| 2023-07-03T22:17:32
| 2023-07-03T22:17:32
| 79,670,576
| 2
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,113
|
r
|
plotStratifyBy.R
|
#' plotStratify
#'
#' Plot gene expression stratified by another variable
#'
#' @param formula specify variables shown in the x- and y-axes. Y-axis should be continuous variable, x-axis should be discrete.
#' @param data data.frame storing continuous and discrete variables specified in formula
#' @param xlab label for the x-axis. Defaults to value of xval
#' @param ylab label for the y-axis. Defaults to value of yval
#' @param main main label
#' @param sortBy name of column in geneExpr to sort samples by. Defaults to xval
#' @param colorBy name of column in geneExpr to color box plots. Defaults to xval
#' @param sort if TRUE, sort boxplots by median value, else use default ordering
#' @param text plot text on the top left of the plot
#' @param text.y indicate position of the text on the y-axis as a fraction of the y-axis range
#' @param text.size size of text
#' @param pts.cex size of points
#' @param ylim specify range of y-axis
#' @param legend show legend
#' @param x.labels show x axis labels
#'
#' @return
#' ggplot2 object
#'
#' @examples
#'
#' # Note: This is a newer, more convient interface to plotStratifyBy()
#'
#' # load library
#' # library(variancePartition)
#'
#' # load simulated data:
#' data(varPartData)
#'
#' # Create data.frame with expression and Tissue information for each sample
#' GE = data.frame( Expression = geneExpr[1,], Tissue = info$Tissue)
#'
#' # Plot expression stratified by Tissue
#' plotStratify( Expression ~ Tissue, GE )
#'
#' # Omit legend and color boxes grey
#' plotStratify( Expression ~ Tissue, GE, colorBy = NULL)
#'
#' # Specify colors
#' col = c( B="green", A="red", C="yellow")
#' plotStratify( Expression ~ Tissue, GE, colorBy=col, sort=FALSE)
#'
#' @export
# Plot a continuous response stratified by a discrete variable as box
# plots with an overlaid median bar; returns a ggplot2 object.  See the
# roxygen header for full parameter documentation.
plotStratify = function( formula, data, xlab, ylab, main, sortBy, colorBy, sort=TRUE, text=NULL, text.y=1, text.size=5, pts.cex=1, ylim=NULL, legend=TRUE, x.labels=FALSE ){

	# Standard model.frame call-rewriting idiom: evaluate `formula`
	# against `data` to get a two-column model frame (response + stratum).
	mc <- match.call()
	m <- match(c("formula","data"), names(mc), 0L)
	mf <- mc[c(1L, m)]
	mf[[1L]] <- as.name("model.frame")
	mf <- eval(mf, parent.frame())

	data.st <- data.frame(mf)

	if( ncol(data.st) != 2){
		stop("formula must have exactly 2 entries")
	}

	# Column names of the stratifying (x) and response (y) variables.
	xval = colnames(data.st)[attr(attr(mf, "terms"), "response")+1]
	yval = colnames(data.st)[attr(attr(mf, "terms"), "response")]

	# Fill in defaults for the labels, sorting key and coloring.
	if( missing(xlab) ){
		xlab = xval
	}
	if( missing(sortBy) || is.null(sortBy) ){
		sortBy = xval
	}
	if( missing(colorBy) ){
		colorBy = xval
	}
	if( missing(ylab) ){
		ylab = yval
	}

	# check that sortBy exists in data.st
	if( !(sortBy %in% colnames(data.st)) ){
		stop(paste("sortBy is not found in colnames(data): sortBy =", sortBy))
	}

	data.st[[yval]] = as.numeric( data.st[[yval]] )

	# Position of the optional annotation: left edge of the plot, with
	# text.y interpreted as a fraction of the observed y range.
	xpos = 0.5 #text.x * nlevels(data.st[[xval]])
	ypos = text.y * (max(data.st[[yval]]) - min(data.st[[yval]])) + min(data.st[[yval]])

	if( sort ){
		# sort categories by median expression
		data.st[['reorder']] = reorder(data.st[[sortBy]],data.st[[yval]], FUN=median)
		ord = "reorder"
	}else{
		ord = xval
	}

	pOut = ggplot( data.st, aes_string(x=ord, y=yval)) + theme_bw() + theme( plot.background = element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank()) + ylab(ylab) + xlab(xlab) + theme(plot.title=element_text(hjust=0.5))

	# BUG FIX: was `is.null(colorBy) || is.na(colorBy)`, which errors in
	# R >= 4.3 when colorBy is a vector of colors (the documented usage),
	# because is.na() then returns a length > 1 logical that `||`
	# rejects.  all() preserves the scalar-NA behavior while accepting
	# color vectors.
	if( is.null(colorBy) || all(is.na(colorBy)) ){
		pOut = pOut + geom_boxplot(color="grey", fill="grey", outlier.colour='black',outlier.shape = 20)
	}else{
		# if colors are specified and all levels of xval are represented
		if( sum(levels(data.st[[xval]]) %in% names(colorBy)) == nlevels(data.st[[xval]]) ){
			# reorder the supplied colors to match the (possibly sorted) levels
			i = match(levels(data.st[[ord]]), levels(data.st[[xval]]) )
			pOut = pOut + geom_boxplot(aes_string(fill=xval), color=colorBy[i], outlier.colour='black',outlier.shape = 20) + scale_fill_manual( values=array(colorBy))
		}else{
			# color boxes by colorBy variable in data.st
			pOut = pOut + geom_boxplot( aes_string(color=colorBy, fill=colorBy), outlier.colour='black', outlier.shape = 20)
		}
		# add legend
		if( legend ){
			pOut = pOut + theme(legend.justification=c(1,0), legend.position=c(1,0), legend.key = element_rect(fill="transparent"), axis.text.x=element_text(angle=30), legend.background = element_rect(fill="transparent"))
		}else{
			pOut = pOut + theme(legend.position="none", axis.text.x=element_text(angle=30))
		}
	}

	if( ! x.labels ){
		pOut = pOut + theme(axis.ticks.x = element_blank(), axis.text.x = element_blank())
	}

	# add median bar, drawn as a zero-height crossbar over each box
	pOut = pOut + stat_summary(geom = "crossbar", width=0.65, fatten=0, color="black", fun.data = function(x){ return(c(y=median(x), ymin=median(x), ymax=median(x))) })

	if( ! missing(ylim) ){
		pOut = pOut + ylim(ylim)
	}
	if( ! missing(main) ){
		pOut = pOut + ggtitle(main)
	}
	if( ! missing(text) ){
		pOut = pOut + annotate("text", label = text, x = xpos, y=ypos, size = text.size, hjust=0)
	}
	#pOut = pOut + geom_jitter(size=pts.cex,height=0, width=0, col="black")

	return( pOut )
}
#' plotStratifyBy
#'
#' Plot gene expression stratified by another variable
#'
#' @param geneExpr data.frame of gene expression values and another variable for each sample. If there are multiple columns, the user can specify which one to use
#' @param xval name of column in geneExpr to be used along x-axis to stratify gene expression
#' @param yval name of column in geneExpr indicating gene expression
#' @param xlab label x-asis. Defaults to value of xval
#' @param ylab label y-asis. Defaults to value of yval
#' @param main main label
#' @param sortBy name of column in geneExpr to sort samples by. Defaults to xval
#' @param colorBy name of column in geneExpr to color box plots. Defaults to xval
#' @param sort if TRUE, sort boxplots by median value, else use default ordering
#' @param text plot text on the top left of the plot
#' @param text.y indicate position of the text on the y-axis as a fraction of the y-axis range
#' @param text.size size of text
#' @param pts.cex size of points
#' @param ylim specify range of y-axis
#' @param legend show legend
#' @param x.labels show x axis labels
#'
#' @return
#' ggplot2 object
#'
#' @examples
#'
#' # load library
#' # library(variancePartition)
#'
#' # load simulated data:
#' data(varPartData)
#'
#' # Create data.frame with expression and Tissue information for each sample
#' GE = data.frame( Expression = geneExpr[1,], Tissue = info$Tissue)
#'
#' # Plot expression stratified by Tissue
#' plotStratifyBy( GE, "Tissue", "Expression")
#'
#' # Omit legend and color boxes grey
#' plotStratifyBy( GE, "Tissue", "Expression", colorBy = NULL)
#'
#' # Specify colors
#' col = c( B="green", A="red", C="yellow")
#' plotStratifyBy( GE, "Tissue", "Expression", colorBy=col, sort=FALSE)
#'
#' @export
plotStratifyBy = function( geneExpr, xval, yval, xlab=xval, ylab=yval, main=NULL, sortBy=xval, colorBy=xval, sort=TRUE, text=NULL, text.y=1, text.size=5, pts.cex=1, ylim=NULL, legend=TRUE, x.labels=FALSE ){
  # Box plot of `yval` stratified by the categories in `xval` (see roxygen
  # block above for the full parameter contract). Returns a ggplot2 object.
  geneExpr = data.frame( geneExpr )
  geneExpr = droplevels( geneExpr )
  # NOTE(review): this assignment silently overrides the user-supplied
  # `sortBy` argument, contradicting the documentation above. It may be a
  # deliberate hotfix (the color-matching below assumes ordering by xval) --
  # confirm intent before removing.
  sortBy = xval
  # check that xval and yval exist in geneExpr
  if( !(xval %in% colnames(geneExpr)) ){
    stop(paste("xval is not found in colnames(geneExpr): xval =", xval))
  }
  if( !(yval %in% colnames(geneExpr)) ){
    stop(paste("yval is not found in colnames(geneExpr): yval =", yval))
  }
  # check that sortBy exists in geneExpr
  if( !(sortBy %in% colnames(geneExpr)) ){
    stop(paste("sortBy is not found in colnames(geneExpr): sortBy =", sortBy))
  }
  geneExpr[[yval]] = as.numeric( geneExpr[[yval]] )
  # annotation anchor: fixed x, y placed as a fraction (text.y) of the y-range
  xpos = 0.5 #text.x * nlevels(geneExpr[[xval]])
  ypos = text.y * (max(geneExpr[[yval]]) - min(geneExpr[[yval]])) + min(geneExpr[[yval]])
  if( sort ){
    # sort categories by median expression
    geneExpr[['reorder']] = reorder(geneExpr[[sortBy]], geneExpr[[yval]], FUN=median)
    ord = "reorder"
  }else{
    ord = xval
  }
  pOut = ggplot( geneExpr, aes_string(x=ord, y=yval)) + theme_bw() + theme( plot.background = element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank()) + ylab(ylab) + xlab(xlab) + theme(plot.title=element_text(hjust=0.5))
  # `colorBy` may legitimately be a vector of colors (see examples), and
  # `||` errors on conditions of length > 1 (R >= 4.3), so only apply
  # is.na() to a scalar colorBy.
  if( is.null(colorBy) || (length(colorBy) == 1 && is.na(colorBy)) ){
    pOut = pOut + geom_boxplot(color="grey", fill="grey", outlier.colour='black',outlier.shape = 20)
  }else{
    # if colors are specified and all levels of xval are represented
    if( sum(levels(geneExpr[[xval]]) %in% names(colorBy)) == nlevels(geneExpr[[xval]]) ){
      # reorder the explicit colors to follow the (possibly sorted) x order
      i = match(levels(geneExpr[[ord]]), levels(geneExpr[[xval]]) )
      pOut = pOut + geom_boxplot(aes_string(fill=xval), color=colorBy[i], outlier.colour='black',outlier.shape = 20) + scale_fill_manual( values=array(colorBy))
    }else{
      # color boxes by colorBy variable in geneExpr
      pOut = pOut + geom_boxplot( aes_string(color=colorBy, fill=colorBy), outlier.colour='black', outlier.shape = 20)
    }
    # add legend
    if( legend ){
      pOut = pOut + theme(legend.justification=c(1,0), legend.position=c(1,0), legend.key = element_rect(fill="transparent"), axis.text.x=element_text(angle=30), legend.background = element_rect(fill="transparent"))
    }else{
      pOut = pOut + theme(legend.position="none", axis.text.x=element_text(angle=30))
    }
  }
  if( ! x.labels ){
    pOut = pOut + theme(axis.ticks.x = element_blank(), axis.text.x = element_blank())
  }
  # add median bar drawn on top of the boxes
  pOut = pOut + stat_summary(geom = "crossbar", width=0.65, fatten=0, color="black", fun.data = function(x){ return(c(y=median(x), ymin=median(x), ymax=median(x))) })
  if( !is.null(ylim)){
    pOut = pOut + ylim(ylim)
  }
  if( !is.null(main) ){
    pOut = pOut + ggtitle(main)
  }
  if( !is.null(text) ){
    pOut = pOut + annotate("text", label = text, x = xpos, y=ypos, size = text.size, hjust=0)
  }
  #pOut = pOut + geom_jitter(size=pts.cex,height=0, width=0, col="black")
  return( pOut )
}
|
fd5776c2ed2d353fa70d9239fe93b69400c696cd
|
feabcc19c0457cdd946433dd0869d0b4b9885384
|
/R/subgraph.R
|
f327c23537d49148162bed8f383c267feed194ba
|
[] |
no_license
|
cran/lava
|
e9dd8f8dcdceb987b8a27e62a2b1663b3b060891
|
b731197dbd9edb76987ccacf94dd95c6a54e4504
|
refs/heads/master
| 2023-03-05T00:01:23.232939
| 2023-02-27T07:12:30
| 2023-02-27T07:12:30
| 17,697,007
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 613
|
r
|
subgraph.R
|
subgraph <- function(g, from, to, Tree = new("graphNEL", node = c(to, from), edgemode = "directed"), ...) {
  # Grow `Tree` with the edges of every route in `g` leading from `from`
  # towards `to`, recursing through path() for each promising neighbour.
  neighbours <- graph::adj(g, from)[[1]]
  if (length(neighbours) == 0) {
    return(Tree)
  }
  for (nb in neighbours) {
    # Direct hit: connect `from` straight to the target node.
    if (nb == to) {
      Tree <- graph::addEdge(from, nb, Tree)
    }
    # Accessibility counts from this neighbour; positive entries are reachable.
    reachable <- graph::acc(g, nb)[[1]]
    if (to %in% names(reachable)[reachable > 0]) {
      if (!(nb %in% graph::nodes(Tree))) {
        Tree <- graph::addNode(nb, Tree)
      }
      Tree <- graph::addEdge(from, nb, Tree)
      # Continue the walk from this neighbour towards the target.
      Tree <- path(g, nb, to, Tree)
    }
  }
  Tree
}
|
1b6cfb1c850ce8c18427c163fa757918b9272782
|
f07436dc70374dc828d94bcbf1fe29dd7fc2926c
|
/R/tadpole.R
|
2b2928a1e2a9334558058e802ccd8d5562a340b1
|
[] |
no_license
|
franzbischoff/lab_hida_alzheimer
|
e8cd1af84804293110a2e8be638b693d4a90ddab
|
e08ee35f58d80ebdcf3b6d2defadf02ad86768de
|
refs/heads/master
| 2022-12-17T02:16:23.058424
| 2020-09-25T01:04:39
| 2020-09-25T01:04:39
| 260,707,432
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,100
|
r
|
tadpole.R
|
# Print numbers with 3 significant digits throughout the analysis.
options(digits = 3)
# Modelling/plotting stack. NOTE: attach order matters -- dplyr masks
# stats::filter/lag, and Hmisc masks several base/other functions.
library(knitr)
library(ADNIMERGE)
library(ggplot2)
library(dplyr)
library(caret)
library(Hmisc)
library(gridExtra)
library(RColorBrewer)
# NOTE(review): sourcing helpers from a remote URL is not reproducible
# offline -- consider vendoring a local copy.
source("https://adni.bitbucket.io/myfunctions.R")
theme_set(theme_bw())
### TADPOLE ----
###
### Each row represents data for one particular visit of a subject, and each
### column represents a feature or measurement (commonly called biomarker) from
### the subject at that particular visit.
###
### The first columns in the spreadsheet contain unique identifiers: RID (roster
### ID) uniquely identifies every subject, VISCODE (visit code) is the timepoint
### when the visit takes place (bl is baseline or month 0, m06 is month 6, etc
### ..), SITE represents the site ID where the visit took place. Other important
### columns are: EXAMDATE represents the date of the clinical examination, AGE
### is their age at baseline visit, PTEDUCAT represents their total years of
### education.
###
### Here is a list of biomarkers we suggest participants unfamiliar with ADNI
### data to start with:
### * The main measures to be predicted: DX, ADAS13, Ventricles
### * Cognitive tests: CDRSB, ADAS11, MMSE, RAVLT_immediate
### * MRI measures: Hippocampus, WholeBrain, Entorhinal, MidTemp
### * PET measures: FDG, AV45
### * CSF measures: ABETA_UPENNBIOMK9_04_19_17 (amyloid-beta level in CSF),
### TAU_UPENNBIOMK9_04_19_17 (tau level), PTAU_UPENNBIOMK9_04_19_17
### (phosphorylated tau level)
### * Risk factors: APOE4, AGE
###
### Other important biomarkers that participants can consider are the various
### MRI, PET and DTI measures for the hippocampus, entorhinal cortex, temporal
### and parietal lobe structures. Use the dictionary file
### (TADPOLE_D1_D2_Dict.csv) and search for keywords such as "hippocampus" or
### "hippocampal" to find the necessary columns. For example, column
### ST44CV_UCSFFSL_02_01_16_UCSFFSL51ALL_08_01_16 represents the volume of the
### left hippocampus. If desired, the measures for the left and right structures
### can be averaged together.
###
### >65 risk increase every 5 yo
### female more than male
###
###
### TADPOLE datasets include three main types of structural MRI markers of
### atrophy:
### 1. ROI volumes
### 2. ROI cortical thicknesses
### 3. ROI surface areas
### where an ROI (region of interest) is a 3D sub-region of the brain such as
### the inferior temporal lobe. Obtaining these structural MRI markers from the
### images is a long and complicated process. This involves registering (i.e.
### aligning) the MRI images with each other and performing a segmentation of
### the main brain structures using an atlas-based technique. More information
### can be found on the Freesurfer website:
### https://surfer.nmr.mgh.harvard.edu/fswiki/LongitudinalProcessing
###
### These measures are computed with an image analysis software called
### Freesurfer using two pipelines: cross-sectional (each subject visit is
### independent) or longitudinal (uses information from all the visits of a
### subject). The longitudinal measures are ?more robust?, but the downside is
### that there are more missing values in our TADPOLE spreadsheet. The MRI
### biomarkers in TADPOLE can be found in the columns containing UCSFFSX
### (cross-sectional) and UCSFFSL (longitudinal).
###
### The D1 a comprehensive longitudinal data set for training.
### The D2 a comprehensive longitudinal data set for prediction.
### The D3 dataset is for cross-sectional prediction. For each participant in
### D2, the final visit only and a limited number of data columns to mimic
### screening data for a clinical trial: demographics, cognitive test scores,
### and structural MRI (derived brain volumes).
###
### * The MRI biomarkers consist of FreeSurfer longitudinally processed ROIs
### from UCSFFSL tables
### * The DTI biomarkers added represent ROI summary measures (e.g. mean
### diffusivity MD, axial diffusivity AD) taken from the spreadsheet
### DTIROI_04_30_14.csv.
### * CSF biomarkers: Amyloid-beta, Tau and P-Tau were taken from the
### Elecsys analysis, which can be found in the UPENNBIOMK9_04_19_17.csv
### spreadsheet.
# Raw TADPOLE spreadsheets; guess_max = 20000 makes readr scan more rows
# before guessing column types.
tadpole_d1_d2 <- readr::read_csv("data/TADPOLE_D1_D2.csv", guess_max = 20000)
tadpole_d1_d2_dict <- readr::read_csv("data/TADPOLE_D1_D2_DICT.csv", guess_max = 20000)
tadpole_d3 <- readr::read_csv("data/TADPOLE_D3.csv", guess_max = 20000)
# D1: longitudinal training rows; D2: longitudinal prediction rows.
tadpole_d1 <- tadpole_d1_d2 %>% filter(D1 == 1)
tadpole_d2 <- tadpole_d1_d2 %>% filter(D2 == 1)
### Initial biomarkers:
###
### MRI measures: Hippocampus (Hippocampus_bl) (numeric, normal) (UCSF Hippocampus)
### MRI measures: WholeBrain (WholeBrain_bl) (numeric, normal) (UCSF WholeBrain)
### MRI measures: Entorhinal (Entorhinal_bl) (numeric, normal) (UCSF Entorhinal)
### MRI measures: MidTemp (MidTemp_bl) (numeric, normal) (UCSF Med Temp)
###
### PET measures: FDG (FDG_bl) (numeric, normal) ("Average FDG-PET of angular, temporal, and posterior cingulate")
### PET measures: AV45 (AV45_bl) (numeric) ("Average AV45 SUVR of frontal, anterior cingulate, precuneus, and parietal cortex relative to the cer")
###
### CSF measures: ABETA_UPENNBIOMK9_04_19_17 (amyloid-beta level in CSF) (numeric, lognormal)
### CSF measures: TAU_UPENNBIOMK9_04_19_17 (tau level) (numeric, lognormal)
### CSF measures: PTAU_UPENNBIOMK9_04_19_17 (phosphorylated tau level) (numeric, lognormal, bimodal?dx)
###
### Risk factors: APOE4 (numeric)
### Risk factors: AGE (numeric, normal)
###
### Cognitive tests: CDRSB (CDRSB_bl) (numeric)
### Cognitive tests: ADAS11 (ADAS11_bl) (numeric, lognormal, dx changes dist)
### Cognitive tests: MMSE (MMSE_bl) (numeric)
### Cognitive tests: RAVLT_immediate (RAVLT_immediate_bl) (numeric, normal)
###
###
### Predict: ADAS13 (ADAS13_bl) (numeric, lognormal, dx changes dist)
### Predict: Ventricles (Ventricles_bl) (UCSF Ventricles)
### Predict: DX (DX_bl, DX_CHANGE) "1=Stable:NL to NL, 2=Stable:MCI to MCI,
### 3=Stable:AD to AD, 4=Conv:NL to MCI, 5=Conv:MCI to AD, 6=Conv:NL to AD,
### 7=Rev:MCI to NL, 8=Rev:AD to MCI, 9=Rev:AD to NL, -1=Not available"
###
### PTID = 005_S_0223
###
### NL = Normal?
### CN = cognitively normal
### MCI = mild cognitive impairment (Late, Early)
### SMC = subjective memory complain
### AD = Alzheimer's disease
###
###
###
### You are asked to forecast three features of each rollover individual at the
### time of their future data provision. Each feature is a common or likely
### outcome measure for clinical trial
###
### Clinical status -> DX (give probability of the class)
### ADAS-Cog13 score -> ADAS13 (best-guess a value as well as a 50% confidence interval)
### Ventricles volume, divided by intracranial volume -> Ventricles (best-guess a value as well as a 50% confidence interval)
###
###
### Methods:
### - Regression
### - ML
### - Disease Progression Model
###
# D1 and D2 share an identical preprocessing pipeline; factor it out
# instead of repeating the ~35-line select/mutate chain twice.

# Diagnosis-change labels for codes 1..9 (see comment block above).
dxfactors <- c(
  "Stable:NL to NL", "Stable:MCI to MCI", "Stable:AD to AD", "Conv:NL to MCI", "Conv:MCI to AD",
  "Conv:NL to AD", "Rev:MCI to NL", "Rev:AD to MCI", "Rev:AD to NL"
)

# Select the working biomarker/cognitive columns, then recode CSF values
# reported below the assay detection limit ("<200", "<80", "<8") to fixed
# numeric stand-ins before converting the columns to numeric.
prep_tadpole <- function(df) {
  df %>%
    select(
      RID, PTID, VISCODE, SITE, D1, D2, COLPROT, ORIGPROT, EXAMDATE,
      PTGENDER, PTEDUCAT, PTETHCAT, PTRACCAT, APOE4, AGE,
      Hippocampus, Hippocampus_bl,
      WholeBrain, WholeBrain_bl,
      Entorhinal, Entorhinal_bl,
      MidTemp, MidTemp_bl,
      FDG, FDG_bl,
      AV45, AV45_bl,
      ABETA_UPENNBIOMK9_04_19_17,
      TAU_UPENNBIOMK9_04_19_17,
      PTAU_UPENNBIOMK9_04_19_17,
      CDRSB, CDRSB_bl,
      ADAS11, ADAS11_bl,
      MMSE, MMSE_bl,
      RAVLT_immediate, RAVLT_immediate_bl,
      DX, DX_bl, DXCHANGE,
      ADAS13, ADAS13_bl,
      Ventricles, Ventricles_bl
    ) %>%
    mutate(
      ABETA_UPENNBIOMK9_04_19_17 = if_else(ABETA_UPENNBIOMK9_04_19_17 == "<200", "190", ABETA_UPENNBIOMK9_04_19_17),
      TAU_UPENNBIOMK9_04_19_17 = if_else(TAU_UPENNBIOMK9_04_19_17 == "<80", "70", TAU_UPENNBIOMK9_04_19_17),
      PTAU_UPENNBIOMK9_04_19_17 = if_else(PTAU_UPENNBIOMK9_04_19_17 == "<8", "7", PTAU_UPENNBIOMK9_04_19_17)
    ) %>%
    mutate(
      ABETA_UPENNBIOMK9_04_19_17 = as.numeric(ABETA_UPENNBIOMK9_04_19_17),
      TAU_UPENNBIOMK9_04_19_17 = as.numeric(TAU_UPENNBIOMK9_04_19_17),
      PTAU_UPENNBIOMK9_04_19_17 = as.numeric(PTAU_UPENNBIOMK9_04_19_17)
    )
}

# Convert coded demographic columns and the numeric DXCHANGE code to factors.
finalize_factors <- function(df) {
  df$DXCHANGE <- factor(df$DXCHANGE, 1:9, dxfactors)
  df$PTGENDER <- factor(df$PTGENDER)
  df$PTETHCAT <- factor(df$PTETHCAT)
  df$PTRACCAT <- factor(df$PTRACCAT)
  df
}

tad1 <- finalize_factors(prep_tadpole(tadpole_d1))
tad2 <- finalize_factors(prep_tadpole(tadpole_d2))
## Need to get other variables to forecast this
# Cross-sectional (D3) subset: identifiers, demographics, MRI volumes and
# the three forecast targets; demographic codes converted to factors.
tad3 <- tadpole_d3 %>%
  select(
    RID, VISCODE, COLPROT, EXAMDATE,
    PTGENDER, PTEDUCAT, PTETHCAT, PTRACCAT, AGE,
    Hippocampus, WholeBrain, Entorhinal, MidTemp,
    DX, ADAS13, Ventricles
  ) %>%
  mutate(
    PTGENDER = factor(PTGENDER),
    PTETHCAT = factor(PTETHCAT),
    PTRACCAT = factor(PTRACCAT)
  )
# The same 5-line DX recode was repeated verbatim for dataset, longitudinal
# and cross_sectional; factor it into helpers.

# Collapse transitional diagnoses to their destination state and turn DX
# into an ordered factor (default level order is alphabetical sort of the
# remaining labels).
collapse_dx <- function(df) {
  df$DX[df$DX == "NL to MCI"] <- "MCI"
  df$DX[df$DX == "Dementia to MCI"] <- "MCI"
  df$DX[df$DX == "MCI to Dementia"] <- "Dementia"
  df$DX[df$DX == "MCI to NL"] <- "NL"
  df$DX <- factor(df$DX, ordered = TRUE)
  df
}

# Feature columns used for DX classification; incomplete rows dropped.
dx_features <- function(df) {
  select(df, DX, PTGENDER, PTEDUCAT, PTETHCAT, PTRACCAT, AGE, Hippocampus, Entorhinal, MidTemp) %>%
    filter(complete.cases(.))
}

# scatterplot matrix
# dataset <- select(tad1, -(RID:EXAMDATE), -ADAS13, -ADAS13_bl, -Ventricles, -Ventricles_bl, -DXCHANGE, -DX_bl) %>% filter(complete.cases(.))
dataset <- collapse_dx(dx_features(tad1))
# dataset$DX_bl <- factor(dataset$DX_bl)
# validation <- select(tad2, -(RID:EXAMDATE), -ADAS13, -ADAS13_bl, -Ventricles, -Ventricles_bl, -DXCHANGE, -DX_bl) %>% filter(complete.cases(.))
longitudinal <- collapse_dx(dx_features(tad2))
# longitudinal$DX_bl <- factor(longitudinal$DX_bl)
cross_sectional <- collapse_dx(dx_features(tad3))
metric <- "Accuracy"
control <- trainControl(
method = "cv", number = 5, classProbs = T,
summaryFunction = multiClassSummary,
savePredictions = T
)
set.seed(7)
fit <- train(DX ~ ., data = dataset, method = "rf", metric = metric, trControl = control)
confusionMatrix(fit$pred$pred, fit$pred$obs)
varImp(fit, scale = FALSE)
set.seed(7)
long_pred <- predict(fit, longitudinal)
long_pred_prob <- predict(fit, longitudinal, type = "prob")
confusionMatrix(long_pred, longitudinal$DX)
long_mroc <- multiclass.roc(factor(longitudinal$DX, ordered = TRUE), long_pred_prob)
auc(long_mroc)
plot(long_mroc$rocs$`Dementia/MCI`[[1]])
plot(long_mroc$rocs$`Dementia/MCI`[[2]])
plot(long_mroc$rocs$`Dementia/NL`[[1]])
plot(long_mroc$rocs$`Dementia/NL`[[2]])
plot(long_mroc$rocs$`MCI/NL`[[1]])
plot(long_mroc$rocs$`MCI/NL`[[2]])
set.seed(7)
cross_pred_prob <- predict(fit, cross_sectional, type = "prob")
cross_pred <- predict(fit, cross_sectional)
confusionMatrix(cross_pred, cross_sectional$DX)
cross_mroc <- multiclass.roc(factor(cross_sectional$DX, ordered = TRUE), cross_pred_prob)
auc(cross_mroc)
plot(cross_mroc$rocs$`Dementia/MCI`[[1]])
plot(cross_mroc$rocs$`Dementia/MCI`[[2]])
plot(cross_mroc$rocs$`Dementia/NL`[[1]])
plot(cross_mroc$rocs$`Dementia/NL`[[2]])
plot(cross_mroc$rocs$`MCI/NL`[[1]])
plot(cross_mroc$rocs$`MCI/NL`[[2]])
|
07152f2b5595834550e833ac8024831c48074dae
|
6ac55e9eb21c4a8df6f7b1b30d0aec6bc8bfcfdb
|
/33_GenevsGenome.R
|
09adcafb5ba24d72c1ba8236838f0ade6c4110fe
|
[] |
no_license
|
raramayo/R_vikas0633
|
c7b9efecaa21699510e9422cd8049d6e3caa21bc
|
44b04ab0bbdfb4fd3e660d1a0c01b618ed32f100
|
refs/heads/master
| 2022-05-03T07:38:29.384021
| 2014-12-09T13:12:33
| 2014-12-09T13:12:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 646
|
r
|
33_GenevsGenome.R
|
# Scatter plot of gene count vs genome size per eukaryote group, with
# per-group linear fits, written to "<infile>.pdf".
# NOTE(review): setwd() to a personal path makes the script non-portable --
# prefer project-relative paths; kept for compatibility with the original
# workflow.
setwd('~/Desktop/100_Thesis/100_datafile/')
infile <- 'eukaryotes.gene.txt'
d <- read.table(infile, header = TRUE, sep = '\t', fill = TRUE)
head(d)
pdf(paste0(infile, '.pdf'), height = 5, width = 7.5)
library('ggplot2')  # attached once (the original loaded ggplot2 twice)
d <- d[complete.cases(d), ]
d$Group <- factor(d$Group, levels = c("Animals", "Plants", "Fungi", "Protists", "Other"))
# Build the plot, then print() explicitly: top-level autoprinting does not
# fire under source(), which would leave the PDF device empty.
p <- ggplot(d, aes(x = Size.in.Mb, y = Genes)) +
  geom_point(aes(col = Group)) +
  theme_bw() +
  geom_smooth(aes(group = Group, col = Group), method = "lm", fullrange = FALSE, se = FALSE, lwd = 1) +
  xlab("Genome Size (in Mb)") +
  ylab("Gene Count")
print(p)
dev.off()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.