blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
fe2271c0adee5366ffeeb3b53ee4eb8f85512f3b
|
f9229ecfef3b8ce90dad05cfef395c442cc90e23
|
/script/question3.r
|
d672bdaaabf498bad88f1736c7d8ee94396a7f37
|
[] |
no_license
|
lga37/mc2-trabalhofinal
|
ee452fa32499b294e0446207e25b5d29c84b4162
|
fc027d48082bc31bfdf0390d5ee97ddfaff8470e
|
refs/heads/main
| 2023-03-09T09:40:19.332620
| 2021-02-23T22:02:56
| 2021-02-23T22:02:56
| 342,687,141
| 0
| 0
| null | 2021-02-26T20:01:15
| 2021-02-26T20:01:14
| null |
UTF-8
|
R
| false
| false
| 896
|
r
|
question3.r
|
library(tidyverse)
# NOTE(review): clearing the workspace and hard-coding a home-relative
# working directory make this script non-portable; both are kept for
# compatibility with the original workflow.
rm(list = ls())
# Set the project folder as the working directory.
setwd("~/Mestrado/TrabalhoMC2")
# One row per run; columns used below: config, inst, best, hv, gd.
data <- read.table("data/data_t3-t4.txt", header = TRUE)
configs <- unique(as.character(data$config))
instances <- unique(as.character(data$inst))
# Data quality: mean of each metric per (instance, configuration) cell.
ic <- matrix(nrow = length(instances), ncol = length(configs), dimnames = list(instances, configs))
hv <- matrix(nrow = length(instances), ncol = length(configs), dimnames = list(instances, configs))
gd <- matrix(nrow = length(instances), ncol = length(configs), dimnames = list(instances, configs))
for (config_ in configs) {
  for (instance_ in instances) {
    # All runs of this configuration on this instance.
    datarow <- subset(data, inst == instance_ & config == config_)
    ic[instance_, config_] <- mean(datarow$best)
    hv[instance_, config_] <- mean(datarow$hv)
    gd[instance_, config_] <- mean(datarow$gd)
  }
}
|
2ba599784c4338f576966af78ffcd029da039978
|
c2c35d0c4e9fc33b9efae54ca98f76e9e216bbbd
|
/project_code/Download MICS from json.R
|
6213f4b3329dda99772139a8c073e56abdf1b6d8
|
[] |
no_license
|
danjwalton/MPI
|
c4bcb83037dd3b176eb0c3a1a30ecf9a80288b61
|
930d759ad0f12ed5fead7e560b04feccd54733f8
|
refs/heads/master
| 2020-07-04T14:50:05.227444
| 2019-08-19T16:19:55
| 2019-08-19T16:19:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,941
|
r
|
Download MICS from json.R
|
required.packages <- c("reshape2","ggplot2","data.table","jsonlite","RCurl","XML","xml2","RStata","stringr","foreign")
# require() merely returns FALSE when a package is missing, so check the
# results and fail fast instead of continuing with half-loaded deps.
loaded <- vapply(required.packages, require, logical(1), character.only = TRUE)
if (!all(loaded)) {
  stop("Missing required packages: ",
       paste(required.packages[!loaded], collapse = ", "), call. = FALSE)
}
# NOTE(review): hard-coded Google Drive path; adjust per machine.
wd <- "G:/My Drive/Work/GitHub/MPI/"
setwd(wd)
# Return the final component of a URL or path: the text after the last "/".
basename.url <- function(path) {
  parts <- strsplit(path, split = "/")[[1]]
  parts[[length(parts)]]
}
# Parse the MICS survey catalogue and keep only entries that link a dataset.
mics_dat <- fromJSON("project_data/mics.json",flatten=T)
mics_dat <- subset(mics_dat,dataset.url!="")
urls <- mics_dat$dataset.url
uniquesavs=c()

# Pick out the .sav files for one survey table (e.g. "ch", "hh") by matching
# the table prefix at the start or the end of the (lower-cased) file name.
match_sav <- function(files, prefix) {
  files[which(grepl(paste0("^", prefix, "(.*)sav|(.*)", prefix, ".sav"),
                    tolower(basename(files))))]
}

# When several candidates matched, prefer files containing `pattern`
# (e.g. "mnmn"); otherwise return the input unchanged.
# NOTE: any() fixes the original vector-valued `if(grepl(...))`, which
# errors on R >= 4.2 whenever more than one file matched.
dedupe_sav <- function(savs, pattern) {
  hits <- grepl(pattern, tolower(savs))
  if (any(hits)) savs[hits] else savs
}

# Read one survey table from SPSS, dropping the SPSS filter column.
# Returns NULL when no matching file was found.
read_sav_table <- function(savs, labels) {
  if (length(savs) == 0) return(NULL)
  dat <- read.spss(savs, use.value.labels = labels)
  dat["FILTER_$"] <- NULL
  dat
}

# Export a table as Stata .dta; failures are tolerated (some tables have
# columns write.dta cannot represent), matching the original tryCatch use.
write_dta_safe <- function(dat, path) {
  tryCatch({
    write.dta(as.data.frame(dat), path, version = 12)
  }, error = function(e) { return(NULL) })
}

for (url in urls) {
  # Drop any tables left over from the previous survey so a partial
  # failure cannot leak stale data into this iteration's export.
  # (Fixes the original `if(exists("ph")){rm(bh)}` copy-paste bug, which
  # removed bh instead of ph.)
  for (nm in c("ch", "hh", "hl", "wm", "mn", "bh", "ph", "who_z",
               "fg", "tn", "fs", "uncaptured_list")) {
    if (exists(nm)) rm(list = nm)
  }
  filename <- gsub("%20","_",basename.url(url))
  uniquename <- substr(filename,1,nchar(filename)-4)
  message(paste(uniquename)," ... ",match(url,urls),"/",length(urls))
  tmp <- tempfile()
  download.file(url,tmp,quiet=T)
  zip.contents <- unzip(tmp,exdir="large.data")
  if (!(exists("zip.contents"))) { next; }
  file.remove(tmp)
  # Some surveys ship a zip-of-zips; descend one level if so.
  if ("zip" %in% str_sub(zip.contents,-3)) {
    message("multiple zips")
    zip.contents=unzip(zip.contents[which(str_sub(zip.contents,-3)=="zip")],exdir="large.data")
  } else {
    zip.contents <- zip.contents[which(str_sub(zip.contents,-3)=="sav")]
  }
  all.sav <- zip.contents[which(grepl("(.*)sav",tolower(basename(zip.contents))))]
  # uniquesavs=unique(c(uniquesavs,all.sav))
  # Children's table goes by several names across survey rounds.
  ch.sav <- c(match_sav(zip.contents, "ch"),
              match_sav(zip.contents, "under5"),
              match_sav(zip.contents, "underfive"))
  hh.sav <- match_sav(zip.contents, "hh")
  hl.sav <- match_sav(zip.contents, "hl")
  wm.sav <- c(match_sav(zip.contents, "wm"), match_sav(zip.contents, "woman"))
  mn.sav <- c(match_sav(zip.contents, "mn"), match_sav(zip.contents, "man"))
  bh.sav <- match_sav(zip.contents, "bh")
  ph.sav <- match_sav(zip.contents, "ph")
  who_z.sav <- match_sav(zip.contents, "who_z")
  fg.sav <- match_sav(zip.contents, "fg")
  tn.sav <- match_sav(zip.contents, "tn")
  fs.sav <- match_sav(zip.contents, "fs")
  # Dedupe BEFORE reading so the filtered *.sav vectors are also what the
  # "uncaptured" computation below sees (as in the original).
  mn.sav <- dedupe_sav(mn.sav, "mnmn")
  bh.sav <- dedupe_sav(bh.sav, "bhbh")
  ph.sav <- dedupe_sav(ph.sav, "phph")
  # Value labels on/off per table matches the original script exactly.
  ch <- read_sav_table(ch.sav, labels = F)
  hh <- read_sav_table(hh.sav, labels = T)
  hl <- read_sav_table(hl.sav, labels = T)
  wm <- read_sav_table(wm.sav, labels = T)
  mn <- read_sav_table(mn.sav, labels = T)
  bh <- read_sav_table(bh.sav, labels = T)
  ph <- read_sav_table(ph.sav, labels = T)
  who_z <- read_sav_table(who_z.sav, labels = F)
  fg <- read_sav_table(fg.sav, labels = F)
  tn <- read_sav_table(tn.sav, labels = F)
  fs <- read_sav_table(fs.sav, labels = F)
  # Any .sav in the archive not claimed by a known table above.
  uncaptured=all.sav[which(!all.sav %in% c(
    ch.sav
    ,hh.sav
    ,hl.sav
    ,wm.sav
    ,mn.sav
    ,bh.sav
    ,ph.sav
    ,who_z.sav
    ,fg.sav
    ,tn.sav
    ,fs.sav
  ))]
  # NOTE(review): uncaptured_list is built each iteration but never saved
  # anywhere — confirm whether it should be written out too.
  uncaptured_list=list()
  if (length(uncaptured) > 0) {
    for (uncap in uncaptured) {
      data.tmp= read.spss(uncap, use.value.labels = T)
      #uncap.labs <- data.frame(var.name=names(data.tmp),var.lab=attributes(data.tmp)$variable.labels)
      data.tmp$filename <- uniquename
      uncap.list=list("data"=data.tmp)#,"labs"=uncap.labs)
      uncaptured_list[[basename(uncap)]]=uncap.list
    }
  }
  # Export each captured table as Stata 12 .dta under a per-survey folder.
  dtapath <- paste0("project_data/DHS MICS data files/",uniquename)
  dir.create(dtapath)
  write_dta_safe(ch, paste0(dtapath, "/ch.dta"))
  write_dta_safe(hh, paste0(dtapath, "/hh.dta"))
  write_dta_safe(hl, paste0(dtapath, "/hl.dta"))
  write_dta_safe(wm, paste0(dtapath, "/wm.dta"))
  write_dta_safe(mn, paste0(dtapath, "/mn.dta"))
  write_dta_safe(bh, paste0(dtapath, "/bh.dta"))
  write_dta_safe(ph, paste0(dtapath, "/ph.dta"))
  write_dta_safe(who_z, paste0(dtapath, "/who_z.dta"))
  write_dta_safe(fg, paste0(dtapath, "/fg.dta"))
  write_dta_safe(tn, paste0(dtapath, "/tn.dta"))
  write_dta_safe(fs, paste0(dtapath, "/fs.dta"))
  rm(zip.contents)
}
|
1aaf1d6fdd400481319c6356820a9bf1b57ce9ba
|
ea492f927e78f9eef5e805bb1b884830c1a76f68
|
/mctd_nc
|
b5d3bf6afdc44aa709ea628bed527d1f2ffc03ca
|
[] |
no_license
|
EOGrady21/netCDF
|
b1f2d7041fb3e556919953b9b54963ac812e00d3
|
742615c0b8422ca4c260065d7118fc216fe40c46
|
refs/heads/master
| 2022-04-27T16:23:19.705034
| 2019-04-17T16:46:40
| 2019-04-17T16:46:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 25,253
|
mctd_nc
|
####mctd NC template####
# obj <- read.odf('C:/Users/ChisholmE/Documents/sample files/mctd/MCTD_HUD2015006_1897_11688_1800.ODF', header = 'list')
# metadata <- ('C:/Users/ChisholmE/Documents/sample files/metadata/MCTD_SAMPLE_METADATA.csv')
source('asP01.R')
#' Moored CTD netCDF template
#'
#' @param obj an odf object from oce which contains mctd data
#' @param metadata a csv file following the standard template which includes all
#' necessary metadata
#' @param filename the desired name for the netCDF file produced, if left NULL
#' the default will conform to BIO naming conventions
#'
#'
#' @return netCDF file with a maximum of 12 variables
#' @export
#'
#' @examples
#' file <- list.files('.', pattern = "MCTD*...*.ODF")
#' obj <- read.odf(file)
#' metadata <- 'MCTD_SAMPLE_METADATA.csv'
#' mctd_nc(obj, metadata)
#'
mctd_nc <- function(obj, metadata, filename = NULL){
# Build a CF-1.7 / BODC-flavoured netCDF file from a moored-CTD ODF object
# (read with oce). Exports time, lon/lat and up to 12 data variables, plus
# global attributes taken from the ODF header and an optional metadata CSV.
# NOTE(review): numbered locals (var1, units1, P01_VAR1, ...) are fabricated
# with eval(parse(...)); left untouched here because the emitted file depends
# on this exact mechanism (see the ls() scrape for *_def below).
require(oce)
require(ncdf4)
# v: data column names; var: the original ODF code names for those columns.
v <- names(obj@data)
var <- obj@metadata$dataNamesOriginal
#remove SYTM from var list
# Time is written separately (ELTMEP01 / DTUT8601), so drop it from both.
tr <- grep(v, pattern = 'time')
v <- v[-tr]
vt <- grep(var, pattern = 'SYTM')
var <- var[-vt]
#POPULATE VARIABLES WITH APPROPRIATE CODES
# as.P01() (sourced from asP01.R) maps an ODF code to a record carrying
# gf3 / units / P01 / P06 codes and an optional CF standard name.
for ( i in 1:length(var)){
var[[i]] <- as.P01(var[[i]])
}
i <- 1
for ( vv in var ){
# Materialise variable_i, vari, unitsi, P01_VARi, ... for column i.
eval(parse(text = paste0("variable_", i, "<- '" , v[[i]], "'")))
eval(parse(text= paste0("var",i," <-'", vv$gf3,"'")))
eval(parse(text = paste0("units", i, " <-'", vv$units, "'")))
eval(parse(text = paste0('P01_VAR', i," <- paste0('SDN:P01::', vv$P01)" )))
eval(parse(text = paste0('P01_name_var', i," <-'" , vv$P01name , "'")))
eval(parse(text = paste0('P06_var', i, "<-'" , vv$P06 , "'")))
eval(parse(text = paste0('P06_name_var', i, "<- '" , vv$P06name , "'")))
# NOTE(review): varimax is set to -10000 and varimin to +10000 — these
# valid_max/valid_min sentinels look swapped; confirm intended values.
eval(parse(text = paste0('var', i, 'max <-', -10000)))
eval(parse(text = paste0('var', i, 'min <-' , 10000)))
if(!is.null(vv$std)){
eval(parse(text = paste0("std_variable_", i, " <- '", vv$std, "'")))
}else{
eval(parse(text = paste0("std_variable_", i, " <- NULL")))
}
#check if variable also has quality flag
# NOTE(review): vari_QC / variablei_QC are created here but never used
# again below — QC variables are not written to the output file.
if (v[[i]] %in% names(obj[['flags']])) {
eval(parse(text = paste0("var", i, "_QC <- '", vv$gf3, "_QC'")))
eval(parse(text = paste0("variable", i , "_QC <- 'quality flag for " , v[[i]], "'")))
}
i <- i+1
}
#CHECK LENGTH OF VARIABLES
numvar <- length(var)
#FILENAME
# NOTE(review): default applied via missing(), so an explicit
# filename = NULL argument skips this branch — verify callers.
if(missing(filename)){
filename <- paste("MCTD", obj[['cruiseNumber']], obj[['eventNumber']], obj[['eventQualifier']], obj[['samplingInterval']], sep = '_')
}
ncpath <- "./"
ncfname <- paste(ncpath, filename, ".nc", sep = "")
#DIMENSIONS
timedim <- ncdim_def("time", "seconds since 1970-01-01T00:00:00Z", as.double(obj[['time']]))
stationdim <- ncdim_def("station", "counts", as.numeric(obj[['station']]))
londim <- ncdim_def("lon", "degrees_east" , as.double(obj[['longitude']]))
latdim <- ncdim_def("lat", "degrees_north", as.double(obj[['latitude']]))
# 23 characters: width of one ISO8601 timestamp for the DTUT8601 strings.
dimnchar <- ncdim_def('nchar', '', 1:23, create_dimvar = FALSE)
#FILLVALUE
FillValue <- 1e35
#VARIABLES
# One ncvar_def per exported quantity; data variables hang off
# (time, station), time variables off (station/nchar, time).
dlname <- 'lon'
lon_def <- ncvar_def(longname= "longitude", units = 'degrees_east', dim = stationdim, name = dlname, prec = 'double')
dlname <- 'lat'
lat_def <- ncvar_def( longname = 'latitude', units = 'degrees_north', dim = stationdim, name = dlname, prec = 'double')
dlname <- "time_02"
t_def <- ncvar_def("ELTMEP01", "seconds since 1970-01-01T00:00:00Z", list( stationdim, timedim), FillValue, dlname, prec = "double")
dlname <- "time_string"
ts_def <- ncvar_def("DTUT8601", units = "",dim = list( dimnchar, timedim), missval = NULL, name = dlname, prec = "char")
dlname <- variable_1
v1_def <- ncvar_def(var1, units1, list(timedim, stationdim), FillValue, dlname, prec = 'double')
# Nested cascade defines v2_def..v12_def only as far as numvar reaches;
# anything beyond 12 variables is dropped with a warning.
if (numvar >1){
dlname <- variable_2
v2_def <- ncvar_def(var2, units2, list(timedim, stationdim), FillValue, dlname, prec = 'double')
if (numvar >2){
dlname <- variable_3
v3_def <- ncvar_def(var3, units3, list(timedim, stationdim), FillValue, dlname, prec = 'double')
if (numvar >3){
dlname <- variable_4
v4_def <- ncvar_def(var4, units4, list(timedim, stationdim), FillValue, dlname, prec = 'double')
if (numvar >4){
dlname <- variable_5
v5_def <- ncvar_def(var5, units5, list(timedim, stationdim), FillValue, dlname, prec = 'double')
if (numvar >5){
dlname <- variable_6
v6_def <- ncvar_def(var6, units6, list(timedim, stationdim), FillValue, dlname, prec = 'double')
if (numvar >6){
dlname <- variable_7
v7_def <- ncvar_def(var7, units7, list(timedim, stationdim), FillValue, dlname, prec = 'double')
if (numvar >7){
dlname <- variable_8
v8_def <- ncvar_def(var8, units8, list(timedim, stationdim), FillValue, dlname, prec = 'double')
if (numvar >8){
dlname <- variable_9
v9_def <- ncvar_def(var9, units9, list(timedim, stationdim), FillValue, dlname, prec = 'double')
if (numvar >9){
dlname <- variable_10
v10_def <- ncvar_def(var10, units10, list(timedim, stationdim), FillValue, dlname, prec = 'double')
if (numvar > 10){
dlname <- variable_11
v11_def <- ncvar_def(var11, units11, list(timedim, stationdim), FillValue, dlname, prec = 'double')
if (numvar > 11){
dlname <- variable_12
v12_def <- ncvar_def(var12, units12, list(timedim, stationdim), FillValue, dlname, prec = 'double')
if (numvar >12){
warning ("Maximum of 12 variables exceeded, not all data has been exported!")
}
}
}
}
}
}
}
}
}
}
}
}
#####write out definitions to new nc file####
# Collect every *_def object in the function frame. NOTE(review): ls()
# returns names alphabetically, so nc variable creation order is
# lat, lon, t, ts, v1, v10, v11, v12, v2, ... — intentional? confirm.
defs <- grep(ls(), pattern = '_def', value = TRUE)
dd <- NULL
for ( i in 1:length(defs)){
eval(parse(text = paste0("dd[[i]] <- ", defs[[i]])))
}
ncout <-
nc_create(
ncfname,
dd
,
force_v4 = TRUE
)
####INSERT DATA####
# NOTE(review): the string variable (ts_def) receives the raw numeric
# time while the numeric ELTMEP01 (t_def) receives a POSIXct — these
# two puts may be swapped; verify against an output file.
ncvar_put(ncout, ts_def, obj[['time']])
ncvar_put(ncout, t_def, as.POSIXct(obj[['time']], tz = 'UTC', origin = '1970-01-01 00:00:00'))
ncvar_put(ncout, lon_def, obj[['longitude']])
ncvar_put(ncout, lat_def, obj[['latitude']])
ncvar_put(ncout, v1_def, obj[[variable_1]])
# Same numvar cascade as above, writing each defined variable's data.
if (numvar >1){
ncvar_put(ncout, v2_def, obj[[variable_2]])
if (numvar >2){
ncvar_put(ncout, v3_def, obj[[variable_3]])
if (numvar >3){
ncvar_put(ncout, v4_def, obj[[variable_4]])
if (numvar >4){
ncvar_put(ncout, v5_def, obj[[variable_5]])
if (numvar >5){
ncvar_put(ncout, v6_def, obj[[variable_6]])
if (numvar >6){
ncvar_put(ncout, v7_def, obj[[variable_7]])
if (numvar >7){
ncvar_put(ncout, v8_def, obj[[variable_8]])
if (numvar >8){
ncvar_put(ncout, v9_def, obj[[variable_9]])
if (numvar >9){
ncvar_put(ncout, v10_def, obj[[variable_10]])
if (numvar >10){
ncvar_put(ncout, v11_def, obj[[variable_11]])
if(numvar >11){
ncvar_put(ncout, v12_def, obj[[variable_12]])
}
}
}
}
}
}
}
}
}
}
}
####metadata####
# Station-level and global attributes pulled from the ODF object.
# NOTE(review): 'latitiude' below is a misspelled attribute name in the
# output file; left as-is since downstream readers may depend on it.
ncatt_put(ncout, 'station', 'longitude', obj[['longitude']])
ncatt_put(ncout, 'station', 'latitiude', obj[['latitude']])
ncatt_put(ncout, 'station', 'standard_name', 'platform_name')
ncatt_put(ncout, 'station', 'cf_role', 'timeseries_id')
ncatt_put(ncout, 'time' , 'calendar', 'gregorian')
ncatt_put(ncout, 'time_string', 'note', 'time values as ISO8601 string, YY-MM-DD hh:mm:ss')
ncatt_put(ncout, 'time_string', 'time_zone', 'UTC')
#FROM ODF
ncatt_put(ncout, 0, 'inst_type', obj[['type']])
ncatt_put(ncout, 0, 'model', obj[['model']])
ncatt_put(ncout, 0, 'sampling_interval', obj[['samplingInterval']])
ncatt_put(ncout, 0, 'country_code', obj[['countryInstituteCode']])
ncatt_put(ncout, 0, 'cruise_number', obj[['cruiseNumber']])
ncatt_put(ncout, 0, "mooring_number", obj[['station']])
# Difference of POSIXct endpoints; units attribute below asserts "days".
ncatt_put(ncout, 0, "time_coverage_duration", (tail(obj[['time']], n = 1) - obj[['time']][[1]]))
ncatt_put(ncout, 0, "time_coverage_duration_units", "days")
ncatt_put(ncout, 0, "cdm_data_type", "station")
ncatt_put(ncout, 0, "serial_number", obj[['serialNumber']])
ncatt_put(ncout, 0, "data_type", 'MCTD')
ncatt_put(ncout, 0, "longitude", obj[['longitude']])
ncatt_put(ncout, 0, "latitude", obj[['latitude']])
ncatt_put(ncout, 0, "platform", obj[['cruise']])
ncatt_put(ncout, 0, "sounding", obj[['sounding']])
ncatt_put(ncout, 0, "chief_scientist", obj[['scientist']])
ncatt_put(ncout, 0, "water_depth", obj[['waterDepth']])
ncatt_put(ncout, 0, "cruise_name", obj[['cruise']])
####variable ATTRIBUTES####
# NOTE(review): reference_scale is applied to var1 only — confirm it is
# meant solely for the first (temperature?) variable.
ncatt_put(ncout, var1, 'reference_scale', 'IPTS-68')
####variables####
#sensor type, sensor depth and serial number for each variable
#generic nameS
#STANDARD NAMES
#data max and min
#VALID MIN AND MAX
#p01 and p06 names
# The same attribute set is repeated for var1..var12, gated by numvar.
ncatt_put(ncout, var1, "sensor_type", obj[['model']])
ncatt_put(ncout, var1, "sensor_depth", obj[['depthMin']])
ncatt_put(ncout, var1, "serial_number", obj[['serialNumber']])
ncatt_put(ncout, var1, "generic_name", variable_1)
ncatt_put(ncout, var1, "sdn_parameter_urn", P01_VAR1)
ncatt_put(ncout, var1, "sdn_parameter_name", P01_name_var1)
ncatt_put(ncout, var1, "sdn_uom_urn", P06_var1)
ncatt_put(ncout, var1, "sdn_uom_name", P06_name_var1)
if (!is.null(std_variable_1)){
ncatt_put(ncout, var1, "standard_name", std_variable_1)
}
ncatt_put(ncout, var1, "data_max", max(obj[[variable_1]], na.rm = TRUE))
ncatt_put(ncout, var1, "data_min", min(obj[[variable_1]], na.rm = TRUE))
ncatt_put(ncout, var1, "valid_max", var1max)
ncatt_put(ncout, var1, "valid_min", var1min)
if (numvar > 1){
ncatt_put(ncout, var2, "sensor_type", obj[['model']])
ncatt_put(ncout, var2, "sensor_depth", obj[['depthMin']])
ncatt_put(ncout, var2, "serial_number", obj[['serialNumber']])
ncatt_put(ncout, var2, "generic_name", variable_2)
ncatt_put(ncout, var2, "sdn_parameter_urn", P01_VAR2)
ncatt_put(ncout, var2, "sdn_parameter_name", P01_name_var2)
ncatt_put(ncout, var2, "sdn_uom_urn", P06_var2)
ncatt_put(ncout, var2, "sdn_uom_name", P06_name_var2)
if (!is.null(std_variable_2)){
ncatt_put(ncout, var2, "standard_name", std_variable_2)
}
ncatt_put(ncout, var2, "data_max", max(obj[[variable_2]], na.rm = TRUE))
ncatt_put(ncout, var2, "data_min", min(obj[[variable_2]], na.rm = TRUE))
ncatt_put(ncout, var2, "valid_max", var2max)
ncatt_put(ncout, var2, "valid_min", var2min)
if (numvar >2){
ncatt_put(ncout, var3, "sensor_type", obj[['model']])
ncatt_put(ncout, var3, "sensor_depth", obj[['depthMin']])
ncatt_put(ncout, var3, "serial_number", obj[['serialNumber']])
ncatt_put(ncout, var3, "generic_name", variable_3)
ncatt_put(ncout, var3, "sdn_parameter_urn", P01_VAR3)
ncatt_put(ncout, var3, "sdn_parameter_name", P01_name_var3)
ncatt_put(ncout, var3, "sdn_uom_urn", P06_var3)
ncatt_put(ncout, var3, "sdn_uom_name", P06_name_var3)
if (!is.null(std_variable_3)){
ncatt_put(ncout, var3, "standard_name", std_variable_3)
}
ncatt_put(ncout, var3, "data_max", max(obj[[variable_3]], na.rm = TRUE))
ncatt_put(ncout, var3, "data_min", min(obj[[variable_3]], na.rm = TRUE))
ncatt_put(ncout, var3, "valid_max", var3max)
ncatt_put(ncout, var3, "valid_min", var3min)
if (numvar >3){
ncatt_put(ncout, var4, "sensor_type", obj[['model']])
ncatt_put(ncout, var4, "sensor_depth", obj[['depthMin']])
ncatt_put(ncout, var4, "serial_number", obj[['serialNumber']])
ncatt_put(ncout, var4, "generic_name", variable_4)
ncatt_put(ncout, var4, "sdn_parameter_urn", P01_VAR4)
ncatt_put(ncout, var4, "sdn_parameter_name", P01_name_var4)
ncatt_put(ncout, var4, "sdn_uom_urn", P06_var4)
ncatt_put(ncout, var4, "sdn_uom_name", P06_name_var4)
if (!is.null(std_variable_4)){
ncatt_put(ncout, var4, "standard_name", std_variable_4)
}
ncatt_put(ncout, var4, "data_max", max(obj[[variable_4]], na.rm = TRUE))
ncatt_put(ncout, var4, "data_min", min(obj[[variable_4]], na.rm = TRUE))
ncatt_put(ncout, var4, "valid_max", var4max)
ncatt_put(ncout, var4, "valid_min", var4min)
if (numvar >4){
ncatt_put(ncout, var5, "sensor_type", obj[['model']])
ncatt_put(ncout, var5, "sensor_depth", obj[['depthMin']])
ncatt_put(ncout, var5, "serial_number", obj[['serialNumber']])
ncatt_put(ncout, var5, "generic_name", variable_5)
ncatt_put(ncout, var5, "sdn_parameter_urn", P01_VAR5)
ncatt_put(ncout, var5, "sdn_parameter_name", P01_name_var5)
ncatt_put(ncout, var5, "sdn_uom_urn", P06_var5)
ncatt_put(ncout, var5, "sdn_uom_name", P06_name_var5)
if (!is.null(std_variable_5)){
ncatt_put(ncout, var5, "standard_name", std_variable_5)
}
ncatt_put(ncout, var5, "data_max", max(obj[[variable_5]], na.rm = TRUE))
ncatt_put(ncout, var5, "data_min", min(obj[[variable_5]], na.rm = TRUE))
ncatt_put(ncout, var5, "valid_max", var5max)
ncatt_put(ncout, var5, "valid_min", var5min)
if (numvar >5){
ncatt_put(ncout, var6, "sensor_type", obj[['model']])
ncatt_put(ncout, var6, "sensor_depth", obj[['depthMin']])
ncatt_put(ncout, var6, "serial_number", obj[['serialNumber']])
ncatt_put(ncout, var6, "generic_name", variable_6)
ncatt_put(ncout, var6, "sdn_parameter_urn", P01_VAR6)
ncatt_put(ncout, var6, "sdn_parameter_name", P01_name_var6)
ncatt_put(ncout, var6, "sdn_uom_urn", P06_var6)
ncatt_put(ncout, var6, "sdn_uom_name", P06_name_var6)
if (!is.null(std_variable_6)){
ncatt_put(ncout, var6, "standard_name", std_variable_6)
}
ncatt_put(ncout, var6, "data_max", max(obj[[variable_6]], na.rm = TRUE))
ncatt_put(ncout, var6, "data_min", min(obj[[variable_6]], na.rm = TRUE))
ncatt_put(ncout, var6, "valid_max", var6max)
ncatt_put(ncout, var6, "valid_min", var6min)
if (numvar > 6){
ncatt_put(ncout, var7, "sensor_type", obj[['model']])
ncatt_put(ncout, var7, "sensor_depth", obj[['depthMin']])
ncatt_put(ncout, var7, "serial_number", obj[['serialNumber']])
ncatt_put(ncout, var7, "generic_name", variable_7)
ncatt_put(ncout, var7, "sdn_parameter_urn", P01_VAR7)
ncatt_put(ncout, var7, "sdn_parameter_name", P01_name_var7)
ncatt_put(ncout, var7, "sdn_uom_urn", P06_var7)
ncatt_put(ncout, var7, "sdn_uom_name", P06_name_var7)
if (!is.null(std_variable_7)){
ncatt_put(ncout, var7, "standard_name", std_variable_7)
}
ncatt_put(ncout, var7, "data_max", max(obj[[variable_7]], na.rm = TRUE))
ncatt_put(ncout, var7, "data_min", min(obj[[variable_7]], na.rm = TRUE))
ncatt_put(ncout, var7, "valid_max", var7max)
ncatt_put(ncout, var7, "valid_min", var7min)
if (numvar > 7){
ncatt_put(ncout, var8, "sensor_type", obj[['model']])
ncatt_put(ncout, var8, "sensor_depth", obj[['depthMin']])
ncatt_put(ncout, var8, "serial_number", obj[['serialNumber']])
ncatt_put(ncout, var8, "generic_name", variable_8)
ncatt_put(ncout, var8, "sdn_parameter_urn", P01_VAR8)
ncatt_put(ncout, var8, "sdn_parameter_name", P01_name_var8)
ncatt_put(ncout, var8, "sdn_uom_urn", P06_var8)
ncatt_put(ncout, var8, "sdn_uom_name", P06_name_var8)
if (!is.null(std_variable_8)){
ncatt_put(ncout, var8, "standard_name", std_variable_8)
}
ncatt_put(ncout, var8, "data_max", max(obj[[variable_8]], na.rm = TRUE))
ncatt_put(ncout, var8, "data_min", min(obj[[variable_8]], na.rm = TRUE))
ncatt_put(ncout, var8, "valid_max", var8max)
ncatt_put(ncout, var8, "valid_min", var8min)
if (numvar > 8){
ncatt_put(ncout, var9, "sensor_type", obj[['model']])
ncatt_put(ncout, var9, "sensor_depth", obj[['depthMin']])
ncatt_put(ncout, var9, "serial_number", obj[['serialNumber']])
ncatt_put(ncout, var9, "generic_name", variable_9)
ncatt_put(ncout, var9, "sdn_parameter_urn", P01_VAR9)
ncatt_put(ncout, var9, "sdn_parameter_name", P01_name_var9)
ncatt_put(ncout, var9 , "sdn_uom_urn", P06_var9)
ncatt_put(ncout, var9, "sdn_uom_name", P06_name_var9)
if (!is.null(std_variable_9)){
ncatt_put(ncout, var9, "standard_name", std_variable_9)
}
ncatt_put(ncout, var9, "data_max", max(obj[[variable_9]], na.rm = TRUE))
ncatt_put(ncout, var9, "data_min", min(obj[[variable_9]], na.rm = TRUE))
ncatt_put(ncout, var9, "valid_max", var9max)
ncatt_put(ncout, var9, "valid_min", var9min)
if (numvar >9){
ncatt_put(ncout, var10, "sensor_type", obj[['model']])
ncatt_put(ncout, var10, "sensor_depth", obj[['depthMin']])
ncatt_put(ncout, var10, "serial_number", obj[['serialNumber']])
ncatt_put(ncout, var10, "generic_name", variable_10)
ncatt_put(ncout, var10, "sdn_parameter_urn", P01_VAR10)
ncatt_put(ncout, var10, "sdn_parameter_name", P01_name_var10)
ncatt_put(ncout, var10, "sdn_uom_urn", P06_var10)
ncatt_put(ncout, var10, "sdn_uom_name", P06_name_var10)
if (!is.null(std_variable_10)){
ncatt_put(ncout, var10, "standard_name", std_variable_10)
}
ncatt_put(ncout, var10, "data_max", max(obj[[variable_10]], na.rm = TRUE))
ncatt_put(ncout, var10, "data_min", min(obj[[variable_10]], na.rm = TRUE))
ncatt_put(ncout, var10, "valid_max", var10max)
ncatt_put(ncout, var10, "valid_min", var10min)
if (numvar >10){
ncatt_put(ncout, var11, "sensor_type", obj[['model']])
ncatt_put(ncout, var11, "sensor_depth", obj[['depthMin']])
ncatt_put(ncout, var11, "serial_number", obj[['serialNumber']])
ncatt_put(ncout, var11, "generic_name", variable_11)
ncatt_put(ncout, var11, "sdn_parameter_urn", P01_VAR11)
ncatt_put(ncout, var11, "sdn_parameter_name", P01_name_var11)
ncatt_put(ncout, var11, "sdn_uom_urn", P06_var11)
ncatt_put(ncout, var11, "sdn_uom_name", P06_name_var11)
if (!is.null(std_variable_11)){
ncatt_put(ncout, var11, "standard_name", std_variable_11)
}
ncatt_put(ncout, var11, "data_max", max(obj[[variable_11]], na.rm = TRUE))
ncatt_put(ncout, var11, "data_min", min(obj[[variable_11]], na.rm = TRUE))
ncatt_put(ncout, var11, "valid_max", var11max)
ncatt_put(ncout, var11, "valid_min", var11min)
if (numvar >11){
ncatt_put(ncout, var12, "sensor_type", obj[['model']])
ncatt_put(ncout, var12, "sensor_depth", obj[['depthMin']])
ncatt_put(ncout, var12 , "serial_number", obj[['serialNumber']])
ncatt_put(ncout, var12, "generic_name", variable_12)
ncatt_put(ncout, var12, "sdn_parameter_urn", P01_VAR12)
ncatt_put(ncout, var12, "sdn_parameter_name", P01_name_var12)
ncatt_put(ncout, var12, "sdn_uom_urn", P06_var12)
ncatt_put(ncout, var12, "sdn_uom_name", P06_name_var12)
if (!is.null(std_variable_12)){
ncatt_put(ncout, var12, "standard_name", std_variable_12)
}
ncatt_put(ncout, var12, "data_max", max(obj[[variable_12]], na.rm = TRUE))
ncatt_put(ncout, var12, "data_min", min(obj[[variable_12]], na.rm = TRUE))
ncatt_put(ncout, var12, "valid_max", var12max)
ncatt_put(ncout, var12, "valid_min", var12min)
}
}
}
}
}
}
}
}
}
}
}
####CF conventions & BODC standards####
ncatt_put(ncout, 0, 'Conventions', 'CF-1.7')
ncatt_put(ncout, 0, "creator_type", "person")
ncatt_put(ncout, 0, "time_coverage_start", as.character(as.POSIXct(obj[['time']][1])))
ncatt_put(ncout, 0, "time_coverage_end", as.character(as.POSIXct(tail(obj[['time']], n= 1))))
ncatt_put(ncout, 0, "geospatial_lat_min", obj[['latitude']])
ncatt_put(ncout, 0, "geospatial_lat_max", obj[['latitude']])
ncatt_put(ncout, 0, "geospatial_lat_units", "degrees_north")
ncatt_put(ncout, 0, "geospatial_lon_min", obj[['longitude']])
ncatt_put(ncout, 0, "geospatial_lon_max", obj[['longitude']])
ncatt_put(ncout, 0, "geospatial_lon_units", "degrees_east")
ncatt_put(ncout, 0, "geospatial_vertical_max", obj[['depthMax']])
ncatt_put(ncout, 0, "geospatial_vertical_min", obj[['depthMin']])
ncatt_put(ncout, 0, "geospatial_vertical_units", "metres")
ncatt_put(ncout, 0, "geospatial_vertical_positive", 'down')
# NOTE(review): "_FillValue" is written as a GLOBAL string attribute here,
# not a per-variable numeric one — confirm this is what readers expect.
ncatt_put(ncout,0, "_FillValue", "1e35")
ncatt_put(ncout, 0, "date_modified", date())
ncatt_put(ncout, 0, "institution", obj[['institute']])
####BODC P01 names####
ncatt_put(ncout, "ELTMEP01", "sdn_parameter_urn", "SDN:P01::ELTMEP01")
ncatt_put(ncout, "lon", "sdn_parameter_urn", "SDN:P01::ALONZZ01")
ncatt_put(ncout, "lat", "sdn_parameter_urn", "SDN:P01::ALATZZ01")
ncatt_put(ncout, "time_string", "sdn_parameter_urn", "SDN:P01::DTUT8601")
ncatt_put(ncout, "lon", "sdn_parameter_name", "Longitude east")
ncatt_put(ncout, "lat", "sdn_parameter_name", "Latitude north")
ncatt_put(ncout, 'ELTMEP01', "sdn_parameter_name", "Elapsed time (since 1970-01-01T00:00:00Z)")
ncatt_put(ncout, 'time_string', "sdn_parameter_name", "String corresponding to format 'YYYY-MM-DDThh:mm:ss.sssZ' or other valid ISO8601 string")
ncatt_put(ncout, "lon", "sdn_uom_urn", "SDN:P06::DEGE")
# NOTE(review): "SDN:P06:DEGN" below has a single colon (others use "::")
# — likely a typo in the emitted attribute; left untouched here.
ncatt_put(ncout, "lat", "sdn_uom_urn", "SDN:P06:DEGN")
ncatt_put(ncout, "ELTMEP01", "sdn_uom_urn", "SDN:P06::UTBB")
ncatt_put(ncout, "time_string", "sdn_uom_urn", "SDN:P06::TISO")
ncatt_put(ncout, "lon", "sdn_uom_name", "Degrees east")
ncatt_put(ncout, "lat", "sdn_uom_name", "Degrees north")
ncatt_put(ncout, "ELTMEP01", "sdn_uom_name", "Seconds")
ncatt_put(ncout, "time_string", "sdn_uom_name", "ISO8601")
#####CF standard names####
ncatt_put(ncout, "ELTMEP01", "standard_name", "time")
ncatt_put(ncout, "lat", "standard_name", "latitude")
ncatt_put(ncout, "lon", "standard_name", "longitude")
####data max and min####
#metadata from spreadsheet
# Optional CSV of extra global attributes: column 1 = attribute name,
# column 2 = attribute value; each row becomes one global attribute.
if (!missing(metadata)) {
metad <- read.csv(metadata, header = TRUE)
mn <- as.character(metad[,1])
mv <- as.character(metad[,2])
md <- as.list(mv)
names(md) <- mn
for (m in seq_along(md)) {
ncatt_put(ncout, 0, names(md)[m], md[[m]])
}
}
####preserve ODF history header####
# Flatten every HISTORY_* block of the ODF header into numbered
# ODF_HISTORY_i global attributes.
if (!is.null(obj@metadata$header)){
if (length(obj@metadata$header) != 0){
head <- obj@metadata$header
hi <- list(grep(names(head), pattern = "HISTORY"))
hist <- NULL
for ( i in 1:length(hi[[1]])){
hist[[i]] <- unlist(head[[hi[[1]][i]]])
}
histo <- unlist(hist)
histor <- NULL
for (i in 1:length(histo)){
histor[[i]] <- paste(names(histo)[[i]],":", histo[[i]])
}
history <- unlist(histor)
for (i in 1:length(history)){
ncatt_put(ncout, 0, paste0("ODF_HISTORY_", i), history[[i]])
}
#PRESERVE EVENT_COMMENTS
# Same flattening for EVENT_COMMENTS entries of the EVENT_HEADER.
# NOTE(review): the condition below is length(ec[[1]] != 0) — the
# comparison is inside length(), so this is length(logical vector),
# not a count check; probably meant length(ec[[1]]) != 0.
ec <- list(grep(names(head$EVENT_HEADER), pattern = 'EVENT_COMMENTS'))
if (length(ec[[1]] != 0)){
evc <- NULL
for( i in 1:length(ec[[1]])){
evc[[i]] <- unlist(head$EVENT_HEADER[[ec[[1]][i]]])
}
evec <- unlist(evc)
evenc <- NULL
for (i in 1:length(evec)){
evenc[[i]] <- paste(names(evec)[[i]], ":", evec[[i]])
}
eventc <- unlist(evenc)
for( i in 1:length(eventc)){
ncatt_put(ncout, 0, paste0("EVENT_COMMENTS_", i), eventc[[i]])
}
}
}
}
####nc close####
nc_close(ncout)
}
|
|
7b3d1e00ba465c321a71527ba13e1b7be18760ec
|
ba14c315f4ed435384c5b48185a5707dcf1ce093
|
/SidebarUi.R
|
c9e6ff771681fcdac0d99eb6326499c2c8c2a547
|
[] |
no_license
|
antgers/Project_AquaMiner_Periodic
|
0e318e381f1e244ba6858407f22d8900a78d7f6f
|
7e81781d607e83833e1bd2fd60f93bd5995b8497
|
refs/heads/master
| 2021-01-17T19:20:10.620858
| 2016-10-23T20:57:37
| 2016-10-23T20:57:37
| 71,663,263
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,615
|
r
|
SidebarUi.R
|
# Sidebar UI for the AquaMiner Shiny app: a sidebarPanel with a logo image,
# three collapsible sections (data upload, dimension filters, measure range
# filters) built with shinyBS::bsCollapse, and a final "Go..." action button.
# All uiOutput() placeholders are rendered server-side (renderUI) elsewhere.
sidebarUni <- sidebarPanel(
#fixed responsive img #added class img
img(src="menfishing21.png", class = "img-responsive", align = 'middle'),
hr(),
# Only one panel can be open at a time (multiple = FALSE); "Upload Data" opens first.
bsCollapse(id = "collapseSidebar" , open = "Upload Data", multiple = FALSE,
# --- Panel 1: file-upload options -----------------------------------------
bsCollapsePanel("Upload Data", style = "primary",
# NOTE(review): "extention" is a typo for "extension" in the user-facing label.
radioButtons(inputId = 'ext', label = 'File extention', choices = c('xlsx', 'xls', 'csv'),
selected = 'xlsx', inline = TRUE),
hr(),
checkboxInput(inputId = 'header', label = 'First line is a header', value = TRUE),
hr(),
radioButtons(inputId = 'th.sep', label = 'Thousand Separator', choices = c(Comma=',', Dot='.'),
selected = ',', inline = TRUE),
tags$hr(),
fileInput('file', 'Choose Excel File...', accept = c('.xls', '.xlsx', '.csv')),
tags$hr(),
h5("Press to Upload Dataset..."),
# NOTE(review): button label "Action" and id "action" are generic; presumably
# the server observes input$action to trigger the upload — confirm in server.R.
actionButton("action", label = "Action")
), # end bsCollapsePanel Upload Data
# --- Panel 2: dimension (categorical) filters, laid out in two columns ----
bsCollapsePanel("Dimensions", style = "primary",
fluidRow(column(6,
uiOutput("dimSpecies"),
uiOutput("dimUnit"),
uiOutput("dimHatchery"),
uiOutput("dimOriginMonth"),
uiOutput("dimOriginYear"),
uiOutput("dimActualFeed"),
uiOutput("dimStartAvWtCat"),
uiOutput("dimEndAvWtCat")
),
column(6,
uiOutput("dimRegion"),
uiOutput("dimSite"),
uiOutput("dimBatch"),
uiOutput("dimSamplMonth"),
uiOutput("dimSamplYear"),
uiOutput("dimSupplier"),
uiOutput("dimFeedCategory"),
uiOutput("dimFeedingPolicy")
) # end column
), # end fluidRow
# Date-range boundaries for the sampling period.
fluidRow(
uiOutput("dateRangeFrom"),
uiOutput("dateRangeTo")
) # end fluidRow
), # end bsCollapsePanel Dimensions
# --- Panel 3: measure (numeric range) filters -----------------------------
bsCollapsePanel('Measures', style = "primary" ,
fluidRow( uiOutput("rangeStAvWeight"),
uiOutput("rangeEndAvWeight"),
uiOutput("rangeBiolPeriodFCR"),
uiOutput("rangeEconPeriodFCR")
),
fluidRow(column(6,
uiOutput("rangePeriodSGR"),
uiOutput("rangePeriodTGC"),
uiOutput("rangeAvWtDeviation"),
uiOutput("rangeAvgTemp"),
uiOutput("rangeFeedDeviation"),
uiOutput("rangeLTDEconFCR")
),
column(6,
uiOutput("rangePeriodSFR"),
uiOutput("rangeGrowthPerDay"),
uiOutput("rangePeriodMortalityPerc"),
uiOutput("rangeDiffDays"),
uiOutput("rangePeriodDayDegrees"),
uiOutput("rangeLTDMortalityPerc")
) # end column
) # end fluid row
) # end of colapsePanel Measures
), # end bsCollapse
hr(),
# Applies the selected filters; presumably observed as input$Go.Button server-side.
actionButton(inputId = 'Go.Button', label = 'Go...')
) # end sidebarUni function
|
9fe0d902dff5c71a1edad97eb4f483713b339270
|
2cb802c7e9bb18670769604cb289b03192661d5a
|
/COPS code/6b create pregnancy level file.R
|
1b66a315e5c145b3ecf6dfafb256ea60d15990d9
|
[] |
no_license
|
Public-Health-Scotland/COPS-public
|
a18d36d8a69479e34c1ddd31f23a15b5b7a6eba6
|
b4c4df18020712fbae08a979226d0a382d6aeda9
|
refs/heads/main
| 2023-07-29T17:41:26.677028
| 2023-07-11T12:40:32
| 2023-07-11T12:40:32
| 362,821,738
| 0
| 2
| null | 2021-12-07T12:55:46
| 2021-04-29T13:11:02
|
R
|
UTF-8
|
R
| false
| false
| 8,080
|
r
|
6b create pregnancy level file.R
|
# Script 6b: collapse the fetus/baby-level record down to one row per pregnancy.
#
# Reads the baby-level file produced by script 6, harmonises a few column
# names, aggregates every fetus row sharing a pregnancy_id into a single
# pregnancy-level row, and writes the result as a compressed .rds file.
#
# NOTE(review): first_()/max_()/min_() are assumed to be the project's NA-safe
# wrappers around first()/max()/min() (returning NA instead of erroring or
# returning -Inf on all-NA input) — confirm against the project helpers.
fetuslevel <- read_rds(paste0(folder_temp_data, "script6_baby_level_record_infection.rds"))

# Quick fixes to names to let the cohort run with extra data.
# Needs changes in 6aa to retain names without "_value_", or a decision to
# change the names below in the long run.
fetuslevel <- fetuslevel %>%
  rename(tests_mother_has_had_pcr_test_at_any_point = tests_mother_has_pos_test_at_any_point) %>%
  rename(tests_mother_positive_test_during_pregnancy_1 = tests_mother_value_positive_test_during_pregnancy_1,
         tests_mother_positive_test_during_pregnancy_2 = tests_mother_value_positive_test_during_pregnancy_2)

pregnancies <- fetuslevel %>%
  rowwise() %>%
  # summarise() won't alter NA date values, so use 1970-01-01 as a stand-in
  # and change it back to NA after the aggregation (see final mutate()).
  mutate(x_pregnancy_end_date = replace_na(x_pregnancy_end_date, as.Date("1970-01-01"))) %>%
  ungroup() %>%
  arrange(pregnancy_id) %>%
  group_by(pregnancy_id) %>%
  # One outcome per pregnancy, chosen by clinical precedence (first match wins).
  mutate(overall_outcome = case_when("Live birth" %in% outcome ~ "Live birth",
                                     "Termination" %in% outcome ~ "Termination",
                                     "Stillbirth" %in% outcome ~ "Stillbirth",
                                     "Ectopic pregnancy" %in% outcome ~ "Ectopic pregnancy",
                                     "Molar pregnancy" %in% outcome ~ "Molar pregnancy",
                                     "Miscarriage" %in% outcome ~ "Miscarriage",
                                     "Ongoing" %in% outcome ~ "Ongoing",
                                     "Unknown" %in% outcome ~ "Unknown")) %>%
  # Mother-level fields take the first value; flags take the max (i.e. "any").
  summarise(mother_upi = first_(mother_upi),
            gestation_at_outcome = max_(x_gestation_at_outcome),
            pregnancy_end_date = as.Date(max_(x_pregnancy_end_date)),
            est_conception_date = as.Date(min_(x_est_conception_date)),
            overall_outcome = first_(overall_outcome),
            first_wave = max_(x_first_wave),
            full_cohort = max_(x_full_cohort),
            mother_dob = first_(x_mother_dob),
            mother_age_at_conception = first_(x_mother_age_at_conception),
            mother_age_at_outcome = first_(x_mother_age_at_outcome),
            hbres = first_(x_hbres),
            # FIX: was first(x_postcode); use the NA-safe first_() like every
            # other mother-level field in this summarise().
            postcode = first_(x_postcode),
            simd = first_(x_simd),
            bmi = first_(x_bmi),
            booking_smoking_status = first_(x_booking_smoking_status),
            gp_smoking_status = first_(x_gp_smoking_status),
            overall_smoking_status = first_(x_overall_smoking_status),
            ethnicity_code = first_(x_ethnicity_code),
            ethnicity_description = first_(x_ethnicity_desc),
            urban_rural_description = first_(x_urban_rural_8_description),
            births_this_pregnancy = max_(x_births_this_pregnancy),
            diabetes = max_(x_diabetes),
            shielding = max_(shielding_shield),
            shielding_group1 = max_(shielding_group1),
            shielding_group2 = max_(shielding_group2),
            shielding_group3 = max_(shielding_group3),
            shielding_group4 = max_(shielding_group4),
            shielding_group5 = max_(shielding_group5),
            shielding_group6 = max_(shielding_group6),
            shielding_group7 = max_(shielding_group7),
            shielding_group_any = max_(shielding_group_any),
            q_covid = max_(q_covid),
            q_bmi = first_(q_bmi),
            q_bmi_40_plus = max_(q_bmi_40_plus),
            q_diabetes_type = max_(q_diabetes_type),
            q_diag_af = max_(q_diag_af),
            q_diag_asthma = max_(q_diag_asthma),
            q_diag_blood_cancer = max_(q_diag_blood_cancer),
            q_diag_ccf = max_(q_diag_ccf),
            q_diag_cerebralpalsy = max_(q_diag_cerebralpalsy),
            q_diag_chd = max_(q_diag_chd),
            q_diag_cirrhosis = max_(q_diag_cirrhosis),
            q_diag_ckd3 = max_(q_diag_ckd3),
            q_diag_ckd4 = max_(q_diag_ckd4),
            q_diag_ckd5 = max_(q_diag_ckd5),
            q_diag_congen_hd = max_(q_diag_congen_hd),
            q_diag_copd = max_(q_diag_copd),
            q_diag_dementia = max_(q_diag_dementia),
            q_diag_diabetes_1 = max_(q_diag_diabetes_1),
            q_diag_diabetes_2 = max_(q_diag_diabetes_2),
            q_diag_epilepsy = max_(q_diag_epilepsy),
            q_diag_fracture = max_(q_diag_fracture),
            q_diag_neuro = max_(q_diag_neuro),
            q_diag_parkinsons = max_(q_diag_parkinsons),
            # FIX: was max(q_diag_pulm_hyper); use the NA-safe max_() like all
            # the surrounding q_diag_* flags so NA values cannot propagate or
            # trigger all-NA warnings inconsistently with the other columns.
            q_diag_pulm_hyper = max_(q_diag_pulm_hyper),
            q_diag_pulm_rare = max_(q_diag_pulm_rare),
            q_diag_pvd = max_(q_diag_pvd),
            q_diag_ra_sle = max_(q_diag_ra_sle),
            q_diag_resp_cancer = max_(q_diag_resp_cancer),
            q_diag_sev_ment_ill = max_(q_diag_sev_ment_ill),
            q_diag_sickle_cell = max_(q_diag_sickle_cell),
            q_diag_stroke = max_(q_diag_stroke),
            q_diag_vte = max_(q_diag_vte),
            q_diag_renal_failure = max_(q_diag_renal_failure),
            q_ethnicity = first_(q_ethnicity),
            q_ethnicity_mapped9 = first_(q_ethnicity_mapped9),
            q_home_cat = first_(q_home_cat), # Should we use first_() or max_() here?
            q_learn_cat = first_(q_learn_cat),# Should we use first_() or max_() here?
            q_preexisting_diabetes = max_(q_preexisting_diabetes),
            cv_clinical_vulnerability_category = first_(cv_clinical_vulnerability_category),
            dose_1_vacc_occurence_date = first_(dose_1_vacc_occurence_date),
            dose_1_vacc_product_name = first_(dose_1_vacc_product_name),
            dose_1_vacc_location_health_board_name = first_(dose_1_vacc_location_health_board_name),
            dose_2_vacc_occurence_date = first_(dose_2_vacc_occurence_date),
            dose_2_vacc_product_name = first_(dose_2_vacc_product_name),
            dose_2_vacc_location_health_board_name = first_(dose_2_vacc_location_health_board_name),
            dose_3_vacc_occurence_date = first_(dose_3_vacc_occurence_date),
            dose_3_vacc_product_name = first_(dose_3_vacc_product_name),
            dose_3_vacc_location_health_board_name = first_(dose_3_vacc_location_health_board_name),
            dose_4_vacc_occurence_date = first_(dose_4_vacc_occurence_date),
            dose_4_vacc_product_name = first_(dose_4_vacc_product_name),
            dose_4_vacc_location_health_board_name = first_(dose_4_vacc_location_health_board_name),
            mother_has_had_pcr_test_at_any_point = max_(tests_mother_has_had_pcr_test_at_any_point),
            mother_earliest_positive_test = first_(tests_mother_earliest_positive_test),
            #mother_earliest_negative_test = first_(tests_mother_earliest_negative_test),
            mother_tested_positive_during_pregnancy = max_(tests_mother_positive_test_during_pregnancy),
            #mother_tested_negative_during_pregnancy = max_(tests_mother_negative_test_during_pregnancy),
            mother_earliest_positive_test_during_pregnancy = first_(tests_mother_earliest_positive_test_during_pregnancy),
            #mother_earliest_negative_test_during_pregnancy = first_(tests_mother_earliest_negative_test_during_pregnancy),
            mother_positive_test_during_pregnancy_1 = first_(tests_mother_positive_test_during_pregnancy_1),
            mother_positive_test_during_pregnancy_2 = first_(tests_mother_positive_test_during_pregnancy_2), # Theoretically we could have up to three positive tests during a pregnancy. Revise this code to accept an arbitrary number of positive tests.
            mother_total_positive_during_pregnancy = first_(tests_mother_total_positives_during_this_pregnancy),
            mother_eave_linkno = first_(mother_eave_linkno),
            gp_data_status = "GP data included",
            chi_validity = first_(chi_validity)) %>%
  # Undo the 1970-01-01 stand-in introduced before the aggregation.
  mutate(pregnancy_end_date = if_else(pregnancy_end_date == as.Date("1970-01-01"), NA_Date_ , pregnancy_end_date))

#### Write pregnancy-level file ####
pregnancies %>% write_rds(paste0(folder_temp_data, "script6b_pregnancy_level_record.rds"), compress = "gz")
|
f556a9157829fa6cfda53a6615c5069ad3a1c038
|
b6abcd32866919c5330394ec171a09d6a9085930
|
/R/topicModel.terms.R
|
ba8535e1e6ebeb1f176f77d5ad895683bf352f2d
|
[] |
no_license
|
aidanoneill/textmining
|
b1c35e3cff9de1de392b90ba99c99ebde242464b
|
1699dddb76d305c33799af5bd3f976031db2438a
|
refs/heads/master
| 2016-09-10T19:40:03.450673
| 2015-03-19T19:38:19
| 2015-03-19T19:38:19
| 32,542,012
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 276
|
r
|
topicModel.terms.R
|
# Report the top terms per topic from an LDA topic model.
#
# Args:
#   data: a document-term matrix as accepted by topicmodels::LDA().
#   k:    the number of topics to fit.
# Returns: a matrix with the 10 highest-probability terms for each topic.
topicModel.terms <-
  function(data, k){
    # Fail fast with a clear error; require() merely returns FALSE when the
    # package is missing, which would surface later as a confusing error.
    if (!requireNamespace("topicmodels", quietly = TRUE)) {
      stop("Package 'topicmodels' is required for topicModel.terms()", call. = FALSE)
    }
    lda <- topicmodels::LDA(x = data, k = k, method = "VEM")
    # terms() gives the top terms per topic; the original trailing comment
    # wrongly said "which documents belong to which topic" (that is topics()).
    return(topicmodels::terms(lda, 10))
  }
|
eb5f791115caabce15649bc5b3dd49767fd739a7
|
4fea06a47c87fec7905b553c7f2184664abec48f
|
/R/create-events.R
|
dbc4e0ae48bb2bbde429311df368c41a83afee42
|
[
"CC-BY-4.0",
"MIT"
] |
permissive
|
au-cru/site
|
982792332b309e72cb8417415f2d1b9422fe78ae
|
c92349af621d62efeb58d4b11e81c85c8328ece8
|
refs/heads/master
| 2021-07-24T05:52:37.834147
| 2021-07-07T15:17:31
| 2021-07-07T15:17:31
| 207,162,840
| 0
| 3
|
NOASSERTION
| 2020-02-28T09:31:43
| 2019-09-08T19:30:07
|
CSS
|
UTF-8
|
R
| false
| false
| 3,632
|
r
|
create-events.R
|
# Generates per-event YAML files for the site from a pasted-in schedule:
# one "code-along" file per session, plus a "hacky hour" file for each
# session in the second half of a month.
library(tidyverse)
# Also use glue and datapasta.
# Code-along --------------------------------------------------------------
# Paste from Google Sheets into Excel/Calc using datapaste package.
events <- tibble::tribble(
~Date, ~Topic, ~Level,
"1 November 2019", "Data visualization with ggplot2, part 1", "Beginner",
"15 November 2019", "Data wrangling with dplyr, part 1", "Beginner",
"29 November 2019", "Creating functions", "Beginner-Intermediate",
"13 December 2019", "Reproducible reports with R Markdown", "Beginner",
"17 January 2020", "Reproducibility and project management", "Beginner",
"31 January 2020", "Data wrangling with data.table", "Intermediate",
"14 February 2020", "Version control with Git", "Beginner",
"28 February 2020", "Efficient coding and best practices", "Beginner-Intermediate",
"13 March 2020", "Data visualization with ggplot2, part 2", "Beginner-Intermediate",
"27 March 2020", "Learning how to learn/finding help", "Beginner",
"10 April 2020", "First steps in creating R packages", "Intermediate",
"24 April 2020", "Create websites with R (blogdown)", "Intermediate",
"8 May 2020", "Creating interactive apps with Shiny", "Intermediate",
"22 May 2020", "Data wrangling with dplyr and tidyr, part 2", "Beginner-Intermediate"
)
# Do some data wrangling of the pasted in schedule.
# NOTE(review): the "+01:00" offset is hard-coded (CET); sessions falling in
# daylight-saving time would be +02:00 — confirm whether that matters here.
events_prep <- events %>%
rename(name = Topic, level = Level) %>%
mutate(
date_ymd = lubridate::dmy(Date),
start_date = str_c(date_ymd, "T13:00:00+01:00"),
end_date = str_c(date_ymd, "T14:15:00+01:00"),
type = "code-along",
location = "AU Library, Main Floor, Nobelparken, Universitetsparken 461, 8000 Aarhus",
software = "R"
)
# Template for the events.
# glue fills the {placeholders} from the matching columns of events_prep.
events_template <- '
type: "{type}"
name: "{name}"
description: >
FILL IN
location: "{location}"
start_date: {start_date}
end_date: {end_date}
level: "{level}"
software: ["{software}"]
'
# All new file names to be created.
event_files <- here::here("data", "events", str_c(events_prep$date_ymd, "-code-along.yaml"))
# Fill in the template with the contents of the events.
event_file_contents <- events_prep %>%
glue::glue_data(events_template)
# Create the event files.
walk2(event_file_contents, event_files, write_lines)
# Hacky hours -------------------------------------------------------------
# Keep only sessions whose day-of-month is past the 20th, i.e. the second
# session of each month, which gets a follow-on hacky hour.
hacky_dates <- events_prep %>%
filter(as.numeric(str_extract(Date, "^\\d+")) > 20)
hacky_hours_template <- '
type: "coworking"
name: "Hacky hour hang out"
description: >
Come to this informal hangout to ask for feedback on problems you are experiencing,
to give advice or help others out with their problems, or just co-work with
other likeminded researchers who also use (open) research software for their work.
location: "Studiecaféen, Studenterhus, Nordre Ringgade 3, 8000 Aarhus"
start_date: {date_ymd}T14:30:00+01:00
end_date: {date_ymd}T15:30:00+01:00
software: [""]
level: "everyone"
'
# All new file names to be created.
hacky_files <- here::here("data", "events", str_c(hacky_dates$date_ymd, "-hacky-hour.yaml"))
# Fill in the template with the contents of the events.
hacky_file_contents <- hacky_dates %>%
glue::glue_data(hacky_hours_template)
# Create the event files.
walk2(hacky_file_contents, hacky_files, write_lines)
|
60ade196f0694b1383c5d764590f131f7a051542
|
247168dd727c19cef2ce885476d3e4102d2ca7de
|
/man/auth_put.Rd
|
65f87e05642520320a83038338cfdaacf7616d91
|
[
"Apache-2.0"
] |
permissive
|
DataONEorg/rdataone
|
cdb0a3a7b8c3f66ce5b2af41505d89d2201cce90
|
97ef173bce6e4cb3bf09698324185964299a8df1
|
refs/heads/main
| 2022-06-15T08:31:18.102298
| 2022-06-09T21:07:26
| 2022-06-09T21:07:26
| 14,430,641
| 27
| 19
| null | 2022-06-01T14:48:02
| 2013-11-15T17:27:47
|
R
|
UTF-8
|
R
| false
| true
| 846
|
rd
|
auth_put.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/auth_request.R
\name{auth_put}
\alias{auth_put}
\title{PUT a resource with authenticated credentials.}
\usage{
auth_put(url, encode = "multipart", body = NULL, node)
}
\arguments{
\item{url}{The URL to be accessed via authenticated PUT}
\item{encode}{the type of encoding to use for the PUT body, defaults to 'multipart'}
\item{body}{a list of data to be included in the body of the PUT request}
\item{node}{The D1Node object that the request will be made to.}
}
\value{
the HTTP response from the request
}
\description{
PUT data to a URL via an HTTP PUT request, using authentication credentials
provided in a client certificate. Authenticated access depends on the suggested
openssl package. If the openssl package is not installed, then the request fails.
}
|
fb19c08d3c7e0d715f02a346ba9b6a1ea641cd93
|
4add4f324b954c7dc2e53fc040108dd5d200ce2f
|
/R/code.R
|
1721b76db09003b8cf631adc891be1212091c509
|
[] |
no_license
|
tdhock/requireGitHub
|
c12b211875be47bb878d2d05f5b9f0211dc592ed
|
f36a95a1542bbab0614ba20c610bfe4e6f497332
|
refs/heads/master
| 2020-12-29T02:19:36.062504
| 2019-05-17T16:03:10
| 2019-05-17T16:03:10
| 18,137,680
| 1
| 0
| null | 2017-03-16T20:02:13
| 2014-03-26T12:46:55
|
R
|
UTF-8
|
R
| false
| false
| 893
|
r
|
code.R
|
##' Print a requireGitHub declaration.
##'
##' Inspects the DESCRIPTION metadata of each named installed package and
##' prints a \code{requireGitHub::requireGitHub()} call pinning each package
##' to its exact "user/repo@sha" commit. The Github* fields are only present
##' when the package was installed from GitHub (e.g. via devtools/remotes).
##' @param ... unquoted package names.
##' @return An invisible character vector of repository/package
##' version codes.
##' @author Toby Dylan Hocking
##' @export
##' @examples
##' if(FALSE){
##' requireGitHub_code(requireGitHub)
##' }
requireGitHub_code <- function(...){
  pkgs <- match.call()[-1]  # the unevaluated package names supplied by the caller
  # Preallocate instead of growing the vector one element per iteration.
  repo.code <- character(length(pkgs))
  for(pkg.i in seq_along(pkgs)){
    pkg.name <- as.character(pkgs[[pkg.i]])
    pkg.info <- packageDescription(pkg.name)
    tryCatch({
      repo.code[[pkg.i]] <- with(pkg.info, {
        sprintf("%s/%s@%s", GithubUsername, GithubRepo, GithubSHA1)
      })
    }, error=function(e){
      # with() errors when the Github* fields are absent from DESCRIPTION,
      # i.e. the package was not installed from GitHub.
      stop("GitHub meta-data not in ", pkg.name, " DESCRIPTION")
    })
  }
  # Re-format deparse()'s c("a", "b") output into a multi-line call.
  txt <- deparse(repo.code)
  txt.return <-
    sub("c[(]", "requireGitHub::requireGitHub(\n ", gsub("[ ]+", "\n ", txt))
  cat(txt.return, "\n")
  invisible(repo.code)
}
|
5fbd9b6eecd0577b8e3147592ea6b85e8e422fd5
|
7bae5569fd5509263b0cdd20fc1c6c14436410f9
|
/packages/RNASeq/summary/RCODE/readFiles.R
|
f06d9cb5bf058391f813bf99aa8eb3c090c64ed2
|
[] |
no_license
|
cfbuenabadn/YosefCode2
|
fe578ac0e9d0ff5ce724209dde1379acae6ab0ad
|
35bd4e749301b728ad502d6327b88c01de71cbd3
|
refs/heads/master
| 2021-07-05T03:40:51.753643
| 2017-06-23T10:50:08
| 2017-06-23T10:50:08
| 105,191,082
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,768
|
r
|
readFiles.R
|
# Driver script: for each RNA-Seq study, source the loader, build a
# collectedRNASeqStudy object from the study's collect directory + config
# spreadsheet, and cache it as collectedRNASeqStudy.RData in that directory.
# Each stanza is independent; rm(list=ls()) wipes the workspace between runs.
# NOTE(review): rm(list=ls())/setwd() in scripts are discouraged practice, but
# kept here to preserve this script's established run-each-stanza workflow.
#for debug
rm(list=ls())
setwd("/data/yosef/users/allonwag//YosefCode//packages//RNASeq//summary//RCODE")
source("loadProcessedRNASeq_NG.R")
collect_dir="~/archive/users/allonwag/temp/big_pipe_out/collect"
collectedRNASeqStudy = loadProcessedRNASeq_NG(collect_dir=collect_dir,
config_file=file.path(collect_dir, "config_file.xlsx"),
qc_fields_file="/data/yosef/CD8_effector_diff/src/YosefCode/packages/RNASeq/summary/TEXT/qc_fields.txt",
gene_fields_file="/data/yosef/CD8_effector_diff/src/YosefCode/packages/RNASeq/summary/TEXT/gene_fields.txt",
LOAD_RSEM=F, LOAD_CUFF=T, LOAD_KALLISTO=T)
save(collectedRNASeqStudy, file=file.path(collect_dir, "collectedRNASeqStudy.RData"))
# BRAIN olfactory (Jun 2016 processing, Aug 2016 collect).
rm(list=ls())
setwd("/data/yosef/users/allonwag//YosefCode//packages//RNASeq//summary//RCODE")
source("loadProcessedRNASeq_NG.R")
#collect_dir="/data/yosef/BRAIN/processed_Sep2015/collect"
collect_dir="/data/yosef2/BRAIN/processed_olfactory_Jun2016/collect_20160818/"
collectedRNASeqStudy = loadProcessedRNASeq_NG(collect_dir=collect_dir,
config_file=file.path(collect_dir, "config_olfactory.xlsx"),
qc_fields_file="/data/yosef/CD8_effector_diff/src/YosefCode/packages/RNASeq/summary/TEXT/qc_fields.txt",
gene_fields_file="/data/yosef/CD8_effector_diff/src/YosefCode/packages/RNASeq/summary/TEXT/gene_fields.txt",
LOAD_RSEM=TRUE, LOAD_CUFF=TRUE, LOAD_KALLISTO=TRUE)
save(collectedRNASeqStudy, file=file.path(collect_dir, "collectedRNASeqStudy.RData"))
# BRAIN cortical (Jul 2015 processing); no kallisto output loaded here.
rm(list=ls())
setwd("/data/yosef/users/allonwag//YosefCode//packages//RNASeq//summary//RCODE")
source("loadProcessedRNASeq_NG.R")
collect_dir="/data/yosef/BRAIN/processed_July2015/collect"
collectedRNASeqStudy = loadProcessedRNASeq_NG(collect_dir=collect_dir,
config_file=file.path(collect_dir, "config_cortical.xlsx"),
qc_fields_file="/data/yosef/CD8_effector_diff/src/YosefCode/packages/RNASeq/summary/TEXT/qc_fields.txt",
gene_fields_file="/data/yosef/CD8_effector_diff/src/YosefCode/packages/RNASeq/summary/TEXT/gene_fields.txt",
LOAD_RSEM=T, LOAD_CUFF=T)
save(collectedRNASeqStudy, file=file.path(collect_dir, "collectedRNASeqStudy.RData"))
# BRAIN cortical (Oct 2016 reprocessing).
rm(list=ls())
setwd("/data/yosef/users/allonwag//YosefCode//packages//RNASeq//summary//RCODE")
source("loadProcessedRNASeq_NG.R")
collect_dir="/data/yosef2/BRAIN/processed_cortical_Oct2016/collect"
collectedRNASeqStudy = loadProcessedRNASeq_NG(collect_dir=collect_dir,
config_file=file.path(collect_dir, "config_cortical.xlsx"),
qc_fields_file="/data/yosef/CD8_effector_diff/src/YosefCode/packages/RNASeq/summary/TEXT/qc_fields.txt",
gene_fields_file="/data/yosef/CD8_effector_diff/src/YosefCode/packages/RNASeq/summary/TEXT/gene_fields.txt",
LOAD_RSEM=TRUE, LOAD_CUFF=TRUE, LOAD_KALLISTO=TRUE)
save(collectedRNASeqStudy, file=file.path(collect_dir, "collectedRNASeqStudy.RData"))
# Bateup study. NOTE(review): no rm(list=ls()) before this stanza — the
# previous study's objects remain in the workspace; confirm this is intended.
setwd("/data/yosef/users/allonwag//YosefCode//packages//RNASeq//summary//RCODE")
source("loadProcessedRNASeq_NG.R")
collect_dir="/data/yosef/BRAIN/processed_Bateup_Aug2015/collect"
collectedRNASeqStudy = loadProcessedRNASeq_NG(collect_dir=collect_dir,
config_file=file.path(collect_dir, "config_bateup.xlsx"),
qc_fields_file="/data/yosef/CD8_effector_diff/src/YosefCode/packages/RNASeq/summary/TEXT/qc_fields.txt",
gene_fields_file="/data/yosef/CD8_effector_diff/src/YosefCode/packages/RNASeq/summary/TEXT/gene_fields.txt",
LOAD_RSEM=T, LOAD_CUFF=T)
save(collectedRNASeqStudy, file=file.path(collect_dir, "collectedRNASeqStudy.RData"))
# Zebrafish study (also missing the workspace reset — see note above).
setwd("/data/yosef/users/allonwag//YosefCode//packages//RNASeq//summary//RCODE")
source("loadProcessedRNASeq_NG.R")
collect_dir="/data/yosef/BRAIN/processed_Zebrafish_Oct2015/collect"
collectedRNASeqStudy = loadProcessedRNASeq_NG(collect_dir=collect_dir,
config_file=file.path(collect_dir, "config_samisrael.xlsx"),
qc_fields_file="/data/yosef/CD8_effector_diff/src/YosefCode/packages/RNASeq/summary/TEXT/qc_fields.txt",
gene_fields_file="/data/yosef/CD8_effector_diff/src/YosefCode/packages/RNASeq/summary/TEXT/gene_fields.txt",
LOAD_RSEM=T, LOAD_CUFF=T)
save(collectedRNASeqStudy, file=file.path(collect_dir, "collectedRNASeqStudy.RData"))
# Published TH17 data.
rm(list=ls())
setwd("/data/yosef/users/allonwag//YosefCode//packages//RNASeq//summary//RCODE")
source("loadProcessedRNASeq_NG.R")
collect_dir="~/data2/Published_Data/TH17/processed_aw20160712/collect"
collectedRNASeqStudy = loadProcessedRNASeq_NG(collect_dir=collect_dir,
config_file=file.path(collect_dir, "config_th17.xlsx"),
qc_fields_file="/data/yosef/CD8_effector_diff/src/YosefCode/packages/RNASeq/summary/TEXT/qc_fields.txt",
gene_fields_file="/data/yosef/CD8_effector_diff/src/YosefCode/packages/RNASeq/summary/TEXT/gene_fields.txt",
LOAD_RSEM=TRUE, LOAD_CUFF=TRUE, LOAD_KALLISTO=TRUE)
save(collectedRNASeqStudy, file=file.path(collect_dir, "collectedRNASeqStudy.RData"))
# Published Shalek 2014 dendritic-cell data; config lives outside collect_dir.
rm(list=ls())
setwd("/data/yosef/users/allonwag//YosefCode//packages//RNASeq//summary//RCODE")
source("loadProcessedRNASeq_NG.R")
collect_dir="~/data2/Published_Data/Shalek_DC/co/"
collectedRNASeqStudy = loadProcessedRNASeq_NG(collect_dir=collect_dir,
config_file=file.path("~/data2/Published_Data/Shalek_DC/config_shalek2014_aw.txt"),
qc_fields_file="/data/yosef/CD8_effector_diff/src/YosefCode/packages/RNASeq/summary/TEXT/qc_fields.txt",
gene_fields_file="/data/yosef/CD8_effector_diff/src/YosefCode/packages/RNASeq/summary/TEXT/gene_fields.txt",
LOAD_RSEM=TRUE, LOAD_CUFF=TRUE, LOAD_KALLISTO=FALSE)
save(collectedRNASeqStudy, file=file.path(collect_dir, "collectedRNASeqStudy.RData"))
# TFH study (2016-07-20 processing).
rm(list=ls())
setwd("/data/yosef/users/allonwag//YosefCode//packages//RNASeq//summary//RCODE")
source("loadProcessedRNASeq_NG.R")
collect_dir="~/data2/TFH/processed_20160720/collect/"
collectedRNASeqStudy = loadProcessedRNASeq_NG(collect_dir=collect_dir,
config_file=file.path(collect_dir, "config_tfh.xlsx"),
qc_fields_file="/data/yosef/CD8_effector_diff/src/YosefCode/packages/RNASeq/summary/TEXT/qc_fields.txt",
gene_fields_file="/data/yosef/CD8_effector_diff/src/YosefCode/packages/RNASeq/summary/TEXT/gene_fields.txt",
LOAD_RSEM=TRUE, LOAD_CUFF=TRUE, LOAD_KALLISTO=TRUE)
save(collectedRNASeqStudy, file=file.path(collect_dir, "collectedRNASeqStudy.RData"))
# TFH with fate-mapping annotation (CSV config variant).
rm(list=ls())
setwd("/data/yosef/users/allonwag//YosefCode//packages//RNASeq//summary//RCODE")
source("loadProcessedRNASeq_NG.R")
collect_dir="~/data2/TFH/processed_20160720/collectWithFateMapping/"
collectedRNASeqStudy = loadProcessedRNASeq_NG(collect_dir=collect_dir,
config_file=file.path(collect_dir, "config_tfh.csv"),
qc_fields_file="/data/yosef/CD8_effector_diff/src/YosefCode/packages/RNASeq/summary/TEXT/qc_fields.txt",
gene_fields_file="/data/yosef/CD8_effector_diff/src/YosefCode/packages/RNASeq/summary/TEXT/gene_fields.txt",
LOAD_RSEM=TRUE, LOAD_CUFF=TRUE, LOAD_KALLISTO=TRUE)
save(collectedRNASeqStudy, file=file.path(collect_dir, "collectedRNASeqStudy.RData"))
# TFH flowcell FC_01930 (older processed2 run; RSEM + cufflinks only).
rm(list=ls())
setwd("/data/yosef/users/allonwag//YosefCode//packages//RNASeq//summary//RCODE")
source("loadProcessedRNASeq_NG.R")
collect_dir="~/data/TFH/processed2/collect/"
collectedRNASeqStudy = loadProcessedRNASeq_NG(collect_dir=collect_dir,
config_file=file.path(collect_dir, "config_FC_01930.xlsx"),
qc_fields_file="/data/yosef/CD8_effector_diff/src/YosefCode/packages/RNASeq/summary/TEXT/qc_fields.txt",
gene_fields_file="/data/yosef/CD8_effector_diff/src/YosefCode/packages/RNASeq/summary/TEXT/gene_fields.txt",
LOAD_RSEM=TRUE, LOAD_CUFF=TRUE, LOAD_KALLISTO=FALSE)
save(collectedRNASeqStudy, file=file.path(collect_dir, "collectedRNASeqStudy.RData"))
# TFH FC_01930 (2016-10-12 reprocessing; kallisto only).
rm(list=ls())
setwd("/data/yosef/users/allonwag//YosefCode//packages//RNASeq//summary//RCODE")
source("loadProcessedRNASeq_NG.R")
collect_dir="~/data/TFH/processed_20161012/FC_01930/collect/"
collectedRNASeqStudy = loadProcessedRNASeq_NG(collect_dir=collect_dir,
config_file=file.path(collect_dir, "config_FC_01930.xlsx"),
qc_fields_file="/data/yosef/CD8_effector_diff/src/YosefCode/packages/RNASeq/summary/TEXT/qc_fields.txt",
gene_fields_file="/data/yosef/CD8_effector_diff/src/YosefCode/packages/RNASeq/summary/TEXT/gene_fields.txt",
LOAD_RSEM=FALSE, LOAD_CUFF=FALSE, LOAD_KALLISTO=TRUE)
save(collectedRNASeqStudy, file=file.path(collect_dir, "collectedRNASeqStudy.RData"))
# BASF nutraceuticals study.
rm(list=ls())
setwd("/data/yosef/users/allonwag//YosefCode//packages//RNASeq//summary//RCODE")
source("loadProcessedRNASeq_NG.R")
collect_dir="~/data2/BASF/Nutraceuticals/processed_RNASeq_20160826/collect/"
collectedRNASeqStudy = loadProcessedRNASeq_NG(collect_dir=collect_dir,
config_file=file.path(collect_dir, "config_basf.xlsx"),
qc_fields_file="/data/yosef/CD8_effector_diff/src/YosefCode/packages/RNASeq/summary/TEXT/qc_fields.txt",
gene_fields_file="/data/yosef/CD8_effector_diff/src/YosefCode/packages/RNASeq/summary/TEXT/gene_fields.txt",
LOAD_RSEM=TRUE, LOAD_CUFF=TRUE, LOAD_KALLISTO=TRUE)
save(collectedRNASeqStudy, file=file.path(collect_dir, "collectedRNASeqStudy.RData"))
# NOTE(review): this stanza is an exact duplicate of the BASF stanza above
# (same collect_dir, config, and flags); it redoes identical work — likely a
# copy-paste left-over that could be removed.
rm(list=ls())
setwd("/data/yosef/users/allonwag//YosefCode//packages//RNASeq//summary//RCODE")
source("loadProcessedRNASeq_NG.R")
collect_dir="~/data2/BASF/Nutraceuticals/processed_RNASeq_20160826/collect/"
collectedRNASeqStudy = loadProcessedRNASeq_NG(collect_dir=collect_dir,
config_file=file.path(collect_dir, "config_basf.xlsx"),
qc_fields_file="/data/yosef/CD8_effector_diff/src/YosefCode/packages/RNASeq/summary/TEXT/qc_fields.txt",
gene_fields_file="/data/yosef/CD8_effector_diff/src/YosefCode/packages/RNASeq/summary/TEXT/gene_fields.txt",
LOAD_RSEM=TRUE, LOAD_CUFF=TRUE, LOAD_KALLISTO=TRUE)
save(collectedRNASeqStudy, file=file.path(collect_dir, "collectedRNASeqStudy.RData"))
# Th17 meta RNA-seq (both pools; RSEM only).
rm(list=ls())
setwd("/data/yosef/users/allonwag//YosefCode//packages//RNASeq//summary//RCODE")
source("loadProcessedRNASeq_NG.R")
collect_dir="/data/yosef2/th17_RNA_Seq_Out/collect_both_pools"
collectedRNASeqStudy = loadProcessedRNASeq_NG(collect_dir=collect_dir,
config_file=file.path(collect_dir, "config_ChaoWang_Th17Meta_RNAseq_Sep20_2016.csv"),
qc_fields_file="/data/yosef/CD8_effector_diff/src/YosefCode/packages/RNASeq/summary/TEXT/qc_fields.txt",
gene_fields_file="/data/yosef/CD8_effector_diff/src/YosefCode/packages/RNASeq/summary/TEXT/gene_fields.txt",
LOAD_RSEM=TRUE, LOAD_CUFF=FALSE, LOAD_KALLISTO=FALSE)
save(collectedRNASeqStudy, file=file.path(collect_dir, "collectedRNASeqStudy.RData"))
# Th17 metabolic study; config sits two levels up from collect_dir.
rm(list=ls())
setwd("/data/yosef/users/allonwag//YosefCode//packages//RNASeq//summary//RCODE")
source("loadProcessedRNASeq_NG.R")
collect_dir="/data/yosef2/users/allonwag/Th17Metabolic/processed_1/collect/"
collectedRNASeqStudy = loadProcessedRNASeq_NG(collect_dir=collect_dir,
config_file=file.path(collect_dir, "../../sources/all_configs.csv"),
qc_fields_file="/data/yosef/CD8_effector_diff/src/YosefCode/packages/RNASeq/summary/TEXT/qc_fields.txt",
gene_fields_file="/data/yosef/CD8_effector_diff/src/YosefCode/packages/RNASeq/summary/TEXT/gene_fields.txt",
LOAD_RSEM=TRUE, LOAD_CUFF=FALSE, LOAD_KALLISTO=FALSE)
save(collectedRNASeqStudy, file=file.path(collect_dir, "collectedRNASeqStudy.RData"))
|
eb283f196984440799e98bbf106e166efff4e19d
|
2ae1c90fd6beefbf099342fbc48e079ca904b979
|
/legacy/dhfr/alt_rank_plot.R
|
fc9b82e30e278a1d3337e0a7192a6f91a78a8041
|
[] |
no_license
|
SamStudio8/gretel-test
|
62bab2e1719d5cb20467549fe2ce429cb4b6d55d
|
9d168f64ab5485416c58f1b7976356ad89eea903
|
refs/heads/master
| 2021-10-26T03:26:47.408040
| 2021-10-18T13:19:46
| 2021-10-18T13:19:46
| 64,262,553
| 2
| 2
| null | 2019-06-24T10:54:50
| 2016-07-26T23:54:56
|
Python
|
UTF-8
|
R
| false
| false
| 2,068
|
r
|
alt_rank_plot.R
|
# Plots haplotype rank/likelihood vs SNP recovery for the DHFR experiment,
# faceted by input haplotype and synthetic read length, coloured by depth.
library("ggplot2");
#d <- read.table(TABLE_P, header=T);
#d <- read.table("fbc_hamming_wpd_wmeta.txt", header=T);
d <- read.table("fbc_hamming_wpd_wmeta.withbestworstlikl.withabslikl.txt", header=T);
#d <- d[d$cov == 10,]
#p <- ggplot(d, aes(recovery, rank_w, colour=factor(in_hname))) + facet_grid(readsize~cov) + scale_y_reverse( lim=c(75,0)) + scale_x_reverse( lim=c(100,0)) + theme_grey(base_size=30) + geom_jitter(size=2)
# Drop rows with no output haplotypes, unranked rows, and extreme worst
# likelihoods. NOTE(review): 112.14 is a magic cutoff — document its origin.
d <- d[d$n_outhaps > 0, ]
d <- d[d$rank_w > -1, ]
d <- d[d$worstlikl < 112.14, ]
# Facet labels: read lengths and input haplotypes (with reference identity %).
read_l_labels <- c("50"="50 bp", "100"="100 bp", "125"="125 bp", "150"="150 bp")
d$in_hname_o = factor(d$in_hname, levels=c('BC070280', 'XR_634888', 'AK232978', 'M19237', 'XM_014960529'))
ho_labels <- c(BC070280 = "BC070280 (99.8%)", XR_634888 = "XR_634888 (97.3%)", AK232978="AK232978 (90.1%)", M19237="M19237 (83.5%)", XM_014960529="XM_014960529 (78.7%)")
# Normalise rank to [0,1] and rescale absolute likelihood against the global
# best/worst. NOTE(review): column name "bestlikll" (double-l) is assumed to
# match the input table's header — confirm it is not a typo upstream.
d$rank_w <- (d$rank_w/(d$n_outhaps-1))
d$liklscale <- (d$abslik - min(d$bestlikll)) / (max(d$worstlikl) - min(d$bestlikll))
#d$liklscale <- (d$abslik - (d$bestlikll)) / ((d$worstlikl) - (d$bestlikll))
#d$liklscale <- (d$abslik - d$bestlikll) / (d$worstlikl - d$bestlikll)
#d$liklscale <- (d$abslik - 0) / (d$worstlikl - 0)
# Both axes are reversed so that "better" (high recovery, low rank) sits top-right.
p <- ggplot(d, aes(recovery, rank_w, colour=factor(cov))) + facet_grid(readsize~in_hname_o, labeller=labeller(in_hname_o = ho_labels, readsize=read_l_labels)) + scale_y_reverse(lim=c(1.01,-0.01)) + scale_x_reverse( lim=c(101,39)) + theme_grey(base_size=30) + geom_point(size=2.5, alpha=0.8)
#p <- p + theme(panel.grid.major.x=element_line(color="gray"))
#p <- p + theme(panel.grid.major.y=element_line(color="gray"))
p <- p + theme(legend.key = element_rect(size = 5), legend.key.size = unit(1.5, 'lines'))
p <- p + theme(strip.text=element_text(size=30))
p <- p + theme(legend.position="bottom")
p <- p + xlab("Correctly Recovered SNPs (% Proportion) - Facets: Input Haplotype (Reference Identity)")
p <- p + ylab("Haplotype Scaled Likelihood - Facets: Synthetic Read Length")
p <- p + guides(colour=guide_legend(title="Depth",nrow=1))
# ggsave() writes the most recently created plot (p) to disk.
ggsave("dhfr-ranks.png")
|
3cdb84092d446505f3e53e7765cfabc047ce8278
|
1817920a05d0282936b6bd88fcbc5eeb9cbbcaf0
|
/run_analysis.R
|
2acb8eddde9a1be508892fceb3ed3212279c016b
|
[] |
no_license
|
sallytian/Getting-and-Cleaning-Data-Project
|
0114324db338871b3e3583c7827852f6d2dcd148
|
0d805b555fddf086ac9e8cff20800697c95afd81
|
refs/heads/master
| 2020-05-19T18:52:56.983970
| 2015-02-22T22:09:39
| 2015-02-22T22:09:39
| 31,182,117
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,690
|
r
|
run_analysis.R
|
## Build a tidy summary of the UCI HAR dataset: the mean of every
## mean()/std()-related feature for each (subject, activity) pair.

## Feature names, one per measurement column.
features <- read.table("./UCI HAR Dataset/features.txt")
colnames(features) <- c("No", "Names")

## Column indices of the mean- and std-related features.
wanted <- grep("mean|std", features$Names)

## Measurements: test set first, then training set, restricted to the
## selected columns, with descriptive column names.
x_test <- read.table("./UCI HAR Dataset/test/X_test.txt")[, wanted]
x_train <- read.table("./UCI HAR Dataset/train/X_train.txt")[, wanted]
measurements <- rbind(x_test, x_train)
colnames(measurements) <- features[wanted, 2]

## Activity codes (y files), same test-then-train ordering.
y <- rbind(read.table("./UCI HAR Dataset/test/y_test.txt"),
           read.table("./UCI HAR Dataset/train/y_train.txt"))
colnames(y) <- "label"

## Subject identifiers, same ordering.
subjects <- rbind(read.table("./UCI HAR Dataset/test/subject_test.txt"),
                  read.table("./UCI HAR Dataset/train/subject_train.txt"))
colnames(subjects) <- "subject"

## Map numeric activity codes to descriptive activity names.
activities <- read.table("./UCI HAR Dataset/activity_labels.txt")
colnames(activities) <- c("label", "activity")
y$activity <- factor(y$label, levels = activities$label,
                     labels = activities$activity)

## Assemble the full dataset: label/activity, subject, then measurements.
full <- cbind(y, subjects, measurements)

## Tidy dataset: average of each feature per subject and activity.
feature_cols <- as.character(features[wanted, 2])
tidy <- aggregate(full[, feature_cols], by = list(full$subject, full$activity), FUN = mean)
colnames(tidy)[1] <- "subject"
colnames(tidy)[2] <- "activity"
write.table(tidy, file = "tidy.txt", row.names = FALSE)
|
306170c36139a0e68014b913e4dbdd1f959919c0
|
2281cdb6065a06304e9ed82649d546e1e64edce3
|
/man/print_class_id.Rd
|
f57b099793ba67ddbe378d7be91cb35ab0c1842d
|
[
"Apache-2.0"
] |
permissive
|
hhoeflin/hdf5r
|
142d391e60d97b57a4fcba76a407ef9bc0e08984
|
450d483364e3bf84db19df7eb330633952ec31ae
|
refs/heads/master
| 2023-08-17T07:04:15.470093
| 2023-01-21T15:57:59
| 2023-01-21T15:57:59
| 70,118,016
| 77
| 29
|
NOASSERTION
| 2023-06-22T13:49:27
| 2016-10-06T02:58:42
|
C
|
UTF-8
|
R
| false
| true
| 472
|
rd
|
print_class_id.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Helper_functions.R
\name{print_class_id}
\alias{print_class_id}
\title{Print the class and ID}
\usage{
print_class_id(obj, is_valid)
}
\arguments{
\item{obj}{The object for which to print the class and id}
\item{is_valid}{is the object valid}
}
\value{
invisible NULL
}
\description{
Print the class and ID
}
\details{
Used by the print-methods
}
\author{
Holger Hoefling
}
\keyword{internal}
|
35c04fc06edc51d5e74490d7e36108a5e5c30aa1
|
b9c2609f7ba23410bb549383a7edc1faefd47c3c
|
/R_scripts/021_PeriodicProcessTemplate.R
|
5753c009de080be894cc3ede0e6231bf63955712
|
[] |
no_license
|
stochastictalk/msc_statistics_thesis
|
8faa6f4ce3a9384e6f1728e5e431508de89c88d6
|
fb584d8b34c33e527106b369983ec56d3b10e833
|
refs/heads/main
| 2023-01-12T04:23:59.325525
| 2020-11-20T13:37:38
| 2020-11-20T13:37:38
| 313,441,237
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 318
|
r
|
021_PeriodicProcessTemplate.R
|
# Simulate a switch from i.i.d. standard-normal noise to a periodic-mean regime.
# (The original rm(list = ls()) and a dead sigma_P <- 0.1 assignment, which was
# immediately overwritten, have been removed.)
T_ <- 10                         # period of the seasonal component
j <- seq_len(T_)
# Choose sigma_P so the periodic regime has unit second moment:
# E[X^2] = (1/sigma_P)^2 + mean(sin^2(2*pi*j/T_)) = 1.
sigma_P <- sqrt(1 / (1 - mean(sin(2 * pi * j / T_)^2)))
N <- 1000
# First N points: pure N(0, 1) noise; next N: sinusoidal mean, reduced sd.
X_t <- rnorm(N, mean = 0, sd = 1)
X_t <- c(X_t, rnorm(N, mean = sin(2 * pi * seq_len(N) / T_), sd = 1 / sigma_P))
plot(X_t)
# Overlay the true mean function of the process.
EX <- c(rep(0, N), sin(2 * pi * seq_len(N) / T_))
lines(EX)
# Empirical second moments of both halves (each should be close to 1).
mean(X_t[(N + 1):(2 * N)]^2)
mean(X_t[1:N]^2)
|
e23551d3e6f4833e80d4da33ffc78b9e24f40e16
|
a07c8e474c1f44d69ed862bea9f571e48c61dfc9
|
/missing-values/missing-values-poisson.R
|
aea371192b12c22f5eee75c479a18fe1f00dd725
|
[] |
no_license
|
zlliang/statistical-computing-experiments
|
11fa050af5ad29895b730fe33847c69547676089
|
e67a6e1927c372962fcb4f6b28df73db7ecf8487
|
refs/heads/master
| 2020-03-17T14:28:03.972110
| 2018-05-24T10:37:07
| 2018-05-24T10:37:07
| 133,673,166
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,270
|
r
|
missing-values-poisson.R
|
# -----------------------------------------------------------
# Statistical Computing Experiments
# -----------------------------------------------------------
# EM Algorithm for Poisson Mixture Distribution
# Author: Zilong Liang
# Date: 2018-04-04
# -----------------------------------------------------------
# -----------------------------
# Main function of EM Algorithm
# -----------------------------
# Fit a k-component Poisson mixture to the counts in x via the EM algorithm.
# The two per-component loops of the E-step (flagged "TODO: vectorization?")
# are now vectorised with vapply()/sweep(); the arithmetic is unchanged.
#
# Args:
#   x:        n x 1 matrix of observed counts
#   k:        number of mixture components
#   tol:      relative-change tolerance on the parameters for convergence
#   iter.max: maximum number of EM iterations
#
# Returns:
#   list(tau, lambda): estimated mixing weights and Poisson rates.
poismm <- function(x, k, tol = 1e-6, iter.max = 200) {
  # Sample size.
  n <- nrow(x)
  # Random initialisation: weights tau sum to 1, rates lambda drawn in (0, 1).
  tau <- runif(k - 1, 0, 1 / k)
  tau <- c(tau, 1 - sum(tau))
  lambda <- runif(k)
  # EM iterations
  iter <- 0 # Iteration index
  repeat {
    # E-Step: f[i, j] = dpois(x_i, lambda_j), vectorised over observations.
    f <- vapply(lambda, function(l) dpois(c(x), l), numeric(n))
    # Mixture density per observation (n x 1 matrix).
    e.weighted <- f %*% tau
    # Responsibilities: prior weight times component density, row-normalised.
    e <- sweep(f, 2, tau, "*")
    e <- e / c(e.weighted)
    # M-Step: closed-form updates for the weights and rates.
    sum.e <- colSums(e)
    tau.new <- sum.e / n
    lambda.new <- c(t(e) %*% x / sum.e)
    # Judge convergence on the relative sup-norm change of both parameters.
    iter <- iter + 1
    if (iter > iter.max) { break }
    err.tau <- norm(rbind(tau.new - tau), "I") / norm(rbind(tau), "I")
    err.lambda <- norm(rbind(lambda.new - lambda), "I") /
      norm(rbind(lambda), "I")
    err.max <- max(c(err.tau, err.lambda))
    if (err.max < tol) { break }
    # Accept the updated parameters and iterate.
    tau <- tau.new
    lambda <- lambda.new
  }
  return (list(tau, lambda))
}
# ----------
# Experiment
# ----------
# Read sample data
# NOTE(review): assumes the counts live in the second column of data.csv
# (the first is presumably a row index) — confirm against the file.
x <- read.csv("data.csv")
x <- as.matrix(x[, 2])
# Estimate parameters of a two-component Poisson mixture.
estimates <- poismm(x, 2)
tau <- estimates[[1]]      # estimated mixing weights
lambda <- estimates[[2]]   # estimated Poisson rates
# Prepare plotting
# Probability mass function of a k-component Poisson mixture evaluated at xx.
# (tau: mixing weights; lambda: component rates.)
pfunc <- function(xx, tau, lambda) {
  total <- 0
  for (comp in seq_along(tau)) {
    total <- total + tau[comp] * dpois(xx, lambda[comp])
  }
  return (total)
}
# Plotting
# Overlay the fitted mixture pmf (points) on a density-scaled histogram of
# the observed counts.
xx <- seq(0, 11)
yy = pfunc(xx, tau, lambda)
hist(x, freq = FALSE,
     breaks = c(-0.5:11.5),   # one bin per integer count, centred on 0..11
     xlab = "k",
     ylab = "Density or Possibility",
     main = "EM Algorithm on Poisson Mixture Distribution",
     family = "serif")
points(xx, yy, pch = 18, cex = 1.5, col = "#b28fce")
|
8b77c0fa374b68afe2988bf2ee9886251dba4d7d
|
5c2350f172e1a7b7f61e1047d515357735e5895e
|
/man/christmas_stats_participants.Rd
|
5ea87ef46ea44ddc4c4d67fe6fbe843569c2603b
|
[
"CC-BY-4.0",
"MIT",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
richarddmorey/Morey_Hoekstra_StatCognition
|
4da5b3f205d1038b850fa701354bd59b62a05eed
|
373b9ac75219d84d7b5a6454296e80aa4f34ea54
|
refs/heads/master
| 2022-12-06T14:50:55.198542
| 2022-11-30T18:23:58
| 2022-11-30T18:23:58
| 189,821,493
| 4
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 5,889
|
rd
|
christmas_stats_participants.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/christmas_stats_public-data.R
\docType{data}
\name{christmas_stats_participants}
\alias{christmas_stats_participants}
\title{Cleaned, participant-level data}
\description{
Participant-level data for the Christmas statistical
cognition experiment
}
\details{
\itemize{
\item duration: Qualtrics-reported time taken in the experiment (seconds)
\item id: participant id
\item consent: Response to informed consent
\item times: "Have you participated previously"?
\item shuffle: "Do you understand use of shuffle reports?"
\item shuffle_other" (text) response for "Other" responses to \code{shuffle}
\item response: primary response: which team is faster?
\item confidence: participant's confidence in their \code{response}
\item salient_factors: (text) "What facts or observations were most salient in coming to the conclusion that you did?"
\item expt_strategy: (text) "Please describe your experimental strategy, if any, in your own words."
\item shuffle_desc: (text) "Did you make use of the "random shuffle reports"? If so, how?"
\item is_science: "Is your work in a field that would typically be considered scientific?"
\item is_science_other: (text) response for "Other" responses to \code{is_science}
\item education: highest level of formal education in scientific field
\item formal_training: years of formal statistical training
\item how_use: "How do statistics play a role in your work?" Concatenated string of responses
\item field: "In what applied field(s) do you use statistics?" Concatenated string of responses
\item preferred: "What sort of inferential procedures would you typically prefer?"
\item preferred_other: (text) response for "Other" responses to \code{preferred}
\item sig_testing: "What is your opinion about statistical significance testing?" Concatenated string of responses
\item mobile: Qualtrics flag for mobile browsers. Should be blank for all participants
\item evidence_power: Randomly-assigned transformation power. 3 or 7
\item effect_size: "true" effect size in standard deviation units, hidden from the participant
\item finished_practice_timestamp: javascript timestamp reported by browser when participant ended the instructions. Indexed to initial_timestamp
\item downloaded_expt: Did the participant download their experimental samples?
\item downloaded_null: Did the participant download their null samples?
\item initial_timestamp: initial timestamp (should be 0, start of experiment)
\item true_winner: Which "team" was the true winner? Corresponds to sign of effect_size
\item true_null: Was the effect size 0?
\item response_null: Was the participant's response "null"? (same or no detection)
\item response_alt: Negation of \code{response_null}
\item n_expt: Number of experimental samples requested
\item n_null: Number of random shuffle reports requested
\item how_use.analysis: (logical) Did the participant select the corresponding response to \code{how_use}?
\item how_use.develop: (logical) Did the participant select the corresponding response to \code{how_use}?
\item how_use.philosophy: (logical) Did the participant select the corresponding response to \code{how_use}?
\item how_use.method_comment: (logical) Did the participant select the corresponding response to \code{how_use}?
\item how_use.none: (logical) Did the participant select the corresponding response to \code{how_use}?
\item how_use.other: (logical) Did the participant select the corresponding response to \code{how_use}?
\item field.bio: (logical) Did the participant select the corresponding response to \code{field}?
\item field.phys: (logical) Did the participant select the corresponding response to \code{field}?
\item field.soc_beh: (logical) Did the participant select the corresponding response to \code{field}?
\item field.comp_tech: (logical) Did the participant select the corresponding response to \code{field}?
\item field.med: (logical) Did the participant select the corresponding response to \code{field}?
\item field.other: (text) If applicable, text response to "Other" responses for \code{field}.
\item sig_testing.necessary: (logical) Did the participant select the corresponding response to \code{sig_testing}?
\item sig_testing.prefer_other: (logical) Did the participant select the corresponding response to \code{sig_testing}?
\item sig_testing.misunderstood: (logical) Did the participant select the corresponding response to \code{sig_testing}?
\item sig_testing.no_opinion: (logical) Did the participant select the corresponding response to \code{sig_testing}?
\item sig_testing.discontinued: (logical) Did the participant select the corresponding response to \code{sig_testing}?
\item sig_testing.fatally_flawed: (logical) Did the participant select the corresponding response to \code{sig_testing}?
\item sig_testing.do_not_understand: (logical) Did the participant select the corresponding response to \code{sig_testing}?
\item sig_testing.other: (text) If applicable, text response to "Other" responses to \code{sig_testing}
\item text_comparison: (Strategy questions) Response mentioned comparison to the random shuffle reports
\item text_asymmetry: (Strategy questions) Response mentioned symmetry/asymmetry in the experimental samples
\item text_sampling_var: (Strategy questions) Response mentioned random shuffle reports as a means to assess sampling variability,
distribution under the null, chance distribution, etc
\item text_inc_asymmetry: (Strategy questions) Response mentioned increasing asymmetry (or lack thereof) as sample size increased
\item text_no_shuffle: (Strategy questions) Explicitly said they did not use the shuffle reports
\item text_irrelevant: (Strategy questions) Text was irrelevant
\item text_missing: (Strategy questions) Text was missing
}
}
\author{
Richard D. Morey \email{richarddmorey@gmail.com}
}
\keyword{data}
|
cad557fbd9bc866b9876047f72e264235ef4d62e
|
45fcec2ad46e80b31ab2cd855feb2a0b6a7d7431
|
/plot3.R
|
d3e1239518782abcbea41f69efd60dde1ed48370
|
[] |
no_license
|
lamwai/ExData_Plotting1
|
371c76090b91c039e8083f3b5d00ba1cdf7dbc6c
|
289345adeaab504323a055107bb62fddb592460d
|
refs/heads/master
| 2021-01-21T17:50:21.513968
| 2014-09-05T12:15:10
| 2014-09-05T12:15:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,183
|
r
|
plot3.R
|
## Plot 3: energy sub-metering traces for 2007-02-01 and 2007-02-02.

## Load the complete dataset; every column is read as character and the
## numeric ones are converted after subsetting.
power <- read.csv("household_power_consumption.txt", sep = ";",
                  colClasses = c(rep("character", 9)))

## Keep only the two target dates, retaining Date, Time and the three
## sub-metering columns.
two_days <- subset(power, Date == "1/2/2007" | Date == "2/2/2007",
                   select = c(1, 2, 7, 8, 9))

## Build a datetime column from the Date and Time strings.
two_days <- cbind(two_days,
                  strptime(paste(two_days[, 1], two_days[, 2]), "%d/%m/%Y %H:%M:%S"))

## Convert the three sub-metering readings (columns 3-5) to numeric.
for (col in 3:5) {
  two_days[, col] <- as.numeric(two_days[, col])
}

## Render to PNG at the required 480x480 size.
png(file = "plot3.png", width = 480, height = 480)
plot(two_days[, 6], col = "Black", two_days[, 3], type = "l",
     xlab = "", ylab = "Energy sub metering")
lines(two_days[, 6], two_days[, 4], col = "Red")
lines(two_days[, 6], two_days[, 5], col = "Blue")
legend("topright", lwd = 1, col = c("Black", "Red", "Blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
|
2addfa87785fe4ca46e8b77332a5b46d119ef907
|
ee0689132c92cf0ea3e82c65b20f85a2d6127bb8
|
/23-functions/49c-replicate.R
|
236c9742267ddddf47c5d0419d84576b9a2ef343
|
[] |
no_license
|
DUanalytics/rAnalytics
|
f98d34d324e1611c8c0924fbd499a5fdac0e0911
|
07242250a702631c0d6a31d3ad8568daf9256099
|
refs/heads/master
| 2023-08-08T14:48:13.210501
| 2023-07-30T12:27:26
| 2023-07-30T12:27:26
| 201,704,509
| 203
| 29
| null | null | null | null |
UTF-8
|
R
| false
| false
| 250
|
r
|
49c-replicate.R
|
# replicate(): run an expression repeatedly and collect the results.
?replicate
# Signature shown as a comment — the original evaluated it literally, which
# errors because n and expr are placeholders, not objects:
# replicate(n, expr, simplify = "array")
replicate(4, rnorm(5))  # 5 x 4 matrix: four independent draws of rnorm(5)

# Skeleton of a nested simulation that replicate() can often stand in for.
my.fun <- function() {
  output <- NULL
  for (i in 1:1000) {
    for (j in 1:20) {
      # ... per-replicate work goes here ...
    }
  }
  return(output)
}

# Contrast: rep() recycles an existing vector ...
rep(1:4, length.out = 20)
# ... while replicate() re-evaluates an expression each time.
# (The original replicate(1:4, len = 20) was an invalid call: replicate's
# first argument is the repetition count.)
replicate(20, sample(1:4, 1))
|
acbb011f14079e7c8ffafee6d83a86b1a778ed6c
|
6e9abf08a2d2728495c89611c9e8a1517ff329d8
|
/man/gwc_parse_args.Rd
|
4ef5daaae50fac22f038a4ecb3a6969181d64765
|
[] |
no_license
|
ebi-gene-expression-group/workflowscriptscommon
|
ef307c615d07599e65dc00bed3334acd291d1956
|
59d978c4c27df5e6bd1a3fbea443305cff3312b2
|
refs/heads/develop
| 2022-03-15T12:34:14.491968
| 2022-02-13T10:41:00
| 2022-02-13T10:41:00
| 141,469,535
| 0
| 1
| null | 2022-02-09T22:43:13
| 2018-07-18T17:43:06
|
R
|
UTF-8
|
R
| false
| true
| 542
|
rd
|
gwc_parse_args.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/arguments.R
\name{gwc_parse_args}
\alias{gwc_parse_args}
\title{Wrap optparse's parse_args() to add support for mandatory arguments}
\usage{
gwc_parse_args(option_list, mandatory = c())
}
\arguments{
\item{option_list}{List of OptionParserOption objects}
\item{mandatory}{Character vector of mandatory parameters}
}
\value{
a list containing option values, as per parse_args()
}
\description{
Wrap optparse's parse_args() to add support for mandatory arguments
}
|
da7f9ad219787598b1a5c7ecfb087913bdf1c00f
|
f8588995f20739d4ffadd60baa48dafc85a3d1fa
|
/perceptron_pocket.r
|
d241912456f81b48d04bae351c5d95c680a7fbcf
|
[] |
no_license
|
gauravshelangia/ai-lab
|
90ef8bf6d76d7f2e2373c51ec027ae6da25888b0
|
3d19c8e5ce849f0a974620e0d1080688b380991c
|
refs/heads/master
| 2020-12-01T01:17:52.696436
| 2016-02-11T09:25:35
| 2016-02-11T09:25:35
| 51,135,556
| 0
| 0
| null | 2016-02-05T08:42:03
| 2016-02-05T08:42:02
| null |
UTF-8
|
R
| false
| false
| 1,780
|
r
|
perceptron_pocket.r
|
# By Gaurav Yadav
# reading file and storing that in matrix form
# (last column is the class label in {-1, +1}; the rest are features)
# NOTE(review): the file-name casing differs between the two paths
# ("Iris_..." vs "iris_...") — confirm both files exist with these exact names.
train = as.matrix(read.table("Iris_data_norm_train.txt",sep=","))
test <- as.matrix(read.table("iris_data_norm_test.txt",sep=","))
# Sign-style step activation: +1 for strictly positive input, -1 otherwise
# (zero maps to -1, as in the original).
H <- function(x) {
  if (x > 0) 1 else -1
}
# Perceptron training with a simplified pocket step: after each epoch the
# weights are rolled back if the in-sample error increased.
# Fixes vs original: (1) the out-of-sample check compared the stale training
# variable `result` instead of `result_out`; (2) the pocket error tracker was
# overwritten with the current (possibly worse) error even after reverting;
# (3) the per-epoch error vectors are preallocated instead of grown with c().
in_size <- ncol(train) - 1          # number of features (last column is the label)
w <- runif(in_size, 0, 1)           # initial random weights in [0, 1)

n_epochs <- 100
errorin <- numeric(n_epochs)        # in-sample misclassifications per epoch
errorout <- numeric(n_epochs)       # out-of-sample misclassifications per epoch

rows <- nrow(train)
test_rows <- nrow(test)

error_temp <- 10000                 # best in-sample error seen so far (starts large)
w_pre <- vector(mode = "numeric")   # weights from the previous (kept) epoch

for (f in 1:n_epochs) {
  no_errin <- 0
  no_errout <- 0

  # One perceptron-rule pass over the training data.
  for (i in 1:rows) {
    x <- as.numeric(train[i, 1:in_size])
    expected <- as.numeric(train[i, in_size + 1])
    result <- w %*% x
    error <- expected - H(result)
    w <- w + error * x
  }

  # Count in-sample misclassifications with the updated weights.
  for (i in 1:rows) {
    x <- as.numeric(train[i, 1:in_size])
    expected <- as.numeric(train[i, in_size + 1])
    result <- w %*% x
    if (result < 0 && expected == 1) {
      no_errin <- no_errin + 1
    }
    if (result > 0 && expected == -1) {
      no_errin <- no_errin + 1
    }
  }

  # Pocket step: if this epoch got worse, revert to the previous weights.
  if (error_temp < no_errin) {
    w <- w_pre
  }
  # BUGFIX: keep the smaller of the two errors rather than unconditionally
  # overwriting with the current count.
  error_temp <- min(error_temp, no_errin)
  w_pre <- w

  # Out-of-sample error on the test set with the kept weights.
  for (i in 1:test_rows) {
    y <- as.numeric(test[i, 1:in_size])
    ex_out <- as.numeric(test[i, in_size + 1])
    result_out <- y %*% w
    if (result_out < 0 && ex_out == 1) {
      no_errout <- no_errout + 1
    }
    # BUGFIX: original tested `result` (stale training value) here.
    if (result_out > 0 && ex_out == -1) {
      no_errout <- no_errout + 1
    }
  }

  errorin[f] <- no_errin
  errorout[f] <- no_errout
}
print("END")
w
#points(iris$Petal.Length, iris$Petal.Width,iris$Sepal.Lenght , pch=19, col=iris$Species)
|
0f61410a08bcf76d911904b428b35ce7f1253507
|
3727eb350c9f10d1f835314115e401e76ea8e913
|
/EDA_VisualizationScripts.R
|
dce4e0e588acc2e03635c8550f4da8aafc2200a2
|
[] |
no_license
|
TaneishaArora/Pfft
|
950b6497811fb712ccb6f30f14a618d30b95e81a
|
0582369efa952be0a4ae7539e01ce84a0a9f5ccd
|
refs/heads/master
| 2022-08-19T08:04:24.023120
| 2020-05-24T12:43:44
| 2020-05-24T12:43:44
| 265,209,275
| 0
| 0
| null | 2020-05-24T12:43:45
| 2020-05-19T09:53:26
|
R
|
UTF-8
|
R
| false
| false
| 7,135
|
r
|
EDA_VisualizationScripts.R
|
# Exploratory visualisations of AVLT test scores and amyloid-beta measures.
# NOTE(review): this script assumes a data frame `amyloid` (with columns such
# as month, sex, genotype, dx, edu, age, abeta6m, abeta6mcut, t1sum..t7sum,
# drec_hits, drec_fa) is already loaded — it is not created here.
library(tidyverse)
library(ggplot2)
# Dividing the data up by visit month
baseline <- amyloid %>% filter(month == 0)
# Test score Progression at baseline, by different demographic traits
# Each plot reshapes the seven AVLT trial scores to long form and draws
# boxplots per trial, filled by a demographic grouping.
# Sex
baseline %>% gather("test_number", "score", c(t1sum, t2sum, t3sum, t4sum, t5sum, t6sum, t7sum)) %>%
  ggplot(aes(x = test_number, fill = sex)) +
  scale_x_discrete(labels = c("IR 1", "IR 2", "IR 3", "IR 4", "IR 5", "DR 1", "DR 2")) +
  geom_boxplot(aes(x= test_number, y = score)) +
  scale_fill_discrete(name = "Sex", labels = c("Female", "Male")) +
  labs(x = "AVLT", y = "Score")
# Amyloid Positivity
baseline %>% gather("test_number", "score", c(t1sum, t2sum, t3sum, t4sum, t5sum, t6sum, t7sum)) %>%
  ggplot(aes(x = test_number, fill = abeta6mcut)) +
  scale_x_discrete(labels = c("IR 1", "IR 2", "IR 3", "IR 4", "IR 5", "DR 1", "DR 2")) +
  geom_boxplot(aes(x= test_number, y = score)) +
  scale_fill_discrete(name = "Amyloid Positivity", labels = c("Positive", "Negative")) +
  labs(x = "AVLT", y = "Score")
# Genotype
baseline %>% gather("test_number", "score", c(t1sum, t2sum, t3sum, t4sum, t5sum, t6sum, t7sum)) %>%
  ggplot(aes(x = test_number, fill = genotype)) +
  scale_x_discrete(labels = c("IR 1", "IR 2", "IR 3", "IR 4", "IR 5", "DR 1", "DR 2")) +
  geom_boxplot(aes(x= test_number, y = score)) +
  scale_fill_discrete(name = "Genotype", labels = c("e2/e2", "e2/e3", "e3/e3", "e2/e4", "e3/e4", "e4/e4")) +
  labs(x = "AVLT", y = "Score")
# Diagnosis
baseline %>% gather("test_number", "score", c(t1sum, t2sum, t3sum, t4sum, t5sum, t6sum, t7sum)) %>%
  ggplot(aes(x = test_number, fill = dx)) +
  scale_x_discrete(labels = c("IR 1", "IR 2", "IR 3", "IR 4", "IR 5", "DR 1", "DR 2")) +
  geom_boxplot(aes(x= test_number, y = score)) +
  scale_fill_discrete(name = "Diagnosis", labels = c("Cognitively Normal", "Subjectively Cognitively Impaired", "Objective Mild Cognitive Impairment")) +
  labs(x = "AVLT", y = "Score")
# Education
# ntile() splits education into terciles; the three filter/summarise calls
# below print the tercile boundaries for labelling.
ntile(baseline$edu, 3)
# To check ranges
baseline %>% mutate (edu_cat = ntile(edu, 3)) %>% filter(edu_cat == 1) %>% summarise(min(edu), max(edu))
baseline %>% mutate (edu_cat = ntile(edu, 3)) %>% filter(edu_cat == 2) %>% summarise(min(edu), max(edu))
baseline %>% mutate (edu_cat = ntile(edu, 3)) %>% filter(edu_cat == 3) %>% summarise(min(edu), max(edu))
baseline %>% gather("test_number", "score", c(t1sum, t2sum, t3sum, t4sum, t5sum, t6sum, t7sum)) %>%
  mutate(edu_factor = as.factor(ntile(edu, 3))) %>%
  ggplot(aes(x = test_number, fill = edu_factor)) +
  scale_x_discrete(labels = c("IR 1", "IR 2", "IR 3", "IR 4", "IR 5", "DR 1", "DR 2")) +
  geom_boxplot(aes(x= test_number, y = score)) +
  scale_fill_discrete(name = "Education Bracket", labels = c("<= Bachelors", "<= Masters", "<= PhD")) +
  labs(x = "AVLT", y = "Score")
# Age
# Same tercile approach for age.
ntile(baseline$age, 3)
# To check ranges
baseline %>% mutate (age_cat = ntile(age, 3)) %>% filter(age_cat == 1) %>% summarise(min(age), max(age))
baseline %>% mutate (age_cat = ntile(age, 3)) %>% filter(age_cat == 2) %>% summarise(min(age), max(age))
baseline %>% mutate (age_cat = ntile(age, 3)) %>% filter(age_cat == 3) %>% summarise(min(age), max(age))
baseline %>% gather("test_number", "score", c(t1sum, t2sum, t3sum, t4sum, t5sum, t6sum, t7sum)) %>%
  mutate(age_factor = as.factor(ntile(age, 3))) %>%
  ggplot(aes(x = test_number, fill = age_factor)) +
  scale_x_discrete(labels = c("IR 1", "IR 2", "IR 3", "IR 4", "IR 5", "DR 1", "DR 2")) +
  geom_boxplot(aes(x= test_number, y = score)) +
  scale_fill_discrete(name = "Age Range", labels = c("[54-72)", "[72-77)", "[77, 89]")) +
  labs(x = "AVLT", y = "Score")
##################################################################################################
# Amyloid Beta Measures at baseline based on various demographic traits
# These use the full `amyloid` data (all months), boxplotting abeta6m by month.
# sex
amyloid %>%
  ggplot(aes(x = month, fill = sex)) +
  geom_boxplot(aes(x= month, y = abeta6m)) +
  scale_fill_discrete(name = "Sex", labels = c("Female", "Male")) +
  labs(x = "Month", y = "Amyloid Beta Levels")
# Genotype
amyloid %>%
  ggplot(aes(x = month, fill = genotype)) +
  geom_boxplot(aes(x= month, y = abeta6m)) +
  scale_fill_discrete(name = "Genotype", labels = c("e2/e2", "e2/e3", "e3/e3", "e2/e4", "e3/e4", "e4/e4")) +
  labs(x = "Month", y = "Amyloid Beta Levels")
# Diagnosis
amyloid %>%
  ggplot(aes(x = month, fill = dx)) +
  geom_boxplot(aes(x= month, y = abeta6m)) +
  scale_fill_discrete(name = "Diagnosis", labels = c("Cognitively Normal", "Subjectively Cognitively Impaired", "Objective Mild Cognitive Impairment")) +
  labs(x = "Month", y = "Amyloid Beta Levels")
# Education
# NOTE(review): the legend title says "Diagnosis" but the fill is education
# terciles — confirm whether this label is intentional.
amyloid %>%
  mutate(edu_factor = as.factor(ntile(edu, 3))) %>%
  ggplot(aes(x = month, fill = edu_factor)) +
  geom_boxplot(aes(x = month, y = abeta6m)) +
  scale_fill_discrete(name = "Diagnosis", labels = c("<= Bachelors", "<= Masters", "<= PhD")) +
  labs(x = "Month", y = "Amyloid Beta Levels")
# Age
amyloid %>%
  mutate(age_factor = as.factor(ntile(age, 3))) %>%
  ggplot(aes(x = month, fill = age_factor)) +
  geom_boxplot(aes(x= month, y = abeta6m)) +
  scale_fill_discrete(name = "Age", labels = c("[54-72)", "[72-77)", "[77, 89]")) +
  labs(x = "Month", y = "Amyloid Beta Levels")
##################################################################################################
# Proportion Right/Wrong for Recogition Test
# get average values of drec hits and fa
recognition_prop <- baseline %>%
  group_by(abeta6mcut) %>%
  summarise(
    drec_hits_mean = mean(drec_hits, na.rm = TRUE),
    drec_fa_mean = mean(drec_fa, na.rm = TRUE)
  )
# transform data
# NOTE(review): t() on a data frame coerces everything to character; the
# scores are converted back to numeric below — this round-trip is fragile.
recognition_prop <- as.data.frame(t(recognition_prop))
# remove the abeta6mcut row (first row)
recognition_prop = recognition_prop[-1,]
# combine both scores columns into one
recognition_prop <- data.frame(Score = c(recognition_prop[,"V1"],
                                         recognition_prop[,"V2"]))
# populate drec and amyloid positivity accordingly
recognition_prop <- recognition_prop %>% mutate(
  drec = if_else(row_number() %% 2 == 0, "False Alarm", "Hit"),
  amyloid_positivity = if_else(row_number() < 3, "Positive", "Negative"),
)
# convert scores from char to numeric
recognition_prop$Score <- as.numeric(as.character(recognition_prop$Score))
sapply(recognition_prop, class)
# plot
# Stacked (relative) bar chart of hits vs false alarms per positivity group.
ggplot(recognition_prop, aes(fill=drec, y=Score, x=amyloid_positivity)) +
  geom_bar(position="fill", stat="identity") +
  ggtitle("Word Recognition Hits/False Alarms") +
  labs(x="Amyloid Positivity", y="Relative Score",
       col="Recognized Words")
##################################################################################################
# Delta AVLT Score vs. Amyloid Positivity
# Boxplots of trial-to-trial score differences, filled by amyloid status.
baseline %>% gather("test_number", "score", c(t1t2, t2t3, t3t4, t4t5, t5t6, t6t7)) %>%
  ggplot(aes(x = test_number, fill = abeta6mcut)) +
  scale_x_discrete(labels = c("IR1-IR2", "IR2-IR3", "IR3-IR4", "IR4-IR5", "IR5-DR1", "DR1-DR2")) +
  geom_boxplot(aes(x= test_number, y = score)) +
  scale_fill_discrete(name = "Amyloid Positivity", labels = c("Positive", "Negative")) +
  labs(x = "AVLT", y = "Score") +
  ggtitle("Delta AVLT Scores at Baseline")
|
9e0c13ae6640826919acd8a9d936a4e4c4d15fde
|
b47aa2e09add49ab85ec3b04c3e3279f28706c1c
|
/man/FitGP_MLE.Rd
|
fb9cffcf02db34a058cf52cd0d0d71f06721434f
|
[] |
no_license
|
ceesfdevalk/EVTools
|
db232bc94b0a22b1a0fdfbd8ba5e6e9e94e8ad3c
|
0e3440f031b6a8abcfd6fc00d981d0656710d93e
|
refs/heads/master
| 2022-09-30T06:25:51.784236
| 2022-08-22T07:48:01
| 2022-08-22T07:48:01
| 130,972,770
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 4,162
|
rd
|
FitGP_MLE.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FitGP_MLE.R
\name{FitGP_MLE}
\alias{FitGP_MLE}
\title{FitGP_MLE}
\usage{
Value <- FitGP_MLE(X, p, N= 0, r11= 1, fixedpar= NULL, l0= NULL, metadata= NULL)
}
\arguments{
\item{X}{data sample (double(n))}
\item{p}{probabilities of exceedance of the quantiles to be estimated (double(np))}
\item{N}{(optional) (effective) sample size, in case X is not complete but contains only (peak) values above some threshold (integer(1))}
\item{r11}{(optional) factor to increase estimator variance by, to account for serial dependence (default: 1) (double(1) or list, see Details)}
\item{fixedpar}{(optional): fixed model parameters not to be estimated, and their standard errors (double(1) or list, see Details)}
\item{l0}{(optional) value of l (no. of order stats used) in case it is imposed (integer(0))}
\item{metadata}{(optional) information about the variable and, if applicable, the time-series (list; see Details)}
}
\value{
A list, with members:
\item{l}{no. of order statistics used for scale and quantile estimation}
\item{k}{no. of order statistics used for tail index estimation}
\item{tailindex}{estimates or imposed value of GP tail index}
\item{tailindexStd}{standard deviations of tail index estimates}
\item{logdisp}{estimates or imposed value of log of dispersion coeff.}
\item{logdispStd}{standard deviations of log of dispersion coeff. estimates}
\item{scale}{estimates of GP scale parameter}
\item{locationStd}{standard deviation of order statistic}
\item{lambda}{ratio of logarithms of probabilities of exceedance of quantile and threshold}
\item{p}{probabilities of exceedance of quantiles to be estimated}
\item{quantile}{quantile estimates}
\item{quantileStd}{standard deviations of quantile estimates}
\item{orderstats}{data X sorted (decreasing)}
\item{df}{= "GP": fitted distribution function tail (Generalised Pareto)}
\item{estimator}{= "maximum likelihood": see "method" below}
}
\description{
Fit a Generalised Pareto (GP) upper tail to the sample X and estimate quantiles, using
the ML estimator for tail index and scale
}
\details{
Pre-determined model parameters are to be supplied in the list fixedpar (see above):
\itemize{
\item{$gamma0: (optional) value of tailindex in case it is imposed (double(1))}
\item{$gamma0Std: (optional) its standard deviation (double(1))}
\item{$logdisp0: (optional) value of log of dispersion coeff. in case it is imposed (dispersion coeff. is the ratio of scale par. to location par.) (double(1))}
\item{$logdisp0Std: (optional) its standard deviation (double(1))}
}
The serial dependence coefficient r11 can be a positive number, or a list
produced by R11.R.
In case a quantile is to be estimated for a \emph{frequency}, say f, and
\enumerate{
\item{if X contains all values (possibly above some threshold), then with
EI an estimate of the Extremal Index from EI.R, set
p = f*d/EI and N = T/d, with T the length of the observation period and d the time step.
Note that f and d are defined with reference to the same unit of time!! In this case,
r11 needs to be estimated.
}
\item{if X contains only the n (approximately Poisson) peak values above some threshold
(in a PoT analysis), it is recommended to set r11= 1 and take p = f*d/EI and
N = T/d*EI; in this case (for GP), EI can be any value; e.g. take p= fT/n and N= n.
}
}
metadata may contain the following fields (in addition to your own meta data):
\itemize{
\item{$varname: variable name}
\item{$varunit: physical unit of variable}
\item{$timeunit: time unit (e.g. year)}
\item{$timestep: time step in units of timeunit}
\item{$timelength: length of time covered by time-series, in units of timeunit}
\item{$EI: extremal index (see above)}
\item{$nexcess (for PoT only): no. of data values (as opposed to peak values) exceeding the threshold}
}
}
\references{
De Haan, L. and A. Ferreira (2006), Extreme Value Theory - An Introduction. Springer.
}
\author{
Cees de Valk \email{ceesfdevalk@gmail.com}
}
|
da5b724c0aa24a5b30b6286bb51016c7d3d55402
|
0fb3d61813752e6134b4dc4d88876d380e31c60b
|
/code/RunMosaicStates.R
|
7364d2de40e37a8dfa79d3e53155a1388a068654
|
[] |
no_license
|
melaniekamm/MergeLANDFIREandCDL
|
ec96612568a0e090ee4df0b5ec127e3b2cd6cf42
|
1df8767ef662631d115f3c99e068b6615aa0603a
|
refs/heads/main
| 2023-04-08T15:15:13.270691
| 2023-02-24T19:20:39
| 2023-02-24T19:20:39
| 351,850,305
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 582
|
r
|
RunMosaicStates.R
|
# Mosaic per-state rasters of merged CDL + LANDFIRE data for a given CDL year.
# Invoked non-interactively; parameters arrive via the command line.
args <- commandArgs(trailingOnly = T)
message(args)
# specify input parameters
# NOTE(review): args[1] is never used — confirm the expected argument layout
# (indices start at 2 here).
CDLYear <- args[2] # year of NASS Cropland Data Layer
tier <- unlist(stringr::str_split(args[3], pattern=":")) # which hierarchy of mosaic states to process
message(tier)
#outdir <- 'D:/MergeLANDFIRECDL_Rasters/2017MergeCDL_LANDFIRE/' #file path on laptop
outdir <- '/90daydata/geoecoservices/MergeLANDFIREandCDL/'
# Per-state rasters for the requested year live under StateRasters/<year>.
statedir <- paste0(outdir,'/StateRasters/', CDLYear)
# Identifier prefix for the mosaicked outputs.
ID <- paste0('CDL', CDLYear,'NVC')
beecoSp::mosaic_states(outdir=outdir, statedir=statedir, ID=ID, tier=tier, usepackage='gdal')
|
7d2ae9f29c19d5a512916fb66d7a458954fbbe0e
|
f5f6069fc04306383a2b1e015dc9925d57543442
|
/R_Programming/R_Programming_Coursera/Assignment_Week_4/rankall.R
|
e6a27cd26adda8b28cfa50f776b5c85325fe37f9
|
[] |
no_license
|
grizztastic/projects
|
42b408b2321a27610691629451a014d982533c6c
|
03019a82b16a23264a8eb1394ea30bc7dc7beb4a
|
refs/heads/master
| 2023-01-27T17:33:01.962227
| 2020-12-02T03:05:32
| 2020-12-02T03:05:32
| 267,395,406
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,592
|
r
|
rankall.R
|
rankall <- function(outcome, num = "best") {
  ## Rank hospitals within every state for a 30-day mortality outcome.
  ##
  ## outcome: one of "heart attack", "heart failure", "pneumonia"
  ## num:     rank to report per state -- "best", "worst", or an integer
  ## Returns a data.frame with columns `hospital` and `state`; states with
  ## fewer than `num` hospitals yield NA.

  ## Read outcome data
  care <- read.csv("rprog_data_ProgAssignment3-data/outcome-of-care-measures.csv", colClasses = "character")

  ## Validate the requested outcome
  valid_outcomes <- c("heart attack", "heart failure", "pneumonia")
  if (!outcome %in% valid_outcomes) {
    stop("invalid outcome")
  }

  ## Pick the mortality-rate column matching the requested outcome
  rate_col <- switch(outcome,
    "heart attack"  = care$Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack,
    "heart failure" = care$Hospital.30.Day.Death..Mortality..Rates.from.Heart.Failure,
    care$Hospital.30.Day.Death..Mortality..Rates.from.Pneumonia
  )

  ## Sort by mortality rate, then hospital name; drop rows with NA rates
  ranked <- care[order(as.numeric(rate_col), care[, 2], na.last = NA), ]
  by_state <- split(ranked, ranked$State)

  ## Within each state's (already sorted) block, pick the hospital at `num`
  pick_hospital <- function(state_df) {
    if (num == "best") {
      state_df$Hospital.Name[1]
    } else if (num == "worst") {
      state_df$Hospital.Name[nrow(state_df)]
    } else {
      state_df$Hospital.Name[num]
    }
  }
  picks <- lapply(by_state, pick_hospital)

  ## Assemble the required two-column result
  data.frame(hospital = unlist(picks), state = names(picks))
}
|
6b8278bebce650dd21b1f211009d9042dc3813df
|
40051d5f9e1fe85adbbb5cebf42bb541012e5237
|
/R/calculatePosteriors.R
|
c42edd2b6040872e085b4118b7076ad050e86e47
|
[] |
no_license
|
ndukler/tkSim
|
e5f1edce59a746c61a71e7bb22b55a9f097cd9e0
|
0520ba0467dda9a68f786943237c85ec04760e38
|
refs/heads/master
| 2021-05-11T15:35:58.096459
| 2018-05-01T18:33:40
| 2018-05-01T18:33:40
| 117,736,436
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,233
|
r
|
calculatePosteriors.R
|
setGeneric("calculatePosteriors", function(object,...) standardGeneric("calculatePosteriors"))
#' Calculate Posterior Probabilities for Inferred Parameters
#'
#' Uses numeric methods to estimate posteriors for the inferred parameters \code{alpha} (synthesis rate) and \code{beta} (degradation rate).
#' Currently uses a flat prior for both alpha and beta as a default. This function is written such that non-flat priors may be used in the future.
#' @param object A \linkS4class{basicKineticModel} object
#' @param alphaRange Scale factors used to calculate the upper and lower bounds of the parameter range explored for \code{alpha}. These scale factors will
#' be applied to the inferred value of \code{alpha}. Must be defined as \code{c(lower,upper)}.
#' @param betaRange Scale factors used to calculate the upper and lower bounds of the parameter range explored for \code{beta}. These scale factors will
#' be applied to the inferred value of \code{beta}. Must be defined as \code{c(lower,upper)}.
#' @param paramSpaceSize The total size of parameter space to numerically integrate over. Half of the parameter space will be given to each parameter.
#' @param logProbAlpha A function that returns the log prior probability for a given value of \code{alpha}
#' @param logProbBeta A function that returns the log prior probability for a given value of \code{beta}
#' @param dispByGene Boolean controlling the expected nature of the \code{dispersionModel}. See \code{dispersionModel} description in \code{\link{inferParameters}}
#' for more details.
#' @return The input object with \code{@posteriors} filled in: one data frame
#' per gene with columns \code{alpha}, \code{beta}, \code{posterior}
#' (returned invisibly).
#'
#' @name calculatePosteriors
#' @include class-basicKineticModel.R llFactory.R logSumExp.R
#' @examples
#' EXAMPLE HERE
#' @export
setMethod("calculatePosteriors",signature(object="basicKineticModel"), function(object,alphaRange=numeric(2),betaRange=numeric(2),paramSpaceSize=10^4,dispByGene=T,logProbAlpha=NULL,logProbBeta=NULL)
{
  # Validate/default the alpha search range (the default numeric(2) has a 0
  # first element, which triggers the fall-back of 0.25x-2x the estimate).
  if(alphaRange[1]==0)
  {
    warning("No range specified for alpha, using default range of .25x to 2x alpha.")
    alphaRange=c(.25,2)
  }else if(alphaRange[1]>alphaRange[2])
    stop(paste0("Alpha range specified incorrectly. Lower: ",alphaRange[1]," > Upper: ",alphaRange[2],". Upper must be greater than Lower"))
  else if(length(alphaRange)>2)
    stop("Too many arguments supplied for alpha range. Must be a vector of length 2 in the form (lower, upper).")
  # Same validation for the beta search range.
  if(betaRange[1]==0)
  {
    warning("No range specified for beta, using default range of .25x to 2x beta.")
    betaRange=c(.25,2)
  }else if(betaRange[1]>betaRange[2])
    stop(paste0("Beta range specified incorrectly. Lower: ",betaRange[1]," > Upper: ",betaRange[2],". Upper must be greater than Lower"))
  else if(length(betaRange)>2)
    stop("Too many arguments supplied for beta range. Must be a vector of length 2 in the form (lower, upper).")
  # Default priors are flat over the sqrt(paramSpaceSize) grid points of each
  # parameter (log of 1/gridsize, recycled to the length of the input).
  if(is.null(logProbAlpha))
  {
    logProbAlpha = function(x){rep(log(1/sqrt(paramSpaceSize)),length(x))}
  }
  if(is.null(logProbBeta))
  {
    logProbBeta = function(x){rep(log(1/sqrt(paramSpaceSize)),length(x))}
  }
  # Generate a likelihood estimator (closure) for each gene.
  logLH = lapply(X=1:nrow(object@inferedParams), FUN=llFactory, object=object,dispByGene=dispByGene)
  # For every gene: lay out an (alpha, beta) grid around the inferred values,
  # evaluate log-likelihood + log-priors on it, and normalize via
  # log-sum-exp to obtain the (discretized) posterior.
  posteriors = lapply(X=1:nrow(object@inferedParams),object=object,logLH=logLH,FUN=function(x,object,logLH)
  {
    alpha = object@inferedParams[x,"alpha"]
    beta = object@inferedParams[x,"beta"]
    aMax = alphaRange[2]*alpha
    aMin = alphaRange[1]*alpha
    bMax = betaRange[2]*beta
    bMin = betaRange[1]*beta
    # Full cartesian grid: sqrt(paramSpaceSize) points per parameter.
    paramRange = expand.grid(seq(aMin,aMax,length.out = sqrt(paramSpaceSize)), seq(bMin,bMax,length.out = sqrt(paramSpaceSize)))
    numerator = apply(paramRange,1,function(y) logLH[[x]](y)) + logProbAlpha(paramRange[,1]) + logProbBeta(paramRange[,2])
    # Normalizing constant in log space; subtracting it avoids underflow.
    marginal = logSumExp(numerator)
    posterior = exp(numerator-marginal)
    res=cbind(paramRange,posterior=posterior)
    colnames(res) = c("alpha","beta","posterior")
    return(res)
  })
  object@posteriors = posteriors
  invisible(object)
})
|
6dea293ef1db0cb3ceabf7d656cf98c732244b67
|
a2cfda897fad97d76a3b4c4be986eb63ef8046cc
|
/exercises/c5.R
|
84e4cd9fc00076a7f5f1d6cd9e988e38f2952fd4
|
[] |
no_license
|
ssh352/islr_stats
|
1815ff8731b6d460216898d02bfc88f26ed16cee
|
9eae37ca99ca6762e67eb9c99e459ef01a9ee9ea
|
refs/heads/master
| 2021-09-10T12:26:59.658428
| 2018-03-26T09:04:30
| 2018-03-26T09:04:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,471
|
r
|
c5.R
|
# ISLR Chapter 5 (resampling methods) exercise script.
# NOTE(review): results depend on RNG state, so statement order matters;
# do not reorder the set.seed()/sampling calls.
require(data.table)
require(ISLR)
## Question 05 ----
dt <- data.table(Default)
glm.fit <- glm(default ~ income+balance,
               data = dt,
               family = binomial)
# (b)
# NOTE(review): sample(dt) on a data.table permutes COLUMNS, not rows;
# row shuffling (e.g. dt[sample(.N)]) was likely intended -- confirm.
dt <- sample(dt)
train <- dt[1:6667]
test <- dt[6668:10000]
glm.fit <- glm(default ~ income+balance,
               data = train,
               family = binomial)
glm.prob <- data.table("prob" = predict(glm.fit, test, type = "response"))
glm.prob[prob > 0.5, pred := "Yes"]
glm.prob[is.na(pred),pred := "No"]
table(glm.prob$pred, test$default)
# Overall fraction of error in validation dataset:
(12+72)/nrow(glm.prob)
# 2.52%
# c) Same fit with a larger (80/20) train/test split
train <- dt[1:8000]
test <- dt[8001:10000]
glm.fit <- glm(default ~ income+balance,
               data = train,
               family = binomial)
glm.prob <- data.table("prob" = predict(glm.fit, test, type = "response"))
glm.prob[prob > 0.5, pred := "Yes"]
glm.prob[is.na(pred),pred := "No"]
table(glm.prob$pred, test$default)
# Overall fraction of error in validation dataset:
(7+43)/nrow(glm.prob)
# 2.5%
# The error decreased a bit, but that is explained by having less variables
# in the validation set, so that the model already know how to predict too
# many values
# d) Add a student indicator (st) as a dummy variable
dt[student=="Yes",st:=1]
dt[is.na(st),st:=0]
train <- dt[1:6667]
test <- dt[6668:10000]
glm.fit <- glm(default ~ income+balance+st,
               data = train,
               family = binomial)
glm.prob <- data.table("prob" = predict(glm.fit, test, type = "response"))
glm.prob[prob > 0.5, pred := "Yes"]
glm.prob[is.na(pred),pred := "No"]
table(glm.prob$pred, test$default)
# Overall fraction of error in validation dataset:
(16+71)/nrow(glm.prob)
# 2.61%
# The error increased a bit, meaning that student might not be that much
# important when feeding our model
## Question 06 ----
dt <- data.table(Default)
glm.fit <- glm(default ~ income+balance,
               data = dt,
               family = binomial)
# a) Standard errors from the glm summary
summary(glm.fit)
#std.error:
# income 2.965e-6
# balance 2.274e-04
# b) Bootstrap estimate of the same coefficients
boot.fn <- function(dt, index)
  return(coef(glm(default ~ income+balance, data = dt[index,],
                  family = binomial)))
dt <- data.table(Default)
nrow(dt)
boot.fn(dt, 1:900)
require(boot)
boot(data = dt, statistic = boot.fn, R = 1000)
# Comparing the two methods, the income had the most different coefficient
# for the different approaches
## QUESTION 07 ----
dt <- data.table(Weekly)
glm.fit <- glm(Direction ~ Lag1 + Lag2, data = dt, family = binomial)
plot(glm.fit)
# Fit on all-but-first observation, predict the held-out first one
glm.fit2 <- glm(Direction ~ Lag1 + Lag2, data = dt[-1,], family = binomial)
glm.prob <- data.table("prob" = predict(glm.fit2, dt[1,], type = "response"))
glm.prob[prob > 0.5, pred := "Up"]
glm.prob[is.na(pred),pred := "Down"]
table(glm.prob$pred, dt[1,Direction])
# The prediction says that it is up, when actually it is down
# d) In a loop: manual leave-one-out cross-validation
n <- rep(0, nrow(dt))
for (i in 1:nrow(dt)){
  glm.fit <- glm(Direction ~ Lag1 + Lag2, data = dt[-i,], family = binomial)
  glm.prob <- data.table("prob" = predict(glm.fit, dt[i,], type = "response"))
  glm.prob[,pred := ifelse(prob > 0.5, "Up", "Down")]
  if (glm.prob$pred != dt[i,Direction])
    n[i] <- 1
}
#LOOCV Error (fraction of misclassified held-out observations)
cat(paste0((round(sum(n)*100/length(n),3)),"%"))
# A relatively high error, but the error of predicting all the
# training dataset is already high, about 44.44%
## QUESTION 08 ----  (NOTE(review): was mislabelled "QUESTION 07" twice)
set.seed(1)
x = rnorm(100)
y = x-2*x^2+rnorm(100)
# a) n is 100 points and p is 1, only one predictor, the x
# b) The relationship seems to be quadratic, from the scatterplot,
# so we expect to have a better fit if we use
# X and the square of X
plot(x,y)
# c) LOOCV error (cv.glm default K = n) for polynomial fits of degree 1-4
set.seed(1)
x = rnorm(100)
dt <- data.table(x,y = x-2*x^2+rnorm(100))
plot(dt)
# i.) Y = B0+B1X+e
cv.glm(dt, glm(y ~ x, data = dt))$delta
# ii.) Y = B0+B1X+B2X2+e
cv.glm(dt, glm(y ~ x+I(x^2), data = dt))$delta
# iii.) Y = B0+B1X+B2X2+B3X3+e
cv.glm(dt, glm(y ~ poly(x,3), data = dt))$delta
# iv.) Y = B0+B1X+B2X2+B3X3+B4X4+e
cv.glm(dt, glm(y ~ poly(x,4), data = dt))$delta
# d) Repeat with a different seed
set.seed(2)
x = rnorm(100)
dt <- data.table(x,y = x-2*x^2+rnorm(100))
plot(dt)
# i.) Y = B0+B1X+e
cv.glm(dt, glm(y ~ x, data = dt))$delta
# ii.) Y = B0+B1X+B2X2+e
cv.glm(dt, glm(y ~ x+I(x^2), data = dt))$delta
# iii.) Y = B0+B1X+B2X2+B3X3+e
cv.glm(dt, glm(y ~ poly(x,3), data = dt))$delta
# iv.) Y = B0+B1X+B2X2+B3X3+B4X4+e
cv.glm(dt, glm(y ~ poly(x,4), data = dt))$delta
# The results changed a bit, since the random values are
# not coming from the same seed. The scatter plots show the
# different patterns
275eab1e6f15eb9e828f0c471fded97ce275ac66
|
3e9052c3badc3b2363456142b53a552cf3bffdde
|
/R/create_equal_alignment.R
|
37a58c3e7c0d40273da0241c2de8e7d9eeb24d8d
|
[] |
no_license
|
thijsjanzen/nodeSub
|
ad0a73acfc99241302d2c8307e90dcc4ac302306
|
a85bb1a6251a1b15cd6add635b721247b508f896
|
refs/heads/master
| 2023-05-25T17:28:39.099142
| 2023-05-15T08:21:56
| 2023-05-15T08:21:56
| 180,762,207
| 1
| 2
| null | 2020-01-08T15:03:51
| 2019-04-11T09:45:02
|
R
|
UTF-8
|
R
| false
| false
| 3,324
|
r
|
create_equal_alignment.R
|
#' Create an alignment with identical information content
#'
#' Simulates alignments on \code{input_tree} until one accumulates exactly the
#' same number of substitutions as \code{alignment_result} (rejection
#' sampling), using a substitution rate adjusted for the node/branch
#' substitution balance of the input alignment.
#' @param input_tree phylogeny for which to generate the alignment
#' @param sub_rate substitution rate used in the original phylogeny
#' @param alignment_result result of sim_normal, sim_linked or sim_unlinked
#' @param sim_function function accepting a tree, sequence length, root
#'   sequence and substitution rate (in that order); defaults to sim_normal
#' @param verbose print intermediate progress while sampling
#' @param node_time node time (required when
#'   \code{input_alignment_type == "normal"})
#' @param input_alignment_type "nodesub", "normal" or "fix_sub_rate";
#'   determines how the twin mutation rate is computed
#' @return list with the simulated alignment plus \code{adjusted_rate}, the
#'   substitution rate used to obtain identical information content, and the
#'   accumulated node/branch substitution counts.
#' @export
create_equal_alignment <- function(input_tree,
                                   sub_rate,
                                   alignment_result,
                                   sim_function = NULL,
                                   verbose = FALSE,
                                   node_time = NULL,
                                   input_alignment_type = "nodesub") {
  target_subs <- alignment_result$total_accumulated_substitutions

  # Default ("nodesub"): inflate the branch rate so branch substitutions
  # alone reproduce the input's node + branch substitution load.
  adjusted_rate <- sub_rate +
    sub_rate * alignment_result$total_node_substitutions /
      alignment_result$total_branch_substitutions
  if (input_alignment_type == "normal") {
    if (is.null(node_time)) {
      stop("Node time needs to be provided")
    }
    # Deflate the rate by the extra "time" the node substitutions represent.
    total_node_sub <- node_time * 2 * input_tree$Nnode
    total_branch_time <- sum(input_tree$edge.length)
    adjusted_rate <- sub_rate / (1 + total_node_sub / total_branch_time)
  }
  if (input_alignment_type == "fix_sub_rate") {
    adjusted_rate <- sub_rate
  }

  seq_length <- length(alignment_result$root_seq)
  if (is.null(sim_function)) {
    sim_function <- function(input_tree, seqlen, rootseq, rate) {
      sim_normal(x = input_tree,
                 l = seqlen,
                 rootseq = rootseq,
                 rate = rate)
    }
  }

  # Rejection sampling: keep redrawing until the simulated alignment carries
  # exactly the target number of substitutions.
  candidate <- sim_function(input_tree,
                            seq_length,
                            alignment_result$root_seq,
                            adjusted_rate)
  while (candidate$total_accumulated_substitutions != target_subs) {
    candidate <- sim_function(input_tree,
                              seq_length,
                              alignment_result$root_seq,
                              adjusted_rate)
    if (verbose) cat(candidate$total_accumulated_substitutions, " ",
                     target_subs, " ",
                     sub_rate, " ",
                     adjusted_rate, "\n")
  }
  candidate$adjusted_rate <- adjusted_rate
  return(candidate)
}
|
c9bca76cdcbd46799921b9e678750402c51ac1c4
|
0cc863fed706b96df0c44afe7d466cff23228049
|
/man/suff_stat.Bernoulli.Rd
|
38246f9c8b56de0fabf29fbf79f6f7cdffc88198
|
[
"MIT"
] |
permissive
|
alexpghayes/distributions3
|
80a96665b4dabe2300908d569cb74de3cc75b151
|
67d27df128c86d80fe0c903b5b2c8af1fb9b0643
|
refs/heads/main
| 2023-01-27T14:49:47.588553
| 2023-01-18T18:12:22
| 2023-01-18T18:12:22
| 185,505,802
| 52
| 11
|
NOASSERTION
| 2023-01-18T18:12:24
| 2019-05-08T01:38:24
|
R
|
UTF-8
|
R
| false
| true
| 723
|
rd
|
suff_stat.Bernoulli.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Bernoulli.R
\name{suff_stat.Bernoulli}
\alias{suff_stat.Bernoulli}
\title{Compute the sufficient statistics for a Bernoulli distribution from data}
\usage{
\method{suff_stat}{Bernoulli}(d, x, ...)
}
\arguments{
\item{d}{A \code{Bernoulli} object.}
\item{x}{A vector of zeroes and ones.}
\item{...}{Unused.}
}
\value{
A named list of the sufficient statistics of the Bernoulli
distribution:
\itemize{
\item \code{successes}: The number of successful trials (\code{sum(x == 1)})
\item \code{failures}: The number of failed trials (\code{sum(x == 0)}).
}
}
\description{
Compute the sufficient statistics for a Bernoulli distribution from data
}
|
71621656d85ed2309815089d948c717e5fe7578d
|
6129a47af94390370862748e1cb00104309766d6
|
/Human_Cultural_Boundaries/R/Source_Plotting.R
|
eeca2634cc7d57f9190db141341edbf5304a2682
|
[] |
no_license
|
NeuroBio/HCB_R_Package
|
ea06605d1420499044e0478b2719bca2be2e4bb3
|
a7458d1d6e4ce9a130b741e0b05ce7b17750b3c5
|
refs/heads/master
| 2023-08-28T04:35:33.451306
| 2021-11-08T11:07:07
| 2021-11-08T11:07:07
| 191,992,585
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,633
|
r
|
Source_Plotting.R
|
#Plotting
#THIS NEEDS TO BE CLEANED UP
#' Get Groups
#'
#' For each seed population, traces every territory founded (directly or
#' indirectly) by that seed.  Each list element is a matrix with columns
#' \code{Terr} (descendant row index) and \code{Connect} (row index of its
#' founder).  Only works when Uproot and Death are FALSE.
#' @param P A list of parameters.
#' @param Data The Pre or Post output from an HBC simulation.
#' @keywords Plotting
#' @export
#'
GetGroups <- function(P, Data){
  pops <- Data$Populations
  lapply(seq_along(P$PopStart), function(seed_idx){
    # Walk generation by generation: rows whose founder (column 1) matches an
    # ID (column 2) of the previous generation.
    descendants <- list()
    current <- P$PopStart[seed_idx]
    repeat{
      current <- which(pops[,1] %in% pops[current,2])
      if(length(current)==0) break
      descendants[[length(descendants)+1]] <- current
    }
    if(length(descendants)==0){
      # Seed produced nothing: keep the NA placeholder row.
      Terr <- NA
      Connect <- NA
    }else{
      Terr <- sort(unlist(descendants))
      # For each descendant, find the row whose ID equals its founder.
      Connect <- sapply(seq_along(Terr), function(k) which(pops[,2] == pops[Terr[k],1]))
    }
    cbind(Terr,Connect)
  })
}
#' Bering Strait Plot
#'
#' Two-by-two summary panel for a Bering-barrier simulation: a histogram of
#' non-empty population sizes, the populated-territory map, and the phoneme
#' frequency plot.  Requires that the simulation was run with the Bering
#' barriers enabled (\code{P$Bering == TRUE}).
#' @param P A list of parameters.
#' @param Data The Pre or Post output from an HBC simulation.
#' @param colors A vector of colors of length equal to the number of seed
#'   populations; random colors are drawn when NA.
#' @keywords Plotting
#' @export
#
BeringStraitPlot <- function(P, Data, colors=NA){
  if(!P$Bering){
    stop("This plot assumes that the Berring Barriers were implemented.")
  }
  Groups <- GroupBySeed(P, Data)
  if(is.na(colors)[1]){
    colors <- randomColor(length(Groups))
  }
  par(mar=c(3,2.5,1,1), mgp=c(1.5,.5,0), mfrow=c(2,2), bg="grey10", fg="white")
  Sizes <- Data$Populations$SizeCurrent
  # Drop empty populations.  The previous `Sizes[-(which(Sizes==0))]` emptied
  # the whole vector when no zero existed, because x[-integer(0)] selects
  # nothing; only negate when there is something to remove.
  zero_idx <- which(Sizes == 0)
  if (length(zero_idx) > 0) {
    Sizes <- Sizes[-zero_idx]
  }
  hist(Sizes,
       xlab="Population Size", ylab="Number of Populations",
       col.axis="white", col.lab="white", col = "lightblue", border="lightblue4")
  PopulationPlot(P, Data, Groups, colors)
  PhonemePopulationFrequencyPlots(P, Data, Groups, colors, sort=TRUE)
}
#' Group By Seed
#'
#' Partitions territories by the seed population they descend from, using the
#' recorded \code{SeedID}.  Works for all simulations (including Death and
#' Uproot runs) but carries no ancestry detail.
#' @param P A list of parameters.
#' @param Data The Pre or Post output from an HBC simulation.
#' @keywords Plotting
#' @export
#
GroupBySeed <- function(P, Data){
  # One list entry per seed: row indices whose SeedID matches that seed.
  lapply(seq_along(P$PopStart),
         function(seed) which(Data$Populations$SeedID == seed))
}
#' Snapshot Plot
#'
#' Shows which territories are populated and from which seed they descended.
#' @param P A list of parameters.
#' @param Data The Pre or Post output from an HBC simulation.
#' @param colors A vector of colors, one for each seed.
#' @keywords Plotting
#' @export
#
SnapshotPlot<-function(P, Data, colors){
  Groups <- GroupBySeed(P, Data)
  # Pass arguments by name: PopulationPlot's signature is
  # (P, Data, groups, colors).  The previous positional call
  # PopulationPlot(P, Groups, colors) dropped Data and shifted Groups/colors
  # into the wrong parameters.
  PopulationPlot(P, Data, groups = Groups, colors = colors)
}
#' Population Plot
#'
#' Draws the grid of populated territories, colored by seed group, and
#' overlays the Bering Strait barrier segments when \code{P$Bering} is TRUE.
#' @param P A list of parameters.
#' @param Data The Pre or Post output from an HBC simulation.
#' @param groups Group structure of which territories were descended from what
#'   population seed; derived from \code{Data} when NA.
#' @param colors A vector of colors of length equal to the number of seed
#'   populations; random colors are drawn when NA.
#' @keywords Plotting
#' @export
#
PopulationPlot <- function(P, Data, groups=NA, colors=NA){
  if(is.na(groups)[1]){
    # Death/Uproot runs lack ancestry chains, so fall back to SeedID grouping;
    # otherwise take the Terr column of the detailed GetGroups output.
    if(P$Death || P$UpRoot){
      groups <- GroupBySeed(P, Data)
    }else{
      groups <- GetGroups(P, Data)
      for(i in seq_along(groups)){
        groups[[i]] <- groups[[i]][,1]
      }
    }
  }
  if(is.na(colors)[1]){
    colors <- randomColor(length(groups))
  }
  # Empty plot with y reversed so row 1 is at the top of the grid.
  plot(0, type="n", xlim=c(1, P$C), ylim=c(P$R,1),
       col.axis="white", font.axis=2)
  for(i in seq_along(groups)){
    if(length(groups[[i]]) > 0){
      # Column-major index -> (x, y): x = ceiling(idx / R), y = idx mod R
      # (with 0 mapped back to R).
      Modtest <- groups[[i]]%%P$R
      points(ceiling(groups[[i]]/P$R), ifelse(Modtest==0,P$R,Modtest), col=colors[i], pch=19)
    }
  }
  if(P$Bering){
    # Overlay the four barrier segments returned by GetBering.
    Pos <- GetBering(P)
    AddSegment(P, Pos$AsiaLowerRight, Pos$AsiaUpperRight)
    AddSegment(P, Pos$AsiaBeringCorner, Pos$NAmericanLowerRight)
    AddSegment(P, Pos$AsiaBeringCorner, Pos$BeringNAmericaCorner)
    AddSegment(P, Pos$NAmericanLowerEntry, Pos$NAmericanUpperRight, top=TRUE)
  }
}
#' Add Segment
#'
#' Draws one Bering Strait barrier line between two territories on the current
#' plot, offsetting by half a cell so the line falls on cell borders.
#' @param P A list of parameters (uses \code{P$R}, the number of rows).
#' @param a Territory index where the segment starts.
#' @param b Territory index where the segment ends.
#' @param top When TRUE, the end point is shifted half a cell up instead of
#'   down (used for the topmost barrier segment).
#' @keywords Plotting
#' @export
#
AddSegment <- function(P, a, b, top=FALSE){
  # Row position within the column; 0 means the bottom row (R).
  Modtest1 <- a%%P$R
  Modtest2 <- b%%P$R
  if(top){
    segments(ceiling(a/P$R)+.5, ifelse(Modtest1==0,P$R,Modtest1)+.5,
             ceiling(b/P$R)+.5, Modtest2-.5,
             col="white", lwd=2)
  }else{
    segments(ceiling(a/P$R)+.5, ifelse(Modtest1==0,P$R,Modtest1)+.5,
             ceiling(b/P$R)+.5, ifelse(Modtest2==0,P$R,Modtest2)+.5,
             col="white", lwd=2)
  }
}
#' Migration Plot
#'
#' Shows the expansion of populations from each seed as arrows drawn from each
#' descendant territory back to its founder.  Only works when Uproot and Death
#' are FALSE (requires the ancestry matrices from \code{GetGroups}).
#' @param P A list of parameters.
#' @param Data The Pre or Post output from an HBC simulation.
#' @param groups Group structure of which territories were descended from what
#'   population seed; derived from \code{Data} when NA.
#' @param colors A vector of colors of length equal to the number of seed
#'   populations; a distinct palette is drawn when NA.
#' @keywords Plotting
#' @export
#
MigrationPlot <- function(P, Data, groups=NA, colors=NA){
  if(is.na(groups)[1]){
    groups <- GetGroups(P, Data)
  }
  if(is.na(colors)[1]){
    # NOTE(review): distinctColorPalette comes from an external palette
    # package (presumably randomcoloR) -- confirm it is attached by callers.
    colors <- distinctColorPalette(length(groups))
  }
  # Empty plot with y reversed so row 1 is at the top of the grid.
  par(mar=c(2,2,1,1), mgp=c(1.5,.5,0), bg="grey10", fg="white")
  plot(0, type="n", xlim=c(1, P$C), ylim=c(P$R,1),
       col.axis="white", font.axis=2)
  for(i in seq_along(groups)){
    # Column-major index -> (x, y) for both ends of each arrow.
    Modtest1 <- groups[[i]][,"Terr"]%%P$R
    Modtest2 <- groups[[i]][,"Connect"]%%P$R
    # code=1 puts the arrowhead at the descendant (Terr) end.
    arrows(ceiling(groups[[i]][,"Terr"]/P$R), ifelse(Modtest1==0,P$R,Modtest1),
           ceiling(groups[[i]][,"Connect"]/P$R), ifelse(Modtest2==0,P$R,Modtest2),
           col=colors[[i]], angle=15, length=.1, code=1, lwd=2)
  }
}
#' Phoneme Frequency Plots
#'
#' Plots how many populations carry each phoneme: first across the whole
#' simulation, then one plot per seed group, with the seed's own phonemes
#' marked as points along the top.
#' NOTE(review): each plot() call replaces the previous one unless the caller
#' has set up a multi-panel device (e.g. par(mfrow=...)) -- confirm intent.
#' @param P A list of parameters.
#' @param Data The Pre or Post output from an HBC simulation.
#' @param groups Group structure of which territories were descended from what
#'   population seed; derived from \code{Data} when NA.
#' @param colors A vector of colors of length equal to the number of seed
#'   populations; random colors are drawn when NA.
#' @keywords Plotting
#' @export
#
PhonemeFrequencyPlots <- function(P, Data, groups=NA, colors=NA){
  if(is.na(groups)[1]){
    # Death/Uproot runs lack ancestry chains, so group by SeedID; otherwise
    # keep only the Terr column of the GetGroups matrices.
    if(P$Death || P$UpRoot){
      groups <- GroupBySeed(P, Data)
    }else{
      groups <- GetGroups(P, Data)
      for(i in seq_along(groups)){
        groups[[i]] <- groups[[i]][,1]
      }
    }
  }
  if(is.na(colors)[1]){
    colors <- randomColor(length(groups))
  }
  par(mar=c(3,3,1,1))
  # Overall phoneme frequency across every population.
  plot(colSums(Data$Languages), type="l", col="White",col.axis="white",
       col.lab="white", xlab="Phonemes Ordered Most to Least Common",
       ylab=paste0("Number of Populations (Total=",nrow(Data$Languages),")"))
  for(i in seq_along(groups)){
    # Per-seed frequency, with the seed's original phonemes marked on top.
    Sums <- colSums(Data$Languages[groups[[i]],])
    plot(Sums, type="l",
         col=colors[i],col.axis="white",col.lab="white",
         xlab="Phonemes Ordered Most to Least Common",
         ylab=paste0("Number of Populations (Total=",length(groups[[i]]),")"))
    points(which(Data$Languages[P$PopStart[i],]==1), rep(max(Sums),sum(Data$Languages[P$PopStart[i],])),
           col="White", cex=.6,pch=19)
  }
}
#' Phoneme Population Frequency Plots
#'
#' Stacked-bar view of how common each phoneme is across the simulation,
#' with each bar split by seed group (color-coded).
#' @param P A list of parameters (uses \code{P$nPhon}, \code{P$Death},
#'   \code{P$UpRoot}).
#' @param Data The Pre or Post output from an HBC simulation.
#' @param groups Group structure of which territories were descended from what
#'   population seed; derived from \code{Data} when NA.
#' @param colors A vector of colors of length equal to the number of seed
#'   populations; random colors are drawn when NA.
#' @param sort Whether to order phonemes from most to least frequent.
#' @keywords Plotting
#' @export
#
PhonemePopulationFrequencyPlots <- function(P, Data, groups=NA, colors=NA, sort=TRUE){
  if(is.na(groups)[1]){
    # Death/Uproot runs lack ancestry chains, so group by SeedID.
    if(P$Death || P$UpRoot){
      groups <- GroupBySeed(P, Data)
    }else{
      groups <- GetGroups(P, Data)
    }
  }
  if(is.na(colors)[1]){
    colors <- randomColor(length(groups))
  }
  if(sort){
    Data$Languages <- Data$Languages[,order(colSums(Data$Languages), decreasing = TRUE)]
  }
  # Per-seed phoneme counts: one row per seed group.
  PhoPerSeed <- matrix(0,nrow=length(groups),ncol=P$nPhon)
  for(i in seq_along(groups)){
    if(P$Death || P$UpRoot){
      Choices <- Data$Languages[groups[[i]],]
    }else{
      Choices <- Data$Languages[groups[[i]][,1],]
    }
    # A single-territory group drops to a plain vector when subset.  The old
    # test `class(Choices) == "numeric"` breaks on R >= 4.0, where a matrix
    # has class c("matrix", "array") and `if` rejects length-2 conditions;
    # checking the dim attribute is version-safe.
    if(is.null(dim(Choices))){
      PhoPerSeed[i,] <- Choices
    }else{
      PhoPerSeed[i,] <- colSums(Choices)
    }
  }
  plot(0,type = "n", xlim=c(0,P$nPhon), ylim=c(0,max(colSums(Data$Languages))),
       col.axis="white",col.lab="white", ylab="Number of populations",
       xlab="Phonemes, Ordered Common to Rare", font.lab=2, cex.lab=1, font.axis=2)
  # Draw one stacked bar per phoneme, last seed at the bottom of the stack.
  for(i in 1:P$nPhon){
    rect(i-1,0,i,rev(cumsum(PhoPerSeed[,i])), col=rev(colors),border = NA)
  }
}
#' Get Colors
#'
#' Builds a color gradient over the generations descended from seed \code{i}:
#' each generation gets one color from a ramp between \code{colors[1]} and
#' \code{colors[2]}, and the result is one color per descendant territory,
#' ordered by territory index.
#' @param P A list of parameters.
#' @param Data The Pre or Post output from an HBC simulation.
#' @param i Index of the seed population to trace.
#' @param colors A vector of beginning and ending colors for the gradient.
#' @keywords Plotting
#' @export
#
GetColorDistribution <- function(P, Data, i, colors=c('coral1','coral4')){
  # Collect descendants of seed i generation by generation: rows whose
  # Founder matches an ID of the previous generation.
  generations <- list(P$PopStart[i])
  repeat{
    nxt <- which(Data$Populations$Founder %in% Data$Populations$ID[generations[[length(generations)]]])
    if(length(nxt) == 0) break
    generations[[length(generations) + 1]] <- nxt
  }
  # Drop the seed itself; empty terminal generations were never appended.
  generations[[1]] <- NULL
  # One ramp color per generation.
  palette_fun <- colorRampPalette(colors)
  names(generations) <- palette_fun(length(generations))
  # Flatten to a named vector (name = generation color, value = territory)
  # and return the colors ordered by territory index.
  flat <- setNames(unlist(generations, use.names = FALSE),
                   rep(names(generations), lengths(generations)))
  names(sort(flat))
}
#' Phoneme Mantel
#'
#' Performs Mantel tests of geographic distance against both Jaccard and
#' Hamming (Manhattan) language distances, printing each test result and
#' plotting both distance matrices.
#' NOTE(review): distance() and mantel.rtest() come from external packages
#' (presumably philentropy and ade4) -- confirm they are attached by callers.
#' @param P A list of parameters.
#' @param Data The Pre or Post output from an HBC simulation.
#' @param repeats How many permutations to use in each Mantel test.
#' @return Elapsed time of the whole computation (proc.time difference).
#' @keywords Plotting
#' @export
#
PhonemeMantel <- function(P, Data, repeats=100){
  time <- proc.time()
  # Distances: geographic (grid), Jaccard, and Hamming/Manhattan.
  DistMat <- MakeDistanceMap(P)
  Jac <- distance(Data$Languages, method="jaccard")
  Ham <- distance(Data$Languages, method="manhattan")
  par(mfrow=c(1,2))
  image(Jac)
  image(Ham)
  # Convert to dist objects as required by mantel.rtest.
  FinalGeo <- as.dist(DistMat)
  FinalJac <- as.dist(Jac)
  FinalHam <- as.dist(Ham)
  # Run and print both permutation tests.
  print(mantel.rtest(FinalGeo, FinalJac, repeats))
  print(mantel.rtest(FinalGeo, FinalHam, repeats))
  proc.time()-time
}
#' Make Distance Map
#'
#' Creates a pairwise matrix of Euclidean distances between all territories on
#' the P$R x P$C grid.
#' @param P A list of parameters (uses \code{P$R} and \code{P$C}).
#' @return A square numeric matrix of dimension \code{P$R * P$C}.
#' @keywords Plotting
#' @export
#
MakeDistanceMap <- function(P){
  # Size was previously hard-coded to 2000x2000, which only matched one
  # specific grid; derive it from the grid dimensions instead.
  n_terr <- P$R * P$C
  DistMat <- matrix(0, nrow=n_terr, ncol=n_terr)
  for(i in seq_len(n_terr)){
    for(j in seq_len(n_terr)){
      DistMat[i,j] <- GetDist(P,i,j)
    }
  }
  return(DistMat)
}
#' Get Distance
#'
#' Euclidean distance between two territories on the grid.
#' @param P A list of parameters.
#' @param point1 One territory index.
#' @param point2 Another territory index.
#' @keywords Plotting
#' @export
#
GetDist <- function(P, point1, point2){
  # Convert both territory indices to (x, y) grid coordinates, then apply
  # the Pythagorean theorem.
  coords <- GetXYCoords(P, rbind(point1, point2))
  dx <- coords[1,1] - coords[2,1]
  dy <- coords[1,2] - coords[2,2]
  sqrt(dx^2 + dy^2)
}
#' Get XY Coordinates
#'
#' Converts territory indices into (X, Y) grid coordinates.  Territories are
#' numbered column-major: indices 1..P$R fill column 1 top to bottom,
#' P$R+1..2*P$R fill column 2, and so on.
#' @param P A list of parameters (uses \code{P$R}, the number of rows).
#' @param territories A vector (or matrix) of territory indices to convert.
#' @return A matrix with columns \code{Xs} (column index) and \code{Ys}
#'   (row index), one row per territory.
#' @keywords Plotting
#' @export
#
GetXYCoords <- function(P, territories){
  # Vectorized replacement for the original element-wise loop: a zero-based
  # integer division/modulo recovers column (X) and row (Y) from the
  # column-major index in one pass.
  idx <- as.vector(territories) - 1
  Xs <- idx %/% P$R + 1
  Ys <- idx %% P$R + 1
  cbind(Xs, Ys)
}
#' Get Bering Strait Coordinates
#'
#' Returns the (x, y) positions of the Bering Strait boundary lines, offset by
#' half a cell so they fall on cell borders.  The boundary territory indices
#' themselves come from \code{GetBering(P)}.
#' @param P A list of parameters.
#' @return A matrix of boundary coordinates (vertical segments first, then
#'   horizontal ones).
#' @keywords Plotting
#' @export
#
GetBeringCoords <- function(P){
  Pos <- GetBering(P)
  #vertical boundaries are imposed rightward/increasing X index
  Vert<-GetXYCoords(P, c(Pos$AsiaLowerRight:Pos$AsiaUpperRight,
                         Pos$AsiaBeringCorner:Pos$BeringNAmericaCorner,
                         Pos$NAmericanLowerEntry:Pos$NAmericanUpperRight))
  # Shift x by half a cell so the line sits between columns.
  Vert[,1] <- Vert[,1]+.5
  #horizontal boundaries are imposed downward/increasing Y index
  Horz <- GetXYCoords(P, c(seq(Pos$AsiaUpperRight,Pos$AsiaBeringCorner, by=-P$R)-1,
                           seq(Pos$AsiaUpperRight, Pos$NAmericanLowerRight, by=P$R)-1))
  # Shift y by half a cell so the line sits between rows.
  Horz[,2] <- Horz[,2]+.5
  return(rbind(Vert,Horz))
}
#' Save Data
#'
#' Writes the language matrices (and seed assignments) from a simulation
#' result to .csv files whose names start with \code{filename}.
#' @param Data Simulation output containing \code{NoHorizontal} /
#'   \code{Horizontal} results and/or an \code{Alternated} result.
#' @param filename Path prefix for the output files.
#' @keywords Plotting
#' @export
#
SaveData <- function(Data, filename){
  # Small helper: write one object to "<filename><suffix>".
  dump_csv <- function(obj, suffix) write.csv(obj, paste0(filename, suffix))
  if("NoHorizontal" %in% names(Data)){
    # Paired pre/post horizontal-transmission results share one seeds file.
    dump_csv(Data$NoHorizontal$Languages, "-pre.csv")
    dump_csv(Data$Horizontal$Languages, "-post.csv")
    dump_csv(Data$NoHorizontal$Populations$SeedID, "-seeds.csv")
  }
  if("Alternated" %in% names(Data)){
    dump_csv(Data$Alternated$Languages, "-alt.csv")
    dump_csv(Data$Alternated$Populations$SeedID, "-seeds.csv")
  }
}
|
1b5b25a413380dd6d4c536c6fc4edb414704b564
|
3f36e3afc25870cf6e9429de4a5b0604d52dc03a
|
/inst/shiny/VisualisingTrajectories/app.R
|
442568430b2e2dc7173b8a07ff434821469bfd62
|
[] |
no_license
|
Patricklomp/VisualisingHealthTrajectories
|
4077a62b7da7b92ad2c7aa99a918aaf15585788e
|
98e69c50d354a693f0e9e8a3d76c81e3e5088a7a
|
refs/heads/master
| 2023-05-31T16:21:50.435675
| 2021-06-04T07:43:42
| 2021-06-04T07:43:42
| 317,466,951
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 61
|
r
|
app.R
|
# Starts the Shiny application.  `ui` and `server` must already be defined
# when this runs (presumably sourced elsewhere in the app -- this file does
# not define them; confirm against the package's run function).
shinyApp(ui = ui, server = server)
|
a0fac1bf948521d541cfb800c6b277116db27c10
|
1b2646afcc7c602243e1025c6653d91a8aa313e9
|
/R/resample.CoxBoost.R
|
596103ebeccd8b5179debe6e63698e884beea5a2
|
[] |
no_license
|
kaixinhuaihuai/CoxBoost
|
b171c8d28b0ff7f302acf3310845f53ae40b04e4
|
e7fe9d6a30a8e77d9516539f2c21d377c83ea8f2
|
refs/heads/master
| 2023-01-15T12:08:06.490468
| 2020-11-18T22:06:48
| 2020-11-18T22:06:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,491
|
r
|
resample.CoxBoost.R
|
# Resampling stability analysis for weighted CoxBoost models.
#
# For `rep` subsampling replicates (each a 63.2% draw without replacement),
# fits a cross-validated CoxBoost model for every stratum weight in
# `mix.list` and records the final coefficient vector plus the CV-optimal
# number of boosting steps.
#
# Arguments:
#   time, status, x   survival times, event indicators, covariate matrix
#   rep               number of resampling replicates
#   maxstepno         maximum boosting steps explored by cross-validation
#   multicore         passed through to cv.CoxBoost
#   mix.list          stratum weights to evaluate (weight applied to the
#                     stratum equal to `stratnotinfocus`)
#   stratum           stratum membership per observation
#   stratnotinfocus   stratum value that receives the down-weighting
#   penalty           CoxBoost penalty
#   criterion         boosting selection criterion
#   unpen.index       indices of unpenalized covariates
# Returns: a list of length `rep`; each element has $beta (concatenated
# coefficient vectors over mix.list) and $CV.opt (optimal step counts).
resample.CoxBoost<- function(time,status,x,rep=100,maxstepno=200,multicore=TRUE,
                             mix.list=c(0.001, 0.01, 0.05, 0.1, 0.25, 0.35, 0.5, 0.7, 0.9, 0.99),
                             stratum,stratnotinfocus=0,
                             penalty=sum(status)*(1/0.02-1),criterion="hscore",unpen.index=NULL)
{
  # NOTE(review): self-assignment; also, `rep` shadows base::rep here --
  # later calls like rep(1, n) still resolve to the function because R skips
  # non-function bindings when looking up a call target.
  rep <- rep
  # Draw all subsamples up front: 63.2% of rows, without replacement.
  trainind <- list()
  for (i in 1:rep){
    trainind[[length(trainind)+1]] <- sample(1:nrow(x),round(nrow(x)*0.632),replace = F)
  }
  out <- list()
  for (iter in 1:rep) {
    message('iter=', iter)
    outbeta<-c()
    outCV.opt<-c()
    for (mix.prop in mix.list) {
      print(mix.prop)
      # NOTE(review): this first assignment is dead -- obs.weights is
      # overwritten two lines below.
      obs.weights <- rep(1,length(status))
      # Down-weight the not-in-focus stratum, then normalize so the weights
      # average to 1.
      case.weights <- ifelse(stratum == stratnotinfocus,mix.prop,1)
      obs.weights <- case.weights/sum(case.weights)*length(case.weights)
      # NOTE(review): data-dependent seed (same for CV and the final fit so
      # both see identical fold assignments) -- confirm this is intentional.
      set.seed(x[1,5]*100+time[19]*10)
      # Cross-validate the number of boosting steps on the subsample.
      CV <- cv.CoxBoost(time=time[trainind[[iter]]],status=status[trainind[[iter]]],x=x[trainind[[iter]],],
                        stratum=stratum[trainind[[iter]]],unpen.index=unpen.index,
                        coupled.strata = FALSE,weights=obs.weights[trainind[[iter]]],
                        maxstepno=maxstepno,K=10,penalty=penalty,
                        standardize=TRUE,trace=TRUE, multicore=multicore,criterion=criterion)
      set.seed(x[1,5]*100+time[19]*10)
      # Refit at the CV-optimal step count.
      CB <- CoxBoost(time=time[trainind[[iter]]],status=status[trainind[[iter]]],x=x[trainind[[iter]],],
                     stratum=stratum[trainind[[iter]]],unpen.index=unpen.index,
                     coupled.strata = FALSE,weights=obs.weights[trainind[[iter]]],
                     stepsize.factor=1,stepno=CV$optimal.step,penalty=penalty,
                     standardize=TRUE,trace=TRUE,criterion=criterion)
      # Keep the coefficient vector from the final boosting step.
      outbeta<-c(outbeta,CB$model[[1]][[5]][nrow(CB$model[[1]][[5]]),] )
      outCV.opt <- c(outCV.opt,CV$optimal.step)
    }
    out[[iter]] <- list(beta=outbeta,CV.opt=outCV.opt)
  }
  out
}
# Stability trajectory plot of resampling inclusion frequencies (RIFs).
#
# From the output of resample.CoxBoost, computes per-covariate inclusion
# frequencies (fraction of replicates with a nonzero coefficient) for every
# stratum weight, and plots one jittered point column per covariate whose
# RIF reaches `lowerRIFlimit` for at least one weight.
#
# Arguments:
#   RIF           output list from resample.CoxBoost
#   mix.list      weights used in the resampling run (column layout)
#   plotmix       subset of weights to actually draw
#   my.colors     one color per weight
#   yupperlim     y-axis upper limit
#   huge          axis label size (cex.axis)
#   lowerRIFlimit minimum RIF for a covariate to be shown
#   legendval     extra x-space reserved for the legend
stabtrajec<-function(RIF,mix.list=c(0.001,0.01, 0.05, 0.1, 0.25, 0.35, 0.5, 0.7, 0.9, 0.99)
                     ,plotmix=c(0.001,0.01, 0.05, 0.1, 0.25, 0.35, 0.5, 0.7, 0.9, 0.99)
                     ,my.colors=grDevices::gray(seq(.99,0,len=10)),
                     yupperlim=1,huge=0.6,lowerRIFlimit=0.6,legendval=4.5)
{
  # Concatenate the per-replicate beta vectors.
  RIF1<-c()
  for (i in 1: length(RIF)){RIF1<-c(RIF1,RIF[[i]][[1]])}
  # Rows = covariate x weight combinations folded into a matrix; value =
  # fraction of replicates where the coefficient is nonzero.
  freqmat <-matrix(apply(matrix(unlist(RIF1), ncol=length(RIF))!=0,1,mean), ncol=length(mix.list))
  # Keep covariates whose RIF reaches the threshold for some weight.
  sel.mask <- apply(freqmat,1,function(arg) any(arg >= lowerRIFlimit & arg < 1.1))
  # Horizontal jitter offsets: one slot per weight around each covariate.
  w5<-c(1:length(which(sel.mask==T)))
  jitmat<-cbind(w5-0.28,w5-0.21,w5-0.14,w5-0.07,w5,w5+0.07,w5+0.14,w5+0.21,w5+0.28,w5+0.35)
  colnames(jitmat)<-mix.list
  colnames(freqmat)<-mix.list
  # Restrict both matrices to the weights requested for plotting.
  freqmat<-freqmat[,which(colnames(freqmat)%in%paste("",plotmix,sep=""))]
  jitmat<-jitmat[,which(colnames(jitmat)%in%paste("",plotmix,sep=""))]
  plot(0,xlim=c(0.5,length(which(sel.mask==T))+legendval),ylim=c(0,yupperlim),type="n",main=" ",
       xlab=" ",ylab="resampling inclusion frequency", las=T,xaxt = "n")
  axis(1, at = c(1:length(which(sel.mask==T))),labels =rownames(freqmat[sel.mask,]),cex.axis=huge)
  # Filled points per weight, then open circles as outlines.
  for (i in 1:length(plotmix)){ points(jitmat[,i],freqmat[,i][sel.mask],col=my.colors[i],type = 'p',pch=16)}
  for (i in 1:length(plotmix)){ points(jitmat[,i],freqmat[,i][sel.mask],col=1,type = 'p')}
  # Connect each covariate's trajectory across weights.
  for (i in c(1:length(which(sel.mask==T)))){lines(jitmat[i,],freqmat[sel.mask,][i,],col=1)}
  for (i in 1:length(plotmix)) {legend("topright",paste("w=",plotmix, sep=""),pch=16,col=my.colors,bty="n")}
  for (i in 1:length(plotmix)) {legend("topright",paste("w=",plotmix, sep=""),pch=1,col=1,bty="n")}
  abline(v=c(1:length(which(sel.mask==T))), col=grDevices::gray(0.7))
}
# Heatmap of resampling inclusion frequencies across stratum weights.
#
# Same RIF computation as stabtrajec(), but shown as a row-scaled heatmap of
# covariates (rows) vs weights (columns), with rows clustered by correlation
# distance.
#
# Arguments:
#   RIF           output list from resample.CoxBoost
#   mix.list      weights used in the resampling run (column layout)
#   plotmix       subset of weights to display
#   lowerRIFlimit minimum RIF for a covariate to be shown
#   method        agglomeration method for hclust
weightfreqmap<-function(RIF,mix.list=c(0.001, 0.01, 0.05, 0.1, 0.25, 0.35, 0.5, 0.7, 0.9, 0.99)
                        ,plotmix=c(0.001, 0.01, 0.05, 0.1, 0.25, 0.35, 0.5, 0.7, 0.9, 0.99)
                        ,lowerRIFlimit=0.5,method="complete")
{
  # Concatenate per-replicate beta vectors and compute nonzero frequencies.
  RIF1<-c()
  for (i in 1: length(RIF)){RIF1<-c(RIF1,RIF[[i]][[1]])}
  freqmat <-matrix(apply(matrix(unlist(RIF1), ncol=length(RIF))!=0,1,mean), ncol=length(mix.list))
  colnames(freqmat)<-mix.list
  # Keep covariates whose RIF reaches the threshold for some weight.
  sel.indz<-apply(freqmat,1,function(arg) any(arg >= lowerRIFlimit & arg < 1.1))
  heatcol<-grDevices::gray(seq(0,.9,len=100))
  # Cluster rows by (1 - correlation)/2 distance; columns stay in the given
  # weight order (Colv=NA).
  heatmap(freqmat[sel.indz,which(colnames(freqmat)%in%paste("",plotmix,sep=""))],
          col=heatcol,hclustfun = function(x) hclust(x,method=method),
          distfun=function(x) as.dist((1-cor(t(x)))/2),
          xlab ="relative weights", scale="row",Colv=NA)
}
|
c24cd8d4b8af37410765953d0822d8b811ad8366
|
f2643256c6611d7de0db96d162f594388c2c2c50
|
/analyses/Trial 2/satstudy_recruitment.R
|
35f703f9a1c90ddcee69aade70c5e66b3e285789
|
[] |
no_license
|
raubreywhite/trial_dofiles
|
e06a5b3b39e9195eda79dd33856d67c918ec4053
|
eface3b83b107cf7e621b3c654e65b5cbd45b711
|
refs/heads/master
| 2022-06-14T03:26:17.492945
| 2022-06-02T07:27:04
| 2022-06-02T07:27:04
| 114,857,557
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,408
|
r
|
satstudy_recruitment.R
|
### not to be run on server ###
# Setup for the satisfaction-survey cleaning script: working directory,
# output folders, package installation, and loading of the raw data.
# set working directory (machine-specific path; must be adapted per user)
setwd("C:/Users/Mervett_Isbeih/sat_study")
getwd()
# Output folders. NOTE(review): `<<-` behaves like `<-` at the top level
# of a script; kept as-is, but plain `<-` would express the same thing.
FOLDER_SAT_RESULTS <<-file.path("C:/Users/Mervett_Isbeih/sat_study/sat_results")
FOLDER_SAT_DATA_CLEAN <<-file.path("C:/Users/Mervett_Isbeih/sat_study/sat_data_clean")
# Identifying packages we want (installed below if missing;
# "bit64" and "lubridate" appear twice, which is harmless).
desiredPackages <- c("stringr",
                     "lubridate",
                     "data.table",
                     "bit64",
                     "readxl",
                     "openxlsx",
                     "bit64",
                     "haven",
                     "lubridate",
                     "ggplot2",
                     "irr",
                     "rel",
                     "gridExtra",
                     "openssl",
                     "fmsb",
                     "ICC",
                     "arabicStemR",
                     "lme4",
                     "fs",
                     "fancycut"
)
# Install any package that is not yet present.
for(i in desiredPackages) if(!i %in% rownames(installed.packages())) install.packages(i)
library(data.table)
library(readxl)
# Load in key data set (clinic key: cliniccode -> trial-arm identifiers).
skey<-fread("C:/Users/Mervett_Isbeih/sat_study/sat_data/T2_key.csv", encoding = "UTF-8")
# Load in data for this week (raw survey responses).
satresults<-fread("C:/Users/Mervett_Isbeih/sat_study/sat_data/raw.csv", encoding="UTF-8")
nrow(satresults)
# Right join: keep every survey response, attach clinic key columns.
sat <- merge(skey,satresults, by="cliniccode", all.y = TRUE)
nrow(sat)
sat <- setDT(sat)
nrow(sat)
########### cleaning variables ###########
## Recode the yes/no screening questions into logical columns with
## descriptive names. Each source column qN gets a temporary logical
## twin qNa (TRUE = "yes", FALSE = "no", NA otherwise) which is then
## renamed; the loop replaces five copy-pasted blocks that differed
## only in the column names involved. Appended column order matches
## the original (leavehome, primipreg, usother, ancother, refHR).
yesno_map <- c(q9  = "leavehome",
               q10 = "primipreg",
               q12 = "usother",
               q13 = "ancother",
               q14 = "refHR")
for (src in names(yesno_map)) {
  tmp <- paste0(src, "a")
  sat[, (tmp) := NA]                      # logical NA by default
  sat[get(src) == "no",  (tmp) := FALSE]
  sat[get(src) == "yes", (tmp) := TRUE]
  setnames(sat, tmp, yesno_map[[src]])
}
# q11 is numeric (months pregnant at booking); only renamed.
setnames(sat, "q11", "bookgAmonth")
# Likert-scale item columns q15-q40.
# NOTE(review): this vector is never used anywhere below -- the columns
# are renamed one by one with setnames() instead. It is dead code and a
# candidate for removal (or for driving a vectorised rename).
vars <- c("q15",
          "q16",
          "q17",
          "q18",
          "q19",
          "q20",
          "q21",
          "q22",
          "q23",
          "q24",
          "q25",
          "q26",
          "q27",
          "q28",
          "q29",
          "q30",
          "q31",
          "q32",
          "q33",
          "q34",
          "q35",
          "q36",
          "q37",
          "q38",
          "q39",
          "q40")
# Trial-arm membership flags derived from the ident_* key columns:
# TRUE where the corresponding column is "Y", NA otherwise
# (no FALSE level is ever assigned, exactly as before).
arm_flags <- c(T2          = "ident_TRIAL_2",
               T3          = "ident_TRIAL_3",
               T2T3        = "ident_TRIAL_2_and_3",
               T2T3control = "ident_TRIAL_2_3_Control")
for (flag in names(arm_flags)) {
  sat[, (flag) := as.logical(NA)]
  sat[get(arm_flags[[flag]]) == "Y", (flag) := TRUE]
}
### outliers ###
# Quick frequency checks for implausible values (interactive inspection).
xtabs(~sat$educyears, addNA=TRUE)
xtabs(~sat$district, addNA=T)
### cleaning day end via time end ###
# Strip the time-of-day suffix from `timeend` in several observed
# formats (HH:MM:SS, HH:MM, HH, H:MM), leaving only the date part.
sat[,dayend:=timeend]
sat[,dayend:=stringr::str_remove_all(as.character(dayend)," [0-9][0-9]:[0-9][0-9]:[0-9][0-9]$")]
sat[,dayend:=stringr::str_remove_all(as.character(dayend)," [0-9][0-9]:[0-9][0-9]$")]
sat[,dayend:=stringr::str_remove_all(as.character(dayend)," [0-9][0-9]$")]
sat[,dayend:=stringr::str_remove_all(as.character(dayend)," [0-9]:[0-9][0-9]$")]
library(lubridate)
# Parse as month/day/year; unparseable strings become NA with a warning.
sat[,dayend:=mdy(dayend)]
### cleaning end time ###
# Keep only the time-of-day by stripping the leading date.
# NOTE(review): only "M/DD/YYYY " and "M/D/YYYY " prefixes are handled;
# two-digit months ("MM/...") would slip through -- confirm the export
# format never produces them.
sat[,endtime:=timeend]
unique(sat$endtime)
sat[,endtime:=stringr::str_remove_all(endtime,"^[0-9]/[0-9][0-9]/[0-9][0-9][0-9][0-9] ")]
sat[,endtime:=stringr::str_remove_all(endtime,"^[0-9]/[0-9]/[0-9][0-9][0-9][0-9] ")]
unique(sat$endtime)
### age categories ###
# need to calculate age first
# change birthyear to birthdate and subtract from todays date to get years
unique(sat$birthyear)
# truncated = 2L lets a bare year parse as YYYY-01-01.
sat[,birthyearDate:=lubridate::ymd(birthyear, truncated = 2L)]
unique(sat$birthyearDate)
# NOTE(review): age is computed relative to the date the script runs
# (lubridate::today()), not the interview date -- results drift over
# time; confirm this is intended.
sat[,age:=floor(as.numeric(difftime(lubridate::today(),birthyearDate, units="days")/365.25))]
unique(sat$age)
# Age bands: <=20, 21-24, 25-29, 30-34, 35-39, 40+.
sat[,agecat:=cut(age,
                 breaks=c(0,20,24,29,34,39,100),
                 include.lowest=T)]
xtabs(~sat$agecat, addNA = T)
### educ categories ###
# Years-of-education bands; -1..0 isolates "no education".
sat[,educat:=cut(educyears,
                 breaks=c(-1,0,6,12,16,25),
                 include.lowest=T)]
xtabs(~sat$educat)
### educ level ###
# Map each band to a label. NOTE(review): matching is against cut()'s
# default interval labels ("[-1,0]", "(0,6]", ...) -- if the breaks above
# ever change, these string comparisons silently stop matching.
sat[,edulevel:=as.character(NA)]
sat[educat=="[-1,0]", edulevel:="None"]
sat[educat=="(0,6]", edulevel:="Primary"]
sat[educat=="(6,12]", edulevel:="Secondary"]
sat[educat=="(12,16]", edulevel:="College or University"]
sat[educat=="(16,25]", edulevel:="After college or university"]
### gestational age at booking ### (q11)
# these are months in pregnancy, change to weeks
# (approximation: 4 weeks per month)
sat[,bookgestage:=as.numeric(NA)]
sat[!is.na(bookgAmonth),bookgestage:=4*bookgAmonth]
# bookgAmonthcat: trimesters (months 0-3, 4-6, 7-10)
sat[,bookgAmonthcat:=cut(bookgAmonth,
                         breaks=c(0,3,6,10),
                         include.lowest=T)]
xtabs(~sat$bookgAmonthcat,addNA=T)
# Rename the Likert items q15-q40 to descriptive names, one vectorised
# setnames() call per questionnaire domain (attendance / visit / worry).
setnames(sat,
         old = c("q15", "q16", "q17", "q18", "q19"),
         new = c("attend_allanc", "attend_testdiab", "attend_testanemia",
                 "attend_testhtn", "attend_fg"))
setnames(sat,
         old = c("q20", "q21", "q22", "q23", "q24", "q25", "q26", "q27"),
         new = c("visit_schedvisitconfid", "visit_waittime",
                 "visit_healthstaff", "visit_testpurpose", "visit_testgA",
                 "visit_recommend", "visit_returnnextpreg",
                 "visit_satisfaction"))
setnames(sat,
         old = c("q28", "q29", "q30", "q31", "q32", "q33", "q34",
                 "q35", "q36", "q37", "q38", "q39", "q40"),
         new = c("worry_housing", "worry_money", "worry_partner",
                 "worry_family", "worry_ownhealth", "worry_otherhealth",
                 "worry_employment", "worry_baby", "worry_stillbirth",
                 "worry_hospital", "worry_internalexam",
                 "worry_givingbirth", "worry_coping"))
################ Anonymized Data Set ################
# Choose only first 4 if more than four: number consenting,
# non-withdrawn women within each clinic by interview end date.
# Rows where withdraw is NA are excluded by the i-expression
# (data.table drops NA condition rows, like which()).
sat <- sat[order(clustercode,dayend)]
sat[eligibility=="agree" & withdraw!="yes",SampNum:=1:.N, by=.(clustercode)]
# Make a second variable to include only first four women
sat[,instudy:=as.logical(NA)]
sat[eligibility=="agree" & withdraw!="yes" & SampNum<=4,instudy:=TRUE]
sat[eligibility=="agree" & withdraw!="yes" & SampNum>4, instudy:= FALSE]
## Anonymize the trial arms: A = control, B = intervention (T2 or T3).
## NOTE(review): comparisons use `T2T3control==T` with the reassignable
## alias T; prefer TRUE (or simply `T2T3control`) when next edited.
sat[,exposure:=as.character(NA)]
sat[T2T3control==T, exposure:="A"]
sat[T2==T|T3==T, exposure:="B"]
xtabs(~sat[eligibility=="agree" & withdraw!="yes"]$exposure=="B", addNA=T)
# Recruitment outcome counts by eligibility status.
# NOTE(review): these sums have no na.rm -- an NA exposure makes the
# whole cell NA; confirm exposure is never NA for these rows.
nocalls<-sat[withdraw!="yes",.(N=.N,
                               Control=sum(exposure=="A"),
                               Intervention=sum(exposure=="B")),
             keyby=.(eligibility)]
# Anonymized collector code (one letter per data collector).
sat[,collectorcode:=as.character(NA)]
sat[collector=="naila", collectorcode:="A"]
sat[collector=="entisar", collectorcode:="B"]
sat[collector=="khadija", collectorcode:="C"]
sat[collector=="najah", collectorcode:="D"]
### vars for anonymization data set
# Columns retained in the shareable, anonymized file: anonymized codes,
# background characteristics, and the renamed questionnaire items.
# Direct identifiers (collector name, raw ident_* columns) are dropped.
varskeep <- c("SampNum",
              "instudy",
              "clustercode",
              "exposure",
              "collectorcode",
              "clinicsize",
              "timestarted",
              "timeend",
              "dayend",
              "endtime",
              "samplnum",
              "eligibility",
              "herphone",
              "district",
              "agecat",
              "educat",
              "edulevel",
              "leavehome",
              "primipreg",
              "bookgAmonth",
              "bookgAmonthcat",
              "usother",
              "ancother",
              "refHR",
              "attend_allanc",
              "attend_testdiab",
              "attend_testanemia",
              "attend_testhtn",
              "attend_fg",
              "visit_schedvisitconfid",
              "visit_waittime",
              "visit_healthstaff",
              "visit_testpurpose",
              "visit_testgA",
              "visit_recommend",
              "visit_returnnextpreg",
              "visit_satisfaction",
              "worry_housing",
              "worry_money",
              "worry_partner",
              "worry_family",
              "worry_ownhealth",
              "worry_otherhealth",
              "worry_employment",
              "worry_baby",
              "worry_stillbirth",
              "worry_hospital",
              "worry_internalexam",
              "worry_givingbirth",
              "worry_coping",
              "callbackanothertime",
              "callended_1",
              "callended_2",
              "callended_3",
              "withdraw")
# Subset and export the anonymized data set, date-stamped.
satKeep <- sat[,varskeep, with=F]
satKeep <- setDT(satKeep)
nrow(satKeep)
openxlsx::write.xlsx(satKeep,
                     file.path(FOLDER_SAT_DATA_CLEAN,
                               sprintf("Sat_data_clean_%s.xlsx",lubridate::today())))
# Background characteristics table: counts of each category value by
# trial arm, for the women actually in the study (first 4 per clinic).
background <- c("agecat",
                "edulevel",
                "bookgAmonthcat",
                "leavehome",
                "primipreg",
                "refHR",
                "usother",
                "ancother")
smallD<-satKeep[instudy==T,c("exposure",background), with=F]
# Long format: one row per (exposure, variable, value) observation.
long <- melt.data.table(smallD,
                        id.vars=c("exposure"),variable.factor = F)
# Category counts overall and per arm (A = control, B = intervention).
uglytable <- long[,
                  .(
                    N=.N,
                    control=sum(exposure=="A"),
                    intervention=sum(exposure=="B")),
                  keyby=.(
                    variable,value)]
openxlsx::write.xlsx(uglytable,
                     file.path(
                       FOLDER_SAT_RESULTS,
                       "freqtabs",
                       sprintf("Background_%s.xlsx", lubridate::today())))
################ Primary outcome ################
# Primary outcomes are the worry_* items: per-arm means/SDs, plus
# two-sample t-tests (Welch, t.test default) for each item.
primary <- names(satKeep)[stringr::str_detect(names(satKeep),"^worry_")]
smallD<-satKeep[instudy==T,c("exposure",primary), with=F]
long <- melt.data.table(smallD,
                        id.vars=c("exposure"),variable.factor = F)
# Mean and SD of each worry item by arm, rounded for the report.
uglytable <- long[,
                  .(
                    N=.N,
                    mean=round(mean(value, na.rm=T),digits=2),
                    sd=round(sd(value, na.rm=T),digits=2)),
                  keyby=.(exposure,variable)]
openxlsx::write.xlsx(uglytable,
                     file.path(
                       FOLDER_SAT_RESULTS,
                       "freqtabs",
                       sprintf("%s_Primary_outcomes(rounded).xlsx", lubridate::today())))
# confidence intervals
#t.test(var1~exposure, data=long, conf.level=0.95)
#t.test(var2~exposure, data=long, conf.level=0.95)
#t.test(var3~exposure, data=long, conf.level=0.95)
#variables_to_test <- c("worry_baby", "worry_coping", "worry_mondy")
# One t-test per worry item; results collected in a preallocated list.
# NOTE(review): the tests run on the full satKeep (including rows with
# instudy FALSE/NA), while the table above is restricted to instudy==T
# -- confirm which population the confidence intervals should cover.
retval <- vector("list", length=length(primary))
for(i in seq_along(retval)){
  var_of_interest <- primary[i]
  formula <- glue::glue("{var_of_interest} ~ exposure")
  fit <- t.test(as.formula(formula), data = satKeep, conf.level=0.95)
  ### extract results here
  #temp <- data.frame(conf_level=fit$conf.int, var = var_of_interest)
  temp <- data.frame(conf_level_l95=fit$conf.int[1],
                     conf_level_u95=fit$conf.int[2],
                     statistic=fit$statistic,var = var_of_interest)
  retval[[i]] <- temp
}
retval <- rbindlist(retval)
openxlsx::write.xlsx(retval,
                     file.path(
                       FOLDER_SAT_RESULTS,
                       "freqtabs",
                       sprintf("%s_Primary_outcomes_Confidence_Intervals.xlsx", lubridate::today())))
################ Frequency Tables ################
#### use satkeep for analysis
# Distribution of every Likert item (response values 0-5) by trial arm,
# for women in the study.
# vars for frequency tables
freqvars <- c("attend_allanc",
              "attend_testdiab",
              "attend_testanemia",
              "attend_testhtn",
              "attend_fg",
              "visit_schedvisitconfid",
              "visit_waittime",
              "visit_healthstaff",
              "visit_testpurpose",
              "visit_testgA",
              "visit_recommend",
              "visit_returnnextpreg",
              "visit_satisfaction",
              "worry_housing",
              "worry_money",
              "worry_partner",
              "worry_family",
              "worry_ownhealth",
              "worry_otherhealth",
              "worry_employment",
              "worry_baby",
              "worry_stillbirth",
              "worry_hospital",
              "worry_internalexam",
              "worry_givingbirth",
              "worry_coping")
smallD<-sat[instudy==T,c("exposure",
                         freqvars),
            with=F
            ]
long <- melt.data.table(smallD, id.vars=c(
  "exposure"
),variable.factor = F)
# One column per response value, plus non-missing and missing counts.
uglytable <- long[,
                  .(
                    not_NA=sum(!is.na(value)),
                    value0=sum(value==0,na.rm=T),
                    value1=sum(value==1, na.rm=TRUE),
                    value2=sum(value==2, na.rm=T),
                    value3=sum(value==3, na.rm=T),
                    value4=sum(value==4, na.rm=T),
                    value5=sum(value==5, na.rm=T),
                    Missing=sum(is.na(value))
                  ),
                  keyby=.(
                    variable,
                    exposure)
                  ]
openxlsx::write.xlsx(uglytable,
                     file.path(
                       FOLDER_SAT_RESULTS,
                       "freqtabs",
                       sprintf("frequencies_%s.xlsx", lubridate::today())))
############ Completeness Report and Numbers For data extraction ############
# Completeness report: for consenting, non-withdrawn women, the count of
# non-missing responses per question (plus TRUE/FALSE counts for the
# recoded yes/no items). Used to monitor data quality, not for analysis.
DQ <- sat[eligibility=="agree" & withdraw!="yes",.(
  N=.N,
  herphone=sum(herphone=="yes"|herphone=="no", na.rm=T),
  district=sum(!is.na(district)),
  birthyear=sum(!is.na(birthyear)),
  educyears=sum(!is.na(educyears)),
  #meanEdu=mean(q11, na.rm = T),
  notmissing_q9=sum(!is.na(q9a)),
  q9T=sum(q9a==T, na.rm=T),
  q9F=sum(q9a==F, na.rm=T),
  notmissing_q10=sum(!is.na(q10a)),
  q10T=sum(q10a==T, na.rm=T),
  q10F=sum(q10a==F, na.rm=T),
  notmissing_q11=sum(!is.na(q11)),
  notmissing_q12=sum(!is.na(q12a)),
  q12T=sum(q12a==T, na.rm=T),
  q13T=sum(q13a==T, na.rm=T),
  q13F=sum(q13a==F, na.rm=T),
  notmissing_q14=sum(!is.na(q14a)),
  q14T=sum(q14a==T, na.rm=T),
  notmissing_q15=sum(!is.na(q15)),
  notmissing_q16=sum(!is.na(q16)),
  notmissing_q17=sum(!is.na(q17)),
  notmissing_q18=sum(!is.na(q18)),
  notmissing_q19=sum(!is.na(q19)),
  notmissing_q20=sum(!is.na(q20)),
  notmissing_q21=sum(!is.na(q21)),
  notmissing_q22=sum(!is.na(q22)),
  notmissing_q23=sum(!is.na(q23)),
  notmissing_q24=sum(!is.na(q24)),
  notmissing_q25=sum(!is.na(q25)),
  notmissing_q26=sum(!is.na(q26)),
  notmissing_q27=sum(!is.na(q27)),
  notmissing_q28=sum(!is.na(q28)),
  notmissing_q29=sum(!is.na(q29)),
  notmissing_q30=sum(!is.na(q30)),
  notmissing_q31=sum(!is.na(q31)),
  notmissing_q32=sum(!is.na(q32)),
  notmissing_q33=sum(!is.na(q33)),
  notmissing_q34=sum(!is.na(q34)),
  notmissing_q35=sum(!is.na(q35)),
  notmissing_q36=sum(!is.na(q36)),
  notmissing_q37=sum(!is.na(q37)),
  notmissing_q38=sum(!is.na(q38)),
  notmissing_q39=sum(!is.na(q39)),
  notmissing_q40=sum(!is.na(q40)))]
# NOTE(review): q9a-q14a were renamed (leavehome, primipreg, ...) in the
# recoding section above -- these references only resolve if this report
# is run before the renames, or against a `sat` that still carries the
# qNa names; confirm the intended execution order.
openxlsx::write.xlsx(DQ,file.path(FOLDER_SAT_RESULTS,
                                  sprintf("%s_Completeness_Report.xlsx",
                                          lubridate::today())))
############ Data Extraction Reports ############
# Weekly recruitment counts per clinic.
# Fix: the agreebutwithdraw sums now pass na.rm=T like every sibling
# count, so a single missing eligibility/withdraw value can no longer
# turn the whole cell into NA.
satcounts <- sat[,.(N=.N,
                    agree=sum(eligibility=="agree" & withdraw!="yes", na.rm=T),
                    disagree=sum(eligibility=="disagree", na.rm=T),
                    cantcontact=sum(eligibility=="cantcontact", na.rm=T),
                    ineligible=sum(eligibility=="ineligiblegA", na.rm=T),
                    agreebutwithdraw=sum(eligibility=="agree" &
                                           withdraw=="yes", na.rm=T)),
                 keyby=.(weeknum,clustercode)]
openxlsx::write.xlsx(satcounts,file.path(FOLDER_SAT_RESULTS,
                                         sprintf("%s_satcounts_clinic.xlsx",
                                                 lubridate::today())))
# Same counts aggregated over the whole study period (no week split).
satcountsTotal <- sat[,.(N=.N,
                         agree=sum(eligibility=="agree" & withdraw!="yes", na.rm=T),
                         disagree=sum(eligibility=="disagree", na.rm=T),
                         cantcontact=sum(eligibility=="cantcontact", na.rm=T),
                         ineligible=sum(eligibility=="ineligiblegA", na.rm=T),
                         agreebutwithdraw=sum(eligibility=="agree" &
                                                withdraw=="yes", na.rm=T)),
                      keyby=.(clustercode)]
openxlsx::write.xlsx(satcountsTotal,file.path(FOLDER_SAT_RESULTS,
                                              sprintf("%s_satcounts_clinicTotals.xlsx",
                                                      lubridate::today())))
# Women who agreed but later withdrew, per week and clinic.
removed <- sat[withdraw=="yes" & eligibility=="agree",.(N=.N), keyby=.(weeknum,clustercode)]
# Clinics still short of the 4-women recruitment target.
# NOTE(review): is.na(N) can never be TRUE (.N is always an integer);
# clinics with zero recruits are absent from this table entirely.
sendout <- sat[withdraw!="yes" & eligibility=="agree",]
sendout <-sendout[,.(N=.N), keyby=.(clustercode)]
sendout <- sendout[is.na(N) | N<4,]
openxlsx::write.xlsx(sendout,file.path(FOLDER_SAT_RESULTS,
                                       sprintf("%s_send out list.xlsx",
                                               lubridate::today())))
# Recruitment outcome counts per data collector.
collectornums <- sat[,.(N=.N,
                        agree=sum(eligibility=="agree" &
                                    withdraw!="yes", na.rm=T),
                        disagree=sum(eligibility=="disagree", na.rm=T),
                        cantcontact=sum(eligibility=="cantcontact", na.rm=T),
                        ineligible=sum(eligibility=="ineligiblegA", na.rm=T),
                        withdraw=sum(withdraw=="yes", na.rm=T)),
                     keyby=.(collector)]
|
3ea3b28206deb089a1cb8b58c02fa3ecf5e135c5
|
9e77527c480d453d64b317f1261f842f260efa6c
|
/code/06_variables.R
|
a68552a89c1d6ee3bf7b890752abc198e2dc92b3
|
[] |
no_license
|
AnnikaErtel/CropDiversity_NutritionalSupply
|
e636663df614002d2c25c643317b8787388b9d81
|
34bdcd6582eca1984f031ecd84480d1d5b7e7aba
|
refs/heads/main
| 2023-06-27T03:45:18.198520
| 2021-07-28T09:41:15
| 2021-07-28T09:41:15
| 360,645,197
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 20,243
|
r
|
06_variables.R
|
##### Affiliation
# Annika Ertel
# Leipzig University / Institute of Geography
# Student ID (Matrikelnummer): 3710313
# SCRIPT 6: Preparation of other variables
#### Setting up ####
# NOTE(review): setwd() + rm(list=ls()) are kept for parity with the
# other scripts in this project, but both are discouraged in scripts.
setwd("~/data/MAS-group-share/04_personal/Annika/CropDiversity_NutritionalStability_new")
rm(list=ls())
library(tidyverse)
library(readxl)
library(countrycode)
#### Load data
target_country<-read.csv("data/target_country.csv")
fertilizer<-read.csv("data/variables/FAOSTAT_fertilizer.csv")
irrigation<-read_csv("data/variables/Area_equ_Irrigation.csv")
warfare<-read_xls("data/variables/warefare.xls")
gdp<-read_csv("data/pop_data/worldbank/GDP_per_capita.csv")
#final_ISO<-read_csv("data/final_ISO.csv") #final Country selection (done at the end of this skript)
agriculture<-read.csv("data/FAOstat/FAO_Agriculture.csv")
livestock<-read.csv("data/FAOstat/FAO_Livestock.csv")
####GDP PPP per capita####
# World Bank wide-format table -> per-country decade means, 1961-2010.
# only take target country
gdp<-gdp[gdp$`Country Code` %in% target_country$ISO,]
# Year columns arrive as e.g. "1961 [YR1961]"; keep only the 4-digit year.
colnames(gdp)[5:65]<-colnames(gdp)[5:65]%>%
  str_sub(start = 1L, end = 4L)
# Keep country code + 1961-2010 columns.
# NOTE(review): hardcoded positions c(2,6:55) silently break if the
# World Bank export layout changes -- select by name when next edited.
gdp<-gdp[,c(2,6:55)]
#from wide to long
gdp<-pivot_longer(gdp, cols = "1961":"2010", names_to = "Year", values_to = "gdp_per_capita_USD")
#Assign time Periods in 10 year intervals
gdp$timePeriod=0
gdp[gdp$Year%in%c(1961:1970),"timePeriod"] = 1961
gdp[gdp$Year%in%c(1971:1980),"timePeriod"] = 1971
gdp[gdp$Year%in%c(1981:1990),"timePeriod"] = 1981
gdp[gdp$Year%in%c(1991:2000),"timePeriod"] = 1991
gdp[gdp$Year%in%c(2001:2010),"timePeriod"] = 2001
# World Bank encodes missing values as ".."; convert to NA.
gdp[gdp==".."]<-NA
sum(is.na(gdp)) #104
# Diagnostic: how many non-missing years per country and decade
# (gdp_count is only inspected, not used further).
gdp_count<-na.omit(gdp)
gdp_count<-gdp_count%>%
  group_by(`Country Code`)%>%
  group_by(timePeriod, .add=T)%>%
  dplyr::summarise(rowCount= n())
# Mean GDP per decade. Unlike the FAO variables below, no complete-
# series filter is applied here; na.rm=T averages whatever is present.
gdp$gdp_per_capita_USD<-as.numeric(gdp$gdp_per_capita_USD)
gdp<-gdp%>%
  group_by(`Country Code`)%>%
  group_by(timePeriod, .add=T)%>%
  dplyr::summarise("gdp_per_capita_USD"= mean(`gdp_per_capita_USD`, na.rm= T))
#rename countrycode for later join
colnames(gdp)[1]<-"ISO"
# I will add all data to final data even if there are some NA`s values
# -> have to be adressed before analysis!
####Agriculture area####
# FAO agricultural area -> per-country decade means, keeping only
# decades with a complete 10-year series.
# only take target country
agriculture<-agriculture[agriculture$Area.Code %in% target_country$Area.Code,]
# adapt region names (fao code -> iso3)
agriculture$ISO <- countrycode(agriculture$`Area.Code`, 'fao', 'iso3c') # no important regions missing
# only keep target years
agriculture <- agriculture[which(agriculture$Year%in%1961:2010),]
# Rename the FAO value column.
# Fix: the original chained assignment
#   agriculture$Value <- names(agriculture)[...] <- "agriculture_area_ha"
# also created a junk character column `Value` (filled with the literal
# string) that was only dropped by the select below; a plain rename
# produces the same end result without the junk column.
names(agriculture)[names(agriculture)=="Value"] <- "agriculture_area_ha"
agriculture<-agriculture[c("ISO", "Year", "agriculture_area_ha")]
#Assign time Periods in 10 year intervals
agriculture$timePeriod <- 0
agriculture[agriculture$Year%in%c(1961:1970),"timePeriod"] = 1961
agriculture[agriculture$Year%in%c(1971:1980),"timePeriod"] = 1971
agriculture[agriculture$Year%in%c(1981:1990),"timePeriod"] = 1981
agriculture[agriculture$Year%in%c(1991:2000),"timePeriod"] = 1991
agriculture[agriculture$Year%in%c(2001:2010),"timePeriod"] = 2001
# Count observations per country and decade ...
agriculture_count<-agriculture%>%
  group_by(ISO)%>%
  group_by(timePeriod, .add=T)%>%
  dplyr::summarise(rowCount= n())
# ... and keep only the decades where the 10-year series is complete.
agriculture_country<-agriculture_count%>%filter(rowCount==10) #find out which is complete
agriculture_country<-agriculture_country[,c("ISO", "timePeriod")] #selects info for filter
agriculture_merge<-merge(agriculture_country,agriculture) #takes only those timePeriods where the series is complete
agriculture_merge<-agriculture_merge[,c("ISO", "timePeriod","agriculture_area_ha")]
#calculate mean per decade
agriculture_mean<-agriculture_merge%>%
  group_by(ISO)%>%
  group_by(timePeriod, .add=T)%>%
  dplyr::summarise("agriculture_area_ha"= mean(`agriculture_area_ha`))
#### Livestock ####
# FAO livestock (LSU) -> per-country decade means over complete series.
# NOTE(review): this re-read duplicates the load at the top of the
# script; kept so the section stays self-contained / re-runnable.
livestock<-read.csv("data/FAOstat/FAO_Livestock.csv")
# only take target country
livestock<-livestock[livestock$Area %in% target_country$Area,]
# adapt region names (fao code -> iso3)
livestock$ISO <- countrycode(livestock$`Area.Code`, 'fao', 'iso3c') # no important regions missing
# only keep target years
livestock <- livestock[which(livestock$Year%in%1961:2010),]
# Rename the FAO value column.
# Fix: the original chained assignment (`livestock$Value <- names(...)
# <- "Livestock_LSU"`) also created a junk character column `Value`;
# a plain rename gives the same end result without it.
names(livestock)[names(livestock)=="Value"] <- "Livestock_LSU"
livestock<-livestock[c("ISO", "Year", "Livestock_LSU")]
# Sum all livestock types per country/year (LSU already normalises
# different animals to a common bodyweight unit).
livestock<-livestock%>%
  group_by(ISO)%>%
  group_by(Year, .add = T)%>%
  dplyr::summarise(Livestock_LSU =sum(Livestock_LSU))
#Assign time Periods in 10 year intervals
livestock$timePeriod <- 0
livestock[livestock$Year%in%c(1961:1970),"timePeriod"] = 1961
livestock[livestock$Year%in%c(1971:1980),"timePeriod"] = 1971
livestock[livestock$Year%in%c(1981:1990),"timePeriod"] = 1981
livestock[livestock$Year%in%c(1991:2000),"timePeriod"] = 1991
livestock[livestock$Year%in%c(2001:2010),"timePeriod"] = 2001
# Count observations per country and decade ...
livestock_count<-livestock%>%
  group_by(ISO)%>%
  group_by(timePeriod, .add=T)%>%
  dplyr::summarise(rowCount= n())
# ... and keep only the decades where the 10-year series is complete.
livestock_country<-livestock_count%>%filter(rowCount==10) #find out which is complete
livestock_country<-livestock_country[,c("ISO", "timePeriod")] #selects info for filter
livestock_merge<-merge(livestock_country,livestock) #takes only those timePeriods where the series is complete
livestock_merge<-livestock_merge[,c("ISO", "timePeriod","Livestock_LSU")]
#calculate mean per decade
livestock_mean<-livestock_merge%>%
  group_by(ISO)%>%
  group_by(timePeriod, .add=T)%>%
  dplyr::summarise("Livestock_LSU"= mean(`Livestock_LSU`))
#### Fertilizer data #####
# FAO nitrogen use per cropland area -> per-country decade means over
# complete series.
# only take target country
fertilizer<-fertilizer[fertilizer$Area %in% target_country$Area,]
# adapt region names (fao code -> iso3)
fertilizer$ISO <- countrycode(fertilizer$`Area.Code`, 'fao', 'iso3c') # no important regions missing
# only keep target years
fertilizer <- fertilizer[which(fertilizer$Year%in%1961:2010),]
# Rename the FAO value column.
# Fix: the original chained assignment (`fertilizer$Value <- names(...)
# <- "N_use/croparea_in_kg/ha"`) also created a junk character column
# `Value`; a plain rename gives the same end result without it.
names(fertilizer)[names(fertilizer)=="Value"] <- "N_use/croparea_in_kg/ha"
fertilizer<-fertilizer[c("ISO", "Year", "N_use/croparea_in_kg/ha")]
#Assign time Periods in 10 year intervals
fertilizer$timePeriod <- 0
fertilizer[fertilizer$Year%in%c(1961:1970),"timePeriod"] = 1961
fertilizer[fertilizer$Year%in%c(1971:1980),"timePeriod"] = 1971
fertilizer[fertilizer$Year%in%c(1981:1990),"timePeriod"] = 1981
fertilizer[fertilizer$Year%in%c(1991:2000),"timePeriod"] = 1991
fertilizer[fertilizer$Year%in%c(2001:2010),"timePeriod"] = 2001
# Count observations per country and decade ...
fertilizer_count<-fertilizer%>%
  group_by(ISO)%>%
  group_by(timePeriod, .add=T)%>%
  dplyr::summarise(rowCount= n())
# ... and keep only the decades where the 10-year series is complete.
fertilizer_country<-fertilizer_count%>%filter(rowCount==10) #find out which is complete
fertilizer_country<-fertilizer_country[,c("ISO", "timePeriod")] #selects info for filter
fertilizer_merge<-merge(fertilizer_country,fertilizer) #takes only those timePeriods where the series is complete
fertilizer_merge<-fertilizer_merge[,c("ISO", "timePeriod","N_use/croparea_in_kg/ha")]
#calculate mean per decade
fertilizer_mean<-fertilizer_merge%>%
  group_by(ISO)%>%
  group_by(timePeriod, .add=T)%>%
  dplyr::summarise("N_use/croparea_in_kg/ha"= mean(`N_use/croparea_in_kg/ha`))
####Irrigation data#####
#-> share of land area equipped for irrigation; same pipeline as the
# other FAO variables: decade means over complete 10-year series.
# only take target country
irrigation<-irrigation[irrigation$`Area Code` %in% target_country$Area.Code,]
# adapt region names (fao.code-> iso)
irrigation$ISO <- countrycode(irrigation$`Area Code`, 'fao', 'iso3c') # no important regions missing
# only keep target year
irrigation <- irrigation[which(irrigation$Year%in%1961:2010),]
#only important info
names(irrigation)[names(irrigation)=="Value"]<-"Land_area_equ._for-Irrigation_%"
irrigation<-irrigation[c("ISO", "Year", "Land_area_equ._for-Irrigation_%")]
#Assign time Periods in 10 year intervals
irrigation$timePeriod=0
irrigation[irrigation$Year%in%c(1961:1970),"timePeriod"] = 1961
irrigation[irrigation$Year%in%c(1971:1980),"timePeriod"] = 1971
irrigation[irrigation$Year%in%c(1981:1990),"timePeriod"] = 1981
irrigation[irrigation$Year%in%c(1991:2000),"timePeriod"] = 1991
irrigation[irrigation$Year%in%c(2001:2010),"timePeriod"] = 2001
#count per country and decade
irrigation_count<-irrigation%>%
  group_by(ISO)%>%
  group_by(timePeriod, .add=T)%>%
  dplyr::summarise(rowCount= n())
#only those timePeriods where the time series is complete
irrigation_country<-irrigation_count%>%filter(rowCount==10) #find out which is complete
irrigation_country<-irrigation_country[,c("ISO", "timePeriod")] #selects info for filter
irrigation_merge<-merge(irrigation_country,irrigation) #takes only those timePeriods were series is complete
irrigation_merge<-irrigation_merge[,c("ISO", "timePeriod","Land_area_equ._for-Irrigation_%")]
#calculate mean per decade
irrigation_mean<-irrigation_merge%>%
  group_by(ISO)%>%
  group_by(timePeriod, .add=T)%>%
  dplyr::summarise("Land_area_equ._for-Irrigation_%"= mean(`Land_area_equ._for-Irrigation_%`))
####Warefare####
#-> number of armed conflicts per country-year, averaged per decade.
# Formerly divided countries (Yemen, Vietnam) are merged under their
# present-day ISO code; Czechoslovakia is dropped (see note below).
# adapt region names (country name -> iso3)
warfare$ISO<- countrycode(warfare$country, 'country.name', 'iso3c') # from country name to ISO
#Some values were not matched unambiguously: Czechoslovakia, Germany East, Kosovo, Serbia and Montenegro, Vietnam South, Yemen North, Yemen South, Yugoslavia
# only keep target year
warfare <- warfare[which(warfare$year%in%1961:2010),]
#target column (number of armed conflicts)
warfare <- warfare[,c("ISO","country","year","actotal")]
#remove na's and countries with no armed conflicts
# (missing counts are re-interpreted as zero when joining later)
warfare <- warfare[!warfare$actotal==0,]
# Check which countries need to be prepared/translated
warfare$ISO<- countrycode(warfare$country, 'country.name', 'iso3c')
#Some values were not matched unambiguously: Czechoslovakia, Vietnam South, Yemen North, Yemen South, Yugoslavia
# combine north and south yemen (pre-1990 unification)
Yemen_N <- warfare[which(warfare$country=="Yemen North" & warfare$year%in%1961:1990),]
Yemen_N$ISO <- "YEM"
Yemen_S <- warfare[which(warfare$country=="Yemen South" & warfare$year%in%1961:1990),]
Yemen_S$ISO <- "YEM"
Yemen_comb <-merge(Yemen_N, Yemen_S, all= T)
Yemen_comb <-Yemen_comb %>% group_by(year) %>% dplyr::summarise(actotal=sum(actotal))
Yemen_comb$ISO<-"YEM"
#combine North and South Vietnam (pre-1975 unification)
Viet_N <- warfare[which(warfare$country=="Vietnam North" & warfare$year%in%1961:1975),]
Viet_N$ISO <- "VNM"
Viet_S <- warfare[which(warfare$country=="Vietnam South" & warfare$year%in%1961:1975),]
Viet_S$ISO <- "VNM"
Viet_comb <-merge(Viet_N, Viet_S, all= T)
Viet_comb <-Viet_comb %>% group_by(year) %>% dplyr::summarise(actotal=sum(actotal))
Viet_comb$ISO<-"VNM"
#Czechoslovakia
#Prague Spring 1968 happened on the territory of Czechoslovakia, which no
#longer exists. -> I remove the conflict as this period is not assessed
#for that region.
#remove formerly divided countries and Czechoslovakia
# NOTE(review): `-which(...)` would drop ALL rows if none of these
# countries were present (which(...) returning integer(0)); safe here
# only because the names are known to occur.
warfare<-rbind(warfare[-which(warfare$country%in% c("Yemen North", "Yemen South", "Vietnam North","Vietnam South","Czechoslovakia")),])
#remove country col (so that dfs merge)
warfare$country<-NULL
#add aggregate countries instead (rbind matches columns by name)
warfare<-rbind(warfare,Viet_comb,Yemen_comb)
# only take target country
warfare<-warfare[warfare$ISO %in% target_country$ISO,]
#Assign time Periods in 10 year intervals
warfare$timePeriod=0
warfare[warfare$year%in%c(1961:1970),"timePeriod"] = 1961
warfare[warfare$year%in%c(1971:1980),"timePeriod"] = 1971
warfare[warfare$year%in%c(1981:1990),"timePeriod"] = 1981
warfare[warfare$year%in%c(1991:2000),"timePeriod"] = 1991
warfare[warfare$year%in%c(2001:2010),"timePeriod"] = 2001
#calculate mean per decade
warfare<-warfare%>%
  group_by(ISO)%>%
  group_by(timePeriod, .add=T)%>%
  dplyr::summarise("actotal"= mean(`actotal`))
# NOTE(review): after summarise() the columns are ISO/timePeriod/actotal
# -- there is no 'year' column left, so this rename is a no-op.
names(warfare)[names(warfare) == 'year'] <- 'Year'
##### Combine all explanatory variables into one table #####
# Full-join every per-decade country table on their shared key columns
# (same result as the original chained full_join pipeline).
variable_tables <- list(fertilizer_mean, irrigation_mean, warfare,
                        gdp, agriculture_mean, livestock_mean)
final_data <- Reduce(full_join, variable_tables)
# A country-decade absent from the warfare table had no recorded armed
# conflict, so missing conflict counts mean zero.
final_data$actotal[is.na(final_data$actotal)] <- 0
# Drop country-decades where any other variable is missing.
n_distinct(final_data$ISO) # 94 before removal
final_data <- na.omit(final_data)
n_distinct(final_data$ISO)
write.csv(final_data, "data/data_for_analysis/variables.csv")
####ALL DATA TOGETHER####
#### LOAD DATA ####
# Load every pre-computed table for the analysis. The X1 column dropped
# after each read is the row-number column written by write.csv().
diversity <- read_csv("data/data_for_analysis/diversity.csv")
diversity$X1<-NULL
fulfilled_nutr <- read_csv("data/data_for_analysis/fulfilled_nutr.csv")
fulfilled_nutr$X1<-NULL
variables <- read_csv("data/data_for_analysis/variables.csv")
variables$X1<-NULL
climate<- read_csv("data/data_for_analysis/egli_climate_national.csv")
target_country <- read_csv("data/target_country.csv")
country_info<- read_csv("data/spatial/UNSD - Methodology.csv")
selfsuf_food_basket<-read_csv("data/data_for_analysis/selfsuf_food_basket.csv")
selfsuf_food_basket$X1<-NULL
# sd_fulfilled_nutr<-read_csv("data/data_for_analysis/fulfilled_nutr_sd.csv")
# sd_fulfilled_nutr$X1<-NULL
# #Not really used in analysis
# self_suffiency<-read_csv("data/data_for_analysis/fulfilled_selfsuffiency.csv")
# self_suffiency$X1<-NULL
# self_suf_2<-read_csv("data/data_for_analysis/share_prod_in_fulfilled_sup_ISO.csv")
# self_suf_2$X1<-NULL
#### country information ####
#####country grouping####
# Grouping follows the regional groups the UN uses for SDG reporting.
final_ISO<-read_csv("data/final_ISO.csv")
final_ISO$X1<-NULL
country_info<- read_csv("data/spatial/UNSD - Methodology.csv")
#only those countries which will be assessed
country_info<-country_info%>%
  filter(`ISO-alpha3 Code`%in% target_country$ISO)%>%
  dplyr::select(c(`ISO-alpha3 Code`, `Developed / Developing Countries`, `Sub-region Name`)) #select cols
#country grouping; regional groups: https://unstats.un.org/sdgs/indicators/regional-groups
# Each RegionN helper column holds the SDG group name when the
# sub-region matches, "" otherwise; paste0() below then concatenates
# them into a single Region column (at most one is non-empty per row).
country_info$Region1<-ifelse(country_info$`Sub-region Name`=='Sub-Saharan Africa', "Sub_Saharan_Africa", "")
country_info$Region2<-ifelse(country_info$`Sub-region Name`%in% c("Northern Africa", "Western Asia"), "Northern_Africa_and_Western_Asia", "")
country_info$Region3<-ifelse(country_info$`Sub-region Name`%in% c("Central Asia", "Southern Asia"), "Central_and_Southern_Asia", "")
country_info$Region4<-ifelse(country_info$`Sub-region Name`%in% c("Eastern Asia", "South-eastern Asia"), "Eastern_and_South_Eastern_Asia", "")
country_info$Region5<-ifelse(country_info$`Sub-region Name` == "Latin America and the Caribbean", "Latin_America_and_the_Caribean", "")
country_info$Region6<-ifelse(country_info$`Sub-region Name` == "Oceania", "Oceania", "")
country_info$Region7<-ifelse(country_info$`Sub-region Name` == "Australia and New Zealand", "Australia_and_New_Zealand", "")
country_info$Region8<-ifelse(country_info$`Sub-region Name` %in% c("Eastern Europe", "Northern Europe", "Southern Europe", "Western Europe", "Northern America"), "Europe_and_Northern_America", "")
# Concatenate the helpers into one Region column, then drop them.
# NOTE(review): an NA Sub-region Name would yield Region "NANA...NA"
# (ifelse propagates NA into paste0); confirm the UNSD file has no
# missing sub-regions for the assessed countries.
country_info$Region<-with(country_info, paste0(Region1, Region2, Region3 , Region4, Region5, Region6, Region7, Region8))
country_info<-country_info%>%dplyr::select(!4:11)
#rename for later join
names(country_info)[names(country_info) == 'ISO-alpha3 Code'] <- 'ISO'
#### Preparation for Analysis####
# Climate: the within-year standard deviations (sdTemp/sdPrec) are used
# as a measure of climatic stability; averaged per decade below.
# only take target country
names(climate)[names(climate) == 'Level'] <- 'ISO'
climate<-climate[climate$ISO %in% target_country$ISO,]
# only keep target year
climate <- climate[which(climate$Year%in%1961:2010),]
#Assign time Periods in 10 year intervals
climate$timePeriod=0
climate[climate$Year%in%c(1961:1970),"timePeriod"] = 1961
climate[climate$Year%in%c(1971:1980),"timePeriod"] = 1971
climate[climate$Year%in%c(1981:1990),"timePeriod"] = 1981
climate[climate$Year%in%c(1991:2000),"timePeriod"] = 1991
climate[climate$Year%in%c(2001:2010),"timePeriod"] = 2001
#calculate mean per decade (temperature and precipitation separately)
sd_Temp<-climate%>%
  group_by(ISO)%>%
  group_by(timePeriod, .add=T)%>%
  dplyr::summarise("sd_Temp"= mean(`sdTemp`))
sd_Prec<-climate%>%
  group_by(ISO)%>%
  group_by(timePeriod, .add=T)%>%
  dplyr::summarise("sd_Prec"= mean(`sdPrec`))
# Prefix the self-sufficiency columns so they stay distinguishable
# after the big join; the two key columns keep their original names.
colnames(selfsuf_food_basket) <- paste0( "self_suf_food_basket_", colnames(selfsuf_food_basket))
names(selfsuf_food_basket)[names(selfsuf_food_basket) == 'self_suf_food_basket_timePeriod'] <- 'timePeriod'
names(selfsuf_food_basket)[names(selfsuf_food_basket) == 'self_suf_food_basket_ISO'] <- 'ISO'
##### one final timePeriod/country selection####
# Join every table on the shared keys (ISO, timePeriod), then keep only
# countries with complete data in all five decades.
dat<-selfsuf_food_basket%>%
  full_join(fulfilled_nutr)%>%
  full_join(variables)%>%
  full_join(sd_Prec)%>%
  full_join(sd_Temp)%>%
  full_join(diversity)%>%
  #full_join(self_suffiency)%>%
  #full_join(self_suf_2)%>%
  full_join(country_info)
#full_join(sd_fulfilled_nutr)
n_distinct(dat$ISO) #94
# Missing conflict counts mean no recorded conflict.
dat$actotal[is.na(dat$actotal)] <- 0
dat<-na.omit(dat)
n_distinct(dat$ISO) #65
# Keep only the countries whose time series covers all 5 decades.
distinct_countries<-dat%>%
  group_by(ISO, .add= T)%>%
  dplyr::summarise(rowCount= n()) #count entries per country
distinct_countries<-filter(distinct_countries,distinct_countries$rowCount==5) #only take, when time series complete
countrycode(distinct_countries$ISO, "iso3c", "country.name") #shows which countries
n_distinct(dat$ISO) #65
dat<-dat[dat$ISO %in% distinct_countries$ISO,]
n_distinct(dat$ISO) #57
#write.csv(distinct_countries$ISO, "data/final_ISO.csv") # save final data selection!
# Rename the diversity measure for readability in the models.
names(dat)[names(dat) == 'mean_invSimp_D_prod'] <- 'Simp_Div'
#### FULL BASKET / NUTRITIOUS ADEQUACY ####
# Criteria: the demand of the least-fulfilled nutrient counts, as it shows
# which demographic percentage is fully nourished by supply.
# Accounts for: a high supply of one nutrient does not substitute for other
# requirements.
col_ful_nutr <- colnames(fulfilled_nutr)[-(1:2)]  # every column except timePeriod and ISO
# Row-wise minimum over the nutrient-fulfilment columns that made it into dat.
dat$full_basket <- apply(dat[, intersect(names(dat), col_ful_nutr)], 1, min)
# col_ful_nutr_sd<-colnames(sd_fulfilled_nutr)[-c(1:2)] # extract colnames
# dat$sd_full_basket<-apply(dat[,names(dat)[names(dat) %in% col_ful_nutr_sd]], 1, FUN= "mean" )
#### SELF SUFFICIENT FOOD BASKET ####
# Criteria: the demand of the least-fulfilled nutrient counts, as it shows
# which demographic percentage is dependent on trade across all nutrients.
# Accounts for: a high supply of one nutrient does not substitute for other
# requirements.
col_selfsuf_bas <- colnames(selfsuf_food_basket)[-(1:2)]  # every column except the join keys
dat$selfsuf_food_basket <- apply(dat[, intersect(names(dat), col_selfsuf_bas)], 1, min)
# #### SELF SUFFIENCY #####
# (disabled) legacy overall self-sufficiency score and boxplots, kept for reference
# dat$full_sufficiency<-apply(dat[,19:26], 1, FUN= min)
#
# pdf("Plots/boxplot_SelfSuffiency_with_time_Region.pdf")
# ggplot(data = dat, aes(y = full_sufficiency, x = timePeriod, group= timePeriod, colour = Region))+ # structure of the graph
# geom_boxplot() + # add the boxplot
# geom_jitter() + # show the points distribution
# geom_hline(yintercept=100, col= "red")+
# labs(x = '', y = "full_sufficiency [%]") + # add labels to the axis
# theme_classic() # make it pretty
# dev.off()
#
# #### SELF SUFFIENCY 2 ####
#
# dat$self_suf_2<-apply(dat[,27:34], 1, FUN= min)
# pdf("Plots/boxplot_SelfSuffiency_2_with_time_Region.pdf")
# ggplot(data = dat, aes(y = self_suf_2, x = timePeriod, group= timePeriod, colour = Region))+ # structure of the graph
# geom_boxplot() + # add the boxplot
# geom_jitter() + # show the points distribution
# geom_hline(yintercept=100, col= "red")+
# labs(x = '', y = "share of national production in nutritional supply [%]") + # add labels to the axis
# theme_classic() # make it pretty
# dev.off()
#### save csv####
# Persist the final analysis dataset for downstream modelling.
write.csv(dat,"data/final_dataset.csv")
#write.csv(dat,"data/final_dataset_inkl_sd.csv")
|
09f451959e2e9b2ed0115d4bb10150651f8d98e9
|
fbc244647eaf602abb4637c43641d3cdb1d178a8
|
/xts.processing.R
|
e4921a2a99bd443201441131990a3e97231bc551
|
[] |
no_license
|
patchdynamics/ct-river-R
|
9111dcebe232db3f6aa8e9c4edbcbe60a468cecc
|
bf3d4384a225aa7db705d483cb61d16ce3bbcbe2
|
refs/heads/master
| 2021-05-04T11:33:15.144478
| 2016-09-24T01:43:15
| 2016-09-24T01:43:15
| 50,894,715
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 773
|
r
|
xts.processing.R
|
# Calculate the "warming" series from an xts object: for every observation,
# the running maximum of column `col` within that observation's calendar year
# (the highest value seen since Jan 1 of the year).
#
# Args:
#   ts:  an xts time series
#   col: the column (index or name) to process
# Returns: an xts series of the same length holding the year-to-date maxima.
yearly.hval <- function(ts, col) {
  processed <- ts[, col]
  # Floor value used to re-initialise the running maximum at each year start.
  minimum <- min(ts[, col])
  highest <- minimum
  # seq_len() is safe for zero-row input (1:nrow(ts) would yield c(1, 0)).
  for (i in seq_len(nrow(ts))) {
    # .indexyday() == 0 marks Jan 1: reset the running maximum for the new year.
    if (.indexyday(ts[i, col]) == 0) {
      highest <- minimum
    }
    if (as.numeric(ts[i, col]) > as.numeric(highest)) {
      highest <- ts[i, col]
    }
    processed[i] <- highest
  }
  processed
}
# Calculate the "cooling" series from an xts object: for every observation,
# the maximum of column `col` over the remainder of that calendar year
# (from the observation through Dec 31), built by iterating backwards.
#
# Bug fix vs. the previous version (which mirrored the forward loop verbatim):
# resetting the running maximum *before* processing a Jan 1 row meant
# (a) Jan 1 lost its whole-year maximum, and (b) the running maximum leaked
# from a later year into Dec 31 of the preceding year. The reset now happens
# *after* Jan 1 is processed, i.e. just before stepping back into the
# previous year.
#
# Args:
#   ts:  an xts time series
#   col: the column (index or name) to process
# Returns: an xts series of the same length holding the rest-of-year maxima.
yearly.lval <- function(ts, col) {
  processed <- ts[, col]
  # Floor value used to re-initialise the running maximum at each year boundary.
  minimum <- min(ts[, col])
  highest <- minimum
  # rev(seq_len()) is safe for zero-row input (nrow(ts):1 would yield c(0, 1)).
  for (i in rev(seq_len(nrow(ts)))) {
    if (as.numeric(ts[i, col]) > as.numeric(highest)) {
      highest <- ts[i, col]
    }
    processed[i] <- highest
    # .indexyday() == 0 marks Jan 1: the next (earlier) row belongs to the
    # previous year, so restart the running maximum.
    if (.indexyday(ts[i, col]) == 0) {
      highest <- minimum
    }
  }
  processed
}
|
1850b4ff27b028cd7da3bbabcd70c77554f11818
|
98614a140562bebd7a6dde6df7d3fec149159e0b
|
/R/HKCSS - service utilization 191207.r
|
0e71299651357cd1a58b1419398d9705cd74c6bd
|
[] |
no_license
|
chenshuangzhou/programming101
|
07628a4fc797eb564531aa405b6e896c3c349da7
|
c09dbdb4777afa237a989fa67d9b7766079c821c
|
refs/heads/master
| 2020-03-29T16:30:36.184733
| 2020-03-08T10:53:57
| 2020-03-08T10:53:57
| 150,116,457
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 20,977
|
r
|
HKCSS - service utilization 191207.r
|
### Notes
# dementia, non-dementia
# male, female
# met/unmet needs
# generations
### HKCSS data on Unmet Need of Caregivers ###
library(xlsx);
library(outreg);
library(plyr);
library(psych);
library(stargazer);
library(interplot);
library(Hmisc);
# library(VIM) # visualization of missing data
# aggr(d,prop=F,numbers=T)
# matrixplot(d)
############### add number of caregivers
# Load the survey data; `data` and `work` are the same object.
# NOTE(review): absolute local path -- the script only runs on this machine.
data = work = read.csv("C:/Users/chens/OneDrive/research/Projects/4 HKCSS/191216 HKCSS.csv",header=T,na.strings = "NA") # Office - Dell Inspiron 16
# data = read.csv("D:/OneDrive/research/Projects/4 HKCSS/190908 HKCSS (no missing).csv",header=T,na.strings = "NA") # Office - Dell Inspiron 16
## Var names
# Short aliases for frequently used survey items.
data$CMon=data$CGMon
data$CF=data$B13 # CG frequency
data$comb=data$B16f
data$dr=data$C12 # dyadic relation
data$fr=data$C10T # family relation
# with adult children caregivers
# Analysis subsets; trailing comments record the sample sizes observed.
ac = data[data$CRtype2=="2" | data$CRtype2=="3",] # 742 | data$CRtype2=="4"
w = ac[ac$workCG=="1",] # 494
nw = ac[ac$workCG=="0",] # 252
# dementia population among working caregivers
d = w[w$B16b=="1",] # 181
m = d[d$genderCG=="1",] # 37
f = d[d$genderCG=="0",] # 144
nd = w[w$B16b=="0",] # 309
### unmet need of CR
# D201A-D210A: use - 0 no, 1 yes
# D201B-D210B: reasons not using - 1 dont know, 2 cannt use, 3 not appropriate
# D201C-D210C: need - 1-5 very unneed to very need
# D2A - services utilized by CR
# D2C - service needed by caregivers
# D2_UN - unmet need of CR
### Correlation
# ggpairs(data = d, columns = 2:10, title = "bivariates")
#
# Correlation matrix of the three age/gender variables for the dementia subset.
data1 <- d[, c("ageCR","genderCR","ageCG")]
corr <- round(cor(data1), 2)
# Visualize - library("ggcorrplot")
# NOTE(review): ggcorrplot is not attached by a library() call above --
# confirm it is loaded elsewhere before this chunk runs.
ggcorrplot(corr, p.mat = cor_pmat(data1),
hc.order = TRUE, type = "lower",
color = c("#FC4E07", "white", "#00AFBB"),
outline.col = "white", lab = TRUE,na.rm=T)
### Anderson Model on Service Utialization ####
# predisposing (age[ageCR], gender[genderCR], marital status[null], ethnicity[null] and family size[null]; +[resid])
# enabling (education level[eduCG], family support[C10T(relationship)], access to services[ADL_UN?], travel time to the nearest health facility[?], medical expense per capita[?], and health insurance coverage[?]),
# need factors (chronic disease)[phyFra], actual needs[ADL_UN], with the utilization of health services (i.e. physician visit and hospitalization).
# description table
# head(describe(un),10) # show basic description of first 10 variables
# Descriptives side by side for the dementia (d) and non-dementia (nd) groups.
# NOTE(review): attach()/detach() is fragile -- the describe() calls resolve
# column names from the attached frame; consider with() instead.
attach(d);table1.1 = rbind.fill(describe(ageCG),describe(ageCR),describe(phyFra),describe(ADL_UN));detach()
attach(nd);table1.2 = rbind.fill(describe(ageCG),describe(ageCR),describe(phyFra),describe(ADL_UN));detach()
table1 = cbind(table1.1,table1.2)
# Poisson regressions of service utilization (US) for each group.
reg1 = (glm(US ~ ageCR+genderCR+ageCG+genderCG+phyFra+ADL_UN+eduCG+economicCG+fr+resid+CF,data=d, family=poisson))
reg2 = (glm(US ~ ageCR+genderCR+ageCG+genderCG+phyFra+ADL_UN+eduCG+economicCG+fr+resid+CF,data=nd, family=poisson))
table2 = outreg(list(reg1,reg2))
write.csv(table1,file="C:/Users/chens/Desktop/table1.csv")
write.csv(table2,file="C:/Users/chens/Desktop/table2.csv")
### Pearlin's SPM Model ###
# Stress-process model specification, again per group.
reg3 = (glm(US ~ ageCG+genderCG+eduCG+economicCG+C12+LvHm+phyFra+GF12Positive+burden+resid+CRtype3+fr+depressive+C6T,data=d, family=poisson))
reg4 = (glm(US ~ ageCG+genderCG+eduCG+economicCG+C12+LvHm+phyFra+GF12Positive+burden+resid+CRtype3+fr+depressive+C6T,data=nd, family=poisson))
table3 = outreg(list(reg3,reg4))
write.csv(table3,file="C:/Users/chens/Desktop/table3.csv")
### Andersen's model on unmet needs
### CR's unmet need: ADL_UN, IADL_UN, ADL_UNP (unmet need percentage), IADL_UNP
# Poisson models of ADL/IADL unmet need; models 3/4 add the US*C5T interaction.
reg5 = (glm(ADL_UN ~ ageCR+genderCR+ageCG+genderCG+CGMarry+eduCG+resid+dr+CF+economicCG+phyFra+depressive+US,data=d, family=poisson))
reg6 = (glm(ADL_UN ~ ageCR+genderCR+ageCG+genderCG+CGMarry+eduCG+resid+dr+CF+economicCG+phyFra+depressive+US,data=nd, family=poisson))
ADL1 = (glm( ADL_UN ~ ageCR+genderCR+ageCG+genderCG+CGMarry+eduCG+resid+dr+CF+economicCG+phyFra+depressive+B16c+US+C5T,data=d, family=poisson,na.action='na.omit')) # heart issues
# ADL1.1 = (glm( ADL_UN ~ ageCR+genderCR+ageCG+genderCG+CGMarry+eduCG+resid+dr+CF+economicCG+phyFra+depressive+B16c+US+C5T,data=m, family=poisson,na.action='na.omit')) # heart issues
# ADL1.2 = (glm( ADL_UN ~ ageCR+genderCR+ageCG+genderCG+CGMarry+eduCG+resid+dr+CF+economicCG+phyFra+depressive+B16c+US+C5T,data=f, family=poisson,na.action='na.omit')) # heart issues
ADL2 = (glm( ADL_UN ~ ageCR+genderCR+ageCG+genderCG+CGMarry+eduCG+resid+dr+CF+economicCG+phyFra+depressive+B16c+US+C5T,data=nd, family=poisson,na.action='na.omit')) # heart issues
ADL3 = (glm( ADL_UN ~ ageCR+genderCR+ageCG+genderCG+CGMarry+eduCG+resid+dr+CF+economicCG+phyFra+depressive+B16c+US*C5T,data=d, family=poisson,na.action='na.omit')) # heart issues
# ADL3.1 = (glm( ADL_UN ~ ageCR+genderCR+ageCG+genderCG+CGMarry+eduCG+resid+dr+CF+economicCG+phyFra+depressive+B16c+US*C5T,data=m, family=poisson,na.action='na.omit')) # heart issues
# ADL3.2 = (glm( ADL_UN ~ ageCR+genderCR+ageCG+genderCG+CGMarry+eduCG+resid+dr+CF+economicCG+phyFra+depressive+B16c+US*C5T,data=f, family=poisson,na.action='na.omit')) # heart issues
ADL4 = (glm( ADL_UN ~ ageCR+genderCR+ageCG+genderCG+CGMarry+eduCG+resid+dr+CF+economicCG+phyFra+depressive+B16c+US*C5T,data=nd, family=poisson,na.action='na.omit')) # heart issues
IADL1 = (glm(IADL_UN ~ ageCR+genderCR+ageCG+genderCG+CGMarry+eduCG+resid+dr+CF+economicCG+phyFra+depressive+B16c+US+C5T,data=d, family=poisson,na.action='na.omit')) # heart issues
IADL2 = (glm(IADL_UN ~ ageCR+genderCR+ageCG+genderCG+CGMarry+eduCG+resid+dr+CF+economicCG+phyFra+depressive+B16c+US+C5T,data=nd, family=poisson,na.action='na.omit')) # heart issues
IADL3 = (glm(IADL_UN ~ ageCR+genderCR+ageCG+genderCG+CGMarry+eduCG+resid+dr+CF+economicCG+phyFra+depressive+B16c+US*C5T,data=d, family=poisson,na.action='na.omit')) # heart issues
IADL4 = (glm(IADL_UN ~ ageCR+genderCR+ageCG+genderCG+CGMarry+eduCG+resid+dr+CF+economicCG+phyFra+depressive+B16c+US*C5T,data=nd, family=poisson,na.action='na.omit')) # heart issues
table4 = outreg(list(ADL1,ADL2,ADL3,ADL4,IADL1,IADL2,IADL3,IADL4))
write.csv(table4,file="C:/Users/chens/Desktop/table.csv")
# felt need: real need
# expressed need: real need to be expressed
# NOTE(review): the assignments below OVERWRITE ADL1..IADL4 with gaussian
# models using UNS/PAC1 instead of US/C5T -- presumably intentional (a second
# specification), but the earlier poisson fits are no longer available after
# this point; confirm the export above ran first.
ADL1 = (glm( ADL_UN ~ ageCR+genderCR+ageCG+genderCG+CGMarry+eduCG+resid+dr+CF+economicCG+phyFra+depressive+B16c+UNS+PAC1,data=d, family = gaussian(),na.action='na.omit')) # heart issues
ADL2 = (glm( ADL_UN ~ ageCR+genderCR+ageCG+genderCG+CGMarry+eduCG+resid+dr+CF+economicCG+phyFra+depressive+B16c+UNS+PAC1,data=nd, family = gaussian(),na.action='na.omit')) # heart issues
ADL3 = (glm( ADL_UN ~ ageCR+genderCR+ageCG+genderCG+CGMarry+eduCG+resid+dr+CF+economicCG+phyFra+depressive+B16c+UNS*PAC1,data=d, family = gaussian(),na.action='na.omit')) # heart issues
ADL4 = (glm( ADL_UN ~ ageCR+genderCR+ageCG+genderCG+CGMarry+eduCG+resid+dr+CF+economicCG+phyFra+depressive+B16c+UNS*PAC1,data=nd, family = gaussian(),na.action='na.omit')) # heart issues
IADL1 = (glm(IADL_UN ~ ageCR+genderCR+ageCG+genderCG+CGMarry+eduCG+resid+dr+CF+economicCG+phyFra+depressive+B16c+UNS+PAC1,data=d, family = gaussian(),na.action='na.omit')) # heart issues
IADL2 = (glm(IADL_UN ~ ageCR+genderCR+ageCG+genderCG+CGMarry+eduCG+resid+dr+CF+economicCG+phyFra+depressive+B16c+UNS+PAC1,data=nd, family = gaussian(),na.action='na.omit')) # heart issues
IADL3 = (glm(IADL_UN ~ ageCR+genderCR+ageCG+genderCG+CGMarry+eduCG+resid+dr+CF+economicCG+phyFra+depressive+B16c+UNS*PAC1,data=d, family = gaussian(),na.action='na.omit')) # heart issues
IADL4 = (glm(IADL_UN ~ ageCR+genderCR+ageCG+genderCG+CGMarry+eduCG+resid+dr+CF+economicCG+phyFra+depressive+B16c+UNS*PAC1,data=nd, family = gaussian(),na.action='na.omit')) # heart issues
table4 = outreg(list(ADL1,ADL2,ADL3,ADL4,IADL1,IADL2,IADL3,IADL4))
write.csv(table4,file="C:/Users/chens/Desktop/table1.csv")
############
# UNS (unmet need of services) models: m1/m5 main effects with ADL_UN or
# IADL_UN, m3/m7 add the C6T*PAC interaction; odd = dementia (d), even = nd.
m1 = (glm(UNS ~ ageCR+genderCR+ageCG+genderCG+CGMarry+eduCG+resid+dr+CF+economicCG+phyFra+depressive+B16c+ ADL_UN+C6T+PAC,data=d, family=poisson,na.action='na.omit'))
m2 = (glm(UNS ~ ageCR+genderCR+ageCG+genderCG+CGMarry+eduCG+resid+dr+CF+economicCG+phyFra+depressive+B16c+ ADL_UN+C6T+PAC,data=nd, family=poisson,na.action='na.omit'))
m3 = (glm(UNS ~ ageCR+genderCR+ageCG+genderCG+CGMarry+eduCG+resid+dr+CF+economicCG+phyFra+depressive+B16c+ ADL_UN+C6T*PAC,data=d, family=poisson,na.action='na.omit'))
m4 = (glm(UNS ~ ageCR+genderCR+ageCG+genderCG+CGMarry+eduCG+resid+dr+CF+economicCG+phyFra+depressive+B16c+ ADL_UN+C6T*PAC,data=nd, family=poisson,na.action='na.omit'))
m5 = (glm(UNS ~ ageCR+genderCR+ageCG+genderCG+CGMarry+eduCG+resid+dr+CF+economicCG+phyFra+depressive+B16c+IADL_UN+C6T+PAC,data=d, family=poisson,na.action='na.omit'))
m6 = (glm(UNS ~ ageCR+genderCR+ageCG+genderCG+CGMarry+eduCG+resid+dr+CF+economicCG+phyFra+depressive+B16c+IADL_UN+C6T+PAC,data=nd, family=poisson,na.action='na.omit'))
m7 = (glm(UNS ~ ageCR+genderCR+ageCG+genderCG+CGMarry+eduCG+resid+dr+CF+economicCG+phyFra+depressive+B16c+IADL_UN+C6T*PAC,data=d, family=poisson,na.action='na.omit'))
m8 = (glm(UNS ~ ageCR+genderCR+ageCG+genderCG+CGMarry+eduCG+resid+dr+CF+economicCG+phyFra+depressive+B16c+IADL_UN+C6T*PAC,data=nd, family=poisson,na.action='na.omit'))
# Marginal-effect plots of PAC conditional on lowest/highest ZBI (C6T).
UNS_ADL_d = interplot(m3, var1 = "PAC",var2 = "C6T", predPro = TRUE, var2_vals = c(min( d$C6T,na.rm=T), max( d$C6T,na.rm=T))) + ggtitle("Unmet Need on PAC by ZBI among CG of Dementia Caregivers") + scale_colour_discrete(guide = guide_legend(title = "Mean"), labels = c("LowestZBI", "Highest ZBI")) + scale_fill_discrete(guide = guide_legend(title = "Intervals"), labels = c("LowestZBI", "Highest ZBI")) + theme(legend.position = c(.1, .8), legend.justification = c(0, .5)) + ylab("Estimated Coefficient for Unmet Need of Services")
UNS_ADL_nd = interplot(m4, var1 = "PAC",var2 = "C6T", predPro = TRUE, var2_vals = c(min( nd$C6T,na.rm=T), max(nd$C6T,na.rm=T))) + ggtitle("Unmet Need on PAC by ZBI among CG of non-Dementia Caregiver") + scale_colour_discrete(guide = guide_legend(title = "Mean"), labels = c("LowestZBI", "Highest ZBI")) + scale_fill_discrete(guide = guide_legend(title = "Intervals"), labels = c("LowestZBI", "Highest ZBI")) + theme(legend.position = c(.1, .8), legend.justification = c(0, .5))
UNS_IADL_d = interplot(m7, var1 = "PAC",var2 = "C6T", predPro = TRUE, var2_vals = c(min( d$C6T,na.rm=T),max( d$C6T,na.rm=T))) + ggtitle("Unmet Need on PAC by ZBI among CG of Dementia Caregiver") + scale_colour_discrete(guide = guide_legend(title = "Mean"), labels = c("LowestZBI", "Highest ZBI")) + scale_fill_discrete(guide = guide_legend(title = "Intervals"), labels = c("LowestZBI", "Highest ZBI")) + theme(legend.position = c(.1, .8), legend.justification = c(0, .5)) + ylab("Estimated Coefficient for Unmet Need of Services")
UNS_IADL_nd = interplot(m8, var1 = "PAC",var2 = "C6T", predPro = TRUE, var2_vals = c(min( nd$C6T,na.rm=T),max( nd$C6T,na.rm=T))) + ggtitle("Unmet Need on PAC by ZBI among CG of non-Dementia Caregiver") + scale_colour_discrete(guide = guide_legend(title = "Mean"), labels = c("LowestZBI", "Highest ZBI")) + scale_fill_discrete(guide = guide_legend(title = "Intervals"), labels = c("LowestZBI", "Highest ZBI")) + theme(legend.position = c(.1, .8), legend.justification = c(0, .5))
grid.arrange(UNS_ADL_d,UNS_ADL_nd,UNS_IADL_d,UNS_IADL_nd, ncol=2, nrow=2) # library(gridExtra)
table4 = outreg(list(m1,m2,m3,m4,m5,m6,m7,m8))
write.csv(table4,file="C:/Users/chens/Desktop/table1.csv")
###############
# interplot(ADL3, var1 = 'US',var2 = 'C5T', predPro = FALSE) + ggtitle("Average Conditional Effects")
# V1 on x-axis; prediction of V2 on DV on y-axis
# impute(d$C5T,median)
# impute(d$US,median)
library(gridExtra)
# Same idea for the ADL/IADL gaussian models, conditioning on min/max UNS.
ADLd = interplot(ADL3, var1 = "PAC1",var2 = "UNS", predPro = TRUE, var2_vals = c(min( d$UNS,na.rm=T), max( d$UNS,na.rm=T))) + ggtitle("Unmet Need of ADL on` PAC by SU among CG of Dementia Population") + scale_colour_discrete(guide = guide_legend(title = "Mean"), labels = c("Least Service Unmet Need", "Most Service Unmet Need")) + scale_fill_discrete(guide = guide_legend(title = "Intervals"), labels = c("Least Service Unmet Need", "Most Service Unmet Need")) + theme(legend.position = c(.1, .8), legend.justification = c(0, .5)) + ylab("Estimated Coefficient for Service Utilization")
ADLnd = interplot(ADL4, var1 = "PAC1",var2 = "UNS", predPro = TRUE, var2_vals = c(min(nd$UNS,na.rm=T), max(nd$UNS,na.rm=T))) + ggtitle("Unmet Need of ADL on` PAC by SU among CG of Other Population") + scale_colour_discrete(guide = guide_legend(title = "Mean"), labels = c("Least Service Unmet Need", "Most Service Unmet Need")) + scale_fill_discrete(guide = guide_legend(title = "Intervals"), labels = c("Least Service Unmet Need", "Most Service Unmet Need")) + theme(legend.position = c(.1, .8), legend.justification = c(0, .5))
IADLd = interplot(IADL3, var1 = "PAC1",var2 = "UNS", predPro = TRUE, var2_vals = c(min( d$UNS,na.rm=T), max( d$UNS,na.rm=T))) + ggtitle("Unmet Need of IADL on` PAC by SU among CG of Dementia Population") + scale_colour_discrete(guide = guide_legend(title = "Mean"), labels = c("Least Service Unmet Need", "Most Service Unmet Need")) + scale_fill_discrete(guide = guide_legend(title = "Intervals"), labels = c("Least Service Unmet Need", "Most Service Unmet Need")) + theme(legend.position = c(.1, .8), legend.justification = c(0, .5)) + xlab("PAC") + ylab("Estimated Coefficient for Service Utilization")
IADLnd = interplot(IADL4,var1 = "PAC1",var2 = "UNS", predPro = TRUE, var2_vals = c(min(nd$UNS,na.rm=T), max(nd$UNS,na.rm=T))) + ggtitle("Unmet Need of IADL on` PAC by SU among CG of Other Population") + scale_colour_discrete(guide = guide_legend(title = "Mean"), labels = c("Least Service Unmet Need", "Most Service Unmet Need")) + scale_fill_discrete(guide = guide_legend(title = "Intervals"), labels = c("Least Service Unmet Need", "Most Service Unmet Need")) + theme(legend.position = c(.1, .8), legend.justification = c(0, .5)) + xlab("PAC")
grid.arrange(ADLd,ADLnd,IADLd,IADLnd, ncol=2, nrow=2) # library(gridExtra)
# plot_3val <- interplot(ADL3, var1 = "US",var2 = "C5T", predPro = TRUE, var2_vals = c(min(d$C5T), max(d$C5T))) + ggtitle("Conditional Predicted Probabilities for \nCitizens with Low and High Incomes") + scale_colour_discrete(guide = guide_legend(title = "Income"), labels = c("Low", "High")) + scale_fill_discrete(guide = guide_legend(title = "Income"), labels = c("Low", "High")) + theme(legend.position = c(0, .8), legend.justification = c(0, .5))
interplot(ADL3, var1 = "US",var2 = "C5T", predPro = TRUE, var2_vals = c(min(d$US,na.rm=T), max(d$US,na.rm=T)), point=T) + ggtitle("Unmet Need of ADL on PAC by SU among CG of Dementia Population") + scale_colour_discrete(guide = guide_legend(title = "Mean"), labels = c("Least Service", "Most Service")) + scale_fill_discrete(guide = guide_legend(title = "Intervals"), labels = c("Least Service", "Most Service")) + theme(legend.position = c(.1, .8), legend.justification = c(0, .5))
## Framework
# DV: utilization of service in need [US]
# IV: unmet need of CR in services[D2_UN], in ADL[ADL_UN], in IADL[IADL_UN], health status [FrailtyT], CR support [CRCsp,CREsp,CRFsp,CRdm], CG support [CGCsp,CGEsp,CGFsp,CGdm]
# CV: demographics of CR[genderCR, ageCR, resCR, B15p/cohabit(living alone)], of CG[genderCG, ageCG, workCG, eduCG, maritalCG, economicCG, incomeCG], caregiving[CMon], PAC [C5T], ZBI [C6T], caregiving hours weekly [B13], relationship[C12, C10T]
# MV: meaning [PAC, PACas,PACel]
# US models comparing working (w) and non-working (nw) caregivers;
# m2/m4 add the C6T*C10T interaction.
m1 = glm(US ~ ageCG+genderCG+eduCG+CGMarry+B13+economicCG+DemCom+ADL_UN+C6T+C10T,data=w, family=poisson)
m2 = glm(US ~ ageCG+genderCG+eduCG+CGMarry+B13+economicCG+DemCom+ADL_UN+C6T*C10T,data=w, family=poisson)
m3 = glm(US ~ ageCG+genderCG+eduCG+CGMarry+B13+economicCG+DemCom+ADL_UN+C6T+C10T,data=nw, family=poisson)
m4 = glm(US ~ ageCG+genderCG+eduCG+CGMarry+B13+economicCG+DemCom+ADL_UN+C6T*C10T,data=nw, family=poisson)
# NOTE(review): m1-m4 here overwrite the UNS models of the same names fitted
# earlier in the script -- confirm this is intended before reusing them.
summary(glm(US ~ ageCG+genderCG+eduCG+CGMarry+B13+economicCG+DemCom+ADL_UN+C6T+C10T,data=w, family=poisson))
summary(glm(US ~ ageCG+genderCG+eduCG+CGMarry+B13+economicCG+DemCom+ADL_UN+C6T+C10T,data=nw, family=poisson))
## Variables - basic check (finished)
# ad1=un[,c("D1_UN", "D2_UN", "ADL_UN", "IADL_UN", "phyFra", "psyFra", "socFra")]
# pairs(ad1) # between DV and IVs
# cor(ad1,method = c("pearson", "kendall", "spearman"))
# cor(ad1,na.rm=T)
# describe(ad1)
# table(c(D1_UN, D2_UN, ADL_UN, IADL_UN, FrailtyT))
# hist(D1_UN, breaks=0:6)
## Models
# (disabled) earlier model sequence, kept for reference
# # Model 1: CR demographics -> CR UN ***
# m1.1 = glm(ADL_UN ~ genderCR+ageCR+CMon+CF+comb+CRCsp+CREsp+CRdm, data=data, family=poisson)
# m1.2 = glm(ADL_UN ~ genderCR+ageCR+CMon+CF+CRCsp*comb+CREsp*comb+CRdm*comb, data=data, family=poisson)
# m1.3 = glm(IADL_UN ~ genderCR+ageCR+CMon+CF+comb+CRCsp+CREsp+CRdm, data=data, family=poisson)
# m1.4 = glm(IADL_UN ~ genderCR+ageCR+CMon+CF+CRCsp*comb+CREsp*comb+CRdm*comb, data=data, family=poisson)
# # stargazer(m1.1,m1.2,m1.3,m1.4,title="ModelResult",column.labels=c('ADL','Interaction','IADL','Interaction'),align=T,type="text",out="table.htm")
#
# # Model 2: CR UN -> CR Health *
# m2.1 = glm(FrailtyT ~ ADL_UN+IADL_UN+genderCR+ageCR, data=data)
# m2.2 = glm(phyFra ~ ADL_UN+IADL_UN+genderCR+ageCR, data=data)
# m2.3 = glm(psyFra ~ ADL_UN+IADL_UN+genderCR+ageCR, data=data)
# m2.4 = glm(socFra ~ ADL_UN+IADL_UN+genderCR+ageCR, data=data)
# # stargazer('m2.1',m2.2,m2.3,'m2.4',title="ModelResult",column.labels=c('Frailty','Phy Frail','Psy Frail','Soc Frail'),align=T,type="text",out="table.htm")
# # m2.2, m2.3, m2.3
#
# # Model 3: CR UN -> CG ZBI | PAC
# m3.1 = glm(ZBI4Score ~ ageCG+genderCG+CF+economicCG+ADL_UN+IADL_UN+phyFra+psyFra+PAC+C12+C10T, data=w)
# m3.2 = glm(ZBI4Score ~ ageCG+genderCG+CF+economicCG+ADL_UN+IADL_UN+phyFra+psyFra+PAC+C12+C10T, data=nw)
# # stargazer(m3.1,m3.2,title="ModelResult",column.labels=c('Working','Non-working'),align=T,type="text",out="table.htm")
#
# # Model 4: CR Health -> CG utilization of services in need | PAC
# m4.1 = glm(US ~ ZBI4Score+workCG, data=data, family=poisson)
# m4.2 = glm(US ~ ZBI4Score*workCG, data=data, family=poisson)
# # stargazer(m4.1,m4.2,title="ModelResult",column.labels=c('Main','Interaction'),align=T,type="text",out="table.htm")
#
# # check actual coefficients if model is desirable
# # t1 = cbind(exp(coef(m1)),exp(confint(m1)))
# # t1.1 = cbind(exp(coef(m1.1)),exp(confint(m1.1)))
# # t1.2 = cbind(exp(coef(m1.2)),exp(confint(m1.2)))
# # stepAIC(m1,direction="both") # library(MASS)
# Model 1: CR demographics + PAC -> frailty
# summary(glm(US ~ genderCG+ageCG+workCG+eduCG+economicCG+incomeCG+ADL_UN+IADL_UN+C6T+C7,data=data, family=poisson))
# summary(glm(US ~ genderCG+ageCG+workCG+eduCG+economicCG+ADL_UN+IADL_UN+C6T+C7,data=data, family=poisson))
## Model Tables
# Regression Models - export to Excel
# Model Table Set 1
# table1 = outreg(list(m1.1,m1.2,m1.3,m1.4))
# table2 = outreg(list(m2.1,m2.2,m2.3,m2.4))
# table3 = outreg(list(m3.1,m3.2))
# table4 = outreg(list(m4.1,m4.2))
# reg1 = (glm(US ~ ageCR+genderCR+ageCG+genderCG+phyFra+ADL_UN+eduCG+economicCG+fr+resid+CF,data=d, family=poisson))
# reg2 = (glm(US ~ ageCR+genderCR+ageCG+genderCG+phyFra+ADL_UN+eduCG+economicCG+fr+resid+CF,data=nd, family=poisson))
# table2 = outreg(list(reg1,reg2))
# Combine all tables
# table = rbind.fill(table1,table2,table3,table4) # require library(plyr)
# output to excel
# write.csv(table1,file="C:/Users/chens/Desktop/test1.csv")
# write.csv(table2,file="C:/Users/chens/Desktop/test2.csv")
# write.csv(table3,file="C:/Users/chens/Desktop/test3.csv")
# write.csv(table4,file="C:/Users/chens/Desktop/test4.csv")
#anova12=anova(linear1,linear2)
#anova23=anova(linear2,linear3)
#anova34=anova(linear3,linear4)
#anova14=anova(linear1,linear4)
#stargazer(anova12,anova23,anova34,anova14,title="Model Comparison",align=T,type="text",out="table.htm")
# Chinese word segmentation for the word cloud.
library(jiebaR)
library(rJava)
library(Rwordseg) # install.packages("Rwordseg", repos = "http://R-Forge.R-project.org")
### Word Cloud
# NOTE(review): `data` is re-assigned here, clobbering the survey data loaded
# at the top of the script -- run this section last.
data = read.table("C:/Users/chens/Desktop/wordcloud.text")
segmentCN("C:/Users/chens/Desktop/wordcloud.text",returnType="tm")
wk = worker()
segment("C:/Users/chens/Desktop/wordcloud.text", wk)
|
f3d3bc997679bf29a8e37e64139bb441daedafc5
|
2d1a8db7061ceda55e5f37990f764317d4c193d8
|
/LOLA Enrichments/LOLA_mmarge_4_30_19.R
|
4bcb600080c809cdd2856795f9c32acd11c6aff7
|
[] |
no_license
|
aciernia/BTBR-BMDM-Endotoxin-Tolerance
|
643a007011b7a273eed111480a57c7b957579f9c
|
be119492b3d1208dc59f578ca2e1f53bbef30afa
|
refs/heads/master
| 2022-12-27T07:25:31.580457
| 2020-10-09T14:42:38
| 2020-10-09T14:42:38
| 258,902,236
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,206
|
r
|
LOLA_mmarge_4_30_19.R
|
#author: Annie Vogel Ciernia
#a.ciernia@gmail.com
#10/9/2018
##############################################################################################################
library(dplyr)
library(tidyr)
library(cowplot)
library(gplots)
#if (!requireNamespace("BiocManager", quietly = TRUE))
#  install.packages("BiocManager")
#BiocManager::install("LOLA")
library(LOLA)
#source("https://bioconductor.org/biocLite.R")
#biocLite("GenomicRanges")
library(GenomicRanges)
#source("https://bioconductor.org/biocLite.R")
#biocLite("qvalue")
library(qvalue)
options("scipen"=100, "digits"=4) #prevent exponents
##############################################################################################################
#read in peak bed files to GRanges List
library(ChIPseeker)
library("zonator")
setwd("/Users/annieciernia/Sync/collaborations/Ashwood/BTBR_BMDM/atac_2019/consensus\ peaks")
path <- getwd()
#get lists
# Every mm10 differential-peak bed file in the working directory.
files <- list.files(path=".", pattern="*.mm10.bed", all.files=T, full.names=T)
filelist <- lapply(files, readPeakFile)
#get and fix names of files
# Derive comparison labels from file names: strip the suffix and decode the
# "_L_"/"_G_" markers into "<"/">" (less/greater than).
# NOTE(review): file_path_sans_ext comes from the tools package -- confirm it
# is on the search path (it is not attached explicitly here).
names_list <- paste0(basename(file_path_sans_ext(files)))
names_list <- gsub("_DEpeaks.mm10", "", names_list)
names_list <- gsub("_L_", "<", names_list)
names_list <- gsub("_G_", ">", names_list)
names(filelist) <- names_list
names(filelist)
#load background regions: all possible peaks called in all samples
Background <- readBed(file = "/Users/annieciernia/Sync/collaborations/Ashwood/BTBR_BMDM/atac_2019/DiffBind/Allconsensuspeaks.bed")
#load DB:
#regionDB <- loadRegionDB(dbLocation = "/Users/annieciernia/Desktop/regionDB/mm10/",limit = NULL, collections = c("collection1","collection3","collection4","collection5","collection6","collection7","collection8"))
#save(regionDB,file="/Users/annieciernia/Desktop/regionDB/RegionDBmm10_9_3_18.Rdata")
#support is the overlap, and b, c, and d complete the 2x2 table
# Transcription-factor collections of the mm10 LOLA region database.
regionDB_TF <- loadRegionDB(dbLocation = "/Users/annieciernia/Desktop/mm10_LOLA_DB/mm10/",limit = NULL, collections = c("collection5","collection6","collection7","collection8"))
setwd("/Users/annieciernia/Sync/collaborations/Ashwood/BTBR_BMDM/atac_2019/peak_overlaps1_5_2020")
#two tailed fisher exact test:
# Enrichment of each DE-peak set against the TF collections, relative to the
# consensus-peak background.
Results <- runLOLA(userSets = filelist, userUniverse = Background, regionDB = regionDB_TF, minOverlap = 1, cores=2, redefineUserSets = FALSE,direction = "enrichment")
#locResult = Results[2,]
#extractEnrichmentOverlaps(locResult, filelist, regionDB_TF)
writeCombinedEnrichment(combinedResults = Results, outFolder = "DEpeak_RegionOverlaps", includeSplits=F)
########################################################################################################################
# Post-process the LOLA results: rename the 2x2 contingency-table columns to
# self-describing names, add percentage enrichment columns, attach the
# human-readable list descriptions and export everything to CSV.
# (The variable formerly named `merge` is renamed so it no longer shadows
# base::merge in reading, though dispatch was unaffected either way.)
enrich_tab <- Results
#pValueLog:=-log10(pValueLog + 10^-322)
#make pvalue
#enrich_tab$pvalue <- 10^-(enrich_tab$pValueLog)
#undo pseudo count:
# enrich_tab$pvalue <- enrich_tab$pvalue - 10^-322
# enrich_tab$pvalue <- abs(enrich_tab$pvalue)
# enrich_tab$FDR <- p.adjust(enrich_tab$pvalue,method = "fdr")
# Rename the Fisher-test cells (support/b/c/d) to explicit labels.
cell_names <- c(
  support = "userSet.in.target.list",
  b = "NonuserSet.in.target.list",
  c = "userSet.not.in.target.list",
  d = "NonuserSet.not.in.target.list"
)
for (old_name in names(cell_names)) {
  names(enrich_tab)[names(enrich_tab) == old_name] <- cell_names[[old_name]]
}
# Percent of user-set (and background) regions falling inside each target list.
enrich_tab$percent_userSet_in_Target <- 100 * enrich_tab$userSet.in.target.list /
  (enrich_tab$userSet.in.target.list + enrich_tab$userSet.not.in.target.list)
enrich_tab$percent_BG_in_Target <- 100 * enrich_tab$NonuserSet.in.target.list /
  (enrich_tab$NonuserSet.in.target.list + enrich_tab$NonuserSet.not.in.target.list)
#fold enrichment relative to background (disabled)
#enrich_tab$FC <- (enrich_tab$percent_userSet_in_Target - enrich_tab$percent_BG_in_Target)/enrich_tab$percent_BG_in_Target
# Attach human-readable descriptions and write the combined table out.
listdescript <- read.csv("descriptionorder_fixed.csv")
merge2 <- merge(enrich_tab, listdescript, by.x = "description", by.y = "old")
write.csv(merge2, file = "FisherExact_TF_Enrichements_1_5_20.csv")
########################################################################################################################
#clean up names
enrichments <- merge2
unique(enrichments$userSet)
neworder <- c("BTBRmedia<C57media" ,"BTBRmedia>C57media",
"C57LPS1>C57media","BTBRLPS1>BTBRmedia", "C57LPS2>C57media","BTBRLPS2>BTBRmedia",
"C57LPS1<C57media","BTBRLPS1<BTBRmedia", "C57LPS2<C57media","BTBRLPS2<BTBRmedia",
"BTBRLPS1<C57LPS1","BTBRLPS2<C57LPS2",
"BTBRLPS1>C57LPS1","BTBRLPS2>C57LPS2")
enrichments$userSet <- factor(enrichments$userSet, levels = neworder)
#enrichments_sig <- filter(enrichments,enrichments$qValue<0.05)
########################################################################################################################
#plots
########################################################################################################################
#significant enrichments only:
Collection5678 <- enrichments %>% filter(collection == "collection5"|
collection == "collection6"|
collection == "collection7"|
collection == "collection8") %>%
filter(qValue < 0.05)
#plot of odds ratios as dot size and pvalues as heatmap color for all lists sig across samples
ggplot(Collection5678, aes(y = newnames, x = oddsRatio)) +
facet_grid(~userSet)+
geom_point( alpha=0.75, aes(size = userSet.in.target.list,color=qValue)) +
scale_size(name = "Number of \n Overlapping Regions",
breaks = signif(fivenum(Collection5678$userSet.in.target.list),2), #returns rounded values for 5 sets
labels = signif(fivenum(Collection5678$userSet.in.target.list),2))+
theme_bw() +
xlab("Odds Ratio") +
ylab("Comparison List") +
scale_color_gradient(low="blue",high="red")+
theme(strip.text.x = element_text(size = 14)) +
#theme(axis.text.x=element_text(angle=65,hjust=1)) +
theme(legend.key = element_blank(), strip.background = element_blank(),panel.border = element_rect(colour = "black"))+
theme(axis.text=element_text(size=14),axis.title=element_text(size=14))
ggsave(filename="Collection5678_BMDMmarks_1_5_20.pdf",width = 16, height = 6, dpi = 300, useDingbats = FALSE)
########################################################################################################################
#significant enrichments only: media<LPS
Collection5678 <- enrichments %>% filter(collection == "collection5"|
collection == "collection6"|
collection == "collection7"|
collection == "collection8") %>%
filter(qValue < 0.05) %>%
filter(grepl("<", userSet))
#plot of odds ratios as dot size and pvalues as heatmap color for all lists sig across samples
ggplot(Collection5678, aes(y = newnames, x = oddsRatio)) +
facet_grid(~userSet)+
geom_point( alpha=0.75, aes(size = userSet.in.target.list,color=qValue)) +
scale_size(name = "Number of \n Overlapping Regions",
breaks = signif(fivenum(Collection5678$userSet.in.target.list),2), #returns rounded values for 5 sets
labels = signif(fivenum(Collection5678$userSet.in.target.list),2))+
theme_bw() +
xlab("Odds Ratio") +
ylab("Comparison List") +
scale_color_gradient(low="red",high="blue")+
theme(legend.key = element_blank(), strip.background = element_blank(),panel.border = element_rect(colour = "black"))+
theme(axis.text=element_text(size=14),axis.title=element_text(size=14))
ggsave(filename="LessThanPeaks_BMDMmarks_5_2_19.pdf",width = 12, height = 5, dpi = 300, useDingbats = FALSE)
########################################################################################################################
# Significant enrichments only, restricted to comparisons where media > LPS
# (userSet labels containing ">").
Collection5678 <- enrichments %>%
  filter(collection %in% c("collection5", "collection6",
                           "collection7", "collection8")) %>%
  filter(qValue < 0.05) %>%
  filter(grepl(">", userSet))

# Dot plot: one panel per comparison list; odds ratio on x, number of
# overlapping regions as dot size, q-value as colour.
overlap_breaks <- signif(fivenum(Collection5678$userSet.in.target.list), 2)
ggplot(Collection5678, aes(y = newnames, x = oddsRatio)) +
  facet_grid(~userSet) +
  geom_point(alpha = 0.75, aes(size = userSet.in.target.list, color = qValue)) +
  scale_size(name = "Number of \n Overlapping Regions",
             breaks = overlap_breaks,   # five-number summary, rounded
             labels = overlap_breaks) +
  theme_bw() +
  xlab("Odds Ratio") +
  ylab("Comparison List") +
  scale_color_gradient(low = "red", high = "blue") +
  theme(strip.text.x = element_text(size = 14)) +
  theme(legend.key = element_blank(), strip.background = element_blank(),
        panel.border = element_rect(colour = "black")) +
  theme(axis.text = element_text(size = 14),
        axis.title = element_text(size = 14))
ggsave(filename = "GreaterThanPeaks_BMDMmarks_5_2_19.pdf",
       width = 12, height = 6, dpi = 300, useDingbats = FALSE)
|
3ea323ca0b8191427c28a9204923ed0194bd30d7
|
c12d52663ecd6f7088337fe371e77e2f82398758
|
/man/colCumprods.Rd
|
0657457c085919eedfb57f3e899e0d3bb0f740a2
|
[] |
no_license
|
federicomarini/sparseMatrixStats
|
8d581ad4db29583c4f9d56f18fbdfdcebc3cf1d0
|
b5c036095d3aac4be00096f793b199aebf4d1fcd
|
refs/heads/master
| 2020-08-05T03:24:15.701873
| 2019-10-02T14:37:51
| 2019-10-02T14:37:51
| 212,375,029
| 0
| 0
| null | 2019-10-02T15:22:15
| 2019-10-02T15:22:14
| null |
UTF-8
|
R
| false
| true
| 1,683
|
rd
|
colCumprods.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods.R, R/methods_row.R
\docType{methods}
\name{colCumprods}
\alias{colCumprods}
\alias{colCumprods,dgCMatrix-method}
\alias{rowCumprods}
\alias{rowCumprods,dgCMatrix-method}
\title{Cumulative sums, products, minima and maxima for each row (column) in a
matrix}
\usage{
colCumprods(x, rows = NULL, cols = NULL, ...)
\S4method{colCumprods}{dgCMatrix}(x, rows = NULL, cols = NULL, ...)
rowCumprods(x, rows = NULL, cols = NULL, ...)
\S4method{rowCumprods}{dgCMatrix}(x, rows = NULL, cols = NULL, ...)
}
\arguments{
\item{x}{A \code{\link[base]{numeric}} NxK \code{\link[base]{matrix}}.}
\item{rows}{A \code{\link[base]{vector}} indicating subset of elements
(or rows and/or columns) to operate over. If \code{\link[base]{NULL}}, no
subsetting is done.}
\item{cols}{A \code{\link[base]{vector}} indicating subset of elements
(or rows and/or columns) to operate over. If \code{\link[base]{NULL}}, no
subsetting is done.}
\item{...}{Not used.}
}
\value{
Returns a \code{\link[base]{numeric}} NxK \code{\link[base]{matrix}}
of the same mode as \code{x}.
}
\description{
Cumulative sums, products, minima and maxima for each row (column) in a
matrix.
}
\examples{
x <- matrix(1:12, nrow = 4, ncol = 3)
print(x)
yr <- rowCumsums(x)
print(yr)
yc <- colCumsums(x)
print(yc)
yr <- rowCumprods(x)
print(yr)
yc <- colCumprods(x)
print(yc)
yr <- rowCummaxs(x)
print(yr)
yc <- colCummaxs(x)
print(yc)
yr <- rowCummins(x)
print(yr)
yc <- colCummins(x)
print(yc)
}
\seealso{
See \code{\link[base]{cumsum}}(), \code{\link[base]{cumprod}}(),
\code{\link[base]{cummin}}(), and \code{\link[base]{cummax}}().
}
|
acc1ebcb4ba5200928f3921f1032624565f69f6e
|
9cfdec25ad3ec65679a4cca555422bfef54e73ab
|
/hw/hw1/test.r
|
ae5bcb829cbfd48e1f4165baae5c362083895282
|
[] |
no_license
|
huberf/matlab-class
|
a5e186f558cf3bbdd42340ee4fb134a723cf26a0
|
202e30ec217bf6e50ff48757e4a4fedc9598987b
|
refs/heads/master
| 2021-01-17T16:46:47.765418
| 2016-07-01T05:35:27
| 2016-07-01T05:35:27
| 62,062,404
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 248
|
r
|
test.r
|
# Mass estimate of a composite solid, density 8050 (presumably steel in
# kg/m^3 -- TODO confirm units).  Geometry: a cylinder (r = 0.04, h = 0.07)
# plus a rectangular slab (0.25 x 0.08 x 0.07), minus three drilled holes of
# height 0.07.  Each statement OVERWRITES `total`; only the last survives.
total <- ((pi * 0.04^2 * 0.07) + (0.25 * 0.08 * 0.07) - 3 * (pi * 0.015^2 * 0.07)) * 8050
# Same solid without the holes.
total <- ((pi * 0.04^2 * 0.07) + (0.25 * 0.08 * 0.07)) * 8050
# Hole radius changed to 0.02779327 -- apparently solved for so the mass
# comes out to ~10; verify against the original problem statement.
total <- ((pi * 0.04^2 * 0.07) + (0.25 * 0.08 * 0.07) - 3 * (pi * 0.02779327^2 * 0.07)) * 8050
|
acd97d4dca644eea97cbcd14b375cb8b59c47633
|
538b909ebc208800939ee38d479e19f34e033123
|
/cachematrix.R
|
704ee2eba5b1a8b0e6453c9c49cecbc29e76c056
|
[] |
no_license
|
mwirth7070/ProgrammingAssignment2
|
0e4b61610eba71462c92a8ac40cb3d36fed02a78
|
d7d3adb67c6c03963bd5b88f3db4c8c2a2af9e6c
|
refs/heads/master
| 2021-01-18T04:35:00.814275
| 2014-07-25T01:48:10
| 2014-07-25T01:48:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 993
|
r
|
cachematrix.R
|
# makeCacheMatrix(): creates a special "matrix" object that can cache its
# inverse.  Returns a list of four closures: set/get the matrix, and
# setmatrix/getmatrix to store/retrieve the cached inverse.
# cacheSolve(): computes the inverse of the special "matrix" returned by
# makeCacheMatrix(), reusing the cached value when available.

makeCacheMatrix <- function(x = matrix()) {
  m <- NULL                 # cached inverse; NULL until cacheSolve() stores it
  set <- function(y) {
    x <<- y
    m <<- NULL              # invalidate the cache whenever the matrix changes
  }
  get <- function() x                        # the stored matrix
  setmatrix <- function(solve) m <<- solve   # store the computed inverse
  getmatrix <- function() m                  # cached inverse, or NULL
  list(set = set, get = get, setmatrix = setmatrix, getmatrix = getmatrix)
}

# x: the cache object produced by makeCacheMatrix().  The original default
# `x = matrix()` was removed: a bare matrix cannot be indexed with `$`, so
# calling cacheSolve() without an argument always errored anyway.
# ...: extra arguments forwarded to solve().
cacheSolve <- function(x, ...) {
  m <- x$getmatrix()
  if (!is.null(m)) {        # cache hit: skip recomputation
    message("getting cached data")
    return(m)
  }
  matrix <- x$get()
  m <- solve(matrix, ...)   # invert the matrix
  x$setmatrix(m)            # remember it for subsequent calls
  m
}
|
ac7d6e0230b835e2f9823101f9deb82360236543
|
fadd25738df09516aedb88a53579e7e121ad51f4
|
/R/signalInfo.R
|
38ce552e112d47f10fbe8ed9d65e3819f4b83c1d
|
[] |
no_license
|
JangSeonghoon/maintcivil
|
2630dee5df3512c5f9ea39b71169b590138e4ddc
|
7a5c61eedfdd4bb3b10f506b8e11aac11b475f30
|
refs/heads/master
| 2021-09-06T21:48:32.855105
| 2018-02-12T05:23:12
| 2018-02-12T05:23:12
| 103,596,243
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,203
|
r
|
signalInfo.R
|
#'
#' signal information
#'
#' Looks up the km marker of a railway signal matching the given start/end
#' stations, direction, order and kind inside a workspace-specific table.
#'
#' @param workspace_no, startT,lastT,direction, order, kind
#' @return km of the signal
# NOTE(review): devtools::use_package() is a one-time project-setup command
# that edits DESCRIPTION; it should not execute every time this file is
# sourced.  It also sits in the middle of a roxygen block -- move or remove.
devtools::use_package("stringr")
#' @importFrom stringr str_c
#' @importFrom stringr str_detect
#' @importFrom compiler cmpfun
#' @export
signal=function(workspace_no,startT,lastT,direction,order,kind){
  # Wrap the body in a byte-compiled closure and immediately call it.
  A=cmpfun(
    function(){
      if(Sys.info()['sysname']=="Windows"){
        # Webapp root under Tomcat; trailing "/" expected by paths below.
        path=
          paste0(
            Sys.getenv("CATALINA_HOME"),"/webapps/bigTeam/"
          )
      }else if(Sys.info()['sysname']=="Linux"){
        # NOTE(review): this load()s a directory path (likely an error) and
        # never assigns `path`, so the load() call below would fail on Linux
        # -- confirm the intended Linux webapp root.
        load("/home/jsh/eclipse-workspace/bigTeam/src/main/webapp/")
      }
      # Normalise inputs to character for str_detect() matching.
      startT=as.character(startT)
      lastT=as.character(lastT)
      direction=as.character(direction)
      kind=as.character(kind)
      # Round the workspace number down to its hundred block (e.g. 123 -> 100).
      workspace_no=floor(workspace_no/100)*100
      # NOTE(review): load(file, envir) -- the second argument must be an
      # environment; passing the string "RData/DB(utf8).RData" will error.
      # Presumably load(paste0(path, "RData/DB(utf8).RData")) was intended.
      load(path,"RData/DB(utf8).RData")
      # NOTE(review): eval(parse()) to reach a variable named
      # paste0("signal_", workspace_no); prefer get() or a named list.
      compare=eval(parse(text=paste0("signal_",workspace_no)))
      # NOTE(review): "collape" is a typo for "collapse"; as written each
      # collape="..." is treated by str_c() as just another string to
      # concatenate, which may still produce the intended key -- verify.
      compareSet=str_c(compare[,1],collape=",",compare[,2],collape=",",compare[,3],collape=",",compare[,4],collape="번,",compare[,6],collape="")
      # First row whose concatenated key matches every search term
      # (multiplying 0/1 logicals acts as a vectorised AND).
      no=which(
        str_detect(compareSet,startT)*
          str_detect(compareSet,lastT)*
          str_detect(compareSet,direction)*
          str_detect(compareSet,paste0(order,"번"))*
          str_detect(compareSet,kind)==1
      )[1]
      # Column 5 holds the km value of the matched signal.
      return(compare[no,5])
    }
  )
  A()
}
|
bcbb2fe83d7872ebab19e0a4f150a3047b37b399
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/gaston/R/bm_vcf.r
|
58f833053d8accfd2207f9559e2c14b3022b4aa1
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,622
|
r
|
bm_vcf.r
|
# Read a VCF file into a gaston "bed.matrix" object.
#
#   file        path to the VCF file (tilde-expanded below)
#   max.snps    maximum number of SNPs to read; missing means "read all"
#   get.info    also keep the VCF INFO column in the snps data frame?
#   convert.chr map "X"/"Y"/"MT" chromosome names onto the integer ids
#               configured via the gaston.chr.* options?
#   verbose     forwarded to set.stats()
#
# (Removed the dead local `xx <- NULL`, which was never used.)
read.vcf <- function(file, max.snps, get.info = FALSE, convert.chr = TRUE, verbose = getOption("gaston.verbose",TRUE)) {
  filename <- path.expand(file)
  if (missing(max.snps)) max.snps <- -1L   # -1L tells the C code "no limit"
  L <- .Call("gg_read_vcf2", PACKAGE = "gaston", filename, max.snps, get.info)
  snp <- data.frame(chr = L$chr, id = L$id, dist = 0, pos = L$pos, A1 = L$A1, A2 = L$A2,
                    quality = L$quality, filter = factor(L$filter), stringsAsFactors = FALSE)
  if (get.info) snp$info <- L$info
  if (convert.chr) {
    chr <- as.integer(L$chr)   # non-numeric names become NA, patched below
    chr[L$chr == "X"  | L$chr == "x"]  <- getOption("gaston.chr.x")[1]
    chr[L$chr == "Y"  | L$chr == "y"]  <- getOption("gaston.chr.y")[1]
    chr[L$chr == "MT" | L$chr == "mt"] <- getOption("gaston.chr.mt")[1]
    if (any(is.na(chr)))
      warning("Some unknown chromosomes id's (try to set convert.chr = FALSE)")
    snp$chr <- chr
  }
  # Minimal pedigree: one row per sample, famid == id, no known parents.
  ped <- data.frame(famid = L$samples, id = L$samples, father = 0, mother = 0,
                    sex = 0, pheno = NA, stringsAsFactors = FALSE)
  x <- new("bed.matrix", bed = L$bed, snps = snp, ped = ped,
           p = NULL, mu = NULL, sigma = NULL, standardize_p = FALSE,
           standardize_mu_sigma = FALSE)
  if (getOption("gaston.auto.set.stats", TRUE)) x <- set.stats(x, verbose = verbose)
  x
}
# Read a VCF file into a gaston "bed.matrix", keeping only the SNPs whose
# positions appear in `positions` (filtering done in the C code).
#
#   file        path to the VCF file (tilde-expanded below)
#   positions   positions of the SNPs to keep
#   max.snps    maximum number of SNPs to read; missing means "read all"
#   get.info    also keep the VCF INFO column in the snps data frame?
#   convert.chr map "X"/"Y"/"MT" chromosome names onto the integer ids
#               configured via the gaston.chr.* options?
#   verbose     forwarded to set.stats()
#
# (Removed the dead local `xx <- NULL`, which was never used.)
read.vcf.filtered <- function(file, positions, max.snps, get.info = FALSE, convert.chr = TRUE, verbose = getOption("gaston.verbose",TRUE)) {
  filename <- path.expand(file)
  if (missing(max.snps)) max.snps <- -1L   # -1L tells the C code "no limit"
  L <- .Call("gg_read_vcf_filtered", PACKAGE = "gaston", filename, positions, max.snps, get.info)
  snp <- data.frame(chr = L$chr, id = L$id, dist = 0, pos = L$pos, A1 = L$A1, A2 = L$A2,
                    quality = L$quality, filter = factor(L$filter), stringsAsFactors = FALSE)
  if (get.info) snp$info <- L$info
  if (convert.chr) {
    chr <- as.integer(L$chr)   # non-numeric names become NA, patched below
    chr[L$chr == "X"  | L$chr == "x"]  <- getOption("gaston.chr.x")[1]
    chr[L$chr == "Y"  | L$chr == "y"]  <- getOption("gaston.chr.y")[1]
    chr[L$chr == "MT" | L$chr == "mt"] <- getOption("gaston.chr.mt")[1]
    if (any(is.na(chr)))
      warning("Some unknown chromosomes id's (try to set convert.chr = FALSE)")
    snp$chr <- chr
  }
  # Minimal pedigree: one row per sample, famid == id, no known parents.
  ped <- data.frame(famid = L$samples, id = L$samples, father = 0, mother = 0,
                    sex = 0, pheno = NA, stringsAsFactors = FALSE)
  x <- new("bed.matrix", bed = L$bed, snps = snp, ped = ped,
           p = NULL, mu = NULL, sigma = NULL, standardize_p = FALSE,
           standardize_mu_sigma = FALSE)
  if (getOption("gaston.auto.set.stats", TRUE)) x <- set.stats(x, verbose = verbose)
  x
}
|
9c51a3575f9d3bd376f51a56927ca818bf8e2c80
|
4a6b5be2d735c8d6c3caa4ba2c47803dd386d546
|
/R/centrality.R
|
25e798696ef5541ceab33fd2f9afefd3e4184de8
|
[] |
no_license
|
jonmcalder/tidygraph
|
8e19df9c90c696a24878d8bcdd4a1d3762b7faa4
|
fba663d33b1ac4dfc18b30488b4f5ea24a0d079a
|
refs/heads/master
| 2020-12-02T22:17:46.282865
| 2017-07-03T12:08:28
| 2017-07-03T12:08:28
| 96,108,967
| 0
| 0
| null | 2017-07-03T12:35:01
| 2017-07-03T12:35:01
| null |
UTF-8
|
R
| false
| false
| 4,411
|
r
|
centrality.R
|
#' Calculate node and edge centrality
#'
#' The centrality of a node measures the importance of node in the network. As
#' the concept of importance is ill-defined and dependent on the network and
#' the questions under consideration, many centrality measures exist.
#' `tidygraph` provides a consistent set of wrappers for all the centrality
#' measures implemented in `igraph` for use inside [dplyr::mutate()] and other
#' relevant verbs. All functions provided by `tidygraph` have a consistent
#' naming scheme and automatically calls the function on the graph, returning a
#' vector with measures ready to be added to the node data.
#'
#' @param ... Parameters passed on to the `igraph` implementation.
#'
#' @return A numeric vector giving the centrality measure of each node.
#'
#' @name centrality
#' @rdname centrality
#'
#' @examples
#' create_notable('bull') %>%
#' activate(nodes) %>%
#' mutate(importance = centrality_alpha())
#'
#' # Most centrality measures are for nodes but not all
#' create_notable('bull') %>%
#' activate(edges) %>%
#' mutate(importance = centrality_edge_betweenness())
NULL
#' @describeIn centrality Wrapper for [igraph::alpha_centrality()]
#' @importFrom igraph V alpha_centrality
#' @export
centrality_alpha <- function(...) {
  expect_nodes()
  g <- .G()
  # Evaluate the measure for every node of the active graph.
  alpha_centrality(graph = g, nodes = V(g), ...)
}
#' @describeIn centrality Wrapper for [igraph::authority_score()]
#' @importFrom igraph authority_score
#' @export
centrality_authority <- function(...) {
  expect_nodes()
  # authority_score() returns a list; the per-node scores live in $vector.
  scores <- authority_score(graph = .G(), ...)
  scores$vector
}
#' @describeIn centrality Wrapper for [igraph::betweenness()] and [igraph::estimate_betweenness()]
#' @importFrom igraph V betweenness estimate_betweenness
#' @importFrom rlang quos
#' @export
centrality_betweenness <- function(...) {
  expect_nodes()
  g <- .G()
  args <- quos(...)
  # A user-supplied `cutoff` argument selects the estimation variant.
  if (is.null(args$cutoff)) {
    betweenness(graph = g, v = V(g), ...)
  } else {
    estimate_betweenness(graph = g, vids = V(g), ...)
  }
}
#' @describeIn centrality Wrapper for [igraph::power_centrality()]
#' @importFrom igraph V power_centrality
#' @export
centrality_power <- function(...) {
  expect_nodes()
  g <- .G()
  # Evaluate the measure for every node of the active graph.
  power_centrality(graph = g, nodes = V(g), ...)
}
#' @describeIn centrality Wrapper for [igraph::closeness()] and [igraph::estimate_closeness()]
#' @importFrom igraph V closeness estimate_closeness
#' @importFrom rlang quos
#' @export
centrality_closeness <- function(...) {
  expect_nodes()
  g <- .G()
  args <- quos(...)
  # A user-supplied `cutoff` argument selects the estimation variant.
  if (is.null(args$cutoff)) {
    closeness(graph = g, vids = V(g), ...)
  } else {
    estimate_closeness(graph = g, vids = V(g), ...)
  }
}
#' @describeIn centrality Wrapper for [igraph::eigen_centrality()]
#' @importFrom igraph eigen_centrality
#' @export
centrality_eigen <- function(...) {
  expect_nodes()
  # eigen_centrality() returns a list; the per-node scores live in $vector.
  scores <- eigen_centrality(graph = .G(), ...)
  scores$vector
}
#' @describeIn centrality Wrapper for [igraph::hub_score()]
#' @importFrom igraph hub_score
#' @export
centrality_hub <- function(...) {
  expect_nodes()
  # hub_score() returns a list; the per-node scores live in $vector.
  scores <- hub_score(graph = .G(), ...)
  scores$vector
}
#' @describeIn centrality Wrapper for [igraph::page_rank()]
#' @importFrom igraph V page_rank
#' @export
centrality_pagerank <- function(...) {
  expect_nodes()
  g <- .G()
  # page_rank() returns a list; the per-node scores live in $vector.
  res <- page_rank(graph = g, vids = V(g), ...)
  res$vector
}
#' @describeIn centrality Wrapper for [igraph::subgraph_centrality()]
#' @importFrom igraph subgraph_centrality
#' @export
centrality_subgraph <- function(...) {
  expect_nodes()
  g <- .G()
  subgraph_centrality(graph = g, ...)
}
#' @describeIn centrality Wrapper for [igraph::degree()] and [igraph::strength()]
#' @importFrom igraph V degree strength
#' @importFrom rlang quos
#' @export
centrality_degree <- function(...) {
  expect_nodes()
  g <- .G()
  args <- quos(...)
  # When `weights` is supplied, degree generalises to vertex strength.
  if (is.null(args$weights)) {
    degree(graph = g, v = V(g), ...)
  } else {
    strength(graph = g, vids = V(g), ...)
  }
}
#' @describeIn centrality Wrapper for [igraph::edge_betweenness()]
#' @importFrom igraph edge_betweenness estimate_edge_betweenness E
#' @importFrom rlang quos
#' @export
centrality_edge_betweenness <- function(...) {
  expect_edges()
  g <- .G()
  args <- quos(...)
  # A user-supplied `cutoff` argument selects the estimation variant.
  if (is.null(args$cutoff)) {
    edge_betweenness(graph = g, e = E(g), ...)
  } else {
    estimate_edge_betweenness(graph = g, e = E(g), ...)
  }
}
|
9d9ea2a547a338dac268ba05d2e348d4788e0ca8
|
d33e98129206021371f50e4d74c44486a5a0a5a1
|
/install_load.R
|
3a8e4f8b38a423feddecf3bcda3a1524e4f5fa15
|
[] |
no_license
|
therealcrowder/Case_Study_2
|
312367728bdee372feedf29a6365caca4b58447d
|
1746696b4d0f39da3c3277edb7b04096dd0c9e03
|
refs/heads/master
| 2021-01-20T00:46:03.247362
| 2017-04-24T22:43:29
| 2017-04-24T22:43:29
| 89,184,241
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 596
|
r
|
install_load.R
|
# Packages needed downstream.  The original script unconditionally
# re-installed all seven packages on every run; install only the ones that
# are actually missing, then attach them all.
pkgs <- c("weathermetrics", "knitr", "markdown", "ggplot2",
          "plyr", "lubridate", "formatR")
missing_pkgs <- setdiff(pkgs, rownames(installed.packages()))
if (length(missing_pkgs) > 0) {
  install.packages(missing_pkgs, repos = "http://cran.us.r-project.org")
}
for (pkg in pkgs) {
  # character.only is required when the package name is held in a variable.
  library(pkg, character.only = TRUE)
}
|
4a643bcf4bbf6140e42def32c75c5f0931150198
|
91f977492d1e2757c0fabc52e3ade6680c5dec30
|
/tests/testthat/test_helsinki.R
|
87c8cfc3e83b0716c433b7fd6ea4ba5628a5959e
|
[] |
no_license
|
cran/helsinki
|
1fa8b241c639f87446ebced598ab59ec0e9a754b
|
13d68daba1321e156f77ae47d2c5e89a235d1669
|
refs/heads/master
| 2022-12-15T08:13:36.309621
| 2022-12-02T08:30:05
| 2022-12-02T08:30:05
| 18,805,161
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 892
|
r
|
test_helsinki.R
|
# wfs_api(): argument validation and graceful handling of HTTP failures.
test_that("wfs_api() works correctly", {
  # A base URL is mandatory and must use a supported scheme.
  expect_error(wfs_api(base.url = NULL))
  expect_error(wfs_api(base.url = "gopher://gopher.quux.org"))
  # An HTTP 404 should be reported via a message rather than an error.
  suppressMessages(expect_message(wfs_api(base.url = "https://httpstat.us/404", queries = "search")))
  # NOTE(review): "sleep" = 11000 presumably makes httpstat.us delay the
  # response past wfs_api()'s internal timeout -- confirm that timeout value.
  suppressMessages(expect_message(wfs_api(base.url = "https://httpstat.us/200",
                                          queries = c("sleep" = 11000))))
})
# get_city_map(): input validation and timeout behaviour.
test_that("get_city_map() works correctly", {
  # Non-supported city
  expect_error(get_city_map(city = "porvoo"))
  # Non-supported level
  expect_error(get_city_map(city = "helsinki", level = "keskialue"))
  # Extremely short timeout parameter (1 ms) to ensure connection timeout,
  # which should surface as a message rather than an error.
  suppressMessages(expect_message(get_city_map(city = "helsinki",
                                               level = "suuralue",
                                               timeout.s = 0.001)))
})
|
7bedde392ef9b7c4a9c32c44c63c45ebb9e98738
|
7917fc0a7108a994bf39359385fb5728d189c182
|
/cran/paws.analytics/man/mturk_list_workers_with_qualification_type.Rd
|
59a2f19022f6f534e48750a6b38ddbe7a308b618
|
[
"Apache-2.0"
] |
permissive
|
TWarczak/paws
|
b59300a5c41e374542a80aba223f84e1e2538bec
|
e70532e3e245286452e97e3286b5decce5c4eb90
|
refs/heads/main
| 2023-07-06T21:51:31.572720
| 2021-08-06T02:08:53
| 2021-08-06T02:08:53
| 396,131,582
| 1
| 0
|
NOASSERTION
| 2021-08-14T21:11:04
| 2021-08-14T21:11:04
| null |
UTF-8
|
R
| false
| true
| 1,616
|
rd
|
mturk_list_workers_with_qualification_type.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mturk_operations.R
\name{mturk_list_workers_with_qualification_type}
\alias{mturk_list_workers_with_qualification_type}
\title{The ListWorkersWithQualificationType operation returns all of the
Workers that have been associated with a given Qualification type}
\usage{
mturk_list_workers_with_qualification_type(QualificationTypeId, Status,
NextToken, MaxResults)
}
\arguments{
\item{QualificationTypeId}{[required] The ID of the Qualification type of the Qualifications to return.}
\item{Status}{The status of the Qualifications to return. Can be \code{Granted | Revoked}.}
\item{NextToken}{Pagination Token}
\item{MaxResults}{Limit the number of results returned.}
}
\value{
A list with the following syntax:\preformatted{list(
NextToken = "string",
NumResults = 123,
Qualifications = list(
list(
QualificationTypeId = "string",
WorkerId = "string",
GrantTime = as.POSIXct(
"2015-01-01"
),
IntegerValue = 123,
LocaleValue = list(
Country = "string",
Subdivision = "string"
),
Status = "Granted"|"Revoked"
)
)
)
}
}
\description{
The
\code{\link[=mturk_list_workers_with_qualification_type]{list_workers_with_qualification_type}}
operation returns all of the Workers that have been associated with a
given Qualification type.
}
\section{Request syntax}{
\preformatted{svc$list_workers_with_qualification_type(
QualificationTypeId = "string",
Status = "Granted"|"Revoked",
NextToken = "string",
MaxResults = 123
)
}
}
\keyword{internal}
|
3c08c46dee2fa0ee110936e2e4736754cf46c388
|
f7018991debe81fc53a55e9bf125e6514932379d
|
/NegBinModel.R
|
b0aa3e447aba8657e93d2d915600968945ccfb8d
|
[] |
no_license
|
nguyenty/stat544
|
66b583bbab1ea3a7ce858cacfff5be4ae22a7634
|
01c82e02c4ac72f77719a02a8c50f989d9a0028b
|
refs/heads/master
| 2016-08-04T17:46:33.683260
| 2014-06-02T17:32:25
| 2014-06-02T17:32:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,798
|
r
|
NegBinModel.R
|
library(rjags)
library(xtable)
############# modelm - using point mass mixture prior for signals ###############
# JAGS model string: negative-binomial likelihood with a per-gene
# overdispersion omega; the line effect tau and covariate effect beta each
# get a "spike and slab" prior -- a point mass at zero (selected by the
# Bernoulli indicators bintau/binbeta) mixed with a normal slab.
modelm <- "
model{
# likelihood
for (i in 1:length(y)){
y[i] ~ dnegbin((1/omega[gene[i]])/(lambda[i] + 1/omega[gene[i]]), 1/omega[gene[i]])
log(lambda[i]) <- alpha[gene[i]] + (-1)^line[i]*tau[gene[i]] + beta[gene[i]]*cov[i]
}
# prior level 1
for (i in 1:ngene){
alpha[i] ~ dnorm(0,1/10000)
omega[i] ~ dlnorm(0, 1/10000)
tau[i] <- (1-bintau[i])*normtau[i]
bintau[i] ~ dbern(pitau)
normtau[i] ~ dnorm(0,1/sigmatau^2)
beta[i] <- (1-binbeta[i])*normbeta[i]
binbeta[i] ~ dbern(pibeta)
normbeta[i] ~ dnorm(0,1/sigmabeta^2)
}
#prior level 2
pitau ~ dbeta(8,1)
pibeta ~ dbeta(8,1)
sigmatau ~ dunif(0,100)
sigmabeta ~ dunif(0,100)
}
"
######### modelh - using horseshoe prior for the signals ##############
# JAGS model string: tau and beta receive horseshoe priors (normal with
# per-gene half-Cauchy scales sigmatauj/sigmabetaj and half-Cauchy global
# scales).  NOTE(review): despite the file name (NegBinModel) this variant
# uses a Poisson likelihood (dpois), not a negative binomial -- confirm
# whether that difference from modelm is intentional.
modelh <- "
model{
# likelihood
for (i in 1:length(y)){
y[i] ~ dpois(lambda[i])
log(lambda[i]) <- alpha[gene[i]] + (-1)^line[i]*tau[gene[i]] + beta[gene[i]]*cov[i]
}
# prior level 1
for (j in 1:ngene){
alpha[j] ~ dnorm(0,1/10000)
tau[j] ~ dnorm(0, 1/sigmatauj[j]^2)
sigmatauj[j] ~ dt(0, 1/sigmatau^2, 1) T(0,)
beta[j] ~ dnorm(0, 1/sigmabetaj[j]^2)
sigmabetaj[j] ~ dt(0, 1/sigmabeta^2, 1) T(0,)
}
# prior level 2
sigmatau ~ dt(0, 1, 1) T(0,)
sigmabeta ~ dt(0, 1, 1) T(0,)
}
"
#############Sim_data_function ###################
library(reshape)
# Simulate a count data set of `ngene` genes x 2*K samples (K per line).
# Signals are drawn from point-mass mixtures: tau (line effect) and beta
# (covariate effect) are zero with probability pitau / pibeta, otherwise
# N(mutau, sigmatau^2) / N(mubeta, sigmabeta^2).  Counts are negative
# binomial with mean lambda and gene-level dispersion omega.
# NOTE: the function consumes RNG state in a fixed statement order -- do not
# reorder the random draws or seeded results will change.
sim_data <- function(K, ngene, mualpha, sigmaalpha,
                     pitau, mutau, sigmatau, pibeta,
                     mubeta, sigmabeta){
  # prior level 1
  x <- rnorm(2*K, 0, 1)                 # per-sample covariate
  bintau <- rbinom(ngene, 1, pitau)     # indicator 1 => tau forced to zero
  tau <- (1-bintau)*rnorm(ngene, mutau, sigmatau)
  binbeta <- rbinom(ngene, 1, pibeta)   # indicator 1 => beta forced to zero
  beta <- (1-binbeta)*rnorm(ngene, mubeta, sigmabeta)
  alpha <- rnorm(ngene, mualpha, sigmaalpha)   # gene-level intercepts
  lambda <- matrix(0, ncol = 2*K, nrow = ngene)
  omega <- exp(rnorm(ngene, 0, 2))      # gene-level overdispersion
  count <- matrix(0, ncol = 2*K, nrow = ngene)
  # Columns 1..K are line 1 (effect -tau); columns K+1..2K are line 2 (+tau).
  for (j in 1:ngene){
    for (k in 1:K){
      lambda[j, k] <- exp(alpha[j] - tau[j] + beta[j]*x[k])
      lambda[j, k+K] <- exp(alpha[j] + tau[j] + beta[j]*x[k+K])
      count[j, k] <- rnbinom(1, size = 1/omega[j], mu = lambda[j, k])
      count[j, k+K] <- rnbinom(1, size = 1/omega[j], mu = lambda[j, k+K])
    }
  }
  # Long format: reshape::melt() yields columns X1 = gene, X2 = sample.
  melt_count <- melt(count)
  melt_count$line <- NULL
  melt_count$line[melt_count$X2 %in% c(1:K)] <- 1
  melt_count$line[melt_count$X2 %in% c((K+1):(2*K))] <- 2
  melt_count$cov <- NULL
  for(i in 1:(2*K)) melt_count$cov[melt_count$X2 == i] <- x[i]
  # First five elements (y, gene, line, cov, ngene) are the JAGS data; the
  # rest carry the simulation truth for later scoring.
  dat <- list(y = melt_count$value,
              gene = melt_count$X1,
              line = melt_count$line,
              cov = melt_count$cov,
              ngene = ngene,
              bintau = bintau,
              tau = tau,
              binbeta = binbeta,
              beta = beta,
              alpha = alpha,
              omega = omega)
  return(dat)
}
###############run_mm_simulationdata#######################
out <- function(K, ngene, mualpha, sigmaalpha,
pitau,mutau, sigmatau, pibeta,
mubeta, sigmabeta, epstau, epsbeta){
data <- sim_data(K, ngene, mualpha, sigmaalpha,
pitau,mutau, sigmatau, pibeta,
mubeta, sigmabeta)
mm <- jags.model(textConnection(modelm), data[1:5],n.chains = 1)
resm <- coda.samples(mm, c("tau","alpha","beta",
"pitau","pibeta",
"binbeta","bintau",
"sigmatau","sigmabeta"), 2000)
mm_tau_est <- which(apply(resm[[1]][,paste("bintau[", 1:ngene,"]",sep ="")], 2,
function(x) mean(abs(1-x))) > 0.5)
mm_tau_est_eps <- which(apply(resm[[1]][,paste("tau[", 1:ngene,"]",sep ="")], 2,
function(x) mean(abs(x)>epstau)) > 0.5)
mm_tau_true <- which(data$tau!=0)
mm_tau_correct <- sum(mm_tau_est%in%mm_tau_true)
mm_tau_correct_eps <- sum(mm_tau_est_eps%in%mm_tau_true)
# mh <- jags.model(textConnection(modelh), data[1:5] ,n.chains = 1)
# resh <- coda.samples(mh, c("tau","alpha","beta",
# "sigmatauj", "sigmabetaj",
# "sigmatau","sigmabeta"), 2000)
# mh_tau_est <- which(apply(resh[[1]][,paste("sigmatauj[", 1:ngene,"]",sep ="")], 2,
# function(x) mean(1-1/(x^2+1))) > 0.5)
# mh_tau_est_eps <- which(apply(resh[[1]][,paste("tau[", 1:ngene,"]",sep ="")], 2,
# function(x) mean(abs(x)>epstau)) > 0.5)
# mh_tau_true <- which(data$tau!=0)
# mh_tau_correct <- sum(mh_tau_est%in%mh_tau_true)
# mh_tau_correct_eps <- sum(mh_tau_est_eps%in%mh_tau_true)
return(c( mm_tau_est = length(mm_tau_est),
mm_tau_correct_est = mm_tau_correct,
# mh_tau_est = length(mh_tau_est),
# mh_tau_correct_est = mh_tau_correct,
mm_tau_est_eps = length(mm_tau_est_eps),
mm_tau_correct_est_eps = mm_tau_correct_eps,
# mh_tau_est_eps = length(mh_tau_est_eps),
# mh_tau_correct_est_eps = mh_tau_correct_eps,
tau_true = length(mm_tau_true)
))
}
@
###### run_sim ##########
# Simulation settings.
K <- 12
ngene <- 100
# prior level 2
mualpha <- 3
sigmaalpha <- 2
pitau <- 0.8
# The 3 x 3 grid below indexes mutau[i] and mubeta[j], so these two must be
# length-3 vectors; they had been left as the scalar 1 (with the vectors
# commented out), making mutau[2:3] and mubeta[2:3] NA inside the loop.
mutau <- c(0.5, 1, 2)
sigmatau <- 0.25
pibeta <- 0.8
mubeta <- c(0.5, 1, 2)
sigmabeta <- 0.25
# Detection threshold for the effect-size rule: two thirds of the signal mean.
epstau <- epsbeta <- 2 * mutau / 3
# out() returns 5 named values per cell (the horseshoe comparison is
# disabled), so the third dimension is 5, not the original 9.
post_out <- array(0, dim = c(3, 3, 5))
for (i in 1:3) {
  for (j in 1:3) {
    post_out[i, j, ] <- out(K, ngene, mualpha, sigmaalpha,
                            pitau, mutau[i], sigmatau, pibeta,
                            mubeta[j], sigmabeta, epstau[i], epsbeta[j])
  }
}
|
51620b1f440a0cf57ae651a1722362b0ac40e9c0
|
599e6d59345ba36cbfb297de29a61243cc728e4d
|
/learn lattice.R
|
0b94a960cabb4dc2f4f2eef97f4ae75d0a902d70
|
[] |
no_license
|
abhatia2014/practice-R-Models
|
a0433e11ea49dd0598cac4648b8bb9779b71f130
|
c2e6c639e5565c2ccb22116bde562d60ed4c52f8
|
refs/heads/master
| 2021-01-11T00:06:47.639003
| 2016-10-13T01:21:29
| 2016-10-13T01:21:29
| 69,142,991
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,441
|
r
|
learn lattice.R
|
# Lattice graphics walkthrough on mtcars.
# NOTE(review): attach(mtcars) puts the columns (gear, cyl, hp, ...) on the
# search path -- an anti-pattern; the rest of the script depends on it.
getwd()
#y~x|A*B means display relationship between numeric variables x&y separately for every combination of factors A,B
library(lattice)
attach(mtcars)
# create factors with value labels
str(mtcars)
head(mtcars, 3)
gearf=factor(gear,levels=c(3,4,5),labels=c("3gears","4gears","5gears"))
table(gearf)
summary(cyl)
table(cyl)
cylf=factor(cyl,levels=c(4,6,8),labels=c("4cyl","6cyl","8cyl"))
table(cylf)
# kernel density plot (all cars pooled)
densityplot(~mpg,main="Density Plot",xlab="Miles per Gallon")
# kernel density plot, one panel per cylinder count
densityplot(~mpg|cylf,main="density plot by number of cyls",xlab="miles per gallon")
# same, stacked in a single column (alternate layout)
densityplot(~mpg|cylf,layout=c(1,3))
# boxplot for each combination of two factors
bwplot(cylf~mpg|gearf,ylab="cylinders",xlab="miles per gallon",main="mileage by cylinders and gears",layout=c(1,3))
# scatterplots for each combination of two factors
xyplot(mpg~wt|cylf,layout=c(1,3),ylab="miles per gallon", xlab="Car Weight")
# 3D scatter plot by factor level
cloud(mpg~wt*qsec|cylf,main="3D scatter plot by cylinders")
# dotplot for combination of two factors
dotplot(cylf~mpg|gearf)
# scatterplot matrix of columns mpg, disp, hp, drat, wt
# NOTE(review): the trailing comma leaves an empty second argument, which
# may error at run time -- confirm and drop it.
splom(mtcars[c(1,3,4,5,6)],)
# custom panel: points plus a loess smoother
smooth=function(x,y){
  panel.xyplot(x,y)
  panel.loess(x,y)
}
# bin horsepower into 3 intervals and condition on them
hpc=cut(hp,3)
xyplot(mpg~wt|hpc,scales=list(cex=0.8,col="red"),panel=smooth,xlab="car wt",ylab="miles per gallon")
|
fe620ec168b86a5e56342d4d0db7983505bb4074
|
b2f61fde194bfcb362b2266da124138efd27d867
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Database/Miller-Marin/trafficlight-controller/tlc02-nonuniform-depth-48/tlc02-nonuniform-depth-48.R
|
2fb97624991ddbf68d1007c88a1a69b6b1938944
|
[] |
no_license
|
arey0pushpa/dcnf-autarky
|
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
|
a6c9a52236af11d7f7e165a4b25b32c538da1c98
|
refs/heads/master
| 2021-06-09T00:56:32.937250
| 2021-02-19T15:15:23
| 2021-02-19T15:15:23
| 136,440,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 78
|
r
|
tlc02-nonuniform-depth-48.R
|
1e0c557ad954c2663f2c790d1f284f4b tlc02-nonuniform-depth-48.qdimacs 11222 29588
|
b742a166ca5b12280121cd112e1de34e05811854
|
b2d074c532e4077987d1452d79622eeda753d158
|
/kNNImputationNonRandom.R
|
d1000a5be0df6443de61f6850bc6fbf77f17d8c6
|
[] |
no_license
|
Alex-Nguyen/CS5331R
|
f7c477b68acc96f7ab0fbc6d3ef5a256d1627c3f
|
c2fb18ac0520e2e2a5d595df41338a49535e1d9b
|
refs/heads/master
| 2021-09-06T17:13:14.148598
| 2018-02-08T20:45:51
| 2018-02-08T20:45:51
| 104,676,043
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,915
|
r
|
kNNImputationNonRandom.R
|
# k-NN imputation of non-randomly missing values in iris, then a comparison
# of 3-NN classification accuracy on the original vs the imputed data.
# NOTE(review): knnImputation() (DMwR) and knn() (class) are called but no
# library() statements are visible here -- presumably loaded elsewhere.
original_data <- iris
set.seed(104)
####### Set initial parameters
portion <- 0.2        # percent of missing values to occupy the data. 0.02 = 2 %
training_size <- 0.7  # percent of data for training
data_length <- nrow(original_data)
missing_data <- original_data
# Non-random missingness: blank out Petal.Length in the FIRST 20% of rows.
id <- portion*data_length
missing_data[1:id, 'Petal.Length'] <- NA
missing_data
# missing_data <- knnImputation(missing_data)
# alternative: impute missing data with the column mean
# missing_data$Petal.Length[is.na(missing_data$Petal.Length)] <- mean(missing_data$Petal.Length, na.rm = TRUE)
missing_data <- knnImputation(missing_data)
# Root mean square error between imputed and true values.
rmse = sqrt(mean((original_data$Petal.Length - missing_data$Petal.Length)^2, na.rm = TRUE))
print("RMSE")
rmse
# Random split of the ORIGINAL iris data: 70% train / 30% test.
# NOTE(review): the comment below claims normalisation, but none is
# performed anywhere in this script -- confirm whether it was intended.
#first we normalize whole dataset
indexes <- sample(1:nrow(iris), floor(training_size*nrow(iris)))
iris.train <- iris[indexes, -5]
iris.train.target <- iris[indexes, 5]
iris.test <- iris[-indexes, -5]
iris.test.target <- iris[-indexes, 5]
# 3-NN on the original data; accuracy from the confusion-matrix diagonal.
original_prediction <- knn(train=iris.train, test=iris.test, cl=iris.train.target, k=3)
confusion_matrix <- table(iris.test.target, original_prediction)
accuracy <- (sum(diag(confusion_matrix)))/sum(confusion_matrix)
accuracy
# Repeat the split (different seed) and 3-NN on the IMPUTED data.
set.seed(103)
indexes_imputed <- sample(1:nrow(missing_data), floor(training_size*nrow(missing_data)))
iris.imputed.train <- missing_data[indexes_imputed, -5]
iris.imputed.train.target <- missing_data[indexes_imputed, 5]
iris.imputed.test <- missing_data[-indexes_imputed, -5]
iris.imputed.test.target <- missing_data[-indexes_imputed, 5]
imputed_prediction <- knn(train=iris.imputed.train, test=iris.imputed.test, cl=iris.imputed.train.target, k=3)
imputed_confusion_matrix <- table(iris.imputed.test.target, imputed_prediction)
imputed_confusion_matrix
imputed.accuracy <- (sum(diag(imputed_confusion_matrix)))/sum(imputed_confusion_matrix)
imputed.accuracy
|
a6678e50d2bb8ae9c1b37dacdd4dd70bf08ed6bb
|
4af4d40aaf9ce8311c75774d41be1256bb5730c7
|
/R/data.R
|
4d1fb663ea911f82c08af9f8b4cdcbbdff9af3fb
|
[] |
no_license
|
zhgarfield/violationsandpunishmentsdata
|
69376879d3dd0932ff40eedfabc14963e4436014
|
5bb456f8bd52298fe1772760498473470f8c7a4b
|
refs/heads/master
| 2023-04-12T06:27:17.070753
| 2023-03-15T09:38:12
| 2023-03-15T09:38:12
| 552,007,083
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,863
|
r
|
data.R
|
#' @title violations and punishments data
#' @description Primary data set of researcher-coded punishment types, SCCS socioecological predictor variables, and phylogenetic tree.
# NOTE(review): @format says 14 variables but 24 items are described below — confirm against the actual data frame.
#' @format A data frame with 131 rows and 14 variables:
#' \describe{
#'   \item{\code{SCCS_NAME}}{character SCCS culture name associated with ethnographic document.}
#'   \item{\code{EHRAF_NAME}}{character HRAF culture name associated with ethnographic document.}
#'   \item{\code{SCCS_ID}}{double SCCS identification number of society associated with ethnographic document.}
#'   \item{\code{Rape_viol}}{double Evidence for a violation of norms against rape.}
#'   \item{\code{Rape_SN_present}}{double Evidence for punishment of a violation of norms against rape.}
#'   \item{\code{War_viol}}{double Evidence for violation of norms against war cowardice.}
#'   \item{\code{War_SN_present}}{double Evidence for punishment of a violation of norms against war cowardice.}
#'   \item{\code{Religion_viol}}{double Evidence for violation of religious norms.}
#'   \item{\code{Religion_SN_present}}{double Evidence for punishment of a violation of religious norms.}
#'   \item{\code{Food_viol}}{double Evidence for a violation of food related or food sharing norms.}
#'   \item{\code{Food_SN_present}}{double Evidence for punishment of a violation of food related or food sharing norms.}
#'   \item{\code{Adultery_viol}}{double Evidence for a violation of norms against adultery.}
#'   \item{\code{Adultery_SN_present}}{double Evidence for punishment of a violation of norms against adultery.}
#'   \item{\code{Reputation_SN_present}}{double Evidence of reputational sanctions, where reputational sanctions are generally expected or specific instance of community endorsed reputational damage (more than gossip, results in net cost or general devaluation of violator). Coded as 1 for evidence for, 0 for no evidence.}
#'   \item{\code{Material_SN_present}}{double Generally expected or specific instance of community endorsed outcome that imposes direct economic or material costs on violator as a result of their violation. Coded as 1 for evidence for, 0 for no evidence.}
#'   \item{\code{Physical_SN_present}}{double Generally expected or specific instance of community endorsed outcome that results in a specific instance of physical harm or restraint as a result of their violation (not revenge). Coded as 1 for evidence for, 0 for no evidence.}
#'   \item{\code{Execution_SN_present}}{double Generally expected or specific instance of community endorsed outcome that results in the death of the violator as a result of their violation (not murder). Coded as 1 for evidence for, 0 for no evidence.}
#'   \item{\code{soc_strat}}{double Recoded SCCS V158 Social stratification. Coded as 1 for "Stratified", 0 for "Egalitarian".}
#'   \item{\code{storage}}{double Recoded SCCS V20 Food storage. Coded as 1 for present, 0 for absent.}
#'   \item{\code{husb}}{double Recoded SCCS V5 Animal husbandry - contribution to food supply. Coded as 1 for "None", 2 for "Present, not food source", 3 for "< 10% Food Supply", 4 for "< 50% Food supply", and 5 for "> 50% Food supply".}
#'   \item{\code{hunt}}{double Recoded SCCS V9 Hunting - contribution to food supply. Coded as 1 for "None", 2 for "< 10% of Food supply", 3 for "< 50%, and less than any other single source", 4 for "< 50%, and more than any other single source", 5 for "> 50%".}
#'   \item{\code{comm_size}}{double Recoded SCCS V63 Community size. Coded as 1 for "< 50", 2 for "50-99", 3 for "100-199", 4 for "200-399", 5 for "400-999", 6 for "1,000-4,999", 7 for "5,000-49,999", and 8 for "> 50,000".}
#'   \item{\code{trade}}{double Recoded SCCS V1 Intercommunity trade as food source. Coded as 0 for "Minimal/Absent", 1 for "Present".}
#'   \item{\code{tree_name}}{character Phylogenetic tree name.}
#'}
"violpundata"
#' @title punishments data (long form)
#' @description A long-form version of the data of norm violation, punishments, and codings for all cultures.
#' @format A data frame with 2620 rows and 5 variables:
#' \describe{
#'   \item{\code{HRAF_ID}}{character HRAF OWC Culture ID.}
#'   \item{\code{Coding}}{double Coding for culture by variable. Coded as 1 for evidence for, 0 for no evidence.}
#'   \item{\code{Domain}}{character Domain of norm violation type being coded.}
#'   \item{\code{Sanction}}{character Punishment type being coded.}
#'   \item{\code{Coding_label}}{character Coding for culture by variable, with text as label.}
#'}
#'
"punishments_data_long"
#' @title culture map data
#' @description A data frame for producing a map of societies in the sample.
#' @format A data frame with 131 rows and 4 variables:
#' \describe{
#'   \item{\code{HRAF_ID}}{character HRAF OWC Culture ID.}
#'   \item{\code{Subsistence Type}}{character HRAF subsistence type for society.}
#'   \item{\code{latitude}}{double Latitude for society location.}
#'   \item{\code{longitude}}{double Longitude for society location.}
#'}
#'
"culturemapdata"
#' @title document data
#' @description A data frame of document-level metadata.
# NOTE(review): @format says 6 variables but 7 items are described below — confirm against the actual data frame.
#' @format A data frame with 131 rows and 6 variables:
#' \describe{
#'   \item{\code{EHRAF_NAME}}{character eHRAF culture name.}
#'   \item{\code{document_ID}}{character eHRAF document ID for document.}
#'   \item{\code{culture_ID}}{double eHRAF OWC ID for culture.}
#'   \item{\code{document_publication_date}}{integer Document publication year.}
#'   \item{\code{document_page_count}}{integer Document page count.}
#'   \item{\code{female_coauthor_present}}{integer Presence of a female author or co-author. Coded as 1 for present, 0 for absent.}
#'   \item{\code{tree_order}}{integer Order of societies in phylogenetic tree.}
#'}
#'
"documentdata"
#' @title phylogenetic tree
#' @description A phylogenetic tree in list form consisting of 254 edges.
#' @format A list of four vectors.
#
#'
"tree"
|
9078d60c0c43dafd6186c1e61bb51b45def2c90e
|
198aafbe613df9a2cad68e70329b4cb133572018
|
/R_passion_tool2.R
|
7726a13835291661351256ad985465b4b0b3a81a
|
[] |
no_license
|
josemtnzjmnz/PASSION_WDM_planner
|
332972968e8b6713f59d357c57cd42fe7403a01d
|
8e27457d96946e5c8c4b6cdf845caae755a6da18
|
refs/heads/main
| 2023-06-01T14:43:00.083688
| 2021-06-16T08:51:47
| 2021-06-16T08:51:47
| 377,280,190
| 0
| 0
| null | 2021-06-15T20:01:38
| 2021-06-15T20:01:37
| null |
UTF-8
|
R
| false
| false
| 36,999
|
r
|
R_passion_tool2.R
|
# EU H2020 PASSION
# Planning tool
# Jose Alberto Hernandez
# May 2021
# Inputs:
# Network topology and traffic (nodesLabeling and crossmatrix)
# Passion OSNR characterisation for lightpaths
# Passion cost values
# Output:
# Lightpaths, both primary and secondary, and their allocation in the fibre/wavelengths (First-Fit)
# Node dimensioning (number of ROADM degrees and Passion S-BVTs)
# Cost per node and total
# Required libraries
library(igraph)
# NOTE(review): hard-coded setwd() ties the script to one machine; the input CSVs below
# are read relative to this directory — adjust before running elsewhere.
setwd("D:/github_projects/PASSION_jose")
#setwd("~/Google Drive/Research/Proyectos/PASSION_jose")
#setwd("~/github_projects")
# NOTE(review): rm(list=ls()) wipes the whole workspace and options(warn=-1) silences
# all warnings for the rest of the session — both are deliberate here but risky defaults.
rm(list=ls())
options(warn=-1)
# Graph preparation
# Build the PASSION optical-network graph from a node table and an adjacency matrix.
#
# Args:
#   nodes.df         : data frame with columns Nodes and Types (HL12/HL3/HL4/HL5);
#                      vertex names become "<Node>_<Type>".
#   connectivity.mat : symmetric matrix of link weights (distances); 0 = no link.
#   distCoeff        : scale factor applied uniformly to all link weights.
#                      Default 1 reproduces the previous behaviour exactly.
#                      (Fix: this argument used to be accepted but silently ignored.)
#
# Returns: an undirected, weighted igraph object with named/typed vertices,
#          edges named "<v1>--<v2>", and every HL5 vertex removed.
get_gPass <- function(nodes.df, connectivity.mat, distCoeff = 1) {
  # distCoeff scales the distance matrix; with the default of 1 the weights are unchanged.
  gPass = graph_from_adjacency_matrix(distCoeff * connectivity.mat, mode = c("undirected"),
                                      weighted = TRUE, diag = TRUE, add.colnames = NULL, add.rownames = NA)
  V(gPass)$name = paste(nodes.df$Nodes, nodes.df$Types, sep = "_")
  V(gPass)$type = as.character(nodes.df$Types)
  E(gPass)$name = paste(get.edgelist(gPass)[, 1], get.edgelist(gPass)[, 2], sep = "--")
  # Removing HL5s: the planning tool only routes between HL4/HL3 and HL12 nodes.
  gPass = delete_vertices(gPass, V(gPass)[which(V(gPass)$type == "HL5")])
  return(gPass)
}
# Main code
alpha = 1
# Loading topology
print("Loading Topology and OSNR configuration")
# Choose nodesLabeling_Germany.csv, nodesLabeling_Tokyo.csv, nodesLabeling_Milano.csv, nodesLabeling_Mexico_short.csv,
nodes.df = read.csv(file="nodesLabeling_Tokyo.csv", sep=";", header=F);
colnames(nodes.df) = c("Nodes","Types","Traffic")
nodes.df$Types = as.character(nodes.df$Types)
# Choose crossMatrix_Germany.csv, crossMatrix_Tokyo.csv, crossMatrix_Milano.csv, crossMatrix_Mexico_short.csv
connectivity.mat = alpha*as.matrix(read.csv(file="crossMatrix_Tokyo.csv", sep = ";",header=F))
nodes.df[which(nodes.df$Types=="HL5"),"Types"] = "HL5"
nodes.df[which(nodes.df$Types=="HL4"),"Types"] = "HL4"
nodes.df[which(nodes.df$Types=="HL3"),"Types"] = "HL3"
nodes.df[which(nodes.df$Types=="HL2"),"Types"] = "HL12"
nodes.df$Types = factor(nodes.df$Types)
rownames(nodes.df)=paste(nodes.df$Nodes,nodes.df$Types,sep="_");
colnames(connectivity.mat)=paste(nodes.df$Nodes,nodes.df$Types,sep="_");
rownames(connectivity.mat)=paste(nodes.df$Nodes,nodes.df$Types,sep="_");
# OSNR values
osnr_25G.mat = read.csv("osnr_25_oh_fec.csv",header=TRUE,sep=";")
osnr_40G.mat = read.csv("osnr_40_oh_fec.csv",header=TRUE,sep=";")
osnr_50G.mat = read.csv("osnr_50_oh_fec.csv",header=TRUE,sep=";")
# Load graph
gPass = get_gPass(nodes.df,connectivity.mat)
N_HL12s = length(which(V(gPass)$type=="HL12"))
N_HL3s = length(which(V(gPass)$type=="HL3"))
N_HL4s = length(which(V(gPass)$type=="HL4"))
if ("Traffic" %in% colnames(nodes.df)) {
demand_matrix = nodes.df$Traffic
Traff = mean(nodes.df$Traffic)
} else {
Traff = 600; # 600G per HL4 toward HL12
demand_matrix = rnorm(N_HL4s,mean=Traff,sd=0.2*Traff)
}
FFallocation = as.data.frame(matrix(NA,
nrow=length(E(gPass)),
ncol=40*ceiling(ceiling(0.6*Traff/50*length(V(gPass)))/40)))
Sallocation = FFallocation; Dallocation = FFallocation
nlambdas_HL12s = 0*(1:length(V(gPass)[which(V(gPass)$type=="HL12")]))
nlambdas_HL3s = 0*(1:length(V(gPass)[which(V(gPass)$type=="HL3")]))
nlambdas_HL4s = 0*(1:length(V(gPass)[which(V(gPass)$type=="HL4")]))
speed_HL3s = 0*(1:length(V(gPass)[which(V(gPass)$type=="HL3")]))
speed_HL4s = 0*(1:length(V(gPass)[which(V(gPass)$type=="HL4")]))
Results.df = data.frame(matrix(c(1:18),nrow=1,ncol=18), stringsAsFactors = FALSE)
colnames(Results.df) = c("Source","Destination","prim_sec",
"distance_KM","distance_hops",
"N_HL5s","N_HL4s","N_HL3s","N_HL12s",
"OSNR_e2e","OSNR_req50G","OSNR_req40G","OSNR_req25G",
"Can_50G","Can_40G","Can_25G",
"FullPath","LinksDistance")
E(gPass)$traff = 0;
ll_traff = E(gPass)$traff
ll_links = E(gPass)$name
print("Finding lightpaths for HL4 nodes")
# For every HL4 node: find a primary shortest path to the best HL12, allocate
# wavelengths first-fit, then compute two protection alternatives (a "plan-B"
# path that penalises reuse of primary links, and a fully node/link-disjoint path).
# Results are appended to Results.df; per-link usage persists in ll_traff.
n_exec = 0;
for (HL4index in (1:N_HL4s)) { #N_HL4s)) {
n_exec = n_exec + 1;
# Rebuild a fresh graph each iteration (the disjoint search below deletes vertices).
gPass = get_gPass(nodes.df,connectivity.mat)
HL12s = V(gPass)[which(V(gPass)$type=="HL12")]
HL3s = V(gPass)[which(V(gPass)$type=="HL3")]
HL4s = V(gPass)[which(V(gPass)$type=="HL4")]
HL5s = V(gPass)[which(V(gPass)$type=="HL5")]
E(gPass)$traff = ll_traff
Source_node = V(gPass)[which(V(gPass)$type=="HL4")][HL4index]
Source = V(gPass)[which(V(gPass)$type=="HL4")][HL4index]$name
# Shortest paths to every HL12: by hop count (weights = NA) and by km (edge weights).
aa_minhops = get.shortest.paths(gPass,
from = V(gPass)[which(V(gPass)$name==Source)], #from = V(gPass)[which(V(gPass)$type=="HL4")][HL4index],
to = V(gPass)[which(V(gPass)$type=="HL12")],
output = 'epath',
weights = NA)
aa_minKm = get.shortest.paths(gPass,
from = V(gPass)[which(V(gPass)$name==Source)], #from = V(gPass)[which(V(gPass)$type=="HL4")][HL4index],
to = V(gPass)[which(V(gPass)$type=="HL12")],
output = 'epath')
# Rank candidate HL12s by hops first (1e5 per hop), km as tie-break.
aux_HL12_winner = unlist(lapply(aa_minhops$epath,length))*1e5
for (ii in c(1:length(aux_HL12_winner))){
aux_HL12_winner[ii] = aux_HL12_winner[ii] + sum(aa_minKm$epath[[ii]]$weight)
}
HL12_winner_prim = which(aux_HL12_winner==min(aux_HL12_winner))
# Primary path
# NOTE(review): this overwrite discards the hops+km ranking above and keeps
# min-hops only — confirm which criterion is intended.
HL12_winner_prim = order(unlist(lapply(aa_minhops$epath,length)),decreasing = F)[1]
Destination = HL12s[HL12_winner_prim]$name
Destination_node = HL12s[HL12_winner_prim]
aa_primary_e = get.shortest.paths(gPass,
from = Source_node,
to = Destination_node,
output = 'epath',
weights = NA)
aa_primary_v = get.shortest.paths(gPass,
from = Source_node,
to = Destination_node,
output = 'vpath',
weights = NA)
# Record one more lightpath on every link of the primary path.
E(gPass)$traff = ll_traff
E(gPass)[which(E(gPass) %in% aa_primary_e$epath[[1]])]$traff = E(gPass)[which(E(gPass) %in% aa_primary_e$epath[[1]])]$traff +1
ll_traff = E(gPass)$traff
# First iteration overwrites the CSV; subsequent iterations append.
if (n_exec == 1) {
append_var = F;
}else{
append_var = T;
}
write.table( t(aa_primary_v$vpath[[1]]$name),
file="primary_path.csv",
append = append_var,
sep=';',
row.names=F,
col.names=F )
#write.csv(aa_primary_v$vpath[[1]]$name, file = "primary_path.csv", row.names = FALSE) # saves a csv file
PrimaryPath = paste0(aa_primary_v$vpath[[1]]$name,collapse="++++")
Node_sequence = PrimaryPath
# node sequence metrics
disthops_winner = length(aa_primary_v$vpath[[1]])-1
HL5_hops = sum(aa_primary_v$vpath[[1]] %in% HL5s)
HL4_hops = sum(aa_primary_v$vpath[[1]] %in% HL4s)
HL3_hops = sum(aa_primary_v$vpath[[1]] %in% HL3s)
HL12_hops = sum(aa_primary_v$vpath[[1]] %in% HL12s)
# 1 amplifier per link
dist_links = aa_primary_e$epath[[1]]$weight
distKm_winner = sum(aa_primary_e$epath[[1]]$weight)
Link_sequence = paste0(dist_links,collapse = " ++++ ") # in km
# End-to-end OSNR: per-link OSNR of 58 - 6 - 0.25 dB/km, combined on a linear scale.
osnr_e2e = -10*log10(sum(10^(-0.1*(58-6-0.25*dist_links))))
# similar to 58-0.25*sum(dist_links)-6-10*log10(length(dist_links)-1) , but the above is exact
# osnr of path
# Required OSNR per rate, looked up by hop counts in the node hierarchy.
osnr_req_50G = osnr_50G.mat[HL4_hops+1,HL12_hops+HL3_hops+2]
osnr_req_40G = osnr_40G.mat[HL4_hops+1,HL12_hops+HL3_hops+2]
osnr_req_25G = osnr_25G.mat[HL4_hops+1,HL12_hops+HL3_hops+2]
can50G = ifelse(osnr_e2e>osnr_req_50G,TRUE,FALSE)
can40G = ifelse(osnr_e2e>osnr_req_40G,TRUE,FALSE)
can25G = ifelse(osnr_e2e>osnr_req_25G,TRUE,FALSE)
# Highest line rate the path's OSNR supports (0 if none).
if (can50G == TRUE) {
maxSpeed = 50
} else {
if (can40G == TRUE) {
maxSpeed = 40
} else {
if (can25G == TRUE) {
maxSpeed = 25
} else {
maxSpeed = 0
}
}
}
# NOTE(review): if maxSpeed is 0 this is Inf and the first-fit search below never terminates.
nlambdas = ceiling(demand_matrix[HL4index]/maxSpeed)
# we do the first fit allocation
# Scan slots left-to-right for a run of nlambdas contiguous free slots on every path link.
eindex = which(E(gPass) %in% aa_primary_e$epath[[1]])
conditionFF = FALSE
elambda = 0
while (conditionFF == FALSE) {
elambda = elambda + 1
if (prod(is.na(FFallocation[eindex,(elambda:(elambda+nlambdas-1))])) == 1) { # free slot run
FFallocation[eindex, elambda:(elambda+nlambdas-1)] = paste("lightpath",Source,Destination, sep = "++")
Sallocation[eindex, elambda:(elambda+nlambdas-1)] = Source
Dallocation[eindex, elambda:(elambda+nlambdas-1)] = Destination
conditionFF = TRUE
} else { # occupied, keep searching
conditionFF = FALSE
}
}
speed_HL4s[HL4index] = maxSpeed
nlambdas_HL4s[HL4index] = nlambdas_HL4s[HL4index] + nlambdas
nlambdas_HL12s[which(HL12s$name == Destination)] = nlambdas_HL12s[which(HL12s$name == Destination)] + nlambdas
datapoint = c(Source, Destination, "Primary_path",
distKm_winner, disthops_winner,
HL5_hops, HL4_hops, HL3_hops, HL12_hops,
osnr_e2e, osnr_req_50G, osnr_req_40G, osnr_req_25G,
ifelse(osnr_e2e>osnr_req_50G,TRUE,FALSE),
ifelse(osnr_e2e>osnr_req_40G,TRUE,FALSE),
ifelse(osnr_e2e>osnr_req_25G,TRUE,FALSE),
Node_sequence,Link_sequence)
Results.df = rbind(Results.df,datapoint)
colnames(Results.df) = c("Source","Destination","prim_sec",
"distance_KM","distance_hops",
"N_HL5s","N_HL4s","N_HL3s","N_HL12s",
"OSNR_e2e","OSNR_req50G","OSNR_req40G","OSNR_req25G",
"Can_50G","Can_40G","Can_25G",
"FullPath","LinksDistance")
# Plan-B secondary path
# Penalise every primary link with +1000 km so the shortest-path search avoids
# them where possible; the 1000s are stripped out again below.
all_weights_orig = E(gPass)$weight
weights_aux = all_weights_orig
weights_aux[(E(gPass) %in% aa_primary_e$epath[[1]])] = weights_aux[(E(gPass) %in% aa_primary_e$epath[[1]])] + 1000
E(gPass)$weight = weights_aux
aa_minKm_plan_b = get.shortest.paths(gPass,
from = V(gPass)[which(V(gPass)$name==Source)], #from = V(gPass)[which(V(gPass)$type=="HL4")][HL4index],
to = V(gPass)[which(V(gPass)$type=="HL12")],
output = 'epath')
aux_HL12_winner_sec_plan_b = unlist(lapply(aa_minKm_plan_b$epath,length))*1e5
for (ii in c(1:length(aux_HL12_winner_sec_plan_b))){
aux_HL12_winner_sec_plan_b[ii] = aux_HL12_winner_sec_plan_b[ii] + sum(aa_minKm_plan_b$epath[[ii]]$weight)
}
HL12_winner_sec_planb = which(aux_HL12_winner_sec_plan_b==min(aux_HL12_winner_sec_plan_b))
# The secondary must terminate at a different HL12 than the primary.
if (HL12_winner_prim == HL12_winner_sec_planb) {
HL12_winner_sec_planb = order(aux_HL12_winner_sec_plan_b,decreasing = F)[2]
}
Destination = HL12s[HL12_winner_sec_planb]$name
Destination_node = HL12s[HL12_winner_sec_planb]
aa_secondary_e_planb = get.shortest.paths(gPass,
from = Source_node,
to = Destination_node,
output = 'epath')
aa_secondary_v_planb = get.shortest.paths(gPass,
from = Source_node,
to = Destination_node,
output = 'vpath')
write.table( t(aa_secondary_v_planb$vpath[[1]]$name),
file="secondary_path.csv",
append = append_var,
sep=';',
row.names=F,
col.names=F )
SecondaryPath_PlanB = paste0(aa_secondary_v_planb$vpath[[1]]$name,collapse="++++")
Destination = HL12s[HL12_winner_sec_planb]$name
Node_sequence = SecondaryPath_PlanB
# node sequence metrics
disthops_winner = length(aa_secondary_v_planb$vpath[[1]])-1
HL5_hops = sum(aa_secondary_v_planb$vpath[[1]] %in% HL5s)
HL4_hops = sum(aa_secondary_v_planb$vpath[[1]] %in% HL4s)
HL3_hops = sum(aa_secondary_v_planb$vpath[[1]] %in% HL3s)
HL12_hops = sum(aa_secondary_v_planb$vpath[[1]] %in% HL12s)
# 1 amplifier per link
# Undo the +1000 penalty to recover true km; its multiples count shared links.
dist_links = aa_secondary_e_planb$epath[[1]]$weight
dist_links[which(dist_links>999)] = dist_links[which(dist_links>999)] -1000
Nshared_links = floor(sum(aa_secondary_e_planb$epath[[1]]$weight)/1000)
Nshared_nodes = sum(as.numeric(aa_secondary_v_planb$vpath[[1]] %in% aa_primary_v$vpath[[1]]))-1
distKm_winner = sum(dist_links)
Link_sequence = paste0(dist_links,collapse = " ++++ ") # in km
osnr_e2e = -10*log10(sum(10^(-0.1*(58-6-0.25*dist_links))))
# osnr of path
osnr_req_50G = osnr_50G.mat[HL4_hops+1,HL12_hops+HL3_hops+2]
osnr_req_40G = osnr_40G.mat[HL4_hops+1,HL12_hops+HL3_hops+2]
osnr_req_25G = osnr_25G.mat[HL4_hops+1,HL12_hops+HL3_hops+2]
# Secondary path, link and node disjoint
# Delete all primary-path vertices (except the source) and search again.
E(gPass)$weight = all_weights_orig
gPass = delete_vertices(gPass, aa_primary_v$vpath[[1]][2:length(aa_primary_v$vpath[[1]])])
HL12s = V(gPass)[which(V(gPass)$type=="HL12")]
HL3s = V(gPass)[which(V(gPass)$type=="HL3")]
HL4s = V(gPass)[which(V(gPass)$type=="HL4")]
HL5s = V(gPass)[which(V(gPass)$type=="HL5")]
aa_secondary_e = get.shortest.paths(gPass,
from = V(gPass)[which(V(gPass)$name==Source)], #from = V(gPass)[which(V(gPass)$type=="HL4")][HL4index],
to = V(gPass)[which(V(gPass)$type=="HL12")],
output = 'epath',
weights=NA)
aa_secondary_minKm_e = get.shortest.paths(gPass,
from = V(gPass)[which(V(gPass)$name==Source)], #from = V(gPass)[which(V(gPass)$type=="HL4")][HL4index],
to = V(gPass)[which(V(gPass)$type=="HL12")],
output = 'epath')
aux_HL12_winner_sec = unlist(lapply(aa_secondary_minKm_e$epath,length))*1e5
for (ii in c(1:length(aux_HL12_winner_sec))){
aux_HL12_winner_sec[ii] = aux_HL12_winner_sec[ii] + sum(aa_secondary_minKm_e$epath[[ii]]$weight)
}
HL12s_winner_sec = which(aux_HL12_winner_sec==min(aux_HL12_winner_sec))
# An empty epath means no HL12 is reachable once the primary is removed:
# fall back to reporting the plan-B (shared) path computed above.
if (min(unlist(lapply(aa_secondary_e$epath,length)))==0) {
# node unreachable
datapoint = c(Source, Destination, paste("Secondary_path_shared_",Nshared_nodes,"nodes",Nshared_links,"links",sep=""),
distKm_winner, disthops_winner,
HL5_hops, HL4_hops, HL3_hops, HL12_hops,
osnr_e2e, osnr_req_50G, osnr_req_40G, osnr_req_25G,
ifelse(osnr_e2e>osnr_req_50G,TRUE,FALSE),
ifelse(osnr_e2e>osnr_req_40G,TRUE,FALSE),
ifelse(osnr_e2e>osnr_req_25G,TRUE,FALSE),
Node_sequence,Link_sequence)
Results.df = rbind(Results.df,datapoint)
} else {
HL12s_winner_sec = order(unlist(lapply(aa_secondary_e$epath,length)),decreasing = F)[1]
Destination = V(gPass)[which(V(gPass)$type=="HL12")][HL12s_winner_sec]$name
aa_secondary_e = get.shortest.paths(gPass,
from = V(gPass)[which(V(gPass)$name==Source)], #from = V(gPass)[which(V(gPass)$type=="HL4")][HL4index],
to = V(gPass)[which(V(gPass)$type=="HL12")][HL12s_winner_sec],
output = 'epath',
weights = NA)
aa_secondary_v = get.shortest.paths(gPass,
from = V(gPass)[which(V(gPass)$name==Source)], #from = V(gPass)[which(V(gPass)$type=="HL4")][HL4index],
to = V(gPass)[which(V(gPass)$type=="HL12")][HL12s_winner_sec],
output = 'vpath',
weights = NA)
SecondaryPath = paste0(aa_secondary_v$vpath[[1]]$name,collapse="++++")
Node_sequence = SecondaryPath
# node sequence metrics
disthops_winner = length(aa_secondary_v$vpath[[1]])-1
HL5_hops = sum(aa_secondary_v$vpath[[1]] %in% HL5s)
HL4_hops = sum(aa_secondary_v$vpath[[1]] %in% HL4s)
HL3_hops = sum(aa_secondary_v$vpath[[1]] %in% HL3s)
HL12_hops = sum(aa_secondary_v$vpath[[1]] %in% HL12s)
# 1 amplifier per link
dist_links = aa_secondary_e$epath[[1]]$weight
distKm_winner = sum(aa_secondary_e$epath[[1]]$weight)
Link_sequence = paste0(dist_links,collapse = " ++++ ") # in km
osnr_e2e = -10*log10(sum(10^(-0.1*(58-6-0.25*dist_links))))
# osnr of path
osnr_req_50G = osnr_50G.mat[HL4_hops+1,HL12_hops+HL3_hops+2]
osnr_req_40G = osnr_40G.mat[HL4_hops+1,HL12_hops+HL3_hops+2]
osnr_req_25G = osnr_25G.mat[HL4_hops+1,HL12_hops+HL3_hops+2]
datapoint = c(Source, Destination, "Secondary_path_totally_disjoint",
distKm_winner, disthops_winner,
HL5_hops, HL4_hops, HL3_hops, HL12_hops,
osnr_e2e, osnr_req_50G, osnr_req_40G, osnr_req_25G,
ifelse(osnr_e2e>osnr_req_50G,TRUE,FALSE),
ifelse(osnr_e2e>osnr_req_40G,TRUE,FALSE),
ifelse(osnr_e2e>osnr_req_25G,TRUE,FALSE),
Node_sequence,Link_sequence)
Results.df = rbind(Results.df,datapoint)
}
}
print("Finding lightpaths for HL3 nodes")
# Same procedure as the HL4 loop, now sourced at HL3 nodes: primary min-hop path
# to an HL12, first-fit wavelength allocation, plan-B (penalised-reuse) secondary
# and fully disjoint secondary. Differences from the HL4 loop: no CSV export of
# the paths, and counters go into speed_HL3s / nlambdas_HL3s.
for (HL3index in (1:N_HL3s)) { #N_HL4s)) {
gPass = get_gPass(nodes.df,connectivity.mat)
HL12s = V(gPass)[which(V(gPass)$type=="HL12")]
HL3s = V(gPass)[which(V(gPass)$type=="HL3")]
HL4s = V(gPass)[which(V(gPass)$type=="HL4")]
HL5s = V(gPass)[which(V(gPass)$type=="HL5")]
E(gPass)$traff = ll_traff
Source_node = V(gPass)[which(V(gPass)$type=="HL3")][HL3index]
Source = V(gPass)[which(V(gPass)$type=="HL3")][HL3index]$name
# Candidate HL12 destinations by hop count and by km.
aa_minhops = get.shortest.paths(gPass,
from = V(gPass)[which(V(gPass)$name==Source)], #from = V(gPass)[which(V(gPass)$type=="HL4")][HL4index],
to = V(gPass)[which(V(gPass)$type=="HL12")],
output = 'epath',
weights = NA)
aa_minKm = get.shortest.paths(gPass,
from = V(gPass)[which(V(gPass)$name==Source)], #from = V(gPass)[which(V(gPass)$type=="HL4")][HL4index],
to = V(gPass)[which(V(gPass)$type=="HL12")],
output = 'epath')
aux_HL12_winner = unlist(lapply(aa_minhops$epath,length))*1e5
for (ii in c(1:length(aux_HL12_winner))){
aux_HL12_winner[ii] = aux_HL12_winner[ii] + sum(aa_minKm$epath[[ii]]$weight)
}
HL12_winner_prim = which(aux_HL12_winner==min(aux_HL12_winner))
# Primary path
# NOTE(review): as in the HL4 loop, this overwrite keeps min-hops only,
# discarding the km tie-break computed above.
HL12_winner_prim = order(unlist(lapply(aa_minhops$epath,length)),decreasing = F)[1]
Destination = HL12s[HL12_winner_prim]$name
Destination_node = HL12s[HL12_winner_prim]
aa_primary_e = get.shortest.paths(gPass,
from = Source_node,
to = Destination_node,
output = 'epath',
weights = NA)
aa_primary_v = get.shortest.paths(gPass,
from = Source_node,
to = Destination_node,
output = 'vpath',
weights = NA)
# Accumulate per-link lightpath counts across iterations.
E(gPass)$traff = ll_traff
E(gPass)[which(E(gPass) %in% aa_primary_e$epath[[1]])]$traff = E(gPass)[which(E(gPass) %in% aa_primary_e$epath[[1]])]$traff +1
ll_traff = E(gPass)$traff
PrimaryPath = paste0(aa_primary_v$vpath[[1]]$name,collapse="++++")
Node_sequence = PrimaryPath
# node sequence metrics
disthops_winner = length(aa_primary_v$vpath[[1]])-1
HL5_hops = sum(aa_primary_v$vpath[[1]] %in% HL5s)
HL4_hops = sum(aa_primary_v$vpath[[1]] %in% HL4s)
HL3_hops = sum(aa_primary_v$vpath[[1]] %in% HL3s)
HL12_hops = sum(aa_primary_v$vpath[[1]] %in% HL12s)
# 1 amplifier per link
dist_links = aa_primary_e$epath[[1]]$weight
distKm_winner = sum(aa_primary_e$epath[[1]]$weight)
Link_sequence = paste0(dist_links,collapse = " ++++ ") # in km
# End-to-end OSNR combined linearly from 58 - 6 - 0.25 dB/km per link.
osnr_e2e = -10*log10(sum(10^(-0.1*(58-6-0.25*dist_links))))
# osnr of path
osnr_req_50G = osnr_50G.mat[HL4_hops+1,HL12_hops+HL3_hops+2]
osnr_req_40G = osnr_40G.mat[HL4_hops+1,HL12_hops+HL3_hops+2]
osnr_req_25G = osnr_25G.mat[HL4_hops+1,HL12_hops+HL3_hops+2]
can50G = ifelse(osnr_e2e>osnr_req_50G,TRUE,FALSE)
can40G = ifelse(osnr_e2e>osnr_req_40G,TRUE,FALSE)
can25G = ifelse(osnr_e2e>osnr_req_25G,TRUE,FALSE)
# Highest feasible line rate for this path (0 if none).
if (can50G == TRUE) {
maxSpeed = 50
} else {
if (can40G == TRUE) {
maxSpeed = 40
} else {
if (can25G == TRUE) {
maxSpeed = 25
} else {
maxSpeed = 0
}
}
}
# NOTE(review): maxSpeed == 0 gives Inf lambdas and the while loop below never terminates.
nlambdas = ceiling(demand_matrix[HL3index]/maxSpeed)
# we do the first fit allocation
eindex = which(E(gPass) %in% aa_primary_e$epath[[1]])
conditionFF = FALSE
elambda = 0
while (conditionFF == FALSE) {
elambda = elambda + 1
if (prod(is.na(FFallocation[eindex,(elambda:(elambda+nlambdas-1))])) == 1) { # free slot run
FFallocation[eindex, elambda:(elambda+nlambdas-1)] = paste("lightpath",Source,Destination, sep = "++")
Sallocation[eindex, elambda:(elambda+nlambdas-1)] = Source
Dallocation[eindex, elambda:(elambda+nlambdas-1)] = Destination
conditionFF = TRUE
} else { # occupied, keep searching
conditionFF = FALSE
}
}
speed_HL3s[HL3index] = maxSpeed
nlambdas_HL3s[HL3index] = nlambdas_HL3s[HL3index] + nlambdas
nlambdas_HL12s[which(HL12s$name == Destination)] = nlambdas_HL12s[which(HL12s$name == Destination)] + nlambdas
datapoint = c(Source, Destination, "Primary_path",
distKm_winner, disthops_winner,
HL5_hops, HL4_hops, HL3_hops, HL12_hops,
osnr_e2e, osnr_req_50G, osnr_req_40G, osnr_req_25G,
ifelse(osnr_e2e>osnr_req_50G,TRUE,FALSE),
ifelse(osnr_e2e>osnr_req_40G,TRUE,FALSE),
ifelse(osnr_e2e>osnr_req_25G,TRUE,FALSE),
Node_sequence,Link_sequence)
Results.df = rbind(Results.df,datapoint)
colnames(Results.df) = c("Source","Destination","prim_sec",
"distance_KM","distance_hops",
"N_HL5s","N_HL4s","N_HL3s","N_HL12s",
"OSNR_e2e","OSNR_req50G","OSNR_req40G","OSNR_req25G",
"Can_50G","Can_40G","Can_25G",
"FullPath","LinksDistance")
# Plan-B secondary path
# +1000 km penalty on primary links steers the search away from them; stripped below.
all_weights_orig = E(gPass)$weight
weights_aux = all_weights_orig
weights_aux[(E(gPass) %in% aa_primary_e$epath[[1]])] = weights_aux[(E(gPass) %in% aa_primary_e$epath[[1]])] + 1000
E(gPass)$weight = weights_aux
aa_minKm_plan_b = get.shortest.paths(gPass,
from = V(gPass)[which(V(gPass)$name==Source)], #from = V(gPass)[which(V(gPass)$type=="HL4")][HL4index],
to = V(gPass)[which(V(gPass)$type=="HL12")],
output = 'epath')
aux_HL12_winner_sec_plan_b = unlist(lapply(aa_minKm_plan_b$epath,length))*1e5
for (ii in c(1:length(aux_HL12_winner_sec_plan_b))){
aux_HL12_winner_sec_plan_b[ii] = aux_HL12_winner_sec_plan_b[ii] + sum(aa_minKm_plan_b$epath[[ii]]$weight)
}
HL12_winner_sec_planb = which(aux_HL12_winner_sec_plan_b==min(aux_HL12_winner_sec_plan_b))
# Secondary must use a different HL12 than the primary.
if (HL12_winner_prim == HL12_winner_sec_planb) {
HL12_winner_sec_planb = order(aux_HL12_winner_sec_plan_b,decreasing = F)[2]
}
Destination = HL12s[HL12_winner_sec_planb]$name
Destination_node = HL12s[HL12_winner_sec_planb]
aa_secondary_e_planb = get.shortest.paths(gPass,
from = Source_node,
to = Destination_node,
output = 'epath')
aa_secondary_v_planb = get.shortest.paths(gPass,
from = Source_node,
to = Destination_node,
output = 'vpath')
SecondaryPath_PlanB = paste0(aa_secondary_v_planb$vpath[[1]]$name,collapse="++++")
Destination = HL12s[HL12_winner_sec_planb]$name
Node_sequence = SecondaryPath_PlanB
# node sequence metrics
disthops_winner = length(aa_secondary_v_planb$vpath[[1]])-1
HL5_hops = sum(aa_secondary_v_planb$vpath[[1]] %in% HL5s)
HL4_hops = sum(aa_secondary_v_planb$vpath[[1]] %in% HL4s)
HL3_hops = sum(aa_secondary_v_planb$vpath[[1]] %in% HL3s)
HL12_hops = sum(aa_secondary_v_planb$vpath[[1]] %in% HL12s)
# 1 amplifier per link
# Strip the +1000 penalties to recover real km; their multiples count shared links.
dist_links = aa_secondary_e_planb$epath[[1]]$weight
dist_links[which(dist_links>999)] = dist_links[which(dist_links>999)] -1000
Nshared_links = floor(sum(aa_secondary_e_planb$epath[[1]]$weight)/1000)
Nshared_nodes = sum(as.numeric(aa_secondary_v_planb$vpath[[1]] %in% aa_primary_v$vpath[[1]]))-1
distKm_winner = sum(dist_links)
Link_sequence = paste0(dist_links,collapse = " ++++ ") # in km
osnr_e2e = -10*log10(sum(10^(-0.1*(58-6-0.25*dist_links))))
# osnr of path
osnr_req_50G = osnr_50G.mat[HL4_hops+1,HL12_hops+HL3_hops+2]
osnr_req_40G = osnr_40G.mat[HL4_hops+1,HL12_hops+HL3_hops+2]
osnr_req_25G = osnr_25G.mat[HL4_hops+1,HL12_hops+HL3_hops+2]
# Secondary path
# Fully disjoint attempt: remove all primary vertices except the source and re-search.
E(gPass)$weight = all_weights_orig
gPass = delete_vertices(gPass, aa_primary_v$vpath[[1]][2:length(aa_primary_v$vpath[[1]])])
HL12s = V(gPass)[which(V(gPass)$type=="HL12")]
HL3s = V(gPass)[which(V(gPass)$type=="HL3")]
HL4s = V(gPass)[which(V(gPass)$type=="HL4")]
HL5s = V(gPass)[which(V(gPass)$type=="HL5")]
aa_secondary_e = get.shortest.paths(gPass,
from = V(gPass)[which(V(gPass)$name==Source)], #from = V(gPass)[which(V(gPass)$type=="HL4")][HL4index],
to = V(gPass)[which(V(gPass)$type=="HL12")],
output = 'epath',
weights=NA)
aa_secondary_minKm_e = get.shortest.paths(gPass,
from = V(gPass)[which(V(gPass)$name==Source)], #from = V(gPass)[which(V(gPass)$type=="HL4")][HL4index],
to = V(gPass)[which(V(gPass)$type=="HL12")],
output = 'epath')
aux_HL12_winner_sec = unlist(lapply(aa_secondary_minKm_e$epath,length))*1e5
for (ii in c(1:length(aux_HL12_winner_sec))){
aux_HL12_winner_sec[ii] = aux_HL12_winner_sec[ii] + sum(aa_secondary_minKm_e$epath[[ii]]$weight)
}
HL12s_winner_sec = which(aux_HL12_winner_sec==min(aux_HL12_winner_sec))
# Empty epath => no HL12 reachable without the primary: report the shared plan-B path.
if (min(unlist(lapply(aa_secondary_e$epath,length)))==0) {
# node unreachable
datapoint = c(Source, Destination, paste("Secondary_path_shared_",Nshared_nodes,"nodes",Nshared_links,"links",sep=""),
distKm_winner, disthops_winner,
HL5_hops, HL4_hops, HL3_hops, HL12_hops,
osnr_e2e, osnr_req_50G, osnr_req_40G, osnr_req_25G,
ifelse(osnr_e2e>osnr_req_50G,TRUE,FALSE),
ifelse(osnr_e2e>osnr_req_40G,TRUE,FALSE),
ifelse(osnr_e2e>osnr_req_25G,TRUE,FALSE),
Node_sequence,Link_sequence)
Results.df = rbind(Results.df,datapoint)
} else {
HL12s_winner_sec = order(unlist(lapply(aa_secondary_e$epath,length)),decreasing = F)[1]
Destination = V(gPass)[which(V(gPass)$type=="HL12")][HL12s_winner_sec]$name
aa_secondary_e = get.shortest.paths(gPass,
from = V(gPass)[which(V(gPass)$name==Source)], #from = V(gPass)[which(V(gPass)$type=="HL4")][HL4index],
to = V(gPass)[which(V(gPass)$type=="HL12")][HL12s_winner_sec],
output = 'epath',
weights = NA)
aa_secondary_v = get.shortest.paths(gPass,
from = V(gPass)[which(V(gPass)$name==Source)], #from = V(gPass)[which(V(gPass)$type=="HL4")][HL4index],
to = V(gPass)[which(V(gPass)$type=="HL12")][HL12s_winner_sec],
output = 'vpath',
weights = NA)
SecondaryPath = paste0(aa_secondary_v$vpath[[1]]$name,collapse="++++")
Node_sequence = SecondaryPath
# node sequence metrics
disthops_winner = length(aa_secondary_v$vpath[[1]])-1
HL5_hops = sum(aa_secondary_v$vpath[[1]] %in% HL5s)
HL4_hops = sum(aa_secondary_v$vpath[[1]] %in% HL4s)
HL3_hops = sum(aa_secondary_v$vpath[[1]] %in% HL3s)
HL12_hops = sum(aa_secondary_v$vpath[[1]] %in% HL12s)
# 1 amplifier per link
dist_links = aa_secondary_e$epath[[1]]$weight
distKm_winner = sum(aa_secondary_e$epath[[1]]$weight)
Link_sequence = paste0(dist_links,collapse = " ++++ ") # in km
osnr_e2e = -10*log10(sum(10^(-0.1*(58-6-0.25*dist_links))))
# osnr of path
osnr_req_50G = osnr_50G.mat[HL4_hops+1,HL12_hops+HL3_hops+2]
osnr_req_40G = osnr_40G.mat[HL4_hops+1,HL12_hops+HL3_hops+2]
osnr_req_25G = osnr_25G.mat[HL4_hops+1,HL12_hops+HL3_hops+2]
datapoint = c(Source, Destination, "Secondary_path_totally_disjoint",
distKm_winner, disthops_winner,
HL5_hops, HL4_hops, HL3_hops, HL12_hops,
osnr_e2e, osnr_req_50G, osnr_req_40G, osnr_req_25G,
ifelse(osnr_e2e>osnr_req_50G,TRUE,FALSE),
ifelse(osnr_e2e>osnr_req_40G,TRUE,FALSE),
ifelse(osnr_e2e>osnr_req_25G,TRUE,FALSE),
Node_sequence,Link_sequence)
Results.df = rbind(Results.df,datapoint)
}
}
# ---- Spectrum allocation summary and export ----
# Keep only the used frequency-slot columns, rounded up to a multiple of 40
# (one 40-slot band per fiber).
dimFFalloc = ceiling(length(which(colSums(is.na(FFallocation))<dim(FFallocation)[1]))/40)*40
FFallocation_final = FFallocation[,c(1:dimFFalloc)]
Sallocation_final = Sallocation[,c(1:dimFFalloc)]
Dallocation_final = Dallocation[,c(1:dimFFalloc)]
# Binary occupancy matrix (1 = free slot / NA, 0 = allocated) for the heatmap.
heat_table = apply(FFallocation_final,2,is.na)
heat_table2 = matrix(as.numeric(heat_table), nrow=dim(heat_table)[1], ncol=dim(heat_table)[2], byrow=F)
heatmap(heat_table2, Colv = NA, Rowv = NA, scale="none",
xlab="Freq. Slots", ylab="links", main="First-Fit allocation")
# Name the result columns and drop the placeholder first row before export.
colnames(Results.df) = c("Source","Destination","prim_sec",
"distance_KM","distance_hops",
"N_HL5s","N_HL4s","N_HL3s","N_HL12s",
"OSNR_e2e","OSNR_req50G","OSNR_req40G","OSNR_req25G",
"Can_50G","Can_40G","Can_25G",
"FullPath","LinksDistance")
print("Writing results in output files")
Results.df = Results.df[-1,]
write.csv(Results.df,file="lightpaths.csv")
write.csv(FFallocation_final,file="FFlightpaths.csv")
# Analysis of primary paths
# ---- Coerce Results.df columns back to proper types ----
# Results.df was built via rbind() of character vectors, so every column is
# character; restore factors, numerics and logicals before analysis.
Results.df$Source = as.factor(Results.df$Source)
Results.df$Destination = as.factor(Results.df$Destination)
Results.df$prim_sec = as.factor(Results.df$prim_sec)
Results.df$distance_KM = as.numeric(Results.df$distance_KM)
Results.df$distance_hops = as.numeric(Results.df$distance_hops)
Results.df$N_HL5s = as.numeric(Results.df$N_HL5s)
Results.df$N_HL4s = as.numeric(Results.df$N_HL4s)
Results.df$N_HL3s = as.numeric(Results.df$N_HL3s)
Results.df$N_HL12s = as.numeric(Results.df$N_HL12s)
Results.df$OSNR_e2e = as.numeric(Results.df$OSNR_e2e)
Results.df$OSNR_req50G = as.numeric(Results.df$OSNR_req50G)
Results.df$OSNR_req40G = as.numeric(Results.df$OSNR_req40G)
Results.df$OSNR_req25G = as.numeric(Results.df$OSNR_req25G)
# Required-OSNR values above 100 dB are sentinel "infeasible" entries from the
# lookup tables; blank them out.
Results.df[which(Results.df$OSNR_req50G>100),"OSNR_req50G"] = NA
Results.df[which(Results.df$OSNR_req40G>100),"OSNR_req40G"] = NA
Results.df[which(Results.df$OSNR_req25G>100),"OSNR_req25G"] = NA
Results.df$Can_50G = as.logical(Results.df$Can_50G)
Results.df$Can_40G = as.logical(Results.df$Can_40G)
Results.df$Can_25G = as.logical(Results.df$Can_25G)
# Split into primary rows and everything else (all secondary-path variants).
Ppaths.df = Results.df[which(Results.df$prim_sec=="Primary_path"),]
SecPaths.df = Results.df[-which(Results.df$prim_sec=="Primary_path"),]
# ---- Primary vs secondary comparison boxplots ----
# Distance in km.
boxplot(Ppaths.df[,"distance_KM"], SecPaths.df[,"distance_KM"],
main = "Distance (KM)",
at = c(1,2),
names = c("primary","secondary"),
las = 2,
col = c("green","red"),
border = "brown",
horizontal = FALSE,
notch = TRUE
)
# Hop count.
boxplot(Ppaths.df[,"distance_hops"], SecPaths.df[,"distance_hops"],
main = "# Hops",
at = c(1,2),
names = c("primary","secondary"),
las = 2,
col = c("green","red"),
border = "brown",
horizontal = FALSE,
notch = TRUE
)
# End-to-end OSNR.
boxplot(Ppaths.df[,"OSNR_e2e"], SecPaths.df[,"OSNR_e2e"],
main = "# End-to-End OSNR (dB)",
at = c(1,2),
names = c("primary","secondary"),
las = 2,
col = c("green","red"),
border = "brown",
horizontal = FALSE,
notch = TRUE
)
# Complete-case versions (rows with any NA dropped).
Ppath_NA.df = na.omit(Ppaths.df)
SecPath_NA.df = na.omit(SecPaths.df)
# Node configuration
# ---- Node design and total cost of ownership ----
# Rebuild the full graph (the working copy was pruned above) and re-index the
# hierarchy-level vertex sets.
gPass = get_gPass(nodes.df,connectivity.mat)
HL12s = V(gPass)[which(V(gPass)$type=="HL12")]
HL3s = V(gPass)[which(V(gPass)$type=="HL3")]
HL4s = V(gPass)[which(V(gPass)$type=="HL4")]
HL5s = V(gPass)[which(V(gPass)$type=="HL5")]
# Per-component unit costs; first column is the component name, second the cost.
Component_cost = read.csv("Passion_cost_components.csv",header=F, sep=";")
# HL4 configuration
HL4conf.df = data.frame(node_name = HL4s$name,
nlambdas = nlambdas_HL4s,
speed = speed_HL4s,
deg = degree(gPass)[which(V(gPass)$type=="HL4")],
degRoadm = NA,
cost = rep(NA, length(HL4s)))
# ROADM degree per node: for each incident link, count the 40-slot bands that
# carry at least one allocation in FFallocation.
for (ii in HL4s) {
aux = which(E(gPass) %in% incident(gPass,V(gPass)[ii]))
deg = 0
for (jj in 1:length(aux)) {
mm = (matrix(FFallocation[aux[jj],],nrow=(dim(FFallocation)[2]/40),ncol=40,byrow=TRUE))
deg = deg + ceiling(sum(abs(as.numeric(apply(mm,1,is.na))-1))/40)
}
# NOTE(review): indexing the data.frame by node NAME assumes rownames were
# set to node names; with default integer rownames this would append new
# NA rows instead of updating — verify upstream.
HL4conf.df[V(gPass)[ii]$name,"degRoadm"] = deg
}
# cost = ROADM degree + S-BVTs (1/40th per lambda) + router.
# NOTE(review): Component_cost==<name> compares the whole data.frame; this
# relies on the component names living in column 1 — fragile, confirm.
HL4conf.df$cost = Component_cost[which(Component_cost=="ROADM_degree"),2] * apply(HL4conf.df[,c("deg","degRoadm")],1,max) +
Component_cost[which(Component_cost=="SBVT"),2]/40 * HL4conf.df$nlambdas + Component_cost[which(Component_cost=="HL4_Router"),2]
# HL3 configuration
HL3conf.df = data.frame(node_name = HL3s$name,
nlambdas = nlambdas_HL3s,
speed = speed_HL3s,
deg = degree(gPass)[which(V(gPass)$type=="HL3")],
degRoadm = NA,
cost = rep(NA, length(HL3s)))
for (ii in HL3s) {
aux = which(E(gPass) %in% incident(gPass,V(gPass)[ii]))
deg = 0
for (jj in 1:length(aux)) {
mm = (matrix(FFallocation[aux[jj],],nrow=(dim(FFallocation)[2]/40),ncol=40,byrow=TRUE))
deg = deg + ceiling(sum(abs(as.numeric(apply(mm,1,is.na))-1))/40)
}
HL3conf.df[V(gPass)[ii]$name,"degRoadm"] = deg
}
# NOTE(review): the HL3 cost uses the "HL4_Router" component — possibly a
# copy-paste of the HL4 formula; confirm there is no "HL3_Router" entry.
HL3conf.df$cost = Component_cost[which(Component_cost=="ROADM_degree"),2] * apply(HL3conf.df[,c("deg","degRoadm")],1,max) +
Component_cost[which(Component_cost=="SBVT"),2]/40 * HL3conf.df$nlambdas + Component_cost[which(Component_cost=="HL4_Router"),2]
# HL12 configuration
HL12conf.df = data.frame(node_name = HL12s$name,
nlambdas = nlambdas_HL12s,
deg = degree(gPass)[which(V(gPass)$type=="HL12")],
degRoadm = NA,
cost = rep(NA,length(HL12s)))
for (ii in HL12s) {
aux = which(E(gPass) %in% incident(gPass,V(gPass)[ii]))
deg = 0
for (jj in 1:length(aux)) {
mm = (matrix(FFallocation[aux[jj],],nrow=(dim(FFallocation)[2]/40),ncol=40,byrow=TRUE))
deg = deg + ceiling(sum(abs(as.numeric(apply(mm,1,is.na))-1))/40)
}
HL12conf.df[V(gPass)[ii]$name,"degRoadm"] = deg
}
# cost = roadm-degree + S-BVTs + router
HL12conf.df$cost = Component_cost[which(Component_cost=="ROADM_degree"),2] * apply(HL12conf.df[,c("deg","degRoadm")],1,max) +
Component_cost[which(Component_cost=="SBVT"),2]/40 * HL12conf.df$nlambdas + Component_cost[which(Component_cost=="HL12_Router"),2]
# Total cost
TCO = sum(HL12conf.df$cost) + sum(HL3conf.df$cost) + sum(HL4conf.df$cost)
# Export all node designs; HL4/HL3 drop the "speed" column so the three
# tables can be row-bound.
write.csv(rbind(HL4conf.df[,colnames(HL4conf.df)[c(1:2,4:6)]],HL3conf.df[,colnames(HL3conf.df)[c(1:2,4:6)]],HL12conf.df),
file = "NodeDesign.csv")
|
4128b90c3e8575be447f152bdc6f0d9128a562df
|
d121f587f7e0678030d33a4c5428e594c5978dad
|
/R/quant_txrevise.R
|
965adf4dc8c166628958eca01c439db4bfbfe4fc
|
[
"Apache-2.0"
] |
permissive
|
kauralasoo/eQTLUtils
|
fcf0907721b3a8f19fe68e611cecb4f16d7a0c9d
|
26242562a4e244334fd9691d03bc1ef4d2d6c1d9
|
refs/heads/master
| 2023-03-05T19:10:45.247191
| 2023-03-03T13:33:08
| 2023-03-03T13:33:08
| 149,779,618
| 4
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 758
|
r
|
quant_txrevise.R
|
# Construct a rowData table for txrevise event-level quantification results.
#
# phenotype_ids: character vector of txrevise phenotype ids of the form
#   <gene_id>.<txrevise_grp>.<txrevise_pos>.<transcript_id>
# transcript_meta: transcript metadata table (Ensembl biomart export), passed
#   on to extractGeneMetadataFromBiomartFile().
# Returns a data frame with one row per phenotype id, annotated with
# gene-level metadata joined by gene_id.
constructTxreviseRowData <- function(phenotype_ids, transcript_meta){
  # Split phenotype ids into their four dot-separated components.
  # (dplyr::data_frame() is deprecated; tibble() is its replacement.)
  event_metadata = dplyr::tibble(phenotype_id = phenotype_ids) %>%
    tidyr::separate(phenotype_id, c("gene_id", "txrevise_grp", "txrevise_pos", "transcript_id"), sep = "\\.", remove = FALSE) %>%
    dplyr::mutate(group_id = paste(gene_id, txrevise_pos, sep = "."), quant_id = paste(gene_id, txrevise_grp, txrevise_pos, sep = ".")) %>%
    dplyr::select(phenotype_id, quant_id, group_id, gene_id)
  # Extract gene metadata, dropping the per-phenotype columns so the join
  # below only contributes gene-level annotation.
  gene_meta = extractGeneMetadataFromBiomartFile(transcript_meta) %>%
    dplyr::select(-phenotype_id, -group_id, -quant_id)
  row_data = dplyr::left_join(event_metadata, gene_meta, by = "gene_id")
  return(row_data)
}
|
d67e4bbb18bd81370f47e0fff20cdcdb9f65aa31
|
15011c6bec5eff7ab07b14b423879b022851c5a6
|
/TUCUMAN/SMT/Circuitos/grid.circuitos.smt.R
|
b5410ff0000afdd00954f9da18306c6607a7702b
|
[
"MIT"
] |
permissive
|
shirosweets/geofacet_ARG
|
97ae4ee815ccf0cfa7c1a1acf7eb4ece64861e79
|
48685f8535ae628eeacff97e1a79511b8e139b65
|
refs/heads/master
| 2023-03-18T21:54:58.972109
| 2019-04-22T16:26:58
| 2019-04-22T16:26:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 773
|
r
|
grid.circuitos.smt.R
|
# Facet grid for the SMT electrical circuits, in the layout expected by the
# geofacet package: one row per facet with its display name, code, and grid
# position (row/col). The name and code columns are intentionally identical,
# so the shared id vector is defined once inside local() to avoid repetition.
SMT.circuitos <- local({
  ids <- c(
    "B15", "A15", "A13", "A12", "A14",
    "A16", "16", "15", "13", "12", "14",
    "A17", "17", "7A", "6", "5", "11",
    "18", "10", "B18", "7", "2", "1",
    "8", "2A", "1A", "A18",
    "A10", "8A", "3", "4",
    "9A", "19", "9",
    "20", "21",
    "22"
  )
  data.frame(
    name = ids,
    code = ids,
    row = c(1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 8, 8, 9),
    col = c(2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 2, 6, 1, 3, 4, 5, 3, 4, 5, 2, 6, 3, 5, 4, 4, 3, 5, 4, 5, 5),
    stringsAsFactors = FALSE
  )
})
|
cd3d7ee8fd2866ef2d10a31e9d530ee4b07ef47c
|
beca8e699a02bf123aa98ee5eaacba9ad245aa8c
|
/T_32_Tables.R
|
b981e1321a414879647fc66d9d52bbe3f3deb730
|
[] |
no_license
|
brandtn/R_Code_Misc
|
d7bd4539a07684c88a032836fc563842628e1426
|
cc7cdbef22dd75084892717361dd6240d924c7c0
|
refs/heads/master
| 2021-09-26T00:32:47.320529
| 2018-10-26T15:24:21
| 2018-10-26T15:24:21
| 120,939,745
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 555
|
r
|
T_32_Tables.R
|
#Load libraries
library("tidyverse")
# NOTE(review): the googlesheets package is superseded by googlesheets4 —
# consider migrating (gs_title/gs_read no longer work with the v4 Sheets API).
library("googlesheets")
# Pull the "T_32" spreadsheet from Google Sheets and preview its structure.
student_data <- gs_read(ss = gs_title("T_32"))
glimpse(student_data)
#function to convert date
#take in x which is a 4 digit number
# Fixes a syntax error in the original (`convertdate {` was missing
# `<- function(x)`), and implements the conversion the comments describe:
#   digit 1: century ("0" -> 19xx, "1" -> 20xx)
#   digits 2-3: two-digit year
#   digit 4: semester code (2 = Winter, 4 = Spring, 6 = Summer, 8 = Fall)
# Returns a list with the full calendar year (integer) and the semester name
# (NA for an unrecognized semester code).
convertdate <- function(x) {
  # Normalize to a 4-character code, preserving a leading zero
  # (e.g. 962 -> "0962").
  code <- sprintf("%04d", as.integer(x))
  # First character selects the century prefix: 0 = 19, 1 = 20.
  century <- if (substr(code, 1, 1) == "0") "19" else "20"
  # Second and third characters are the two-digit year.
  year <- as.integer(paste0(century, substr(code, 2, 3)))
  # Fourth character encodes the semester.
  semester <- switch(substr(code, 4, 4),
    "2" = "Winter",
    "4" = "Spring",
    "6" = "Summer",
    "8" = "Fall",
    NA_character_
  )
  list(year = year, semester = semester)
}
|
4bfb9b4705862e221c4c62d2a7efe0c6df62cd11
|
ac84d0a57c45731f36048895b604ea2721d5c8ba
|
/src/figure-cum-mul.R
|
7e05072f928316444dc805ca7aefbbf36d289dc3
|
[] |
no_license
|
yshin12/llss-rz
|
de2a9e84a463243e8627e2936e21c940fc696c9f
|
d561bdbacd21a27f3ee34ebc1728ff5205fe5d80
|
refs/heads/master
| 2020-05-03T10:30:56.360791
| 2019-11-01T13:22:34
| 2019-11-01T13:22:34
| 178,581,261
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,058
|
r
|
figure-cum-mul.R
|
library('ggplot2')
# ---- Load cumulative-multiplier point estimates and standard errors ----
# Keep the first 20 horizons and add an explicit quarter index.
junkmultse.original = data.frame(read.csv(file='../output/junkmultse-original.csv', header=T))
junkmultse.original = junkmultse.original[1:20,]
junkmultse.original[,'h'] = c(1:20)
junkmultse = data.frame(read.csv(file='../output/junkmultse-newsy.csv', header=T))
junkmultse = junkmultse[1:20,]
junkmultse[,'h'] = c(1:20)
#---------------------------------
# Draw Graphs
#---------------------------------
# Figure 1: original specification. Recession multiplier (blue, grey 95% CI
# ribbon) vs expansion multiplier (red, dashed 95% CI lines), plus a
# reference line at 1.
pdf(file="../output/fig-mul-original.pdf", width=12, height=7.5)
f1 = ggplot(data=junkmultse.original)
f1 = f1 + geom_ribbon(aes(x=h, ymin=multrec1-1.96*seyrec,ymax=multrec1+1.96*seyrec), fill = 'grey70')
# NOTE(review): linetype 'F1' is a hex dash-pattern string; figure 2 uses
# lowercase 'f2' — confirm both are intended (and valid) patterns.
f1 = f1 + geom_line(aes(x=h,y=multexp1), linetype='F1',color='red') + geom_line(aes(x=h,y=multrec1), color='blue')
f1 = f1 + geom_line(aes(x=h,y=multexp1+1.96*seyexp), linetype='longdash', color='red') + geom_line(aes(x=h,y=multexp1-1.96*seyexp), linetype='longdash', color='red')
f1 = f1 + geom_line(aes(x=h,y=1), linetype='dotdash')
f1 = f1 + labs(x='Quarters', y='Cumulative Multiplier')
f1 = f1 + theme(axis.text=element_text(size=16),axis.title=element_text(size=20))
f1 = f1 + coord_cartesian(xlim=c(1.9, 19.5))
f1 = f1 + scale_x_continuous(breaks=seq(2,20,2), limits=c(1,20))
print(f1)
dev.off()
# Figure 2: LLSS "newsy" specification, same layout as figure 1.
pdf(file="../output/fig-mul-llss-newsy.pdf", width=12, height=7.5)
f2 = ggplot(data=junkmultse)
f2 = f2 + geom_ribbon(aes(x=h, ymin=multrec1-1.96*seyrec,ymax=multrec1+1.96*seyrec), fill = 'grey70') + xlim(1,20)
f2 = f2 + geom_line(aes(x=h,y=multexp1), linetype='f2',color='red') + geom_line(aes(x=h,y=multrec1), color='blue')
f2 = f2 + geom_line(aes(x=h,y=multexp1+1.96*seyexp), linetype='longdash', color='red') + geom_line(aes(x=h,y=multexp1-1.96*seyexp), linetype='longdash', color='red')
f2 = f2 + geom_line(aes(x=h,y=1), linetype='dotdash')
f2 = f2 + labs(x='Quarters', y='Cumulative Multiplier')
f2 = f2 + theme(axis.text=element_text(size=16),axis.title=element_text(size=20))
f2 = f2 + coord_cartesian(xlim=c(1.9, 19.5))
f2 = f2 + scale_x_continuous(breaks=seq(2,20,2), limits=c(1,20))
print(f2)
dev.off()
|
222168fe47da460398f21fa2dda40c7dc5c4e05c
|
7f9f945c8a02dfd5f38d30abfcbbfa20d24a4391
|
/man/print.fixest_multi.Rd
|
1fb8f62071ee5d4ce5ccb472ff1f830a7dacaa5d
|
[] |
no_license
|
lrberge/fixest
|
96428663b68c3701f1063f0fb76a87b68333b7d4
|
6b852fa277b947cea0bad8630986225ddb2d6f1b
|
refs/heads/master
| 2023-08-19T22:36:19.299625
| 2023-04-24T08:25:17
| 2023-04-24T08:25:17
| 200,205,405
| 309
| 64
| null | 2023-09-13T09:51:03
| 2019-08-02T09:19:18
|
R
|
UTF-8
|
R
| false
| true
| 1,123
|
rd
|
print.fixest_multi.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fixest_multi.R
\name{print.fixest_multi}
\alias{print.fixest_multi}
\title{Print method for fixest_multi objects}
\usage{
\method{print}{fixest_multi}(x, ...)
}
\arguments{
\item{x}{A \code{fixest_multi} object, obtained from a \code{fixest} estimation leading to multiple results.}
\item{...}{Other arguments to be passed to \code{\link{summary.fixest_multi}}.}
}
\description{
Displays summary information on fixest_multi objects in the R console.
}
\examples{
base = iris
names(base) = c("y", "x1", "x2", "x3", "species")
# Multiple estimation
res = feols(y ~ csw(x1, x2, x3), base, split = ~species)
# Let's print all that
res
}
\seealso{
The main fixest estimation functions: \code{\link{feols}}, \code{\link[=feglm]{fepois}}, \code{\link[=femlm]{fenegbin}}, \code{\link{feglm}}, \code{\link{feNmlm}}. Tools for multiple fixest estimations: \code{\link{summary.fixest_multi}}, \code{\link{print.fixest_multi}}, \code{\link{as.list.fixest_multi}}, \code{\link[fixest]{sub-sub-.fixest_multi}}, \code{\link[fixest]{sub-.fixest_multi}}.
}
|
d2ce81b8350fa52993103282ed08aaf4f28c1fe3
|
37665649d838e477d74d48888be750d15bfeb651
|
/man/tree_idx.Rd
|
21411aea8a2a17ed08f57382dd9db7f91993e298
|
[] |
no_license
|
manueleleonelli/stagedtrees
|
880c9ecf5b9ec8ba68fd62d2320e8239c803cb5c
|
f0ebb7ca2f1fa05ccda5558baed2fe086625d7da
|
refs/heads/master
| 2020-05-21T00:08:39.125640
| 2019-03-06T15:50:34
| 2019-03-06T15:50:34
| 185,819,192
| 0
| 0
| null | 2019-05-09T14:50:05
| 2019-05-09T14:50:05
| null |
UTF-8
|
R
| false
| true
| 536
|
rd
|
tree_idx.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/0-util-functions.R
\name{tree_idx}
\alias{tree_idx}
\title{return path index}
\usage{
tree_idx(path, tree)
}
\arguments{
\item{path}{a path from root in the tree}
\item{tree}{a symmetric tree given as a list of levels.
This function returns the integer index of the node associated with the
given path in a symmetric tree defined by \code{tree}.}
}
\value{
an integer, the index of the node corresponding to \code{path}
}
\description{
return path index
}
|
eafa270ef41ae4c9ed16853608337ac47e5a029c
|
58facb39c3292cbfd100b5adae942f313f9e682e
|
/src/pkgSetup.R
|
47321bac6ffe77c72fea974a445e91da75c36bca
|
[] |
no_license
|
amitpatil21/page-2018-mrgsolve
|
c86f023bccc863b3041808ff62ef1a5be9585a6d
|
1d3c8227e95d5244976b4d770fce528adee64030
|
refs/heads/master
| 2020-07-30T07:19:21.767448
| 2018-09-18T15:05:25
| 2018-09-18T15:05:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,006
|
r
|
pkgSetup.R
|
# ---- Package setup script ----
# Installs project dependencies into a shared library, caching source
# tarballs in a local CRAN-like repository so non-author users install the
# exact same versions from local files.
# "*" means every user installs from CRAN/mrg; otherwise only listed authors do.
author <- c("*")
pkgs <- c("tidyverse", "mrgsolve", "knitr", "rmarkdown", "data.table",
"caTools", "bitops", "formatR", "git2r")
# Local source-repository layout (src/contrib) and shared library path.
pkgRoot <- "/data/page-packages"
pkgDir <- file.path(pkgRoot, "src", "contrib")
pkgDir <- normalizePath(pkgDir)
libDir <- "/data/page-Rlibs"
if(!dir.exists(pkgDir)) dir.create(pkgDir, recursive = TRUE)
if(!dir.exists(libDir)) dir.create(libDir)
.libPaths(libDir)
user <- Sys.info()["user"]
fromCRAN <- user %in% author | "*" %in% author
# Three repositories: validated Metrum packages, CRAN, and the local cache.
local_repos <- paste0("file://",pkgRoot)
metrum_repos <- "https://metrumresearchgroup.github.io/r_validated/"
cran_repos <- "https://cran.rstudio.com/"
repos <- c(mrg = metrum_repos, cran = cran_repos, local = local_repos)
# Expand pkgs to its full recursive dependency closure, minus base/recommended.
deps <- tools::package_dependencies(
packages = pkgs,
which = c("Depends", "Imports", "LinkingTo"),
recursive = TRUE,
db = available.packages(repos=repos[c("mrg", "cran")])
)
deps <- unlist(deps, use.names=FALSE)
pkgs <- unique(c(pkgs,deps))
base <- rownames(installed.packages(priority=c("base", "recommended")))
pkgs <- setdiff(pkgs,base)
# NOTE(review): write_PACKAGES is called unconditionally here, so the PACKAGES
# file always exists and the else branch below is unreachable.
tools::write_PACKAGES(pkgDir)
if(file.exists(file.path(pkgDir,"PACKAGES"))){
available <- available.packages(repos = repos["local"])[,"Package"]
} else{
available <- NULL
file.create(file.path(pkgDir,"PACKAGES"))
tools::write_PACKAGES(pkgDir)
}
# Authors: install anything missing from the cache straight from mrg/CRAN,
# saving tarballs into the local repository for everyone else.
if(fromCRAN){
newpkgs <- setdiff(pkgs, available)
if(length(newpkgs) > 0){
## These packages are installed either from mrg or cran
install.packages(newpkgs,
lib=libDir,
repos = repos[c("mrg", "cran")],
destdir=pkgDir,
type="source",
INSTALL_opts="--no-multiarch")
tools::write_PACKAGES(pkgDir)
}
## If multiple authors qcing each other, a package could be available
## but uninstalled. Install from local.
uninstalled <- setdiff(pkgs, installed.packages(libDir))
if(length(uninstalled)>0){
install.packages(uninstalled,
lib = libDir,
repos = repos["local"],
type = "source",
INSTALL_opts="--no-multiarch")
}
}
# Non-authors: install missing packages only from the local cached repository.
if(!fromCRAN){
installed <- row.names(installed.packages(libDir))
newpkgs <- setdiff(pkgs, installed)
if(length(newpkgs)>0){
install.packages(newpkgs,
lib = libDir,
repos = repos["local"],
type = "source",
INSTALL_opts="--no-multiarch")
}
}
# Mark every installed package in the `lib` directory as svn:ignore'd.
#
# root: project root; when supplied (and existing) the lib directory is taken
#   as <root>/lib, overriding `lib`.
# lib: path to the library directory (default "lib" under the current dir).
# ci: when TRUE, immediately commit the property change via `svn ci`.
#
# Writes lib/ignore.txt listing the directory contents (plus the ignore file
# and PACKAGES indexes) and applies it with `svn propset`.
# Fixes vs the original: the caller's working directory is now restored via
# on.exit() even if a system() call fails (the original left the process in
# lib's parent directory on error), and the scalar `if` condition uses `&&`
# instead of the elementwise `&`.
.ignore_libs <- function(root=getwd(),lib="lib", ci=FALSE) {
  if(!missing(root) && file.exists(root)) {
    lib <- file.path(root,"lib")
  }
  if(!file.exists(lib)) stop("Could not find lib directory")
  libs <- list.files(lib, full.names=FALSE)
  libs <- c(libs, "ignore.txt", "PACKAGES", "PACKAGES.gz")
  writeLines(con=file.path(lib,"ignore.txt"), libs)
  # Remember where we were and guarantee we return there, whatever happens.
  old_wd <- getwd()
  on.exit(setwd(old_wd), add = TRUE)
  setwd(lib)
  system("svn propset svn:ignore -F ignore.txt .")
  # Commit from lib's parent directory, matching the original behavior.
  setwd("..")
  if(ci) system("svn ci -m \"ignoring libs\" .")
}
|
eddced5385bd1dd7e1a5e28bf5c36da0c7036158
|
969711eebedba44718b75ef6ad2a2a39a070ca08
|
/ui.R
|
a4a67d41547b1a00341209401329ec79b8f740c5
|
[
"MIT"
] |
permissive
|
SubramaniamLab/DEGenR
|
1f88fee026d6a26f40b63a9bc507824584876495
|
5f5cf58a46e11f20cd692e920edffdc56edbe158
|
refs/heads/main
| 2023-04-07T00:37:02.381321
| 2021-10-08T15:11:39
| 2021-10-08T15:11:39
| 339,816,245
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 52,377
|
r
|
ui.R
|
# Load shared packages, helpers and globals used by the UI.
source(paste(getwd(),'global.R',sep="/"))
# shinydashboard header: application title bar.
header <- dashboardHeader(
title = "DEGenR"
#titleWidth = 250
)
# Inject a (currently empty) logo image into the right side of the navbar by
# replacing the third child of the header's navbar element.
header$children[[3]]$children[[3]] <- div(tags$img(src='', align="right", height='50px'))
# shinydashboard sidebar: the full navigation tree. Each menuSubItem's
# tabName must match a tabItem defined in the dashboard body.
sidebar <- dashboardSidebar(
width =250,
sidebarMenu(id = "sidebarmenu",
# Landing page.
menuItem("DEGenR Introduction", icon = icon("user"),
menuSubItem("Introduction", tabName = "intro")),
# Three data-ingestion routes: recount2 (RNA-seq), GEO (microarray),
# and user-uploaded raw counts.
menuItem("Data Upload", tabName = "Manual", icon = icon("dashboard"),
menuItem("Data from recount2", tabName = "", icon = icon("dashboard"),
menuSubItem("Select RNA-seq data", tabName="SRPdataset"),
menuSubItem("Data Summary", tabName="groupassign_SRP")
),
menuItem("Data from GEO", tabName = "", icon = icon("dashboard"),
menuSubItem("Select microarray expression data", tabName="GEOdataset"),
#menuSubItem("Sample Clustering", tabName="dataCluster"),
menuSubItem("Data Summary", tabName="groupassign")
),
menuItem("Count Data Upload", tabName = "Manual", icon = icon("dashboard"),
menuSubItem("File Upload", tabName="dataInputCounts"),
menuSubItem("Data Summary", tabName="dataSummary")
)
),
# limma-voom contrast definition.
menuItem("Sample Contrast", tabName="limmavoom", icon = icon("dashboard")),
# Differential expression: three alternative testing strategies.
menuItem("Differential Gene Expression", icon = icon("dashboard"),
menuSubItem("eBayes", tabName="DEGs"),
menuSubItem("TREAT", tabName="treatmethod"),
menuSubItem("topConfects", tabName="confect")
),
# Gene-set / ontology enrichment methods.
menuItem("Ontology Enrichment Analysis", tabName="enrichment", icon = icon("chart-bar"),
menuSubItem("camera", tabName="camera"),
menuSubItem("fGSEA", tabName="fgsea"),
menuSubItem("CERNO", tabName="Cerno"),
menuItem("Enrichr","", icon =icon("chart-bar"),
menuSubItem("Enrichr Ontology", tabName="enrichrontology"),
menuSubItem("Enrichr Plots", tabName="enrichrontologyplots")),
menuSubItem("Hypergeometric", tabName="Hypergeometric")
),
# Transcription-factor regulatory analyses.
menuItem("TF Enrichment Analysis", tabName="TFenrichment", icon = icon("chart-bar"),
menuItem("Enrichr","", icon =icon("chart-bar"),
menuSubItem("Enrichr TF analysis", tabName="enrichr"),
menuSubItem("Enrichr Plots", tabName="enrichrTFplots")),
menuItem("DoRothEA", tabName="", icon =icon("chart-bar"),
menuSubItem("DoRothEA TF analysis", tabName="dorothEA"),
menuSubItem("VIPER Shadow analysis", tabName="shadowana")),
menuSubItem("fGSEA", tabName="fgseaTF"),
menuSubItem("CERNO", tabName="CernoTF"),
menuSubItem("Hypergeometric", tabName="HypergeometricTF"))
))
body <- dashboardBody(
tags$head(
tags$link(rel = "stylesheet", type = "text/css", href = "my_style.css"),
tags$script(
HTML("
$(document).ready(function(){
resize();
})
function resize(){
var h = window.innerHeight - $('.navbar').height() - 150; // Get dashboardBody height
$('#box').height(h);
}"
)
)
),
tags$h4(
tags$link(rel = "stylesheet", type = "text/css", href = "my_style.css"),
),
tags$style(type="text/css",
".shiny-output-error { visibility: hidden; }",
".shiny-output-error:before { visibility: hidden; }"
),
tabItems(
############UPLOAD your own###########
tabItem(tabName="intro",
#img(src="cri.png"),
h2("Introduction"),
p("We developed DEGenR, an interactive web interface that provides integrated tools for
performing differential gene expression, rank-based ontological geneset and pathway enrichment analysis,
and transcription factor regulatory analysis from user-uploaded raw read counts as well as microarray and sequencing datasets available at the NCBI Gene Expression Omnibus
(GEO) and Sequencing Read Archive (SRA)."),
h3("Data Upload", style="padding-left: 1em"),
h4("Data from recount2"),
p("The recount2 project has processed over 2000 RNA-seq studies in the Sequencing Read Archive (SRA)
and other sources, included GTEx and TCGA datasets, using the RNA-seq alignment program Rail-RNA.
Entering a SRP dataset here will download the recount2-processed RNA expression and metadata for the
dataset you select and prepare it for analysis with DEGenR. For a full list of datasets,
see https://jhubiostatistics.shinyapps.io/recount/ and select the accession for the dataset you would like to analyze.
References: Collado-Torres L, Nellore A, Kammers K, Ellis SE, Taub MA, Hansen KD, Jaffe AE, Langmead B, Leek JT. Reproducible RNA-seq analysis using recount2. Nature Biotechnology, 2017. doi: 10.1038/nbt.3838.
Nellore A, Collado-Torres L, Jaffe AE, Alquicira-Hernández J, Wilks C, Pritt J, Morton J, Leek JT, Langmead B. Rail-RNA: scalable analysis of RNA-seq splicing and coverage. Bioinformatics, 2017. doi: 10.1093/bioinformatics/btw575."),
h4("Data from GEO"),
p("The Gene Expression Omnibus (GEO) housed at NCBI is repository of gene expression data, including
numerous human microarray gene expression studies. This step uses the R package GEOquery to download
the expression data and metadata for a user-selected microarray study. Search for datasets to
analyze at https://www.ncbi.nlm.nih.gov/geo/. Only those datatsets having matrix file can be analyzed."),
h4("Count Data Upload"),
p("Users are required to upload two files."),
p("1. RNA-seq raw count data."),
p("2. Metadata table"),
p("Please check the example files in .data/public_data folder")
),
tabItem(tabName="dataInputCounts",
fluidRow(
box(title = "Upload Data",
solidHeader = T, status = "primary",
width = 6,
helpText("Upload your RNA-seq raw count data here. Format the count data file for your dataset as a comma separated values (.csv) file,
with the first column providing the gene info and all subsequent columns providing the
raw counts for each sample. The header for each sample column should correspond to the sample name. To test the DEGenR pipeline with an example dataset (GSE76987, left_colon_processed), click the submit button"
,style="color:black; padding-right:0em; font-size:16px;"),
# popify(placement = "bottom", title = "File-input info",
fileInput("countFile","Choose CSV File",
accept=c( "text/csv",
"text/comma-separated-values,text/plain",
".csv")),
helpText("Upload your metadata table here, formatted as a tab-delimited file. The first column header should be “Sample” with the sample names provided,
and the second column header should be “Condition” with the sample conditions provided."
,style="color:black; padding-right:0em;font-size:16px;"),
fileInput("metaTab",
"Choose tab-delimited file",
accept = c('text/tab-separated-values',
'text/tab-separated-values',
'.txt',
'.tsv')
),
selectInput(inputId = "geneinfo",
label = "Choose gene ID information:",
choices = c("ENSEMBL",
"ENTREZID",
"SYMBOL")),
actionButton("upload", label = "Submit",icon("paper-plane"),
style="color: #fff; background-color: #337ab7; border-color: #2e6da4")
),
##Input information summary under Data Input Panel
box(title = "Input data Summary",
solidHeader = T, status = "primary",
width = 6,
h4("samples in the datasets", style="font-size:20px"),
tableOutput("sampleInfo")
)
)),
############# GEO Dataset ##############
tabItem(tabName="GEOdataset",
fluidRow(
box(title = "Enter a GEO dataset ID",
solidHeader = T, status = "primary", width=12,
textInput(inputId="geodataset",
label="Enter GSE accession from GEO"),
actionButton("uploadGEO", label = "Submit",icon("paper-plane"),
style="color: #fff; background-color: #337ab7; border-color: #2e6da4")
)),
##Input information summary under Data Input Panel
fluidRow(
box(title = "Input data Summary",
solidHeader = T, status = "primary", width=12,
# width = 6,
h4("samples in the datasets", style="font-size:20px"),
tableOutput("sampleInfo2")
),
)
),
############# SRP Dataset ##############
tabItem(tabName="SRPdataset",
fluidRow(
box(title = "Select RNA-seq data",
solidHeader = T, status = "primary", width=12,
textInput(inputId="srpdataset",
label="Enter SRP accession from recount2. For GTEx data, enter SRP012682; for TCGA, enter TCGA"),
actionButton("uploadSRP", label = "Submit",icon("paper-plane"),
style="color: #fff; background-color: #337ab7; border-color: #2e6da4")
)),
##Input information summary under Data Input Panel
fluidRow(
box(title = "Input data Summary",
solidHeader = T, status = "primary", width=12,
# width = 6,
h4("samples in the datasets", style="font-size:20px"),
tableOutput("sampleInfo3")
)
)),
#########################################
## Second tab content for data summarization panel
tabItem(tabName="dataSummary",
##First 3 box under Data Summarization panel
fluidRow(
box(title = "Data summary without filtering",
solidHeader = T, status = "primary", width=12,
fluidRow(
##Raw Count Summary box under the Data Summarization panel
box(title = "Raw Count Summary",
status = "primary",
width = 4,
#fluidRow(
# column(4,
div(tableOutput("orgLibsizeNormfactor"),style = "font-size:70%"),
tags$style("#orgLibsizeNormfactor table {border: 1px solid black; float: center; position:relative;}","#orgLibsizeNormfactor th {border: 1px solid black;}","#orgLibsizeNormfactor td {border: 1px solid black;}")
),
box(title = "Density Plot of unfiltered gene expression",
status = "primary",
width = 4,
#column(4,
plotOutput("plotdensityunfiltered")
),
box(title = "Box Plot of unfiltered gene expression",
status = "primary",
width = 4,
plotOutput("boxplotunfiltered")
)))),
actionButton("Filter", label = "Filter via EdgeR", icon("paper-plane"),
style="color: #fff; background-color: #337ab7; border-color: #2e6da4"),
fluidRow(
box(title = "After filtering via EdgeR",
solidHeader = T, status = "primary", width=12,
## Sample Normalization box plot under Data Summarization panel
fluidRow(
box(title = "Plot densities after filteration",
status = "primary",
width = 4,
plotOutput("plotdensities")
),
box(title = "Boxplot after filteration",
status = "primary",
width = 4,
plotOutput("sampleBoxplot")
),
box(title = "MDS plot",
status = "primary",
width = 4,
plotOutput("mdsplot")
)
)))
),
tabItem(tabName='groupassign',
box (title ="Group assignment",
solidHeader = T, status = "primary",
width = 12,
helpText('Enter GEO accsion ids separated by comma for both Groups (no space between commas). And name the group accordingly
e.g: control, diseased',style="color:black; padding-right:0em;font-size:16px;"),
splitLayout(
textInput("factor1", "Baseline Group"),
textInput("nam1", "Define Group")
),
splitLayout(
textInput("factor2", "Comparison Group"),
textInput("nam2", "Define Group")
),
actionButton(inputId="groupassignment", label="Submit", icon("paper-plane"),
style="color: #fff; background-color: #337ab7; border-color: #2e6da4"),
helpText('Alternatively, you can enter column name and assign sample names to groups. And name the group accordingly
e.g: control, diseased. Use either of the two options.',style="color:black; padding-right:0em;font-size:16px;"),
textInput(inputId="colfactor",
label="Column name of the factor"
),
splitLayout(
textInput("factor1geo", "Baseline Group"),
textInput("namgeo1", "Define Group")
),
splitLayout(
textInput("factor2geo", "Comparison Group"),
textInput("namgeo2", "Define Group")
),
actionButton(inputId="groupassignmentgeo", label="Submit", icon("paper-plane"),
style="color: #fff; background-color: #337ab7; border-color: #2e6da4")),
fluidRow(
box(title = "Box Plot",
status = "primary",
width = 6,
plotOutput("boxplot1")
),
box(title = "Expression Density Plot",
status = "primary",
width = 6,
plotOutput("expressiondensityplot")
)
)
),
tabItem(tabName='groupassign_SRP',
box (title ="Group assignment",
solidHeader = T, status = "primary",
width = 12,
textInput(inputId="colfactor_SRP",
label="Column name of the factor"
),
textInput(inputId="factor1_SRP",
label="Baseline Group"
),
textInput(inputId="factor2_SRP",
label="Comparison Group"
),
actionButton(inputId="groupassignment2", label="Submit", icon("paper-plane"),
style="color: #fff; background-color: #337ab7; border-color: #2e6da4")),
fluidRow(
box(title = "Box Plot",
status = "primary",
width = 4,
plotOutput("boxplot2")
),
box(title = "Expression Density Plot",
status = "primary",
width = 4,
plotOutput("expressiondensityplot2")
),
box(title = "MDS Plot",
status = "primary",
width = 4,
plotOutput("MDSplot2")
)
)
),
tabItem(tabName="limmavoom",
## 1st row with 2 boxes under limma-voom tab panel
fluidRow(
box(title = "Sample contrast matrix and mean-variance trend",
solidHeader = T, status = "primary",
width = 12,
fluidRow(
box(title = "Create the group for contrast matrix and DEGs",
status = "primary",
uiOutput("grouplevels"),
#tags$style("#voomGroupLevel{ font-weight: bold; color: #0033FF; padding-top: .3cm; padding-bottom: .3cm;}"),
p("Please select any two groups for comparison"
, style="font-weight: bold"),
fluidRow(
column(6,
textInput(inputId="Group1",
label="Baseline Group"
)
),
column(6,
textInput(inputId="Group2",
label="Comparison group"
)
),
actionButton(inputId="degAnalysis", label="Submit", icon("paper-plane"),
style="color: #fff; background-color: #337ab7; border-color: #2e6da4")
)
# )
),
column(6,
box(title = "Plot of fitted microarray linear model",
width = NULL,
status = "primary",
plotOutput("voommeanvariance")
)))))),
tabItem(tabName="DEGs",
fluidRow(
#column(8,
## Estimated dispersion under limma-voom tab panel
box(title = "topTable of differentially expressed genes (eBayes method)",
width = 12,
solidHeader = T, status = "primary",
div(DTOutput("reslimma"),style = "font-size:100%"),
downloadButton("voomDownload",
label = "Download")
),
## DEGs Summary to summarize the up- and down-regulated DEGs
box(title = "DEG Summary",
width = 12,
solidHeader = T, status = "primary",
helpText('Default cutoff is P-value=0.05', style="color:black; padding-right:0em; font-size:16px;"),
# h4(textOutput("voomTestDGEtitle"), align="center" ),
tableOutput("summarydegs"),
textInput(inputId="pvalcutoff",
label="FDR adjusted p-value",
value="0.05"),
actionButton(inputId="pvalfilter", label="Submit", icon("paper-plane"),
style="color: #fff; background-color: #337ab7; border-color: #2e6da4"),
)
)
),
tabItem(tabName="treatmethod",
## 1st row with 2 boxes under limma-voom tab panel
fluidRow(
box(title = "TREAT analysis of differentially expressed genes",
solidHeader = T, status = "primary",
width = 12,
box(title = "topTable of differentially expressed genes (TREAT method)",
width = NULL,
status = "primary",
div(DTOutput("restreat"), style = "font-size:100%"),
downloadButton("treatDownload",
label = "Download",
class = NULL)
),
column(6,
box(title = "TREAT analysis",
width = NULL,
status = "primary",
h4("TREAT analysis tests differential gene expression relative to a fold-change threshold",style="font-size:20px"),
helpText('Default cutoff is Adj.P-value=0.05', style="color:black; padding-right:0em; font-size:16px;"),
textInput(inputId="FC",
label=HTML("log<sub>2</sub>FC cutoff"),
value="1.5"),
textInput(inputId="pvalcutoff2",
label="FDR adjusted p-value cutoff",
value="0.05"),
actionButton(inputId="treatanalysis", label="Treat", icon("paper-plane"),
style="color: #fff; background-color: #337ab7; border-color: #2e6da4"),
tableOutput("summaryaftertreat"),
)),
column(6,
box(title = "MD-plot after TREAT analysis",
width = NULL,
status = "primary",
#tableOutput("summaryaftertreat"),
downloadButton("MDplotDownload",
label = "Download"),
plotOutput("MDplot")
))
))
),
tabItem(tabName="confect",
# # 1st row with 2 boxes under limma-voom tab panel
fluidRow(
box(title = "topConfects analysis of differentially expressed genes",
solidHeader = T, status = "primary",
width = 12,
box(title = "top differentially expressed genes (topConfects method)",
width = NULL,
status = "primary",
h4("topConfects builds on the TREAT method to rank genes by confident effect size (based on the Confidence Interval) at a fixed FDR",style="font-size:20px"),
textInput(inputId="fdr",
label="FDR adjusted p-value",
value="0.05"),
actionButton(inputId="runconfect", label="Run topconfects", icon("paper-plane"),
style="color: #fff; background-color: #337ab7; border-color: #2e6da4"),
div(DTOutput("restopconfect"), style = "font-size:100%"),
downloadButton("topconfectsDownload",
label = "Download")
))),
fluidRow(
column(4,
box(title = "Plot topConfects results",
width = NULL,
status = "primary",
#tableOutput("summaryaftertreat"),
plotOutput("confectplot"),
downloadButton("confectplotDownload",
label = "Download")
)),
column(4,
box(title = "Compare between eBayes and topConfects DEGs",
width = NULL,
status = "primary",
#tableOutput("summaryaftertreat"),
plotOutput("voomvsconfectplot"),
downloadButton("voomvsconfectplotDownload",
label = "Download")
)),
column(4,
box(title = "MD plot of the top n number of genes ranked by eBayes or topConfects",
width = NULL,
status = "primary",
#tableOutput("summaryaftertreat"),
textInput(inputId="n",
label="No. of genes",
value="500"),
actionButton(inputId="plotlimma_confect", label="MD plot", icon("paper-plane"),
style="color: #fff; background-color: #337ab7; border-color: #2e6da4"),
plotOutput("MD_limma_confects"),
downloadButton("MD_limma_confectsDownload",
label = "Download")
))
)
),
tabItem(tabName="camera",
fluidRow(
box(title = "Competitive Geneset Test Accounting for Inter-gene Correlation (camera method)",
width = NULL,
solidHeader = T, status = "primary",
#helpText('Choose Enriched gene sets',style="color:black; padding-right:0em; font-size:16px;"),
selectInput(inputId = "pathwaysname",
label = "Select geneset database:",
choices = c(
"All Gene_Ontology",
"Gene Ontology: Biological Process (Full)",
"Gene Ontology: Cellular Component (Full)",
"Gene Ontology: Molecular Function (Full)",
"Human Phenotype Ontology",
"Reactome",
"MSigDB Hallmark",
"BioCarta",
"KEGG",
"PID",
"WikiPathways",
"MSigDB Chemical and Genetic Perturbations ",
"MSigDB Computational Genesets",
"MSigDB Oncogenic Signature Genesets",
"MSigDB Immunologic signature Genesets",
"MSigDB Cell Types"
)),
actionButton(inputId="pathwayenrichment", label="Enrichment (CAMERA)", icon("paper-plane"),
style="color: #fff; background-color: #337ab7; border-color: #2e6da4"),
div(DTOutput("enrichmentout"), style = "font-size:90%"),
downloadButton("enrichmentontologyDownload",
label = "Download")
)),
fluidRow(
box(title = "Barcode plot",width = NULL,
solidHeader = T, status = "primary",
h4("Enter Gene-set for plotting Barcode Plot"),
textInput(inputId="geneset",
label="Gene Set",
value= ""),
actionButton(inputId="barcode", label="Barcode Plot", icon("paper-plane"),
style="color: #fff; background-color: #337ab7; border-color: #2e6da4"),
plotOutput("barcodeout"),
downloadButton("barcodeDownload",
label = "Download")
)
)
),
tabItem(tabName="fgsea",
fluidRow(
box(title = "fast Gene Set Enrichment Analysis (fGSEA)",
width = NULL,
solidHeader = T, status = "primary",
#column(4,
box(title = "Gene ranking and geneset database selection",
width = NULL,
solidHeader = T, status = "primary",
helpText('Make sure to run TREAT and/or topConfects in order to use as a ranking method',style="color:black; padding-right:0em; font-size:16px;"),
selectInput(inputId = "genelist",
label = "Select a gene ranking method:",
choices = c("eBayes_tvalue",
"TREAT_tvalue",
"topConfects")),
helpText('Choose Enriched gene sets',style="color:black; padding-right:0em; font-size:16px;"),
selectInput(inputId = "pathwaylist",
label = "Choose Enriched gene sets:",
choices = c("All Gene_Ontology",
"Gene Ontology: Biological Process (Full)",
"Gene Ontology: Cellular Component (Full)",
"Gene Ontology: Molecular Function (Full)",
"Human Phenotype Ontology",
"Reactome",
"MSigDB Hallmark",
"BioCarta",
"KEGG",
"PID",
"WikiPathways",
"MSigDB Chemical and Genetic Perturbations",
"MSigDB Computational Genesets",
"MSigDB Oncogenic Signature Genesets",
"MSigDB Immunologic signature Genesets",
"MSigDB Cell Types")),
actionButton(inputId="runfgsea", label="Run fgsea", icon("paper-plane"),
style="color: #fff; background-color: #337ab7; border-color: #2e6da4"),
),
box(title = "fGSEA Results",
width = NULL,
solidHeader = T, status = "primary",
div(DTOutput("fgseaout_gobp"), style = "font-size:90%"),
downloadButton("fgseaDownload",
label = "Download")),
box(title = "fGSEA Plot",
width = NULL,
solidHeader = T, status = "primary",
plotOutput('fgseaplot'),
downloadButton("fgseaplotDownload",
label = "Download"))
))
),
tabItem(tabName="fgseaTF",
fluidRow(
box(title = "Fast Gene Set TF Enrichment Analysis (fgsea)",
width = NULL,
solidHeader = T, status = "primary",
#column(4,
box(title = "Filter criteria",
width = NULL,
solidHeader = T, status = "primary",
helpText('Choose from the gene ranking matrix, be careful to run Treat and/or Topconfects if you choose either of these',style="color:black; padding-right:0em; font-size:16px;"),
selectInput(inputId = "genelistTF",
label = "Choose a gene ranking matrix:",
choices = c("eBayes_tvalue",
"TREAT_tvalue",
"topConfects")),
helpText('Choose Enriched gene sets',style="color:black; padding-right:0em; font-size:16px;"),
selectInput(inputId = "pathwaylistTF",
label = "Choose Enriched gene sets:",
choices = c("ENCODE-ChEA Consensus (Enrichr)",
"ChEA 2016 (Enrichr)",
"ENCODE 2015 (Enrichr)",
"ReMap ChIP-seq 2018 Human",
"TRRUST 2019 Human",
"ChEA3 Literature ChIP-Seq",
"TRANSFAC/JASPAR PWMs (Enrichr)",
"Gene Transcription Regulation Database (GTRD v20.06)",
"MSigDB Legacy TF targets",
"miRTarBase 2017",
"miRNA TargetScan 2017",
"miRDB v6.0")),
actionButton(inputId="runfgseaTF", label="Run fgsea", icon("paper-plane"),
style="color: #fff; background-color: #337ab7; border-color: #2e6da4"),
),
div(DTOutput("fgseaout_TF"), style = "font-size:90%"),
downloadButton("fgseaTFDownload",
label = "Download"),
plotOutput('fgseaplotTF')
))
),
tabItem(tabName="enrichrontology",
fluidRow(
box(title = "Ontology enrichment using Enrichr",
width = NULL,
solidHeader = T, status = "primary",
box(title = "Filter criteria",
width = NULL,
solidHeader = T, status = "primary",
helpText('Make sure to run TREAT and/or topConfects in order to use as a ranking method',style="color:black; padding-right:0em; font-size:16px;"),
selectInput(inputId = "genesenrichr",
label = "Select a gene ranking method:",
choices = c("eBayes_tvalue",
"TREAT_tvalue",
"topConfects")),
selectInput(inputId = "databaseOntology",
label = "Select geneset database:",
choices = c("GO_Biological_Process_2018",
"GO_Biological_Process_2017b",
"GO_Molecular_Function_2018",
"GO_Molecular_Function_2017b",
"GO_Cellular_Component_2018",
"GO_Cellular_Component_2017b",
"MSigDB_Hallmark_2020",
"Reactome_2016",
"BioCarta_2016",
"KEGG_2019_Human",
"Panther_2016",
"WikiPathways_2019_Human",
"BioPlanet_2019")),
textInput(inputId="FC4",
label=HTML("log<sub>2</sub>FC or confect (topConfects) cutoff"),
value="0.5"),
textInput(inputId="pvalenrichonto",
label="P value cutoff",
value="0.05"
),
actionButton(inputId="runenrichrOntology", label="Run enrichr Ontology", icon("paper-plane"),
style="color: #fff; background-color: #337ab7; border-color: #2e6da4")
),
box(title = "Ontology enrichment on up-regulated genes",
width = NULL,
solidHeader = T, status = "primary",
div(DTOutput("up_ontologyenrichr"), style = "font-size:90%; width: 90%"),
downloadButton("upenrichrontologyDownload",
label = "Download")
),
box(title = "Ontology Enrichment analysis on down-regulated genes",
width = NULL,
solidHeader = T, status = "primary",
div(DTOutput("down_ontologyenrichr"), style = "font-size:90%; width: 90%"),
downloadButton("downenrichrontologyDownload",
label = "Download")
)
))
),
tabItem(tabName="enrichrontologyplots",
fluidRow(
box(title = "Filter criteria",
width = NULL,
solidHeader = T, status = "primary",
textInput(inputId="num",
label="Number of terms to plot",
value="20"),
selectInput(inputId = "pval_FDR2",
label = "Select P-val or FDR:",
choices = c("P.value",
"Adjusted.P.value")),
actionButton(inputId="filterenrichrOntology", label="Plotting", icon("paper-plane"),
style="color: #fff; background-color: #337ab7; border-color: #2e6da4")
),
),
fluidRow(column(6,
box(title = "Bar plot of up-regulated terms",
width = NULL,
solidHeader = T, status = "primary",
plotOutput('upplots'),
downloadButton("upplotsenrichrOntologyDownload",
label = "Download")
)),
column(6,
box(title = "Bar plot of down-regulated terms",
width = NULL,
solidHeader = T, status = "primary",
plotOutput('downplots'),
downloadButton("downplotsenrichrOntologyDownload",
label = "Download")
))),
fluidRow(
box(title = "Combined scores from up-regulated and down-regulated genes",
width = NULL,
solidHeader = T, status = "primary",
helpText("Combined plot will have double the number of terms used in filter crieria"),
plotOutput('updownplots'),
downloadButton("updownplotsenrichrOntologyDownload",
label = "Download")
))),
tabItem(tabName="enrichr",
fluidRow(
box(title = "TF enrichment using Enrichr",
width = NULL,
solidHeader = T, status = "primary",
box(title = "Filter criteria",
width = NULL,
solidHeader = T, status = "primary",
helpText('Make sure to run TREAT and/or topConfects in order to use as a ranking method',style="color:black; padding-right:0em; font-size:16px;"),
selectInput(inputId = "genes",
label = "Select a gene ranking method:",
choices = c("eBayes_tvalue",
"TREAT_tvalue",
"topConfects")),
selectInput(inputId = "database",
label = "Select geneset database:",
choices = c("ENCODE_and_ChEA_Consensus_TFs_from_ChIP-X",
"ENCODE_TF_ChIP-seq_2015",
"ChEA_2016",
"TRANSFAC_and_JASPAR_PWMs",
"TargetScan_microRNA",
"ARCHS4_TFs_Coexp",
"TRRUST_Transcription_Factors_2019",
"TargetScan_microRNA_2017",
"miRTarBase_2017"))
,
textInput(inputId="FC3",
label=HTML("log<sub>2</sub>FC or confect (topConfects) cutoff"),
value="0.5"),
textInput(inputId="pvalenrichTF",
label="P value cutoff",
value="0.05"
),
actionButton(inputId="runenrichr", label="Run enrichr", icon("paper-plane"),
style="color: #fff; background-color: #337ab7; border-color: #2e6da4")
),
box(title = "TF enrichment on up-regulated genes",
width = NULL,
solidHeader = T, status = "primary",
div(DTOutput("up_enrichr"), style = "font-size:90%; width: 90%"),
downloadButton("upenrichrDownload",
label = "Download")
),
box(title = "TF Enrichment analysis on down-regulated genes",
width = NULL,
solidHeader = T, status = "primary",
div(DTOutput("down_enrichr"), style = "font-size:90%; width: 90%"),
downloadButton("downenrichrDownload",
label = "Download")
)
))
#,uiOutput("Next_stepdorothEA", align="center")
),
tabItem(tabName="enrichrTFplots",
fluidRow(
box(title = "Filter criteria",
width = NULL,
solidHeader = T, status = "primary",
textInput(inputId="num2",
label="Number of terms to plot",
value="20"),
selectInput(inputId = "pval_FDR",
label = "Select P-val or FDR:",
choices = c("P.value",
"Adjusted.P.value")),
actionButton(inputId="filterenrichrTF", label="Plotting", icon("paper-plane"),
style="color: #fff; background-color: #337ab7; border-color: #2e6da4")
),
),
fluidRow(column(6,
box(title = "Bar plot of up-regulated terms",
width = NULL,
solidHeader = T, status = "primary",
plotOutput('upTFplots'),
downloadButton("upTFplotsDownload",
label = "Download")
)),
column(6,
box(title = "Bar plot of down-regulated terms",
width = NULL,
solidHeader = T, status = "primary",
plotOutput('downTFplots'),
downloadButton("downTFplotsDownload",
label = "Download")
))),
fluidRow(
box(title = "Combined scores from up-regulated and down-regulated genes",
width = NULL,
solidHeader = T, status = "primary",
helpText("Combined plot will have double the number of terms used in filter crieria"),
plotOutput('updownTFplots'),
downloadButton("updownTFplotsDownload",
label = "Download")
))),
tabItem(tabName="dorothEA",
fluidRow(
box(title = "TF activity analysis using viper algorithm and DoRothEA regulons",
width = NULL,
solidHeader = T, status = "primary",
selectInput(inputId = "dorothearegulon",
label = "Select the DoRothEA regulon:",
choices = c("regulon_a",
"regulon_b",
"regulon_c",
"regulon_d",
"regulon_e")),
helpText('Make sure to run TREAT and/or topConfects in order to use as a ranking method',style="color:black; padding-right:0em; font-size:16px;"),
selectInput(inputId = "genesl",
label = "Select a gene ranking method:",
choices = c("eBayes_tvalue",
"TREAT_tvalue",
"topConfects")),
actionButton(inputId="rundorothea", label="Run DoRothEA", icon("paper-plane"),
style="color: #fff; background-color: #337ab7; border-color: #2e6da4")),
box(title = "DoRothEA TF Activity Analysis",
width = NULL,
solidHeader = T, status = "primary",
div(DTOutput("dorothea"), style = "font-size:90%"),
downloadButton("dorotheaDownload",
label = "Download")
)),
fluidRow(
box(title = "Genes contributing most to these TF activity",
width = NULL,
solidHeader = T, status = "primary",
tableOutput("genesummary")
),
box(title = "A graphics representation of the results (msVIPER plot)",
width = NULL,
solidHeader = T, status = "primary",
plotOutput("Plotdorothea"),
downloadButton("dorotheaplotDownload",
label = "Download")
)
)),
tabItem(tabName="shadowana",
fluidRow(
box(title = "Shadow analysis",
width = NULL,
solidHeader = T, status = "primary",
helpText('A regulator may appear to be significantly activated because it may share its regulon
of its with a activated TF (shadow effect). To account for this shadow analysis is performed which can list shadow pairs.',style="color:black; padding-right:0em; font-size:16px;"),
textInput(inputId="number",
label="Number of top regulators",
value="25"),
actionButton(inputId="runshadow", label="Run shadow analysis", icon("paper-plane"),
style="color: #fff; background-color: #337ab7; border-color: #2e6da4")),
box(title = "Result of Shadow Analysis",
width = NULL,
solidHeader = T, status = "primary",
tableOutput("shadowanalysis"),
downloadButton("shadowDownload",
label = "Download")
),
box(title = "Shadow pairs",
width = NULL,
solidHeader = T, status = "primary",
tableOutput("shadowpairssummary")
)
)),
tabItem(tabName="Cerno",
fluidRow(
box(title = "Coincident Extreme Ranks in Numerical Observations (CERNO)",
width = NULL,
solidHeader = T, status = "primary",
selectInput(inputId = "cernogene",
label = "Select geneset database:",
choices = c("Hallmark",
"Reactome",
"Gene Ontology Biological Process (MSigDB Filtered)",
"Gene Ontology Biological Process (Full)",
"Biocarta",
"Gene Ontology Molecular Function (MSigDB Filtered)",
"Gene Ontology Molecular Function (Full)",
"Gene Ontology Cellular Compartment (MSigDB Filtered)",
"Gene Ontology Cellular Compartment (Full)",
"Human Phenotype Ontology",
"KEGG",
"Pathway Interaction Database",
"Wikipathways",
"MSigDB Chemical and Genetic Perturbations",
"MSigDB Computational Genesets",
"MSigDB Oncogenic Signature Genesets",
"MSigDB Immunologic signature Genesets",
"MSigDB Cell Types"
)),
textInput(inputId="textsize",
label="Text size for the Panel Plot",
value="0.5"),
actionButton(inputId="runcerno", label="Run cerno", icon("paper-plane"),
style="color: #fff; background-color: #337ab7; border-color: #2e6da4")
),
box(title = "Results from CERNO analysis",
width = NULL,
solidHeader = T, status = "primary",
#tableOutput("cernoanalysis"),
div(DTOutput("cernoanalysis"), style = "font-size:90%"),
downloadButton("cernoDownload",
label = "Download")
),
box(title = "Panel Plot",
width = NULL, status = "primary",
downloadButton("PanelplotDownload",
label = "Download"),
plotOutput("Panelplot")
)
)),
tabItem(tabName="Hypergeometric",
fluidRow(
box(title = "Hypergeometric Enrichment",
width = NULL,
solidHeader = T, status = "primary",
helpText("Make sure to run TREAT and/or topConfects in order to use as a ranking method"),
selectInput(inputId = "geneshyper",
label = "Select a gene ranking method:",
choices = c("eBayes_tvalue",
"TREAT_tvalue",
"topConfects")),
selectInput(inputId = "hypergene",
label = "Select geneset database:",
choices = c("Hallmark",
"Reactome",
"Gene Ontology Biological Process (MSigDB Filtered)",
"Gene Ontology Biological Process (Full)",
"Biocarta",
"Gene Ontology Molecular Function (MSigDB Filtered)",
"Gene Ontology Molecular Function (Full)",
"Gene Ontology Cellular Compartment (MSigDB Filtered)",
"Gene Ontology Cellular Compartment (Full)",
"Human Phenotype Ontology",
"KEGG",
"Pathway Interaction Database",
"Wikipathways",
"MSigDB Chemical and Genetic Perturbations",
"MSigDB Computational Genesets",
"MSigDB Oncogenic Signature Genesets",
"MSigDB Immunologic signature Genesets",
"MSigDB Cell Types")),
textInput(inputId="pvalcutoff3",
label="FDR adjusted p-value cutoff",
value="0.05"),
textInput(inputId="FC2",
label=HTML("log<sub>2</sub>FC or confect (topConfects) cutoff"),
value="0.5"),
actionButton(inputId="runhyper", label="Run Hypergeometric", icon("paper-plane"),
style="color: #fff; background-color: #337ab7; border-color: #2e6da4")
),
box(title = "Hypergeometric test of up-regulated genes",
width = NULL,
solidHeader = T, status = "primary",
#tableOutput("hyperanalysis"),
div(DTOutput("hyperanalysis"), style = "font-size:90%"),
downloadButton("hyperDownload",
label = "Download")
),
box(title = "Hypergeometric test of down-regulated genes",
width = NULL,
solidHeader = T, status = "primary",
#tableOutput("hyperanalysisdown"),
div(DTOutput("hyperanalysisdown"), style = "font-size:90%"),
downloadButton("hyperdownDownload",
label = "Download")
)
)),
tabItem(tabName="CernoTF",
fluidRow(
box(title = "TF-gene target enrichment using Coincident Extreme Ranks in Numerical Observations (CERNO)",
width = NULL,
solidHeader = T, status = "primary",
selectInput(inputId = "cernoTFgene",
label = "Select geneset database:",
choices = c("ENCODE/ChEA Consensus (Enrichr)",
"ReMap ChIP-Seq",
"TRRUST" ,
"TRANSFAC/JASPAR PWMs (Enrichr)",
"Gene Transcription Regulation Database (GTRD v20.06)",
"miRTarBase 2017" ,
"miRDB v6.0")),
textInput(inputId="textsize2",
label="Text size for the Panel Plot",
value="0.5"),
actionButton(inputId="runTFcerno", label="Run cerno", icon("paper-plane"),
style="color: #fff; background-color: #337ab7; border-color: #2e6da4")
),
box(title = "Results from CERNO TF analysis",
width = NULL,
solidHeader = T, status = "primary",
#tableOutput("cernoTFanalysis"),
div(DTOutput("cernoTFanalysis"), style = "font-size:90%"),
downloadButton("cernoTFDownload",
label = "Download")
),
box(title = "Panel Plot",
width = NULL, status = "primary",
downloadButton("PanelplotTFDownload",
label = "Download"),
plotOutput("PanelplotTF")
)
)),
tabItem(tabName="HypergeometricTF",
fluidRow(
box(title = "Hypergeometric TF Enrichment",
width = NULL,
solidHeader = T, status = "primary",
helpText("Make sure to run TREAT and/or topConfects in order to use as a ranking method"),
selectInput(inputId = "geneshyperTF",
label = "Select a gene ranking method:",
choices = c("eBayes_tvalue",
"TREAT_tvalue",
"topConfects")),
selectInput(inputId = "hyperTFgene",
label = "Select geneset database:",
choices = c("ENCODE/ChEA Consensus (Enrichr)",
"ReMap ChIP-Seq",
"TRRUST" ,
"TRANSFAC/JASPAR PWMs (Enrichr)",
"Gene Transcription Regulation Database (GTRD v20.06)",
"miRTarBase 2017" ,
"miRDB v6.0")),
textInput(inputId="pvalcutoff4",
label="FDR adjusted p-value cutoff",
value="0.05"),
textInput(inputId="FC4",
label=HTML("log<sub>2</sub>FC or confect (topConfects) cutoff"),
value="0.5"),
actionButton(inputId="runTFhyper", label="Run Hypergeometric", icon("paper-plane"),
style="color: #fff; background-color: #337ab7; border-color: #2e6da4")
),
box(title = "Hypergeometric TF enrichment of up-regulated genes",
width = NULL,
solidHeader = T, status = "primary",
#tableOutput("hyperTFanalysis"),
div(DTOutput("hyperTFanalysis"), style = "font-size:90%"),
downloadButton("hyperTFDownload",
label = "Download")
),
box(title = "Hypergeometric TF enrichment of down-regulated genes",
width = NULL,
solidHeader = T, status = "primary",
#tableOutput("hyperTFanalysisdown"),
div(DTOutput("hyperTFanalysisdown"), style = "font-size:90%"),
downloadButton("hyperTFdownDownload",
label = "Download")
)
))
)
)
# Assemble the final shinydashboard UI from the header, sidebar and body
# objects built earlier in this file.
ui <- dashboardPage(header, sidebar, body)
|
f512374c6dae0825b6b15a0695b4648f4be9d210
|
4cee6dec70875ca85f20dd738932be86f361a63e
|
/pkg/tests/testthat/test-ci.R
|
3049404f27ec88de239ab86e2f5db38c74d2f6e7
|
[] |
no_license
|
dieterich-lab/pulseR
|
9b7114769b48a305ba0a11357226e8f774b73a20
|
1323b378e95b483c8bda99d6c71befccd45c810f
|
refs/heads/master
| 2021-01-18T20:40:00.474158
| 2018-10-26T10:45:32
| 2018-10-26T10:45:32
| 72,013,067
| 2
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,458
|
r
|
test-ci.R
|
context("Confidence intervals")
# Fixed seed: the simulated count data below must be reproducible.
set.seed(259)
# Symbolic mean formulas for two quantities (non-standard evaluation:
# mu and nu appear to be parameter names resolved later against `par`).
formulas <- MeanFormulas(X = mu, Y = nu)
# Which formulas contribute to each condition: "EX" observes X only,
# "EXandY" observes a mixture of X and Y.
formulaIndexes <- list(
EX = 'X',
EXandY = c('X', 'Y'))
# Initial normalisation factors per condition.
# NOTE(review): this value is recomputed from `known` a few lines below,
# so this assignment looks dead; kept byte-identical to the original.
normFactors <- list(
EX = c(1),
EXandY = c(1, .1)
)
nTime <- 1
nReplicates <- 4
# Experimental design: one row per sample (condition x time point).
conditions <- data.frame(condition = rep(names(formulaIndexes), each = nTime),
time = rep(1:nTime, length(formulas) * nReplicates))
rownames(conditions) <- paste0("sample_", seq_along(conditions$condition))
# Expand the formulas/indexes to the per-sample level and derive the
# normalisation-factor structure actually used for fitting.
known <- addKnownToFormulas(formulas, formulaIndexes, conditions)
normFactors <- known$formulaIndexes[unique(names(known$formulaIndexes))]
# One fraction label per sample (interaction of all condition columns).
fractions <- as.character(interaction(conditions))
nGenes <- 2
# Ground-truth parameters used for simulation: negative-binomial size
# plus per-gene expression levels mu and nu.
par <- list(size = 1e4)
par <- c(par, list(
mu = runif(nGenes, 100, 1000), nu = runif(nGenes,100,1000)))
allNormFactors <- multiplyList(normFactors, fractions)
# Simulate a count matrix from the model and wrap it in a PulseData object.
counts <- generateTestDataFrom(
formulas, formulaIndexes, allNormFactors, par, conditions)
pd <- PulseData(
counts = counts,
conditions = conditions,
formulas = formulas,
formulaIndexes = formulaIndexes,
groups = fractions
)
# Fitting options: box constraints (lb/ub) for every parameter class,
# convergence tolerances and silent output.
options <- list()
options$lb <- list(mu = 1, nu = 1)
options$lb <- pulseR:::.b(options$lb, par)
options$ub <- list(mu = 1e4, nu = 1e4)
options$ub <- pulseR:::.b(options$ub, par)
options$lb$size <- 1
options$ub$size <- 1e6
options$lb$normFactors <- pulseR:::assignList(normFactors, .01)
options$ub$normFactors <- pulseR:::assignList(normFactors, 20)
options <- setTolerance(.01,shared = .01, normFactors = .01,options = options)
options$verbose <- "silent"
# Fit the model once; `fit` is shared by every test block below.
par$normFactors <- normFactors
fit <- fitModel(pd, par,options)
# At the fitted optimum the profile likelihood, relative to the optimum,
# must be (numerically) zero — both via the gene-specific helper plGene
# and via the generic pl interface.
test_that("plGene is zero at optimum", {
expect_lte(
abs(plGene("mu",1,fit, pd,options)(fit$mu[1])$value), 1e-4)
expect_lte(
abs(pl(list("mu",1),fit, pd,options)(fit$mu[1])$value), 1e-4)
})
# Profiling over a degenerate interval (both endpoints at the fitted value,
# a single point) must return a log-likelihood difference of ~0, for both
# the generic profile() and the gene-specific profileGene() entry points.
test_that("profile estimations on the interval", {
prof <- profile(list("mu", 1), pd, fit, options,
interval = rep(fit$mu[1], 2), numPoints = 1)
expect_lte(abs(prof$logL), 1e-6)
prof <- profileGene("mu", 1, pd, fit, options,
interval = rep(fit$mu[1], 2), numPoints = 1)
expect_lte(abs(prof$logL), 1e-6)
})
# Smoke test for confidence-interval computation: ciGene/ci must run
# without error on the fitted model.
# NOTE(review): the vapply() result (likelihood drop at each CI endpoint)
# is computed but never asserted against anything — this block only
# verifies that the calls complete; consider adding an expectation.
test_that("ci calculation", {
cis <- ciGene("mu",1,pd,fit,options)
optimum <- evaluateLikelihood(fit, pd)
vapply(cis, function(x) {
# Plug each CI endpoint back into the fit and measure the
# log-likelihood difference from the optimum.
p <- .assignElement(fit, list("mu",1), x)
evaluateLikelihood(p, pd) - optimum
}, double(1))
cis <- ci(list("mu", 1), pd, fit, options)
})
|
7a03d797ceddba1aaeaa34dcac0741b0e31da057
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/tangram/examples/table_builder.Rd.R
|
51a33b43f43385016eb8a56b090d5f625acccfe6
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,077
|
r
|
table_builder.Rd.R
|
library(tangram)
### Name: table_builder
### Title: Table Construction Toolset
### Aliases: table_builder col_header row_header write_cell home cursor_up
###   cursor_down cursor_left cursor_right cursor_pos carriage_return
###   line_feed new_line new_row new_col table_builder_apply add_col
###   add_row
### ** Examples
library(magrittr)
# Demonstrates tangram's cursor-based table builder: define the column and
# row headers first, then fill the grid cell by cell by moving a cursor
# around it (similar to a text terminal).
table_builder() %>%
col_header("One","Two","Three","Four") %>%   # four column labels
row_header("A", "B", "C") %>%                # three row labels
write_cell("A1") %>%                         # fill first cell of row A
cursor_right() %>%
add_col("A2", "A3") %>%                      # rest of row A, column-wise
home() %>%                                   # back to top-left data cell
new_line() %>%                               # move down to row B
table_builder_apply(1:3, FUN=function(tb, x) {
# Write B1..B3 across row B, advancing the cursor after each cell.
tb %>% write_cell(paste0("B",x)) %>% cursor_right()
}) %>%
new_col() %>%
add_row(paste0(c("A","B","C"), 4)) %>%       # fill column 4 top to bottom
cursor_up(2) %>%
line_feed() %>%
cursor_left(3) %>%
add_col(paste0("C", 1:4))                    # fill row C
|
ee78e6c58da9897236571c83da25a21cebe58de4
|
948b78fc214a1b9981790c83abb6284758dbfa89
|
/r-library/man/locfitGrowthEstimate.Rd
|
aa1ce924d42b0597c62fdea0ec965936be6454fc
|
[
"MIT"
] |
permissive
|
terminological/jepidemic
|
4ea81235273649b21cf11108c5e78dd7612fdf6e
|
f73cc26b0d0c431ecc31fcb03838e83d925bce7a
|
refs/heads/main
| 2023-04-14T10:13:56.372983
| 2022-05-24T22:07:10
| 2022-05-24T22:07:10
| 309,675,032
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 900
|
rd
|
locfitGrowthEstimate.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/estimators.R
\name{locfitGrowthEstimate}
\alias{locfitGrowthEstimate}
\title{Generate a smoothed estimate of the absolute growth rate of cases using a poisson model.}
\usage{
locfitGrowthEstimate(
simpleTimeseries,
degree = 2,
window = 14,
weightByWeekday = FALSE,
...
)
}
\arguments{
\item{simpleTimeseries}{a minimal time series including date, value and, if available, total. If total is present, the proportion is value/total; otherwise it is value.}
\item{degree}{the polynomial degree}
\item{window}{the data window in days}
\item{...}{may include "nearestNeigbour=FALSE" to disable the tail behaviour of locfit}
}
\value{
a timeseries with growth rate estimates (columns starting with "Growth")
}
\description{
Generate a smoothed estimate of the absolute growth rate of cases using a poisson model.
}
|
99d4bac7d4b24acd7e4faec33478c5d50b3598d4
|
bf6eeabe8154eb0c192c1f27603dbd53fca4bdec
|
/R/class.R
|
7f1acfa1bb5f9e9c8f510d2c147ed8908d45ba9b
|
[] |
no_license
|
gokmenzararsiz/MLSeq
|
a81484c77bc77cc43a9317a5cc71ec10eb751a63
|
f58cf5297d97b1f338d1748cc38df75f2e2accd3
|
refs/heads/master
| 2021-01-17T09:21:45.177052
| 2016-12-22T07:42:47
| 2016-12-22T07:42:47
| 18,428,996
| 1
| 1
| null | 2016-04-19T13:10:50
| 2014-04-04T05:58:29
|
R
|
UTF-8
|
R
| false
| false
| 3,387
|
r
|
class.R
|
# Register caret's S3 classes so they can be used as S4 slot types below.
setOldClass(c("confusionMatrix","train"))
#' \code{MLSeq} object
#'
#' For classification, this is the main class for the \code{MLSeq} package.
#'
#' Objects can be created by calls of the form \code{new("MLSeq", ...)}. This type
#' of objects is created as a result of \code{classify} function of \code{MLSeq} package.
#' It is then used in \code{predictClassify} function for predicting the class labels of new samples.
#'
#' @section Slots:
#'
#' \describe{
#'   \item{\code{method}:}{stores the name of used classification method in the classification model}
#'   \item{\code{transformation}:}{stores the name of used transformation method in the classification model}
#'   \item{\code{normalization}:}{stores the name of used normalization method in the classification model}
#'   \item{\code{confusionMat}:}{stores the information of classification performance results}
#'   \item{\code{trainedModel}:}{stores the information about training process and model parameters that used in the corresponding model}
#'   \item{\code{ref}:}{stores user defined reference class}
#' }
#'
#' @note An \code{MLSeq} class stores the results of \code{classify} function and offers further slots that are populated
#' during the analysis. The slot \code{confusionMat} stores the information of classification performance results. These
#' results contain the classification table and several statistical measures including accuracy rate, sensitivity, specifity,
#' positive and negative predictive rates, etc. \code{method}, \code{normalization} and \code{transformation} slots store
#' the name of used classification method, normalization method and transformation method in the classification model respectively.
#' Lastly, the slot \code{trainedModel} stores the information about training process and model parameters that used in the corresponding model.
#'
#' @author Gokmen Zararsiz, Dincer Goksuluk, Selcuk Korkmaz, Vahap Eldem, Izzet Parug Duru, Turgay Unver, Ahmet Ozturk
#'
#' @docType class
#' @name MLSeq-class
#' @rdname MLSeq-class
#' @aliases MLSeq-class
#' @exportClass MLSeq
setClass("MLSeq",
slots = c(method = "character",
transformation = "character",
normalization = "character",
confusionMat = "confusionMatrix",
trainedModel = "train",
ref = "character"),
prototype = prototype(confusionMat=structure(list(), class="confusionMatrix"),
trainedModel = structure(list(), class="train")))
# Validity checks for "MLSeq" objects. Per S4 convention, returning a
# character string marks the object as invalid with that message;
# returning TRUE accepts it. method()/normalization()/transformation()/
# ref() are accessor generics defined elsewhere in the package.
setValidity("MLSeq", function( object ) {
if (!(method(object) %in% c("svm", "bagsvm", "randomforest", "cart")))
return("Error: 'method' slot must be in one of the following methods: \"svm\", \"bagsvm\", \"randomforest\", \"cart\" ")
if (!(normalization(object) %in% c("deseq", "none", "tmm")))
return("Error: 'normalization' slot must be in one of the following: \"deseq\", \"none\", \"tmm\" ")
if (!(transformation(object) %in% c("vst", "voomCPM", "NULL")))
return("Error: 'transformation' slot must be in one of the following: \"vst\", \"voomCPM\" ")
if (!is.character(ref(object)))
return("Error: 'ref' slot must be a character ")
# NOTE(review): although phrased as a warning, returning a string here
# makes the object INVALID (construction fails). Confirm whether a
# warning() call with fall-through was intended instead.
if ((normalization(object) == "tmm" & transformation(object) == "vst"))
return("Warning: \"vst\" transformation can be applied only with \"deseq\" normalization. \"voom-CPM\" transformation is used. ")
TRUE
} )
|
fe6c6bb235b114dd84c17b6c5e246700037bdeaa
|
548f28065c18662debd5b6514fc634913a77b49c
|
/medium_case_animate.R
|
72b2467944c25b0926ca917558f536c463484b5d
|
[
"MIT"
] |
permissive
|
EngyMa/animated-case
|
34a5f6ab669d008af3d473961ac08b57b7c59d80
|
cb762351ac6ecc7d218babe4b9a9c46e01558d18
|
refs/heads/master
| 2020-04-25T17:11:30.399169
| 2018-12-04T12:35:46
| 2018-12-04T12:35:46
| 172,938,451
| 1
| 0
|
MIT
| 2019-02-27T15:15:01
| 2019-02-27T15:15:00
| null |
UTF-8
|
R
| false
| false
| 6,451
|
r
|
medium_case_animate.R
|
# ggplot2 theme to use later
# Custom ggplot2 theme: theme_bw base with a muted blue panel, white plot
# background, no borders/gridlines, and all text in dark slate (#2a3132).
# `ticks = FALSE` additionally removes the axis tick marks.
theme_chris <- function (base_size = 12, base_family = "serif", ticks = TRUE)
{
  # All plain text elements share one colour spec.
  dark_text <- element_text(colour = "#2a3132")

  th <- theme_bw(base_family = base_family, base_size = base_size) +
    theme(
      legend.background = element_blank(),
      legend.key        = element_blank(),
      panel.border      = element_blank(),
      strip.background  = element_blank(),
      panel.background  = element_rect(fill = "#94B1C533", colour = NA),
      plot.background   = element_rect(fill = "#ffffff"),
      axis.line         = element_blank(),
      panel.grid        = element_blank(),
      axis.text.x       = dark_text,
      axis.title.x      = dark_text,
      axis.title.y      = dark_text,
      axis.text.y       = dark_text,
      axis.title        = dark_text,
      # Title keeps a bottom margin in addition to the shared colour.
      plot.title        = element_text(colour = "#2a3132",
                                       margin = margin(0, 0, 10, 0)),
      plot.subtitle     = dark_text,
      plot.caption      = dark_text,
      legend.title      = dark_text,
      legend.text       = dark_text
    )

  if (!ticks) {
    th <- th + theme(axis.ticks = element_blank())
  }
  th
}
# import yearly data (total, summed values, not means or medians)
# dataset compiled from historical Ross-CASE reports
library(readr)
fund_df <- read_csv("year_sum.csv")
# quick look at data
library(dplyr)
glimpse(fund_df)
# Exploratory base plot: new funds raised over time.
library(ggplot2)
ggplot(fund_df, aes(x = year,
                    y = new_funds_raised)) +
  geom_line()
# create contactable alumni x100 variable to place values on equivalent scale
# (alumni counts are far smaller than the monetary KPIs).
fund_df <-
  fund_df %>%
  mutate(contact_alum_x100 = contactable_alumni * 100)
# create tidy dataframe: one row per (year, KPI) pair.
# NOTE(review): gather() is superseded by pivot_longer(); kept as-is because
# downstream code depends on this exact output.
library(tidyr)
fund_tidy <-
  fund_df %>%
  gather(kpi, value, - year) %>%
  mutate(kpi = as.factor(kpi))
glimpse(fund_tidy)
# create animated plot: one line per KPI, revealed along the year axis.
# NOTE(review): transition_reveal(kpi, year) is the pre-1.0 gganimate
# two-argument (id, along) signature; current gganimate takes only `along`.
# Confirm the installed version before running.
library(gganimate)
library(transformr)
first_animate <-
  fund_tidy %>%
  # Drop the raw alumni counts; the x100-scaled version remains.
  filter(kpi != "contactable_alumni") %>%
  ggplot(aes(x = year,
             y = value,
             colour = kpi)) +
  geom_line() +
  transition_reveal(kpi, year) +
  labs(title = "Trends in University Fundraising KPIs Over Time",
       subtitle = "Data from Ross-CASE reports",
       x = "Year",
       y = 'Value',
       caption = "y axis labelling omitted due to differences in scale between KPIs",
       colour = "KPI") +
  scale_colour_discrete(labels = c("Cash received",
                                   "Contactable alumni",
                                   "Fundraising staff",
                                   "New funds raised")) +
  # Suppress y-axis labels: the KPIs are on very different scales.
  scale_y_discrete(labels = NULL) +
  theme_chris()
# animate and save as a GIF
first_animated <- animate(first_animate, height = 500, width = 800)
anim_save("first_animated.gif", animation = first_animated)
# create non-animated plot with per-KPI dashed linear trendlines
fund_tidy %>%
  filter(kpi != "contactable_alumni") %>%
  ggplot(aes(x = year,
             y = value,
             colour = kpi)) +
  geom_line() +
  # One lm fit per colour group; dashed, no confidence band.
  geom_smooth(method = "lm", linetype = "dashed", se = FALSE) +
  labs(title = "Trends in University Fundraising KPIs Over Time",
       subtitle = "Data from Ross-CASE reports",
       x = "Year",
       y = 'Value',
       caption = "y axis labelling omitted due to differences in scale between KPIs",
       colour = "KPI") +
  scale_colour_discrete(labels = c("Cash received",
                                   "Contactable alumni",
                                   "Fundraising staff",
                                   "New funds raised")) +
  scale_y_discrete(labels = NULL) +
  theme_chris()
#---- create linear model and augmented dataframe ----
# build pre-filtered dataframe (raw alumni counts excluded)
fund_tidy2 <-
  fund_tidy %>%
  filter(kpi != "contactable_alumni")
# build linear model: one additive fit, common slope on year with
# per-KPI intercepts (not a separate slope per KPI).
lin_mod <- lm(value ~ year + kpi, data = fund_tidy2)
# augment linear model to produce tidy dataframe with fitted values (.fitted)
library(broom)
aug_mod <- augment(lin_mod)
# create animated graph: observed points/lines plus the dashed fitted lines
aug_animate <-
  aug_mod %>%
  ggplot(aes(x = year,
             y = value,
             colour = kpi)) +
  geom_line(aes(group = kpi, y = .fitted), size = 0.5, linetype = "dashed") +
  geom_point(size = 2) +
  geom_line(aes(group = kpi)) +
  # NOTE(review): two-argument transition_reveal is the pre-1.0 gganimate API.
  transition_reveal(kpi, year) +
  labs(title = "Trends in University Fundraising KPIs Over Time",
       subtitle = "Data from Ross-CASE reports",
       x = "Year",
       y = 'Value',
       caption = "y axis labelling omitted due to differences in scale between KPIs",
       colour = "KPI") +
  scale_colour_discrete(labels = c("Cash received",
                                   "Contactable alumni",
                                   "Fundraising staff",
                                   "New funds raised")) +
  theme_chris()
# animate and save
aug_animated <- animate(aug_animate, height = 500, width = 800)
anim_save("aug_animated.gif", animation = aug_animated)
#---- build multiple models for animated plot with trendlines ----
# BUG FIX: this section calls purrr::map(), but the script never attaches
# purrr (only readr/dplyr/ggplot2/gganimate/transformr/tidyr/broom are loaded
# above), so map() would fail with "could not find function". Attach it here.
library(purrr)
# build nested tibble: one row per KPI, with that KPI's yearly observations
# stored in a list-column named `data`.
fund_nested <-
  fund_tidy2 %>%
  group_by(kpi) %>%
  nest()
# build separate regression models: an independent value ~ year fit per KPI
# (unlike lin_mod above, each KPI gets its own slope and intercept).
fund_models <-
  fund_nested %>%
  mutate(lm_mod = map(data,
                      ~lm(formula = value ~ year,
                          data = .x)))
# augment models and unnest tibble: per-observation fitted values (.fitted)
# from each KPI's own regression, back in long format for plotting.
fund_models_aug <-
  fund_models %>%
  mutate(aug = map(lm_mod, ~augment(.x))) %>%
  unnest(aug)
# Animated version with per-KPI trendlines from the nested models above.
case_animate <-
  fund_models_aug %>%
  ggplot(aes(x = year,
             y = value,
             colour = kpi)) +
  geom_line(aes(group = kpi, y = .fitted), size = 0.5, linetype = "dashed") +
  geom_point(size = 2) +
  geom_line(aes(group = kpi)) +
  # NOTE(review): two-argument transition_reveal is the pre-1.0 gganimate API.
  transition_reveal(kpi, year) +
  labs(title = "Trends in University Fundraising KPIs Over Time",
       subtitle = "Data from Ross-CASE reports",
       x = "Year",
       y = 'Value',
       caption = "y axis labelling omitted due to differences in scale between KPIs",
       colour = "KPI") +
  scale_colour_discrete(labels = c("Cash received",
                                   "Contactable alumni",
                                   "Fundraising staff",
                                   "New funds raised")) +
  # NOTE(review): no fill aesthetic is mapped anywhere above, so this scale
  # appears to have no effect; left in place.
  scale_fill_discrete() +
  theme_chris()
# animate and save
case_animation <- animate(case_animate, height = 500, width = 800)
anim_save("case_animation.gif", animation = case_animation)
|
cea4f1e96f40a1c560cae6bf13fcdae1c6895e44
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.storage/man/backup_create_backup_vault.Rd
|
83e644fc1c102fc6d87ea7aa771a1800b8fbf9d5
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 1,609
|
rd
|
backup_create_backup_vault.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/backup_operations.R
\name{backup_create_backup_vault}
\alias{backup_create_backup_vault}
\title{Creates a logical container where backups are stored}
\usage{
backup_create_backup_vault(
BackupVaultName,
BackupVaultTags = NULL,
EncryptionKeyArn = NULL,
CreatorRequestId = NULL
)
}
\arguments{
\item{BackupVaultName}{[required] The name of a logical container where backups are stored. Backup vaults
are identified by names that are unique to the account used to create
them and the Amazon Web Services Region where they are created. They
consist of letters, numbers, and hyphens.}
\item{BackupVaultTags}{Metadata that you can assign to help organize the resources that you
create. Each tag is a key-value pair.}
\item{EncryptionKeyArn}{The server-side encryption key that is used to protect your backups; for
example,
\verb{arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab}.}
\item{CreatorRequestId}{A unique string that identifies the request and allows failed requests
to be retried without the risk of running the operation twice. This
parameter is optional.
If used, this parameter must contain 1 to 50 alphanumeric or '-_.'
characters.}
}
\description{
Creates a logical container where backups are stored. A \code{\link[=backup_create_backup_vault]{create_backup_vault}} request includes a name, optionally one or more resource tags, an encryption key, and a request ID.
See \url{https://www.paws-r-sdk.com/docs/backup_create_backup_vault/} for full documentation.
}
\keyword{internal}
|
2a5dd8493029ecad3647acc8f67fef555fec4628
|
3f0498c8f6463302b1a8ca3ea5d357f41cd08e60
|
/Composite.R
|
601773e41093ee63684eaf75fa32dd10e3033a63
|
[] |
no_license
|
YTTom/R
|
3f30f29597e8d41724678b670e674948753b569a
|
38fa42889ff1abc856eefe086e225962e07c6062
|
refs/heads/master
| 2022-11-06T15:50:29.478437
| 2020-06-27T06:16:47
| 2020-06-27T06:16:47
| 273,939,345
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 814
|
r
|
Composite.R
|
# Read the input file (adjust the path to your own machine).
data <- read.csv('~/Downloads/data.csv')
# Subset the rows for academic years 106 and 107.
# BUG FIX: the original indexed with the bare column name (年度), which only
# works if the data frame is attached; qualify the column with data$ instead.
year106 <- data[data$年度 == '106', ]
year107 <- data[data$年度 == '107', ]
# Extract the total-student-count column (column 4).
year106_people <- year106[, 4]
year107_people <- year107[, 4]
# Convert the counts to single-column matrices.
matrix106 <- matrix(year106_people)
matrix107 <- matrix(year107_people)
# Combine the two years side by side (one column per year).
merge_matrix <- cbind(matrix106, matrix107)
# Label rows (countries) and columns (years).
rownames(merge_matrix) <- c('英國','美國','日本')
colnames(merge_matrix) <- c('106','107')
# Set the locale and a CJK-capable font so the labels render correctly
# (they otherwise showed as mojibake).
Sys.setlocale(category="LC_ALL",locale="en_US.UTF-8")
par(family='宋體-繁 細體')
# Draw a grouped bar chart of student counts per country, by year.
# BUG FIX: removed the stray empty argument (double comma) after beside=TRUE.
barplot(merge_matrix, beside=TRUE, xlab="國別", ylab="留學生人數", family="宋體-繁 細體")
|
444efeea20bd9a73b9c2aa86708aa506aedc0142
|
d2c7b6f677eb501b6f08c54fce7aebaf4119ae15
|
/man/plot.ssgraph.Rd
|
b62403c6c493c09e29afc4b460b8d0f2b36129de
|
[] |
no_license
|
cran/ssgraph
|
9b792a284ee5ca70c24bbeeaf998fe769ef323db
|
15e27003a9ef1bf99ccc881f255853e309e17914
|
refs/heads/master
| 2023-01-12T03:58:58.043221
| 2022-12-24T12:30:02
| 2022-12-24T12:30:02
| 130,663,048
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,583
|
rd
|
plot.ssgraph.Rd
|
\name{plot.ssgraph}
\alias{plot.ssgraph}
\title{ Plot function for \code{S3} class \code{"ssgraph"} }
\description{ Visualizes the structure of the selected graph, which can be either the graph whose links have estimated posterior probabilities greater than 0.5,
or the graph with the highest posterior probability. }
\usage{ \method{plot}{ssgraph}( x, cut = 0.5, ... ) }
\arguments{
\item{x }{An object of \code{S3} class \code{"ssgraph"}, from function \code{\link{ssgraph}}. }
\item{cut }{Threshold for including the links in the selected graph based on the estimated posterior probabilities of the links; See the examples. }
\item{\dots}{System reserved (no specific usage).}
}
\references{
Mohammadi, R. and Wit, E. C. (2019). \pkg{BDgraph}: An \code{R} Package for Bayesian Structure Learning in Graphical Models, \emph{Journal of Statistical Software}, 89(3):1-30
Mohammadi, A. and Wit, E. C. (2015). Bayesian Structure Learning in Sparse Gaussian Graphical Models, \emph{Bayesian Analysis}, 10(1):109-138
Mohammadi, A. et al (2017). Bayesian modelling of Dupuytren disease by using Gaussian copula graphical models, \emph{Journal of the Royal Statistical Society: Series C}, 66(3):629-645
}
\author{ Reza Mohammadi \email{a.mohammadi@uva.nl} }
\seealso{ \code{\link{ssgraph}} }
\examples{
\dontrun{
# Generating multivariate normal data from a 'scale-free' graph
data.sim <- bdgraph.sim( n = 60, p = 7, graph = "scale-free", vis = TRUE )
ssgraph.obj <- ssgraph( data = data.sim )
plot( ssgraph.obj )
plot( ssgraph.obj, cut = 0.3 )
}
}
\keyword{hplot}
|
01d4a7aaddbf9e56e4582b228f734bf4033eba3d
|
bd454c45d38cc48f6247d9dec829de0533793549
|
/man/piat.feedback.no_score.Rd
|
242e74f2aad06c5a6ef4c5bd284b9d50ff5d6064
|
[
"MIT"
] |
permissive
|
pmcharrison/piat
|
f445431e6d59cbf63228619547ad4e078af58c2f
|
73c77acf379c233480819738214187cd9b1ba3f7
|
refs/heads/master
| 2023-08-14T17:02:04.665315
| 2023-07-26T21:27:39
| 2023-07-26T21:27:39
| 131,727,383
| 2
| 3
|
NOASSERTION
| 2022-12-21T10:09:03
| 2018-05-01T15:09:06
|
R
|
UTF-8
|
R
| false
| true
| 406
|
rd
|
piat.feedback.no_score.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/feedback.R
\name{piat.feedback.no_score}
\alias{piat.feedback.no_score}
\title{PIAT feedback (no score)}
\usage{
piat.feedback.no_score(dict = piat::piat_dict)
}
\arguments{
\item{dict}{The psychTestR dictionary used for internationalisation.}
}
\description{
Here the participant is given no feedback at the end of the test.
}
|
876aa0e93b35623f6b4d50db30e6392b133124df
|
1522b308afd42bc80bf4b5192c2d1670f8579c26
|
/man/Fragman-package.Rd
|
4de27f2a5cee6ffb60d2a65dd7431b30bf8cead1
|
[] |
no_license
|
covaruber/Fragman
|
2c1830036ccd968c1d4df82983c0cb74d7c84651
|
55fd3627d9f6699ad97f1643883ce93387b382c3
|
refs/heads/master
| 2020-04-11T21:44:30.948814
| 2018-12-17T10:45:13
| 2018-12-17T10:45:13
| 162,114,727
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,194
|
rd
|
Fragman-package.Rd
|
\name{Fragman-package}
\alias{Fragman}
\docType{package}
\author{
Giovanny Covarrubias-Pazaran, Luis Diaz-Garcia, Brandon Schlautman, Walter Salazar, Juan Zalapa.
}
\title{Fragment analysis and automatic scoring}
\description{Fragman is a package designed for Fragment analysis and automatic scoring of biparental populations (such as F1, F2, BC types) and populations for diversity studies. The program is designed to read files with FSA extension (which stands for FASTA-type file and contains readings for DNA fragments), and .txt files from Beckman CEQ 8000 system, and extract the DNA intensities from the channels/colors where they are located, based on ABi machine platforms to perform sizing and allele scoring.
The core of the package and the workflow of the fragment analysis rely in the following 4 functions;
1) \code{\link{storing.inds}}(function in charge of reading the FSA or txt(CQS) files and storing them with a list structure)
2) \code{\link{ladder.info.attach}} (uses the information read from the FSA files and a vector containing the ladder information (DNA size of the fragments) and matches the peaks from the channel where the ladder was run with the DNA sizes for all samples. Then loads such information in the R environment for the use of posterior functions)
3) \code{\link{overview2}} (create friendly plots for any number of individuals specified and can be used to design panels (\code{\link{overview2}}) for posterior automatic scoring (like licensed software does), or make manual scoring (\code{\link{overview}}) of individuals such as parents of biparental populations or diversity populations)
4) The \code{\link{score.markers}} function scores the alleles by finding the peaks provided in the panel (if provided); otherwise it returns all peaks present in the channel. This final function can be automated if several markers are located in the same channel by creating lists of panels, taking advantage of R capabilities and data structures.
** Sometimes during the ladder sizing process some samples can go wrong for several reasons related to the sample quality (low intensity in ladder channel, extreme number of noisy peaks, etc.), because of that we have introduced \code{\link{ladder.corrector}} function which allows the user to correct the bad samples by clicking over the real peaks, by default the \code{\link{ladder.info.attach}} function returns the names of the samples that had a low correlation with the expected peaks.
When automatic scoring is not desired the function \code{\link{overview}} can be used for getting an interactive session and click over the peaks (using the \code{\link{locator}} function) in order to get the allele sizes.
}
\section{Contact}{
Feel free to contact us with questions and improvement suggestions at:
covarrubiasp@wis.edu
Just send a sample file with your question to recreate the issue or bug reported along with vector for your ladder.
}
\section{Citation}{
We have spent valuable time developing this package, please cite it in your publication:
Covarrubias-Pazaran G, Diaz-Garcia L, Schlautman B, Salazar W, Zalapa J. Fragman: An R package for fragment analysis. 2016. BMC Genetics 17(62):1-8.
}
\references{
Covarrubias-Pazaran G, Diaz-Garcia L, Schlautman B, Salazar W, Zalapa J. Fragman: An R package for fragment analysis. 2016. BMC Genetics 17(62):1-8.
Robert J. Henry. 2013. Molecular Markers in Plants. Wiley-Blackwell. ISBN 978-0-470-95951-0.
Ben Hui Liu. 1998. Statistical Genomics. CRC Press LLC. ISBN 0-8493-3166-8.
}
\keyword{ package }
\seealso{
http://cggl.horticulture.wisc.edu/home-page/
}
\examples{
## ================================= ##
## ================================= ##
## Fragment analysis requires
## 1) loading your data
## 2) matching your ladder
## 3) define a panel for scoring
## 4) score the samples
## ================================= ##
## ================================= ##
#####################
## 1) Load your data
#####################
### you would use something like:
# folder <- "~/myfolder"
# my.plants <- storing.inds(folder)
### here we just load our sample data and use the first 2 plants
?my.plants
data(my.plants)
my.plants <- my.plants[1:2]
class(my.plants) <- "fsa_stored"
# plot(my.plants) # to visualize the raw data
#######################
## 2) Match your ladder
#######################
### create a vector indicating the sizes of your ladder and do the match
my.ladder <- c(50, 75, 100, 125, 129, 150, 175, 200, 225, 250, 275, 300, 325, 350, 375)
ladder.info.attach(stored=my.plants, ladder=my.ladder)
### matching your ladder is a critical step and should only happen once per batch of
### samples read
###****************************************************************************************###
### OPTIONAL:
### If the ladder.info attach function detects some bad samples
### that you can correct them manually using
### the ladder.corrector() function
### For example to correct one sample in the previous data
### ladder.corrector(stored=my.plants,
#to.correct="FHN152-CPN01_01A_GH1x35_152-148-209_717-704-793_367-382-381.fsa",
#ladder=my.ladder)
###****************************************************************************************###
#######################
## 3) Define a panel
#######################
### In fragment analysis you usually design a panel where you indicate
### which peaks are real. You may use the overview2 function which plots all the
### plants in the channel you want in the base pair range you want
overview2(my.inds=my.plants, channel = 2:3, ladder=my.ladder, init.thresh=5000)
### You can click on the peaks you think are real, given that the ones
### suggested by the program may not be correct. This can be done by using the
### 'locator' function and press 'Esc' when you're done, i.e.:
# my.panel <- locator(type="p", pch=20, col="red")$x
### That way you can click over the peaks and get the sizes
### in base pairs stored in a vector named my.panel
### Just for demonstration purposes I will use the suggested peaks by
### the program using overview2, which will return a vector with
### expected DNA sizes to be used in the next step for scoring
### we'll do it in the 160-190 bp region
my.panel <- overview2(my.inds=my.plants, channel = 3,
ladder=my.ladder, init.thresh=7000,
xlim=c(160,190)); my.panel
##########################
## 4) Score the samples
##########################
### When a panel is created is time to score the samples by providing the initial
### data we read, the ladder vector, the panel vector, and our specifications
### of channel to score (other arguments are available)
### Here we will score our samples for channel 3 with our panel created previously
res <- score.markers(my.inds=my.plants, channel = 3, panel=my.panel$channel_3,
ladder=my.ladder, electro=FALSE)
### Check the plots and make sure they were scored correctly. In case some samples
### are wrong you might want to use the locator function again and figure out
### the size of your peaks. To extract your peaks in a data.frame do the following:
final.results <- get.scores(res)
final.results
}
|
e45e5c9ae5fa18baaaaeacfe138207c9c584a6c6
|
0e6d8c50bd6c0ef5e3c97b17626bb42c9e3d8eff
|
/R/RcppExports.R
|
834ce8db68665acb5c17090dbe87c748e4492e6b
|
[] |
no_license
|
tobiasmuetze/gscounts
|
e04903db1993df538065cc427c45f01d2904796f
|
1c614a3fd36be86a5608b83df91df040fbf0d98d
|
refs/heads/master
| 2021-11-23T23:36:17.203662
| 2021-11-01T16:14:35
| 2021-11-01T16:14:35
| 92,069,741
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 503
|
r
|
RcppExports.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# Auto-generated wrapper (Rcpp::compileAttributes; do not edit by hand).
# Dispatches to the compiled C++ routine '_gscounts_cpp_calc_critical'
# in the gscounts shared library, forwarding all arguments unchanged.
cpp_calc_critical <- function(r, lower, upper, error_spend, information, theta, side) {
    .Call('_gscounts_cpp_calc_critical', PACKAGE = 'gscounts', r, lower, upper, error_spend, information, theta, side)
}
# Auto-generated wrapper (Rcpp::compileAttributes; do not edit by hand).
# Dispatches to the compiled C++ routine '_gscounts_cpp_pmultinorm'
# in the gscounts shared library, forwarding all arguments unchanged.
cpp_pmultinorm <- function(r, lower, upper, information, theta) {
    .Call('_gscounts_cpp_pmultinorm', PACKAGE = 'gscounts', r, lower, upper, information, theta)
}
|
f9f1bc3dd54383ff64b71930259786ac326b7109
|
9580717f9f09fe026dee8224b35f3f72c9f78675
|
/man/create_net_animate.Rd
|
516314018fae56531e704ad4bdaf72c2282197d8
|
[] |
no_license
|
sctyner/netvizinf
|
dc0fed9791ae7c29255ff133b010ed5f6bc39a17
|
d9cd0e249ad6b351b59b9478d238dbaf0a8762ce
|
refs/heads/master
| 2021-05-03T22:30:58.889245
| 2017-10-26T19:52:35
| 2017-10-26T19:52:35
| 71,607,611
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 372
|
rd
|
create_net_animate.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/crete-net-animate.R
\name{create_net_animate}
\alias{create_net_animate}
\title{Construct plots to be animated.}
\usage{
create_net_animate(dat)
}
\arguments{
\item{dat}{\code{data.frame} The output of a call to \code{\link{tween_microsteps}}}
}
\description{
Construct plots to be animated.
}
|
3f87ab69eb4dfe1532b38316ecaf047610ebed19
|
b8aed4a0a0f267d15c97ddc8957949999b9f5004
|
/R/fsf_query.R
|
b0151eb27f60cac5ecd00670603d577317d8860e
|
[] |
no_license
|
jtbradt/firstStreetAPI
|
af0d721bd9c76c8055a407043bf79ff33902a356
|
7870fc59dfb0e7d9e630a486ba356723ea9bdd0f
|
refs/heads/master
| 2023-06-12T14:01:03.068212
| 2021-07-13T15:27:50
| 2021-07-13T15:27:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 718
|
r
|
fsf_query.R
|
#' FSF query function
#'
#' Constructs and submits a First Street Foundation API request.
#'
#' @param api.cat One of FSF's 7 API categories.
#' @param api One of FSF's 18 APIs.
#' @param arg One or more query arguments; multiple values are joined with ";".
#' @return The \code{httr} response object on HTTP 200; otherwise the HTTP
#'   status code of the failed request.
#' @keywords fsf.query
#' @export
fsf.query <- function(api.cat, api, arg) {
  # Build the request path: <version>/<category>/<api>/<arg1;arg2;...>
  path <- paste(pkg.env$api.version, api.cat, api, sep = "/")
  # Add arguments to path:
  path <- paste0(path, "/", paste(arg, collapse = ";"))
  # Query FSF API, authenticating with the package-stored API key:
  url <- httr::modify_url("https://api.firststreet.org/", path = path)
  resp <- httr::GET(url, query = list(key = pkg.env$api.key))
  # status_code is an integer; compare numerically rather than against the
  # string "200" (the original relied on implicit character coercion).
  if (resp$status_code == 200L) {
    return(resp)
  } else {
    return(resp$status_code)
  }
}
|
73e3f96948fee51fb6f06e96037aa95aa32f324b
|
05884bd8afb3222aec86c6a2b363e67ed3c64590
|
/toolbox/examples/ecoex.R
|
d1171d45a50f9684f63aa3a1fb5dbde781072cc7
|
[] |
no_license
|
nmarticorena/mineria_datos
|
bcfbea31e6de6f292e4404068b360638ab8a3cbb
|
6e3f22c2fb79fe551a5d8c94136f495638088813
|
refs/heads/master
| 2020-03-09T00:36:28.806062
| 2018-06-14T03:12:35
| 2018-06-14T03:12:35
| 128,492,056
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 118
|
r
|
ecoex.R
|
# eco example 1
# NOTE(review): eco() is defined elsewhere in this toolbox; from the
# expansion sketched below it appears to sum k copies of x, each shifted
# one position further to the right — confirm against the definition.
x=1:10
z=eco(x,3)
#x_1=(0,1,2,3...)
#x_2=(0,0,1,2,...)
#x_3=(0,0,0,1,2...)
#z=x_1+x_2+x_3
z
|
738f7243f740f9e22f605a88da362a4f9ee50987
|
5a3e9cad940ab62c63177618397f4f7fa91069cc
|
/Destructive_harvest_2018_BRA_LAU_GAL_WUU.R
|
6cbcfe8c2da0db18db49a16cc0725f5697ecdaf7
|
[] |
no_license
|
mirodemol/destr_valid_BGLW
|
ade749e5aca48937ed4b38cf974fa4f5abfd1287
|
312461570b48ee7301c36471e00754758bb9d983
|
refs/heads/master
| 2020-03-15T11:34:34.753516
| 2018-05-15T13:52:05
| 2018-05-15T13:52:05
| 132,123,223
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,925
|
r
|
Destructive_harvest_2018_BRA_LAU_GAL_WUU.R
|
####
# This code is to analyse the 2018 field data from destructive harvests
# written by Miro
####
# NOTE(review): clearing the global environment inside a script is
# discouraged (surprises anyone sourcing it); kept to preserve behaviour.
remove(list=ls())
# load packages and functions ----
# Drop rows of DF that contain more than n missing values.
# Rows with at most n NAs are kept; the default n = 0 keeps only
# complete rows.
delete.na <- function(DF, n = 0) {
  keep <- rowSums(is.na(DF)) <= n
  DF[keep, ]
}
# read data, check data ----
# NOTE(review): hard-coded absolute Windows path — only runs on the
# author's machine as written.
setwd('C:/Users/midemol/Dropbox/Doctoraat/fun_in_R/destr_valid_BGLW')
inventory = read.csv('inventory_BRA_LAU_WUU_GAL.csv')
str(inventory)
# clean up inventory
inventory=inventory[!(inventory$Tree_Code=='GAL-12'), ] # remove GAL12
inventory$site_code=factor(substring(inventory$Tree_Code,1,3)) # make site code factors (4 factor levels, one per site)
# Recode the forked tree's dual height entry to its single maximum value,
# then convert the factor column to numeric via character.
levels(inventory$Tree_height_felled_flb_.m.)[levels(inventory$Tree_height_felled_flb_.m.)=="9,3 / 7,6"] <- "9.3" # forking tree. keep only max height of flb for now.
inventory$Tree_height_felled_flb_.m.=as.numeric(as.character(inventory$Tree_height_felled_flb_.m.))
# the dead braches of WUU-01 are not in the main database, we add them here:
id=c('pom','tien1','tien2');fresh_volume=c(5050,1084,3235);fresh_mass=c(5501,797,2814);dry_mass=c(2787,501,1540);WUU01=data.frame(id,fresh_volume,fresh_mass,dry_mass)
# add columns: per-core wood specific gravity (dry mass / fresh volume)
# and their per-tree mean.
inventory$WSG_coreA=inventory$coreA_dry_mass_.g./inventory$coreA_fresh_volume_.mL.
inventory$WSG_coreB=inventory$coreB_dry_mass_.g./inventory$coreB_fresh_volume_.mL.
inventory$WSG_cores=(inventory$WSG_coreA+inventory$WSG_coreB)/2
# exploratory analyses & descriptive statistics ----
summary(inventory)
# NOTE(review): attach() is discouraged (masking risk); kept so the bare
# column names in the plot calls below keep working. Paired with the
# detach() at the end of this section.
attach(inventory)
mean_dbh = mean(DBH)
mean_th = mean(Tree_height_felled_.m.)
# Per-site mean / min / max of felled tree height (NAs removed).
aggregate(Tree_height_felled_.m., list(site = site_code),
          FUN = function(x) c(mean(x, na.rm = TRUE), min(x, na.rm = TRUE), max(x, na.rm = TRUE)))
# Square plotting region; one symbol/colour per site throughout.
par(pty = "s")
plot(Circumference_standing_.cm., Circ_felled_.cm., col = site_code, pch = c(0, 1, 2, 3)[site_code])
plot(Tree_height_felled_.m., Tree_height_.m., col = site_code, pch = c(0, 1, 2, 3)[site_code],
     xlim = c(15, 26), ylim = c(15, 26), pty = 's'); abline(0, 1)
plot(Tree_height_felled_.m., Tree_height_felled_flb_.m., col = site_code, pch = c(0, 1, 2, 3)[site_code], pty = 's')
# relations between fresh mass and dbh/h
#pdf("C:/Users/midemol/Dropbox/Doctoraat/fun_in_R/figures/DBH2TH_biomass.pdf",4,4)
par(mar = c(4, 4, 1, 0.2))
plot(Tree_height_felled_.m., Crown_weight_.kg. + Stem_weight_.kg., col = site_code,
     pch = c(0, 1, 2, 3)[site_code], pty = 's',
     xlab = 'Total tree length (m)', ylab = 'Tree total fresh mass (kg)')
plot(DBH, Crown_weight_.kg. + Stem_weight_.kg., col = site_code,
     pch = c(0, 1, 2, 3)[site_code], pty = 's',
     xlab = 'DBH (cm)', ylab = 'Tree total fresh mass (kg)')
with(inventory, plot(DBH^2 * Tree_height_felled_.m., Crown_weight_.kg. + Stem_weight_.kg.,
                     xlab = expression(~ DBH^{2} ~ x ~ Tree ~ length ~ (cm^{2} ~ m)),
                     ylab = 'Tree total fresh mass (kg)', col = site_code, pch = c(0, 1, 2, 3)[site_code]))
#abline(lm(Crown_weight_.kg. + Stem_weight_.kg.~I(DBH^2*Tree_height_felled_.m.)+0,data=inventory))
# One zero-intercept regression of total fresh mass on DBH^2 * height per site.
for (i in 1:4) {
  lm_temp = lm(Crown_weight_.kg. + Stem_weight_.kg. ~ I(DBH^2 * Tree_height_felled_.m.) + 0,
               data = inventory[inventory$site_code == c("BRA", "GAL", "LAU", "WUU")[i], ])
  abline(lm_temp, col = i)
}
legend('topleft', title = 'Site', c("BRA (.995)", "GAL (.991)", "LAU (.983)", "WUU (.987)"),
       col = seq_along(levels(factor(site_code))), pch = c(0, 1, 2, 3), bty = 'n', cex = .75)
# BUG FIX: the original had bare `dev.off` (no parentheses), which merely
# prints the function and never closes the device. Pairs with the pdf()
# call above when uncommented.
dev.off()
detach(inventory)
# standing vs felled ----
# Bland-Altman-style plots: standing-tree measurement vs its difference
# to the corresponding felled (ground-truth) measurement.
#pdf("C:/Users/midemol/Dropbox/Doctoraat/fun_in_R/figures/DBH_DBH.pdf",4,4)
#pdf("C:/Users/midemol/Dropbox/Doctoraat/fun_in_R/figures/TH_TH.pdf",4,4)
#pdf("C:/Users/midemol/Dropbox/Doctoraat/fun_in_R/figures/THflb_THflb.pdf",4,4)
par(mar = c(4, 3, 1, 0))
with(inventory,plot(Circumference_standing_.cm.,Circumference_standing_.cm.-Circ_felled_.cm., xlab='Circumference of standing tree (cm)',ylab='Difference (standing - felled, cm)',col=site_code,pch=c(0,1,2,3)[site_code]))
abline(h = 0, lty = 2) # zero-difference reference line
with(inventory,plot(Tree_height_.m.,Tree_height_.m.- Tree_height_felled_.m.,ylim=c(-4,4),xlab='Forestry Pro tree height (m)',ylab='Difference (Forestry Pro - felled, m)',col=site_code,pch=c(0,1,2,3)[site_code]))
abline(h = 0, lty = 2)
with(inventory,plot(Tree_height_flb_.m.,Tree_height_flb_.m.- Tree_height_felled_flb_.m.,ylim=c(-4,4),xlab='Forestry Pro tree height till first living branch (m)',ylab='Difference (Forestry Pro - felled, m)',col=site_code,pch=c(0,1,2,3)[site_code]))
abline(h = 0, lty = 2)
dev.off()
# NOTE(review): this legend is drawn AFTER dev.off(); with no device open it
# will error — it likely belongs before dev.off(). Left as-is.
legend('topleft',title='Site',c("BRA","GAL","LAU","WUU"),
       col=seq_along(levels(factor(site_code))), pch=c(0,1,2,3),bty='n', cex=.75)
# wood cores and wood density ----
with(inventory,plot(DBH,WSG_cores,xlab='DBH (cm)',ylab='WSG cores',col=site_code,pch=c(0,1,2,3)[site_code]))
with(inventory,plot(DBH,Stem_disks_fresh_mass_.g._POM/Stem_disks_fresh_volume_.ml._POM,xlab='DBH (cm)',ylab='Fresh disc density',col=site_code,pch=c(0,1,2,3)[site_code]))
# NOTE(review): this plots dry mass / fresh volume, yet the ylab still reads
# 'Fresh disc density' — the label looks wrong; left as-is.
with(inventory,plot(DBH,Dry_mass_POM../Stem_disks_fresh_volume_.ml._POM,xlab='DBH (cm)',ylab='Fresh disc density',col=site_code,pch=c(0,1,2,3)[site_code]))
# Agreement between the two increment cores taken per tree.
with(inventory,plot(WSG_coreA,abs(WSG_coreA- WSG_coreB),xlab='WSG core A',ylab='WSG core A - WSG core B',col=site_code,pch=c(0,1,2,3)[site_code])); abline(h = 0, lty = 2)
boxplot(WSG_cores ~ site_code, data = inventory,ylab='WSG from cores (g/cu. cm)')
# dead wood from WUU01 (WSG=wood specific gravity, dmc=dry matter content, FWD=fresh wood density)
WUU01$WSG=WUU01$dry_mass/WUU01$fresh_volume;WUU01$DMC=WUU01$dry_mass/WUU01$fresh_mass;WUU01$FWD=WUU01$fresh_mass/WUU01$fresh_volume
#pdf("C:/Users/midemol/Dropbox/Doctoraat/fun_in_R/figures/fresh_biomass_boxplot.pdf",4,4)
par(mar = c(3, 4, 1, 1))
boxplot(DBH ~ site_code, data = inventory,ylab='DBH (cm)')
boxplot(Tree_height_felled_.m. ~ site_code, data = inventory,ylab='Tree length (m)')
boxplot(Stem_weight_.kg. ~ site_code, data = inventory)
boxplot(Crown_weight_.kg. ~ site_code, data = inventory)
boxplot(Crown_weight_.kg. + Stem_weight_.kg.~ site_code, data = inventory,ylab='Tree total fresh mass (kg)')
dev.off()
# WD and water content at different heights ----
#make a dataframe "discs" with columns (height, site code, fresh mass and volume and dry mass)
# Start with the breast-height (130 cm) discs, one row per tree.
discs=data.frame(130,inventory$site_code,inventory$Stem_disks_fresh_mass_.g._POM,inventory$Stem_disks_fresh_volume_.ml._POM,inventory$Dry_mass_POM..)
names(discs)=c('h','site_code','fresh_mass','fresh_volume','dry_mass')
# Append the discs taken every 3 m (3..24 m), pulling the inventory columns
# by their constructed names (e.g. Fresh_mass_POM.3m).
for (i in c(3,6,9,12,15,18,21,24)){
  discs_temp=data.frame(i*100,inventory$site_code,inventory[[paste('Fresh_mass_POM.',i,'m',sep = '')]],inventory[[paste('Fresh_volume_POM.',i,'m',sep = '')]],inventory[[paste('Dry_mass_POM.',i,'m',sep = '')]])
  names(discs_temp)=c('h','site_code','fresh_mass','fresh_volume','dry_mass')
  discs=rbind(discs,discs_temp)
  rm(discs_temp)
}
#discs0=delete.na(discs)
discs=delete.na(discs,n=1) # max number of NA in discs per row is n=1
#discs2=delete.na(discs,n=2) # max number of NA in discs per row is n=2
# calculate wsg etc in discs
# WSG = dry mass / fresh volume; DMC = dry mass / fresh mass;
# FWD = fresh mass / fresh volume.
discs$WSG=discs$dry_mass/discs$fresh_volume;discs$DMC=discs$dry_mass/discs$fresh_mass;discs$FWD=discs$fresh_mass/discs$fresh_volume
# plot the results!
with(discs,plot(WSG,h,xlab='WSG',ylab='Height of disc (cm)',col=site_code,pch=c(0,1,2,3)[site_code]))
with(discs,plot(DMC,h,xlab='DMC',ylab='Height of disc (cm)',col=site_code,pch=c(0,1,2,3)[site_code]))
with(discs,plot(FWD,h,xlab='FWD',ylab='Height of disc (cm)',col=site_code,pch=c(0,1,2,3)[site_code]))
legend('topleft',title='Site',c("BRA","GAL","LAU","WUU"),
       col=seq_along(levels(factor(discs$site_code))), pch=c(0,1,2,3),bty='n', cex=.75)
|
fd77c009690f53f04b653ddeaef5046ed9c6a99a
|
b71ce56fa3133ad7040e493a525cfe7ca0b07b2f
|
/man/metroTilesGrid.Rd
|
ebbe396c09dc2f2561e1458a7cc66806e471417e
|
[] |
no_license
|
bright-spark/shinyMetroUi
|
2a22ede6e35b91fe5780e4f2bebb83d57332d1fb
|
2c2acfe9abd3f1d444d3f3d95c99d441532f4d8a
|
refs/heads/master
| 2023-03-19T03:42:01.896849
| 2019-12-20T16:04:10
| 2019-12-20T16:04:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,948
|
rd
|
metroTilesGrid.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/metro-tiles.R
\name{metroTilesGrid}
\alias{metroTilesGrid}
\title{Create a Metro 4 Tiles Grid}
\usage{
metroTilesGrid(..., group = FALSE, title = NULL, size = 2)
}
\arguments{
\item{...}{Insert metroTile inside.}
\item{group}{Whether tiles are displayed by group. FALSE by default.}
\item{title}{If group is TRUE, the group title.}
\item{size}{Tile group size: between 1 and 10.}
}
\description{
Build a Metro grid for tiles
}
\examples{
if(interactive()){
library(shiny)
library(shinyMetroUi)
shiny::shinyApp(
ui = metroPage(
metroTilesGrid(
metroTile(size = "small", color = "red"),
metroTile(size = "small", color = "green"),
metroTile(size = "small", color = "blue", col_position = 1, row_position = 2),
metroTile(size = "small", color = "orange", col_position = 2, row_position = 2),
metroTile(size = "wide", color = "brown"),
metroTile(size = "medium", color = "green", selected = TRUE)
),
br(), br(), br(),
metroTilesGrid(
group = TRUE,
size = 2,
metroTile(
size = "small",
color = "indigo",
icon = "github",
url = "https://github.com/olton/Metro-UI-CSS"),
metroTile(size = "small", color = "green", icon = "envelop"),
metroTile(size = "small", color = "blue", col_position = 1, row_position = 2),
metroTile(size = "small", color = "orange", col_position = 2, row_position = 2),
metroTile(
size = "wide",
color = "pink",
sliderInput("obs", "Number of observations:",
min = 0, max = 1000, value = 500
)
),
metroTile(
size = "large",
color = "green",
selected = TRUE,
plotOutput("distPlot")
)
)
),
server = function(input, output) {
output$distPlot <- renderPlot({
hist(rnorm(input$obs))
})
}
)
}
}
\author{
David Granjon, \email{dgranjon@ymail.com}
}
|
76b3f7b269a9d39bb81acef8af72bad24010d85d
|
9f89cc309f9ddf8765f43605409de498d5e8f0e3
|
/Assignment4/Assignment4.R
|
d61482a4d208286425df02ab9ce9bfa243b26822
|
[] |
no_license
|
htdrajiv/r_programming
|
386f9266c04778d59a335ab5c0b1f4918259dd80
|
bb780777069df22958aafdaa203a813be90cd54c
|
refs/heads/master
| 2021-01-20T18:27:21.465794
| 2016-08-17T02:09:35
| 2016-08-17T02:09:35
| 65,594,777
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,384
|
r
|
Assignment4.R
|
# Assignment 4 -- Divvy bike share (2014): build time-of-rental feature tables
# for Subscribers and Customers, write them to disk, and cluster the customer
# trips with k-means.
#
# Inputs : four quarterly Divvy trip CSV files (local paths below).
# Outputs: derived feature tables (CSV/XLSX); `fit` holds the k-means result.
data1 <- read.csv("E:/Projects/R_Programming/Data/Divvy_Stations_Trips_2014_Q1Q2/Divvy_Trips_2014_Q1Q2.csv")
data2 <- read.csv("E:/Projects/R_Programming/Data/Divvy_Stations_Trips_2014_Q3Q4/Divvy_Trips_2014-Q3-07.csv")
data3 <- read.csv("E:/Projects/R_Programming/Data/Divvy_Stations_Trips_2014_Q3Q4/Divvy_Trips_2014-Q3-0809.csv")
data4 <- read.csv("E:/Projects/R_Programming/Data/Divvy_Stations_Trips_2014_Q3Q4/Divvy_Trips_2014-Q4.csv")
fileData <- rbind(data1, data2, data3, data4)

library(sqldf)
# Split the trips by rider type.
subscriberData <- sqldf("select * from fileData fd where fd.usertype = 'Subscriber' ")
customerData <- sqldf("select * from fileData fd where fd.usertype = 'Customer' ")

# Numeric gender encoding for subscribers (Male = "1", Female = "0");
# rows with any other/missing gender stay NA.
subscriberData$numericGender[subscriberData$gender == "Male"] <- "1"
subscriberData$numericGender[subscriberData$gender == "Female"] <- "0"

# Parse trip start timestamps ("month/day/year hour:minute").
asDateStartTimeSubscriber <- strptime(subscriberData$starttime, format = "%m/%d/%Y %H:%M")
asDateStartTimeCustomer <- strptime(customerData$starttime, format = "%m/%d/%Y %H:%M")

# Human-readable subscriber features (month/weekday names).
dfSubscriber <- data.frame(months = months(asDateStartTimeSubscriber),
                           dayOfWeek = weekdays(asDateStartTimeSubscriber),
                           hours = as.numeric(format(asDateStartTimeSubscriber, "%H")),
                           lengthOfRentalsInHours = subscriberData$tripduration / 3600,
                           # Approximate age: current year minus birth year.
                           age = as.numeric(format(Sys.Date(), "%Y")) - as.numeric(subscriberData$birthyear),
                           gender = subscriberData$gender)

# All-numeric subscriber features (suitable for clustering).
dfSubscriberNumeric <- data.frame(months = as.numeric(format(asDateStartTimeSubscriber, "%m")),
                                  dayOfWeek = as.numeric(format(asDateStartTimeSubscriber, "%u")), # 1 = Monday
                                  hours = as.numeric(format(asDateStartTimeSubscriber, "%H")),
                                  lengthOfRentalsInHours = as.numeric(subscriberData$tripduration / 3600),
                                  age = as.numeric(format(Sys.Date(), "%Y")) - as.numeric(subscriberData$birthyear),
                                  gender = as.numeric(subscriberData$numericGender))
View(dfSubscriberNumeric)

# Same two tables for customers (no age/gender available for them).
dfCustomer <- data.frame(months = months(asDateStartTimeCustomer),
                         dayOfWeek = weekdays(asDateStartTimeCustomer),
                         hours = as.numeric(format(asDateStartTimeCustomer, "%H")),
                         lengthOfRentalsInHours = customerData$tripduration / 3600)

dfCustomerNumeric <- data.frame(months = as.numeric(format(asDateStartTimeCustomer, "%m")),
                                dayOfWeek = as.numeric(format(asDateStartTimeCustomer, "%u")),
                                hours = as.numeric(format(asDateStartTimeCustomer, "%H")),
                                lengthOfRentalsInHours = customerData$tripduration / 3600)

write.csv(dfCustomer, "E:/Projects/R_Programming/Data/customerData.csv")
write.csv(dfSubscriber, "E:/Projects/R_Programming/Data/subscriberData.csv")
write.csv(dfCustomerNumeric, "E:/Projects/R_Programming/Data/customerDataNumeric.csv")
write.csv(dfSubscriberNumeric, "E:/Projects/R_Programming/Data/subscriberDataNumeric.csv")

# FIX: only install xlsx when it is missing instead of reinstalling on every run.
if (!requireNamespace("xlsx", quietly = TRUE)) install.packages("xlsx")
library("xlsx")
write.xlsx(dfCustomer, "C:/Users/985176/Desktop/WorkingDirectory/customerData.xlsx")
write.xlsx(dfSubscriber, "C:/Users/985176/Desktop/WorkingDirectory/subscriberData.xlsx")

# BUG FIX: kmeans() requires all-numeric input; dfCustomer contains the
# month/weekday *names* produced by months()/weekdays(), so kmeans(dfCustomer, 5)
# fails.  Cluster the numeric feature table instead.
fit <- kmeans(dfCustomerNumeric, 5)
|
9282d5297e11663bfaf6f87ee3ebadaab9403ae1
|
c3f09d043409d3f30cc1de6732a55d4a91d6a0a7
|
/scripts/script_0.R
|
336d20cdbe7fbf8f15a7cb86b6a40a1c4efe67bf
|
[] |
no_license
|
mark-andrews/repdemoproj
|
77afdbeab6e56bc02fb26b88b0321070bd571a41
|
f51871f1b22347f39decf708a21c617da10bc3c6
|
refs/heads/master
| 2022-11-11T14:20:28.881053
| 2020-07-04T15:22:44
| 2020-07-04T15:28:35
| 276,920,438
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 139
|
r
|
script_0.R
|
# Fit a Bayesian linear mixed model of reaction time (rt) on day, with a
# random intercept per subject, and cache the fitted model for later scripts.
library(here)
library(bayeslmm)
# `sleepstudy_df` is assumed to be provided by an attached package -- TODO confirm.
result <- lmm(rt ~ day + (1|subject), data = sleepstudy_df)
# here() anchors the output path at the project root.
saveRDS(result, file=here('tmp/model_0.rds'))
|
b6ead726b3b371b0617b94fa377c1af584f168de
|
b98ece6254219513180cc730f7e26f7f9a277124
|
/plot3.R
|
c38b2b6a76673b8bce2e6acb0865ce2e47cc68b4
|
[] |
no_license
|
OlgaRusyaeva/ExData_Plotting1
|
543fdf16bba72a3cac2d60170f56325b362a3e63
|
0c0988359a8be37155b5c0142b09eb3a9a26f6c5
|
refs/heads/master
| 2021-01-14T10:23:38.695401
| 2015-06-07T14:41:30
| 2015-06-07T14:41:30
| 35,160,355
| 0
| 0
| null | 2015-05-06T13:33:03
| 2015-05-06T13:33:03
| null |
UTF-8
|
R
| false
| false
| 920
|
r
|
plot3.R
|
# plot3.R -- plot the three energy sub-metering channels over 1-2 Feb 2007
# and save the figure as plot3.png.
#read data from a file with the dates 2007-02-01 and 2007-02-02
library(sqldf)
fileName <- "household_power_consumption.txt"
df <- read.csv.sql(fileName, sql='select * from file where Date="1/2/2007" OR Date="2/2/2007"',sep=";",header=T)
closeAllConnections()
#create new column out of Date and Time columns
df$DateTime <- strptime(paste(df$Date,df$Time),"%d/%m/%Y %H:%M:%S")
#create a graph of Energy sub metering (three types) in days of the week
png("plot3.png")
# BUG FIX: `yrange` was used below but never defined (causing an "object
# 'yrange' not found" error); derive it so all three series share one y axis.
yrange <- range(df$Sub_metering_1, df$Sub_metering_2, df$Sub_metering_3, na.rm = TRUE)
#DateTime-Sub_metering_1 in black
plot(df$DateTime,df$Sub_metering_1,type="l",xlab="",ylab="Energy sub metering",ylim=yrange,col="black")
#DateTime-Sub_metering_2 in red
lines(df$DateTime,df$Sub_metering_2,type="l",col="red")
#DateTime-Sub_metering_3 in blue
lines(df$DateTime,df$Sub_metering_3,type="l",col="blue")
#legend
legend("topright",legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lty=c(1,1,1),col=c("black","red","blue"))
dev.off()
|
06cf0ad90de9811f7e889ae93d8c0014e442490d
|
0f709b508989fc77d8f1d62ad97ef050e1bbed48
|
/Week_06_HypothesisTesting/exercises.week.06.hypothesis.testing.applied.R
|
3742be067332f33af1180cd1a7795d88298989d7
|
[] |
no_license
|
alekssro/DataScienceBioinf
|
b1bcf5afa4e0edb38d6cc25cc694bc948d47a54f
|
968dba2e87af764223e6d15a7a1e510581176a8b
|
refs/heads/master
| 2021-08-29T01:43:21.753379
| 2017-12-13T09:56:16
| 2017-12-13T09:56:16
| 112,108,656
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,485
|
r
|
exercises.week.06.hypothesis.testing.applied.R
|
# Week 6 exercises: test whether some bins on chromosome 6 carry more common
# variants (minor allele frequency, maf) than expected under H0.
# NOTE(review): this is an exercise template -- the binned-results data frame
# `br` referenced at the second checkpoint is intentionally left for the
# student to create.
library(tidyverse)
#### Hypothesis testing ####
# Our null hypothesis is: minor allele frequencies are the same everywhere on chromosome 6
# Our alternative hypothesis: some places have higher minor allele frequencies than expected (balancing selection)
# Load data and calculate maf
d = read_delim(file="1000g.allele.frequencies.tsv.gz",delim=c("\t"))
# Keep European samples only; fold frequencies onto the minor allele and drop
# monomorphic sites (maf == 0).
d = d %>%
  filter(population=="EUR") %>%
  mutate(maf=ifelse(frequency>0.5, 1-frequency, frequency)) %>%
  filter(maf > 0) %>%
  mutate(population=factor(population),
         reference_allele=factor(reference_allele),
         alternative_allele=factor(alternative_allele))
summary(d)
# For all snps calculate a "bin25k" variable based on position, each bin should be 25 kb
d = d %>%
  mutate(bin25k = position %/% 25000) %>%
  ungroup()
#Checkpoint
names(d)
#[1] "position" "reference_allele" "alternative_allele" "population" "frequency" "maf" "bin25k"
d %>% head(3)
#1 63979 C T EUR 0.1044 0.1044 2
#2 63980 A G EUR 0.1044 0.1044 2
#3 73938 A G EUR 0.0010 0.0010 2
# For each bin calculate number of snps with maf > 0.2 (n20) , number of snps (n) , a test statistic (ts) = n20/n
# Also calculate the position of the bin midpoint (x)
# Call the binned results (1 row pr bin) for "br" (binned results)
# Checkpoint
names(br)
#[1] "bin25k" "n20" "n" "ts" "x"
br %>% head(5)
#1 2 0 3 0.0000000 68958.5
#2 3 0 2 0.0000000 88089.0
#3 4 0 7 0.0000000 111903.5
#4 5 1 7 0.1428571 146700.5
#5 6 2 25 0.0800000 162538.5
# Q: Plot this teststatistic along the chromosome and also visualise the number of snps in each bin
# Q: What is the observed p20 = n20/n for the entire chromosome
#### Continuing from last week ####
# Q: select the best/easiest way of testing if the observed proportion of SNPs with maf > 0.2 in the bin is higher than expected
# Q: List the 10 most significant bins
# What is the lowest pvalue?
# How many of your bins have p < 0.001?
# If all bins followed H0: how many would you expect to have p > 0.001
# Q: Plot the p value as function of bin position
# Q: It is really difficult to see the small pvalues - try to mutate a new pvalue2 = -log10(pvalue) and plot it
# This is called a Manhattan plot - strong signals will be skycrapers of significance
# Q: Do you see a skyscraper?
#### Dividing the chromosome into bins with same number of snps ####
# Q: Do the same analysis as before but using bins of size 500 snps instead
# Basically we just want the Manhattan plot
# HINT:
d %>%
  arrange(position) %>%
  mutate(SNPnumber = row_number())
#### Testing a specific hypothesis ####
# Assume that I speculate that the overall frequency of SNPs with maf > 0.05 is really 50% in humans.
# NOTE: maf > 0.05 - I call these "high maf snps"
# Can you test if some bins of size 100 kb have significantly more high maf SNPs?
# Visualize the test results so it is easy to see where the significant bins are
# HINT: Make manhattan plots but also try and remove all bins with less than 50% of the snps having maf > 0.05
#### Bonus question ####
# Can you repeat the final test for all three populations?
# Do you see a difference between the populations?
|
871c330a074ba89c695e7142b8fe0694ebc916f3
|
b98c5cbe6ab6887097e0337376fc56b6cec15996
|
/man/methyvolc.Rd
|
375f88455f04ed25e312ac3e9c68c1d869356a00
|
[
"MIT"
] |
permissive
|
nhejazi/methyvim
|
bebd0758f8aff2ad06430aac43b82cf0ec4f45b1
|
7b4ee9f83aa7d2cfd11645fcb0658de2ea7a0df7
|
refs/heads/master
| 2021-03-24T13:29:34.450771
| 2020-04-27T19:11:00
| 2020-04-27T19:11:00
| 79,256,902
| 1
| 1
|
MIT
| 2020-02-06T01:09:32
| 2017-01-17T18:15:07
|
TeX
|
UTF-8
|
R
| false
| true
| 1,512
|
rd
|
methyvolc.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plots.R
\name{methyvolc}
\alias{methyvolc}
\title{Volcano plot for methytmle objects}
\usage{
methyvolc(x, param_bound = 2, pval_bound = 0.2)
}
\arguments{
\item{x}{Object of class \code{methytmle} as produced by an appropriate call
to \code{methyvim}.}
\item{param_bound}{Numeric for a threshold indicating the magnitude of the
size of the effect considered to be interesting. This is used to assign
groupings and colors to individual CpG sites.}
\item{pval_bound}{Numeric for a threshold indicating the magnitude of
p-values deemed to be interesting. This is used to assign groupings and
colors to individual CpG sites.}
}
\value{
Object of class \code{ggplot} containing a volcano plot of the
estimated effect size on the x-axis and the -log10(p-value) on the y-axis.
The volcano plot is used to detect possibly false positive cases, where a
test statistic is significant due to low variance.
}
\description{
Volcano plot for methytmle objects
}
\examples{
suppressMessages(library(SummarizedExperiment))
library(methyvimData)
data(grsExample)
var_int <- as.numeric(colData(grsExample)[, 1])
# TMLE procedure for the ATE parameter over M-values with Limma filtering
methyvim_out_ate <- suppressWarnings(
methyvim(
data_grs = grsExample, sites_comp = 25, var_int = var_int,
vim = "ate", type = "Mval", filter = "limma", filter_cutoff = 0.1,
parallel = FALSE, tmle_type = "glm"
)
)
methyvolc(methyvim_out_ate)
}
|
316bd99da2b5173f5fc5951b2891de6ac84c0699
|
745d585395acad1376d84f8ca1284c13f2db70f0
|
/man/make.ISOyear.Rd
|
5837e68752a5193145a0f08625b8db9ce73bcec4
|
[] |
no_license
|
pik-piam/quitte
|
50e2ddace0b0e2cbfabf8539a0e08efe6bb68a0b
|
4f5330695bd3d0e05d70160c1af64f0e436f89ea
|
refs/heads/master
| 2023-08-20T04:15:16.472271
| 2023-08-09T08:14:32
| 2023-08-09T08:14:32
| 206,053,101
| 0
| 8
| null | 2023-08-09T08:14:34
| 2019-09-03T10:39:07
|
R
|
UTF-8
|
R
| false
| true
| 748
|
rd
|
make.ISOyear.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/make.ISOyear.R
\name{make.ISOyear}
\alias{make.ISOyear}
\title{speedily converting years to POSIXct values}
\usage{
make.ISOyear(years)
}
\arguments{
\item{years}{ignored.}
}
\value{
The \code{\link[=ISOyear]{ISOyear()}} function.
}
\description{
\ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#deprecated}{\figure{lifecycle-deprecated.svg}{options: alt='[Deprecated]'}}}{\strong{[Deprecated]}}
}
\details{
This function was deprecated because the \code{\link[=ISOyear]{ISOyear()}} function can be used
directly.
}
\examples{
ISOyear <- make.ISOyear()
ISOyear(c(2005, 2010, 2100, 1900))
# ->
ISOyear(c(2005, 2010, 2100, 1900))
}
\keyword{internal}
|
ba2dde2cda9c740ea85db49eff513c31406aa4a1
|
1bd99e7010d5314765a4fa482176ee2963e460d5
|
/tests/testthat/test-expectation.R
|
d19c12563cda42191971647fbc9e5f536a534820
|
[] |
no_license
|
hadley/rv2
|
6470147c77b7ffacd2ea5d4546c27113aed9ab24
|
a56d359026f9fd8d57fa47aa81430c84af922c89
|
refs/heads/master
| 2021-01-24T03:58:07.773158
| 2017-01-12T02:08:37
| 2017-01-12T02:08:37
| 15,084,986
| 7
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 736
|
r
|
test-expectation.R
|
# Tests for the expectation operator E() on discrete random variables (rv).
context("Expectation")

# Fixtures: a fair six-sided die and a fair +/-1 coin.
dice <- rv(1:6)
coin <- rv(c(-1, 1))

test_that("expectation correct for known cases", {
  expect_equal(E(dice), 3.5)
  expect_equal(E(coin), 0)
})

# Linearity of expectation: E(X + Y) = E(X) + E(Y).
test_that("expectation is additive", {
  expect_equal(E(dice + coin), E(dice) + E(coin))
  expect_equal(E(dice + dice), 2 * E(dice))
  expect_equal(E(dice + dice + dice), 3 * E(dice))
})

# Homogeneity: E(a * X) = a * E(X).
# FIX: corrected the "multiplicatve" typo in the original test description.
test_that("expectation is multiplicative", {
  expect_equal(E( 6 * dice), 6 * E(dice))
  expect_equal(E( 1 * dice), 1 * E(dice))
  expect_equal(E(-1 * dice), -1 * E(dice))
  expect_equal(E( 0 * dice), 0 * E(dice))
})

test_that("expectation throws error if input not an rv", {
  expect_error(E(5), "must be an rv object")
  expect_error(E("a"), "must be an rv object")
})
|
d03e0b1eac3a7b66d792f25ab586fc815b161e76
|
cbdfc6b1ee1121090a538a74a9408fa5c206a4f8
|
/R/prev.R
|
dbc462f6341ca94287f429bae0edd4ca865c03eb
|
[] |
no_license
|
mplex/multiplex
|
9d1eb7e1289f2fce4181094f831f6f020f1526d9
|
4153f723ac0c8d1e42c78fce91c26977d955f329
|
refs/heads/master
| 2023-07-21T17:23:08.732663
| 2023-07-10T12:10:46
| 2023-07-10T12:10:46
| 65,552,701
| 23
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,249
|
r
|
prev.R
|
# prev: builds the table of all two-step compositions ("2-step Table") of the
# relations in `x` (a stacked array of square 0/1 matrices), as a preparatory
# step for a full semigroup construction.
#
# Returns a list with:
#   `2stpT`  - matrix s0 where entry [j, k] is the index of the input relation
#              equal to the Boolean product x[,,j] %*% x[,,k], or NA if that
#              product is not among the inputs.
#   PcU2stpT - proportion of NA entries in `2stpT`.
#   ordr     - largest relation index appearing in `2stpT`.
#   Note     - warning string when ordr > 7 and PcU2stpT > 0.5.
# Relies on zbnd() (defined elsewhere in the package) to append a matrix to a
# 3-dimensional array.
prev <-
function (x)
{
    # Input must be a (stack of) square matrices.
    if (is.array(x) == FALSE)
        stop("Data must be a stacked array of square matrices.")
    # Case 1: a single matrix -- record whether its Boolean square equals
    # itself (transitive closure) and stack x with its Boolean square.
    if (is.na(dim(x)[3]) == TRUE) {
        s0 <- data.frame(matrix(ncol = 1L, nrow = 1L))
        if (isTRUE(all.equal(replace(x %*% x, x %*% x >= 1L,
            1L), x) == TRUE))
            s0[1, 1] <- 1L
        Bx <- array(dim = c(dim(x)[1], dim(x)[2], 2L))
        Bx[, , 1] <- as.matrix(x)
        Bx[, , 2] <- replace(x %*% x, x %*% x >= 1L, 1L)
    }
    # Case 2: a stack of matrices.
    if (is.na(dim(x)[3]) == FALSE) {
        # Flatten each slice to one row of tmp0 so duplicated relations can
        # be detected via unique().
        tmp0 <- data.frame(matrix(ncol = (dim(x)[1] * dim(x)[2]), nrow = 0L))
        for (i in 1:dim(x)[3]) {
            ifelse(isTRUE(dim(x)[3] > 1L) == TRUE, tmp0[i, ] <- as.vector(x[, , i]), tmp0 <- as.vector(x))
        }
        rm(i)
        if (isTRUE(is.null(dim(tmp0)) == FALSE) == TRUE)
            rownames(tmp0) <- dimnames(x)[[3]]
        if (isTRUE(dim(x)[3] < 2L) == TRUE)
            x <- array(tmp0, c(dim(x)[1], dim(x)[2]))
        # Rebuild `x` keeping only the unique relations (labels preserved).
        if (isTRUE(dim(x)[3] > 1L) == TRUE) {
            tmp <- array(dim = c(dim(x)[1], dim(x)[2], nrow(unique(tmp0))))
            for (i in 1:nrow(unique(tmp0))) {
                tmp[, , i][1:(dim(x)[1] * dim(x)[2])] <- as.numeric(unique(tmp0)[i, ])
            }
            rm(i)
            if (is.null(dimnames(tmp)[[1]]) == FALSE)
                dimnames(tmp)[[3]] <- rownames(unique(tmp0))
            if (is.null(dimnames(x)[[1]]) == FALSE)
                dimnames(tmp)[[1]] <- dimnames(tmp)[[2]] <- dimnames(x)[[1]]
            x <- tmp
            dimnames(x)[[3]] <- as.list(rownames(unique(tmp0)))
        }
        rm(tmp0, tmp)
        # s0[j, k] records which input relation equals the Boolean product
        # x[,,j] %*% x[,,k]; entries stay NA when no match exists.
        s0 <- data.frame(matrix(ncol = dim(x)[3], nrow = dim(x)[3]))
        for (k in 1:dim(x)[3]) {
            for (j in 1:dim(x)[3]) {
                tmp <- x[, , j] %*% x[, , k]
                tmp <- replace(tmp, tmp >= 1L, 1L)
                for (i in dim(x)[3]:1) {
                  if (isTRUE(all.equal(tmp, x[, , i]) == TRUE))
                    s0[j, k] <- i
                }
            }
        }
        rm(i, j, k)
        dimnames(s0)[[1]] <- 1:dim(x)[3]
        dimnames(s0)[[2]] <- 1:dim(x)[3]
        # If every product is already among the inputs there is nothing to add.
        if (sum(as.numeric(is.na(s0))) == 0L)
            Bx <- x
        # Otherwise build the missing compound relations and bind them
        # (deduplicated, labelled with indices after the originals).
        # NOTE(review): Bx is assembled here but not returned -- presumably
        # kept for a later construction step; confirm it is intentional.
        if (sum(as.numeric(is.na(s0))) > 0L) {
            Bx <- array(dim = c(dim(x)[1], dim(x)[2], 0L))
            for (i in 1:nrow(s0)) {
                for (j in 1:length(which(is.na(s0[i, ])))) {
                  if (length(which(is.na(s0[i, ]))) > 0L)
                    Bx <- zbnd(Bx, (replace(x[, , i] %*% x[, , which(is.na(s0[i, ]))[j]], x[, , i] %*% x[, , which(is.na(s0[i, ]))[j]] >= 1L, 1L)))
                }
            }
            rm(i, j)
            tmp <- data.frame(matrix(ncol = (dim(x)[1] * dim(x)[2]), nrow = 0L))
            for (i in 1:dim(Bx)[3]) {
                tmp[i, ] <- as.vector(Bx[, , i])
            }
            rm(i)
            xBx <- array(dim = c(dim(x)[1], dim(x)[2], nrow(unique(tmp))))
            for (i in 1:nrow(unique(tmp))) {
                xBx[, , i][1:(dim(Bx)[1] * dim(Bx)[2])] <- as.numeric(unique(tmp)[i, ])
            }
            rm(i)
            if (is.null(dimnames(xBx)) == FALSE)
                dimnames(xBx)[[3]] <- (dim(x)[3] + 1L):(dim(xBx)[3] + dim(x)[3])
            Bx <- zbnd(x, xBx)
            rm(xBx, tmp)
        }
    }
    if (is.null(dimnames(x)[[3]]) == FALSE)
        dimnames(s0)[[2]] <- dimnames(x)[[3]]
    # Proportion of two-step products that are NOT among the input relations.
    pct <- round(length(attr(stats::na.omit(as.vector(unlist(s0))),
        "na.action"))/length(as.vector(unlist(s0))), 2)
    # Largest relation index referenced in the table.
    d <- as.numeric(sort(unlist(s0), decreasing = TRUE))[1]
    if (isTRUE(d > 7L) == TRUE) {
        if (isTRUE(pct < 0.5) == TRUE)
            return(list(`2stpT` = s0, PcU2stpT = pct, ordr = d))
        if (isTRUE(pct > 0.5) == TRUE)
            return(list(`2stpT` = s0, PcU2stpT = pct, ordr = d,
                Note = c("Complete semigroup construction may take long time")))
    }
    return(list(`2stpT` = s0, PcU2stpT = pct, ordr = d))
}
|
39e8763bd1cfd9ac71e7333d1e9507853f851e47
|
236f960cf07b0b68034821234dc6ae45c1bf2e79
|
/Bayesian Statistics/multiparameter models,HW5-1.R
|
7a4489ccf4435d36a7b335e23c549af4ea7b9198
|
[] |
no_license
|
xiaojianzhang/R_life
|
b001b65eeab429a394f841f62d1c76e6676d2db1
|
62ad4072079b0fe814f3c5ba57bc3db57358cf6d
|
refs/heads/master
| 2021-01-01T04:11:51.997116
| 2016-05-17T04:12:39
| 2016-05-17T04:12:39
| 58,959,582
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,886
|
r
|
multiparameter models,HW5-1.R
|
#Chapter 5 Exercise 5
# Bayesian analysis of five rounded normal observations: compares the
# "incorrect" posterior (which ignores rounding) with the "correct" posterior
# (which models each observation as the interval y +/- 0.5), then simulates
# the latent unrounded values z.
#setup
library(geoR)
ydata <- c(10, 10, 12, 11, 9)
n = 5
y_bar = 10.4
s_square = 1.3
#(c) How do the incorrect and correct posterior
# distributions differ?
#(1)Consider incorrect posterior
# Standard conjugate draws for the normal model with noninformative prior.
#draw sigma_square from inverse chi square(n-1, s^2)
sample_sigma_square <- rinvchisq(1000, n-1, s_square)
#draw mu from normal(y_bar, sigma_square/n)
sample_mu <- rnorm(1000, mean=y_bar, sd=sqrt(sample_sigma_square/n))
#sample mean
mean_mu <- mean(sample_mu)
mean_sigma_square <- mean(sample_sigma_square)
print(mean_mu)
print(mean_sigma_square)
#sample variance
var_mu <- var(sample_mu)
var_sigma_square <- var(sample_sigma_square)
print(var_mu)
print(var_sigma_square)
#contour plot
# Grid over (mu, log sigma) for evaluating the joint posterior.
mu <- seq(0,20,0.05)
log_sigma <- seq(-2,4,0.02)
sigma <- exp(log_sigma)
# Log posterior of (mu, sigma) treating the data as exact values.
log_post_mu_log_sigma <- function(mu, sigma, y){
  z <- 0
  for(i in 1:length(y)){
    z <- z + log(dnorm(y[i], mean=mu, sd=sigma))
  }
  return(z)
}
log_post <- outer(mu, sigma, log_post_mu_log_sigma, ydata)
# Subtract the max before exponentiating to avoid underflow.
post <- exp(log_post - max(log_post))
contours <- c(.0001,.001,.01,seq(.05,.95,.05))
contour (main="contour plot of incorrect posterior dist",
mu, log_sigma, post, levels=contours,
xlab="mu", ylab="log sigma", cex=2)
#(2)Consider correct posterior
# Log posterior treating each observation as rounded to the nearest integer,
# i.e. the likelihood contribution is P(y - 0.5 < Y < y + 0.5).
log_post_mu_log_sigma <- function(mu, sigma, y){
  z <- 0
  for(i in 1:length(y)){
    z <- z + log(pnorm(y[i] + 0.5, mean=mu, sd=sigma) -
pnorm(y[i] - 0.5, mean=mu, sd=sigma))
  }
  return(z)
}
log_post <- outer(mu, sigma, log_post_mu_log_sigma, ydata)
post <- exp(log_post - max(log_post))
contours <- c(.0001,.001,.01,seq(.05,.95,.05))
contour (main="contour plot of correct posterior dist",
mu, log_sigma, post, levels=contours,
xlab="mu", ylab="log sigma", cex=2)
# Sample from the gridded posterior: first mu from its marginal, then
# log sigma from the conditional given the sampled mu.
normalized_post <- post / sum(post)
post_mu <- rowSums(post)
mu_index <- sample (1:length(mu), 500, replace=T,
prob=post_mu)
mu_sample <- mu[mu_index]
for(i in 1:length(post_mu)){
  normalized_post[i, ] <- normalized_post[i, ] / post_mu[i]
}
sigma_square_sample <- rep(NA, 500)
for(i in 1:500){
  sigma_square_sample[i] <- exp(sample(log_sigma, 1,
prob=normalized_post[mu_index[i], ]))^2
}
#sample mean
mean_mu <- mean(mu_sample)
mean_sigma_square <- mean(sigma_square_sample)
print(mean_mu)
print(mean_sigma_square)
#sample variance
var_mu <- var(mu_sample)
var_sigma_square <- var(sigma_square_sample)
print(var_mu)
print(var_sigma_square)
#(d) draw simulations from the posterior dist of z.
# compute the posterior mean of (z_1 - z_2)^2
# Rejection sampling: redraw until z falls inside the rounding interval
# ydata[j] +/- 0.5 (i.e. a truncated normal draw).
z <- matrix(0, 500, 5)
for(i in 1:500){
  for(j in 1:5){
    rn = rnorm(1,mean=mu_sample[i],sd=sqrt(sigma_square_sample[i]))
    while(rn >= ydata[j] + 0.5 || rn <= ydata[j] - 0.5){
      rn = rnorm(1,mean=mu_sample[i],sd=sqrt(sigma_square_sample[i]))
    }
    z[i,j] <- rn
  }
}
#posterior mean of (z[1]-z[2])^2
print(mean((z[,1]-z[,2])^2))
|
850d20a2b39db6b1820fb55060ac69129bc20e89
|
d9112b28db3cdc905fa4ee5abb223b969da81579
|
/man/PerformPeakAnnotation.Rd
|
2713d3b7bc6501bd4b3857aee145324b82fa42ca
|
[] |
no_license
|
wangyongdalt/OptiLCMS
|
323d4a483fc01ae5ffede01e474640bca31285f4
|
2ed8b90f7cd54cd38275240c6801d779b00398c0
|
refs/heads/master
| 2023-07-27T01:45:54.421922
| 2021-09-02T13:28:08
| 2021-09-02T13:28:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,033
|
rd
|
PerformPeakAnnotation.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Perform_functions.R
\name{PerformPeakAnnotation}
\alias{PerformPeakAnnotation}
\title{Perform peak annotation}
\usage{
PerformPeakAnnotation(mSet, annotaParam, ncore = 1, running.controller = NULL)
}
\arguments{
\item{mSet}{mSet object, usually generated by 'PerformPeakProfiling' here.}
\item{annotaParam}{The object created using the SetAnnotationParam function,
containing user's specified or default parameters for downstream
raw MS data pre-processing.}
\item{ncore}{annotation running core. Default is 1. Parallel running will be supported soon.}
\item{running.controller}{The resuming pipeline running controller. Optional. Don't need to define by hand.}
}
\value{
will return an mSet object with annotation finished
}
\description{
This function performs peak annotation on
the xset object created using the PerformPeakPicking function.
}
\examples{
data(mSet);
newPath <- dir(system.file("mzData", package = "mtbls2"),
full.names = TRUE, recursive = TRUE)[c(10, 11, 12)]
mSet <- updateRawSpectraPath(mSet, newPath);
annParams <- SetAnnotationParam(polarity = 'positive',
mz_abs_add = 0.035);
## Perform peak annotation with newly defined annParams
# mSet <- PerformPeakAnnotation(mSet = mSet,
# annotaParam = annParams,
# ncore =1)
}
\references{
Kuhl C, Tautenhahn R, Boettcher C, Larson TR, Neumann S (2012).
"CAMERA: an integrated strategy for compound spectra extraction and annotation of
liquid chromatography/mass spectrometry data sets." Analytical Chemistry, 84, 283-289.
http://pubs.acs.org/doi/abs/10.1021/ac202450g.
}
\seealso{
\code{\link{ExecutePlan}} and \code{\link{PerformPeakProfiling}} for the whole pipeline.
}
\author{
Zhiqiang Pang \email{zhiqiang.pang@mail.mcgill.ca}, Jasmine Chong \email{jasmine.chong@mail.mcgill.ca},
and Jeff Xia \email{jeff.xia@mcgill.ca}
McGill University, Canada
License: GNU GPL (>= 2)
}
|
12350b16a7859436c7676b5714ace1a108826f5f
|
4d3672136d43264176fe42ea42196f113532138d
|
/man/Rehab.Rd
|
5b8691b589fafd59ae224175eb582eb15332d637
|
[] |
no_license
|
alanarnholt/BSDA
|
43c851749a402c6fe73213c31d42c26fa968303e
|
2098ae86a552d69e4af0287c8b1828f7fa0ee325
|
refs/heads/master
| 2022-06-10T10:52:15.879117
| 2022-05-14T23:58:15
| 2022-05-14T23:58:15
| 52,566,969
| 5
| 13
| null | 2017-07-27T02:06:33
| 2016-02-26T00:28:07
|
R
|
UTF-8
|
R
| false
| true
| 939
|
rd
|
Rehab.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BSDA-package.R
\docType{data}
\name{Rehab}
\alias{Rehab}
\title{Rehabilitative potential of 20 prison inmates as judged by two psychiatrists}
\format{
A data frame/tibble with 20 observations on four variables
\describe{
\item{inmate}{inmate identification number}
\item{psych1}{rating from the first psychiatrist on the inmate's rehabilitative potential}
\item{psych2}{rating from the second psychiatrist on the inmate's rehabilitative potential}
\item{differ}{\code{psych1} - \code{psych2}}
}
}
\usage{
Rehab
}
\description{
Data for Exercise 7.61
}
\examples{
boxplot(Rehab$differ)
qqnorm(Rehab$differ)
qqline(Rehab$differ)
t.test(Rehab$differ)
# Or
t.test(Rehab$psych1, Rehab$psych2, paired = TRUE)
}
\references{
Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
}
\keyword{datasets}
|
971f4407e94b3a1ee886d555c0f4a50c8c5c9d81
|
062355817e0682b8eaaefaf0e7477c031895e02f
|
/test_web_scraping.R
|
a75394dc903cb26d35e7b54299a6478e0ce20c7d
|
[] |
no_license
|
informationista/wikiscraping
|
5f8b7b6e2b5f1211fbbca9805e7e2a53d8a000c8
|
3fda53a5a91efcc71ca6f90d2dc85ec9cef95946
|
refs/heads/master
| 2021-01-12T07:26:47.316999
| 2017-01-04T20:48:47
| 2017-01-04T20:48:47
| 76,962,730
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,600
|
r
|
test_web_scraping.R
|
# Scrape Wikipedia's "Deaths in <Month> <Year>" pages for 2006-2016, tidy the
# results into one row per person (year, month, day, name, age), and
# visualise death counts and age at death.
library(rvest)
library(RCurl)
library(plyr)
library(tidyr)
library(ggplot2)
library(httr)
library(dplyr)
##create a list of URLs to retrieve
##create the list of years of interest
year_list <- as.list(c(2006:2016))
# Build one (year, month, url) row for every month of the given year.
url_df <- function(year){
  url <- paste("https://en.wikipedia.org/wiki/Deaths_in_", month.name, "_", year, sep = "")
  data.frame(year, month.name, url)
}
df <- lapply(year_list, url_df) %>% rbind.fill()
##this part got the day only - this got combined with the next part where we get the day and the deaths at the same time
#get_day <- function(year, month, url){
#  day <- read_html(as.character(url)) %>% html_nodes(xpath = '//h3') %>% html_text()
#  day <- gsub("\\[edit\\]", "", day) %>% as.numeric(as.character(day)) #remove the edit text and non-numerics
#  day <- day[!is.na(day)] #get rid of NAs introduced in last part
#  data.frame(year, month, day)
#}
##this function takes the data frame and retrieves the days and deaths on those days for each url
get_data <- function(year, month, url){
  # Day-of-month headers are the <h3> elements; strip the "[edit]" suffix and
  # any non-numeric headers.
  day <- read_html(as.character(url)) %>% html_nodes(xpath = '//h3') %>% html_text()
  day <- gsub("\\[edit\\]", "", day) %>% as.numeric(as.character(day)) #remove the edit text and non-numerics
  day <- day[!is.na(day)] #get rid of NAs introduced in last part
  df_mid <- data.frame(year, month, day, death = as.character(length(day)), stringsAsFactors = FALSE)
  # One <ul> per day lists that day's deaths; the first two lists are
  # page navigation, not content.
  death <- read_html(as.character(url)) %>% html_nodes("div#mw-content-text.mw-content-ltr") %>% html_nodes("ul") %>% html_text()
  death <- death[c(-1, -2)] #remove the extraneous content
  for (i in seq_len(nrow(df_mid))){
    df_mid[i, 4] <- as.character(death[i])
  }
  return(df_mid)
}
##use the get_data function to retrieve all data for all years/months in the list
# FIX: collect the per-month frames in a list and bind once, instead of
# growing a data frame with rbind() inside the loop (quadratic copying);
# also use seq_len() rather than 1:nrow().
df_data <- lapply(seq_len(nrow(df)), function(i) get_data(df[i, 1], df[i, 2], df[i, 3])) %>%
  bind_rows()
##Now to clean up the death data
##split each person into their own line
df_data <- df_data %>% mutate(death = strsplit(as.character(death), "\n")) %>% unnest(death)
##remove any blank lines
df_data <- subset(df_data, death != "")
##split the name and age into separate columns, dump the other info
cleaned_df <- separate(df_data, col = death, into = c("name", "age"), sep = ",", remove = TRUE, extra = "drop")
##remove any stuff that got into the age column that's not an actual age
# (non-numeric "ages" become NA with a coercion warning -- expected here)
cleaned_df$age <- as.numeric(as.character(cleaned_df$age))
##put months in ordered factor for nice charts
cleaned_df$month <- factor(cleaned_df$month, levels = month.name)
##Some visualizations
#create a style to use in all charts
style <- theme(plot.title = element_text(size = 20, face = "bold"), text = element_text(family = "serif"))
##create a chart of deaths by year
ggplot(cleaned_df, aes(x = as.factor(year))) + geom_bar(fill = "skyblue", color = "black") + xlab("Year") + ylab("Number of deaths") + ggtitle("Deaths of Notable People on Wikipedia, 2006 - 2016") + style
##chart of deaths by month, all years
ggplot(cleaned_df, aes(x = month)) + geom_bar(fill = "khaki1", color = "black") + xlab("Month") + ylab("Number of deaths") + ggtitle("Deaths of Notable People on Wikipedia by Month, 2006 - 2016") + style
##boxplot of age at death
ggplot(cleaned_df, aes(x = as.factor(year), y = age)) + geom_boxplot(outlier.colour = "red") + style + ggtitle("Age at Death of Notable People on Wikipedia by Year") + xlab("Year") + ylab("Age at Death")
##get summary data about age at death
mean_age <- with(cleaned_df, aggregate(list(age), by = list(year), FUN = function(x) mean(x, na.rm = TRUE)))
|
58c18088279abc304368b5464c9ff4471711a07c
|
8ab151cc5bfb154cc4ae4b1d97ddd6b2bedc95fa
|
/R/filter.date.R
|
4919c2821e4eb20b7159cec9598b6f0dbec4f709
|
[] |
no_license
|
arturochian/MetFns
|
5eafd4bc404edbbdefd27223c5b8a99d32cd048d
|
5ce9fc52efdac3c2a12aa18282ab71e53aacf115
|
refs/heads/master
| 2020-04-06T04:20:15.871591
| 2014-09-16T00:00:00
| 2014-09-16T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 425
|
r
|
filter.date.R
|
# Subset meteor observation records to a given year/month and a day-of-month
# window [day.beg, day.end] (day.end defaults to day.beg, i.e. a single day).
# Fractional mid-observation days come from day.mid(); the data's year column
# stores two-digit years, hence the comparison against year %% 100.
filter.date <- function(data, year, month, day.beg, day.end = day.beg) {
  args.ok <- is.data.frame(data) &&
    is.numeric(c(year, month, day.beg, day.end)) &&
    year >= 1984 &&
    month >= 1 && month <= 12 &&
    all(c(day.beg, day.end) >= 1) &&
    all(c(day.beg, day.end) <= 31)
  if (!args.ok)
    stop("invalid input parameter(s) specification")
  obs.day <- day.mid(data)[, 2]
  in.window <- data$year == year %% 100 &
    data$month == month &
    obs.day >= day.beg & obs.day <= day.end
  data[in.window, ]
}
|
ace5b9f69c36119e52d9e0c60e198f8ed2c11ab3
|
36b14b336e0efdda255fa3f163af65127e88105f
|
/man/Problem2.33.Rd
|
62417be1c52bea0442a5820d811c9f75a3bb5f6d
|
[] |
no_license
|
ehassler/MontgomeryDAE
|
31fcc5b46ae165255446e13beee9540ab51d98b3
|
43a750f092410208b6d1694367633a104726bc83
|
refs/heads/master
| 2021-06-24T13:46:19.817322
| 2021-03-11T16:36:37
| 2021-03-11T17:13:18
| 199,803,056
| 8
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 388
|
rd
|
Problem2.33.Rd
|
\name{Problem2.33}
\alias{Problem2.33}
\docType{data}
\title{Exercise 2.33}
\usage{data("Problem2.33")}
\format{A data frame with 20 observations on the following variable(s).\describe{
\item{\code{Uniformity}}{a numeric vector}
}}
\references{Montgomery, D.C.(2017, 10th ed.) \emph{Design and Analysis of Experiments}, Wiley, New York.}
\examples{data(Problem2.33)}
\keyword{datasets}
|
cf9c260afef9aa5ee359dea14bd8fde0d43ddb4c
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed_and_cleaned/10304_2/rinput.R
|
416b7b832a32829cf8369914c1225bd00dbfdca9
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 137
|
r
|
rinput.R
|
# Read the phylogenetic tree in Newick format, unroot it, and write the
# unrooted tree back out alongside the input.
library(ape)
rooted_tree <- read.tree("10304_2.txt")
write.tree(unroot(rooted_tree), file = "10304_2_unrooted.txt")
|
449c26ee542d0ee91b917b3679fb1feb3e841e3b
|
7d125cf7b30e9be0ef1f02e24ad13495b4481f4e
|
/src/Library/filterGeneExpSamples.R
|
03f628624b8199ed3cc3ec42fccf7c6af481f605
|
[] |
no_license
|
DToxS/Differential-Comparison
|
0616004e275cfa17d605505cecc6842a0baa4b2a
|
d6b3d4cc7c4ef2bdb21527655fb927c146453942
|
refs/heads/master
| 2022-04-06T22:12:37.767298
| 2020-02-27T22:19:56
| 2020-02-27T22:19:56
| 105,199,221
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,661
|
r
|
filterGeneExpSamples.R
|
# Filter outlier gene-expression samples.
#
# For each combination of drug treatment (State), cell line (Cell) and
# measurement plate (Plate) in the merged experiment design table --
# optionally split further by the design column named in subset_field_name --
# the replicate samples are passed to filterGeneExpSamplesSubset(), which
# clusters them and removes outliers according to the distance cutoffs.
# The surviving design-table rows are collected and returned.
#
# Main arguments:
#   exprt_design_merged  experiment design table; must contain at least the
#                        columns ID, State, Cell and Plate.
#   read_counts_merged   read-count matrix whose columns are sample IDs.
#   drug_names           drug treatments (States) to process.
#   dist_cutoffs, dist_cutoff_outlier, dist_cutoff_group
#                        distance cutoffs resolved per condition by getCutoff().
#   min_samples          group size above which the single-sample cutoff is used.
#   subset_field_name    optional design column used to split samples into
#                        subsets before filtering.
#   clust_title_elems_names/_flags/_keys
#                        control the title text of the sample cluster plots.
#   All remaining arguments are forwarded to filterGeneExpSamplesSubset().
#
# Returns the filtered experiment design table (sorted by drug, cell and
# plate); NULL if subset_field_name is invalid or nothing passes the filters;
# the input table unchanged when the cluster-title arguments are malformed.
filterGeneExpSamples <- function(exprt_design_merged, read_counts_merged, drug_names, dist_cutoffs, subset_field_name=NULL, dist_cutoff_outlier=0.01, dist_cutoff_group=0.015, min_samples=3, filter_outlier=TRUE, keep_under_samples=FALSE, plot_orig_clust=FALSE, plot_filter_clust=FALSE, plot_pass_clust=FALSE, plot_empty_clust=FALSE, color_sample_groups=TRUE, group_colors=NULL, color_field_pos=3, color_field_type="numeric", hline_color="deeppink", hline_type=4, hline_width=1, clust_title_elems_names=c(State="State",Culture="Culture",Measurement="Measurement",Subject="Subject"), clust_title_elems_flags=c(State=TRUE,Culture=TRUE,Measurement=TRUE,Subject=TRUE), clust_title_elems_keys=c("State", "Culture", "Measurement", "Subject"), title_cex=1.75, branch_line_width=2, leaf_lab_cex=0.75, ylab_cex=1.5, leg_title="Culture", leg_pos="topright", leg_cex=1, leg_title_col="black", leg_title_adj=1, verbose=FALSE, func_dir=NULL)
{
    # Load required library
    require("matrixStats")
    # Load user-defined functions.
    if(is.null(func_dir)) func_dir <- getwd()
    source(file.path(func_dir, "getCutoff.R"), local=TRUE)
    source(file.path(func_dir, "filterGeneExpSamplesSubset.R"), local=TRUE)
    # Check input arguments.
    # Check subset_field_name.
    if(!is.null(subset_field_name) && !all(subset_field_name %in% colnames(exprt_design_merged)))
    {
        warning("subset_field_name must be one of the column names of experiment design table!")
        return(NULL)
    }
    # The title text for sample cluster plots is composed of:
    #
    # - the number of samples
    # - the name of treatment state
    # - the name of cell culture experiment
    # - the name of measurement plate
    # - the name of cell subject
    #
    # in the form of:
    #
    # [#] [State] in [Culture] [Measurement] [Subject]
    #
    # e.g. 4 TRS in Exp 15 Assay 6 iPSC B
    if(!is.null(clust_title_elems_names) && !is.null(clust_title_elems_flags))
    {
        if(!(is.character(clust_title_elems_names) && is.vector(clust_title_elems_names) && length(clust_title_elems_names)==length(clust_title_elems_keys)))
        {
            warning(paste0("clust_title_elems_names must be a vector of character strings with a length of ", length(clust_title_elems_keys), "!"))
            return(exprt_design_merged)
        }
        # BUGFIX: compare the whole name vectors with identical(); the original
        # vectorized `!=` inside if() used only the first element and is an
        # outright error in R >= 4.2 when the vectors have length > 1.
        if(!identical(names(clust_title_elems_names), clust_title_elems_keys))
        {
            warning(paste0("The names of clust_title_elems_names must be ", paste0(clust_title_elems_keys,collapse=", "), "!"))
            return(exprt_design_merged)
        }
        if(!(is.logical(clust_title_elems_flags) && is.vector(clust_title_elems_flags) && length(clust_title_elems_flags)==length(clust_title_elems_keys)))
        {
            warning(paste0("clust_title_elems_flags must be a vector of logical values with a length of ", length(clust_title_elems_keys), "!"))
            return(exprt_design_merged)
        }
        # BUGFIX: same vector-comparison fix as above.
        if(!identical(names(clust_title_elems_flags), clust_title_elems_keys))
        {
            warning(paste0("The names of clust_title_elems_flags must be ", paste0(clust_title_elems_keys,collapse=", "), "!"))
            return(exprt_design_merged)
        }
    }
    else
    {
        warning("Neither of clust_title_elems_names and clust_title_elems_flags can be NULL!")
        return(exprt_design_merged)
    }
    # Retrieve subset names from experiment design table if a subset
    # category is specified.
    # Note: the name of a subset category must be one of the column
    # names of experiment design table.
    if(!is.null(subset_field_name)) subset_names <- exprt_design_merged[,subset_field_name]
    else subset_names <- NULL
    # Sort and filter sample replicates in each drug-treated condition for
    # all drug groups and cell lines.
    exprt_design_merged_sorted <- NULL
    for(drug_name in drug_names)
    {
        sample_drug_flags <- exprt_design_merged$State==drug_name
        exprt_design_merged_drug <- exprt_design_merged[sample_drug_flags,,drop=FALSE]
        if(!is.null(subset_names)) subset_names_drug <- subset_names[sample_drug_flags]
        else subset_names_drug <- NULL
        for(cell_line in sort(unique(exprt_design_merged$Cell)))
        {
            sample_drug_cell_flags <- exprt_design_merged_drug$Cell==cell_line
            exprt_design_merged_drug_cell <- exprt_design_merged_drug[sample_drug_cell_flags,,drop=FALSE]
            if(!is.null(subset_names_drug)) subset_names_drug_cell <- subset_names_drug[sample_drug_cell_flags]
            else subset_names_drug_cell <- NULL
            for(plate in sort(unique(exprt_design_merged_drug_cell$Plate)))
            {
                # Prepare the dataset of the current condition.
                sample_drug_cell_plate_flags <- exprt_design_merged_drug_cell$Plate==plate
                exprt_design_merged_drug_cell_plate <- exprt_design_merged_drug_cell[sample_drug_cell_plate_flags,,drop=FALSE]
                sample_names_drug_cell_plate <- exprt_design_merged_drug_cell_plate$ID
                read_counts_drug_cell_plate <- read_counts_merged[,sample_names_drug_cell_plate,drop=FALSE]
                # Drop genes (rows) containing any missing counts.
                if(is.matrix(read_counts_drug_cell_plate)) read_counts_drug_cell_plate <- read_counts_drug_cell_plate[!rowAnys(is.na(read_counts_drug_cell_plate)),,drop=FALSE]
                else read_counts_drug_cell_plate <- read_counts_drug_cell_plate[!is.na(read_counts_drug_cell_plate)]
                if(!is.null(subset_names_drug_cell)) subset_names_drug_cell_plate <- subset_names_drug_cell[sample_drug_cell_plate_flags]
                else subset_names_drug_cell_plate <- NULL
                # Calculate the cutoff line for outlier samples.
                # Set cutoff values for outlier samples.
                cutoff <- getCutoff(state=drug_name, cell=cell_line, plate=plate, cutoffs=dist_cutoffs, single=dist_cutoff_outlier, group=dist_cutoff_group)
                # Use the single-sample cutoff for sufficiently large groups
                # and the group cutoff otherwise.
                if(is.matrix(read_counts_drug_cell_plate) && ncol(read_counts_drug_cell_plate)>min_samples) hline <- cutoff[1]
                else hline <- cutoff[2]
                # Filter outlier samples for each subset of samples at current drug, cell and plate.
                if(!is.null(subset_names_drug_cell_plate))
                {
                    exprt_design_merged_drug_cell_plate_filtered <- NULL
                    for(subset_name in sort(unique(subset_names_drug_cell_plate)))
                    {
                        # Prepare the dataset of the current condition.
                        sample_drug_cell_plate_subset_flags <- subset_names_drug_cell_plate==subset_name
                        exprt_design_merged_drug_cell_plate_subset <- exprt_design_merged_drug_cell_plate[sample_drug_cell_plate_subset_flags,,drop=FALSE]
                        sample_names_drug_cell_plate_subset <- exprt_design_merged_drug_cell_plate_subset$ID
                        read_counts_drug_cell_plate_subset <- read_counts_drug_cell_plate[,sample_names_drug_cell_plate_subset,drop=FALSE]
                        # Set title text for sample cluster plots.
                        clust_title_elems_values <- c(State=drug_name, Culture=subset_name, Measurement=plate, Subject=cell_line)
                        clust_title_elems <- trimws(paste(clust_title_elems_names, clust_title_elems_values))
                        names(clust_title_elems) <- names(clust_title_elems_names)
                        clust_title_elems <- clust_title_elems[clust_title_elems_flags]
                        if("State"%in%names(clust_title_elems)) clust_title <- paste(clust_title_elems["State"], "in", paste0(clust_title_elems[names(clust_title_elems)!="State"],collapse=" "))
                        else clust_title <- paste0(clust_title_elems[names(clust_title_elems)!="State"],collapse=" ")
                        # Filter outlier samples for current subset of samples.
                        exprt_design_merged_drug_cell_plate_subset_filtered <- filterGeneExpSamplesSubset(subset_name=subset_name, exprt_design=exprt_design_merged_drug_cell_plate_subset, read_counts=read_counts_drug_cell_plate_subset, cutoff=cutoff, hline=hline, dist_cutoff_outlier=dist_cutoff_outlier, dist_cutoff_group=dist_cutoff_group, min_samples=min_samples, filter_outlier=filter_outlier, keep_under_samples=keep_under_samples, plot_orig_clust=plot_orig_clust, plot_filter_clust=plot_filter_clust, plot_pass_clust=plot_pass_clust, plot_empty_clust=plot_empty_clust, color_sample_groups=color_sample_groups, group_colors=group_colors, color_field_pos=color_field_pos, color_field_type=color_field_type, hline_color=hline_color, hline_type=hline_type, hline_width=hline_width, clust_title=clust_title, title_cex=title_cex, branch_line_width=branch_line_width, leaf_lab_cex=leaf_lab_cex, ylab_cex=ylab_cex, leg_title=leg_title, leg_pos=leg_pos, leg_cex=leg_cex, leg_title_col=leg_title_col, leg_title_adj=leg_title_adj, verbose=verbose, func_dir=func_dir)
                        # Save filtered samples for current subset of samples.
                        # BUGFIX: guard against a NULL result before nrow();
                        # nrow(NULL) is NULL and if(NULL > 0) is an error.
                        if(!is.null(exprt_design_merged_drug_cell_plate_subset_filtered) && nrow(exprt_design_merged_drug_cell_plate_subset_filtered)>0) exprt_design_merged_drug_cell_plate_filtered <- rbind(exprt_design_merged_drug_cell_plate_filtered, exprt_design_merged_drug_cell_plate_subset_filtered)
                    }
                }
                else
                {
                    # Set title text for sample cluster plots (no subset level,
                    # so the Culture element is blank and trimmed away).
                    clust_title_elems_values <- c(State=drug_name, Culture="", Measurement=plate, Subject=cell_line)
                    clust_title_elems <- trimws(paste(clust_title_elems_names, clust_title_elems_values))
                    names(clust_title_elems) <- names(clust_title_elems_names)
                    clust_title_elems <- clust_title_elems[clust_title_elems_flags]
                    if("State"%in%names(clust_title_elems)) clust_title <- paste(clust_title_elems["State"], "in", paste0(clust_title_elems[names(clust_title_elems)!="State"],collapse=" "))
                    else clust_title <- paste0(clust_title_elems[names(clust_title_elems)!="State"],collapse=" ")
                    # Filter outlier samples for all the samples at current drug, cell and plate.
                    exprt_design_merged_drug_cell_plate_filtered <- filterGeneExpSamplesSubset(exprt_design=exprt_design_merged_drug_cell_plate, read_counts=read_counts_drug_cell_plate, cutoff=cutoff, hline=hline, dist_cutoff_outlier=dist_cutoff_outlier, dist_cutoff_group=dist_cutoff_group, min_samples=min_samples, filter_outlier=filter_outlier, keep_under_samples=keep_under_samples, plot_orig_clust=plot_orig_clust, plot_filter_clust=plot_filter_clust, plot_pass_clust=plot_pass_clust, plot_empty_clust=plot_empty_clust, color_sample_groups=color_sample_groups, group_colors=group_colors, color_field_pos=color_field_pos, color_field_type=color_field_type, hline_color=hline_color, hline_type=hline_type, hline_width=hline_width, clust_title=clust_title, title_cex=title_cex, branch_line_width=branch_line_width, leaf_lab_cex=leaf_lab_cex, ylab_cex=ylab_cex, leg_title=leg_title, leg_pos=leg_pos, leg_cex=leg_cex, leg_title_col=leg_title_col, leg_title_adj=leg_title_adj, verbose=verbose, func_dir=func_dir)
                }
                # Save filtered samples at current drug, cell and plate.
                # BUGFIX: same NULL guard as above (all subsets may be empty).
                if(!is.null(exprt_design_merged_drug_cell_plate_filtered) && nrow(exprt_design_merged_drug_cell_plate_filtered)>0) exprt_design_merged_sorted <- rbind(exprt_design_merged_sorted, exprt_design_merged_drug_cell_plate_filtered)
            }
        }
    }
    # Return filtered experiment design table.
    return(exprt_design_merged_sorted)
}
|
05d6e4fc665c68b0fce9a9d40098b9ca605600ec
|
202684be012c3153a9791a6430a8f7eae997a036
|
/data_handling.R
|
d6b46d9ac821c9c5c415d3f483245d82e9ee9965
|
[] |
no_license
|
fbaffie/NVE_API_readR
|
cfc2090df2a9e8e737a85278486059ff6b38790f
|
b8dc5b1bb25f6374bfe3ee6f1ddf23584220a4b5
|
refs/heads/master
| 2020-03-21T12:48:16.703179
| 2018-07-03T09:16:00
| 2018-07-03T09:16:00
| 138,572,644
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,787
|
r
|
data_handling.R
|
# Construct the metadata table used by the app from the per-station data list.
#
# data_main: a list where each element has a $metadata data frame (one row)
#            plus scalar $prec_mean, $runoff_mean and $runoff_eff values
#            (as produced by comp_stats()).
# Returns the stacked metadata data frame with the three summary-statistic
# columns appended.
metadata_for_app <- function(data_main) {
  # Stack all metadata rows at once; avoids the O(n^2) rbind-in-a-loop
  # pattern and the unsafe 1:length(data_main) index sequence.
  df_meta <- do.call(rbind, lapply(data_main, function(x) x$metadata))
  df_meta$prec_mean <- sapply(data_main, function(x) x$prec_mean)
  df_meta$runoff_mean <- sapply(data_main, function(x) x$runoff_mean)
  df_meta$runoff_eff <- sapply(data_main, function(x) x$runoff_eff)
  return(df_meta)
}
# Add annual-average precipitation/runoff and the runoff efficiency
# (total runoff divided by total precipitation) to a station's data list.
# Only days where both Prec and Runoff are observed contribute.
comp_stats <- function(data_list) {
  # Complete cases across the two daily series.
  keep <- !is.na(data_list$Prec) & !is.na(data_list$Runoff)
  prec <- data_list$Prec[keep]
  runoff <- data_list$Runoff[keep]
  # Annualise the daily means (365 days per year).
  data_list$prec_mean <- 365 * mean(prec)
  data_list$runoff_mean <- 365 * mean(runoff)
  # Runoff efficiency over the common observation period.
  data_list$runoff_eff <- sum(runoff) / sum(prec)
  data_list
}
# Read the station metadata table from an Excel workbook and derive the
# station ('regine_main') and observation-series ('obs_series') identifiers.
#
# filename: path to the Excel file (first sheet is read).
# Returns a tibble with duplicate stations removed and the two identifier
# columns appended.
read_metadata_file <- function(filename) {
  # Read station metadata
  meta_data <- read_excel(filename)
  # as_tibble() replaces the deprecated tbl_df()
  meta_data <- as_tibble(meta_data)
  # # Keep rows with runoff data (parameter == 1001)
  #
  # meta_data <- filter(meta_data, param_key==1001)
  # Remove stations duplicated on the first three columns
  idup <- duplicated(meta_data[, 1:3])
  meta_data <- meta_data[!idup, ]
  # Add station name as 'regine_area.main_no'
  meta_data <- mutate(meta_data, regine_main = paste(regine_area, main_no, sep = "."))
  # Add observation series as 'regine_area.main_no.point_no.param_key.version_no_end'
  meta_data <- mutate(meta_data, obs_series = paste(regine_area, main_no, point_no, param_key, version_no_end, sep = "."))
  return(meta_data)
}
|
687ed98d7f3390038dfb73757cfc1bbdc83cdb95
|
20a9435ef4586a43a4e55502d0f0ac40aa185821
|
/tests/testthat/test_binary_single.R
|
2d140af5d1cce490b5ea74adea763cd3b70e4dbe
|
[] |
no_license
|
cran/hmi
|
a9df9353e459bfe45d9952370a962fa879c8f5a1
|
6d1edb0d025c182cedb325fa9826f4ba00e988d1
|
refs/heads/master
| 2021-01-23T06:20:51.264453
| 2020-10-01T22:20:02
| 2020-10-01T22:20:02
| 86,358,162
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 672
|
r
|
test_binary_single.R
|
# Unit tests for hmi's single-level binary imputation (imp_binary_single).
context("binary_single")
library(testthat)
library(hmi)
library(mice)
# Fixed seed so the sampled missingness patterns are reproducible.
set.seed(123)
# Numeric 0/1 response and character "A"/"B" response, both containing NAs.
y_imp <- sample(c(0, 1, NA), size = 150, replace = TRUE)
y_imp2 <- sample(c("A", "B", NA), size = 150, replace = TRUE)
# Design matrix: intercept column plus the four numeric iris predictors.
X_imp <- cbind(1, iris[, 1:4])
#test_check("hmi")
test_that("binary_single returns plausible values", {
  # With pvalue = 1 no predictors are dropped; the imputed vector must only
  # contain the two observed categories (0/1, resp. "A"/"B") -- no NAs and
  # no values outside the observed support.
  expect_equal(unique(imp_binary_single(y_imp = y_imp,
  X_imp = X_imp,
  pvalue = 1)$y_ret), c(0, 1))
  expect_equal(sort(as.character(unique(imp_binary_single(y_imp = y_imp2,
  X_imp = X_imp,
  pvalue = 1)$y_ret))), c("A", "B"))
})
|
5a306bb094f0a9483b86faae18f0446d2356e1f5
|
184180d341d2928ab7c5a626d94f2a9863726c65
|
/issuestests/SpatialEpi/man/binomialLogLkhd.Rd
|
51099b4faaff8ebf088a74d98fc2d920ff6c8782
|
[] |
no_license
|
akhikolla/RcppDeepStateTest
|
f102ddf03a22b0fc05e02239d53405c8977cbc2b
|
97e73fe4f8cb0f8e5415f52a2474c8bc322bbbe5
|
refs/heads/master
| 2023-03-03T12:19:31.725234
| 2021-02-12T21:50:12
| 2021-02-12T21:50:12
| 254,214,504
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 687
|
rd
|
binomialLogLkhd.Rd
|
\name{binomialLogLkhd}
\alias{binomialLogLkhd}
\title{Compute Binomial Likelihoods}
\description{Compute binomial likelihood ratio test statistic for Kulldorff method}
\usage{binomialLogLkhd(cz, nz, N, C)}
\arguments{
\item{cz}{count inside zone}
\item{nz}{expected count inside zone}
\item{N}{total expected count in region}
\item{C}{total number of cases in region}
}
\value{Binomial likelihood ratio test statistic}
\references{ Kulldorff M. and Nagarwalla N. (1995) Spatial disease clusters: Detection and Inference.\emph{Statistics in Medicine}, \bold{14}, 799--810.}
\author{Albert Y. Kim}
\seealso{\code{\link{poissonLogLkhd}}, \code{\link{kulldorff}}}
\keyword{internal}
|
5678eedf2d3ee288e53c1b24919ae308952191b1
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/s2dverification/R/ConfigApplyMatchingEntries.R
|
ae976662923794df22ef1ffcbf9d0b14d4fe2349
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,647
|
r
|
ConfigApplyMatchingEntries.R
|
# Apply the s2dverification configuration entries that match a variable name
# and the given experiment/observation dataset names, and resolve the
# resulting dataset information (paths, NetCDF variable name, suffix, value
# range) via .ConfigGetDatasetInfo().
#
# configuration: configuration object (with $experiments, $observations and
#                $definitions components).
# var:           variable name matched against each entry's variable pattern.
# exp, obs:      vectors of experiment / observation dataset names.
# show_entries:  if TRUE, print the table of matching entries per dataset.
# show_result:   if TRUE, print the resolved path/variable values.
# Stops with an error when a dataset/variable pair matches no entry.
# Returns (invisibly) list(exp_info = ..., obs_info = ...).
ConfigApplyMatchingEntries <- function(configuration, var, exp = NULL, obs = NULL, show_entries = FALSE, show_result = TRUE) {
  ## Function to tell if a regexpr() match is a complete match to a specified name
  isFullMatch <- function(x, name) {
    ifelse(x > 0 && attributes(x)$match.length == nchar(name), TRUE, FALSE)
  }
  # Indices (over the entries flattened across all four priority levels) of
  # experiment entries whose variable pattern (entry field 2) fully matches
  # 'var'. Entry patterns are regular expressions; a partial match does not
  # count.
  var_entries_in_exps <- c()
  if (length(unlist(configuration$experiments, recursive = FALSE)) > 0) {
    var_entries_in_exps <- which(unlist(lapply(lapply(as.list(unlist(lapply(configuration$experiments, lapply, "[[", 2))), regexpr, var), isFullMatch, var) > 0))
  }
  # Same for observation entries.
  var_entries_in_obs <- c()
  if (length(unlist(configuration$observations, recursive = FALSE)) > 0) {
    var_entries_in_obs <- which(unlist(lapply(lapply(as.list(unlist(lapply(configuration$observations, lapply, "[[", 2))), regexpr, var), isFullMatch, var) > 0))
  }
  # For each requested experiment: intersect the entries matching the dataset
  # name (entry field 1) with those matching the variable, then resolve and
  # optionally display the combined dataset information.
  exp_info <- list()
  jmod <- 1
  for (mod in exp) {
    mod_var_matching_entries <- mod_var_matching_indices <- mod_var_matching_entries_levels <- c()
    if (length(unlist(configuration$experiments, recursive = FALSE)) > 0) {
      mod_entries_in_exps <- which(unlist(lapply(lapply(unlist(lapply(configuration$experiments, lapply, "[[", 1), recursive = FALSE), regexpr, mod), isFullMatch, mod)))
      if (length(mod_entries_in_exps) > 0) {
        mod_var_matching_indices <- intersect(var_entries_in_exps, mod_entries_in_exps)
        mod_var_matching_entries <- unlist(configuration$experiments, recursive = FALSE)[mod_var_matching_indices]
        # Record which of the 4 priority levels each matching entry came from.
        exps_levels <- lapply(as.list(1:4), f <- function(x) {x <- array(x, length(configuration$experiments[[x]]))})
        mod_var_matching_entries_levels <- unlist(exps_levels)[intersect(var_entries_in_exps, mod_entries_in_exps)]
      }
    }
    if (length(mod_var_matching_entries) == 0) {
      stop(paste('Error: There are no matching entries in the configuration file for the experiment', mod, 'and the variable', var,
      '. Please check the configuration file.)'))
    } else {
      if (show_entries) {
        # Banner + table of the matched entries.
        header <- paste0("# Matching entries for experiment '", exp[jmod], "' and variable '", var, "' #\n")
        cat(paste0(paste(rep("#", nchar(header) - 1), collapse = ''), "\n"))
        cat(header)
        cat(paste0(paste(rep("#", nchar(header) - 1), collapse = ''), "\n"))
        ConfigShowTable(list(experiments = list(mod_var_matching_entries)), 'experiments', mod_var_matching_indices)
        cat("\n")
      }
      # Merge the matched entries into one dataset-info record.
      result <- .ConfigGetDatasetInfo(mod_var_matching_entries, 'experiments')
      if (show_result) {
        cat(paste0("The result of applying the matching entries to experiment name '", exp[jmod], "' and variable name '", var, "' is:\n"))
        # Substitute $VAR_NAME$/$EXP_NAME$ placeholders before display.
        configuration$definitions[["VAR_NAME"]] <- var
        configuration$definitions[["EXP_NAME"]] <- exp[jmod]
        fields <- c("MAIN_PATH: ", "FILE_PATH: ", "NC_VAR_NAME: ", "SUFFIX: ", "VAR_MIN: ", "VAR_MAX: ")
        values <- lapply(result, lapply, function (x) .ConfigReplaceVariablesInString(x, configuration$definitions, TRUE))
        lapply(paste0(fields, unlist(values), "\n"), cat)
        cat("\n")
      }
      exp_info <- c(exp_info, list(result))
    }
    jmod <- jmod + 1
  }
  # Same procedure for each requested observation dataset.
  obs_info <- list()
  jobs <- 1
  for (ref in obs) {
    ref_var_matching_entries <- ref_var_matching_indices <- ref_var_matching_entries_levels <- c()
    if (length(unlist(configuration$observations, recursive = FALSE)) > 0) {
      ref_entries_in_obs <- which(unlist(lapply(lapply(unlist(lapply(configuration$observations, lapply, "[[", 1), recursive = FALSE), regexpr, ref), isFullMatch, ref)))
      if (length(ref_entries_in_obs) > 0) {
        ref_var_matching_indices <- intersect(var_entries_in_obs, ref_entries_in_obs)
        ref_var_matching_entries <- unlist(configuration$observations, recursive = FALSE)[ref_var_matching_indices]
        obs_levels <- lapply(as.list(1:4), f <- function(x) {x <- array(x, length(configuration$observations[[x]]))})
        ref_var_matching_entries_levels <- unlist(obs_levels)[intersect(var_entries_in_obs, ref_entries_in_obs)]
      }
    }
    if (length(ref_var_matching_entries) == 0) {
      stop(paste('Error: There are no matching entries in the configuration file for the observation', ref, 'and the variable', var,
      '. Please check the configuration file.)'))
    } else {
      if (show_entries) {
        header <- paste0("# Matching entries for observation '", obs[jobs], "' and variable '", var, "' #\n")
        cat(paste0(paste(rep("#", nchar(header) - 1), collapse = ''), "\n"))
        cat(header)
        cat(paste0(paste(rep("#", nchar(header) - 1), collapse = ''), "\n"))
        ConfigShowTable(list(observations = list(ref_var_matching_entries)), 'observations', ref_var_matching_indices)
        cat("\n")
      }
      result <- .ConfigGetDatasetInfo(ref_var_matching_entries, 'observations')
      if (show_result) {
        cat(paste0("The result of applying the matching entries to observation name '", obs[jobs], "' and variable name '", var, "' is:\n"))
        configuration$definitions[['VAR_NAME']] <- var
        configuration$definitions[["OBS_NAME"]] <- obs[jobs]
        fields <- c("MAIN_PATH: ", "FILE_PATH: ", "NC_VAR_NAME: ", "SUFFIX: ", "VAR_MIN: ", "VAR_MAX: ")
        values <- lapply(result, lapply, function (x) .ConfigReplaceVariablesInString(x, configuration$definitions, TRUE))
        lapply(paste0(fields, unlist(values), "\n"), cat)
        cat("\n")
      }
      obs_info <- c(obs_info, list(result))
    }
    jobs <- jobs + 1
  }
  invisible(list(exp_info = exp_info, obs_info = obs_info))
}
|
a19f31d7c42618633cf4da9c30927048770be22a
|
efa60dd053fbeb2c176315ee269eae9f2ecb1a58
|
/enseignements/rcode/xgboost.R
|
014767664f1bb3ab7ae70b688cd0d9cd235766ef
|
[
"MIT"
] |
permissive
|
masedki/masedki.github.io
|
b5e5f5ac2fc9d2241b1b022560fb1b63fe067f26
|
ee79edc83016714151ebf2cddcf951e1da214f2e
|
refs/heads/master
| 2023-07-10T05:19:27.703327
| 2023-07-07T07:35:32
| 2023-07-07T07:35:32
| 89,248,820
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,698
|
r
|
xgboost.R
|
## Course script: regression trees (CART) vs gradient boosting (xgboost)
## on the insurance charges data set.
## NOTE(review): setwd()/rm(list = ls()) are session-specific anti-patterns;
## they are kept because insurance.rda is loaded through a relative path.
setwd("~/Dropbox/enseignement/M1/supports/Rscripts")
rm(list = ls())
load("insurance.rda")
## library() rather than require(): a missing package should fail loudly.
library(rpart)
library(rpart.plot)
library(caret)
library(doParallel)
library(xgboost)
## Quick look at the data
summary(insurance)
## 75/25 train/test split (seed fixed for reproducibility)
set.seed(11)
train <- sample(seq_len(nrow(insurance)), round(0.75 * nrow(insurance)))
insurance.tr <- insurance[train, ]
insurance.te <- insurance[-train, ]
## CART without pruning
cart.0 <- rpart(charges ~ .,
                data = insurance.tr,
                control = rpart.control(minsplit = 7, cp = 0, xval = 5))
rpart.plot(cart.0)
pred.0 <- predict(cart.0, insurance.te)
sqrt(mean((insurance.te$charges - pred.0)^2))   # test RMSE
plotcp(cart.0)
which.min(cart.0$cptable[, "xerror"])
cart.0$cptable
## CART pruned at the cp minimising cross-validated error
cart.pruned <- prune(cart.0, cp = cart.0$cptable[which.min(cart.0$cptable[, "xerror"]), "CP"])
rpart.plot(cart.pruned)
pred.pruned <- predict(cart.pruned, insurance.te)
sqrt(mean((insurance.te$charges - pred.pruned)^2))
## xgboost
?xgboost
y.tr <- insurance.tr[, 7]
x.tr <- data.matrix(insurance.tr[, -7])
y.te <- insurance.te[, 7]
x.te <- data.matrix(insurance.te[, -7])
boosted <- xgboost(data = x.tr, label = y.tr,
                   objective = "reg:linear", # squared-error loss (deprecated alias of "reg:squarederror" in recent xgboost)
                   booster = "gbtree",       # weak learner: a tree
                   max.depth = 2,            # depth of each weak learner
                   eta = 1,                  # shrinkage (regularisation) parameter, "lambda" in the lecture
                   nrounds = 5)              # number of boosting iterations
## Tuning with caret (random search over the xgbTree grid)
fitControl <- trainControl(method = "repeatedcv", number = 3,
                           repeats = 2, search = "random")
# boost.grid = expand.grid(eta = c(0.1, 0.2, 0.3),
#                          nrounds = 10*(5:20),
#                          max_depth = c(2, 3, 4, 5),
#                          subsample = 1,
#                          min_child_weight = 1.,
#                          colsample_bytree = 1,
#                          gamma = 0.)
#cl <- makePSOCKcluster(7)
#registerDoParallel(cl)
boosted.cv <- train(x.tr,
                    y.tr,
                    method = "xgbTree",
                    trControl = fitControl)
#tuneGrid = boost.grid)
#stopCluster(cl)
plot(boosted.cv)
boosted.cv$bestTune
pred.boost <- predict(boosted.cv, x.te)
sqrt(mean((y.te - pred.boost)^2))
## Optimal parameters according to Etienne
boost.opt <- xgboost(data = x.tr, label = y.tr,
                     objective = "reg:linear",
                     booster = "gbtree",
                     max.depth = 9,
                     eta = 0.1111882,
                     nrounds = 913)
pred.boost <- predict(boost.opt, x.te)
sqrt(mean((y.te - pred.boost)^2))
|
a20424ef3e73f44cb485f17581ff47eb2cf31404
|
9e1d5eaa04362bd5c2669b62cef8da6e5f99586f
|
/project-prototype/dataprocessing.R
|
2ca2c51f1091668b01c8d2a038ab76be4cf57989
|
[] |
no_license
|
deekshachugh/msan622
|
6e4f672bad23d28ebc5716e9ad59fead9a36aea4
|
49ba336831cd3d75845c0504b39bc2d67eaf307e
|
refs/heads/master
| 2021-01-21T03:50:37.782150
| 2014-05-16T06:03:58
| 2014-05-16T06:03:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,064
|
r
|
dataprocessing.R
|
# Join daily weather observations with airport latitude/longitude data and a
# city-name/city-code mapping, then write the merged table out for the
# project prototype. NOTE(review): all file paths are machine-specific
# absolute paths -- this script only runs on the original author's machine.
data <- read.csv("/home/deeksha/github/msan622/project-dataset/COmpleteweatherdata.csv")
head(data)
colnames(data) <- c("Date", "Temperature", "Dew Point Temperature","Precipitation","Humidity","Wind Speed","Percent Cloud Cover", "City")
# Airport coordinates; the fourth column is unused (named "x").
latlongdata <- read.csv("/home/deeksha/Desktop/airports/airports_fsx_icao_lat_lon_alt_feet.txt",header=F)
head(latlongdata)
colnames(latlongdata) <- c("City","Latitude","Longitude","x")
library(plyr)
# Left join on "City": keeps every weather row, adds Latitude/Longitude.
joineddata1 <- join(data, latlongdata[,1:3], by = "City")
head(joineddata1)
mapping <- read.csv("/media/deeksha/e/Deeksha/Dropbox/Coursework/PracticumIII/Data/MappingCityCOde.csv",header =F)
head(mapping)
names(mapping)<-c("CityCode","City")
library(plyr)
# Second join attaches the city code for each city name.
joineddata <- join(joineddata1, mapping[,1:2], by = "City")
head(joineddata)
#joineddata <- joineddata[,c(1:7,9:11)]
ncol(joineddata)
# NOTE(review): columns 8 and 11 are assumed to hold the code and the mapped
# city name after the two joins -- re-verify if the input schemas change.
colnames(joineddata)[8] <- "CityCode"
colnames(joineddata)[11] <- "City"
# Drop rows whose city could not be mapped to a code.
joineddata <- joineddata[!is.na(joineddata$City),]
write.csv(joineddata,"/home/deeksha/github/msan622/project-prototype/weatherdata.csv")
summary(joineddata)
155f75e448a195e0aef9e2220fb93a52bef4d63a
|
01b8fa708e8e0318871d0ee7b4155ae35d64dd9a
|
/R/rd_sens_cutoff.R
|
25ab22e4d0ef98b3f8f6df95ef06ced38e9123be
|
[] |
no_license
|
felixthoemmes/rddapp
|
b313f32dd89248de26b173be077ac28ea90bf022
|
f81091ab1978c1ee0a50f7608a4a42ae56f6e4b1
|
refs/heads/main
| 2023-04-12T23:46:30.030859
| 2023-04-07T01:49:56
| 2023-04-07T01:49:56
| 119,074,922
| 10
| 4
| null | 2022-01-31T14:50:54
| 2018-01-26T16:24:45
|
HTML
|
UTF-8
|
R
| false
| false
| 2,423
|
r
|
rd_sens_cutoff.R
|
#' Cutoff Sensitivity Simulation for Regression Discontinuity
#'
#' \code{rd_sens_cutoff} refits the supplied model with varying cutoff(s).
#' All other aspects of the model, such as the automatically calculated bandwidth, are held constant.
#'
#' @param object An object returned by \code{rd_est} or \code{rd_impute}.
#' @param cutoffs A numeric vector of cutoff values to be used for refitting
#'   an \code{rd} object.
#'
#' @return \code{rd_sens_cutoff} returns a dataframe containing the estimate \code{est} and standard error \code{se}
#'   for each cutoff value (\code{A1}). Column \code{A1} contains varying cutoffs
#'   on the assignment variable. The \code{model} column contains the parametric model (linear, quadratic, or cubic) or
#'   non-parametric bandwidth setting (Imbens-Kalyanaraman 2012 optimal, half, or double) used for estimation.
#'
#' @references Imbens, G., Kalyanaraman, K. (2012).
#'   Optimal bandwidth choice for the regression discontinuity estimator.
#'   The Review of Economic Studies, 79(3), 933-959.
#'   \url{https://academic.oup.com/restud/article/79/3/933/1533189}.
#'
#' @export
#'
#' @examples
#' set.seed(12345)
#' x <- runif(1000, -1, 1)
#' cov <- rnorm(1000)
#' y <- 3 + 2 * x + 3 * cov + 10 * (x >= 0) + rnorm(1000)
#' rd <- rd_est(y ~ x | cov, t.design = "geq")
#' rd_sens_cutoff(rd, seq(-.5, .5, length.out = 10))
rd_sens_cutoff <- function(object, cutoffs) {
  if (!inherits(object, "rd"))
    stop("Not an object of class rd.")
  # Refit once per candidate cutoff by editing the stored model call and
  # re-evaluating it. The bandwidth is pinned to the original IK-optimal
  # value ("Opt") so only the cutoff varies; covariate estimation is
  # disabled for speed.
  # NOTE(review): eval.parent(object$call, 3) walks a fixed number of frames
  # back past the lapply()/anonymous-function calls to reach the caller's
  # environment -- this frame count is load-bearing; do not restructure the
  # iteration without re-checking it.
  sim_results <- lapply(cutoffs,
    function(cutoff) {
      object$call$cutpoint <- cutoff
      object$call$est.cov <- FALSE
      object$call$bw <- object$bw["Opt"]
      new_model <- eval.parent(object$call, 3)
      return(
        data.frame(
          est = new_model$est,
          se = new_model$se,
          A1 = cutoff,
          # One row per specification fitted by rd_est().
          model = c("linear", "quadratic", "cubic", "optimal", "half", "double"),
          stringsAsFactors = FALSE,
          row.names = 1:6)
      )
    }
  )
  combined_sim_results <- do.call(rbind.data.frame, sim_results)
  # Append the estimates from the original (unmodified) fit; when no cutoff
  # was recorded in the call, it defaults to 0.
  original_results <- data.frame(
    est = object$est,
    se = object$se,
    A1 = if (is.null(object$call$cutpoint)) 0 else eval.parent(object$call$cutpoint),
    model = c("linear", "quadratic", "cubic", "optimal", "half", "double"),
    stringsAsFactors = FALSE,
    row.names = 1:6)
  return(rbind(combined_sim_results, original_results))
}
|
bb8ba4055b3d45e6a187e43d5e93cdef6be15354
|
a56e7a0ce097b8da6ae95f750b5bf1a6bbb251c5
|
/r/tests/testthat/test_trading_api.R
|
128205dcff8814edb09fb75b8f28bfd1e4bab268
|
[] |
no_license
|
harshabakku/deribit_options
|
d08f7a61386f6a047ec0c4726d882fead8fbb7a7
|
4f344f2fbf0b761cc5378d852a38d33849223b53
|
refs/heads/master
| 2022-12-08T20:58:47.873520
| 2020-09-01T16:38:04
| 2020-09-01T16:38:04
| 292,051,793
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,327
|
r
|
test_trading_api.R
|
# Auto-generated by openapi-generator (https://openapi-generator.tech) for
# the Deribit v2 TradingApi (base path https://www.deribit.com/api/v2).
# Each test_that() below is a placeholder stub: it records which endpoint it
# should exercise but contains no expectations yet, since filling them in
# requires credentials and a sandbox environment.
context("Test TradingApi")

api.instance <- TradingApi$new()

test_that("PrivateBuyGet", {
  # /private/buy -- place a buy order for an instrument.
  # Required: instrument.name, amount. Optional: type, label, price,
  # time.in.force, max.show, post.only, reduce.only, stop.price, trigger,
  # advanced. Returns an object.
  # uncomment below to test the operation
  #expect_equal(result, "EXPECTED_RESULT")
})

test_that("PrivateCancelAllByCurrencyGet", {
  # /private/cancel_all_by_currency -- cancel all orders for a currency,
  # optionally filtered by instrument kind and/or order type.
  # uncomment below to test the operation
  #expect_equal(result, "EXPECTED_RESULT")
})

test_that("PrivateCancelAllByInstrumentGet", {
  # /private/cancel_all_by_instrument -- cancel all orders for an instrument,
  # optionally filtered by order type.
  # uncomment below to test the operation
  #expect_equal(result, "EXPECTED_RESULT")
})

test_that("PrivateCancelAllGet", {
  # /private/cancel_all -- cancel all orders and stop orders across all
  # currencies and instrument kinds.
  # uncomment below to test the operation
  #expect_equal(result, "EXPECTED_RESULT")
})

test_that("PrivateCancelGet", {
  # /private/cancel -- cancel a single order by its order id.
  # uncomment below to test the operation
  #expect_equal(result, "EXPECTED_RESULT")
})
test_that("PrivateClosePositionGet", {
# tests for PrivateClosePositionGet
# base path: https://www.deribit.com/api/v2
# Makes closing position reduce only order .
# @param character instrument.name Instrument name
# @param character type The order type
# @param numeric price Optional price for limit order. (optional)
# @return [object]
# uncomment below to test the operation
#expect_equal(result, "EXPECTED_RESULT")
})
test_that("PrivateEditGet", {
# tests for PrivateEditGet
# base path: https://www.deribit.com/api/v2
# Change price, amount and/or other properties of an order.
# @param character order.id The order id
# @param numeric amount It represents the requested order size. For perpetual and futures the amount is in USD units, for options it is amount of corresponding cryptocurrency contracts, e.g., BTC or ETH
# @param numeric price <p>The order price in base currency.</p> <p>When editing an option order with advanced=usd, the field price should be the option price value in USD.</p> <p>When editing an option order with advanced=implv, the field price should be a value of implied volatility in percentages. For example, price=100, means implied volatility of 100%</p>
# @param character post.only <p>If true, the order is considered post-only. If the new price would cause the order to be filled immediately (as taker), the price will be changed to be just below the bid.</p> <p>Only valid in combination with time_in_force=`\"good_til_cancelled\"`</p> (optional)
# @param character advanced Advanced option order type. If you have posted an advanced option order, it is necessary to re-supply this parameter when editing it (Only for options) (optional)
# @param numeric stop.price Stop price, required for stop limit orders (Only for stop orders) (optional)
# @return [object]
# uncomment below to test the operation
#expect_equal(result, "EXPECTED_RESULT")
})
test_that("PrivateGetMarginsGet", {
# tests for PrivateGetMarginsGet
# base path: https://www.deribit.com/api/v2
# Get margins for given instrument, amount and price.
# @param character instrument.name Instrument name
# @param numeric amount Amount, integer for future, float for option. For perpetual and futures the amount is in USD units, for options it is amount of corresponding cryptocurrency contracts, e.g., BTC or ETH.
# @param numeric price Price
# @return [object]
# uncomment below to test the operation
#expect_equal(result, "EXPECTED_RESULT")
})
test_that("PrivateGetOpenOrdersByCurrencyGet", {
# tests for PrivateGetOpenOrdersByCurrencyGet
# base path: https://www.deribit.com/api/v2
# Retrieves list of user's open orders.
# @param character currency The currency symbol
# @param character kind Instrument kind, if not provided instruments of all kinds are considered (optional)
# @param character type Order type, default - `all` (optional)
# @return [object]
# uncomment below to test the operation
#expect_equal(result, "EXPECTED_RESULT")
})
test_that("PrivateGetOpenOrdersByInstrumentGet", {
# tests for PrivateGetOpenOrdersByInstrumentGet
# base path: https://www.deribit.com/api/v2
# Retrieves list of user's open orders within given Instrument.
# @param character instrument.name Instrument name
# @param character type Order type, default - `all` (optional)
# @return [object]
# uncomment below to test the operation
#expect_equal(result, "EXPECTED_RESULT")
})
test_that("PrivateGetOrderHistoryByCurrencyGet", {
# tests for PrivateGetOrderHistoryByCurrencyGet
# base path: https://www.deribit.com/api/v2
# Retrieves history of orders that have been partially or fully filled.
# @param character currency The currency symbol
# @param character kind Instrument kind, if not provided instruments of all kinds are considered (optional)
# @param integer count Number of requested items, default - `20` (optional)
# @param integer offset The offset for pagination, default - `0` (optional)
# @param character include.old Include in result orders older than 2 days, default - `false` (optional)
# @param character include.unfilled Include in result fully unfilled closed orders, default - `false` (optional)
# @return [object]
# uncomment below to test the operation
#expect_equal(result, "EXPECTED_RESULT")
})
test_that("PrivateGetOrderHistoryByInstrumentGet", {
# tests for PrivateGetOrderHistoryByInstrumentGet
# base path: https://www.deribit.com/api/v2
# Retrieves history of orders that have been partially or fully filled.
# @param character instrument.name Instrument name
# @param integer count Number of requested items, default - `20` (optional)
# @param integer offset The offset for pagination, default - `0` (optional)
# @param character include.old Include in result orders older than 2 days, default - `false` (optional)
# @param character include.unfilled Include in result fully unfilled closed orders, default - `false` (optional)
# @return [object]
# uncomment below to test the operation
#expect_equal(result, "EXPECTED_RESULT")
})
test_that("PrivateGetOrderMarginByIdsGet", {
# tests for PrivateGetOrderMarginByIdsGet
# base path: https://www.deribit.com/api/v2
# Retrieves initial margins of given orders
# @param character ids Ids of orders
# @return [object]
# uncomment below to test the operation
#expect_equal(result, "EXPECTED_RESULT")
})
test_that("PrivateGetOrderStateGet", {
# tests for PrivateGetOrderStateGet
# base path: https://www.deribit.com/api/v2
# Retrieve the current state of an order.
# @param character order.id The order id
# @return [object]
# uncomment below to test the operation
#expect_equal(result, "EXPECTED_RESULT")
})
test_that("PrivateGetSettlementHistoryByCurrencyGet", {
# tests for PrivateGetSettlementHistoryByCurrencyGet
# base path: https://www.deribit.com/api/v2
# Retrieves settlement, delivery and bankruptcy events that have affected your account.
# @param character currency The currency symbol
# @param character type Settlement type (optional)
# @param integer count Number of requested items, default - `20` (optional)
# @return [object]
# uncomment below to test the operation
#expect_equal(result, "EXPECTED_RESULT")
})
test_that("PrivateGetSettlementHistoryByInstrumentGet", {
# tests for PrivateGetSettlementHistoryByInstrumentGet
# base path: https://www.deribit.com/api/v2
# Retrieves public settlement, delivery and bankruptcy events filtered by instrument name
# @param character instrument.name Instrument name
# @param character type Settlement type (optional)
# @param integer count Number of requested items, default - `20` (optional)
# @return [object]
# uncomment below to test the operation
#expect_equal(result, "EXPECTED_RESULT")
})
test_that("PrivateGetUserTradesByCurrencyAndTimeGet", {
# tests for PrivateGetUserTradesByCurrencyAndTimeGet
# base path: https://www.deribit.com/api/v2
# Retrieve the latest user trades that have occurred for instruments in a specific currency symbol and within given time range.
# @param character currency The currency symbol
# @param integer start.timestamp The earliest timestamp to return result for
# @param integer end.timestamp The most recent timestamp to return result for
# @param character kind Instrument kind, if not provided instruments of all kinds are considered (optional)
# @param integer count Number of requested items, default - `10` (optional)
# @param character include.old Include trades older than 7 days, default - `false` (optional)
# @param character sorting Direction of results sorting (`default` value means no sorting, results will be returned in order in which they left the database) (optional)
# @return [object]
# uncomment below to test the operation
#expect_equal(result, "EXPECTED_RESULT")
})
test_that("PrivateGetUserTradesByCurrencyGet", {
# tests for PrivateGetUserTradesByCurrencyGet
# base path: https://www.deribit.com/api/v2
# Retrieve the latest user trades that have occurred for instruments in a specific currency symbol.
# @param character currency The currency symbol
# @param character kind Instrument kind, if not provided instruments of all kinds are considered (optional)
# @param character start.id The ID number of the first trade to be returned (optional)
# @param character end.id The ID number of the last trade to be returned (optional)
# @param integer count Number of requested items, default - `10` (optional)
# @param character include.old Include trades older than 7 days, default - `false` (optional)
# @param character sorting Direction of results sorting (`default` value means no sorting, results will be returned in order in which they left the database) (optional)
# @return [object]
# uncomment below to test the operation
#expect_equal(result, "EXPECTED_RESULT")
})
test_that("PrivateGetUserTradesByInstrumentAndTimeGet", {
# tests for PrivateGetUserTradesByInstrumentAndTimeGet
# base path: https://www.deribit.com/api/v2
# Retrieve the latest user trades that have occurred for a specific instrument and within given time range.
# @param character instrument.name Instrument name
# @param integer start.timestamp The earliest timestamp to return result for
# @param integer end.timestamp The most recent timestamp to return result for
# @param integer count Number of requested items, default - `10` (optional)
# @param character include.old Include trades older than 7 days, default - `false` (optional)
# @param character sorting Direction of results sorting (`default` value means no sorting, results will be returned in order in which they left the database) (optional)
# @return [object]
# uncomment below to test the operation
#expect_equal(result, "EXPECTED_RESULT")
})
test_that("PrivateGetUserTradesByInstrumentGet", {
# tests for PrivateGetUserTradesByInstrumentGet
# base path: https://www.deribit.com/api/v2
# Retrieve the latest user trades that have occurred for a specific instrument.
# @param character instrument.name Instrument name
# @param integer start.seq The sequence number of the first trade to be returned (optional)
# @param integer end.seq The sequence number of the last trade to be returned (optional)
# @param integer count Number of requested items, default - `10` (optional)
# @param character include.old Include trades older than 7 days, default - `false` (optional)
# @param character sorting Direction of results sorting (`default` value means no sorting, results will be returned in order in which they left the database) (optional)
# @return [object]
# uncomment below to test the operation
#expect_equal(result, "EXPECTED_RESULT")
})
test_that("PrivateGetUserTradesByOrderGet", {
# tests for PrivateGetUserTradesByOrderGet
# base path: https://www.deribit.com/api/v2
# Retrieve the list of user trades that was created for given order
# @param character order.id The order id
# @param character sorting Direction of results sorting (`default` value means no sorting, results will be returned in order in which they left the database) (optional)
# @return [object]
# uncomment below to test the operation
#expect_equal(result, "EXPECTED_RESULT")
})
test_that("PrivateSellGet", {
# tests for PrivateSellGet
# base path: https://www.deribit.com/api/v2
# Places a sell order for an instrument.
# @param character instrument.name Instrument name
# @param numeric amount It represents the requested order size. For perpetual and futures the amount is in USD units, for options it is amount of corresponding cryptocurrency contracts, e.g., BTC or ETH
# @param character type The order type, default: `\"limit\"` (optional)
# @param character label user defined label for the order (maximum 32 characters) (optional)
# @param numeric price <p>The order price in base currency (Only for limit and stop_limit orders)</p> <p>When adding order with advanced=usd, the field price should be the option price value in USD.</p> <p>When adding order with advanced=implv, the field price should be a value of implied volatility in percentages. For example, price=100, means implied volatility of 100%</p> (optional)
# @param character time.in.force <p>Specifies how long the order remains in effect. Default `\"good_til_cancelled\"`</p> <ul> <li>`\"good_til_cancelled\"` - unfilled order remains in order book until cancelled</li> <li>`\"fill_or_kill\"` - execute a transaction immediately and completely or not at all</li> <li>`\"immediate_or_cancel\"` - execute a transaction immediately, and any portion of the order that cannot be immediately filled is cancelled</li> </ul> (optional)
# @param numeric max.show Maximum amount within an order to be shown to other customers, `0` for invisible order (optional)
# @param character post.only <p>If true, the order is considered post-only. If the new price would cause the order to be filled immediately (as taker), the price will be changed to be just below the bid.</p> <p>Only valid in combination with time_in_force=`\"good_til_cancelled\"`</p> (optional)
# @param character reduce.only If `true`, the order is considered reduce-only which is intended to only reduce a current position (optional)
# @param numeric stop.price Stop price, required for stop limit orders (Only for stop orders) (optional)
# @param character trigger Defines trigger type, required for `\"stop_limit\"` order type (optional)
# @param character advanced Advanced option order type. (Only for options) (optional)
# @return [object]
# uncomment below to test the operation
#expect_equal(result, "EXPECTED_RESULT")
})
|
7eb174d9f8f5ddd70fe2506da2b89f7ff030f10b
|
c104b6569f1bc152b2e05c8cbbd91a5d88962be2
|
/man-roxygen/file-plural.R
|
90c69e609614bb579febe2dc133c42c20fb8c12f
|
[
"MIT"
] |
permissive
|
tidyverse/googledrive
|
46057f3ea96ae0fc47da30fd5d38d35e01a67d2a
|
74a69a3a1fd66e930802ce6e461538c9e15f7c86
|
refs/heads/main
| 2023-09-01T18:13:56.084465
| 2023-06-27T15:59:01
| 2023-06-27T15:59:01
| 89,535,184
| 317
| 60
|
NOASSERTION
| 2023-06-27T06:13:13
| 2017-04-26T23:22:32
|
R
|
UTF-8
|
R
| false
| false
| 219
|
r
|
file-plural.R
|
#' @param file Something that identifies the file(s) of interest on your Google
#' Drive. Can be a character vector of names/paths, a character vector of file
#' ids or URLs marked with [as_id()], or a [`dribble`].
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.