blob_id
stringlengths
40
40
directory_id
stringlengths
40
40
path
stringlengths
2
327
content_id
stringlengths
40
40
detected_licenses
listlengths
0
91
license_type
stringclasses
2 values
repo_name
stringlengths
5
134
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
branch_name
stringclasses
46 values
visit_date
timestamp[us]date
2016-08-02 22:44:29
2023-09-06 08:39:28
revision_date
timestamp[us]date
1977-08-08 00:00:00
2023-09-05 12:13:49
committer_date
timestamp[us]date
1977-08-08 00:00:00
2023-09-05 12:13:49
github_id
int64
19.4k
671M
star_events_count
int64
0
40k
fork_events_count
int64
0
32.4k
gha_license_id
stringclasses
14 values
gha_event_created_at
timestamp[us]date
2012-06-21 16:39:19
2023-09-14 21:52:42
gha_created_at
timestamp[us]date
2008-05-25 01:21:32
2023-06-28 13:19:12
gha_language
stringclasses
60 values
src_encoding
stringclasses
24 values
language
stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
7
9.18M
extension
stringclasses
20 values
filename
stringlengths
1
141
content
stringlengths
7
9.18M
0a7705938b332cfc59ff41ae5fdb8ac955df91d8
75e6920c6c8d5c44da0c1c44e3dcc5559efc8e47
/R/sim.occ.R
314774a33018de5cd0c228160a40f1dddd227da8
[]
no_license
cran/fossil
111edf36c4fe19cd59d52011c1e621e395813c21
f139d41cea588210fc72eb71910a648d4dab6afd
refs/heads/master
2021-01-10T20:05:22.617612
2020-03-23T10:30:05
2020-03-23T10:30:05
17,696,124
1
0
null
null
null
null
UTF-8
R
false
false
798
r
sim.occ.R
#' Simulate species occurrence/abundance across regions and localities.
#'
#' Builds a species-by-site matrix for `total.species` species over
#' `regions * locs` sites.  A fixed share of species is cosmopolitan
#' (present at every locality); the remainder are endemics split as evenly
#' as possible among regions, with any leftover endemics assigned to the
#' last region.  Each present species receives a log-normal abundance
#' scaled by `avg.abund` and rounded (so a draw may round down to 0).
#'
#' @param total.species total number of species simulated (matrix rows).
#' @param endemics proportion (0-1) of species that are endemic.
#' @param regions number of regions; columns come in blocks of `locs`.
#' @param locs number of localities (columns) per region.
#' @param avg.abund multiplier applied to the log-normal abundance draws.
#' @return a `total.species` x (`regions * locs`) numeric matrix of rounded
#'   abundances; 0 means the species is absent at that locality.
sim.occ <- function(total.species = 100, endemics = 0.1, regions = 3,
                    locs = 30, avg.abund = 1) {
  cosmop <- round(total.species * (1 - endemics))  # cosmopolitan species count
  endem <- total.species - cosmop                  # endemic species count
  end.per.reg <- floor(endem / regions)            # endemics per region
  extras <- endem %% regions                       # leftovers go to last region
  occ <- matrix(0, total.species, regions * locs)
  reg <- 1
  for (i in seq_len(regions * locs)) {
    # Cosmopolitan species occur at every locality (seq_len() is safe at 0).
    occ[seq_len(cosmop), i] <- 1
    # Endemics of the current region occupy a contiguous block of rows.
    strt <- cosmop + 1 + ((reg - 1) * end.per.reg)
    ends <- strt + end.per.reg - 1
    if (reg == regions) ends <- ends + extras
    # Guard against an empty endemic block (e.g. endemics = 0), where the
    # original strt:ends counted DOWNWARDS and indexed out of bounds.
    if (ends >= strt) occ[strt:ends, i] <- 1
    # Convert presence (1) into a rounded log-normal abundance.
    present <- occ[, i] == 1
    occ[present, i] <- round(rlnorm(sum(present), 0, 1) * avg.abund)
    # Move to the next region after each block of `locs` localities.
    if (i %% locs == 0) reg <- reg + 1
  }
  occ
}
ab6d077d631ca21f23afdb78391fdec364411f3f
f8b38e2c2e74081fa2e2b9471b2ddeb0f2fb6e0c
/simfiles/oldFiles/20200907_testInvLength.R
3ebe773c5473a87949e793069e448a691bda0c76
[]
no_license
SaraSchaal/CoreSim_QTL
ef125cd5b5e4d193c33d907b21837a6f18ba2dbc
e1ba94e5f4c09faf1f981de1fa1e7039445f9274
refs/heads/master
2022-05-02T10:30:51.445865
2022-04-12T18:20:01
2022-04-12T18:20:01
138,634,067
1
0
null
null
null
null
UTF-8
R
false
false
3,438
r
20200907_testInvLength.R
#### Test for inversion size issue ####
# Summarise inversion-length output from per-seed simulation runs: read the
# result files for each seed, merge the inversion summary and time-series
# tables, and aggregate inversion length per generation.
#
# NOTE(review): `not.done.seeds` is used below but never defined in this
# script -- it must exist in the calling environment or the first loop
# errors.  Likewise `df.length.average` (used in two aggregate() calls
# below) is never created; `new.length.average` or `df.invAllData` was
# probably intended.  The loop index `i` is also reused after the loops
# when building `df.simParams`, so it silently refers to its last value.

# Previously the seed table was derived from the result file names:
#df.params <- as.data.frame(unique(substr(list.files("./results/Inversion/20200907_testForInvLength",
#pattern = "\\d{13}")[1], 1, 13)))
#colnames(df.params) <- "seed"
df.params <- data.frame(seed = 1)
#folder <- "results/Inversion/20200907_testForInvLength/"
folder <- "results/Inversion/"

## upload datasets
###################################################################################################

## Simulation parameters: one row of parameter values per finished seed.
df.simStats <- NULL
for(i in 1:nrow(df.params)){
  seed <- df.params$seed[i]
  if(seed %in% not.done.seeds){
    # skip seeds whose runs have not finished
  } else {
    simStatsNewFile <- read.table(paste(folder, seed, "_outputSimStats.txt", sep=""), stringsAsFactors = FALSE)
    simStatsNewFile$seed <- seed
    df.simStats <- rbind(df.simStats, simStatsNewFile)
  }
}
# Drop the first column and attach readable parameter names.
df.simStats <- df.simStats[,2:ncol(df.simStats)]
colnames(df.simStats) <- c("mig1", "mig2", "pop1N", "pop2N", "mu_base", "mu_inv", "r", "alpha", "sigmaK", "burnin", "dom", "enVar", "Seed")
unique.params <- df.simStats

## Inversion Through Time: per-generation inversion records per seed.
df.invTime <- NULL
for(i in 1:nrow(df.params)){
  seed <- df.params$seed[i]
  invTimeNewFile <- read.table(paste(folder, seed, "_outputInvTime.txt", sep=""), header = TRUE, stringsAsFactors = FALSE)
  if(nrow(invTimeNewFile) > 0){
    invTimeNewFile$seed <- seed
    df.invTime <- rbind(df.invTime, invTimeNewFile)
  }
}

## Inversion summary information (one row per inversion) per seed.
df.invData <- NULL
for(i in 1:nrow(df.params)){
  seed <- df.params$seed[i]
  invData <- read.table(paste(folder, seed, "_outputInvSumInfo.txt", sep=""), header = TRUE, stringsAsFactors = FALSE)
  if(nrow(invData) > 0){
    invData$seed <- seed
    df.invData <- rbind(df.invData, invData)
  }
}

#############################################################################
## inversion calculations
#############################################################################
#df.invTimeFreq <- subset(df.invTime, subset = df.invTime$freq > 0.05)
df.invData$inv_id <- as.numeric(df.invData$inv_id)
# Keep every time-series row, attaching summary info where available.
df.invAllData <- merge(df.invData, df.invTime, by.x = c("seed", "inv_id"), by.y = c("seed", "inv_id"), all.y = TRUE)
colnames(df.invAllData)[8] <- "sim_gen"
df.invLength.average <- NULL
inv.sims <- subset(unique.params, subset = unique.params$mu_inv > 0)

## average columns of interest
# df.averageSubset <- df.average[,vect.aggCols]
new.length.average <- aggregate(inv_length~sim_gen + seed, data = df.invAllData, FUN = mean)
new.length.SD <- aggregate(inv_length~sim_gen + seed, data = df.invAllData, FUN = sd)
# NOTE(review): `df.length.average` is undefined -- see note at top of file.
new.num.inv <- aggregate(inv_length~sim_gen, data = df.length.average, FUN = length)
new.numQTNs.average <- aggregate(num_qtns~sim_gen, data = df.length.average, FUN = mean)
df.average.inv <- cbind(new.length.average, new.length.SD[,2], new.numQTNs.average[,2], new.num.inv[,2])
# Replicate the simulation parameters alongside the aggregates
# (NOTE(review): `i` here is the leftover value from the last loop above).
df.simParams <- data.frame(mu_inv = rep(inv.sims$mu_inv[i], nrow(df.average.inv)),
                           mig = rep(inv.sims$mig1[i], nrow(df.average.inv)),
                           alpha = rep(inv.sims$alpha[i], nrow(df.average.inv)),
                           sigmaK = rep(inv.sims$sigmaK[i], nrow(df.average.inv)),
                           enVar = rep(inv.sims$enVar[i], nrow(df.average.inv)),
                           mu_base = rep(inv.sims$mu_base[i], nrow(df.average.inv)))
13f00f497db6fcfaf5df50741c0c0443f2dd5171
4cb8a786b8f833fb6fb128cfdcce53674d807e82
/Hydrology/HydroecologyTools/q167_lp3.R
d61d0a68680b4783549e95ae90bdb425c69275fc
[]
no_license
sulochandhungel/Data-Analysis-using-R
c7427ecf83ed5ddb7f2d3856aee113859077288e
63476e22cc309efe9af92bd648531843a1211c4d
refs/heads/master
2023-06-09T08:14:53.563015
2021-06-28T03:54:31
2021-06-28T03:54:31
254,683,739
0
0
null
null
null
null
UTF-8
R
false
false
1,242
r
q167_lp3.R
# Compute the 1.67-year (T = 1.67) flood quantile from a water-year matrix
# using a log-Pearson Type III fit, plus flood-duration statistics.
#
# WYmat: matrix with one column per water year and (nominally) 366 daily
#   flow rows.  NOTE(review): the completeness screen compares values to
#   the *string* "NA" (WYmat[, i] == "NA"); this only makes sense if
#   missing values were read in as literal "NA" text -- confirm against
#   the data source before replacing it with is.na().
#
# Returns a list of five elements:
#   [[1]] q           -- LP3 flow quantile for T = 1.67 years
#   [[2]] flddur      -- mean number of days per year with flow >= q
#   [[3]] slp         -- slope of the flood-duration ~ year linear trend
#   [[4]] pval        -- p-value of that slope
#   [[5]] temp_flddur -- per-year flood-duration matrix (nyears x 1)
q167_lp3.R <- function(WYmat) {
  # LP3 frequency factor K_T for skew `cs` and return period `tyr`;
  # falls back to the normal quantile when the skew is effectively zero.
  lp3ff <- function(cs, tyr) {
    p <- 1 / tyr
    shp <- 4 / (cs * cs)
    if (abs(cs) < 0.00001) {
      qnorm(p)
    } else if (cs > 0) {
      qgamma(p, shp) * cs / 2 - 2 / cs
    } else {
      qgamma(1 - p, shp) * cs / 2 - 2 / cs
    }
  }

  nyr <- dim(WYmat)[2]

  # Annual maxima; a year needs more than 36 non-"NA" days to count.
  temp <- matrix(NA, nrow = nyr, ncol = 1)
  for (i in seq_len(nyr)) {
    if ((366 - length(WYmat[, i][WYmat[, i] == "NA"])) > 36) {
      temp[i, 1] <- max(WYmat[, i], na.rm = TRUE)
    } else {
      temp[i, 1] <- NA
    }
  }
  maxflow <- as.vector(temp)
  y1 <- maxflow[which(maxflow != 0)]  # drop zero maxima (log10 undefined)
  n <- length(y1)

  # LP3 fit in log10 space: mean, sd and skew of the annual maxima.
  y <- log10(y1)
  ybar <- mean(y, na.rm = TRUE)
  sy <- sd(y, na.rm = TRUE)
  Cs <- (n * sum((y - ybar)^3)) / ((n - 1) * (n - 2) * (sy^3))
  KT <- lp3ff(Cs, 1.67)
  q <- 10^(ybar + sy * KT)

  # Flood duration: days per year at or above the quantile q.
  temp_flddur <- matrix(NA, nrow = nyr, ncol = 1)
  for (i in seq_len(nyr)) {
    b <- WYmat[, i][!is.na(WYmat[, i])]
    temp_flddur[i, 1] <- if (length(b) >= 1) length(b[b >= q]) else NA
  }
  flddur <- mean(temp_flddur[, 1], na.rm = TRUE)

  # Linear trend of flood duration over the period of record.
  yrs <- seq_len(nyr)
  lm.flddur <- lm(temp_flddur[, 1] ~ yrs)
  slp <- as.numeric(coef(lm.flddur)[2])
  pval <- as.numeric(summary(lm.flddur)$coefficients[2, 4])

  list(q, flddur, slp, pval, temp_flddur)
}
254e230fb7638d78f9336cd9a178aa8085c3cd33
2813ad438d07f856455d815cdf8e03f469c76ed6
/man/celsiusServer.Rd
cdd2357a9667021aac6bd1bcfcc9c87834a665e8
[]
no_license
cran/celsius
38b0b034cfc3334293b5e2e00b8739cd605b882d
8cf711f41aad02eb1dd041576318be1bd4549ffe
refs/heads/master
2016-09-06T00:30:37.282072
2007-04-25T00:00:00
2007-04-25T00:00:00
null
0
0
null
null
null
null
UTF-8
R
false
false
3,129
rd
celsiusServer.Rd
\name{celsiusServer} %__CUT__% \docType{class} \alias{celsiusServer} \alias{celsiusServer-class} \title{ Object to hold values pertaining to a Celsius Server } \description{ Holds values like the URL needed to reach the server or the default timings for accessing the features and samples for the server etc.}%- maybe also 'usage' for other objects documented here. \section{Creating Objects}{ \code{new('celsiusServer',}\cr \code{celsiusUrl = ...., #URL}\cr \code{platform = ...., #Platform for data retrieval }\cr \code{protocol = ...., #Normalization protocol for the data to be retrieved }\cr \code{stringency = ...., #Stringency setting for annotation retrieval }\cr \code{verbose = ...., #Verbosity preference }\cr \code{transform = ...., #Transform the annotations vector into 0's and 1's }\cr \code{retry = ...., #Number of retry attempts with web server }\cr \code{sampleTime = ...., #estimated time to retrieve a sample}\cr \code{featureTime = ...., #estimated time to retrieve a feature}\cr \code{ )}} \section{Slots}{ \describe{ \item{\code{celsiusUrl}}{ Provide an appropriate URL \code{"character"} to talk to a celsius server } \item{\code{platform}}{ Provide an appropriate affymetrix platform \code{"character"} for data retrieval. The preferred format is the affymetrix format, but you can also enter Bioconductor formatted platform names. Default is 'HG-U133A' } \item{\code{protocol}}{ Provide an appropriate normalization protocol \code{"character"} for the data to be retrieved. Values can be: 'rma','vsn','gcrma','plier' and 'mas5'. Default value is 'rma'. } \item{\code{stringency}}{ Provide an appropriate stringency setting \code{"numeric"}. This is the quality of annotation desired. A higher value results in more NA (un-annotated) values for annotation retrieval. Possible values include: 600,500,400,300,200,100. Default is 500. } \item{\code{verbose}}{ Indicate \code{"logical"} whether to print output to the screen whenever data has been retrieved from the DB.
Default is FALSE. } \item{\code{transform}}{ Indicate \code{"logical"} whether or not to format all annotation mask data into a list of only 0's and 1's (i.e. to list NAs and annotation conflicts as 0's) Default is FALSE. } \item{\code{retry}}{ Provide the number of retry attempts \code{"character"} for the client to attempt to get data from the web server. Default is 3. } \item{\code{sampleTime}}{ Provide an expected wait time \code{"numeric"} to retrieve a sample from this platform (can determine this by using time and the getSampleData() in this library if you REALLY hate waiting) } \item{\code{featureTime}}{ Provide an expected wait time \code{"numeric"} to retrieve a feature from this platform (can determine this by using time and the getFeatureData() in this library if you REALLY hate waiting)} } } \author{ Marc Carlson } \keyword{ classes }
902f28db355b0abb3d2ab0fb7decd15226b3899b
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/rlist/examples/list.extract.Rd.R
1cfab8ee177a25cf956a5707e30469ac0c25f8ae
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
204
r
list.extract.Rd.R
# Extracted example code for rlist::list.extract -- extract a single
# element from a list or vector.
library(rlist)

### Name: list.extract
### Title: Extract an element from a list or vector
### Aliases: list.extract

### ** Examples

x <- list(a=1, b=2, c=3)
list.extract(x, 1)    # extract by position
list.extract(x, 'a')  # extract by name
2edb46e4ca27b718a9872ce8faede8a04649b1dd
7015b80b12f01a1c064cadbf7d8f4c1f84e278e8
/source_file.R
0b487ab79952e45a44bcf5187c5baef2f16f0e02
[]
no_license
MatixR/bachelor_thesis
75f84f06c16f7f80ea25fd5a3a70165315a11b24
5b9fd59fb510d697e690211427b641eb35b7be36
refs/heads/master
2020-03-23T06:39:45.425232
2018-06-27T10:10:10
2018-06-27T10:10:10
null
0
0
null
null
null
null
UTF-8
R
false
false
660
r
source_file.R
##----------------------------------------------------------------------------##
## Source file: loads the packages and helper scripts shared by the analysis.
##----------------------------------------------------------------------------##

# time-series containers
library(xts)
library(zoo)
# extreme-value and volatility modelling
library(SpatialExtremes)
library(rugarch)
library(extRemes)
# string utilities and miscellaneous helpers
library(stringr)
library(Hmisc)
library(vars)
library(stats)

# Project-local helper scripts; paths are relative to the working directory.
source("interpolation_data.R")
source("read_data.R")

##----------------------------------------------------------------------------##
##----------------------------------------------------------------------------##
2059f8ba786afde8975f307f79fd7fc1c0ac448c
45dee4934f377fba8a4fde7030f4741f0b610cb2
/tests/lmer-conv.R
d4b14b8c0bd503ad64a6839282214dfcb6c90f9e
[]
no_license
jknowles/lme4
e7649d74ea5c6ed90686ce5cd2ea3376b7592c50
f8f760a512434199901db78b9269c245cca82e1f
refs/heads/master
2021-01-16T19:50:54.277800
2015-02-01T04:04:03
2015-02-01T04:04:03
30,161,374
1
0
null
2015-02-01T22:00:09
2015-02-01T22:00:08
null
UTF-8
R
false
false
595
r
lmer-conv.R
### lmer() convergence testing / monitoring / ...
## ------------------
### The output of tests here are *not* 'diff'ed (<==> no *.Rout.save file)

library(lme4)

## convergence on boundary warnings
# Fit against the test3comp data shipped inside the Matrix package;
# verb = TRUE prints iteration output so convergence can be watched.
load(system.file("external/test3comp.rda", package = "Matrix"))
b3 <- lmer(Y3 ~ (1|Sample) + (1|Operator/Run), test3comp, verb = TRUE)

# Only attempt the second fit when the mlmRev 'Early' data set is available
# (try() keeps a missing suggested package from failing the whole script).
if (isTRUE(try(data(Early, package = 'mlmRev')) == 'Early')) {
  Early$tos <- Early$age - 0.5 # time on study
  b1 <- lmer(cog ~ tos + trt:tos + (tos|id), Early, verb = TRUE)
}

cat('Time elapsed: ', proc.time(),'\n') # for ``statistical reasons''
c9ad02417fd448947125ac3c64c52787e5c6140e
888b76fabb8b490b2f92daa1e53b84940eaf83eb
/analysis/seedling_mortality_analysis/route.R
c9f244f2d407df99d0eaf892f4afc9e1748d85dd
[]
no_license
jmargrove/ForestFloodingSensitivityAnalysis
ba398074537f23c76264bb9ebffaf7390895866c
43eaa322207aa8d8433ce42d359e72909406d1a5
refs/heads/master
2021-11-22T03:22:30.804283
2018-10-31T15:28:11
2018-10-31T15:28:11
120,295,376
1
0
null
null
null
null
UTF-8
R
false
false
91
r
route.R
# route for seedling mortality analysis
# Base path the analysis scripts prepend to locate their inputs/outputs.
route <- './analysis/seedling_mortality_analysis/'
cb3e389d6b9f2151b33e7ffa5adb0fb917a7ebb4
ac5cf1135044426715a39d041348afd25c68d585
/Code/ED_5pextvals_results.R
a259ad68f6a977f0b3deee2ec8d908459734dc19
[]
no_license
OScott19/ResearchProject
a173773130ed4c3ae151530691a514b96ac692c2
7f39f7202ee0643985d9b239309033a3e4f3c41d
refs/heads/master
2022-11-11T20:09:06.123108
2020-06-26T14:52:36
2020-06-26T14:52:36
234,543,392
0
0
null
null
null
null
UTF-8
R
false
false
2,739
r
ED_5pextvals_results.R
# this script just created a tree for each scenario, and didn't do an EDGE analysis
# we are going to read in each score, calculate the tree PD, PDLoss and ePD
# under each scenario, and then plot it

# Housekeeping
# NOTE(review): rm(list=ls()) plus a hard-coded setwd() make this script
# machine-specific and unsafe to source from other code -- kept as-is.
rm(list=ls())
graphics.off()
setwd("~/Documents/ResearchProject/Code/")

# getting ready to read everything in
# long-form storage: one row per (tree, pext scenario, statistic)
store <- data.frame(TREE = NA, pext = NA, calc = NA, value = rep(0, 1000) )
# tree: 1-100
# calc: PDtotal, PDloss, ePD
# value: length
ptype <- c("Isaac","pext50", "pext100","pext500", "pextPessimistic")
calc <- c("PDtotal", "PDloss", "ePD")
counter <- 0

## Load up reference data (this contains original PD from each tree)
load(file = "../Results/HPC_EDGE/PDLoss_100.Rdata")  # presumably provides `pd.loss` -- confirm

# work through each ptype
for (x in ptype) {
  # and then each tree
  for (i in 1:100) {
    counter <- counter + 1
    res <- NA  # sentinel: stays NA when the file for this tree is missing
    to.load <- paste("../Results/HPC_ED/ED_only_HPC_24hr_", x, "_", i, ".Rdata", sep = "")
    try(load(file = to.load), silent = F)  # called res
    # NOTE(review): if the loaded `res` is not a scalar, is.na(res) returns
    # more than one value and this condition warns/errors -- confirm the
    # saved object makes this safe.
    if(!is.na(res)) {
      # surviving-PD row for this tree/scenario
      store$TREE[counter] <- i
      store$pext[counter] <- x
      store$calc[counter] <- calc[2]
      store$value[counter] <- sum(res[[2]]$edge.length)
      counter <- counter + 1
      # ePD row: original tree PD minus surviving PD
      store$TREE[counter] <- i
      store$pext[counter] <- x
      store$calc[counter] <- calc[3]
      store$value[counter] <- pd.loss$TreePD[i] - sum(res[[2]]$edge.length)
    }
  }
}

# Append a "today" scenario holding the current PD of each tree.
store.2 <- data.frame(TREE = NA, pext = NA, calc = NA, value = rep(0, (length(store$TREE) + 100)))
store.2$TREE <- c(store$TREE, c(1:100))
store.2$pext <- c(store$pext, rep("today", 100))
store.2$calc <- c(store$calc, rep("ePD", 100))
store.2$value <- c(store$value, pd.loss$TreePD)

## what if we try and plot this?
# want to compare ePD across scenarios
ePD <- subset(store.2, store.2$calc =="ePD")
plot.new()
boxplot(ePD$value ~ ePD$pext, main = "Actinopterygii ePD, using 5 probabilities of extinction forecasts", frame = F, xlab = "Probabilities of extinction forecasts", ylab = "PD (millions of years)", col = "hotpink")

### how about percentage remaining?
ePD <- subset(store.2, store.2$calc =="ePD")
ePD.2 <- subset(ePD, ePD$pext != "today")
ePD.2$pext[ePD.2$pext=="pext50"] <- "pext050" # to ensure its in the right order!
plot.new()
# NOTE(review): `colour = "steelblue2"` is not a boxplot() argument (likely
# meant `col =`), and the call ends with a trailing comma -- kept verbatim.
boxplot((ePD.2$value/pd.loss$TreePD[1]*100) ~ ePD.2$pext, main = "Surviving Actinopterygii PD, given \n varying probabilities of extinction", frame = T, colour = "steelblue2", xlab = "Probabilities of extinction forecasts", ylab = "Percentage of current PD (%)", xaxt = 'n', )
axis(1,labels = ptype, at = seq(1:length(ptype)))
ed8d0911ce4e1ef48b6b5325e4fe3cb847ad3333
5c0bbe2c09d7450e565966c5928ec81a261b70a5
/gamR/uniqgenesets.R
3116e86618e96f8ad5a2fc6311d05fa207ca5831
[]
no_license
ramadatta/ShinyApps
c73ba31f3a719b96aac7190351d3e14076e86c1b
24c2576c4e47b8bd60272bd2932ba9600186f03e
refs/heads/master
2023-04-06T21:01:24.417113
2023-03-16T03:28:59
2023-03-16T03:28:59
230,216,276
3
0
null
null
null
null
UTF-8
R
false
false
12,526
r
uniqgenesets.R
#' uniqgenesets function
#'
#' Plots a gene arrow map with directionality: aligns the genes on
#' `geneName`, adds labels above the gene arrows, keeps one representative
#' molecule per distinct gene set (presence/absence pattern) and colours
#' the arrows with the requested palette.
#'
#' @param colpal Palette key: "c25", "b25", "d25" or an RColorBrewer
#'   palette name ("Set3", "Set2", "Set1", "Accent", "Dark2", "Paired",
#'   "Pastel1", "Pastel2").  Any other value produces no plot
#'   (invisible NULL), as before.
#' @param geneName Gene on which the molecules are aligned.
#' @param arrowhead_height Arrowhead height in mm.
#' @param arrowhead_width Arrowhead width in mm.
#' @param element_line Accepted for backward compatibility; NOTE(review):
#'   this parameter shadows ggplot2::element_line and is never applied --
#'   the grid-line colour comes from `input$textElementLine` instead.
#' @param labelsize Size of the gene labels drawn above the arrows.
#' @export
#'
uniqgenesets <- function(colpal, geneName, arrowhead_height, arrowhead_width, element_line, labelsize){
  # `data()` (reactive data set) and `input` come from the enclosing
  # Shiny server scope -- TODO confirm when extracting this function.
  dummies <- make_alignment_dummies(
    data(),
    aes(xmin = start, xmax = end, y = molecule, id = gene),
    on = geneName
  )
  updown_stream_coords <- data()

  # Presence/absence matrix (molecule x gene): 1 = gene present.
  PA_matrix <- as.data.frame(with(updown_stream_coords, table(molecule, gene)) > 0L) + 0L
  # Sort molecules by their gene-set pattern, then keep one molecule per
  # distinct pattern.
  sorted_PA_matrix <- PA_matrix[do.call(order, as.data.frame(PA_matrix)), ]
  distinct_sorted_PA_matrix <- distinct(sorted_PA_matrix)
  sorted_genomes <- row.names(distinct_sorted_PA_matrix)

  # Restrict the plot data and alignment dummies to the retained molecules,
  # fixing the facet order via factor levels.
  sorted_dummies <- dummies[dummies$molecule %in% sorted_genomes, ]
  sorted_example_genes <- updown_stream_coords[updown_stream_coords$molecule %in% sorted_genomes, ]
  sorted_example_genes$molecule <- factor(sorted_example_genes$molecule, levels = unique(sorted_example_genes$molecule))
  sorted_dummies$molecule <- factor(sorted_dummies$molecule, levels = unique(sorted_dummies$molecule))

  # Fixed palettes for the "c25" and "d25" keys.
  c25 <- c("dodgerblue2", "#E31A1C","green4", "#6A3D9A", "#FF7F00",
           "LavenderBlush4", "gold1", "skyblue2", "#FB9A99", "palegreen2",
           "#CAB2D6", "#FDBF6F", "gray70", "khaki2", "maroon", "orchid1",
           "deeppink1", "blue1", "steelblue4", "darkturquoise", "green1",
           "yellow4", "yellow3", "darkorange4", "brown")
  d25 <- c("#FFFFFF","#C0C0C0","#808080","#FF0000","#800000","#FFFF00",
           "#808000","#00FF00","#008000","#00FFFF","#008080","#FFFFE0",
           "#FFFACD","#FAFAD2","#FFEFD5","#FFE4B5","#FFDAB9","#EEE8AA",
           "#F0E68C","#BDB76B","#FFFF00","#F08080","#FA8072","#E9967A",
           "#FFA07A","#FF7F50","#FF6347","#FF4500","#FFD700","#FFA500")

  # Resolve the fill scale once; the plot is otherwise identical for every
  # palette, so build it a single time instead of duplicating it per branch.
  fill_scale <- switch(colpal,
    "c25" = scale_fill_manual(values = c25),
    # "b25": interpolate Set3 up to 300 colours for very gene-rich data.
    "b25" = scale_fill_manual(values = colorRampPalette(brewer.pal(8, "Set3"))(300)),
    "d25" = scale_fill_manual(values = d25),
    "Set3" = scale_fill_brewer(palette = "Set3"),
    "Set2" = scale_fill_brewer(palette = "Set2"),
    "Set1" = scale_fill_brewer(palette = "Set1"),
    "Accent" = scale_fill_brewer(palette = "Accent"),
    "Dark2" = scale_fill_brewer(palette = "Dark2"),
    "Paired" = scale_fill_brewer(palette = "Paired"),
    "Pastel1" = scale_fill_brewer(palette = "Pastel1"),
    "Pastel2" = scale_fill_brewer(palette = "Pastel2"),
    NULL)  # unknown palette: no plot, matching the original if/else chain

  if (!is.null(fill_scale)) {
    p_aligned_labelabv_orderbysim_uniqgeneset_gnomes <-
      ggplot(sorted_example_genes,
             aes(xmin = start, xmax = end, y = molecule, fill = gene, forward = strand)) +
      geom_gene_arrow(arrowhead_height = unit(arrowhead_height, "mm"),
                      arrowhead_width = unit(arrowhead_width, "mm")) +
      geom_blank(data = sorted_dummies, aes(forward = 1)) +
      facet_wrap( ~ molecule, scales = "free", ncol = 1) +
      fill_scale +
      theme_genes() %+replace%
        theme(panel.grid.major.y = element_line(colour = input$textElementLine),
              legend.position = "none") +
      geom_text(data = sorted_example_genes %>% mutate(start = (start + end) / 2),
                aes(x = start, label = gene), nudge_y = 0.4, size = labelsize)
  }
}
8c17ad2b09b5e2e670a2430ca54af979385ab54e
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/ypr/examples/ypr_report.Rd.R
fa94f6d4d0db7f4cd958e8d0480fa45c3f54caa2
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
197
r
ypr_report.Rd.R
# Extracted example code for ypr::ypr_report -- report generation for a
# yield-per-recruit population object.
library(ypr)

### Name: ypr_report
### Title: Report
### Aliases: ypr_report

### ** Examples

## Not run: 
##D cat(ypr_report(ypr_population(), file = tempfile()), sep = "\n")
## End(Not run)
0a44d5d9efdb857bfef19e46c56f2e953060cc92
4b5d28e6a031fc0fae5f3da40ffb1b593c18cd48
/man/pairwise-package.Rd
12bab850b84489e6f5066b3c0845e7172b542e7d
[]
no_license
cran/pairwise
446b603f99c237bda8dcc291eeeb916b226eaccc
1f355868d5fb52777e39d5aced27c934984b527b
refs/heads/master
2023-04-28T04:42:27.164640
2023-04-17T20:10:02
2023-04-17T20:10:02
17,698,183
0
0
null
null
null
null
UTF-8
R
false
true
6,930
rd
pairwise-package.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/pairwise-package.R \docType{package} \name{pairwise-package} \alias{pairwise-package} \alias{pairwise} \title{Rasch Model Parameters with pairwise} \description{ Performs the explicit calculation -- not estimation! -- of the Rasch item parameters for dichotomous and polytomous response formats using a pairwise comparison approach (see Heine & Tarnai, 2015) a procedure that is based on the principle for item calibration introduced by Choppin (1968, 1985). On the basis of the item parameters, person parameters (WLE) are calculated according to Warm's weighted likelihood approach (Warm, 1989). Item- and person fit statistics and several functions for plotting are available. } \details{ In case of dichotomous answer formats the item parameter calculation for the Rasch Model (Rasch, 1960), is based on the construction of a pairwise comparison matrix M\emph{nij} with entries f\emph{ij} representing the number of respondents who got item \emph{i} right and item \emph{j} wrong according to Choppin's (1968, 1985) conditional pairwise algorithm. For the calculation of the item thresholds and difficulty in case of polytomous answer formats, according to the Partial Credit Model (Masters, 1982), a generalization of the pairwise comparison algorithm is used. The construction of the pairwise comparison matrix is therefore extended to the comparison of answer frequencies for each category of each item. In this case, the pairwise comparison matrix M\emph{nicjc} with entries f\emph{icjc} represents the number of respondents who answered to item \emph{i} in category \emph{c} and to item \emph{j} in category \emph{c-1} widening Choppin's (1968, 1985) conditional pairwise algorithm to polytomous item response formats. Within R this algorithm is simply realized by matrix multiplication. 
In general, for both polytomous and dichotomous response formats, the benefit in applying this algorithm lies in it's capability to return stable item parameter 'estimates' even when using data with a relative high amount of missing values, as long as the items are still proper linked together. The recent version of the package 'pairwise' computes item parameters for dichotomous and polytomous item responses -- and a mixture of both -- according the partial credit model using the function \code{\link{pair}}. Based on the explicit calculated item parameters for a dataset, the person parameters may thereupon be estimated using any estimation approach. The function \code{\link{pers}} implemented in the package uses Warm's weighted likelihood approach (WLE) for estimation of the person parameters (Warm, 1989). When assessing person characteristics (abilities) using (rotated) booklet designs an 'incidence' matrix should be used, giving the information if the respective item was in the booklet (coded 1) given to the person or not (coded 0). Such a matrix can be constructed (out of a booklet allocation table) using the function \code{\link{make.incidenz}}. Item- and person fit statistics, see functions \code{\link{pairwise.item.fit}} and \code{\link{pairwise.person.fit}} respectively, are calculated based on the squared and standardized residuals of observed and the expected person-item matrix. The implemented procedures for calculating the fit indices are based on the formulas given in Wright & Masters, (1982, p. 100), with further clarification given at \code{http://www.rasch.org/rmt/rmt34e.htm}. Further investigation of item fit can be done by using the function \code{\link{ptbis}} for point biserial correlations. For a graphical representation of the item fit, the function \code{\link{gif}} for plotting empirical and model derived category probability curves, or the function \code{\link{esc}} for plotting expected (and empirical) score curves, can be used. 
The function \code{\link{iff}} plots or returns values of the item information function and the function \code{\link{tff}} plots or returns values of the test information function. To detect multidimensionality within a set of Items a rasch residual factor analysis proposed by Wright (1996) and further discussed by Linacre (1998) can be performed using the function \code{\link{rfa}}. For a 'heuristic' model check the function \code{\link{grm}} makes the basic calculations for the graphical model check for dicho- or polytomous item response formats. The corresponding S3 plotting method is \code{\link{plot.grm}}. } \references{ Choppin, B. (1968). Item Bank using Samplefree Calibration. \emph{Nature, 219}(5156), 870-872. Choppin, B. (1985). A fully conditional estimation procedure for Rasch model parameters. \emph{Evaluation in Education, 9}(1), 29-42. Heine, J. H. & Tarnai, Ch. (2015). Pairwise Rasch model item parameter recovery under sparse data conditions. \emph{Psychological Test and Assessment Modeling, 57}(1), 3–36. Heine, J. H. & Tarnai, Ch. (2011). Item-Parameter Bestimmung im Rasch-Modell bei unterschiedlichen Datenausfallmechanismen. \emph{Referat im 17. Workshop 'Angewandte Klassifikationsanalyse'} [Item parameter determination in the Rasch model for different missing data mechanisms. Talk at 17. workshop 'Applied classification analysis'], Landhaus Rothenberge, Muenster, Germany 09.-11.11.2011 Heine, J. H., Tarnai, Ch. & Hartmann, F. G. (2011). Eine Methode zur Parameterbestimmung im Rasch-Modell bei fehlenden Werten. \emph{Vortrag auf der 10. Tagung der Fachgruppe Methoden & Evaluation der DGPs.} [A method for parameter estimation in the Rasch model for missing values. Paper presented at the 10th Meeting of the Section Methods & Evaluation of DGPs.] Bamberg, Germany, 21.09.2011 - 23.09. 2011. Heine, J. H., & Tarnai, Ch. (2013). Die Pairwise-Methode zur Parameterschätzung im ordinalen Rasch-Modell. \emph{Vortrag auf der 11. 
Tagung der Fachgruppe Methoden & Evaluation der DGPs.} [The pairwise method for parameter estimation in the ordinal Rasch model. Paper presented at the 11th Meeting of the Section Methods & Evaluation of DGPs.] Klagenfurt, Austria, 19.09.2013 - 21.09. 2013. Linacre, J. M. (1998). Detecting multidimensionality: which residual data-type works best? \emph{Journal of outcome measurement, 2}, 266–283. Masters, G. N. (1982). A Rasch model for partial credit scoring. \emph{Psychometrika, 47}(2), 149-174. Rasch, G. (1960). \emph{Probabilistic models for some intelligence and attainment tests.} Copenhagen: Danmarks pædagogiske Institut. Warm, T. A. (1989). Weighted likelihood estimation of ability in item response theory. \emph{Psychometrika, 54}(3), 427–450. Wright, B. D., & Masters, G. N. (1982). \emph{Rating Scale Analysis.} Chicago: MESA Press. Wright, B. D. (1996). Comparing Rasch measurement and factor analysis. \emph{Structural Equation Modeling: A Multidisciplinary Journal, 3}(1), 3–24. } \author{ Joerg-Henrik Heine <jhheine@googlemail.com> }
614e77345999149601a64316d61b83107b113296
7b4d8ad2edc8889373d497303fdf0ce95a15fe96
/analysis/stage5_R_analysis.R
1c858253df0920d2c41509cf8cd528d1f82b168f
[ "MIT" ]
permissive
apal4/cs638project
7b845dddca8ce316195e7c4a33288c44fbda4e6a
99ef308d6c1930297cb57bc95dca4241b3f15293
refs/heads/master
2021-04-29T11:03:09.834648
2016-12-18T04:06:00
2016-12-18T04:06:04
null
0
0
null
null
null
null
UTF-8
R
false
false
7,378
r
stage5_R_analysis.R
#############################################
########### read in and explore #############
#############################################

# Load the labeled species/threat dataset and inspect its structure.
# NOTE(review): varDescribe() and modelSummary() below are not base R
# (they look like lmSupport helpers) -- confirm the package is attached.
d <- read.csv("labeled.csv")
varDescribe(d)
str(d)

# convert popTrend to be a continuous var --> add order to levels so it makes sense
# first convert Unknown field to NA's for cleaner correlation values
levels(d$ltable_pop_trend)
d$new_pop_trend = d$ltable_pop_trend
# Recode "Unknown" and empty strings as missing values.
d[d[,c('new_pop_trend')] == "Unknown",c('new_pop_trend')] = NA
d[d[,c('new_pop_trend')] == "",c('new_pop_trend')]= NA
d[d[,c('new_pop_trend')] == "" & !is.na(d[,c('new_pop_trend')]),c('new_pop_trend')] = NA
levels(d$new_pop_trend)
# Round-trip through character to drop the now-unused factor levels.
d$new_pop_trend = as.character(d$new_pop_trend)
str(d$new_pop_trend)
d[d[,c('new_pop_trend')] == "Unknown",c('new_pop_trend')]
d$new_pop_trend <- as.factor(d$new_pop_trend)
str(d$new_pop_trend)
#d$new_pop_trend <- ordered(d$new_pop_trend, levels = c("Unknown","Decreasing", "Increasing"))

# select ordinal vars for correlation matrix
ordinalVarKeysToLookAt <- c('length','ltable_status','new_pop_trend','ltable_country_count','rtable_tCount')
#install.packages("sjPlot")
library(sjPlot)
dSubset <- d[,ordinalVarKeysToLookAt]
# rename some columns for prettiness sake
colnames(dSubset) <- c("length","status","popTrend",'countryCount','threatCount')
# Coerce every column to integer codes so sjp.corr can correlate them.
dSubset[] <- lapply(dSubset,as.integer)
dSubset

# make things a bit prettier
sjp.setTheme(theme.font = NULL, title.color = "black", title.size = 1.2,
             title.align = "left", title.vjust = NULL, geom.outline.color = NULL,
             geom.outline.size = 0, geom.boxoutline.size = 0.5,
             geom.boxoutline.color = "black", geom.alpha = 1, geom.linetype = 1,
             geom.errorbar.size = 0.7, geom.errorbar.linetype = 1,
             geom.label.color = NULL, geom.label.size = 10, geom.label.alpha = 1,
             geom.label.angle = 0, axis.title.color = "grey30",
             axis.title.size = 1.1, axis.title.x.vjust = NULL,
             axis.title.y.vjust = NULL, axis.angle.x = 0, axis.angle.y = 0,
             axis.angle = NULL, axis.textcolor.x = "black",
             axis.textcolor.y = "black", axis.textcolor = NULL,
             axis.linecolor.x = NULL, axis.linecolor.y = NULL,
             axis.linecolor = NULL, axis.line.size = 0.5, axis.textsize.x = 1,
             axis.textsize.y = 1, axis.textsize = NULL, axis.tickslen = NULL,
             axis.tickscol = NULL, axis.ticksmar = NULL, axis.ticksize.x = NULL,
             axis.ticksize.y = NULL, panel.backcol = NULL, panel.bordercol = NULL,
             panel.col = NULL, panel.major.gridcol = NULL,
             panel.minor.gridcol = NULL, panel.gridcol = NULL,
             panel.gridcol.x = NULL, panel.gridcol.y = NULL,
             panel.major.linetype = 1, panel.minor.linetype = 1,
             plot.backcol = NULL, plot.bordercol = NULL, plot.col = NULL,
             plot.margins = NULL, legend.pos = "right", legend.just = NULL,
             legend.inside = FALSE, legend.size = 1, legend.color = "black",
             legend.title.size = 1, legend.title.color = "black",
             legend.title.face = "bold", legend.backgroundcol = "white",
             legend.bordercol = "white", legend.item.size = NULL,
             legend.item.backcol = "grey90", legend.item.bordercol = "white")
sjp.corr(dSubset)

### Next, I'll make a model of some of our most predictive IV's
m1 <- lm(status ~ popTrend, data = dSubset)
# popTrend seems to have insufficient power (mostly decreasing, only a few increasing), F(1,89) = .429, p=.514
modelSummary(m1,t=F)
m2 <- lm(status ~ countryCount, data = dSubset)
# approaching significance F(1,146) = 2.696, p = .103
modelSummary(m2,t=F)
m3 <- lm(status ~ threatCount, data = dSubset)
modelSummary(m3,t=F)
modelSummary(m1,t=F)
dSubset$popTrend

# Now we'll look at categorical data - chisqare with... rtable_conservation_keywords
#varsToLookAt <- c(d$ltable_status,d$ltable_pop_trend,d$ltable_country_count,d$ltable_ecology,d$rtable_tCount,d$rtable_kingdom,d$rtable_phylum,d$rtable_class,d$rtable_order,d$ltable_family)
# NOTE(review): varsToLookAt is only defined in the commented-out line above, so
# the next call fails with "object 'varsToLookAt' not found" unless it was
# defined interactively first -- TODO confirm intent before running end-to-end.
chisq.test(varsToLookAt) # yep
# Individual chisq tests... these seem to work alright
chisq.test(d$ltable_status,d$rtable_phylum)
write.table(table(d$ltable_status,d$rtable_phylum),"eyooo.csv")
chisq.test(d$ltable_status,d$rtable_class) # nope!
chisq.test(d$ltable_status,d$rtable_order)
chisq.test(d$ltable_status,d$ltable_family)
chisq.test(d$ltable_status,d$genus)
levels(d$rtable_phylum)
chisq.test(d$ltable_status,d$ltable_family) # yep
chisq.test(d$ltable_pop_trend, d$ltable_status) # wow, doesn't predict... surprising
chisq.test(d$rtable_kingdom,d$rtable_phylum) # sanity check successful
chisq.test(d$ltable_country_count,d$rtable_kingdom) # cool, this is predictive
chisq.test(d$ltable_pop_trend,d$rtable_kingdom) # nope
chisq.test(d$ltable_pop_trend,d$rtable_phylum) # yep
chisq.test(d$length,d$rtable_phylum) # yep
chisq.test(d$length,d$ltable_status) # yep
chisq.test(d$weight,d$ltable_status) # yep
chisq.test(d$ltable_ecology,d$ltable_status) # nope
chisq.test(d$rtable_tCount,d$ltable_status) # nope, not even close
chisq.test(d$rtable_tCount,d$ltable_ecology) # yep
chisq.test(d$rtable_tCount,d$rtable_phylum) # yep
chisq.test(d$rtable_conservation_keywords,d$ltable_status) # yep
chisq.test(d$rtable_conservation_keywords,d$rtable_phylum) # yep
chisq.test(d$ltable_ecology,d$ltable_status) # nope

#try to make one whole correlation table
#varKeysToLookAt <- c('length','ltable_status','ltable_pop_trend','ltable_country_count','ltable_ecology','rtable_tCount','rtable_kingdom','rtable_phylum','rtable_class','rtable_order','ltable_family')

#############################################
############ scatterplot code ###############
#############################################
# NOTE(review): the remainder appears pasted from a course example -- it uses
# Wk4IAT / ConcernMC / SexC / ConcernM columns that are never created from
# labeled.csv above, and modelPredictions()/some() are lmSupport/car helpers.
# Verify before running.
m3cent <- lm(Wk4IAT ~ ConcernMC + SexC, data=d)
modelSummary(m3cent, t=F)
modelSummary(m3, t=F)

#####################################
#### Plotting predictions ####
#####################################

# Step 1: Create a data frame using expand.grid() that specifies the values to pass to each variable
# in the model to generate each prediction. In this case, we have two variables,
# ConcernM and Sex. The data frame you create needs to contain variables
# that correspond exactly to the variables in your model.
?expand.grid
pX <- expand.grid(ConcernM=seq(1,10,length=100), Sex=c(0, 1))
pY <- modelPredictions(m3, pX)
some(pY)

# Lay down the skeleton of the graph
# NOTE(review): the name 'plot' shadows base::plot() for the rest of the session.
plot <- ggplot(data=pY, aes(x = ConcernM, y = Predicted, color = as.factor(Sex)))
# Add scatterplot
plot <- plot + geom_point(data=d, aes(y = Wk4IAT))
plot
# Make scatterplot pretty
plot <- plot+scale_colour_manual(values=c("#AF7AC5","#66BB6A"),name ="Gender", labels=c("Female", "Male"))
# get creative with your colors! http://htmlcolorcodes.com/color-chart/
# Add regression lines and error bands
plot <- plot + geom_smooth(aes(ymin = CILo, ymax = CIHi), stat = "identity") +
  # make some final aesthetic changes
  labs(x = 'Concern (Mean)', y = 'Week 4 IAT Score', title = 'Effect of Concern on IAT Scores') +
  theme_bw(base_size = 14) + # removing background of graph, set font sizes
  theme(legend.position = c(0.8, 0.9), # positioning legend (play around with values)
        legend.background = element_blank()) # removing background of legend
plot
47c44615a440899cb5a189c0615cd5f6cee2c4ad
3d89548bdda5824889a5f494640e255bc11da095
/plot3.R
ae6a615a0f3aa0b8303c2ba06aa3aaceea6a2b15
[]
no_license
mooly/ExData_Plotting1
39832675170e764664379110aab87f17a0d5234a
d9b844fb6759607dfded0763d9aeeaa8e84c0306
refs/heads/master
2020-12-25T06:25:36.936234
2015-03-08T16:04:46
2015-03-08T16:04:46
31,740,121
0
0
null
2015-03-05T22:27:23
2015-03-05T22:27:21
null
UTF-8
R
false
false
2,312
r
plot3.R
# This script creates a line plot of the three energy sub-metering series from
# household power consumption data collected by University of California
# Irvine (UCI).
# NOTE(review): the original header said "histogram", but the code below draws
# a line plot (type = "l") with a legend.
#
# Original data description, info and download location:
# https://archive.ics.uci.edu/ml/datasets/Individual+household+electric+power+consumption
#
# Note: Data requires approx. 145MB of memory when read in by R
#
# Script created for Coursera: Exploratory Data Analysis from JHU
# To run, place script in working directory, then execute statement in console:
# > source('./plot3.R')

# Download and unzip data file into working directory:
# fileURL0 <- "https://archive.ics.uci.edu/ml/machine-learning-databases/00235/household_power_consumption.zip"
fileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
dateDownloaded <- date()
download.file(fileURL, destfile="./HPC.zip", method="curl")
unzip("./HPC.zip")
datafile <- "household_power_consumption.txt"

# [Optional] Detect column classes, for use as 'colClasses' parameter:
# initial <- read.table(datafile, header=TRUE, sep=";", nrows = 100)
# classes <- sapply(initial, class)

# Read datafile into data frame variable:
powerdata <- read.table(datafile, header=TRUE, sep=";", colClasses="character")

# Reclassify "Date" column, specify two dates of interest:
powerdata$Date <- strptime(powerdata$Date,"%d/%m/%Y")
dayone <- strptime("2007-02-01","%Y-%m-%d") # ?OR?
# NOTE(review): the next bare expression looks like a discarded alternative to
# the strptime() call above; its result is not assigned to anything.
as.Date("2007-02-01")
daytwo <- strptime("2007-02-02","%Y-%m-%d")

# Subset data and reclassify data columns to numeric:
powerdata <- powerdata[powerdata$Date == dayone | powerdata$Date == daytwo,];
for (i in 3:9) { powerdata[,i]<-as.numeric(powerdata[,i]) }

# Concatenate "Date" and "Time" columns, reclassify, add to data frame:
powerdata$DateTime <- do.call(paste,c(powerdata[c("Date","Time")],sep=" "))
powerdata$DateTime <- strptime(powerdata$DateTime,"%Y-%m-%d %H:%M:%S")

# Create PNG file with line plot:
png(filename="plot3.png", width=480, height=480, units="px")
with(powerdata,{
  plot(DateTime,Sub_metering_1,type="l",ylab="Energy Sub metering",xlab="",main="")
  lines(DateTime,Sub_metering_2,col="red")
  lines(DateTime,Sub_metering_3,col="blue")})
legend("topright",lty=1,col=c("black","red","blue"),
       legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
dev.off()
8a8949e2b56f57e769a50f3a7ff1b8132dd2078a
8799b6d8763ef537a8dc1a42746af0c96ffff94c
/plot4.R
ebd2c24af39f4d6814292f7645ebb23bc578632b
[]
no_license
ampatha/ExData_Plotting2
ad7f762e552a5c119e6f6b83939c5c0d386f4549
5011f24e6655b6423e308f04bde4ede809f3a17f
refs/heads/master
2016-09-06T03:05:14.804859
2015-07-26T23:12:11
2015-07-26T23:12:11
39,745,612
0
0
null
null
null
null
UTF-8
R
false
false
803
r
plot4.R
## Sanem Seren Sever C_EDA - A2 ## plot4.R (plot4.png) ############################### ## Attention: WD must contain the source data ## Read data for plotting: source("LoadData.R") ##Filer Coal sccCoal <- sccData$SCC[grep("[Cc]oal", sccData$EI.Sector)] emisCoal <- emisData[emisData$SCC %in% sccCoal,] ##Get Yearly Totals totEmisCoal <- aggregate(emisCoal$Emissions, list(emisCoal$year),FUN= sum) ##Correct Column Names names(totEmisCoal)<-c("Year","Emissions") ##Plotting: png(filename = "plot4.png", width = 520, height = 480, units = "px") plot(totEmisCoal, col="green", type="l", main="Coal Caused Emissions Accross USA\nbetween 1999 & 2008", xlab="Year", ylab="Total PM2.5 Emissions") dev.off()
b811b56746d1af46e6a860b223949b1f45391f50
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/linear.tools/examples/check_vec_meaningful.Rd.R
bfc564ebc285efd3bef7431c3c4f87fd5d683b70
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
543
r
check_vec_meaningful.Rd.R
library(linear.tools) ### Name: check_vec_meaningful ### Title: check x is a meaningful vector ### Aliases: check_vec_meaningful ### Keywords: internal ### ** Examples check_vec_meaningful(c(NA,NA)) # NOT PASS tryCatch(check_vec_meaningful(x=list(NA,NaN)), error = function(err){ print(err) } )# NOT PASS check_vec_meaningful(c(NA,1)) # PASS check_vec_meaningful(c(NULL,1)) # PASS check_vec_meaningful(factor(c(NA,1,1))) # PASS check_vec_meaningful(1) # PASS check_vec_meaningful('1') # PASS
b55cda57a47a850035edc4e42d5cb9c5754bc472
05c70039a3492543ab1f6f79367cbe99dae21a0a
/poLCA-mixture-of-gaussian.R
5d0d9bba2ec4f5e71009b85f9c58238edd0ff5ac
[]
no_license
bocaiwen/algorithm-from-scratch
7586cef291029128a9cd178a373771236134f5fd
1dbc3983144390606101f6dfb926a2d21f399d20
refs/heads/master
2021-04-03T01:29:50.298277
2018-03-12T04:42:51
2018-03-12T04:42:51
124,834,363
0
0
null
null
null
null
UTF-8
R
false
false
310
r
poLCA-mixture-of-gaussian.R
#!/usr/bin/env RScript library(poLCA) mof = read.csv('mixture-of-gaussian.csv',header=FALSE) mof$V1 = as.integer(mof$V1) + 5 mof$V2 = as.integer(mof$V2) + 5 f = cbind(V1, V2) ~ 1 lca2 = poLCA(f, mof, nclass=2, nrep = 5) lca3 = poLCA(f, mof, nclass=3, nrep = 5) lca4 = poLCA(f, mof, nclass=4, nrep = 5)
9835ebad1fdc2310ce0729ca95d4daac2d2c17db
1ab9c4951f63b6fcb1e636c2d0124d99fb28c619
/man/psoptim-methods.Rd
d87a88e05e4ac629d062f594d61e3e2c8ff72495
[]
no_license
cran/pso
9b7fa1912c5033b4ec6c9fe6f4e339864cf3d666
067841ef07471b1e20848cc90cb1ae949c9e7760
refs/heads/master
2022-05-06T05:02:10.062793
2022-04-12T15:10:06
2022-04-12T15:10:06
17,698,774
5
1
null
null
null
null
UTF-8
R
false
false
1,088
rd
psoptim-methods.Rd
\name{psoptim-methods} \docType{methods} \alias{psoptim-methods} \alias{psoptim,ANY,ANY,ANY,ANY,ANY-method} \alias{psoptim,test.problem,missing,missing,missing,missing-method} \title{Methods for function psoptim (Particle Swarm Optimization)} \description{ General implementation of particle swarm optimization usable as a direct replacement for \code{\link{optim}}. } \section{Methods}{ \code{signature(par = "ANY", fn = "ANY", gr = "ANY", lower = "ANY", upper = "ANY")}: This is the standard replacement for \code{\link{optim}} without S4 object usage. \code{signature(par = "test.problem", fn = "missing", gr = "missing",}\cr \code{lower = "missing", upper = "missing")}: This is for running PSO on a specific test problem. Typically this is invoked on repetitive runs of a test problem and used to assess the choice of parameters for the underlying PSO algorithm. The function is essentially a wrapper function for \code{\link{psoptim}} but returns an instance of \code{\linkS4class{test.result}} containing summary results. } \keyword{methods} \keyword{optimize}
6ffe561c1d72e6f651b7faa5c146360013352d8c
5fabfb8ce7863ffe6daa91dac2971ee85f9d2ad7
/man/getFullPartisAnnotation.Rd
ffd9357cdf01c889223ca7082282745aece513f4
[]
no_license
Pezhvuk/sumrep
a1ff159aa8b025d15c167e434591788bae8cc4b2
275c6981f57b14db51746735f32e1ecf6bf6c486
refs/heads/master
2020-04-30T12:14:39.350531
2019-01-19T20:44:06
2019-01-19T20:44:06
null
0
0
null
null
null
null
UTF-8
R
false
true
509
rd
getFullPartisAnnotation.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PartisFunctions.R
\name{getFullPartisAnnotation}
\alias{getFullPartisAnnotation}
\title{Do extended partis processing of annotations}
\usage{
getFullPartisAnnotation(output_path, output_file, partis_path)
}
\arguments{
\item{output_path}{Desired output directory, which will contain \code{output_file}}

\item{output_file}{Name of the output file written inside \code{output_path}}

\item{partis_path}{The full path to the partis executable}
}
\description{
Do extended partis processing of annotations
}
b3e47df241441398c8e919b9a1f2b58a3cdd8dc3
1a4c806a0745850e725040bca561fa109c0d5019
/stat/NeoThy/src/Riaz.R
abab5b9b5df1a92abc9dd8c7fccf87508d96c470
[ "Apache-2.0" ]
permissive
tipplerow/jam
9037ae5aefd6f38315855c0f893eb8fa9d842bf3
464b78819f064a248425b4703dd96fa4f337ea67
refs/heads/master
2021-05-09T05:19:08.698810
2021-02-12T17:01:29
2021-02-12T17:01:29
119,304,231
0
1
null
null
null
null
UTF-8
R
false
false
2,283
r
Riaz.R
Riaz.colorMap <- function() { data.frame(Response = c("CR", "PR", "SD", "PD", "NE"), col = c("green", "yellow", "red", "black", "cyan"), pch15 = c(15, 15, 15, 15, 4), pch16 = c(16, 16, 16, 16, 4)) } Riaz.load <- function() { riaz <- read.csv("Riaz_S2.csv") riaz$NeoPepRatio <- riaz$NeoPeptideLoad / riaz$MutationLoad riaz <- subset(riaz, is.finite(NeoPepRatio)) riaz <- merge(riaz, Riaz.colorMap(), by = "Response") riaz <- riaz[order(riaz$NeoPepRatio),] riaz$R_NR <- ifelse(riaz$Response == "PD", "NR", ifelse(riaz$Response == "NE", NA, "R")) riaz } Riaz.barResponse <- function(riaz) { par(las = 1) par(fig = c(0, 1, 0.1, 0.9)) colors <- Riaz.colorMap() colors <- subset(colors, Response != "NE") riaz <- subset(riaz, Response != "NE") barplot(riaz$NeoPepRatio, ylab = "Neopeptide ratio", ylim = c(-0.1, 4.1), col = riaz$col, density = 40) box() legend("topleft", bty = "n", legend = colors$Response, col = colors$col, pch = 15) } Riaz.plotTimeToDeath <- function(riaz) { par(las = 1) par(fig = c(0, 1, 0.1, 0.9)) plot(riaz$NeoPepRatio, riaz$TimeToDeathWeeks, log = "y", pch = 16, xlab = "Neopeptide ratio", xlim = c(0, 4), ylab = "Time to death [weeks]", ylim = c(1, 200)) } Riaz.correlation <- function(riaz) { colkeys <- c("progression_free", "overall_survival", "mutations", "neos500", "NeoPepRatio") corrP <- cor(riaz[,colkeys], method = "p") corrS <- cor(riaz[,colkeys], method = "s") cormat <- corrP for (ii in 2:5) for (jj in 1:(ii - 1)) cormat[ii,jj] <- corrS[ii,jj] cormat } Riaz.vioplot <- function() { riaz <- Riaz.load() par(las = 1) par(fig = c(0, 1, 0.1, 0.9)) xR <- riaz$NeoPepRatio[which(riaz$Response %in% c("CR", "PR", "SD"))] xNR <- riaz$NeoPepRatio[which(riaz$Response == "PD")] vioplot(xR, xNR, names = c("R", "NR"), col = "cadetblue3", ylim = c(0, 4)) print(wilcox.test(xR, xNR)) }
56be416a71c7134477e0d294b41e332eb332365e
2549aca74c0be9048a0d0f9dfdcbd3634d1e47ab
/R/plot.permDif.R
23a40c9e64bc180391f435578628c476fcf75cba
[]
no_license
PeterVerboon/lagnetw
74107e89406d9e1b2c0e8ce318a347e5d06cb130
61071f8502140c78229b7169fda22ff07df89e5c
refs/heads/master
2021-07-08T17:56:47.478571
2020-08-03T15:26:26
2020-08-03T15:26:26
160,812,465
1
0
null
null
null
null
UTF-8
R
false
false
2,044
r
plot.permDif.R
#' Function to plot the results of the permDif function for network connectivity #' differences between two groups #' #' There are five lot types. The type number indicates what figure is plotted, #' 1 = mean differences total #' 2 = mean differences auto-regression, #' 3 = mean differences, excluding auto regression, #' 4 = mean differences, subset of predictors #' 5 = mean differences of SD's #' #' @param x result of permDif #' @param ... additional parameters #' @return Four or five (when there subsets) figures are build and returned to the plot window #' @export #' plot.permDif <- function(x, ...) { plotall <- list() for (type in c(1:5)) { a <- x$output$permutations[,type] est <- x$output$pvalues.summary[type,1] lab <- colnames(x$output$permutations)[type] if (!is.na(est)) { df <- with(stats::density(a), data.frame(x, y)) meanEst <- mean(df$x) cutoff1 <- stats::quantile(a,0.025) cutoff2 <- stats::quantile(a,0.975) ylim1 <- colMeans((df[(min(abs(df$x - est)) == abs(df$x - est)),])[2]) ylim2 <- colMeans((df[(min(abs(df$x - meanEst)) == abs(df$x - meanEst)),])[2]) shade1 <- rbind(c(df[1, "x"], 0), subset(df ,x<=cutoff1 ),c(cutoff1,0) ) shade2 <- rbind(c(cutoff2,0), subset(df ,x>=cutoff2 ), c(df[nrow(df), "x"], 0)) p <- ggplot(data = df, aes(x = df$x, y = df$y)) + geom_line() p <- p + geom_polygon(data = shade1, aes(shade1$x,shade1$y), fill = "grey") p <- p + geom_polygon(data = shade2, aes(shade2$x,shade2$y), fill = "grey") p <- p + geom_segment(aes(x=meanEst, y=0, xend = meanEst, yend=ylim2),color="black", size=.1) p <- p + geom_segment(aes(x=est, y=0, xend = est, yend=ylim1),color="blue", linetype="dashed", size=.2) p <- p + theme_bw() + xlab("") + ylab("") p <- p + ggtitle(paste("Permutation distribution",lab," with observed estimate")) plotall[[type]] <- p cat(paste("building plottype: ", type, "\n")) print(plotall[[type]]) } else { cat(paste("No plot available for", lab, "\n")) } } }
c5a6945d585c8a671a48dba793a886573307eba9
c6449d068c765254a2d46c51c9efe6ab18a6202e
/man/directionalVariation.Rd
dbc1eb1707872d3d4c159c7a11b31731fb3865a9
[]
no_license
diogro/ratones
42fbbe0332b8a8262a4d8b1fe7f7f2a6b6e81d5a
0f5b02f05bd49698d59db3a64b2dfd1f25d73ebe
refs/heads/master
2021-01-09T06:47:39.198603
2017-07-09T15:03:10
2017-07-09T15:03:10
35,047,728
0
0
null
null
null
null
UTF-8
R
false
true
1,919
rd
directionalVariation.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/directional_Variation.R \name{directionalVariation} \alias{directionalVariation} \title{Directional variation} \usage{ directionalVariation(cov.matrix, line, delta_Z, Wmat = cov.matrix) } \arguments{ \item{cov.matrix}{covariance matrix} \item{line}{current line} \item{delta_Z}{direction in phenotype space} \item{Wmat}{optional fixed matrix for selection gradient reconstruction} } \description{ Calculate the Evolvability and Conditional evolvability in the direction of selection. } \examples{ delta_Z = colMeans(dplyr::select(ratonesdf[ratonesdf$selection == "upwards",], IS_PM:BA_OPI)) - colMeans(dplyr::select(ratonesdf[ratonesdf$selection == "downwards",], IS_PM:BA_OPI)) \dontrun{ # this can take a while library(doMC) registerDoMC(5) p_directional_stats <- ldply(ratones_models, function(model) adply(model$Ps, 1, directionalVariation, model$line, delta_Z), .parallel = TRUE) DzPC1 = densityPlot(p_directional_stats, "DZpc1", expression(paste("Vector correlation of ", delta, "z and E1"))) evolDZ = densityPlot(p_directional_stats, "evolDZ", "Scaled directional\\n evolvability") + theme(legend.position = "none", text = element_text(size = 20)) condevolDZ = densityPlot(p_directional_stats, "condevolDZ", "Scaled directional\\n conditional evolvability") + theme(legend.position = "none", text = element_text(size = 20)) figure_4 <- ggdraw() + draw_plot(evolDZ, 0, 0.5, 0.5, 0.5) + draw_plot(condevolDZ, 0.5, 0.5, 0.5, 0.5) + draw_plot(DzPC1, 0.2, 0, 0.5, 0.5) + draw_plot_label(c("A", "B", "C"), c(0, 0.5, 0.2), c(1, 1, 0.5), size = 20) } }
c93a9e8111b39dca87b2c72e0bd9f0be20f48582
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/hierfstat/examples/fstat.from.adegenet.Rd.R
9b3bff904bad6551040832d96910e5a6d33b5667
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
746
r
fstat.from.adegenet.Rd.R
library(hierfstat) ### Name: fstat ### Title: Wrapper for fst estimator from hierfstat package (from adegenet) ### Aliases: FST fst fstat pairwise.fst ### Keywords: multivariate ### ** Examples ## Not run: ##D if(require(adegenet)){ ##D data(nancycats) ##D ##D ## pairwise Fst ##D mat.fst <- pairwise.fst(nancycats, res.type="matrix") ##D mat.fst ##D } ##D ##D ## Fst, Fis, Fit ##D ## using hierfstat ##D if(require(hierfstat)){ ##D fstat(nancycats) ##D } ##D ##D ## using pegas ##D if(require(pegas)){ ##D data(nancycats) ##D ##D ## conversion to pegas's format ##D as.loci(nancycats) ##D ##D ## use Fst from pegas ##D fsttab <- Fst(as.loci(nancycats)) ##D ##D ## average over loci ##D apply(fsttab, 2, mean) ##D } ## End(Not run)
6b8e31d165f500e5d202334c2907294ae3508dbf
fdf839eeaa710470b7a9d881735e893d9f0fdd0c
/man/minDist.Rd
6118b3bf0e72e5ca1f17e27ea9f6360cab36e58a
[]
no_license
kjrom-sol/plantR
0da8749c532cda13aa4f6681dc0293412e385632
e75fe4f1711c971f5be7a4e77bff5663c069bee3
refs/heads/master
2023-04-28T03:05:08.054234
2021-05-05T01:56:21
2021-05-05T01:56:21
null
0
0
null
null
null
null
UTF-8
R
false
true
1,520
rd
minDist.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/accessory_geo.R
\name{minDist}
\alias{minDist}
\title{Minimum Distance between Coordinates}
\usage{
minDist(lon, lat, min.dist = 0.001, output = NULL)
}
\arguments{
\item{lon}{numerical. Longitude in decimal degrees}

\item{lat}{numerical. Latitude in decimal degrees}

\item{min.dist}{numerical. Minimum threshold distance (in kilometers) to be
used to detect duplicated coordinates. Defaults to 1 meter.}

\item{output}{character. The type of information that should be returned (see
Details)}
}
\value{
A vector of TRUE/FALSE or of minimum distances in kilometers.
}
\description{
The function calculates the minimum distance between coordinates, or finds
the coordinates which are below a minimum threshold distance of each other.
}
\details{
The argument \code{output} controls the type of output that should be
returned: \itemize{ \item 'flag': a TRUE/FALSE vector indicating the
coordinates which are below \code{min.dist} from other coordinates. \item
'group': the position of the first coordinate representing a group of
duplicated coordinates. \item 'dist': the distance of each coordinate to the
closest coordinate. } }
\examples{
lat = c(-23.475389, -23.475389, -23.475390, -23.475389)
lon = c(-47.123768, -47.123768, -47.123768, -47.123868)

\dontrun{
minDist(lon, lat, output = 'group')
minDist(lon, lat, output = 'flag')
minDist(lon, lat, output = 'dist')
}

}
\seealso{
\link[plantR]{geoDist}
}
\author{
Renato A. F. de Lima
}
\keyword{internal}
8b0bbbe2fe071248fb2a51ce4e32dd611baa2be4
6a28ba69be875841ddc9e71ca6af5956110efcb2
/Introduction_To_Probability_And_Statistics_by_William_Mendenhall,_Robert_J_Beaver,_And_Barbara_M_Beaver/CH7/EX7.4/Ex7_4.R
49dc80ab2f1de56ef439cdc4bd06033b85fa5613
[]
permissive
FOSSEE/R_TBC_Uploads
1ea929010b46babb1842b3efe0ed34be0deea3c0
8ab94daf80307aee399c246682cb79ccf6e9c282
refs/heads/master
2023-04-15T04:36:13.331525
2023-03-15T18:39:42
2023-03-15T18:39:42
212,745,783
0
3
MIT
2019-10-04T06:57:33
2019-10-04T05:57:19
null
UTF-8
R
false
false
716
r
Ex7_4.R
sample_size <- 30; x_value <- 7; mean <- 8; sd <- 4; prob_less_7 <- round(pnorm(x_value, mean, round((sd / sqrt(sample_size)),2)),6) cat("The approximate probability duration for average duration is less than 7 years is",prob_less_7) prob_exceed_7 <- round((1 - prob_less_7),5) cat("The approximate probability for average duation exceeds 7 years is",prob_exceed_7) x1_value <- 9; prob_of_interest <- (pnorm(x1_value, mean, round((sd / sqrt(sample_size)),2))) - (pnorm(x_value, mean, round((sd / sqrt(sample_size)),2))) cat("The approximate probability for average duration lies within 1 year of the population mean=8 is",prob_of_interest) #"The answers may sligthly vary due to rounding off values"
632821e07cb7c49a17307446f2ab1a6b0d8c011c
e39195d31e11fc3b8d3d29c206f87adad534e88a
/man/dw_retrieve_chart_metadata.Rd
6c4b5b1588e3c99489a34ef8758a9a80ac32edd4
[ "MIT" ]
permissive
munichrocker/DatawRappr
40640ba9f0954e605e4e56d533a16e417bcbc1bc
4ed8332494a7e11273dac1a546b7a164f99f9c85
refs/heads/master
2022-12-22T18:43:38.945321
2022-12-15T15:56:09
2022-12-15T15:56:09
217,085,284
82
10
MIT
2022-12-15T15:56:10
2019-10-23T15:00:55
R
UTF-8
R
false
true
2,146
rd
dw_retrieve_chart_metadata.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dw_retrieve_chart_metadata.R \name{dw_retrieve_chart_metadata} \alias{dw_retrieve_chart_metadata} \title{Retrieves a Datawrapper chart's metadata} \usage{ dw_retrieve_chart_metadata(chart_id, api_key = "environment") } \arguments{ \item{chart_id}{Required. A Datawrapper-chart-id as character string, usually a five character combination of digits and letters, e.g. "aBcDe". Or a \strong{dw_chart}-object.} \item{api_key}{Optional. A Datawrapper-API-key as character string. Defaults to "environment" - tries to automatically retrieve the key that's stored in the .Reviron-file by \code{\link{datawrapper_auth}}.} } \value{ A S3-structure of type \strong{dw_chart} with the elements from the Datawrapper-API stored under content. Same as in \code{\link{dw_create_chart}}. \item{status}{Returns 'ok' if the API-key used was correct.} \item{$data$id}{Returns the internal id of the chart - the same as used in chart_id.} \item{$data$title}{Returns the chart's title.} \item{$data$theme}{Returns the chart's theme.} \item{$data$createdAt}{Chart's creation date.} \item{$data$lastModifiedAt}{Chart's last modification date.} \item{$data$metadata}{Contains chart's specifications, like transpose, column formats, visualization settings, colors, titles and texts} \item{$data$metadata$visualize}{Contains the chart's visualization settings.} \item{$data$metadata$describe}{Contains the chart's description settings, like title, source name and url, byline} \item{$data$publishedAt}{Chart's publication date - if published yet.} \item{$data$author$id}{The chart-author's id.} } \description{ Return the metadata of a existing Datawrapper chart. } \note{ This function retrieves all metadata about a chart that's stored by Datawrapper. It is helpful to gain insights in the different options that might be changed via the API. 
} \examples{ \dontrun{ dw_retrieve_chart_metadata("aBcDE") } # uses the preset key in the .Renviron-file \dontrun{dw_retrieve_chart_metadata(chart_id = "a1B2Cd", api_key = "1234ABCD")} # uses the specified key } \author{ Benedict Witzenberger }
9997b09dc7846190992b364c80654c61eb7d22ad
6aad63f658e3207c1bad979d83409d730e1e4f75
/R/passage.R
7cccb18cafeb64e216c37683b294ecf34fc65b35
[]
no_license
mespe/YoloBypassSBM
9448a3ffcb6e9f89b94395f7ddd9285eaa8d12b5
686493da1a232e4ed3cdd978bca245643f36600a
refs/heads/master
2022-07-14T17:55:28.002212
2020-05-15T21:19:02
2020-05-15T21:19:02
263,786,263
0
0
null
2020-05-14T01:38:57
2020-05-14T01:38:56
null
UTF-8
R
false
false
1,033
r
passage.R
#' Passage survival and time from Fremont Weir to Chipps Island #' #' Passage survival and time (days) from Fremont Weir to Chipps Island based on fork length, flow, and route #' #' @md #' @param model_day Model day at start of passage #' @param abundance Abundance of cohort on day route entered #' @param fork_length Fork length (mm) at Fremont Weir #' @param route Route: Sacramento River (Sac) or Yolo Bypass (Yolo) #' @param sim_type Simulation type: deterministic or stochastic #' #' @export #' #' passage <- function(model_day, abundance, fork_length, route = c("Sac", "Yolo"), sim_type){ # route <- match.arg(route) # # if(length(model_day) != length(abundance) || length(abundance) != length(fork_length)) # stop("model_day, abundance, and fork_length must be the same length") flow <- freeport_flow[["Value"]][model_day] list("Abundance" = passage_survival(abundance, fork_length, flow, route, sim_type), "PassageTime" = passage_time(fork_length, flow, route, sim_type)) }
481e33fbc9ec7f6d5d3cbcaf599db577ccd52e59
34907d37aca32ba6333415322b7a1f9399ea41f1
/lecture3.R
c77eb63a1a75b0d9f4e4ad564fe806a7e624b049
[]
no_license
Mathies95/ToA
d2dc94cfe702c679bc2ca66baf315356e6ad3ac7
a3362b8dff0e90c52797907b8556e3575ac60b6a
refs/heads/main
2023-01-19T13:53:27.845891
2020-11-21T10:23:59
2020-11-21T10:23:59
307,641,926
0
0
null
null
null
null
UTF-8
R
false
false
387
r
lecture3.R
devtools::install_github(remotes::install_github("bss-osca/tfa/tfa-package", upgrade = FALSE)) url <- "https://raw.githubusercontent.com/bss-osca/tfa/master/slides/03-transform/03-transform_examples.Rmd" download.file(url, "03-transform_ex.Rmd", # stores file in R working directory mode="w") # "w" means "write," and is used for text files
81055b80943c7c91cebced94948158abf153d7e2
217080d949fe5550e8d5ab284f72711e8850b8b7
/desktop_codes/newRJ.R
e1a6f96cbe7cb1fc3ff99e0db7f8a56c2943e30d
[]
no_license
srahman-24/RJClust
7bddc0f1493016b3d93b03c77d584163b81e7f82
db43196694022d3e0e8c10847551a12958e69c83
refs/heads/master
2022-05-07T00:41:48.394436
2022-04-29T20:43:08
2022-04-29T20:43:08
145,024,703
0
0
null
null
null
null
UTF-8
R
false
false
1,045
r
newRJ.R
# toy example RR = matrix(c(1:16), nrow = 4, ncol = 4, byrow = T) RR JJ = NULL for(ii in 1:4) { JJ = rbind(JJ, RR[ii,][-ii]) } JJ = cbind(JJ, diag(RR)) JJ # Alizadeh Alizadeh= read.csv("/Users/srahman/Documents/Clustering/alizadeh-2000-v2_database1.csv", header = T, row.names = 1) D = t(Alizadeh) Z1 = D[1:41,] Z1 = D p = ncol(Z1) n = nrow(Z1) RR = Z1%*%t(Z1)/p JJ = NULL for(ii in 1:n) { JJ = rbind(JJ, RR[ii,][-ii]) } JJ = cbind(JJ, diag(RR)) MclustJJ = Mclust(JJ, modelNames = "VVI") groupJ = MclustJJ$class group = group_Ali1 = c(rep(2,4), rep(1,7), 2, 1, rep(2,3), rep(1,6), 2,1,2,1,rep(2,3),1,1,2,2,1,rep(2,4),1,2,2,1) group = group_Ali = c(group[1:41], rep(3,9), rep(4,11),1) table(group,groupJ) GG = Z1%*%t(Z1)/p gg = GG gg_wodiag = gg - diag(diag(gg)) gg_wdiag = cbind(gg_wodiag, diag(gg)) GG_new = cbind(gg_wodiag + diag(colSums(gg_wodiag))/(n-1), diag(gg)) Mclustgg = Mclust(GG_new, modelNames = "VVI") groupG = Mclustgg$class table(group,groupG)
e6ceb0de58d2df265c110d186753e5bd90ad6a23
e379d190e31e2e7a4efbad71748611c3f4d7be6b
/R/character_ids.R
1f4b43bc3df744ad43d54f3765395c376cf3bf47
[ "MIT" ]
permissive
Edgar-Zamora/superheroR
f6ea5017797c1226c672e49714b29829cf3d036a
88e9de4888566b3273c931efbb2efea585ac932f
refs/heads/main
2023-06-20T06:48:12.252907
2021-07-16T22:49:29
2021-07-16T22:49:29
356,692,662
0
0
null
null
null
null
UTF-8
R
false
false
297
r
character_ids.R
#' Characters associated with each id. #' #' A dataset containing the ids associated with each superhero #' #' @format A data frame with 731 rows and 2 variables: #' \describe{ #' \item{id}{unique id for ech character} #' \item{character}{name of the superhero} #' ... #' } "character_ids"
0344e837cdd2ab7cd021389580a7b7624b15d5be
b7ac2968b98c3670397077fe42da03e83d4335e4
/CF - portatile.R
69aa67ce58502e6d27664d5c22a38fe960405878
[]
no_license
ndricca/CF-vegs
55e32edddc39505afb8fb68fa8946e59b6beb1e8
b5407bed38d9e72028401f8ffb1bcacf461d706c
refs/heads/master
2020-12-01T23:52:02.488360
2016-09-27T22:27:03
2016-09-27T22:27:03
67,924,523
0
0
null
null
null
null
UTF-8
R
false
false
25,964
r
CF - portatile.R
############ START ############ rm(list=ls()) set.seed(123) par(mfrow=c(1,1)) # Load libraries library(recommenderlab) library(ggplot2) library(devtools) # install_github("ggbiplot", "vqv") library(ggbiplot) library(pls) ############ Data Preparation ############ # Loading data # Desktop PC # data <- read.csv("C:###########\\IT_TN1.csv",header=T,sep=";") # Notebook data <- read.csv("C:###########\\IT_TN1.csv",header=T,sep=";") data <- data[,1:883] # split data into training and test set train_ind <- sample(seq_len(nrow(data)), size = 0.8*nrow(data)) train <- data[train_ind, ] test <- data[-train_ind, ] # View(train[order(train[,4]), ]) # View(test[order(test[,4]), ]) # check train and test differ rm(train_ind) # Familiarity matrix # familiarity <- data[,185:368] # familiarity <- train[,185:368] fam <- as.matrix(train[,185:368]) # Rating matrix # rat <- as.matrix(data[,369:552]) rat <- as.matrix(train[,369:552]) # rat1 <- as.matrix(data[,369:552]) # Input rating = NA every time familiarity is low (ie fam < 3) for (i in 1:nrow(rat)) { for (j in 1:ncol(rat)) { if (fam[i,j]==1 |fam[i,j]==2) { rat[i,j] <- NA } } } ratings <- as(rat, "realRatingMatrix") # ratings_n <- normalize(ratings) # head(ratings) # image(normalize(ratings)) # View(as(ratings,"matrix")) # avg.rat.prod <- colMeans(ratings,na.rm = TRUE) # qplot(seq_along(avg.rat.prod), avg.rat.prod,main = "Product average ratings") # avg.rat.user <- rowMeans(ratings,na.rm = TRUE) ## WARN ## # # qplot(seq_along(avg.rat.user), avg.rat.user,main = "User average ratings") # u <- as(avg.rat.user,'matrix') # ############ RecommenderLab ############ # rec=Recommender(ratings[1:nrow(ratings)],method="IBCF", param=list(normalize = "Z-score",method="COsine")) # print(rec) # names(getModel(rec)) # recom <- predict(rec, ratings[1:nrow(ratings)], type="ratings") ############ My algorithm: ############ ############ Similarity matrices ############ # Function: Adjusted Cosine similarity function avg.rat.user <- 
rowMeans(ratings,na.rm = TRUE) # qplot(seq_along(avg.rat.user), avg.rat.user,main = "User average ratings") u <- as(avg.rat.user,'matrix') getAdjCos <- function(i,j) { nuser=table(complete.cases(cbind(i,j,u)))["TRUE"] tmp01 <- cbind(i,j,u) concat<- matrix(data=tmp01[complete.cases(tmp01),], nrow = nuser, ncol = 3) i=concat[,1] j=concat[,2] u=concat[,3] # Compute adjusted cosine similarity adj.sim.ij <- sum((i-u)*(j-u))/(sqrt(sum((i-u)^2))*sqrt(sum((j-u)^2))) return(adj.sim.ij) } # Function: Correlation similarity function getCorr <- function(i,j) { ii <- i[complete.cases(i,j)] jj <- j[complete.cases(i,j)] corr.ij <- cor(ii,jj,method = "pearson") } # Get the Adjusted Cosine and the Correlation similarity matrices AdjCos.similarity <- matrix(NA, nrow=ncol(rat), ncol=ncol(rat), dimnames=list(colnames(rat),colnames(rat))) Corr.similarity <- matrix(NA, nrow=ncol(rat), ncol=ncol(rat), dimnames=list(colnames(rat),colnames(rat))) a <- Sys.time() pb.sim <- txtProgressBar(min = 1, max = ncol(rat), initial = 0, style = 3) for (i in 1:ncol(rat)) { for (j in 1:ncol(rat)) { if (i==j) { AdjCos.similarity[i,j] <- NA Corr.similarity[i,j] <- NA } else { # i=3 # j=5 AdjCos.similarity[i,j] <- getAdjCos(as.matrix(rat[,i]),as.matrix(rat[,j])) Corr.similarity[i,j] <- getCorr(as.matrix(rat[,i]),as.matrix(rat[,j])) # Corr.similarity[i,j] # AdjCos.similarity[i,j] } setTxtProgressBar(pb.sim,i) } } b <- Sys.time() b-a # Time difference of 31.98095 secs rm(pb.sim,a,b) # # Plot similarities # View(Corr.similarity) # View(AdjCos.similarity) # image(AdjCos.similarity,main="AdjCos similarity matrix") # image(Corr.similarity,main="Corr similarity matrix") # # Spot check # summary(AdjCos.similarity)[,1:5] # plot(AdjCos.similarity[,60]) ############ Predictions ############ # Select for every product the most similar items ########### Most similar (Adjusted cosine) ########### most.similar.adj <- matrix(NA, nrow=nrow(AdjCos.similarity), ncol=ncol(AdjCos.similarity), dimnames = 
list(colnames(AdjCos.similarity),colnames(AdjCos.similarity))) a <- Sys.time() pb.similar <- txtProgressBar(min = 1, max = ncol(rat), initial = 0, style = 3) for (i in 1:ncol(AdjCos.similarity)) { for (j in 1:ncol(AdjCos.similarity)) { if (!is.na(AdjCos.similarity[i,j])){ if (AdjCos.similarity[i,j] >= quantile(AdjCos.similarity[,j],0.5,na.rm = T)) { most.similar.adj[i,j] <- AdjCos.similarity[i,j] } } setTxtProgressBar(pb.similar,i) } } b <- Sys.time() b-a # Time difference of 17.14373 secs rm(pb.similar,a,b) # View(most.similar.adj) ########### Most similar (Correlation) ########### # Adjusted cosine similarity most.similar.corr <- matrix(NA, nrow=nrow(Corr.similarity), ncol=ncol(Corr.similarity), dimnames = list(colnames(Corr.similarity),colnames(Corr.similarity))) a <- Sys.time() pb.similar <- txtProgressBar(min = 1, max = ncol(rat), initial = 0, style = 3) for (i in 1:ncol(Corr.similarity)) { for (j in 1:ncol(Corr.similarity)) { if (!is.na(Corr.similarity[i,j])){ if (Corr.similarity[i,j] >= quantile(Corr.similarity[,j],0.5,na.rm = T)) { most.similar.corr[i,j] <- Corr.similarity[i,j] } } setTxtProgressBar(pb.similar,i) } } b <- Sys.time() b-a # Time difference of 17.14373 secs rm(pb.similar,a,b) # View(most.similar.corr) # Check what is the number of most similar items ############ # # and that it is the same for every product # a <- rep(NA,184) # for (i in 1:ncol(most.similar)){ # a[i] <- sum(!is.na(most.similar[,i])) # } # table(a) # rm(a) # # If quantile = 0.95 --> number of most.similar[,i] = 10 # # If quantile = 0.90 --> number of most.similar[,i] = 19 # # If quantile = 0.85 --> number of most.similar[,i] = 28 # # If quantile = 0.80 --> number of most.similar[,i] = 37 # # If quantile = 0.75 --> number of most.similar[,i] = 46 ########### Test set ########### # Familiarity matrix - test set fam.t <- as.matrix(test[,185:368]) # Rating matrix - test set rat.t <- as.matrix(test[,369:552]) # Input rating = na every time familiarity is low for (i in 1:nrow(rat.t)) 
{ for (j in 1:ncol(rat.t)) { if (fam.t[i,j]==1 |fam[i,j]==2) { rat.t[i,j] <- NA } } } # Remove vegetables ratings from the test set rat.t[,9:37] <- NA # Comparison matrix rat.t1 <- as.matrix(test[,369:552]) # Function: compute weighted sum getWeightedSum <- function(k,z) { # function(u,i) { # old version # Example: user = 4, product = 11 # u <- 7 # i <- 14 # k <- rat.t[u,!is.na(most.similar.adj[,i])] # z <- most.similar.adj[!is.na(most.similar.adj[,i]),i] pred <- sum(k*z,na.rm = T)/sum(z,na.rm = T) return(pred) } ######## Create predicted and true matrices ######## pred.adj <- matrix(NA, nrow = nrow(rat.t),ncol = ncol(rat.t)) pred.corr <- matrix(NA, nrow = nrow(rat.t),ncol = ncol(rat.t)) true <- matrix(NA, nrow = nrow(rat.t),ncol = ncol(rat.t)) true <- rat.t1 a <- Sys.time() for (u in 1:nrow(rat.t)) { for (i in 9:37) { # there are products not rated by the users in # the test set --> "if statement" necessary # to exclude them from the prediction # true[u,i] <- rat.t1[u,i] if (!is.na(true[u,i])){ # prediction from AdjCos similarity matrix k <- rat.t[u,!is.na(most.similar.adj[,i])] z <- most.similar.adj[!is.na(most.similar.adj[,i]),i] # pred.adj[u,i] <- round(getWeightedSum(k,z)) pred.adj[u,i] <- getWeightedSum(k,z) # prediction from Corr similarity matrix kk <- rat.t[u,!is.na(most.similar.adj[,i])] zz <- most.similar.corr[!is.na(most.similar.adj[,i]),i] # pred.corr[u,i] <- round(getWeightedSum(kk,zz)) pred.corr[u,i] <- getWeightedSum(kk,zz) # diff[u,i] <- true[u,i] - pred[u,i] } } } b <- Sys.time() b-a rm(a,b) # Time difference of 0.1685388 secs # Name the products colnames(pred.adj) <- colnames(rat.t) colnames(pred.corr) <- colnames(rat.t) # Select predictions for the veggies pred.adj <- pred.adj[,9:37] pred.corr <- pred.corr[,9:37] # Difference matrices true <- true[,9:37] diff.adj <- matrix(NA, nrow = nrow(true),ncol = ncol(true)) diff.adj <- true-pred.adj diff.corr <- matrix(NA, nrow = nrow(true),ncol = ncol(true)) diff.corr <- true-pred.corr # diff.adj <- 
diff.adj[,9:37] # diff.corr <- diff.corr[,9:37] # # View(pred.adj) # # View(pred.corr) # # View(true) ######### Evaluation metrics ######### colnames(diff.adj) <- colnames(pred.adj) colnames(diff.corr) <- colnames(pred.corr) # View(diff.adj) # View(diff.corr) mean(colMeans(diff.adj, na.rm=T),na.rm = T) mean(colMeans(diff.corr, na.rm=T),na.rm = T) # In mean, considering raw differences, true ratings are 2.659349 and 2.661307 points more than # the corrispondent predicted ratings --> the algorithm underestimates vegetable ratings # RMSE: sqrt(sum((true[i,j]-pred[i,j])^2)/abs(k)) k <- nrow(pred.adj)*ncol(pred.adj) RMSE.adj <- sqrt(sum(rowSums(diff.adj^2,na.rm = T),na.rm = T)/abs(k)) # RMSE.adj = 3.228498 RMSE.corr <- sqrt(sum(rowSums(diff.corr^2,na.rm = T),na.rm = T)/abs(k)) # RMSE.corr = 3.215119 # MAE: sum(abs(pred-true))/k MAE.adj <- sum(abs(pred.adj-true),na.rm = T)/k # MAE.adj = 2.833856 MAE.corr <- sum(abs(pred.corr-true),na.rm = T)/k # MAE.corr = 2.822884 # Checking different numbers of similar products, it seems that # the mean differences mean(colMeans(diff, na.rm=T),na.rm = T) # (between predicted and actual ratings (averaged for both users # and products) are minimized for quantile = 0.5 , i.e. the 92 # most similar products. ######### Bitter taste ######### # Prediction formula modification with bitter tasting preferences. # What are the worst results in terms of difference between # predicted and actual ratings? worstres.prod.adj <- ifelse(colMeans(diff.adj, na.rm=T) >= mean(colMeans(diff.adj, na.rm=T)), 1, 0) worstres.prod.corr <- ifelse(colMeans(diff.corr, na.rm=T) >= mean(colMeans(diff.corr, na.rm=T)), 1, 0) # what are the most bitter vegetables? bitterveg <- c(1,1,1,0,1,0,0,1,0,0,1,1,0,0,0,0,0,0,1,0,0,1,1,0,0,0,0,1,1) names(bitterveg) <- names(worstres.prod.corr) # What is the correlation between these two? 
cor(worstres.prod.adj,bitterveg) # 0.6505161 cor(worstres.prod.corr,bitterveg) # 0.4313725 # Load ratings users did to 4 chocolate creams with different amounts of sugar bit.lik.train <- as.matrix(train[,110:113]) bit.lik.test <- as.matrix(test[,110:113]) # PCA on chocolate liking (with different sugar concentration) bit.lik.pca <- prcomp(bit.lik.train, center = TRUE, scale. = TRUE) print(bit.lik.pca) plot(bit.lik.pca, type = "l") summary(bit.lik.pca) # Biplot pca.lik.plot <- ggbiplot(bit.lik.pca, obs.scale = 1, var.scale = 1) # pca.lik.plot <- pca.lik.plot + scale_color_discrete(name = '') # pca.lik.plot <- pca.lik.plot + theme(legend.direction = "horizontal", legend.position = "topright") print(pca.lik.plot) ######### New pred - 1: scaled bitter score added to the predicted rating ######### sweet.pc <- predict(bit.lik.pca, bit.lik.test)[,2] bitter.pc <- -1*sweet.pc # scale bitter.pc in terms of rating points (1:9) bitter.pc.n <- ((9-1)/(max(bitter.pc)-min(bitter.pc)))*(bitter.pc-max(bitter.pc))+9 # min(true,na.rm = T) pred.b.adj <- pred.adj pred.b.corr <- pred.corr # Add the scaled bitter score for every user of the test set for (j in 1:ncol(pred.corr)){ for (i in 1:nrow(pred.corr)) { # j<-29 # i<-1 if(bitterveg[j]!=0){ # pred.b.adj[,j] <- round(pred.adj[,j] + bitter.pc.n) # pred.b.corr[,j] <- round(pred.corr[,j] + bitter.pc.n) pred.b.adj[,j] <- pred.adj[,j] + bitter.pc.n pred.b.corr[,j] <- pred.corr[,j] + bitter.pc.n } } } for (j in 1:ncol(pred.corr)){ for (i in 1:nrow(pred.corr)) { if (pred.b.adj[i,j] >= 9 & !is.na(pred.b.adj[i,j])){ pred.b.adj[i,j] <- 9 } if (pred.b.corr[i,j] >= 9 & !is.na(pred.b.corr[i,j])){ pred.b.corr[i,j] <- 9 } } } # View(pred.b.adj) # View(pred.b.corr) # # View(true) # pred <- pred[,9:37] # true <- true[,9:37] # diff <- diff[,9:37] diff.b.adj <- true-pred.b.adj diff.b.corr <- true-pred.b.corr colnames(diff.b.adj) <- colnames(pred.adj) colnames(diff.b.corr) <- colnames(pred.corr) # View(diff.b.adj) ######### New pred - 1: evaluation 
metrics ######### mean(colMeans(diff.b.adj, na.rm=T),na.rm = T) mean(colMeans(diff.b.corr, na.rm=T),na.rm = T) # In mean, true ratings are higher than predicted of 2.659349 points # RMSE: sqrt(sum((true[i,j]-pred[i,j])^2)/abs(k)) k <- nrow(pred.adj)*ncol(pred.adj) RMSE.b.adj <- sqrt(sum(rowSums(diff.b.adj^2,na.rm = T),na.rm = T)/abs(k)) # RMSE.b.adj = 2.807011 RMSE.b.corr <- sqrt(sum(rowSums(diff.b.corr^2,na.rm = T),na.rm = T)/abs(k)) # RMSE.b.corr = 2.916551 # MAE: sum(abs(pred-true))/k MAE.b.adj <- sum(abs(pred.b.adj-true),na.rm = T)/k # MAE.b.adj = 2.252351 MAE.b.corr <- sum(abs(pred.b.corr-true),na.rm = T)/k # MAE.b.corr = 2.34953 ######### New pred - 2: bitter score weighted with a lm coeff ######### # bit.lik.pca$x[,2] mod <- rep(NA,length = ncol(rat)) # bit.lik.pca$x[,2] could be interpreted as the principal component related to the liking for sweeter # chocolate cream --> the more bitterness is appreciated, the less is bit.lik.pca$x[,2] # i<-10 for (i in 9:37){ y <- scale(rat[,i], center = T) ########### WARN ### x <- scale(bit.lik.pca$x, center = T) # mod[i] <- lm(y~x)$coefficients[3] mod[i] <- lm(rat[,i]~bit.lik.pca$x)$coefficients[3] # pls.dat <- data.frame(rat = rat[,i], # choc = I(bit.lik.train)) # mod <- pcr(rat~choc,data=pls.dat,ncomp = 4,validation="LOO") # summary(mod) # mod$coefficients # mod$scores # mod$loadings # mod$loading.weights # plot(mod, ncomp = 2, asp = 1, line = TRUE) # plot(mod, plottype = "biplot", comps = c(1,3)) } mod <- mod[9:37] # mod[j] pred.b2.adj <- pred.adj pred.b2.corr <- pred.corr # Add the scaled bitter score for every user of the test set for (j in 1:ncol(pred.corr)){ # j=29 if(bitterveg[j]!=0){ # pred.b2.adj[,j] <- round(pred.adj[,j] + mod[j]*sweet.pc) # pred.b2.corr[,j] <- round(pred.b.corr[,j] + mod[j]*sweet.pc) pred.b2.adj[,j] <- pred.adj[,j] + mod[j]*sweet.pc pred.b2.corr[,j] <- pred.b.corr[,j] + mod[j]*sweet.pc } } diff.b2.adj <- true-pred.b2.adj diff.b2.corr <- true-pred.b2.corr colnames(diff.b2.adj) <- 
colnames(pred.adj) colnames(diff.b2.corr) <- colnames(pred.corr) ######### New pred - 2: evaluation metrics ######### mean(colMeans(diff.b2.adj, na.rm=T),na.rm = T) mean(colMeans(diff.b2.corr, na.rm=T),na.rm = T) # In mean, true ratings are 2.643205 and 0.6511787 points higher than predicted ones # RMSE: sqrt(sum((true[i,j]-pred[i,j])^2)/abs(k)) k <- nrow(pred.adj)*ncol(pred.adj) RMSE.b2.adj <- sqrt(sum(rowSums(diff.b2.adj^2,na.rm = T),na.rm = T)/abs(k)) # RMSE.b2.adj = 3.213412 RMSE.b2.corr <- sqrt(sum(rowSums(diff.b2.corr^2,na.rm = T),na.rm = T)/abs(k)) # RMSE.b2.corr = 3.141142 # MAE: sum(abs(pred-true))/k MAE.b2.adj <- sum(abs(pred.b2.adj-true),na.rm = T)/k # MAE.b2.adj = 2.824451 MAE.b2.corr <- sum(abs(pred.b2.corr-true),na.rm = T)/k # MAE.b2.corr = 2.647335 ####### COMPARISON OF MODELS RESULTS AND EVALUATION METRICS ###### WARN ######## MAE.eval <- cbind.data.frame(Model = c("Classic","New Pred 1","New Pred 2"), MAE.adj.list = c(MAE.adj,MAE.b.adj,MAE.b2.adj), MAE.corr.list = c(MAE.corr,MAE.b.corr,MAE.b2.corr)) RMSE.eval <- cbind.data.frame(Model = c("Classic","New Pred 1","New Pred 2"), RMSE.adj.list = c(RMSE.adj,RMSE.b.adj,RMSE.b2.adj), RMSE.corr.list = c(RMSE.corr,RMSE.b.corr,RMSE.b2.corr)) # e <- ggplot() + # geom_line(data = MAE.eval, aes(x = Model, y = MAE.adj.list, color = "red")) + # geom_line(data = MAE.eval, aes(x = Model, y = MAE.corr.list, color = "blue")) + # xlab('Model') + # ylab('MAE') eval <- cbind.data.frame(Model = c("Classic","New Pred 1","New Pred 2"), MAE.adj.list = c(MAE.adj,MAE.b.adj,MAE.b2.adj), MAE.corr.list = c(MAE.corr,MAE.b.corr,MAE.b2.corr), RMSE.adj.list = c(RMSE.adj,RMSE.b.adj,RMSE.b2.adj), RMSE.corr.list = c(RMSE.corr,RMSE.b.corr,RMSE.b2.corr)) # eval.plot <- ggplot(eval, aes(x=Model,y=MAE.adj.list)) + geom_point() # eval.plot <- eval.plot + expand_limits(y = c(1, 4)) # eval.plot <- eval.plot + geom_line() # print(eval.plot) # # aa <- rep(NA,29) # for (i in 1:ncol(true)) { # aa[i] <- cor(pred.adj[,i],true[,i],use = "pairwise") 
# } # names(aa) <- colnames(true) # cor(aa,bitterveg) # # # # b <- rep(NA,22) # for (i in 1:nrow(true)) { # b[i] <- cor(pred.b.adj[i,],true[i,],use = "pairwise") # } # b # summary(b) ######### Rank and classify the ratings ######### WARN ######## ######### in order to show users top 5 vegetables # # View(pred.b.adj) # TOP 10 for adjcos sim col_to_name.adj <- function(col) { dimnames(pred.adj)[[2]][col] } p.adj <- apply(pred.adj,1,function(x){head(order(na.omit(x),decreasing = T),10) }) # View(p) pp.adj <- apply(p.adj,2,function(x) col_to_name.adj(x)) # View(pp.adj) # PER QUASI TUTTI VIENE SCELTO Bastoncini.di.verdure.con.salsine!!! :-( # table(true[2,]) # TOP 10 for corr sim col_to_name.corr <- function(col) { dimnames(pred.corr)[[2]][col] } p.corr <- apply(pred.corr,1,function(x){head(order(na.omit(x),decreasing = T),10) }) # View(p) pp.corr <- apply(p.corr,2,function(x) col_to_name.corr(x)) # View(pp.corr) # PER QUASI TUTTI VIENE SCELTO Pomodori!!! :-( # # TOP 10 for adjcos sim b col_to_name.b.adj <- function(col) { dimnames(pred.b.adj)[[2]][col] } p.b.adj <- apply(pred.b.adj,1,function(x){head(order(na.omit(x),decreasing = T),10) }) # View(p) pp.b.adj <- apply(p.b.adj,2,function(x) col_to_name.b.adj(x)) # View(pp.b.adj) # PER QUASI TUTTI VIENE SCELTO Broccoli!!! :-( # # TOP 10 for corr sim b col_to_name.b.corr <- function(col,mat) { dimnames(pred.b.corr)[[2]][col] } p.b.corr <- apply(pred.b.corr,1,function(x){head(order(na.omit(x),decreasing = T),10) }) # View(p) pp.b.corr <- apply(p.b.corr,2,function(x) col_to_name.b.corr(x)) # View(pp.b.corr) # PER QUASI TUTTI VENGONO SCELTE LE RAPE ROSSE!!! 
:-( # # View(true) # TRUE TOP 10 col_to_name.true <- function(col) { dimnames(true)[[2]][col] } t <- apply(true,1,function(x){head(order(na.omit(x),decreasing = T),10) }) # View(p) tt <- apply(t,2,function(x) col_to_name.b.corr(x)) # View(tt) count <- rep(NA,ncol(tt)) for (i in 1:ncol(tt)){ count[i] <- length(intersect(tt[,i], pp.b.adj[,i])) } count # head(pred.b.adj[i,order(pred.b.adj[i,],na.last = T,decreasing = T)],5) # head(true[i,order(true[i,],na.last = T,decreasing = T)],5) ######### PLS ######### WARN ######### # PLS regression of sensory taste on liking # library(pls) # bit.sens.train <- as.matrix(train[,141:156]) # cons.taste.train <- data.frame(liking = I(bit.lik.train)) # cons.taste.train$sensory <- bit.sens.train # # str(cons.taste.train) # # res <- plsr(liking ~ sensory, ncomp = 10, data = cons.taste.train, validation = "CV") # summary(res) # plot(RMSEP(res), legendpos = "topright") # 3 components are selected (min RMSEP) # plot(res, plottype = "scores", comps = 1:3) # # par(mfrow=c(2,2)) # plot(res, plottype = "biplot", comps = 1:2) # plot(res, plottype = "biplot", comps = c(1,3)) # plot(res, plottype = "biplot", comps = 2:3) # # par(mfrow=c(1,1)) # plot(res, plottype = "correlation", comps = 1:3) # axis(1,las=3) # plot(res, "loadings", comps = 1:3, # labels = "names", xlab = colnames(bit.sens.train) # ) # abline(h = 0) ######### ROC curve ######### WARN ######### # Choose a threshold in order to consider a vegetable as favourite for the consumer or not # (e.g. if the predicted rating is >= 5) # Users rated more or less 29 products of the test set. Some of them with more than the threshold. # They are part of the "relevant information set". # The algorithm predict ratings for each user for each product, more or less. Some of those above # the threshold. These products are part of the "retrieved information set". # These two set could have different dimension and they can contain different products. 
# Hence, it is possible to compute sensibility and specificity, and a ROC curve can be obtained # using different thresholds. p.b.a <- t(apply(pred.b.adj,1,function(x){ ifelse(x >= mean(na.omit(x)),1,0)})) # View(pred.b.adj) View(p.b.a) rowMeans(pred.b.adj,na.rm = T) View(pred.b.adj) t <- t(apply(true,1,function(x){ ifelse(x >= mean(na.omit(x)),1,0)})) # View(pred.b.adj) View(t) rowMeans(true,na.rm = T) View(true) p.b2.a <- apply(pred.b2.adj,2,function(x){ ifelse(x >= 5,1,0)}) View(pred.b.adj) View(p.b2.a) ###################################### TRASH ######### # # rowCounts() # a <- names(ratings) # names(ratings)<- seq(1,184) # # table(ratings$`1`) # # for (j in 1:184) { # ratings$`j`[ratings$`j` %in% "N"] <- NA # } # # # for (i in 1:nrow(ratings)) { # ratings[,i] <- as.numeric(ratings[,i]) # } # ratings$"1" # # ratings[,i] <- as.character(ratings[,i]) # ratings$i[ratings$i == "N"] <- NA # ratings$i[ratings$i == "N"] <- NA # ratings[,i] <- as.character(ratings[,i]) # ratings[,1] # table(data[,368],data[,883]) # table(data[,185],data[,369]) # plot(data[,185:221]) # # getCorrDist <- function(x,y) # { # this.cosine <- sum((x-mean(x))*(y-mean(y))) / (sqrt(sum((x-mean(x))*(x-mean(x)))) * sqrt(sum((y-mean(y))*(y-mean(y))))) # return(this.cosine) # } # # rowMeans(as.matrix(ratings)) # # getAdjCosine <- function(i,j) { # avg.rate <- rowMeans(ratings) # adj.cos <- # } # # # similarity <- matrix(NA, nrow=ncol(ratings), # ncol=ncol(ratings), # dimnames=list(colnames(ratings),colnames(ratings))) # # similarity.1 <- matrix(NA, nrow=ncol(ratings), # ncol=ncol(ratings), # dimnames=list(colnames(ratings),colnames(ratings))) # # # for(i in 1:ncol(ratings)) { # for(j in 1:ncol(ratings)) { # similarity.1[i,j] <- Adjusted_cosine_similarity(ratings[,i],ratings[,j]) # } # } # # for(i in 1:ncol(ratings)) { # for(j in 1:ncol(ratings)) { # similarity[i,j] <- getCorrDist(as.matrix(ratings[,i]),as.matrix(ratings[,j])) # } # } # # # # Back to dataframe # similarity <- 
as.data.frame(similarity) # # # View(similarity) # sim <- as(similarity, "realRatingMatrix") # sim_m <- normalize(sim) # image(similarity) # str(similarity) # # str(r) # rr <- as.matrix(ratings) # # str(rr) # sum(!is.character(ratings)) # # # # # View(similarity) # # # # # library(recommenderlab) # # # # r <- as(ratings, "realRatingMatrix") # # r_m <- normalize(r) # # image(r_m, main = "Normalized Ratings") # # table(ratings) # # # # # head(r_m) # # image(r_m, main = "Normalized Ratings") # # var <- as.vector(colMeans(ratings)) # rrrr <- as.vector(colnames(ratings)) # fff <- as.data.frame(c(var,rrrr)) # fff <- as.data.frame(fff) # qplot(colnames(ratings),colMeans(ratings)) # + geom_point() # + geom_text(data=subset(ratings, colMeans(ratings) < 3)) # # data=subset(ratings, colMeans(ratings) < 4), # aes(colMeans(ratings),colnames(ratings),label=name)) # # # # # b <- as.vector(colMeans(ratings)) # # b$names <- colnames(ratings) # # str(b) # # plot(b) # # View(b[b<4]) # # # a <- as.data.frame(colSums(ratings)) # # a$mean <- as.data.frame(colMeans(ratings)) # # levels(a$mean) <- colnames(ratings) # # str(a) # # View(a) # # # # library(ggplot2) # # # library("devtools") # # # install.packages("ggvis") # # # install.packages("googleVis") # # # install_github("ramnathv/rCharts") # # # # library("ggvis") # # # # library("googleVis") # # # library("rCharts") # # rm(rat) # # scatter.ggplot <- ggplot(a, aes(x = mean)) # # scatter.ggplot # # # # # # if (AdjCos.similarity[i,j] >= quantile(AdjCos.similarity[,j],0.95,na.rm = T)) { # # pred[i,j] <- AdjCos.similarity[i,j] # # } # # # # # # # # # # # # # # # # # # # # pred <- matrix(NA, nrow=11, ncol=ncol(AdjCos.similarity)) # # # # for (i in 1:ncol(AdjCos.similarity)) { # # pred[,i] <- AdjCos.similarity[,i][AdjCos.similarity[,i] > quantile(AdjCos.similarity[,i],0.95,na.rm = T)] # # names(pred[,i]) <- names(AdjCos.similarity[,i][AdjCos.similarity[,i] > quantile(AdjCos.similarity[,i],0.95,na.rm = T)]) # # } # # View(pred) # # 
dimnames(pred)[[2]] <- names() # # names(pred[,5]) <- names(AdjCos.similarity[,5][AdjCos.similarity[,5] > quantile(AdjCos.similarity[,5],0.95,na.rm = T)]) # # pred[,164] # # # # df <- as.data.frame(AdjCos.similarity) # # # # for (i in ncol(AdjCos.similarity)) { # # df$i <- # # > quantile(df$i,0.75,na.rm = T)] # # } # # View(df) # # a[a > quantile(a,0.95,na.rm = T)] # # library(magrittr) # # # # AdjCos.similarity[,AdjCos.similarity[,3] > quantile(AdjCos.similarity[,3],0.99,na.rm = T)] # # # # sim <- AdjCos.similarity # # # # a <- c(0,2,324,512,3,24,1,43,234,2,55,23,3,5,2,5,2,56,6,34) # # quantile(a,0.75) # # a[a < quantile(a,0.75)] # # d[d$Point < quantile(d$Point, 0.95), ] # # # # # # rm(i,j) # # # # AdjCos.similarity[4,4] >= quantile(AdjCos.similarity[,4],0.95,na.rm = T)
5ec69ad9c095f98898eb208cad7a4e1a349dd594
efc2cda5525312640065de5c8cd53b208342cbf8
/man/species_range_shps.Rd
5d366116779001d0124b4f603ee15cc28445c181
[ "LicenseRef-scancode-public-domain-disclaimer", "LicenseRef-scancode-warranty-disclaimer", "CC-BY-4.0", "LicenseRef-scancode-public-domain", "LicenseRef-scancode-unknown-license-reference" ]
permissive
MLBartley/nabatr
68ad577f57d6d37adc2965fd099693df4ba092ee
c805012e54081ba45497e294863d4a5f26aaa7bd
refs/heads/master
2023-04-20T22:11:14.092346
2021-04-07T14:40:48
2021-04-07T14:40:48
null
0
0
null
null
null
null
UTF-8
R
false
true
394
rd
species_range_shps.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/nabat_gql_queries.R \docType{data} \name{species_range_shps} \alias{species_range_shps} \title{NABat species range maps} \format{An object of class \code{SpatialPolygonsDataFrame} with 49 rows and 4 columns.} \usage{ species_range_shps } \description{ Reading in species range maps with readOGR } \keyword{datasets}
002036d1da339a25035a1145b9d001064e103e7b
f0376af6ad82009494a3ce17dbc31794c60f025f
/conseq.precip.days.R
b13a8467a16387646b342397d42f837d0d6bf330
[]
no_license
macroecology/BioExtremes
cd3460018db5708a3f5d0bd07238b04b33449a33
f2180fbd5cd1fc1d238fdedba8eade4eae2b974f
refs/heads/master
2020-03-19T08:09:51.069919
2018-12-07T10:44:05
2018-12-07T10:44:05
136,180,708
0
1
null
2018-12-06T12:14:08
2018-06-05T13:21:28
R
UTF-8
R
false
false
970
r
conseq.precip.days.R
#' Consecutive days of precipitation #' #' Calcultaes the number of consecutive days of rain #' #' The function calculates the number of consecutive days #' #' @usage consec.precip.days(data) #' #' @param data Input vector. This is a vector with the precipitation for each day in one year in one cell #' @param data Output vector. This is a vector which reports the highest number of consecutive days of rain #' #' @example \dontrun #' consec.precip.days(data) #' #' @example \dontrun #' precipitation <- c(0,0,0,0,0,1,2,0,0,3,3,5,6,0) #' #' consec.precip.days(precipitation) consec.precip.days <- function(precipitation){ precip.days <- 0 counter <- 0 for (i in 1: length (precipitation)){ if (precipitation[[i]] > 1){ counter <- counter + 1 }else{ counter <- 0 } if (counter > precip.days){ precip.days <- counter } } precip.days } consec.precip.days(precipitation)
d4121ab73509f0d569bc5816179673618d19040f
508a96b3623d4a70b5ef29dbd2c90ab6a7fc7035
/WaspsOBD-Abundance&DetectionMSE(RandomSelection)-DistanceConstraintMultinomial.r
c54a7166be195928e573a6f501fa67c58985c565
[]
no_license
pablogarciadiaz/Repository-Experimental-Wasps
0afdd4b68a5fc935d8999799f732d0d990f782d3
6e3798ed1dff81b5f6297877c47a9837853219a7
refs/heads/master
2023-03-11T14:08:22.452051
2021-03-01T09:48:07
2021-03-01T09:48:07
338,566,047
0
0
null
null
null
null
UTF-8
R
false
false
9,917
r
WaspsOBD-Abundance&DetectionMSE(RandomSelection)-DistanceConstraintMultinomial.r
#### Bayesian optimal design - Wasps Quadratic Loss - minimise the 1/(msqe) #### Script 2 - optimal design in 2 visits, how many sites? MH algorithm #### Trade-offs number of sites vs distance walked & maximum distance walked ##### Effort within function ##### MSE: abundance per cell & detection coefficients ### Multinomial distribution of values ### Load the libraries library(coda) library(MASS) library(MCMCpack) library(MuMIn) library(jagsUI) library(ggmcmc) library(corrplot) library(nimble) library(dclone) library(beepr) library(parallel) library(doParallel) library(foreach) library(DHARMa) library(compiler) library(lhs) library(modeest) library(truncdist) ### Load data data.wasp<-read.table("e:/CONTAIN/Experimental Design/Wasps/Nest-Count-OptimalExperimental.csv", header=TRUE, sep=",") id.exc<-which(data.wasp$pop.dens==0 | is.na(data.wasp$pop.dens)) data.wasp<-data.wasp[-id.exc, ] summary(data.wasp) nrow(data.wasp) #### Nimble model to fit model.wt<-nimbleCode({ ### Priors ### Intercept of the abundance model alpha.ab~dnorm(0, var=100) ### Quadratic error quad.loss[1]<-pow(param[1]-alpha.ab, 2) ### Slopes of the abundance model for (j in 1:slope.ab){ beta.ab[j]~dnorm(0, var=100) quad.loss[j+1]<-pow(param[j+1]-beta.ab[j], 2) } ### Probability of detection p.det~dbeta(1, 1) quad.loss[slope.ab+2]<-pow(param[slope.ab+2]-p.det, 2) ### 1st session before control for (i in 1:n.site){ ### Abundance log(mean.ab[i])<-alpha.ab+beta.ab[1]*mean.ndvi[i]+beta.ab[2]*var.ndvi[i]+beta.ab[3]*water.length[i] n0[i]~dpois(mean.ab[i]) ### Survey occasions for (j in 1:n.occ){ y0[i, j]~dbinom(p.det, n0[i]) } } ### Mean squared error loss.fun<-mean(quad.loss[1:(slope.ab+2)]) }) #### Utility function to determine the inverse of the determinant ut.fun1<-function(n.site, transect.reps, tot.dist, n.occ, data.wasp){ ### Define effort per cell (walking distance) effort<-rep(500*transect.reps, n.site) ### Choosing cells based on their distance to CEHUM seq.tot<-seq(min(data.wasp$dist.cehum), 
max(data.wasp$dist.cehum), by=5000) seq.band<-seq.tot[-length(seq.tot)] weight.fun<-function(x) length(which(data.wasp$dist.cehum>=x & data.wasp$dist.cehum<x+5000)) count.cells.per.band<-sapply(seq.band, weight.fun) #weight.vals<-count.cells.per.band*(1/seq.band) #### Number of cells for each band of distance to CEHUM (the closeer the better) cell.per.band<-rmultinom(1, n.site[1], 1/seq.band) sel.band<-seq.band[which(cell.per.band!=0)] number.per.band<-cell.per.band[which(cell.per.band!=0)] ### Sampling at random from the selected bands sel.fun<-function(x) sample(which(data.wasp$dist.cehum>=x[1] & data.wasp$dist.cehum<x[1]+5000), x[2]) id.sel<-c(unlist(apply(data.frame(sel.band, number.per.band), 1, sel.fun))) #### Chosen data data.wasp<-data.wasp[id.sel, ] ### NDVI mean.ndvi<-(data.wasp$median.ndvi-mean(data.wasp$median.ndvi))/sd(data.wasp$median.ndvi) var.ndvi<-(data.wasp$var.ndvi-mean(data.wasp$var.ndvi))/sd(data.wasp$var.ndvi) #### Water body length in each cell water.length<-(data.wasp$lenght.water-mean(data.wasp$lenght.water))/sd(data.wasp$lenght.water) cov.data<-data.frame(mean.ndvi=mean.ndvi, var.ndvi=var.ndvi, water.length=water.length) #cov.data ######## Data-generation part #### Initial abundance in each cell alpha.ab<-rnorm(1, 0, 2) beta.ab<-c(runif(2, 0, 2), runif(1, -1, 1)) mean.pop<-exp(alpha.ab+beta.ab[1]*cov.data$mean.ndvi+beta.ab[2]*cov.data$water.length+beta.ab[3]*cov.data$var.ndvi) n0<-rpois(n.site, lambda=mean.pop) ### Intercept and slope of the effect of survey effort alpha.det<-runif(1, -5, 0) beta.det<-runif(1, 0, 5) ### 1st session - generate detection histories y0<-matrix(0, nrow=n.site, ncol=n.occ) for (i in 1:n.site){ ### Probability of detection by site and occasion p.det1<-plogis(alpha.det+beta.det*log10(effort[i])) y0[i, ]<-rbinom(2, n0[i], p.det1) } ## Bayesian modelling ### Data for the model data.tot<-list(y0=y0, param=c(alpha.ab, beta.ab, p.det1), mean.ndvi=cov.data$mean.ndvi, var.ndvi=cov.data$var.ndvi, 
water.length=cov.data$water.length) inits<-list(p.det=runif(1, 0, 1), alpha.ab=0, beta.ab=rep(0, 3), n0=apply(y0, 1, max)*2) ### Constants constants<-list(n.site=n.site, n.occ=n.occ, slope.ab=3) ###### THE MODEL Rmodel<-nimbleModel(code=model.wt, constants=constants, data=data.tot, inits=inits) wt.conf<-configureMCMC(Rmodel, monitors=list('loss.fun'), thin=1) Rmcmc<-buildMCMC(wt.conf) Cmodel<-compileNimble(Rmodel) Cmcmc<-compileNimble(Rmcmc, project = Rmodel) ### Three chains and check for burnin m2.wt<-runMCMC(Cmcmc, niter=50000, nchains=1, nburnin=5000, inits=inits, samplesAsCodaMCMC=TRUE, progress=TRUE) ### Inverse of the mean squared error return(1/mean(m2.wt, na.rm=TRUE)) } ut.fun<-cmpfun(ut.fun1) ### Check number of cores for parallel computing n.core<-detectCores() n.core<-5 #### Simulations ### Number of simulation steps n.sim<-200 n.site<-transect.reps<-tot.dist<-rep(NA, n.sim) #### Initial design lh<-improvedLHS(1, 2, dup=5) ### Generate values transect.reps[1]<-round(qunif(lh[, 1], 1, 6)) #### Number of 500-metre sections per site tot.dist[1]<-round(qunif(lh[, 2], 10000, 50000), digits=0) #### Total distance walked to distribute across sites n.site[1]<-round(tot.dist[1]/(500*transect.reps[1]), digits=0) n.occ<-2 #### Simulating stuff!! 
### Number of repeats per step n.rep<-50 ut.sim<-rep(NA, n.sim) ### To store utility value #### Timing the function start.time<-Sys.time() ### Set a progress bar pb<-txtProgressBar(min = 0, max = n.sim, style = 3) #### First design registerDoParallel(n.core) rep.sim<-foreach(i=1:n.rep, .combine = c, .packages=c("nimble")) %dopar% ut.fun(n.site[1], transect.reps=transect.reps[1], tot.dist=tot.dist[1], n.occ=n.occ, data.wasp=data.wasp) ut.sim[1]<-median(rep.sim, na.rm=TRUE) setTxtProgressBar(pb, 1) #### Stop cluster stopImplicitCluster() #### Rest of simulations for (j in 2:n.sim){ new.transect.reps<-round(runif(1, 1, 6), digits=0) #### Number of 500-metre sections per site new.tot.dist<-round(runif(1, 10000, 50000), digits=0) #### Total distance walked to distribute across sites new.site<-round(new.tot.dist/(500*new.transect.reps), digits=0) ### Estimate the utility of the new design registerDoParallel(n.core) new.ut<-foreach(i=1:n.rep, .combine = c, .packages=c("nimble")) %dopar% ut.fun(new.site, new.transect.reps, new.tot.dist, n.occ=n.occ, data.wasp=data.wasp) curr.ut<-median(new.ut, na.rm=TRUE) #### Stop cluster stopImplicitCluster() #### Metropolis acc.thre<-curr.ut/ut.sim[j-1] ### Acceptance criteria rd.num<-runif(1, 0, 1) ### Random number generation if(rd.num<=acc.thre){ ### Accept & update ut.sim[j]<-curr.ut n.site[j]<-new.site transect.reps[j]<-new.transect.reps tot.dist[j]<-new.tot.dist }else{ ### Reject proposal and keep previous iteration ut.sim[j]<-ut.sim[j-1] n.site[j]<-n.site[j-1] transect.reps[j]<-transect.reps[j-1] tot.dist[j]<-tot.dist[j-1] } setTxtProgressBar(pb, j) } ### Close progress bar close(pb) ## End time end.time<-Sys.time() end.time-start.time ### Time to run beep(sound=8) #### Outputs of the designs ut.sim hist(ut.sim) n.site transect.reps tot.dist ## Plotting par(mfrow=c(2, 2)) plot(ut.sim~c(1:n.sim), type="l", lwd=2, xlab="Iteration", ylab="Utility", main="MH Search - Inverse of MSE") #### Number of cells to survey hist(n.site, 
xlab="Number of cells to survey", main="Number of cells to survey") #### Sections per cell hist(transect.reps, xlab="Number of 500-m transects per cell", main="Number of 500-m transects per cell") #### Total walking effort across all cells hist(tot.dist, xlab="Total distance walked (metres)", main="Total distance across all sits (metres walked)") ### Optimal design based on the mean, median, and mode of each trap library(modeest) ### Mode #### Sites mode.site<-meanshift(n.site) mode.site ## 500-m sections per cell mode.reps<-meanshift(transect.reps) mode.reps ## Total distance walked mode.totdist<-meanshift(tot.dist) mode.totdist ### Mean mean.site<-mean(n.site) mean.site mean.reps<-mean(transect.reps) mean.reps mean.totdist<-mean(tot.dist) mean.totdist ### Median median.site<-median(n.site) median.site median.reps<-median(transect.reps) median.reps median.totdist<-median(tot.dist) median.totdist #### Exporting results data.exp<-data.frame(mode.site=meanshift(n.site), mode.reps=meanshift(transect.reps), mode.totdist=meanshift(tot.dist), mean.site=mean(n.site), mean.reps=mean(transect.reps), mean.totdist=mean(tot.dist), median.site=median(n.site), median.reps=median(transect.reps), median.tot.dist=median(tot.dist)) data.exp write.table(data.exp, "e:/CONTAIN/Experimental Design/Wasps/OptimalMH-EstimatedDistance(MSE).csv", sep=",") write.table(ut.sim, "e:/CONTAIN/Experimental Design/Wasps/OptimalMH-Utility(MSE).csv", sep=",") write.table(n.site, "e:/CONTAIN/Experimental Design/Wasps/OptimalMH-NumberofSites(MSE).csv", sep=",") write.table(transect.reps, "e:/CONTAIN/Experimental Design/Wasps/OptimalMH-RepeatsPerCell(MSE).csv", sep=",") write.table(tot.dist, "e:/CONTAIN/Experimental Design/Wasps/OptimalMH-TotalDistance(MSE).csv", sep=",")
97111fdf59ee9344f36fe1dbff48eacf6ef2e8bf
2e4c7f08c116666a1bdaadbe4e9e8c633641b869
/R/cellCellReport-internal.R
611dce3a4970eff81f1931591a92d3a600730e96
[ "Artistic-2.0" ]
permissive
rikenbit/scTensor
581bfa29773b50b9cba4d6cad50148784ed42be4
bfe66bccf230d9786dc277bd00d01f57f724bc33
refs/heads/master
2023-01-22T02:49:42.443599
2023-01-12T09:46:02
2023-01-12T09:46:02
135,140,761
31
6
null
2020-06-13T10:54:57
2018-05-28T09:36:23
R
UTF-8
R
false
false
35,557
r
cellCellReport-internal.R
.SinglePlot <- function(x){ plot(1, 1, col="white", ann=FALSE, xaxt="n", yaxt="n", axes=FALSE) par(ps=100) text(1, 1, x, col="red") } .shrink2 <- function(x, thr=7){ block <- strsplit(as.character(x) , " ")[[1]] l <- length(block) nc <- vapply(block, nchar, 0L) if(l >= 2){ out <- block[1] for(i in 2:l){ end <- length(out) tmp <- block[i] if(nchar(out[end])+nchar(tmp) <= thr){ out[end] <- paste(c(out[end], tmp), collapse=" ") }else{ out[end+1] <- tmp } } paste(out, collapse="\n") }else{ x } } .setColor <- function(col){ if(col == "reds"){ c("#FFF5F0", "#FEE0D2", "#FCBBA1", "#FC9272", "#FB6A4A", "#EF3B2C", "#CB181D", "#A50F15", "#67000D") }else if(col == "blues"){ c("#F7FBFF", "#DEEBF7", "#C6DBEF", "#9ECAE1", "#6BAED6", "#4292C6", "#2171B5", "#08519C", "#08306B") }else if(col == "greens"){ c("#F7FCF5", "#E5F5E0", "#C7E9C0", "#A1D99B", "#74C476", "#41AB5D", "#238B45", "#006D2C", "#00441B") }else if(col == "many"){ c("#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#FFFF33", "#A65628", "#F781BF", "#1B9E77", "#D95F02", "#7570B3", "#E7298A", "#66A61E", "#E6AB02", "#A6761D", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3E2CD", "#FDCDAC", "#CBD5E8", "#F4CAE4", "#E6F5C9", "#FFF2AE", "#F1E2CC", "#7FC97F", "#BEAED4", "#FDC086", "#FFFF99", "#386CB0", "#F0027F", "#BF5B17", "#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5", "#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F", "#FBB4AE", "#B3CDE3", "#CCEBC5", "#DECBE4", "#FED9A6", "#FFFFCC", "#E5D8BD", "#FDDAEC") }else{ stop("Wrong col is specified in .setColor") } } .palf <- colorRampPalette(c("#4b61ba", "gray", "#a87963", "red")) .REACTOMESPC_taxid <- list( "7165" = "anopheles", "3702" = "arabidopsis", "9913" = "bovine", "9615" = "canine", "6239" = "celegans", "9031" = "chicken", "9598" = "chimp", "7227" = "fly", "5811" = "gondii", "9606" = "human", "10090" = "mouse", "9823" = "pig", "10116" = "rat", "8355" = "xenopus", "7955" = "zebrafish" ) .ggdefault_cols 
<- function(n){ hcl(h=seq(15, 375-360/n, length=n)%%360, c=100, l=65) } .knowndbs <- c("DLRP", "IUPHAR", "HPMR", "CELLPHONEDB", "SINGLECELLSIGNALR") .extractLR <- function(sce, lr.evidence, cols){ # SQLite connection con = dbConnect(SQLite(), metadata(sce)$lrbase) LR <- dbGetQuery(con, "SELECT * FROM DATA") dbDisconnect(con) # Extract corresponding rows if(lr.evidence[1] == "all"){ target <- seq(nrow(LR)) }else{ targetKnown <- unique(unlist(lapply(.knowndbs, function(x){grep(x, LR$SOURCEDB)}))) if(lr.evidence[1] == "known"){ target <- targetKnown }else if(lr.evidence[1] == "putative"){ target <- setdiff(seq(nrow(LR)), targetKnown) }else{ target <- unique(unlist(lapply(lr.evidence, function(x){ which(x == LR$SOURCEDB) }))) if(length(target) == 0){ cat("Please specify the valid lr.evidence!\n") cat("In this LRBase package, following databases are avilable as lr.evidence.\n") print(as.matrix(table(LR$SOURCEDB))) stop("\n") } } } .uniqueLR(LR[target, cols]) } .uniqueLR <- function(LR){ # Filtering targetL <- grep("GENEID", LR$GENEID_L, invert = TRUE) targetR <- grep("GENEID", LR$GENEID_R, invert = TRUE) targetNotNAL <- which(!is.na(LR$GENEID_L)) targetNotNAR <- which(!is.na(LR$GENEID_R)) target <- intersect(targetL, intersect(targetR, intersect(targetNotNAL, targetNotNAR))) .shrinkLR(unique(LR[target, ])) } .shrinkLR <- function(LR){ targetShrink <- setdiff(colnames(LR), c("GENEID_L", "GENEID_R")) if(length(targetShrink) != 0){ left <- unique(LR[, c("GENEID_L", "GENEID_R")]) for(i in seq_along(targetShrink)){ if(i == 1){ right <- .shrinkNonLR(LR, left, targetShrink[i]) }else{ right <- cbind(right, .shrinkNonLR(LR, left, targetShrink[i])) } } out <- cbind(left, right) colnames(out) <- c("GENEID_L", "GENEID_R", targetShrink) out }else{ LR } } .shrinkNonLR <- function(LR, left, nonLR){ obj <- list(LR=LR, nonLR=nonLR) apply(left, 1, function(x, obj){ LR = obj$LR nonLR = obj$nonLR targetL = which(LR$GENEID_L == as.character(x[1])) targetR = which(LR$GENEID_R == 
as.character(x[2])) target = intersect(targetL, targetR) if(length(target) != 1){ out <- paste(LR[target, nonLR], collapse="|") out <- unique(strsplit(out, "\\|")[[1]]) paste(setdiff(out, c("-", "")), collapse=" ") }else{ LR[target, nonLR] } }, obj=obj) } .myvisNetwork <- function(g, col=NULL){ # Edges edges = as.data.frame(get.edgelist(g), stringsAsFactors=FALSE) colnames(edges) <- c("from", "to") edges <- data.frame(edges, color=edge.attributes(g)$color, width=edge.attributes(g)$width) if(!is.null(col)){ edges <- edges[which(edges$color == col), ] } # Nodes vattr <- get.vertex.attribute(g) uniqueid <- unique(c(edges$from, edges$to)) color <- sapply(uniqueid, function(x){ pos <- which(vattr$name == x) if(length(pos) == 1){ vattr$color[pos] }else{ "purple" } }) size <- sapply(uniqueid, function(x){ sum(vattr$size[which(vattr$name == x)]) }) nodes <- data.frame( id=uniqueid, type=TRUE, color=color, size=size*5, label=uniqueid ) # Plot visNetwork(nodes, edges) } .omasize <- function(l, r){ # 1 - 16 c(44.7, 44.7, 22.4, 14.9, 11.15, 8.95, 7.45, 6.4, 5.6, 4.98, 4.47, 4.06, 3.75, 3.52, 3.25, 3.05)[max(l, r)] } .oma4 <- function(l, r){ # 1 - 16 c(131.4, 131.4, 109.2, 101.5, 97.75, 95.5, 94.1, 93.1, 92.3, 91.7, 91.2, 90.7, 90.7, 91.70, 91.000, 91.000)[max(l, r)] } .smallTwoDplot <- function(sce, assayNames, geneid, genename, color){ g <- schex::plot_hexbin_feature(sce, type=assayNames, feature=geneid, action="mean", xlab="Dim1", ylab="Dim2", title=paste0("Mean of ", genename)) if(color == "reds"){ g <- g + scale_fill_gradient(low = 'gray', high = 'red') } if(color == "blues"){ g <- g + scale_fill_gradient(low = 'gray', high = 'blue') } g } # For previous LRBase (v-1.0.0 - v-1.2.0) .TAXID <- c( "Hsa" = 9606, "Mmu" = 10090, "Ath" = 3702, "Rno" = 10116, "Bta" = 9913, "Cel" = 6239, "Dme" = 7227, "Dre" = 7955, "Gga" = 9031, "Pab" = 9601, "Xtr" = 8364, "Ssc" = 9823 ) .hyperLinks <- function(ranking, ligandGeneID, receptorGeneID, lr, taxid, value, percentage, GeneInfo, pvalue, 
qvalue, evidence){ ## helper for vector dividing div <- function(x, d=1) {"" delta <- ceiling(length(x) / d) y <- lapply(seq_len(d), function(i){ as.vector(na.omit(x[((i-1)*delta+1):(i*delta)])) }) return(y) } embedLink <- function(genename1, geneid1, description1, go1, reactome1, mesh1, uni1, string1, refex1, ea1, sea1, scdb1, panglao1, cmap1, genename2, geneid2, description2, go2, reactome2, mesh2, uni2, string2, refex2, ea2, sea2, scdb2, panglao2, cmap2){ if(description1 != ""){ description1 <- paste0("Description: ", genename1, description1, "<br>") } if(description2 != ""){ description2 <- paste0("Description: ", genename1, description2, "<br>") } if(go1 != ""){ go1 <- paste0("GO: ", go1, "<br>") } if(go2 != ""){ go2 <- paste0("GO: ", go2, "<br>") } if(reactome1 != ""){ reactome1 <- paste0("Reactome: ", reactome1, "<br>") } if(reactome2 != ""){ reactome2 <- paste0("Reactome: ", reactome2, "<br>") } if(uni1 != ""){ uni1 <- paste0("UniProtKB: ", uni1, "<br>") } if(uni2 != ""){ uni2 <- paste0("UniProtKB: ", uni2, "<br>") } if(string1 != ""){ string1 <- paste0("STRING: ", string1, "<br>") } if(string2 != ""){ string2 <- paste0("STRING: ", string2, "<br>") } if(refex1 != ""){ refex1 <- paste0("RefEx: [", genename1, "](", refex1, ")<br>") } if(refex2 != ""){ refex2 <- paste0("RefEx: [", genename1, "](", refex2, ")<br>") } if(ea1 != ""){ ea1 <- paste0("Expression Atlas: [", genename1, "](", ea1, ")<br>") } if(ea2 != ""){ ea2 <- paste0("Expression Atlas: [", genename2, "](", ea2, ")<br>") } if(sea1 != ""){ sea1 <- paste0("Single Cell Expression Atlas: [", genename1, "](", sea1, ")<br>") } if(sea2 != ""){ sea2 <- paste0("Single Cell Expression Atlas: [", genename2, "](", sea2, ")<br>") } if(scdb1 != ""){ scdb1 <- paste0("scRNASeqDB: [", genename1, "](", scdb1, ")<br>") } if(scdb2 != ""){ scdb2 <- paste0("scRNASeqDB: [", genename2, "](", scdb2, ")<br>") } if(panglao1 != ""){ panglao1 <- paste0("PanglaoDB: [", genename1, "](", panglao1, ")<br>") } if(panglao2 != ""){ 
panglao2 <- paste0("PanglaoDB: [", genename2, "](", panglao2, ")<br>") } if(cmap1 != ""){ cmap1 <- paste0("CMap: [", genename1, "](", cmap1, ")<br>") } if(cmap2 != ""){ cmap2 <- paste0("CMap: [", genename2, "](", cmap2, ")<br>") } if(mesh1 != ""){ mesh1 <- paste0("MeSH: ", mesh1) } if(mesh2 != ""){ mesh2 <- paste0("MeSH: ", mesh2) } # Output paste0( "[", genename1, "](https://www.ncbi.nlm.nih.gov/gene/", geneid1, ")<br>", description1, go1, reactome1, uni1, string1, refex1, ea1, sea1, scdb1, panglao1, cmap1, mesh1, "|", "[", genename2, "](https://www.ncbi.nlm.nih.gov/gene/", geneid2, ")<br>", description2, go2, reactome2, uni2, string2, refex2, ea2, sea2, scdb2, panglao2, cmap2, mesh2, "|" ) } convertGeneName <- function(geneid, GeneInfo){ if(!is.null(GeneInfo$GeneName)){ genename <- GeneInfo$GeneName[ which(GeneInfo$GeneName$ENTREZID == geneid), "SYMBOL"][1] if(is.null(genename)){ genename = geneid }else{ if(is.na(genename)){ genename = geneid } if(length(genename) == 0 || genename %in% c("", NA)){ genename = geneid } } }else{ genename = "" } genename } convertGeneDescription <- function(geneid, GeneInfo){ if(!is.null(GeneInfo$Description)){ description <- GeneInfo$Description[ which(GeneInfo$Description$ENTREZID == geneid), "GENENAME"][1] if(length(description) == 0 || description %in% c("", NA)){ description = "" } description }else{ description = "" } } convertGeneOntology <- function(geneid, GeneInfo){ if(!is.null(GeneInfo$GO)){ GO <- unique(unlist(GeneInfo$GO[ which(GeneInfo$GO$ENTREZID == geneid), "GO"])) GO <- GO[which(GO != "")] GO <- gsub(":", "%3A", GO) if(length(GO) != 0){ GO_loc <- div(seq_along(GO), ceiling(length(GO) / 100)) GO <- lapply(GO_loc, function(x){ mi <- min(x) ma <- max(x) if(ma == 1){ paste0("[", mi, "](", "http://amigo.geneontology.org/amigo/search/ontology?q=", paste0(GO[mi:ma], collapse="%20"), ")") }else{ paste0("[", mi, "-", ma, "](", "http://amigo.geneontology.org/amigo/search/ontology?q=", paste0(GO[mi:ma], collapse="%20"), ")") } 
}) GO <- paste(unlist(GO), collapse=" ") }else{ GO <- "" } }else{ GO = "" } GO } convertReactome <- function(geneid, GeneInfo){ if(!is.null(GeneInfo$Reactome)){ Reactome <- unique(unlist(GeneInfo$Reactome[ which(GeneInfo$Reactome$gene_id == geneid), "DB_ID"])) Reactome <- Reactome[which(Reactome != "")] if(length(Reactome) != 0){ Reactome_loc <- div(seq_along(Reactome), ceiling(length(Reactome) / 100)) Reactome <- lapply(Reactome_loc, function(x){ mi <- min(x) ma <- max(x) if(ma == 1){ paste0("[", mi, "](", "https://reactome.org/content/query?q=", paste0(Reactome[mi:ma], collapse="+"), "&types=Pathway&cluster=true)") }else{ paste0("[", mi, "-", ma, "](", "https://reactome.org/content/query?q=", paste0(Reactome[mi:ma], collapse="+"), "&types=Pathway&cluster=true)") } }) Reactome = paste(unlist(Reactome), collapse=" ") }else{ Reactome = "" } }else{ Reactome = "" } Reactome } convertMeSH <- function(geneid, GeneInfo){ if(!is.null(GeneInfo$MeSH)){ MeSH <- GeneInfo$MeSH[which(GeneInfo$MeSH$GENEID == geneid), "MESHID"] MeSH <- MeSH[which(MeSH != "")] if(length(MeSH) != 0){ MeSH_loc <- div(seq_along(MeSH), ceiling(length(MeSH) / 100)) MeSH <- lapply(MeSH_loc, function(x){ mi <- min(x) ma <- max(x) if(ma == 1){ paste0("[", mi, "](", "https://www.ncbi.nlm.nih.gov/mesh?term=", paste0(MeSH[mi:ma], collapse="%20OR%20"), ")") }else{ paste0("[", mi, "-", ma, "](", "https://www.ncbi.nlm.nih.gov/mesh?term=", paste0(MeSH[mi:ma], collapse="%20OR%20"), ")") } }) MeSH = paste(unlist(MeSH), collapse=" ") }else{ MeSH = "" } }else{ MeSH = "" } MeSH } convertUniProtKB <- function(geneid, GeneInfo){ if(!is.null(GeneInfo$UniProtKB)){ UniProtKB <- unique(unlist(GeneInfo$UniProtKB[ which(GeneInfo$UniProtKB$ENTREZID == geneid), "UNIPROT"])) UniProtKB <- UniProtKB[which(UniProtKB != "")] if(length(UniProtKB) != 0){ UniProtKB_loc <- div(seq_along(UniProtKB), ceiling(length(UniProtKB) / 100)) UniProtKB <- lapply(UniProtKB_loc, function(x){ mi <- min(x) ma <- max(x) if(ma == 1){ paste0("[", mi, 
"](", "https://www.uniprot.org/uniprot/?query=", paste0(UniProtKB[mi:ma], collapse="+OR+"), "&sort=score)") }else{ paste0("[", mi, "-", ma, "](", "https://www.uniprot.org/uniprot/?query=", paste0(UniProtKB[mi:ma], collapse="+OR+"), "&sort=score)") } }) UniProtKB <- paste(unlist(UniProtKB), collapse=" ") }else{ UniProtKB <- "" } }else{ UniProtKB = "" } UniProtKB } # taxidでの生物種フィルタリングが必要 convertSTRING <- function(geneid, GeneInfo, taxid){ if(!is.null(GeneInfo$ENSP)){ ENSP <- unique(unlist(GeneInfo$ENSP[ which(GeneInfo$ENSP$ENTREZID == geneid), "ENSEMBLPROT"])) ENSP <- ENSP[which(ENSP != "")] if(length(ENSP) != 0 && !is.na(taxid)){ STRING <- paste0("https://string-db.org/network/", taxid, ".", ENSP) STRING <- paste( paste0("[", seq_along(STRING), "](", STRING, ")"), collapse=" ") }else{ STRING <- "" } }else{ STRING = "" } STRING } convertRefEx <- function(geneid, GeneInfo, taxid){ if(!is.null(GeneInfo$GeneName)){ genename <- GeneInfo$GeneName[ which(GeneInfo$GeneName$ENTREZID == geneid), "SYMBOL"][1] if(length(genename) != 0){ ref <- "http://refex.dbcls.jp/genelist.php?gene_name%5B%5D=" if(taxid == "9606"){ paste0(ref, genename, "&lang=en&db=human") }else if(taxid == "10090"){ paste0(ref, genename, "&lang=en&db=mouse") }else if(taxid == "10116"){ paste0(ref, genename, "&lang=en&db=rat") }else{ "" } }else{ "" } }else{ "" } } # taxidでの生物種フィルタリングが必要 convertEA <- function(geneid, GeneInfo){ if(!is.null(GeneInfo$ENSG)){ ENSG <- GeneInfo$ENSG[ which(GeneInfo$GeneName$ENTREZID == geneid), "ENSEMBL"][1] if(length(ENSG) != 0){ paste0("https://www.ebi.ac.uk/gxa/genes/", tolower(ENSG)) }else{ "" } }else{ "" } } convertSEA <- function(geneid, GeneInfo){ if(!is.null(GeneInfo$GeneName)){ genename <- GeneInfo$GeneName[ which(GeneInfo$GeneName$ENTREZID == geneid), "SYMBOL"][1] if(length(genename) != 0){ paste0("https://www.ebi.ac.uk/gxa/sc/search?species=&q=", genename) }else{ "" } }else{ "" } } convertSCDB <- function(geneid, GeneInfo, taxid){ if(!is.null(GeneInfo$GeneName)){ 
genename <- GeneInfo$GeneName[ which(GeneInfo$GeneName$ENTREZID == geneid), "SYMBOL"][1] if(length(genename) != 0 && taxid == "9606"){ paste0("https://bioinfo.uth.edu/scrnaseqdb/", "index.php?r=site/rankGene&gene=", genename, "&check=0") }else{ "" } }else{ "" } } convertPANGLAO <- function(geneid, GeneInfo, taxid){ if(!is.null(GeneInfo$GeneName)){ genename <- GeneInfo$GeneName[ which(GeneInfo$GeneName$ENTREZID == geneid), "SYMBOL"][1] if(length(genename) != 0){ ref <- "https://panglaodb.se/search.html?query=" if(taxid == "9606"){ paste0(ref, genename, "&species=3") }else if(taxid == "10090"){ paste0(ref, genename, "&species=2") }else{ "" } }else{ "" } }else{ "" } } convertCMAP <- function(geneid, GeneInfo, taxid){ if(!is.null(GeneInfo$GeneName)){ genename <- GeneInfo$GeneName[ which(GeneInfo$GeneName$ENTREZID == geneid), "SYMBOL"][1] if(length(genename) != 0){ ref <- "https://clue.io/command?q=" if(taxid == "9606"){ paste0(ref, genename) }else{ "" } }else{ "" } }else{ "" } } convertPubMed <- function(geneid1, geneid2, lr){ target <- intersect( which(lr$GENEID_L == geneid1), which(lr$GENEID_R == geneid2)) PubMed <- lr$SOURCEID[target] LEN <- length(target) != 0 NUL <- !is.null(lr$SOURCEID[target]) HYP <- length(grep("-", PubMed)) == 0 if(LEN && NUL && HYP){ PubMed <- unique(strsplit(lr$SOURCEID[target], "\\|")[[1]]) PubMed_loc <- div(seq_along(PubMed), ceiling(length(PubMed) / 100)) PubMed <- lapply(PubMed_loc, function(x){ mi <- min(x) ma <- max(x) if(ma == 1){ paste0("[", mi, "](", "https://www.ncbi.nlm.nih.gov/pubmed/?term=", paste0(PubMed[mi:ma], collapse="%20OR%20"), ")") }else{ paste0("[", mi, "-", ma, "](", "https://www.ncbi.nlm.nih.gov/pubmed/?term=", paste0(PubMed[mi:ma], collapse="%20OR%20"), ")") } }) PubMed = paste(unlist(PubMed), collapse=" ") }else{ PubMed = "" } PubMed } # ranking XYZ <- paste0("|", ranking, "|") # Gene Name (Ligand) GeneName_L <- convertGeneName(ligandGeneID, GeneInfo) # Gene Name (Receptor) GeneName_R <- 
convertGeneName(receptorGeneID, GeneInfo) # Description (Ligand) Description_L <- convertGeneDescription(ligandGeneID, GeneInfo) # Description (Receptor) Description_R <- convertGeneDescription(receptorGeneID, GeneInfo) # Gene Ontology (Ligand) GO_L <- convertGeneOntology(ligandGeneID, GeneInfo) # Gene Ontology (Receptor) GO_R <- convertGeneOntology(receptorGeneID, GeneInfo) # Reactome (Ligand) Reactome_L <- convertReactome(ligandGeneID, GeneInfo) # Reactome (Receptor) Reactome_R <- convertReactome(receptorGeneID, GeneInfo) # MeSH (Ligand) MeSH_L <- convertMeSH(ligandGeneID, GeneInfo) # MeSH (Receptor) MeSH_R <- convertMeSH(receptorGeneID, GeneInfo) # UniProtKB(Ligand) UniProtKB_L <- convertUniProtKB(ligandGeneID, GeneInfo) # UniProtKB(Receptor) UniProtKB_R <- convertUniProtKB(receptorGeneID, GeneInfo) # STRING(Ligand) STRING_L <- convertSTRING(ligandGeneID, GeneInfo, taxid) # STRING(Receptor) STRING_R <- convertSTRING(receptorGeneID, GeneInfo, taxid) # RefEx(Ligand) RefEx_L <- convertRefEx(ligandGeneID, GeneInfo, taxid) # RefEx(Receptor) RefEx_R <- convertRefEx(receptorGeneID, GeneInfo, taxid) # EA(Ligand) EA_L <- convertEA(ligandGeneID, GeneInfo) # EA(Receptor) EA_R <- convertEA(receptorGeneID, GeneInfo) # SEA(Ligand) SEA_L <- convertSEA(ligandGeneID, GeneInfo) # SEA(Receptor) SEA_R <- convertSEA(receptorGeneID, GeneInfo) # SCDB(Ligand) SCDB_L <- convertSCDB(ligandGeneID, GeneInfo, taxid) # SCDB(Receptor) SCDB_R <- convertSCDB(receptorGeneID, GeneInfo, taxid) # PANGLAO(Ligand) PANGLAO_L <- convertPANGLAO(ligandGeneID, GeneInfo, taxid) # PANGLAO(Receptor) PANGLAO_R <- convertPANGLAO(receptorGeneID, GeneInfo, taxid) # CMAP(Ligand) CMAP_L <- convertCMAP(ligandGeneID, GeneInfo, taxid) # CMAP(Receptor) CMAP_R <- convertCMAP(receptorGeneID, GeneInfo, taxid) # PubMed (L and R) PubMed <- convertPubMed(ligandGeneID, receptorGeneID, lr) # Embedding paste0(XYZ, embedLink(GeneName_L, ligandGeneID, Description_L, GO_L, Reactome_L, MeSH_L, UniProtKB_L, STRING_L, RefEx_L, EA_L, 
SEA_L, SCDB_L, PANGLAO_L, CMAP_L, GeneName_R, receptorGeneID, Description_R, GO_R, Reactome_R, MeSH_R, UniProtKB_R, STRING_R, RefEx_R, EA_R, SEA_R, SCDB_R, PANGLAO_R, CMAP_R ), "![](figures/Ligand/", ligandGeneID, ".png)", "|", "![](figures/Receptor/", receptorGeneID, ".png)", "|", round(value, 3), " (", round(percentage, 3), "%)", "|", round(pvalue, 3), "|", round(qvalue, 3), "|", evidence, "|", PubMed, "|\n") } .HCLUST <- function(x){ out <- hclust(dist(x), method="ward.D") cluster <- cutree(out, 2) max1 <- max(x[which(cluster == 1)]) max2 <- max(x[which(cluster == 2)]) if(max1 > max2){ cluster[which(cluster == 1)] <- "selected" cluster[which(cluster == 2)] <- "not selected" }else{ cluster[which(cluster == 1)] <- "not selected" cluster[which(cluster == 2)] <- "selected" } cluster } .OUTLIERS <- function(x, method="grubbs"){ ranking = length(x) - rank(x) + 1 vapply(ranking, function(thr){ remain = which(ranking > thr) if(length(remain) > 2){ if(method == "grubbs"){ grubbs.test(x[remain])$p }else if(method == "chisq"){ chisq.out.test(x[remain])$p } }else{ 1 } }, 0.0) } .shrink <- function(x){ block <- strsplit(x , " & ")[[1]] l <- length(block) nc <- vapply(block, nchar, 0L) if(l >= 2){ out <- block[1] for(i in 2:l){ end <- length(out) tmp <- block[i] if(nchar(out[end])+nchar(tmp) <= 45){ out[end] <- paste(c(out[end], tmp), collapse=" & ") }else{ out[end+1] <- tmp } } paste(out, collapse="\n") }else{ x } } .eachVecLR <- function(x, e){ p <- e$p index <- e$index sce <- e$sce ah <- e$ah .HCLUST <- e$.HCLUST .OUTLIERS <- e$.OUTLIERS top <- e$top GeneInfo <- e$GeneInfo out.dir <- e$out.dir .smallTwoDplot <- e$.smallTwoDplot input <- e$input twoD <- e$twoD .hyperLinks <- e$.hyperLinks LR <- e$LR taxid <- e$taxid .eachRender <- e$.eachRender .XYZ_HEADER1 <- e$.XYZ_HEADER1 .XYZ_HEADER2 <- e$.XYZ_HEADER2 .XYZ_HEADER3 <- e$.XYZ_HEADER3 .XYZ_ENRICH <- e$.XYZ_ENRICH out.vecLR <- e$out.vecLR algorithm <- e$algorithm goenrich <- e$goenrich meshenrich <- e$meshenrich 
reactomeenrich <- e$reactomeenrich doenrich <- e$doenrich ncgenrich <- e$ncgenrich dgnenrich <- e$dgnenrich # Each LR-Pattern Vector if(algorithm == "ntd2"){ vecLR <- metadata(sce)$sctensor$lrpair@data[x[1], x[2],] } if(algorithm == "ntd"){ vecLR <- metadata(sce)$sctensor$lrpair[x, ] } # Clustering ClusterLR <- .HCLUST(vecLR) # P-value of Grubbs test PvalueLR <- suppressWarnings(.OUTLIERS(vecLR)) # FDR control by BH method QvalueLR <- p.adjust(PvalueLR, "BH") # TARGET elements by Clustering TARGET <- which(ClusterLR == "selected") TARGET <- TARGET[order(vecLR[TARGET], decreasing=TRUE)] # TOP elements if(top != "full"){ TOP <- min(top, length(TARGET)) TARGET <- TARGET[seq_len(TOP)] }else{ TOP <- "full" } # L-R evidence targetL <- unlist(lapply(names(TARGET), function(x){strsplit(x, "_")[[1]][1]})) targetR <- unlist(lapply(names(TARGET), function(x){strsplit(x, "_")[[1]][2]})) targetL <- unlist(lapply(targetL, function(x){ which(LR$GENEID_L == x) })) targetR <- unlist(lapply(targetR, function(x){ which(LR$GENEID_R == x) })) target <- intersect(targetL, targetR) Evidence <- LR[target, "SOURCEDB"] # Enrichment (Too Heavy) all <- unique(unlist(strsplit(names(vecLR), "_"))) sig <- unique(unlist(strsplit(names(TARGET), "_"))) # GO-Check if("GO" %ni% AnnotationDbi::columns(ah)){ goenrich <- FALSE } # MeSH-Check ah <- AnnotationHub() mcah <- mcols(ah) mesh_org_msg <- gsub("LRBaseDb", "MeSHDb", ah[metadata(sce)$ahid]$title) position <- regexpr("v[0-9][0-9][0-9]", mesh_org_msg) mesh_version <- substr(mesh_org_msg, position, position + 3) mesh_db_msg <- paste0("MeSHDb for MeSH.db (", mesh_version, ")") ahid1 <- mcah@rownames[which(mcah@listData$title == mesh_org_msg)] ahid2 <- mcah@rownames[which(mcah@listData$title == mesh_db_msg)] if(length(ahid1) != 0){ message(paste0("Related MeSH IDs are retrieved from ", "AnnotationHub...")) e$meshannotation <- MeSHDbi::MeSHDb(ah[[ahid1]]) e$meshdb <- MeSHDbi::MeSHDb(ah[[ahid2]]) }else{ meshenrich <- FALSE e$meshannotation <- NA e$meshdb 
<- NA }  # NOTE(review): tail of a function that begins above this chunk; full definition not visible here
    # Reactome-Check: no species mapping for this taxid means Reactome
    # enrichment cannot run, so it is switched off.
    reactomespc <- .REACTOMESPC_taxid[[taxid]]
    if(is.null(reactomespc)){
        reactomeenrich <- FALSE
    }
    # DO/NCG/DGN-Check: these databases are only queried for human
    # (NCBI taxonomy ID "9606"); disable them otherwise.
    if(taxid != "9606"){
        doenrich <- FALSE
        ncgenrich <- FALSE
        dgnenrich <- FALSE
    }
    Enrich <- suppressWarnings(.ENRICHMENT(all, sig, e, reactomespc,
        goenrich, meshenrich, reactomeenrich, doenrich, ncgenrich,
        dgnenrich, p, ah))
    # Eigen Value
    # Each LR-Pattern Vector: the tensor layout differs by algorithm,
    # "ntd2" stores an array in @data indexed by two pattern indices,
    # "ntd" stores a matrix indexed by a single pattern index.
    if(algorithm == "ntd2"){
        Value <- metadata(sce)$sctensor$lrpair@data[x[1], x[2], TARGET]
        Percentage <- Value / sum(metadata(sce)$sctensor$lrpair@data[x[1], x[2], ]) * 100
    }
    if(algorithm == "ntd"){
        Value <- metadata(sce)$sctensor$lrpair[x, TARGET]
        Percentage <- Value / sum(metadata(sce)$sctensor$lrpair[x, ]) * 100
    }
    # Hyper Link (Too Heavy): build one HTML hyperlink string per
    # ligand-receptor pair in TARGET (names are "<ligand>_<receptor>"
    # Entrez ID pairs).
    cat("Hyper-links are embedded...\n")
    if(!is.null(GeneInfo$GeneName)){
        GeneName <- GeneInfo$GeneName[, c("SYMBOL", "ENTREZID")]
    }else{
        GeneName <- rep(FALSE, length=length(TARGET))
    }
    LINKS <- vapply(seq_along(TARGET), function(xx){
        # IDs: split the pair name into ligand / receptor Entrez IDs
        Ranking <- xx
        L_R <- strsplit(names(TARGET[xx]), "_")
        LigandGeneID <- L_R[[1]][1]
        ReceptorGeneID <- L_R[[1]][2]
        if(!is.null(GeneInfo$GeneName)){
            # Map Entrez ID -> SYMBOL; fall back to the ID itself when
            # the lookup yields nothing, "", or NA.
            LigandGeneName <- GeneName[which(GeneName[,2] == LigandGeneID), 1]
            ReceptorGeneName <- GeneName[which(GeneName[,2] == ReceptorGeneID), 1]
            if(length(LigandGeneName) == 0 || LigandGeneName %in% c("", NA)){
                LigandGeneName <- LigandGeneID
            }
            if(length(ReceptorGeneName) == 0 || ReceptorGeneName %in% c("", NA)){
                ReceptorGeneName <- ReceptorGeneID
            }
        }else{
            LigandGeneName <- LigandGeneID
            ReceptorGeneName <- ReceptorGeneID
        }
        # Return Hyper links
        .hyperLinks(Ranking, LigandGeneID, ReceptorGeneID, LR,
            taxid, Value[xx], Percentage[xx], GeneInfo, PvalueLR[xx],
            QvalueLR[xx], Evidence[xx])
    }, "")
    LINKS <- paste(LINKS, collapse="")
    # Output object: everything the report template needs for this pattern
    list(
        ClusterLR=ClusterLR,
        PvalueLR=PvalueLR,
        QvalueLR=QvalueLR,
        TARGET=TARGET,
        TOP=TOP,
        Enrich=Enrich,
        Value=Value,
        Percentage=Percentage,
        LINKS=LINKS
    )
}

# Save one 2-D expression plot per unique ligand and receptor gene in LR
# to out.dir/figures/Ligand|Receptor/<EntrezID>.png. Gene symbols are taken
# from GeneInfo$GeneName when available, otherwise the Entrez ID is used as
# the plot label. Genes absent from rownames(input) are skipped.
.genePlot <- function(sce, assayNames, input, out.dir, GeneInfo, LR){
    GeneName <- GeneInfo$GeneName
    LigandGeneID <- unique(LR$GENEID_L)
    ReceptorGeneID <- unique(LR$GENEID_R)
    if(!is.null(GeneName)){
        # First SYMBOL match per Entrez ID
        LigandGeneName <- vapply(LigandGeneID, function(x){
            GeneName[which(GeneName$ENTREZID == x)[1], "SYMBOL"]}, "")
        ReceptorGeneName <- vapply(ReceptorGeneID, function(x){
            GeneName[which(GeneName$ENTREZID == x)[1], "SYMBOL"]}, "")
    }else{
        LigandGeneName <- LigandGeneID
        ReceptorGeneName <- ReceptorGeneID
    }
    # Plot (Ligand)
    lapply(seq_along(LigandGeneID), function(x){
        Ligandfile <- paste0(out.dir, "/figures/Ligand/",
            LigandGeneID[x], ".png")
        target <- which(rownames(input) == LigandGeneID[x])
        if(length(target) != 0){
            g <- .smallTwoDplot(sce, assayNames, LigandGeneID[x],
                LigandGeneName[x], "reds")
            ggsave(Ligandfile, plot=g, dpi=200, width=6, height=6)
        }
    })
    # Plot (Receptor)
    lapply(seq_along(ReceptorGeneID), function(x){
        Receptorfile <- paste0(out.dir, "/figures/Receptor/",
            ReceptorGeneID[x], ".png")
        target <- which(rownames(input) == ReceptorGeneID[x])
        if(length(target) != 0){
            g <- .smallTwoDplot(sce, assayNames, ReceptorGeneID[x],
                ReceptorGeneName[x], "blues")
            ggsave(Receptorfile, plot=g, dpi=200, width=6, height=6)
        }
    })
}

# For each of the numLPattern ligand patterns, colour every cell by its
# loading in that pattern (smoothPalette over col.ligand) and save a 2-D
# scatter of the cells (twoD coordinates) as
# out.dir/figures/Pattern_<i>__.png with a transparent background.
.ligandPatternPlot <- function(numLPattern, celltypes, sce, col.ligand,
    ClusterL, out.dir, twoD){
    vapply(seq_len(numLPattern), function(i){
        label.ligand <- unlist(vapply(names(celltypes), function(x){
            metadata(sce)$sctensor$ligand[paste0("Dim", i), x]}, 0.0))
        label.ligand[] <- smoothPalette(label.ligand,
            palfunc=colorRampPalette(col.ligand, alpha=TRUE))
        LPatternfile <- paste0(out.dir, "/figures/Pattern_", i, "__", ".png")
        png(filename=LPatternfile, width=1000, height=1000, bg="transparent")
        par(ps=20)
        plot(twoD, col=label.ligand, pch=16, cex=2, bty="n",
            xaxt="n", yaxt="n", xlab="", ylab="", main="")
        dev.off()
    }, 0L)
}

# Receptor-side counterpart of .ligandPatternPlot: one
# out.dir/figures/Pattern__<i>_.png per receptor pattern, cells coloured
# by their loading in metadata(sce)$sctensor$receptor.
.receptorPatternPlot <- function(numRPattern, celltypes, sce, col.receptor,
    ClusterR, out.dir, twoD){
    vapply(seq_len(numRPattern), function(i){
        label.receptor <- unlist(vapply(names(celltypes), function(x){
            metadata(sce)$sctensor$receptor[paste0("Dim", i), x]}, 0.0))
        label.receptor[] <- smoothPalette(label.receptor,
            palfunc=colorRampPalette(col.receptor, alpha=TRUE))
        RPatternfile = paste0(out.dir, "/figures/Pattern__", i, "_", ".png")
        png(filename=RPatternfile, width=1000, height=1000, bg="transparent")
        par(ps=20)
        plot(twoD, col=label.receptor, pch=16, cex=2, bty="n",
            xaxt="n", yaxt="n", xlab="", ylab="", main="")
        dev.off()
    }, 0L)
}
551ad260114ae0ed026a17b1baa69820cc6b2b6d
a1ed65f1b6689d842b9a476df34151f10ddfa596
/lavoroinflazione.R
9257083177f0321c7bfe06d687bba07818c947a5
[]
no_license
nbadino/gtrend-inflation
3f67ade76a8539a04ed3e80ef9756b18a243b4b6
66a64a20b9fab0b49b3759559c50d77b8a804a87
refs/heads/main
2023-05-15T09:15:22.395741
2021-06-04T06:59:46
2021-06-04T06:59:46
372,734,961
0
0
null
null
null
null
UTF-8
R
false
false
2,489
r
lavoroinflazione.R
# Nowcasting Italian CPI inflation with Google Trends data.
# Compares three ARIMA models for monthly CPI changes: a plain ARIMA,
# one with lagged Google-search regressors, and one with lagged ISTAT
# consumer inflation expectations.
library(lubridate)
library(tidyverse)
library(forecast)

# import data (fetched from the project's GitHub repository)
google <- read.csv("https://raw.githubusercontent.com/nbadino/gtrend-inflation/main/google.csv")
istat<-read.csv("https://raw.githubusercontent.com/nbadino/gtrend-inflation/main/istat.csv")
CPI <-read.csv("https://raw.githubusercontent.com/nbadino/gtrend-inflation/main/CPI.csv")

# Fill ISTAT row 196 by linearly interpolating its neighbours
# (presumably a missing survey month — TODO confirm against the raw file).
istat[196:196,2:8]<-(istat[195:195,2:8]+istat[197:197,2:8])/2
head(google)

# Google inflation-attention index: squared combination of the Italian
# search volumes for "inflazione" (weight 2) and "deflazione".
delta_google <- (google$inflazione...Italia.*2-google$deflazione...Italia.)^2

# function for standardization (z-score)
STD <- function (x) {
    std <-((x- mean(x))/sd(x))
    return(std)
}

# dataframe creation: month stamps, log-damped Google index, ISTAT
# expectations balance and monthly CPI change (rows 73:209 align the
# ISTAT/CPI series with the Google sample — verify alignment).
DATA=data.frame(data=ym(google$Mese), google=log(2+STD(delta_google)))
DATA$attese <- STD((istat$saldo[73:209]))
DATA$CPI <- CPI$delta[73:209]*100

# Visual comparison of the three standardized series.
ggplot(data=DATA)+
    geom_line(aes(x=data, y=attese, color="attese"))+
    geom_line(aes(x=data,y=google, color="google"))+
    geom_line(aes(x=data,y=CPI, color="cpi"))+
    xlab("Dates")+
    ylab("Indexes")

# regressor matrices: lags 1 and 2 of the Google index and of the
# expectations series (note: `<-` inside cbind() also creates lag_1/lag_2
# in the global environment; the column values are what matters here).
regressori_gi <- cbind(
    lag_1 <- lag(DATA$google,1),
    lag_2 <- lag(DATA$google,2))
regressori_attese <- cbind(
    lag_1 <- lag(DATA$attese,1),
    lag_2 <- lag(DATA$attese,2))
DATA$google_lag1 <- regressori_gi[,1]
DATA$google_lag2 <- regressori_gi[,2]
DATA$attese_lag1 <- regressori_attese[,1]
DATA$attese_lag2 <- regressori_attese[,2]
covariates <- c("google_lag1", "google_lag2")
covariates2 <- c("attese_lag1", "attese_lag2")

# Train on observations 1-126, hold out 127-132 for forecast evaluation.
train <- window(ts(DATA), end = 126)
test <- window(ts(DATA), start = 127, end= 132)

# CPI model: baseline ARIMA on CPI alone
autoplot(train[,"CPI"])+
    xlab("year")
ggAcf(train[,"CPI"])
ggPacf(train[,"CPI"])
modello_cpi <- auto.arima(train[,"CPI"])
summary(modello_cpi)
checkresiduals(modello_cpi)
fcast_cpi <- forecast(modello_cpi)
autoplot(fcast_cpi)
summary(fcast_cpi)
accuracy(fcast_cpi,test[,"CPI"])

# Google-index model: ARIMA with lagged Google regressors
modello_gi <- auto.arima(train[,"CPI"], xreg =train[,covariates])
summary(modello_gi)
checkresiduals(modello_gi)
fcast_gi <- forecast(modello_gi, xreg = test[,covariates])
autoplot(fcast_gi)
summary(fcast_gi)
accuracy(fcast_gi,test[,"CPI"])

# Expectations model: ARIMA with lagged ISTAT expectations regressors
modello_aspettative <- auto.arima(train[,"CPI"],xreg =train[,covariates2])
summary(modello_aspettative)
checkresiduals(modello_aspettative)
fcast_aspettative <- forecast(modello_aspettative, xreg = test[,covariates2])
autoplot(fcast_aspettative)
summary(fcast_aspettative)
accuracy(fcast_aspettative,test[,"CPI"])
8a2b7fd685b12dbb5b25d68e8cbab4c1ba2fa200
aa7e3ab128b85115cb0c1a26da34517e7bc8c47c
/man/addSampleData.Rd
ca0973c629682f7c45334cd2703b4c2ad621aeac
[]
no_license
derele/MultiAmplicon
dca88d10c9d700f6e307f2146d358083a6cc4644
2908a52bf7f3fde82aae32036d41213528a9cbc7
refs/heads/master
2023-01-07T07:52:17.550795
2022-12-21T12:20:07
2022-12-21T12:20:07
72,954,812
1
1
null
2022-12-21T12:20:08
2016-11-05T21:39:43
R
UTF-8
R
false
true
1,135
rd
addSampleData.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/toPhyloseq.R
\name{addSampleData}
\alias{addSampleData}
\title{addSampleData}
\usage{
addSampleData(MA, sampleData = NULL)
}
\arguments{
\item{MA}{A \code{\link{MultiAmplicon}} object}

\item{sampleData}{A data frame providing data for samples in the
\code{\link{MultiAmplicon}} object. This has to have the same rownames as
the colnames of the MultiAmplicon object. If NULL (default), sampleData
will be added based on names of the \code{\link{PairedReadFileSet}} and the
resulting sampleData will (only) give names of forward and reverse file
names for each sample.}
}
\value{
A \code{\link{MultiAmplicon}} object with the sampleData slot filled.
}
\description{
Add sample data to a MultiAmplicon object
}
\details{
The sampleData slot is filled with a \code{\link[phyloseq]{sample_data}}
object from phyloseq created by merging sample names (colnames) of the
MultiAmplicon object with a data frame of sample data. The rownames of that
sampleData data frame must correspond to colnames of the MultiAmplicon
object.
}
\author{
Emanuel Heitlinger
}
85d22f8947a8b217e0f73680c9c88702410a66b1
753e3ba2b9c0cf41ed6fc6fb1c6d583af7b017ed
/service/paws.glacier/R/paws.glacier_operations.R
f79d28b2a2864a303bad7bf2ba9b1279525df3a4
[ "Apache-2.0" ]
permissive
CR-Mercado/paws
9b3902370f752fe84d818c1cda9f4344d9e06a48
cabc7c3ab02a7a75fe1ac91f6fa256ce13d14983
refs/heads/master
2020-04-24T06:52:44.839393
2019-02-17T18:18:20
2019-02-17T18:18:20
null
0
0
null
null
null
null
UTF-8
R
false
false
101,312
r
paws.glacier_operations.R
# This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common new_operation new_request send_request
NULL

#' This operation aborts a multipart upload identified by the upload ID
#'
#' This operation aborts a multipart upload identified by the upload ID.
#'
#' After the Abort Multipart Upload request succeeds, you cannot upload any more parts to the multipart upload or complete the multipart upload. Aborting a completed upload fails. However, aborting an already-aborted upload will succeed, for a short time. For more information about uploading a part and completing a multipart upload, see UploadMultipartPart and CompleteMultipartUpload.
#'
#' This operation is idempotent.
#'
#' An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don\'t have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see [Access Control Using AWS Identity and Access Management (IAM)](http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
#'
#' For conceptual information and underlying REST API, see [Working with Archives in Amazon Glacier](http://docs.aws.amazon.com/amazonglacier/latest/dev/working-with-archives.html) and [Abort Multipart Upload](http://docs.aws.amazon.com/amazonglacier/latest/dev/api-multipart-abort-upload.html) in the *Amazon Glacier Developer Guide*.
#'
#' @section Accepted Parameters:
#' ```
#' abort_multipart_upload(
#'   accountId = "string",
#'   vaultName = "string",
#'   uploadId = "string"
#' )
#' ```
#'
#' @param accountId &#91;required&#93; The `AccountId` value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single \'`-`\' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request.
#' If you use an account ID, do not include any hyphens (\'-\') in the ID.
#' @param vaultName &#91;required&#93; The name of the vault.
#' @param uploadId &#91;required&#93; The upload ID of the multipart upload to delete.
#'
#' @examples
#' # The example deletes an in-progress multipart upload to a vault named
#' # my-vault:
#' \donttest{abort_multipart_upload(
#'   accountId = "-",
#'   uploadId = "19gaRezEXAMPLES6Ry5YYdqthHOC_kGRCT03L9yetr220UmPtBYKk-OssZtLqyFu7sY1_lR7vgFuJV6NtcV5zpsJ",
#'   vaultName = "my-vault"
#' )}
#'
#' @export
abort_multipart_upload <- function (accountId, vaultName, uploadId) {
    op <- new_operation(name = "AbortMultipartUpload", http_method = "DELETE",
        http_path = "/{accountId}/vaults/{vaultName}/multipart-uploads/{uploadId}",
        paginator = list())
    input <- abort_multipart_upload_input(accountId = accountId,
        vaultName = vaultName, uploadId = uploadId)
    output <- abort_multipart_upload_output()
    svc <- service()
    request <- new_request(svc, op, input, output)
    response <- send_request(request)
    return(response)
}

#' This operation aborts the vault locking process if the vault lock is not in the Locked state
#'
#' This operation aborts the vault locking process if the vault lock is not in the `Locked` state. If the vault lock is in the `Locked` state when this operation is requested, the operation returns an `AccessDeniedException` error. Aborting the vault locking process removes the vault lock policy from the specified vault.
#'
#' A vault lock is put into the `InProgress` state by calling InitiateVaultLock. A vault lock is put into the `Locked` state by calling CompleteVaultLock. You can get the state of a vault lock by calling GetVaultLock. For more information about the vault locking process, see [Amazon Glacier Vault Lock](http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock.html).
#' For more information about vault lock policies, see [Amazon Glacier Access Control with Vault Lock Policies](http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock-policy.html).
#'
#' This operation is idempotent. You can successfully invoke this operation multiple times, if the vault lock is in the `InProgress` state or if there is no policy associated with the vault.
#'
#' @section Accepted Parameters:
#' ```
#' abort_vault_lock(
#'   accountId = "string",
#'   vaultName = "string"
#' )
#' ```
#'
#' @param accountId &#91;required&#93; The `AccountId` value is the AWS account ID. This value must match the AWS account ID associated with the credentials used to sign the request. You can either specify an AWS account ID or optionally a single \'`-`\' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you specify your account ID, do not include any hyphens (\'-\') in the ID.
#' @param vaultName &#91;required&#93; The name of the vault.
#'
#' @examples
#' # The example aborts the vault locking process if the vault lock is not in
#' # the Locked state for the vault named examplevault.
#' \donttest{abort_vault_lock(
#'   accountId = "-",
#'   vaultName = "examplevault"
#' )}
#'
#' @export
abort_vault_lock <- function (accountId, vaultName) {
    op <- new_operation(name = "AbortVaultLock", http_method = "DELETE",
        http_path = "/{accountId}/vaults/{vaultName}/lock-policy",
        paginator = list())
    input <- abort_vault_lock_input(accountId = accountId, vaultName = vaultName)
    output <- abort_vault_lock_output()
    svc <- service()
    request <- new_request(svc, op, input, output)
    response <- send_request(request)
    return(response)
}

#' This operation adds the specified tags to a vault
#'
#' This operation adds the specified tags to a vault. Each tag is composed of a key and a value. Each vault can have up to 10 tags.
#' If your request would cause the tag limit for the vault to be exceeded, the operation throws the `LimitExceededException` error. If a tag already exists on the vault under a specified key, the existing key value will be overwritten. For more information about tags, see [Tagging Amazon Glacier Resources](http://docs.aws.amazon.com/amazonglacier/latest/dev/tagging.html).
#'
#' @section Accepted Parameters:
#' ```
#' add_tags_to_vault(
#'   accountId = "string",
#'   vaultName = "string",
#'   Tags = list(
#'     "string"
#'   )
#' )
#' ```
#'
#' @param accountId &#91;required&#93; The `AccountId` value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single \'`-`\' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request.
#' If you use an account ID, do not include any hyphens (\'-\') in the ID.
#' @param vaultName &#91;required&#93; The name of the vault.
#' @param Tags The tags to add to the vault. Each tag is composed of a key and a value. The value can be an empty string.
#'
#' @examples
#' # The example adds two tags to a my-vault.
#' \donttest{add_tags_to_vault(
#'   Tags = list(
#'     examplekey1 = "examplevalue1",
#'     examplekey2 = "examplevalue2"
#'   ),
#'   accountId = "-",
#'   vaultName = "my-vault"
#' )}
#'
#' @export
add_tags_to_vault <- function (accountId, vaultName, Tags = NULL) {
    op <- new_operation(name = "AddTagsToVault", http_method = "POST",
        http_path = "/{accountId}/vaults/{vaultName}/tags?operation=add",
        paginator = list())
    input <- add_tags_to_vault_input(accountId = accountId,
        vaultName = vaultName, Tags = Tags)
    output <- add_tags_to_vault_output()
    svc <- service()
    request <- new_request(svc, op, input, output)
    response <- send_request(request)
    return(response)
}

#' You call this operation to inform Amazon Glacier that all the archive parts have been uploaded and that Amazon Glacier can now assemble the archive from the uploaded parts
#'
#' You call this operation to inform Amazon Glacier that all the archive parts have been uploaded and that Amazon Glacier can now assemble the archive from the uploaded parts. After assembling and saving the archive to the vault, Amazon Glacier returns the URI path of the newly created archive resource. Using the URI path, you can then access the archive. After you upload an archive, you should save the archive ID returned to retrieve the archive at a later point. You can also get the vault inventory to obtain a list of archive IDs in a vault. For more information, see InitiateJob.
#'
#' In the request, you must include the computed SHA256 tree hash of the entire archive you have uploaded. For information about computing a SHA256 tree hash, see [Computing Checksums](http://docs.aws.amazon.com/amazonglacier/latest/dev/checksum-calculations.html). On the server side, Amazon Glacier also constructs the SHA256 tree hash of the assembled archive. If the values match, Amazon Glacier saves the archive to the vault; otherwise, it returns an error, and the operation fails.
#' The ListParts operation returns a list of parts uploaded for a specific multipart upload. It includes checksum information for each uploaded part that can be used to debug a bad checksum issue.
#'
#' Additionally, Amazon Glacier also checks for any missing content ranges when assembling the archive, if missing content ranges are found, Amazon Glacier returns an error and the operation fails.
#'
#' Complete Multipart Upload is an idempotent operation. After your first successful complete multipart upload, if you call the operation again within a short period, the operation will succeed and return the same archive ID. This is useful in the event you experience a network issue that causes an aborted connection or receive a 500 server error, in which case you can repeat your Complete Multipart Upload request and get the same archive ID without creating duplicate archives. Note, however, that after the multipart upload completes, you cannot call the List Parts operation and the multipart upload will not appear in List Multipart Uploads response, even if idempotent complete is possible.
#'
#' An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don\'t have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see [Access Control Using AWS Identity and Access Management (IAM)](http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
#'
#' For conceptual information and underlying REST API, see [Uploading Large Archives in Parts (Multipart Upload)](http://docs.aws.amazon.com/amazonglacier/latest/dev/uploading-archive-mpu.html) and [Complete Multipart Upload](http://docs.aws.amazon.com/amazonglacier/latest/dev/api-multipart-complete-upload.html) in the *Amazon Glacier Developer Guide*.
#'
#' @section Accepted Parameters:
#' ```
#' complete_multipart_upload(
#'   accountId = "string",
#'   vaultName = "string",
#'   uploadId = "string",
#'   archiveSize = "string",
#'   checksum = "string"
#' )
#' ```
#'
#' @param accountId &#91;required&#93; The `AccountId` value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single \'`-`\' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request.
#' If you use an account ID, do not include any hyphens (\'-\') in the ID.
#' @param vaultName &#91;required&#93; The name of the vault.
#' @param uploadId &#91;required&#93; The upload ID of the multipart upload.
#' @param archiveSize The total size, in bytes, of the entire archive. This value should be the sum of all the sizes of the individual parts that you uploaded.
#' @param checksum The SHA256 tree hash of the entire archive. It is the tree hash of SHA256 tree hash of the individual parts. If the value you specify in the request does not match the SHA256 tree hash of the final assembled archive as computed by Amazon Glacier, Amazon Glacier returns an error and the request fails.
#'
#' @examples
#' # The example completes a multipart upload for a 3 MiB archive.
#' \donttest{complete_multipart_upload(
#'   accountId = "-",
#'   archiveSize = "3145728",
#'   checksum = "9628195fcdbcbbe76cdde456d4646fa7de5f219fb39823836d81f0cc0e18aa67",
#'   uploadId = "19gaRezEXAMPLES6Ry5YYdqthHOC_kGRCT03L9yetr220UmPtBYKk-OssZtLqyFu7sY1_lR7vgFuJV6NtcV5zpsJ",
#'   vaultName = "my-vault"
#' )}
#'
#' @export
complete_multipart_upload <- function (accountId, vaultName, uploadId,
    archiveSize = NULL, checksum = NULL) {
    op <- new_operation(name = "CompleteMultipartUpload", http_method = "POST",
        http_path = "/{accountId}/vaults/{vaultName}/multipart-uploads/{uploadId}",
        paginator = list())
    input <- complete_multipart_upload_input(accountId = accountId,
        vaultName = vaultName, uploadId = uploadId, archiveSize = archiveSize,
        checksum = checksum)
    output <- complete_multipart_upload_output()
    svc <- service()
    request <- new_request(svc, op, input, output)
    response <- send_request(request)
    return(response)
}

#' This operation completes the vault locking process by transitioning the vault lock from the InProgress state to the Locked state, which causes the vault lock policy to become unchangeable
#'
#' This operation completes the vault locking process by transitioning the vault lock from the `InProgress` state to the `Locked` state, which causes the vault lock policy to become unchangeable. A vault lock is put into the `InProgress` state by calling InitiateVaultLock. You can obtain the state of the vault lock by calling GetVaultLock. For more information about the vault locking process, [Amazon Glacier Vault Lock](http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock.html).
#'
#' This operation is idempotent. This request is always successful if the vault lock is in the `Locked` state and the provided lock ID matches the lock ID originally used to lock the vault.
#'
#' If an invalid lock ID is passed in the request when the vault lock is in the `Locked` state, the operation returns an `AccessDeniedException` error.
#' If an invalid lock ID is passed in the request when the vault lock is in the `InProgress` state, the operation throws an `InvalidParameter` error.
#'
#' @section Accepted Parameters:
#' ```
#' complete_vault_lock(
#'   accountId = "string",
#'   vaultName = "string",
#'   lockId = "string"
#' )
#' ```
#'
#' @param accountId &#91;required&#93; The `AccountId` value is the AWS account ID. This value must match the AWS account ID associated with the credentials used to sign the request. You can either specify an AWS account ID or optionally a single \'`-`\' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you specify your account ID, do not include any hyphens (\'-\') in the ID.
#' @param vaultName &#91;required&#93; The name of the vault.
#' @param lockId &#91;required&#93; The `lockId` value is the lock ID obtained from a InitiateVaultLock request.
#'
#' @examples
#' # The example completes the vault locking process by transitioning the
#' # vault lock from the InProgress state to the Locked state.
#' \donttest{complete_vault_lock(
#'   accountId = "-",
#'   lockId = "AE863rKkWZU53SLW5be4DUcW",
#'   vaultName = "example-vault"
#' )}
#'
#' @export
complete_vault_lock <- function (accountId, vaultName, lockId) {
    op <- new_operation(name = "CompleteVaultLock", http_method = "POST",
        http_path = "/{accountId}/vaults/{vaultName}/lock-policy/{lockId}",
        paginator = list())
    input <- complete_vault_lock_input(accountId = accountId,
        vaultName = vaultName, lockId = lockId)
    output <- complete_vault_lock_output()
    svc <- service()
    request <- new_request(svc, op, input, output)
    response <- send_request(request)
    return(response)
}

#' This operation creates a new vault with the specified name
#'
#' This operation creates a new vault with the specified name. The name of the vault must be unique within a region for an AWS account. You can create up to 1,000 vaults per account.
#' If you need to create more vaults, contact Amazon Glacier.
#'
#' You must use the following guidelines when naming a vault.
#'
#' - Names can be between 1 and 255 characters long.
#'
#' - Allowed characters are a-z, A-Z, 0-9, \'\_\' (underscore), \'-\' (hyphen), and \'.\' (period).
#'
#' This operation is idempotent.
#'
#' An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don\'t have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see [Access Control Using AWS Identity and Access Management (IAM)](http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
#'
#' For conceptual information and underlying REST API, see [Creating a Vault in Amazon Glacier](http://docs.aws.amazon.com/amazonglacier/latest/dev/creating-vaults.html) and [Create Vault](http://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-put.html) in the *Amazon Glacier Developer Guide*.
#'
#' @section Accepted Parameters:
#' ```
#' create_vault(
#'   accountId = "string",
#'   vaultName = "string"
#' )
#' ```
#'
#' @param accountId &#91;required&#93; The `AccountId` value is the AWS account ID. This value must match the AWS account ID associated with the credentials used to sign the request. You can either specify an AWS account ID or optionally a single \'`-`\' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you specify your account ID, do not include any hyphens (\'-\') in the ID.
#' @param vaultName &#91;required&#93; The name of the vault.
#'
#' @examples
#' # The following example creates a new vault named my-vault.
#' \donttest{create_vault(
#'   accountId = "-",
#'   vaultName = "my-vault"
#' )}
#'
#' @export
create_vault <- function (accountId, vaultName) {
    op <- new_operation(name = "CreateVault", http_method = "PUT",
        http_path = "/{accountId}/vaults/{vaultName}", paginator = list())
    input <- create_vault_input(accountId = accountId, vaultName = vaultName)
    output <- create_vault_output()
    svc <- service()
    request <- new_request(svc, op, input, output)
    response <- send_request(request)
    return(response)
}

#' This operation deletes an archive from a vault
#'
#' This operation deletes an archive from a vault. Subsequent requests to initiate a retrieval of this archive will fail. Archive retrievals that are in progress for this archive ID may or may not succeed according to the following scenarios:
#'
#' - If the archive retrieval job is actively preparing the data for download when Amazon Glacier receives the delete archive request, the archival retrieval operation might fail.
#'
#' - If the archive retrieval job has successfully prepared the archive for download when Amazon Glacier receives the delete archive request, you will be able to download the output.
#'
#' This operation is idempotent. Attempting to delete an already-deleted archive does not result in an error.
#'
#' An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don\'t have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see [Access Control Using AWS Identity and Access Management (IAM)](http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
#'
#' For conceptual information and underlying REST API, see [Deleting an Archive in Amazon Glacier](http://docs.aws.amazon.com/amazonglacier/latest/dev/deleting-an-archive.html) and [Delete Archive](http://docs.aws.amazon.com/amazonglacier/latest/dev/api-archive-delete.html) in the *Amazon Glacier Developer Guide*.
#'
#' @section Accepted Parameters:
#' ```
#' delete_archive(
#'   accountId = "string",
#'   vaultName = "string",
#'   archiveId = "string"
#' )
#' ```
#'
#' @param accountId &#91;required&#93; The `AccountId` value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single \'`-`\' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request.
#' If you use an account ID, do not include any hyphens (\'-\') in the ID.
#' @param vaultName &#91;required&#93; The name of the vault.
#' @param archiveId &#91;required&#93; The ID of the archive to delete.
#'
#' @examples
#' # The example deletes the archive specified by the archive ID.
#' \donttest{delete_archive(
#'   accountId = "-",
#'   archiveId = "NkbByEejwEggmBz2fTHgJrg0XBoDfjP4q6iu87-TjhqG6eGoOY9Z8i1_AUyUsuhPAdTqLHy8pTl5nfCFJmDl2yEZONi5L26Omw12vcs01MNGntHEQL8MBfGlqrEXAMPLEArchiveId",
#'   vaultName = "examplevault"
#' )}
#'
#' @export
delete_archive <- function (accountId, vaultName, archiveId) {
    op <- new_operation(name = "DeleteArchive", http_method = "DELETE",
        http_path = "/{accountId}/vaults/{vaultName}/archives/{archiveId}",
        paginator = list())
    input <- delete_archive_input(accountId = accountId, vaultName = vaultName,
        archiveId = archiveId)
    output <- delete_archive_output()
    svc <- service()
    request <- new_request(svc, op, input, output)
    response <- send_request(request)
    return(response)
}

#' This operation deletes a vault
#'
#' This operation deletes a vault.
#' Amazon Glacier will delete a vault only if there are no archives in the vault as of the last inventory and there have been no writes to the vault since the last inventory. If either of these conditions is not satisfied, the vault deletion fails (that is, the vault is not removed) and Amazon Glacier returns an error. You can use DescribeVault to return the number of archives in a vault, and you can use [Initiate a Job (POST jobs)](http://docs.aws.amazon.com/amazonglacier/latest/dev/api-initiate-job-post.html) to initiate a new inventory retrieval for a vault. The inventory contains the archive IDs you use to delete archives using [Delete Archive (DELETE archive)](http://docs.aws.amazon.com/amazonglacier/latest/dev/api-archive-delete.html).
#'
#' This operation is idempotent.
#'
#' An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don\'t have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see [Access Control Using AWS Identity and Access Management (IAM)](http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
#'
#' For conceptual information and underlying REST API, see [Deleting a Vault in Amazon Glacier](http://docs.aws.amazon.com/amazonglacier/latest/dev/deleting-vaults.html) and [Delete Vault](http://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-delete.html) in the *Amazon Glacier Developer Guide*.
#'
#' @section Accepted Parameters:
#' ```
#' delete_vault(
#'   accountId = "string",
#'   vaultName = "string"
#' )
#' ```
#'
#' @param accountId &#91;required&#93; The `AccountId` value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single \'`-`\' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request.
#' If you use an account ID, do not include any hyphens (\'-\') in the ID.
#' @param vaultName &#91;required&#93; The name of the vault.
#'
#' @examples
#' # The example deletes a vault named my-vault:
#' \donttest{delete_vault(
#'   accountId = "-",
#'   vaultName = "my-vault"
#' )}
#'
#' @export
delete_vault <- function (accountId, vaultName) {
    op <- new_operation(name = "DeleteVault", http_method = "DELETE",
        http_path = "/{accountId}/vaults/{vaultName}", paginator = list())
    input <- delete_vault_input(accountId = accountId, vaultName = vaultName)
    output <- delete_vault_output()
    svc <- service()
    request <- new_request(svc, op, input, output)
    response <- send_request(request)
    return(response)
}

#' This operation deletes the access policy associated with the specified vault
#'
#' This operation deletes the access policy associated with the specified vault. The operation is eventually consistent; that is, it might take some time for Amazon Glacier to completely remove the access policy, and you might still see the effect of the policy for a short time after you send the delete request.
#'
#' This operation is idempotent. You can invoke delete multiple times, even if there is no policy associated with the vault. For more information about vault access policies, see [Amazon Glacier Access Control with Vault Access Policies](http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-access-policy.html).
#'
#' @section Accepted Parameters:
#' ```
#' delete_vault_access_policy(
#'   accountId = "string",
#'   vaultName = "string"
#' )
#' ```
#'
#' @param accountId &#91;required&#93; The `AccountId` value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single \'`-`\' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request.
#' If you use an account ID, do not include any hyphens (\'-\') in the ID.
#' @param vaultName &#91;required&#93; The name of the vault.
#'
#' @examples
#' # The example deletes the access policy associated with the vault named
#' # examplevault.
#' \donttest{delete_vault_access_policy(
#'   accountId = "-",
#'   vaultName = "examplevault"
#' )}
#'
#' @export
delete_vault_access_policy <- function (accountId, vaultName) {
    op <- new_operation(name = "DeleteVaultAccessPolicy", http_method = "DELETE",
        http_path = "/{accountId}/vaults/{vaultName}/access-policy",
        paginator = list())
    input <- delete_vault_access_policy_input(accountId = accountId,
        vaultName = vaultName)
    output <- delete_vault_access_policy_output()
    svc <- service()
    request <- new_request(svc, op, input, output)
    response <- send_request(request)
    return(response)
}

#' This operation deletes the notification configuration set for a vault
#'
#' This operation deletes the notification configuration set for a vault. The operation is eventually consistent; that is, it might take some time for Amazon Glacier to completely disable the notifications and you might still receive some notifications for a short time after you send the delete request.
#'
#' An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don\'t have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see [Access Control Using AWS Identity and Access Management (IAM)](http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
#'
#' For conceptual information and underlying REST API, see [Configuring Vault Notifications in Amazon Glacier](http://docs.aws.amazon.com/amazonglacier/latest/dev/configuring-notifications.html) and [Delete Vault Notification Configuration](http://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-notifications-delete.html) in the Amazon Glacier Developer Guide.
#'
#' @section Accepted Parameters:
#' ```
#' delete_vault_notifications(
#'   accountId = "string",
#'   vaultName = "string"
#' )
#' ```
#'
#' @param accountId \[required\] The `AccountId` value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single \'`-`\' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens (\'-\') in the ID.
#' @param vaultName \[required\] The name of the vault.
#'
#' @examples
#' # The example deletes the notification configuration set for the vault
#' # named examplevault.
#' \donttest{delete_vault_notifications(
#'   accountId = "-",
#'   vaultName = "examplevault"
#' )}
#'
#' @export
delete_vault_notifications <- function(accountId, vaultName) {
  # Describe the DeleteVaultNotifications REST call: HTTP DELETE on the
  # vault's notification-configuration subresource.
  operation <- new_operation(name = "DeleteVaultNotifications",
    http_method = "DELETE",
    http_path = "/{accountId}/vaults/{vaultName}/notification-configuration",
    paginator = list())
  req_input <- delete_vault_notifications_input(accountId = accountId,
    vaultName = vaultName)
  req_output <- delete_vault_notifications_output()
  client <- service()
  # Assemble the signed request and dispatch it to the service endpoint.
  resp <- send_request(new_request(client, operation, req_input, req_output))
  return(resp)
}

#' This operation returns information about a job you previously initiated, including the job initiation date, the user who initiated the job, the job status code/message and the Amazon SNS topic to notify after Amazon Glacier completes the job
#'
#' This operation returns information about a job you previously initiated, including the job initiation date, the user who initiated the job, the job status code/message and the Amazon SNS topic to notify after Amazon Glacier completes the job. For more information about initiating a job, see InitiateJob.
#'
#' This operation enables you to check the status of your job.
#' However, it is strongly recommended that you set up an Amazon SNS topic and specify it in your initiate job request so that Amazon Glacier can notify the topic after it completes the job.
#'
#' A job ID will not expire for at least 24 hours after Amazon Glacier completes the job.
#'
#' An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don\'t have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see [Access Control Using AWS Identity and Access Management (IAM)](http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
#'
#' For more information about using this operation, see the documentation for the underlying REST API [Describe Job](http://docs.aws.amazon.com/amazonglacier/latest/dev/api-describe-job-get.html) in the *Amazon Glacier Developer Guide*.
#'
#' @section Accepted Parameters:
#' ```
#' describe_job(
#'   accountId = "string",
#'   vaultName = "string",
#'   jobId = "string"
#' )
#' ```
#'
#' @param accountId \[required\] The `AccountId` value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single \'`-`\' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens (\'-\') in the ID.
#' @param vaultName \[required\] The name of the vault.
#' @param jobId \[required\] The ID of the job to describe.
#'
#' @examples
#' # The example returns information about the previously initiated job
#' # specified by the job ID.
#' \donttest{describe_job(
#'   accountId = "-",
#'   jobId = "zbxcm3Z_3z5UkoroF7SuZKrxgGoDc3RloGduS7Eg-RO47Yc6FxsdGBgf_Q2DK5Ejh18CnTS5XW4_XqlNHS61dsO4Cn",
#'   vaultName = "my-vault"
#' )}
#'
#' @export
describe_job <- function(accountId, vaultName, jobId) {
  # Describe the DescribeJob REST call: HTTP GET on the job resource.
  operation <- new_operation(name = "DescribeJob", http_method = "GET",
    http_path = "/{accountId}/vaults/{vaultName}/jobs/{jobId}",
    paginator = list())
  req_input <- describe_job_input(accountId = accountId,
    vaultName = vaultName, jobId = jobId)
  req_output <- describe_job_output()
  client <- service()
  # Assemble the signed request and dispatch it to the service endpoint.
  resp <- send_request(new_request(client, operation, req_input, req_output))
  return(resp)
}

#' This operation returns information about a vault, including the vault's Amazon Resource Name (ARN), the date the vault was created, the number of archives it contains, and the total size of all the archives in the vault
#'
#' This operation returns information about a vault, including the vault\'s Amazon Resource Name (ARN), the date the vault was created, the number of archives it contains, and the total size of all the archives in the vault. The number of archives and their total size are as of the last inventory generation. This means that if you add or remove an archive from a vault, and then immediately use Describe Vault, the change in contents will not be immediately reflected. If you want to retrieve the latest inventory of the vault, use InitiateJob. Amazon Glacier generates vault inventories approximately daily. For more information, see [Downloading a Vault Inventory in Amazon Glacier](http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-inventory.html).
#'
#' An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don\'t have any permissions by default. You must grant them explicit permission to perform specific actions.
#' For more information, see [Access Control Using AWS Identity and Access Management (IAM)](http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
#'
#' For conceptual information and underlying REST API, see [Retrieving Vault Metadata in Amazon Glacier](http://docs.aws.amazon.com/amazonglacier/latest/dev/retrieving-vault-info.html) and [Describe Vault](http://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-get.html) in the *Amazon Glacier Developer Guide*.
#'
#' @section Accepted Parameters:
#' ```
#' describe_vault(
#'   accountId = "string",
#'   vaultName = "string"
#' )
#' ```
#'
#' @param accountId \[required\] The `AccountId` value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single \'`-`\' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens (\'-\') in the ID.
#' @param vaultName \[required\] The name of the vault.
#'
#' @examples
#' # The example retrieves data about a vault named my-vault.
#' \donttest{describe_vault(
#'   accountId = "-",
#'   vaultName = "my-vault"
#' )}
#'
#' @export
describe_vault <- function(accountId, vaultName) {
  # Describe the DescribeVault REST call: HTTP GET on the vault resource.
  operation <- new_operation(name = "DescribeVault", http_method = "GET",
    http_path = "/{accountId}/vaults/{vaultName}", paginator = list())
  req_input <- describe_vault_input(accountId = accountId,
    vaultName = vaultName)
  req_output <- describe_vault_output()
  client <- service()
  # Assemble the signed request and dispatch it to the service endpoint.
  resp <- send_request(new_request(client, operation, req_input, req_output))
  return(resp)
}

#' This operation returns the current data retrieval policy for the account and region specified in the GET request
#'
#' This operation returns the current data retrieval policy for the account and region specified in the GET request.
#' For more information about data retrieval policies, see [Amazon Glacier Data Retrieval Policies](http://docs.aws.amazon.com/amazonglacier/latest/dev/data-retrieval-policy.html).
#'
#' @section Accepted Parameters:
#' ```
#' get_data_retrieval_policy(
#'   accountId = "string"
#' )
#' ```
#'
#' @param accountId \[required\] The `AccountId` value is the AWS account ID. This value must match the AWS account ID associated with the credentials used to sign the request. You can either specify an AWS account ID or optionally a single \'`-`\' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you specify your account ID, do not include any hyphens (\'-\') in the ID.
#'
#' @examples
#' # The example returns the current data retrieval policy for the account.
#' \donttest{get_data_retrieval_policy(
#'   accountId = "-"
#' )}
#'
#' @export
get_data_retrieval_policy <- function(accountId) {
  # Describe the GetDataRetrievalPolicy REST call: HTTP GET on the
  # account-level data-retrieval policy resource.
  operation <- new_operation(name = "GetDataRetrievalPolicy",
    http_method = "GET", http_path = "/{accountId}/policies/data-retrieval",
    paginator = list())
  req_input <- get_data_retrieval_policy_input(accountId = accountId)
  req_output <- get_data_retrieval_policy_output()
  client <- service()
  # Assemble the signed request and dispatch it to the service endpoint.
  resp <- send_request(new_request(client, operation, req_input, req_output))
  return(resp)
}

#' This operation downloads the output of the job you initiated using InitiateJob
#'
#' This operation downloads the output of the job you initiated using InitiateJob. Depending on the job type you specified when you initiated the job, the output will be either the content of an archive or a vault inventory.
#'
#' You can download all the job output or download a portion of the output by specifying a byte range. In the case of an archive retrieval job, depending on the byte range you specify, Amazon Glacier returns the checksum for the portion of the data.
#' You can compute the checksum on the client and verify that the values match to ensure the portion you downloaded is the correct data.
#'
#' A job ID will not expire for at least 24 hours after Amazon Glacier completes the job. If the job output is large, you can download it in portions by specifying a byte range. For both archive and inventory retrieval jobs, you should verify the downloaded size against the size returned in the headers from the **Get Job Output** response.
#'
#' For archive retrieval jobs, you should also verify that the size is what you expected. If you download a portion of the output, the expected size is based on the range of bytes you specified. For example, if you specify a range of `bytes=0-1048575`, you should verify your download size is 1,048,576 bytes. If you download an entire archive, the expected size is the size of the archive when you uploaded it to Amazon Glacier The expected size is also returned in the headers from the **Get Job Output** response.
#'
#' In the case of an archive retrieval job, depending on the byte range you specify, Amazon Glacier returns the checksum for the portion of the data. To ensure the portion you downloaded is the correct data, compute the checksum on the client, verify that the values match, and verify that the size is what you expected.
#'
#' A job ID does not expire for at least 24 hours after Amazon Glacier completes the job. That is, you can download the job output within the 24 hours period after Amazon Glacier completes the job.
#'
#' An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don\'t have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see [Access Control Using AWS Identity and Access Management (IAM)](http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
#'
#' For conceptual information and the underlying REST API, see [Downloading a Vault Inventory](http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-inventory.html), [Downloading an Archive](http://docs.aws.amazon.com/amazonglacier/latest/dev/downloading-an-archive.html), and [Get Job Output](http://docs.aws.amazon.com/amazonglacier/latest/dev/api-job-output-get.html)
#'
#' @section Accepted Parameters:
#' ```
#' get_job_output(
#'   accountId = "string",
#'   vaultName = "string",
#'   jobId = "string",
#'   range = "string"
#' )
#' ```
#'
#' @param accountId \[required\] The `AccountId` value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single \'`-`\' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens (\'-\') in the ID.
#' @param vaultName \[required\] The name of the vault.
#' @param jobId \[required\] The job ID whose data is downloaded.
#' @param range The range of bytes to retrieve from the output. For example, if you want to download the first 1,048,576 bytes, specify the range as `bytes=0-1048575`. By default, this operation downloads the entire output.
#'
#' If the job output is large, then you can use a range to retrieve a portion of the output. This allows you to download the entire output in smaller chunks of bytes. For example, suppose you have 1 GB of job output you want to download and you decide to download 128 MB chunks of data at a time, which is a total of eight Get Job Output requests. You use the following process to download the job output:
#'
#' 1. Download a 128 MB chunk of output by specifying the appropriate byte range. Verify that all 128 MB of data was received.
#'
#' 2. Along with the data, the response includes a SHA256 tree hash of the payload. You compute the checksum of the payload on the client and compare it with the checksum you received in the response to ensure you received all the expected data.
#'
#' 3. Repeat steps 1 and 2 for all the eight 128 MB chunks of output data, each time specifying the appropriate byte range.
#'
#' 4. After downloading all the parts of the job output, you have a list of eight checksum values. Compute the tree hash of these values to find the checksum of the entire output. Using the DescribeJob API, obtain job information of the job that provided you the output. The response includes the checksum of the entire archive stored in Amazon Glacier. You compare this value with the checksum you computed to ensure you have downloaded the entire archive content with no errors.
#'
#' @examples
#' # The example downloads the output of a previously initiated inventory
#' # retrieval job that is identified by the job ID.
#' \donttest{get_job_output(
#'   accountId = "-",
#'   jobId = "zbxcm3Z_3z5UkoroF7SuZKrxgGoDc3RloGduS7Eg-RO47Yc6FxsdGBgf_Q2DK5Ejh18CnTS5XW4_XqlNHS61dsO4CnMW",
#'   range = "",
#'   vaultName = "my-vault"
#' )}
#'
#' @export
get_job_output <- function(accountId, vaultName, jobId, range = NULL) {
  # Describe the GetJobOutput REST call: HTTP GET on the job output resource.
  operation <- new_operation(name = "GetJobOutput", http_method = "GET",
    http_path = "/{accountId}/vaults/{vaultName}/jobs/{jobId}/output",
    paginator = list())
  # `range` is optional; NULL means the entire output is downloaded.
  req_input <- get_job_output_input(accountId = accountId,
    vaultName = vaultName, jobId = jobId, range = range)
  req_output <- get_job_output_output()
  client <- service()
  # Assemble the signed request and dispatch it to the service endpoint.
  resp <- send_request(new_request(client, operation, req_input, req_output))
  return(resp)
}

#' This operation retrieves the access-policy subresource set on the vault; for more information on setting this subresource, see Set Vault Access Policy (PUT access-policy)
#'
#' This operation retrieves the `access-policy` subresource set on the vault; for more information on setting this subresource, see [Set Vault Access Policy (PUT
#' access-policy)](http://docs.aws.amazon.com/amazonglacier/latest/dev/api-SetVaultAccessPolicy.html). If there is no access policy set on the vault, the operation returns a `404 Not found` error. For more information about vault access policies, see [Amazon Glacier Access Control with Vault Access Policies](http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-access-policy.html).
#'
#' @section Accepted Parameters:
#' ```
#' get_vault_access_policy(
#'   accountId = "string",
#'   vaultName = "string"
#' )
#' ```
#'
#' @param accountId \[required\] The `AccountId` value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single \'`-`\' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens (\'-\') in the ID.
#' @param vaultName \[required\] The name of the vault.
#'
#' @examples
#' # The example retrieves the access-policy set on the vault named
#' # example-vault.
#' \donttest{get_vault_access_policy(
#'   accountId = "-",
#'   vaultName = "example-vault"
#' )}
#'
#' @export
get_vault_access_policy <- function(accountId, vaultName) {
  # Describe the GetVaultAccessPolicy REST call: HTTP GET on the vault's
  # access-policy subresource.
  operation <- new_operation(name = "GetVaultAccessPolicy",
    http_method = "GET",
    http_path = "/{accountId}/vaults/{vaultName}/access-policy",
    paginator = list())
  req_input <- get_vault_access_policy_input(accountId = accountId,
    vaultName = vaultName)
  req_output <- get_vault_access_policy_output()
  client <- service()
  # Assemble the signed request and dispatch it to the service endpoint.
  resp <- send_request(new_request(client, operation, req_input, req_output))
  return(resp)
}

#' This operation retrieves the following attributes from the lock-policy subresource set on the specified vault:
#'
#' This operation retrieves the following attributes from the `lock-policy` subresource set on the specified vault:
#'
#' - The vault lock policy set on the vault.
#'
#' - The state of the vault lock, which is either `InProgress` or `Locked`.
#'
#' - When the lock ID expires. The lock ID is used to complete the vault locking process.
#'
#' - When the vault lock was initiated and put into the `InProgress` state.
#'
#' A vault lock is put into the `InProgress` state by calling InitiateVaultLock. A vault lock is put into the `Locked` state by calling CompleteVaultLock. You can abort the vault locking process by calling AbortVaultLock. For more information about the vault locking process, [Amazon Glacier Vault Lock](http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock.html).
#'
#' If there is no vault lock policy set on the vault, the operation returns a `404 Not found` error. For more information about vault lock policies, [Amazon Glacier Access Control with Vault Lock Policies](http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock-policy.html).
#'
#' @section Accepted Parameters:
#' ```
#' get_vault_lock(
#'   accountId = "string",
#'   vaultName = "string"
#' )
#' ```
#'
#' @param accountId \[required\] The `AccountId` value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single \'`-`\' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens (\'-\') in the ID.
#' @param vaultName \[required\] The name of the vault.
#'
#' @examples
#' # The example retrieves the attributes from the lock-policy subresource
#' # set on the vault named examplevault.
#' \donttest{get_vault_lock(
#'   accountId = "-",
#'   vaultName = "examplevault"
#' )}
#'
#' @export
get_vault_lock <- function(accountId, vaultName) {
  # Describe the GetVaultLock REST call: HTTP GET on the vault's
  # lock-policy subresource.
  operation <- new_operation(name = "GetVaultLock", http_method = "GET",
    http_path = "/{accountId}/vaults/{vaultName}/lock-policy",
    paginator = list())
  req_input <- get_vault_lock_input(accountId = accountId,
    vaultName = vaultName)
  req_output <- get_vault_lock_output()
  client <- service()
  # Assemble the signed request and dispatch it to the service endpoint.
  resp <- send_request(new_request(client, operation, req_input, req_output))
  return(resp)
}

#' This operation retrieves the notification-configuration subresource of the specified vault
#'
#' This operation retrieves the `notification-configuration` subresource of the specified vault.
#'
#' For information about setting a notification configuration on a vault, see SetVaultNotifications. If a notification configuration for a vault is not set, the operation returns a `404 Not Found` error. For more information about vault notifications, see [Configuring Vault Notifications in Amazon Glacier](http://docs.aws.amazon.com/amazonglacier/latest/dev/configuring-notifications.html).
#'
#' An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don\'t have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see [Access Control Using AWS Identity and Access Management (IAM)](http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
#'
#' For conceptual information and underlying REST API, see [Configuring Vault Notifications in Amazon Glacier](http://docs.aws.amazon.com/amazonglacier/latest/dev/configuring-notifications.html) and [Get Vault Notification Configuration](http://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-notifications-get.html) in the *Amazon Glacier Developer Guide*.
#'
#' @section Accepted Parameters:
#' ```
#' get_vault_notifications(
#'   accountId = "string",
#'   vaultName = "string"
#' )
#' ```
#'
#' @param accountId \[required\] The `AccountId` value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single \'`-`\' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens (\'-\') in the ID.
#' @param vaultName \[required\] The name of the vault.
#'
#' @examples
#' # The example retrieves the notification-configuration for the vault named
#' # my-vault.
#' \donttest{get_vault_notifications(
#'   accountId = "-",
#'   vaultName = "my-vault"
#' )}
#'
#' @export
get_vault_notifications <- function(accountId, vaultName) {
  # Describe the GetVaultNotifications REST call: HTTP GET on the vault's
  # notification-configuration subresource.
  operation <- new_operation(name = "GetVaultNotifications",
    http_method = "GET",
    http_path = "/{accountId}/vaults/{vaultName}/notification-configuration",
    paginator = list())
  req_input <- get_vault_notifications_input(accountId = accountId,
    vaultName = vaultName)
  req_output <- get_vault_notifications_output()
  client <- service()
  # Assemble the signed request and dispatch it to the service endpoint.
  resp <- send_request(new_request(client, operation, req_input, req_output))
  return(resp)
}

#' This operation initiates a job of the specified type, which can be a select, an archival retrieval, or a vault retrieval
#'
#' This operation initiates a job of the specified type, which can be a select, an archival retrieval, or a vault retrieval. For more information about using this operation, see the documentation for the underlying REST API [Initiate a Job](http://docs.aws.amazon.com/amazonglacier/latest/dev/api-initiate-job-post.html).
#'
#' @section Accepted Parameters:
#' ```
#' initiate_job(
#'   accountId = "string",
#'   vaultName = "string",
#'   jobParameters = list(
#'     Format = "string",
#'     Type = "string",
#'     ArchiveId = "string",
#'     Description = "string",
#'     SNSTopic = "string",
#'     RetrievalByteRange = "string",
#'     Tier = "string",
#'     InventoryRetrievalParameters = list(
#'       StartDate = "string",
#'       EndDate = "string",
#'       Limit = "string",
#'       Marker = "string"
#'     ),
#'     SelectParameters = list(
#'       InputSerialization = list(
#'         csv = list(
#'           FileHeaderInfo = "USE"|"IGNORE"|"NONE",
#'           Comments = "string",
#'           QuoteEscapeCharacter = "string",
#'           RecordDelimiter = "string",
#'           FieldDelimiter = "string",
#'           QuoteCharacter = "string"
#'         )
#'       ),
#'       ExpressionType = "SQL",
#'       Expression = "string",
#'       OutputSerialization = list(
#'         csv = list(
#'           QuoteFields = "ALWAYS"|"ASNEEDED",
#'           QuoteEscapeCharacter = "string",
#'           RecordDelimiter = "string",
#'           FieldDelimiter = "string",
#'           QuoteCharacter = "string"
#'         )
#'       )
#'     ),
#'     OutputLocation = list(
#'       S3 = list(
#'         BucketName = "string",
#'         Prefix = "string",
#'         Encryption = list(
#'           EncryptionType = "aws:kms"|"AES256",
#'           KMSKeyId = "string",
#'           KMSContext = "string"
#'         ),
#'         CannedACL = "private"|"public-read"|"public-read-write"|"aws-exec-read"|"authenticated-read"|"bucket-owner-read"|"bucket-owner-full-control",
#'         AccessControlList = list(
#'           list(
#'             Grantee = list(
#'               Type = "AmazonCustomerByEmail"|"CanonicalUser"|"Group",
#'               DisplayName = "string",
#'               URI = "string",
#'               ID = "string",
#'               EmailAddress = "string"
#'             ),
#'             Permission = "FULL_CONTROL"|"WRITE"|"WRITE_ACP"|"READ"|"READ_ACP"
#'           )
#'         ),
#'         Tagging = list(
#'           "string"
#'         ),
#'         UserMetadata = list(
#'           "string"
#'         ),
#'         StorageClass = "STANDARD"|"REDUCED_REDUNDANCY"|"STANDARD_IA"
#'       )
#'     )
#'   )
#' )
#' ```
#'
#' @param accountId \[required\] The `AccountId` value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single \'`-`\' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens (\'-\') in the ID.
#' @param vaultName \[required\] The name of the vault.
#' @param jobParameters Provides options for specifying job information.
#'
#' @examples
#' # The example initiates an inventory-retrieval job for the vault named
#' # examplevault.
#' \donttest{initiate_job(
#'   accountId = "-",
#'   jobParameters = list(
#'     Description = "My inventory job",
#'     Format = "CSV",
#'     SNSTopic = "arn:aws:sns:us-west-2:111111111111:Glacier-InventoryRetrieval-topic-Example",
#'     Type = "inventory-retrieval"
#'   ),
#'   vaultName = "examplevault"
#' )}
#'
#' @export
initiate_job <- function(accountId, vaultName, jobParameters = NULL) {
  # Describe the InitiateJob REST call: HTTP POST on the vault's jobs
  # collection.
  operation <- new_operation(name = "InitiateJob", http_method = "POST",
    http_path = "/{accountId}/vaults/{vaultName}/jobs", paginator = list())
  # `jobParameters` is optional; it selects the job type and its options.
  req_input <- initiate_job_input(accountId = accountId,
    vaultName = vaultName, jobParameters = jobParameters)
  req_output <- initiate_job_output()
  client <- service()
  # Assemble the signed request and dispatch it to the service endpoint.
  resp <- send_request(new_request(client, operation, req_input, req_output))
  return(resp)
}

#' This operation initiates a multipart upload
#'
#' This operation initiates a multipart upload. Amazon Glacier creates a multipart upload resource and returns its ID in the response. The multipart upload ID is used in subsequent requests to upload parts of an archive (see UploadMultipartPart).
#'
#' When you initiate a multipart upload, you specify the part size in number of bytes. The part size must be a megabyte (1024 KB) multiplied by a power of 2-for example, 1048576 (1 MB), 2097152 (2 MB), 4194304 (4 MB), 8388608 (8 MB), and so on. The minimum allowable part size is 1 MB, and the maximum is 4 GB.
#'
#' Every part you upload to this resource (see UploadMultipartPart), except the last one, must have the same size. The last one can be the same size or smaller. For example, suppose you want to upload a 16.2 MB file. If you initiate the multipart upload with a part size of 4 MB, you will upload four parts of 4 MB each and one part of 0.2 MB.
#'
#' You don\'t need to know the size of the archive when you start a multipart upload because Amazon Glacier does not require you to specify the overall archive size.
#'
#' After you complete the multipart upload, Amazon Glacier removes the multipart upload resource referenced by the ID. Amazon Glacier also removes the multipart upload resource if you cancel the multipart upload or it may be removed if there is no activity for a period of 24 hours.
#'
#' An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don\'t have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see [Access Control Using AWS Identity and Access Management (IAM)](http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
#'
#' For conceptual information and underlying REST API, see [Uploading Large Archives in Parts (Multipart Upload)](http://docs.aws.amazon.com/amazonglacier/latest/dev/uploading-archive-mpu.html) and [Initiate Multipart Upload](http://docs.aws.amazon.com/amazonglacier/latest/dev/api-multipart-initiate-upload.html) in the *Amazon Glacier Developer Guide*.
#'
#' @section Accepted Parameters:
#' ```
#' initiate_multipart_upload(
#'   accountId = "string",
#'   vaultName = "string",
#'   archiveDescription = "string",
#'   partSize = "string"
#' )
#' ```
#'
#' @param accountId \[required\] The `AccountId` value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single \'`-`\' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens (\'-\') in the ID.
#' @param vaultName \[required\] The name of the vault.
#' @param archiveDescription The archive description that you are uploading in parts.
#' @param partSize The size of each part except the last, in bytes. The last part can be smaller than this part size.
#'
#' The part size must be a megabyte (1024 KB) multiplied by a power of 2, for example 1048576 (1 MB), 2097152 (2 MB), 4194304 (4 MB), 8388608 (8 MB), and so on. The minimum allowable part size is 1 MB, and the maximum is 4 GB (4096 MB).
#'
#' @examples
#' # The example initiates a multipart upload to a vault named my-vault with
#' # a part size of 1 MiB (1024 x 1024 bytes) per file.
#' \donttest{initiate_multipart_upload(
#'   accountId = "-",
#'   partSize = "1048576",
#'   vaultName = "my-vault"
#' )}
#'
#' @export
initiate_multipart_upload <- function(accountId, vaultName,
                                      archiveDescription = NULL,
                                      partSize = NULL) {
  # Describe the InitiateMultipartUpload REST call: HTTP POST on the vault's
  # multipart-uploads collection.
  operation <- new_operation(name = "InitiateMultipartUpload",
    http_method = "POST",
    http_path = "/{accountId}/vaults/{vaultName}/multipart-uploads",
    paginator = list())
  # `archiveDescription` and `partSize` are optional request headers.
  req_input <- initiate_multipart_upload_input(accountId = accountId,
    vaultName = vaultName, archiveDescription = archiveDescription,
    partSize = partSize)
  req_output <- initiate_multipart_upload_output()
  client <- service()
  # Assemble the signed request and dispatch it to the service endpoint.
  resp <- send_request(new_request(client, operation, req_input, req_output))
  return(resp)
}

#' This operation initiates the vault locking process by doing the following:
#'
#' This operation initiates the vault locking process by doing the following:
#'
#' - Installing a vault lock policy on the specified vault.
#'
#' - Setting the lock state of vault lock to `InProgress`.
#'
#' - Returning a lock ID, which is used to complete the vault locking process.
#'
#' You can set one vault lock policy for each vault and this policy can be up to 20 KB in size. For more information about vault lock policies, see [Amazon Glacier Access Control with Vault Lock Policies](http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock-policy.html).
#'
#' You must complete the vault locking process within 24 hours after the vault lock enters the `InProgress` state. After the 24 hour window ends, the lock ID expires, the vault automatically exits the `InProgress` state, and the vault lock policy is removed from the vault. You call CompleteVaultLock to complete the vault locking process by setting the state of the vault lock to `Locked`.
#'
#' After a vault lock is in the `Locked` state, you cannot initiate a new vault lock for the vault.
#'
#' You can abort the vault locking process by calling AbortVaultLock. You can get the state of the vault lock by calling GetVaultLock. For more information about the vault locking process, [Amazon Glacier Vault Lock](http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock.html).
#'
#' If this operation is called when the vault lock is in the `InProgress` state, the operation returns an `AccessDeniedException` error. When the vault lock is in the `InProgress` state you must call AbortVaultLock before you can initiate a new vault lock policy.
#'
#' @section Accepted Parameters:
#' ```
#' initiate_vault_lock(
#'   accountId = "string",
#'   vaultName = "string",
#'   policy = list(
#'     Policy = "string"
#'   )
#' )
#' ```
#'
#' @param accountId \[required\] The `AccountId` value is the AWS account ID. This value must match the AWS account ID associated with the credentials used to sign the request. You can either specify an AWS account ID or optionally a single \'`-`\' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you specify your account ID, do not include any hyphens (\'-\') in the ID.
#' @param vaultName \[required\] The name of the vault.
#' @param policy The vault lock policy as a JSON string, which uses \"\\\" as an escape character.
#'
#' @examples
#' # The example initiates the vault locking process for the vault named
#' # my-vault.
#' \donttest{initiate_vault_lock(
#'   accountId = "-",
#'   policy = list(
#'     Policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Sid\":\"Define-vault-lock\",\"Effect\":\"Deny\",\"Principal\":{\"AWS\":\"arn:aws:iam::999999999999:root\"},\"Action\":\"glacier:DeleteArchive\",\"Resource\":\"arn:aws:glacier:us-west-2:999999999999:vaults/examplevault\",\"Condition\":{\"NumericLessThanEquals\":{\"glacier:ArchiveAgeinDays\":\"365\"}}}]}"
#'   ),
#'   vaultName = "my-vault"
#' )}
#'
#' @export
initiate_vault_lock <- function(accountId, vaultName, policy = NULL) {
  # Describe the InitiateVaultLock REST call: HTTP POST on the vault's
  # lock-policy subresource.
  operation <- new_operation(name = "InitiateVaultLock",
    http_method = "POST",
    http_path = "/{accountId}/vaults/{vaultName}/lock-policy",
    paginator = list())
  # `policy` is optional; it carries the vault lock policy JSON.
  req_input <- initiate_vault_lock_input(accountId = accountId,
    vaultName = vaultName, policy = policy)
  req_output <- initiate_vault_lock_output()
  client <- service()
  # Assemble the signed request and dispatch it to the service endpoint.
  resp <- send_request(new_request(client, operation, req_input, req_output))
  return(resp)
}

#' This operation lists jobs for a vault, including jobs that are in-progress and jobs that have recently finished
#'
#' This operation lists jobs for a vault, including jobs that are in-progress and jobs that have recently finished. The List Job operation returns a list of these jobs sorted by job initiation time.
#'
#' Amazon Glacier retains recently completed jobs for a period before deleting them; however, it eventually removes completed jobs. The output of completed jobs can be retrieved. Retaining completed jobs for a period of time after they have completed enables you to get a job output in the event you miss the job completion notification or your first attempt to download it fails.
For example, suppose you start an archive retrieval job to download an archive. After the job completes, you start to download the archive but encounter a network error. In this scenario, you can retry and download the archive while the job exists. #' #' The List Jobs operation supports pagination. You should always check the response `Marker` field. If there are no more jobs to list, the `Marker` field is set to `null`. If there are more jobs to list, the `Marker` field is set to a non-null value, which you can use to continue the pagination of the list. To return a list of jobs that begins at a specific job, set the marker request parameter to the `Marker` value for that job that you obtained from a previous List Jobs request. #' #' You can set a maximum limit for the number of jobs returned in the response by specifying the `limit` parameter in the request. The default limit is 50. The number of jobs returned might be fewer than the limit, but the number of returned jobs never exceeds the limit. #' #' Additionally, you can filter the jobs list returned by specifying the optional `statuscode` parameter or `completed` parameter, or both. Using the `statuscode` parameter, you can specify to return only jobs that match either the `InProgress`, `Succeeded`, or `Failed` status. Using the `completed` parameter, you can specify to return only jobs that were completed (`true`) or jobs that were not completed (`false`). #' #' For more information about using this operation, see the documentation for the underlying REST API [List Jobs](http://docs.aws.amazon.com/amazonglacier/latest/dev/api-jobs-get.html). #' #' @section Accepted Parameters: #' ``` #' list_jobs( #' accountId = "string", #' vaultName = "string", #' limit = "string", #' marker = "string", #' statuscode = "string", #' completed = "string" #' ) #' ``` #' #' @param accountId &#91;required&#93; The `AccountId` value is the AWS account ID of the account that owns the vault. 
You can either specify an AWS account ID or optionally a single \'`-`\' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens (\'-\') in the ID. #' @param vaultName &#91;required&#93; The name of the vault. #' @param limit The maximum number of jobs to be returned. The default limit is 50. The number of jobs returned might be fewer than the specified limit, but the number of returned jobs never exceeds the limit. #' @param marker An opaque string used for pagination. This value specifies the job at which the listing of jobs should begin. Get the marker value from a previous List Jobs response. You only need to include the marker if you are continuing the pagination of results started in a previous List Jobs request. #' @param statuscode The type of job status to return. You can specify the following values: `InProgress`, `Succeeded`, or `Failed`. #' @param completed The state of the jobs to return. You can specify `true` or `false`. #' #' @examples #' # The example lists jobs for the vault named my-vault. #' \donttest{list_jobs( #' accountId = "-", #' vaultName = "my-vault" #' )} #' #' @export list_jobs <- function (accountId, vaultName, limit = NULL, marker = NULL, statuscode = NULL, completed = NULL) { op <- new_operation(name = "ListJobs", http_method = "GET", http_path = "/{accountId}/vaults/{vaultName}/jobs", paginator = list()) input <- list_jobs_input(accountId = accountId, vaultName = vaultName, limit = limit, marker = marker, statuscode = statuscode, completed = completed) output <- list_jobs_output() svc <- service() request <- new_request(svc, op, input, output) response <- send_request(request) return(response) } #' This operation lists in-progress multipart uploads for the specified vault #' #' This operation lists in-progress multipart uploads for the specified vault. 
An in-progress multipart upload is a multipart upload that has been initiated by an InitiateMultipartUpload request, but has not yet been completed or aborted. The list returned in the List Multipart Upload response has no guaranteed order. #' #' The List Multipart Uploads operation supports pagination. By default, this operation returns up to 50 multipart uploads in the response. You should always check the response for a `marker` at which to continue the list; if there are no more items the `marker` is `null`. To return a list of multipart uploads that begins at a specific upload, set the `marker` request parameter to the value you obtained from a previous List Multipart Upload request. You can also limit the number of uploads returned in the response by specifying the `limit` parameter in the request. #' #' Note the difference between this operation and listing parts (ListParts). The List Multipart Uploads operation lists all multipart uploads for a vault and does not require a multipart upload ID. The List Parts operation requires a multipart upload ID since parts are associated with a single upload. #' #' An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don\'t have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see [Access Control Using AWS Identity and Access Management (IAM)](http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). #' #' For conceptual information and the underlying REST API, see [Working with Archives in Amazon Glacier](http://docs.aws.amazon.com/amazonglacier/latest/dev/working-with-archives.html) and [List Multipart Uploads](http://docs.aws.amazon.com/amazonglacier/latest/dev/api-multipart-list-uploads.html) in the *Amazon Glacier Developer Guide*. 
#'
#' @section Accepted Parameters:
#' ```
#' list_multipart_uploads(
#'   accountId = "string",
#'   vaultName = "string",
#'   marker = "string",
#'   limit = "string"
#' )
#' ```
#'
#' @param accountId &#91;required&#93; The `AccountId` value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single \'`-`\' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens (\'-\') in the ID.
#' @param vaultName &#91;required&#93; The name of the vault.
#' @param marker An opaque string used for pagination. This value specifies the upload at which the listing of uploads should begin. Get the marker value from a previous List Uploads response. You need only include the marker if you are continuing the pagination of results started in a previous List Uploads request.
#' @param limit Specifies the maximum number of uploads returned in the response body. If this value is not specified, the List Uploads operation returns up to 50 uploads.
#'
#' @examples
#' # The example lists all the in-progress multipart uploads for the vault
#' # named examplevault.
#' \donttest{list_multipart_uploads(
#'   accountId = "-",
#'   vaultName = "examplevault"
#' )}
#'
#' @export
list_multipart_uploads <- function(accountId, vaultName, marker = NULL,
                                   limit = NULL) {
  # Describe the Glacier ListMultipartUploads REST operation.
  svc <- service()
  op <- new_operation(
    name = "ListMultipartUploads",
    http_method = "GET",
    http_path = "/{accountId}/vaults/{vaultName}/multipart-uploads",
    paginator = list()
  )
  input <- list_multipart_uploads_input(
    accountId = accountId,
    vaultName = vaultName,
    marker = marker,
    limit = limit
  )
  output <- list_multipart_uploads_output()
  # Build, sign, and send the request; the parsed response is returned.
  send_request(new_request(svc, op, input, output))
}

#' This operation lists the parts of an archive that have been uploaded in a specific multipart upload
#'
#' This operation lists the parts of an archive that have been uploaded in a specific multipart upload. You can make this request at any time during an in-progress multipart upload before you complete the upload (see CompleteMultipartUpload. List Parts returns an error for completed uploads. The list returned in the List Parts response is sorted by part range.
#'
#' The List Parts operation supports pagination. By default, this operation returns up to 50 uploaded parts in the response. You should always check the response for a `marker` at which to continue the list; if there are no more items the `marker` is `null`. To return a list of parts that begins at a specific part, set the `marker` request parameter to the value you obtained from a previous List Parts request. You can also limit the number of parts returned in the response by specifying the `limit` parameter in the request.
#'
#' An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don\'t have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see [Access Control Using AWS Identity and Access Management (IAM)](http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
#'
#' For conceptual information and the underlying REST API, see [Working with Archives in Amazon Glacier](http://docs.aws.amazon.com/amazonglacier/latest/dev/working-with-archives.html) and [List Parts](http://docs.aws.amazon.com/amazonglacier/latest/dev/api-multipart-list-parts.html) in the *Amazon Glacier Developer Guide*.
#'
#' @section Accepted Parameters:
#' ```
#' list_parts(
#'   accountId = "string",
#'   vaultName = "string",
#'   uploadId = "string",
#'   marker = "string",
#'   limit = "string"
#' )
#' ```
#'
#' @param accountId &#91;required&#93; The `AccountId` value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single \'`-`\' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens (\'-\') in the ID.
#' @param vaultName &#91;required&#93; The name of the vault.
#' @param uploadId &#91;required&#93; The upload ID of the multipart upload.
#' @param marker An opaque string used for pagination. This value specifies the part at which the listing of parts should begin. Get the marker value from the response of a previous List Parts response. You need only include the marker if you are continuing the pagination of results started in a previous List Parts request.
#' @param limit The maximum number of parts to be returned. The default limit is 50. The number of parts returned might be fewer than the specified limit, but the number of returned parts never exceeds the limit.
#'
#' @examples
#' # The example lists all the parts of a multipart upload.
#' \donttest{list_parts(
#'   accountId = "-",
#'   uploadId = "OW2fM5iVylEpFEMM9_HpKowRapC3vn5sSL39_396UW9zLFUWVrnRHaPjUJddQ5OxSHVXjYtrN47NBZ-khxOjyEXAMPLE",
#'   vaultName = "examplevault"
#' )}
#'
#' @export
list_parts <- function(accountId, vaultName, uploadId, marker = NULL,
                       limit = NULL) {
  # Describe the Glacier ListParts REST operation.
  svc <- service()
  op <- new_operation(
    name = "ListParts",
    http_method = "GET",
    http_path = "/{accountId}/vaults/{vaultName}/multipart-uploads/{uploadId}",
    paginator = list()
  )
  input <- list_parts_input(
    accountId = accountId,
    vaultName = vaultName,
    uploadId = uploadId,
    marker = marker,
    limit = limit
  )
  output <- list_parts_output()
  # Build, sign, and send the request; the parsed response is returned.
  send_request(new_request(svc, op, input, output))
}

#' This operation lists the provisioned capacity units for the specified AWS account
#'
#' This operation lists the provisioned capacity units for the specified AWS account.
#'
#' @section Accepted Parameters:
#' ```
#' list_provisioned_capacity(
#'   accountId = "string"
#' )
#' ```
#'
#' @param accountId &#91;required&#93; The AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single \'-\' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, don\'t include any hyphens (\'-\') in the ID.
#'
#' @examples
#' # The example lists the provisioned capacity units for an account.
#' \donttest{list_provisioned_capacity(
#'   accountId = "-"
#' )}
#'
#' @export
list_provisioned_capacity <- function(accountId) {
  # Describe the Glacier ListProvisionedCapacity REST operation.
  svc <- service()
  op <- new_operation(
    name = "ListProvisionedCapacity",
    http_method = "GET",
    http_path = "/{accountId}/provisioned-capacity",
    paginator = list()
  )
  input <- list_provisioned_capacity_input(accountId = accountId)
  output <- list_provisioned_capacity_output()
  # Build, sign, and send the request; the parsed response is returned.
  send_request(new_request(svc, op, input, output))
}

#' This operation lists all the tags attached to a vault
#'
#' This operation lists all the tags attached to a vault. The operation returns an empty map if there are no tags. For more information about tags, see [Tagging Amazon Glacier Resources](http://docs.aws.amazon.com/amazonglacier/latest/dev/tagging.html).
#'
#' @section Accepted Parameters:
#' ```
#' list_tags_for_vault(
#'   accountId = "string",
#'   vaultName = "string"
#' )
#' ```
#'
#' @param accountId &#91;required&#93; The `AccountId` value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single \'`-`\' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens (\'-\') in the ID.
#' @param vaultName &#91;required&#93; The name of the vault.
#'
#' @examples
#' # The example lists all the tags attached to the vault examplevault.
#' \donttest{list_tags_for_vault(
#'   accountId = "-",
#'   vaultName = "examplevault"
#' )}
#'
#' @export
list_tags_for_vault <- function(accountId, vaultName) {
  # Describe the Glacier ListTagsForVault REST operation.
  svc <- service()
  op <- new_operation(
    name = "ListTagsForVault",
    http_method = "GET",
    http_path = "/{accountId}/vaults/{vaultName}/tags",
    paginator = list()
  )
  input <- list_tags_for_vault_input(
    accountId = accountId,
    vaultName = vaultName
  )
  output <- list_tags_for_vault_output()
  # Build, sign, and send the request; the parsed response is returned.
  send_request(new_request(svc, op, input, output))
}

#' This operation lists all vaults owned by the calling user's account
#'
#' This operation lists all vaults owned by the calling user\'s account. The list returned in the response is ASCII-sorted by vault name.
#'
#' By default, this operation returns up to 10 items. If there are more vaults to list, the response `marker` field contains the vault Amazon Resource Name (ARN) at which to continue the list with a new List Vaults request; otherwise, the `marker` field is `null`. To return a list of vaults that begins at a specific vault, set the `marker` request parameter to the vault ARN you obtained from a previous List Vaults request. You can also limit the number of vaults returned in the response by specifying the `limit` parameter in the request.
#'
#' An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don\'t have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see [Access Control Using AWS Identity and Access Management (IAM)](http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
#'
#' For conceptual information and underlying REST API, see [Retrieving Vault Metadata in Amazon Glacier](http://docs.aws.amazon.com/amazonglacier/latest/dev/retrieving-vault-info.html) and [List Vaults](http://docs.aws.amazon.com/amazonglacier/latest/dev/api-vaults-get.html) in the *Amazon Glacier Developer Guide*.
#'
#' @section Accepted Parameters:
#' ```
#' list_vaults(
#'   accountId = "string",
#'   marker = "string",
#'   limit = "string"
#' )
#' ```
#'
#' @param accountId &#91;required&#93; The `AccountId` value is the AWS account ID. This value must match the AWS account ID associated with the credentials used to sign the request. You can either specify an AWS account ID or optionally a single \'`-`\' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you specify your account ID, do not include any hyphens (\'-\') in the ID.
#' @param marker A string used for pagination. The marker specifies the vault ARN after which the listing of vaults should begin.
#' @param limit The maximum number of vaults to be returned. The default limit is 10. The number of vaults returned might be fewer than the specified limit, but the number of returned vaults never exceeds the limit.
#'
#' @examples
#' # The example lists all vaults owned by the specified AWS account.
#' \donttest{list_vaults(
#'   accountId = "-",
#'   limit = "",
#'   marker = ""
#' )}
#'
#' @export
list_vaults <- function(accountId, marker = NULL, limit = NULL) {
  # Describe the Glacier ListVaults REST operation.
  svc <- service()
  op <- new_operation(
    name = "ListVaults",
    http_method = "GET",
    http_path = "/{accountId}/vaults",
    paginator = list()
  )
  input <- list_vaults_input(
    accountId = accountId,
    marker = marker,
    limit = limit
  )
  output <- list_vaults_output()
  # Build, sign, and send the request; the parsed response is returned.
  send_request(new_request(svc, op, input, output))
}

#' This operation purchases a provisioned capacity unit for an AWS account
#'
#' This operation purchases a provisioned capacity unit for an AWS account.
#'
#' @section Accepted Parameters:
#' ```
#' purchase_provisioned_capacity(
#'   accountId = "string"
#' )
#' ```
#'
#' @param accountId &#91;required&#93; The AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single \'-\' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, don\'t include any hyphens (\'-\') in the ID.
#'
#' @examples
#' # The example purchases provisioned capacity unit for an AWS account.
#' \donttest{purchase_provisioned_capacity(
#'   accountId = "-"
#' )}
#'
#' @export
purchase_provisioned_capacity <- function(accountId) {
  # Describe the Glacier PurchaseProvisionedCapacity REST operation.
  svc <- service()
  op <- new_operation(
    name = "PurchaseProvisionedCapacity",
    http_method = "POST",
    http_path = "/{accountId}/provisioned-capacity",
    paginator = list()
  )
  input <- purchase_provisioned_capacity_input(accountId = accountId)
  output <- purchase_provisioned_capacity_output()
  # Build, sign, and send the request; the parsed response is returned.
  send_request(new_request(svc, op, input, output))
}

#' This operation removes one or more tags from the set of tags attached to a vault
#'
#' This operation removes one or more tags from the set of tags attached to a vault. For more information about tags, see [Tagging Amazon Glacier Resources](http://docs.aws.amazon.com/amazonglacier/latest/dev/tagging.html). This operation is idempotent. The operation will be successful, even if there are no tags attached to the vault.
#'
#' @section Accepted Parameters:
#' ```
#' remove_tags_from_vault(
#'   accountId = "string",
#'   vaultName = "string",
#'   TagKeys = list(
#'     "string"
#'   )
#' )
#' ```
#'
#' @param accountId &#91;required&#93; The `AccountId` value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single \'`-`\' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens (\'-\') in the ID.
#' @param vaultName &#91;required&#93; The name of the vault.
#' @param TagKeys A list of tag keys. Each corresponding tag is removed from the vault.
#'
#' @examples
#' # The example removes two tags from the vault named examplevault.
#' \donttest{remove_tags_from_vault(
#'   TagKeys = list(
#'     "examplekey1",
#'     "examplekey2"
#'   ),
#'   accountId = "-",
#'   vaultName = "examplevault"
#' )}
#'
#' @export
remove_tags_from_vault <- function(accountId, vaultName, TagKeys = NULL) {
  # Describe the Glacier RemoveTagsFromVault REST operation.
  svc <- service()
  op <- new_operation(
    name = "RemoveTagsFromVault",
    http_method = "POST",
    http_path = "/{accountId}/vaults/{vaultName}/tags?operation=remove",
    paginator = list()
  )
  input <- remove_tags_from_vault_input(
    accountId = accountId,
    vaultName = vaultName,
    TagKeys = TagKeys
  )
  output <- remove_tags_from_vault_output()
  # Build, sign, and send the request; the parsed response is returned.
  send_request(new_request(svc, op, input, output))
}

#' This operation sets and then enacts a data retrieval policy in the region specified in the PUT request
#'
#' This operation sets and then enacts a data retrieval policy in the region specified in the PUT request. You can set one policy per region for an AWS account. The policy is enacted within a few minutes of a successful PUT operation.
#'
#' The set policy operation does not affect retrieval jobs that were in progress before the policy was enacted. For more information about data retrieval policies, see [Amazon Glacier Data Retrieval Policies](http://docs.aws.amazon.com/amazonglacier/latest/dev/data-retrieval-policy.html).
#'
#' @section Accepted Parameters:
#' ```
#' set_data_retrieval_policy(
#'   accountId = "string",
#'   Policy = list(
#'     Rules = list(
#'       list(
#'         Strategy = "string",
#'         BytesPerHour = 123
#'       )
#'     )
#'   )
#' )
#' ```
#'
#' @param accountId &#91;required&#93; The `AccountId` value is the AWS account ID. This value must match the AWS account ID associated with the credentials used to sign the request. You can either specify an AWS account ID or optionally a single \'`-`\' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you specify your account ID, do not include any hyphens (\'-\') in the ID.
#' @param Policy The data retrieval policy in JSON format.
#'
#' @examples
#' # The example sets and then enacts a data retrieval policy.
#' \donttest{set_data_retrieval_policy(
#'   Policy = list(
#'     Rules = list(
#'       list(
#'         BytesPerHour = 10737418240,
#'         Strategy = "BytesPerHour"
#'       )
#'     )
#'   ),
#'   accountId = "-"
#' )}
#'
#' @export
set_data_retrieval_policy <- function(accountId, Policy = NULL) {
  # Describe the Glacier SetDataRetrievalPolicy REST operation.
  svc <- service()
  op <- new_operation(
    name = "SetDataRetrievalPolicy",
    http_method = "PUT",
    http_path = "/{accountId}/policies/data-retrieval",
    paginator = list()
  )
  input <- set_data_retrieval_policy_input(
    accountId = accountId,
    Policy = Policy
  )
  output <- set_data_retrieval_policy_output()
  # Build, sign, and send the request; the parsed response is returned.
  send_request(new_request(svc, op, input, output))
}

#' This operation configures an access policy for a vault and will overwrite an existing policy
#'
#' This operation configures an access policy for a vault and will overwrite an existing policy. To configure a vault access policy, send a PUT request to the `access-policy` subresource of the vault. An access policy is specific to a vault and is also called a vault subresource. You can set one access policy per vault and the policy can be up to 20 KB in size. For more information about vault access policies, see [Amazon Glacier Access Control with Vault Access Policies](http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-access-policy.html).
#'
#' @section Accepted Parameters:
#' ```
#' set_vault_access_policy(
#'   accountId = "string",
#'   vaultName = "string",
#'   policy = list(
#'     Policy = "string"
#'   )
#' )
#' ```
#'
#' @param accountId &#91;required&#93; The `AccountId` value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single \'`-`\' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens (\'-\') in the ID.
#' @param vaultName &#91;required&#93; The name of the vault.
#' @param policy The vault access policy as a JSON string.
#'
#' @examples
#' # The example configures an access policy for the vault named
#' # examplevault.
#' \donttest{set_vault_access_policy(
#'   accountId = "-",
#'   policy = list(
#'     Policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Sid\":\"Define-owner-access-rights\",\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::999999999999:root\"},\"Action\":\"glacier:DeleteArchive\",\"Resource\":\"arn:aws:glacier:us-west-2:999999999999:vaults/examplevault\"}]}"
#'   ),
#'   vaultName = "examplevault"
#' )}
#'
#' @export
set_vault_access_policy <- function(accountId, vaultName, policy = NULL) {
  # Describe the Glacier SetVaultAccessPolicy REST operation.
  svc <- service()
  op <- new_operation(
    name = "SetVaultAccessPolicy",
    http_method = "PUT",
    http_path = "/{accountId}/vaults/{vaultName}/access-policy",
    paginator = list()
  )
  input <- set_vault_access_policy_input(
    accountId = accountId,
    vaultName = vaultName,
    policy = policy
  )
  output <- set_vault_access_policy_output()
  # Build, sign, and send the request; the parsed response is returned.
  send_request(new_request(svc, op, input, output))
}

#' This operation configures notifications that will be sent when specific events happen to a vault
#'
#' This operation configures notifications that will be sent when specific events happen to a vault. By default, you don\'t get any notifications.
#'
#' To configure vault notifications, send a PUT request to the `notification-configuration` subresource of the vault. The request should include a JSON document that provides an Amazon SNS topic and specific events for which you want Amazon Glacier to send notifications to the topic.
#'
#' Amazon SNS topics must grant permission to the vault to be allowed to publish notifications to the topic. You can configure a vault to publish a notification for the following vault events:
#'
#' - **ArchiveRetrievalCompleted** This event occurs when a job that was initiated for an archive retrieval is completed (InitiateJob). The status of the completed job can be \"Succeeded\" or \"Failed\". The notification sent to the SNS topic is the same output as returned from DescribeJob.
#'
#' - **InventoryRetrievalCompleted** This event occurs when a job that was initiated for an inventory retrieval is completed (InitiateJob). The status of the completed job can be \"Succeeded\" or \"Failed\". The notification sent to the SNS topic is the same output as returned from DescribeJob.
#'
#' An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don\'t have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see [Access Control Using AWS Identity and Access Management (IAM)](http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
#'
#' For conceptual information and underlying REST API, see [Configuring Vault Notifications in Amazon Glacier](http://docs.aws.amazon.com/amazonglacier/latest/dev/configuring-notifications.html) and [Set Vault Notification Configuration](http://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-notifications-put.html) in the *Amazon Glacier Developer Guide*.
#'
#' @section Accepted Parameters:
#' ```
#' set_vault_notifications(
#'   accountId = "string",
#'   vaultName = "string",
#'   vaultNotificationConfig = list(
#'     SNSTopic = "string",
#'     Events = list(
#'       "string"
#'     )
#'   )
#' )
#' ```
#'
#' @param accountId &#91;required&#93; The `AccountId` value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single \'`-`\' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens (\'-\') in the ID.
#' @param vaultName &#91;required&#93; The name of the vault.
#' @param vaultNotificationConfig Provides options for specifying notification configuration.
#'
#' @examples
#' # The example sets the examplevault notification configuration.
#' \donttest{set_vault_notifications(
#'   accountId = "-",
#'   vaultName = "examplevault",
#'   vaultNotificationConfig = list(
#'     Events = list(
#'       "ArchiveRetrievalCompleted",
#'       "InventoryRetrievalCompleted"
#'     ),
#'     SNSTopic = "arn:aws:sns:us-west-2:012345678901:mytopic"
#'   )
#' )}
#'
#' @export
set_vault_notifications <- function(accountId, vaultName,
                                    vaultNotificationConfig = NULL) {
  # Describe the Glacier SetVaultNotifications REST operation.
  svc <- service()
  op <- new_operation(
    name = "SetVaultNotifications",
    http_method = "PUT",
    http_path = "/{accountId}/vaults/{vaultName}/notification-configuration",
    paginator = list()
  )
  input <- set_vault_notifications_input(
    accountId = accountId,
    vaultName = vaultName,
    vaultNotificationConfig = vaultNotificationConfig
  )
  output <- set_vault_notifications_output()
  # Build, sign, and send the request; the parsed response is returned.
  send_request(new_request(svc, op, input, output))
}

#' This operation adds an archive to a vault
#'
#' This operation adds an archive to a vault. This is a synchronous operation, and for a successful upload, your data is durably persisted. Amazon Glacier returns the archive ID in the `x-amz-archive-id` header of the response.
#'
#' You must use the archive ID to access your data in Amazon Glacier. After you upload an archive, you should save the archive ID returned so that you can retrieve or delete the archive later. Besides saving the archive ID, you can also index it and give it a friendly name to allow for better searching. You can also use the optional archive description field to specify how the archive is referred to in an external index of archives, such as you might create in Amazon DynamoDB. You can also get the vault inventory to obtain a list of archive IDs in a vault. For more information, see InitiateJob.
#'
#' You must provide a SHA256 tree hash of the data you are uploading. For information about computing a SHA256 tree hash, see [Computing Checksums](http://docs.aws.amazon.com/amazonglacier/latest/dev/checksum-calculations.html).
#'
#' You can optionally specify an archive description of up to 1,024 printable ASCII characters. You can get the archive description when you either retrieve the archive or get the vault inventory. For more information, see InitiateJob. Amazon Glacier does not interpret the description in any way. An archive description does not need to be unique. You cannot use the description to retrieve or sort the archive list.
#'
#' Archives are immutable. After you upload an archive, you cannot edit the archive or its description.
#'
#' An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don\'t have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see [Access Control Using AWS Identity and Access Management (IAM)](http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
#'
#' For conceptual information and underlying REST API, see [Uploading an Archive in Amazon Glacier](http://docs.aws.amazon.com/amazonglacier/latest/dev/uploading-an-archive.html) and [Upload Archive](http://docs.aws.amazon.com/amazonglacier/latest/dev/api-archive-post.html) in the *Amazon Glacier Developer Guide*.
#'
#' @section Accepted Parameters:
#' ```
#' upload_archive(
#'   vaultName = "string",
#'   accountId = "string",
#'   archiveDescription = "string",
#'   checksum = "string",
#'   body = raw
#' )
#' ```
#'
#' @param vaultName &#91;required&#93; The name of the vault.
#' @param accountId &#91;required&#93; The `AccountId` value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single \'`-`\' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens (\'-\') in the ID.
#' @param archiveDescription The optional description of the archive you are uploading.
#' @param checksum The SHA256 tree hash of the data being uploaded.
#' @param body The data to upload.
#'
#' @examples
#' # The example adds an archive to a vault.
#' \donttest{upload_archive(
#'   accountId = "-",
#'   archiveDescription = "",
#'   body = "example-data-to-upload",
#'   checksum = "",
#'   vaultName = "my-vault"
#' )}
#'
#' @export
upload_archive <- function(vaultName, accountId, archiveDescription = NULL,
                           checksum = NULL, body = NULL) {
  # Describe the Glacier UploadArchive REST operation.
  svc <- service()
  op <- new_operation(
    name = "UploadArchive",
    http_method = "POST",
    http_path = "/{accountId}/vaults/{vaultName}/archives",
    paginator = list()
  )
  input <- upload_archive_input(
    vaultName = vaultName,
    accountId = accountId,
    archiveDescription = archiveDescription,
    checksum = checksum,
    body = body
  )
  output <- upload_archive_output()
  # Build, sign, and send the request; the parsed response is returned.
  send_request(new_request(svc, op, input, output))
}

#' This operation uploads a part of an archive
#'
#' This operation uploads a part of an archive. You can upload archive parts in any order. You can also upload them in parallel. You can upload up to 10,000 parts for a multipart upload.
#'
#' Amazon Glacier rejects your upload part request if any of the following conditions is true:
#'
#' - **SHA256 tree hash does not match**To ensure that part data is not corrupted in transmission, you compute a SHA256 tree hash of the part and include it in your request. Upon receiving the part data, Amazon Glacier also computes a SHA256 tree hash. If these hash values don\'t match, the operation fails. For information about computing a SHA256 tree hash, see [Computing Checksums](http://docs.aws.amazon.com/amazonglacier/latest/dev/checksum-calculations.html).
#'
#' - **Part size does not match**The size of each part except the last must match the size specified in the corresponding InitiateMultipartUpload request. The size of the last part must be the same size as, or smaller than, the specified size.
#'
#' If you upload a part whose size is smaller than the part size you specified in your initiate multipart upload request and that part is not the last part, then the upload part request will succeed. However, the subsequent Complete Multipart Upload request will fail.
#'
#' - **Range does not align**The byte range value in the request does not align with the part size specified in the corresponding initiate request. For example, if you specify a part size of 4194304 bytes (4 MB), then 0 to 4194303 bytes (4 MB - 1) and 4194304 (4 MB) to 8388607 (8 MB - 1) are valid part ranges. However, if you set a range value of 2 MB to 6 MB, the range does not align with the part size and the upload will fail.
#'
#' This operation is idempotent. If you upload the same part multiple times, the data included in the most recent request overwrites the previously uploaded data.
#'
#' An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don\'t have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see [Access Control Using AWS Identity and Access Management (IAM)](http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
#' #' For conceptual information and underlying REST API, see [Uploading Large Archives in Parts (Multipart Upload)](http://docs.aws.amazon.com/amazonglacier/latest/dev/uploading-archive-mpu.html) and [Upload Part](http://docs.aws.amazon.com/amazonglacier/latest/dev/api-upload-part.html) in the *Amazon Glacier Developer Guide*. #' #' @section Accepted Parameters: #' ``` #' upload_multipart_part( #' accountId = "string", #' vaultName = "string", #' uploadId = "string", #' checksum = "string", #' range = "string", #' body = raw #' ) #' ``` #' #' @param accountId &#91;required&#93; The `AccountId` value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single \'`-`\' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens (\'-\') in the ID. #' @param vaultName &#91;required&#93; The name of the vault. #' @param uploadId &#91;required&#93; The upload ID of the multipart upload. #' @param checksum The SHA256 tree hash of the data being uploaded. #' @param range Identifies the range of bytes in the assembled archive that will be uploaded in this part. Amazon Glacier uses this information to assemble the archive in the proper sequence. The format of this header follows RFC 2616. An example header is Content-Range:bytes 0-4194303/\*. #' @param body The data to upload. #' #' @examples #' # The example uploads the first 1 MiB (1024 x 1024 bytes) part of an #' # archive. 
#' \donttest{upload_multipart_part(
#'   accountId = "-",
#'   body = "part1",
#'   checksum = "c06f7cd4baacb087002a99a5f48bf953",
#'   range = "bytes 0-1048575/*",
#'   uploadId = "19gaRezEXAMPLES6Ry5YYdqthHOC_kGRCT03L9yetr220UmPtBYKk-OssZtLqyFu7sY1_lR7vgFuJV6NtcV5zpsJ",
#'   vaultName = "examplevault"
#' )}
#'
#' @export
upload_multipart_part <- function(accountId, vaultName, uploadId,
                                  checksum = NULL, range = NULL, body = NULL) {
  # Describe the Glacier UploadMultipartPart REST operation
  operation <- new_operation(
    name = "UploadMultipartPart",
    http_method = "PUT",
    http_path = "/{accountId}/vaults/{vaultName}/multipart-uploads/{uploadId}",
    paginator = list()
  )
  # Marshal caller-supplied parameters into the request shape
  request_input <- upload_multipart_part_input(
    accountId = accountId,
    vaultName = vaultName,
    uploadId = uploadId,
    checksum = checksum,
    range = range,
    body = body
  )
  request_output <- upload_multipart_part_output()
  # Build and dispatch the signed request; the response is returned as-is
  svc_client <- service()
  req <- new_request(svc_client, operation, request_input, request_output)
  send_request(req)
}
9247a2b4b9d59a41aea2af3ca3ed087261f1b3d1
b175327395279b596b9a8478c0ce5f6137747b14
/第六次作业/Adaboost_single_s.R
f47d834b1a6a342bf1c92053a15f68463f8ab834
[]
no_license
LZKPKU/Statistical-Learning
21110f1abb2dbc30020885ad167e0c2d3875c5f1
b941e024eebcd16040fc064f57c592f16d4c9192
refs/heads/master
2021-09-05T01:37:07.400863
2018-01-23T13:38:38
2018-01-23T13:38:38
115,834,903
0
0
null
null
null
null
UTF-8
R
false
false
2,073
r
Adaboost_single_s.R
# AdaBoost with single-split decision stumps, implemented from scratch.
# Data model follows ESL (Hastie et al., ch. 10): X ~ N(0, I_p) and
# y = +1 when sum(x_j^2) > 9.34 (the chi-squared_10 median), else -1.

# Parameters
p <- 10    # number of features
n <- 100   # number of observations

# --- Generate training data ---
X <- matrix(nrow = n, ncol = p)
for (i in seq_len(p)) X[, i] <- rnorm(n, 0, 1)
y <- numeric(n)
for (i in seq_len(n)) y[i] <- sum(X[i, ]^2)
y[y <= 9.34] <- -1
y[y > 9.34] <- 1

# Model storage: for boosting round t, f[t] is the feature split on,
# x[t] the split threshold and alpha[t] the stump's vote weight.
M <- 1000                 # maximum number of boosting rounds
f <- integer(M)
x <- numeric(M)
alpha <- numeric(M)

w <- rep(1, n)            # observation weights (unnormalised)
y_hat <- numeric(n)       # accumulated weighted votes on the training set
                          # (bug fix: the original updated an undefined
                          #  variable 'yhat' but thresholded 'y_hat',
                          #  which raised "object 'yhat' not found")
n_rounds <- 0             # rounds actually fitted; used by the test loop

for (t in seq_len(M)) {
  minerr <- Inf
  sumw <- sum(w)

  # Exhaustive stump search: every observed value of every feature is a
  # candidate threshold; the stump predicts +1 where X[, j] <= threshold.
  # The error computation is vectorised over observations, but the search
  # order (i outer, j inner) and strict-< tie-breaking match the original.
  for (i in seq_len(n)) {
    for (j in seq_len(p)) {
      wrong <- (X[, j] <= X[i, j] & y == -1) | (X[, j] > X[i, j] & y == 1)
      yerr <- sum(w[wrong])
      if (yerr < minerr) {
        f[t] <- j
        x[t] <- X[i, j]
        minerr <- yerr
      }
    }
  }

  # Stump vote weight from its weighted error rate
  err1 <- minerr / sumw
  alpha[t] <- log((1 - err1) / err1)
  n_rounds <- t
  if (alpha[t] < 1e-4) break   # stop once the best stump barely beats chance

  # Accumulate training votes and up-weight misclassified observations
  pred <- ifelse(X[, f[t]] <= x[t], 1, -1)
  y_hat <- y_hat + alpha[t] * pred
  w[pred != y] <- w[pred != y] * exp(alpha[t])
}

# Training-set predictions: sign of the accumulated vote (ties map to -1)
y_hat[y_hat <= 0] <- -1
y_hat[y_hat > 0] <- 1

# f, alpha, x save the fitted model

# --- Testing on an independent sample from the same model ---
Xtest <- matrix(nrow = n, ncol = p)
for (i in seq_len(p)) Xtest[, i] <- rnorm(n, 0, 1)
ytest <- numeric(n)
for (i in seq_len(n)) ytest[i] <- sum(Xtest[i, ]^2)
ytest[ytest <= 9.34] <- -1
ytest[ytest > 9.34] <- 1

# Combine only the rounds actually fitted (bug fix: the original iterated
# over all M slots, which errors after an early break because unfitted
# f[t] entries are not valid column indices).
y_pred <- numeric(n)
for (t in seq_len(n_rounds)) {
  y_pred <- y_pred + alpha[t] * ifelse(Xtest[, f[t]] <= x[t], 1, -1)
}
y_pred[y_pred <= 0] <- -1
y_pred[y_pred > 0] <- 1

# Number of correctly classified test observations
sum(y_pred == ytest)
b8cb4791cc72c2c50ec1d3874bd1ca4066321ec9
c85d67e38879bb7ee5daeb8bc2ede423afb0b3f4
/plot3.R
35bc970868f5d00e744945d3215b12c9785caf1d
[]
no_license
neethupanackal/ExData_Plotting1
876e7d9adb257941bc0c680695322358c6267932
59c80a8d686af8b73939299caaf979d1c9a90e73
refs/heads/master
2021-01-22T19:30:32.512265
2015-06-07T15:36:29
2015-06-07T15:36:29
37,013,816
0
0
null
2015-06-07T11:16:46
2015-06-07T11:16:46
null
UTF-8
R
false
false
1,491
r
plot3.R
## Plot 3: household energy sub-metering over 2007-02-01 .. 2007-02-02.
#** The source file "household_power_consumption.txt" should be in your working directory **#

# The two target days occupy rows 66638..69517 of the data file
# (2 days x 1440 minutes = 2880 observations). NOTE: the file's header line
# is at the very top, far before the skipped region, so header must be FALSE
# here -- with header = TRUE the first wanted data row (row 66638) would be
# silently consumed as column names (and then overwritten by names() below),
# losing one observation. Fixed: header = FALSE, nrows = 2880.
power_consumption <- read.table("household_power_consumption.txt",
                                header = FALSE,
                                sep = ";",
                                skip = 66637,
                                nrows = 2880,
                                na.strings = "?",
                                colClasses = c(rep("character", 2), rep("numeric", 7)))

# Name the columns (matching the file's own header)
names(power_consumption) <- c("PDate", "PTime", "Global_active_power",
                              "Global_reactive_power", "Voltage",
                              "Global_intensity", "Sub_metering_1",
                              "Sub_metering_2", "Sub_metering_3")

# Parse date + time once instead of recomputing it for every series
datetime <- strptime(paste(power_consumption$PDate, power_consumption$PTime),
                     format = "%d/%m/%Y %T")

# Set the height and width of the device
png(file = "plot3.png", width = 480, height = 480)

# Base plot: sub-metering 1 in black, then overlay 2 (red) and 3 (blue)
plot(datetime, power_consumption$Sub_metering_1,
     type = "l",
     xlab = "",
     ylab = "Energy sub metering",
     col = "black")
lines(datetime, power_consumption$Sub_metering_2, col = "red")
lines(datetime, power_consumption$Sub_metering_3, col = "blue")

# Parameters for the legend box
legend("topright", names(power_consumption)[7:9],
       lty = 1, col = c("black", "red", "blue"))

# Close the device
dev.off()
8893570823351dc6b342e36e96ab58f510682225
2d34708b03cdf802018f17d0ba150df6772b6897
/googlecomputebeta.auto/man/ForwardingRulesScopedList.warning.Rd
4fd56956e367d292d4f5488e9647014d7bed0665
[ "MIT" ]
permissive
GVersteeg/autoGoogleAPI
8b3dda19fae2f012e11b3a18a330a4d0da474921
f4850822230ef2f5552c9a5f42e397d9ae027a18
refs/heads/master
2020-09-28T20:20:58.023495
2017-03-05T19:50:39
2017-03-05T19:50:39
null
0
0
null
null
null
null
UTF-8
R
false
true
1,147
rd
ForwardingRulesScopedList.warning.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/compute_objects.R \name{ForwardingRulesScopedList.warning} \alias{ForwardingRulesScopedList.warning} \title{ForwardingRulesScopedList.warning Object} \usage{ ForwardingRulesScopedList.warning(ForwardingRulesScopedList.warning.data = NULL, code = NULL, data = NULL, message = NULL) } \arguments{ \item{ForwardingRulesScopedList.warning.data}{The \link{ForwardingRulesScopedList.warning.data} object or list of objects} \item{code}{[Output Only] A warning code, if applicable} \item{data}{[Output Only] Metadata about this warning in key: value format} \item{message}{[Output Only] A human-readable description of the warning code} } \value{ ForwardingRulesScopedList.warning object } \description{ ForwardingRulesScopedList.warning Object } \details{ Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}} Informational warning which replaces the list of forwarding rules when the list is empty. } \seealso{ Other ForwardingRulesScopedList functions: \code{\link{ForwardingRulesScopedList.warning.data}}, \code{\link{ForwardingRulesScopedList}} }
1917d395849b620cde4cc33831b955d0e5736ec0
377aeaedacdc60ea71bc250d3fb6b15c9a2aa1c8
/R/geom_gene_arrow.R
040571595d7b795ab09600e1539bc1d186b35ec3
[]
no_license
snashraf/gggenes
b016c9c075e970de1bdd00742b2f5b9bd0405760
a4d1062cbbbd6ebaf883e02934599130d34e5123
refs/heads/master
2021-01-19T17:07:02.672128
2017-08-22T04:04:46
2017-08-22T04:04:46
null
0
0
null
null
null
null
UTF-8
R
false
false
4,116
r
geom_gene_arrow.R
#' @title Draw an arrow to represent a gene.
#' @export
#'
#' @description
#' Draws an arrow representing a gene with orientation. Only works for genes
#' arranged horizontally, i.e. with the x-axis as molecule position and the
#' y-axis as molecule.
#'
#' @section Aesthetics:
#'
#' \itemize{
#'   \item xmin and xmax (start and end of the gene; will be used to determine
#'   gene orientation)
#'   \item y
#'   \item alpha
#'   \item colour
#'   \item fill
#'   \item linetype
#' }
#'
#' @param mapping,data,stat,position,na.rm,show.legend,inherit.aes,... As
#' standard for ggplot2. (Doc fix: the parameter is `position`, not
#' `identity`, as the signature shows.)
#' @param arrowhead_width Unit object giving the width of the arrowhead.
#' Defaults to 4 mm. If the gene is drawn smaller than this width, only the
#' arrowhead will be drawn, compressed to the length of the gene.
#' @param arrowhead_height Unit object giving the height of the arrowhead.
#' Defaults to 4 mm.
#' @param arrow_body_height Unit object giving the height of the body of the
#' arrow. Defaults to 3 mm.
geom_gene_arrow <- function(
  mapping = NULL,
  data = NULL,
  stat = "identity",
  position = "identity",
  na.rm = FALSE,
  show.legend = NA,
  inherit.aes = TRUE,
  arrowhead_width = unit(4, "mm"),
  arrowhead_height = unit(4, "mm"),
  arrow_body_height = unit(3, "mm"),
  ...
) {
  layer(
    geom = GeomGeneArrow,
    mapping = mapping,
    data = data,
    stat = stat,
    position = position,
    show.legend = show.legend,
    inherit.aes = inherit.aes,
    params = list(
      na.rm = na.rm,
      arrowhead_width = arrowhead_width,
      arrowhead_height = arrowhead_height,
      arrow_body_height = arrow_body_height,
      ...
    )
  )
}

#' @rdname geom_gene_arrow
#' @export
#' @noRd
GeomGeneArrow <- ggproto("GeomGeneArrow", Geom,
  required_aes = c("xmin", "xmax", "y"),
  default_aes = aes(alpha = 1, colour = "black", fill = "white", linetype = 1),
  draw_key = draw_key_polygon,
  draw_panel = function(
    data,
    panel_scales,
    coord,
    arrowhead_width,
    arrowhead_height,
    arrow_body_height
  ) {
    data <- coord$transform(data, panel_scales)
    # Drawing is deferred to makeContent.genearrowtree so the arrowhead
    # dimensions (given in absolute units) can be resolved at draw time.
    gt <- grid::gTree(
      data = data,
      cl = "genearrowtree",
      arrowhead_width = arrowhead_width,
      arrowhead_height = arrowhead_height,
      arrow_body_height = arrow_body_height
    )
    gt$name <- grid::grobName(gt, "geom_gene_arrow")
    gt
  }
)

#' @rdname geom_gene_arrow
#' @export
#' @noRd
makeContent.genearrowtree <- function(x) {
  data <- x$data

  # Prepare a polygon grob for each gene
  grobs <- lapply(seq_len(nrow(data)), function(i) {
    gene <- data[i, ]

    # Determine orientation: +1 points right (xmax > xmin), -1 points left
    orientation <- ifelse(gene$xmax > gene$xmin, 1, -1)

    # Arrowhead defaults to 4 mm, unless the gene is shorter in which case the
    # gene is 100% arrowhead
    arrowhead_width <- as.numeric(grid::convertWidth(x$arrowhead_width, "native"))
    gene_width <- abs(gene$xmax - gene$xmin)
    arrowhead_width <- ifelse(
      arrowhead_width > gene_width,
      gene_width,
      arrowhead_width
    )

    # Calculate x-position of the flange (where the body meets the arrowhead)
    flangex <- (-orientation * arrowhead_width) + gene$xmax

    # Set arrow and arrowhead heights; it's convenient to divide these by two
    # for calculating y positions on the polygon
    arrowhead_height <- as.numeric(grid::convertHeight(x$arrowhead_height, "native")) / 2
    arrow_body_height <- as.numeric(grid::convertHeight(x$arrow_body_height, "native")) / 2

    # Create polygon grob. Bug fix: the original call contained a stray empty
    # argument (",,") after the y vector, passing a missing positional
    # parameter to polygonGrob; it has been removed.
    pg <- grid::polygonGrob(
      x = c(
        gene$xmin,
        gene$xmin,
        flangex,
        flangex,
        gene$xmax,
        flangex,
        flangex
      ),
      y = c(
        gene$y + arrow_body_height,
        gene$y - arrow_body_height,
        gene$y - arrow_body_height,
        gene$y - arrowhead_height,
        gene$y,
        gene$y + arrowhead_height,
        gene$y + arrow_body_height
      ),
      gp = grid::gpar(
        fill = alpha(gene$fill, gene$alpha),
        col = alpha(gene$colour, gene$alpha),
        lty = gene$linetype
      )
    )

    # Return the polygon grob
    pg
  })
  class(grobs) <- "gList"
  grid::setChildren(x, grobs)
}
30a5b6c2dfc25070a0abbc5d34c5d55ad588bcfd
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/GeDS/examples/Integrate.Rd.R
5a62e60584dabbc846ea47839ad1b7fe73d93310
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
1,108
r
Integrate.Rd.R
library(GeDS)

### Name: Integrate
### Title: Defined integral of GeDS objects
### Aliases: Integrate

### ** Examples

# Simulate a response Y against a single covariate X
# (see Dimitrova et al. 2017, section 4.1)
set.seed(123)
N <- 500
f_1 <- function(x) (10 * x / (1 + 100 * x^2)) * 4 + 4
X <- sort(runif(N, min = -2, max = 2))

# The mean of Y is a purely non-linear function of X, given by f_1
means <- f_1(X)

# Add Gaussian noise around the mean of Y
Y <- rnorm(N, means, sd = 0.1)

# Fit a GeDS regression via NGeDS
Gmod <- NGeDS(Y ~ f(X), beta = 0.6, phi = .995, Xextr = c(-2, 2))

# Defined integrals (TeX style) $\int_{1}^{-1} f(x)dx$ and
# $\int_{1}^{1} f(x)dx$, $f$ being the quadratic fit
Integrate(Gmod, to = c(-1, 1), from = 1, n = 3)

# Defined integrals (TeX style) $\int_{1}^{-1} f(x)dx$ and
# $\int_{-1}^{1} f(x)dx$, $f$ being the quadratic fit
Integrate(Gmod, to = c(-1, 1), from = c(1, -1), n = 3)

## Not run:
##D ## This gives an error
##D Integrate(Gmod, to = 1, from = c(1,-1), n = 3)
## End(Not run)
67978ca3f5e63637fbe49b4e28fc8b55ebc7184b
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
/fuzzedpackages/IceCast/man/get_dist.Rd
d33b81e7fed3bf6c2c6c10a3b1a892837a778653
[]
no_license
akhikolla/testpackages
62ccaeed866e2194652b65e7360987b3b20df7e7
01259c3543febc89955ea5b79f3a08d3afe57e95
refs/heads/master
2023-02-18T03:50:28.288006
2021-01-18T13:23:32
2021-01-18T13:23:32
329,981,898
7
1
null
null
null
null
UTF-8
R
false
true
490
rd
get_dist.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/misc.R \name{get_dist} \alias{get_dist} \title{Find euclidean distance} \usage{ get_dist(p1, p2) } \arguments{ \item{p1}{vector giving the x and y coordinate pair for the first point} \item{p2}{vector giving the x and y coordinate pair for the second point} } \value{ distance value } \description{ Finds the euclidean distance between two points (ignoring projection) } \examples{ get_dist(c(1, 2), c(3, 4)) }
d2fa5a72805f8e39b6f51705b2be7817ca42df18
6d443800445592a4bcdc3531a850d5152942e2fd
/GUI/gene_query_UI.R
edc1c7cbc6817e87432f0486fd4e74d6f54f6681
[]
no_license
angy89/InsideNano
35f2004414bd1065df4db686ceefdb2096b789da
0b5ee4502106740acc3daec100cac37f015791d3
refs/heads/master
2021-01-18T21:11:38.811196
2016-01-10T20:23:47
2016-01-10T20:23:47
45,189,493
0
0
null
null
null
null
UTF-8
R
false
false
2,952
r
gene_query_UI.R
# Shiny UI builders for the gene-query tab of the InsideNano GUI.

# Populate the query-input widgets (nano / drug / disease / chemical
# selectors, similarity threshold, clique options, action buttons) from the
# supplied vocabularies. Mutates `output` via renderUI assignments.
gene_query_UI_part2 <- function(input, output, gene_nano, gene_drug,
                                gene_chem, gene_dis) {

  output$gene_query_nano_input <- renderUI({
    selectizeInput("gene_query_nano_input",
                   label = "Nanomaterials",
                   choices = c("ALL", gene_nano),
                   multiple = TRUE,
                   options = list(create = TRUE))
  })

  output$gene_query_drug_input <- renderUI({
    selectizeInput("gene_query_drug_input",
                   label = "Drugs",
                   choices = c(unlist(ATC_choice_list), gene_drug),
                   multiple = TRUE,
                   options = list(create = TRUE))
  })

  output$gene_query_disease_input <- renderUI({
    selectizeInput("gene_query_disease_input",
                   label = "Diseases",
                   choices = c("ALL", gene_dis),
                   multiple = TRUE,
                   options = list(create = TRUE))
  })

  output$gene_query_chemical_input <- renderUI({
    selectizeInput("gene_query_chemical_input",
                   label = "Chemicals",
                   choices = c(unlist(chemical_choice_list), gene_chem),
                   multiple = TRUE,
                   options = list(create = TRUE))
  })

  # Similarity-strength slider (label text kept verbatim from the original UI)
  output$th_slider3 <- renderUI({
    sliderInput("th_slider3",
                label = "Strenght of Similarity",
                min = 10, max = 99, value = 99, step = 1)
  })

  output$gene_query_percentuale_somma <- renderUI({
    numericInput(inputId = "gene_query_percentuale_somma",
                 label = "Number of query items connected to the selected nodes based on the threshold",
                 value = 2, min = 1, max = 10, step = 1)
  })

  output$gene_query_nroCliques <- renderUI({
    numericInput(inputId = "gene_query_nroCliques",
                 label = "Number of query items that must be selected in the cliques",
                 value = 2, min = 1, max = 10, step = 1)
  })

  output$gene_query_clique_type <- renderUI({
    selectizeInput("gene_query_clique_type",
                   label = "Select the type of clique you want to analyze",
                   choices = list("All" = "ALL",
                                  "Nano-Drug-Chemical-Disease" = "NDCD",
                                  "Nano-Drug-Disease" = "NDD",
                                  "Nano-Drug-Chemical" = "NDC",
                                  "Drug-Chemical-Disease" = "DCD"),
                   selected = "ALL")
  })

  output$gene_query_Refresh <- renderUI({
    actionButton(inputId = "gene_query_Refresh",
                 label = "Refresh",
                 icon("refresh", lib = "glyphicon"))
  })

  output$Go4 <- renderUI({
    actionButton(inputId = "Go4",
                 label = "Start",
                 icon("circle-arrow-right", lib = "glyphicon"))
  })
}

# Build the gene selector from the graph `g` and return the gene list.
# NOTE(review): the incoming genes_input argument is immediately overwritten;
# the list is rebuilt from the graph's "gene" nodes and truncated to the
# first 100 entries (original behaviour, preserved).
gene_query_UI <- function(input, output, g, genes_input) {

  genes_input <- V(g)[which(V(g)$node_type == "gene")]$name
  genes_input <- genes_input[1:100]

  if (DEBUGGING) cat("Gene list --> ", genes_input, "\n")

  output$gene_input <- renderUI({
    selectizeInput("gene_input",
                   label = "Gene",
                   choices = c("ALL", genes_input),
                   multiple = TRUE,
                   options = list(create = TRUE))
  })

  output$Go3 <- renderUI({
    actionButton(inputId = "Go3",
                 label = "Start",
                 icon("circle-arrow-right", lib = "glyphicon"))
  })

  return(genes_input)
}
7471d45b27ffa900d551e41be400982af72388d7
e44a0f914cd55a7cb5d51c20c1c3a7ec6ef11a3d
/data/code/helpers/load_shp.R
d293031cc46d30892328e9c990d3e1ae70be0969
[]
no_license
gabrieljkelvin/acre_allcott
04a20266e0829ed8777d068017305269f8021de4
6ca0ff556d41208beeabe816c0821facfdb86c96
refs/heads/main
2023-03-11T09:11:42.828086
2021-03-02T07:43:26
2021-03-02T07:43:26
342,379,695
0
0
null
null
null
null
UTF-8
R
false
false
1,563
r
load_shp.R
# Helper to load one state's zipped shapefile, reproject it to a planar CRS
# and optionally repair invalid geometries. Requires the "sf", "stringr",
# "dplyr" and "magrittr" packages to have been loaded by the caller.
# (st_fips is accepted for interface compatibility but not used here.)
load_shp <- function(zip_folder, zip_file, state, st_fips, planar_crs, repair_geom) {

  # Unzip the archive into a temp subdirectory named after it (minus ".zip")
  stub <- str_replace(zip_file, "\\.zip$", "")
  exdir <- sprintf("temp/%s", stub)
  unzip(sprintf("%s/%s", zip_folder, zip_file),
        exdir = exdir,
        overwrite = TRUE,
        unzip = getOption("unzip"))

  # Two states ship their precinct shapefiles under non-standard suffices
  suff <- ""
  if (grepl("precinct", zip_folder) && state == "md") suff <- "_w_ushouse"
  if (grepl("precinct", zip_folder) && state == "va") suff <- "_president"

  # Read the shapefile as an sf object
  shp <- st_read(sprintf("%s/%s%s.shp", exdir, stub, suff),
                 stringsAsFactors = FALSE)

  # Reproject to the requested planar CRS when needed
  if (st_crs(shp) != planar_crs) {
    shp <- st_transform(shp, crs = planar_crs)
  }

  # Optionally repair invalid geometries in place
  if (repair_geom) {
    bad <- !st_is_valid(shp)
    if (any(bad)) {
      print(sprintf("Repairing %f portion of geometries", mean(bad)))
      shp[bad, ] <- st_make_valid(shp[bad, ])
    }
  }

  # Remove the unzipped files (no longer needed) and return the sf object
  unlink(exdir, recursive = TRUE)
  shp
}
f31aa8d5b27cbe002cb94c8197cf8e0b085991fb
9bf8e9da8f902b9b892ae373713d5302dfa36253
/plot4.R
919376e7ef6b8d3c5ac3332caed7d04231eed549
[]
no_license
lvizzini/Exploratory-Data-Analysis-Course-Project-2
126fa9ee3ca8a0ce2a85db4f1fe20d18223029ec
05b53ea88cb93185eddf70d2432bc1f3d35a012b
refs/heads/master
2021-01-16T08:47:14.379129
2020-02-25T16:45:27
2020-02-25T16:45:27
243,047,417
0
0
null
null
null
null
UTF-8
R
false
false
871
r
plot4.R
## Plot 4: total PM2.5 emissions from coal combustion-related sources,
## 1999-2008, across the United States.

# Load the NEI summary and the source classification table
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")

library(data.table)
library(ggplot2)

# Attach SCC descriptions to the emissions records and work as a data.table
NEI_SCC <- as.data.table(merge(NEI, SCC, by = "SCC"))

# Keep only coal-related sources (matched by name, case-insensitive)
is_coal <- grepl("coal", NEI_SCC$Short.Name, ignore.case = TRUE)
NEI_SCC_coal <- NEI_SCC[is_coal, ]

# Total coal emissions per year
coal_by_year <- NEI_SCC_coal[, .(Emissions = sum(Emissions)), by = year]

# Render the bar chart to plot4.png
png('plot4.png')
plt <- ggplot(coal_by_year, aes(factor(year), Emissions)) +
  geom_bar(stat = "identity") +
  xlab("year") +
  ylab(expression('Total PM'[2.5]*" Emissions")) +
  ggtitle('Total Emissions from coal from 1999 to 2008')
print(plt)
dev.off()
1d9f2d2bc553d08b7bb25d34f5564b9ebf82422f
cd7136449c4ea91fec7071948cce80da45d4244d
/man/Sync.test.OLD.Rd
3983d1ac3ad18e6d117738cdae391821f6148d04
[]
no_license
bokov/adapr
20428bb4c871b6ae02f3df3461d837513174d622
5af9860921a3e945a8ea4993d69e545c1cc5681e
refs/heads/master
2021-01-19T10:05:40.674036
2017-05-23T14:15:05
2017-05-23T14:15:05
87,824,109
0
0
null
null
null
null
UTF-8
R
false
true
511
rd
Sync.test.OLD.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/sync_test.R \name{Sync.test.OLD} \alias{Sync.test.OLD} \title{check the synchrony of source files and their created objects} \usage{ Sync.test.OLD(dagger, tree, plotl = TRUE) } \arguments{ \item{dagger}{a directed acyclic igraph representing dependencies} \item{tree}{dependency tree corresponding to dagger} \item{plotl}{logical for plotting or not} } \description{ check the synchrony of source files and their created objects }
9383ea432b83c237e6d469217c3a71fa82d9e156
372c012ff389da88f1d385d8b087f2105ea1f059
/WifiPositioning_v2.R
f185579022589fa55b18898f4b93695e45b9e44b
[]
no_license
Ubiqum-Data-Bcn-110/task3-2-wifi-caroldorofei
f5aedb4b7faa26a14485a56d8dff231a6dc89070
65d08e28aab15230755d380c69581daf9e00d2d2
refs/heads/master
2020-04-09T07:43:17.248527
2019-01-16T21:57:06
2019-01-16T21:57:06
160,167,900
0
0
null
null
null
null
UTF-8
R
false
false
34,704
r
WifiPositioning_v2.R
#Wifi Positioning #Start Date: December 3, 2018 #Deadline: Dec 21, 2018 #Cleaning environment and uploading data ---- rm(list =ls()) setwd("C:/Users/carol/Desktop/Ubiqum/R/Wifi Positioning/task3-2-wifi-caroldorofei") trainset <- read.csv("trainingData.csv") validation <- read.csv("validationData.csv") #Libraries ---- pacman::p_load(dplyr,ggplot2,reshape2,rlist,prob,caret,lattice,kazaam,pbdMPI,plotly) #Data Preprocessing ---- ##Converting floor and building into factors trainset$FLOOR <- as.factor(trainset$FLOOR) trainset$BUILDINGID <- as.factor(trainset$BUILDINGID) validation$FLOOR <- as.factor(validation$FLOOR) validation$BUILDINGID <- as.factor(validation$BUILDINGID) ##Finding out what WAPs are never detected ---- ###TRAINING detectedWAPs <- c() for (i in 1:520){ if (min(trainset[,i]) != 100){ detectedWAPs <- append(detectedWAPs,i) } } ##VALIDATION detectedValidation <- c() for (i in 1:520){ if (min(validation[,i]) != 100){ detectedValidation <- append(detectedValidation,i) } } ##Creating data sets with the intersected WAPs ---- intersectedWAP <- intersect(detectedWAPs,detectedValidation) cleantrainset <- trainset[,intersectedWAP] cleanvalidation <- validation[,intersectedWAP] ##Changing 100s to -105s ---- cleantrainset[cleantrainset == 100] <- -105 cleanvalidation[cleanvalidation == 100] <- -105 ##Add 9 columns after WAPs ---- cleantrainset <- cbind(cleantrainset,trainset[,521:529]) cleanvalidation <- cbind(cleanvalidation,validation[,521:529]) ##Removing observations in which no WAP was detected cleantrainset <- cleantrainset[apply(cleantrainset[,1:312],1,mean) != -105,] ###Removing observation which contain values between -30 and 0 cleantrainset3 <- cleantrainset[apply(cleantrainset[,1:312],1,function(x) max(x) < -30),] ###Check for Waps not detected after removing rows - NONE in cleantraining3 detectedWAPs3 <- c() for (i in 1:312){ if (mean(cleantrainset3[,i]) == -105){ detectedWAPs3 <- append(detectedWAPs3,i) } } ###Finding observations with only one WAP 
detected ---- cleantrainset4 <- cleantrainset3[apply(cleantrainset3[,1:312],1,function(x) sum(x != -105)) > 2,] cleantrainset5 <- cleantrainset3[apply(cleantrainset3[,1:312],1,function(x) sum(x != -105)) > 3,] ###Creating Building+Floor columns ---- cleantrainset$BuildingFloor <- paste0("B",cleantrainset$BUILDINGID,"F",cleantrainset$FLOOR) cleantrainset$BuildingFloor <- as.factor(cleantrainset$BuildingFloor) cleantrainset3$BuildingFloor <- paste0("B",cleantrainset3$BUILDINGID,"F",cleantrainset3$FLOOR) cleantrainset3$BuildingFloor <- as.factor(cleantrainset3$BuildingFloor) cleanvalidation$BuildingFloor <- paste0("B",cleanvalidation$BUILDINGID,"F",cleanvalidation$FLOOR) cleanvalidation$BuildingFloor <- as.factor(cleanvalidation$BuildingFloor) #WEIGHTED DATASETS clean3WAPs <- cleantrainset3[,1:312] clean3noWAPs <- cleantrainset3[,313:322] cleantrainset3.2 <- cbind(as.data.frame(apply(clean3WAPs,2,function(x) ifelse (x >= -70, x-x*0.2,x))),clean3noWAPs) clean3WAPs_Val <- cleanvalidation[,1:312] clean3noWAPs_Val <- cleanvalidation[,313:322] cleanvalidation3.2 <- cbind(as.data.frame(apply(clean3WAPs_Val,2,function(x) ifelse (x >= -70, x-x*0.2,x))),clean3noWAPs_Val) ##Creating datasets for each building ---- cleanbuilding0 <- cleantrainset[cleantrainset$BUILDINGID == 0,] cleanbuilding1 <- cleantrainset[cleantrainset$BUILDINGID == 1,] cleanbuilding3 <- cleantrainset[cleantrainset$BUILDINGID == 2,] #MODELLING - BUILDING ---- ##Parallel Processing #cl <- makeCluster(2) #registerDoParallel(cl) ##Create training and test sets 1 (sample with 20% data each) set.seed(123) inTraining1 <- createDataPartition(cleantrainset$BUILDINGID, times = 1, p = .02) training1 <- cleantrainset[inTraining1$Resample1,] ##Check distribution of observations in buildings ggplot(cleantrainset, aes(LATITUDE,LONGITUDE)) + geom_point() ggplot(training1, aes(LATITUDE,LONGITUDE)) + geom_point() ##Create training and test sets 3 (sample with 20% data) inTraining3 <- 
createDataPartition(cleantrainset3$BUILDINGID, times = 1, p = .3) training3 <- cleantrainset3[inTraining3$Resample1,] training3.Regr <- training3 training3.Regr$BUILDINGID <- as.numeric(training3.Regr$BUILDINGID) ##Create 10-fold cross-validation fitcontrol <- trainControl(method = "repeatedcv",number = 2,repeats = 2) #K-NN BUILDING (312 WAPs, sample 399 rows) ##Train k-nn1 (312 WAPs) #knnFit1 <- train(BUILDINGID~. -LONGITUDE-LATITUDE-FLOOR-SPACEID # -RELATIVEPOSITION-USERID-PHONEID-TIMESTAMP-BuildingFloor, # data = training3, method = "knn", preProc = c("center","scale"), # tuneLength = 15, trControl = fitcontrol) knnFit1 <- readRDS("knnSampleBuilding.rds") knnFit1 #Accuracy 0.9927, Kappa 0.9884 #Apply k-nn1 to test set testknn1 <- predict(knnFit1, cleanvalidation) #postResample k-nn1 to assess the metrics of the predictions postResample(testknn1, validation$BUILDINGID) ##training: Accuracy 0.9676, Kappa 0.9491 #Plot confusion matrix k-nn1 confusionMatrix(data = testknn1, validation$BUILDINGID) #RF1 (312 WAPs, sample 399 rows) ##Train RF1 (312 WAPs) RFfit1 <- readRDS("RFSampleBuilding.rds") RFfit1 #Accuracy 0.9922, Kappa 0.9878 #Apply RF1 to test set testRF1 <- predict(RFfit1, cleanvalidation) #postResample RF1 to assess the metrics of the predictions postResample(testRF1, cleanvalidation$BUILDINGID) ##Accuracy 0.9784, Kappa 0.9660 #Plot confusion matrix RF1 confusionMatrix(data = testRF1, validation$BUILDINGID) #SVMLinear1 (312 WAPs, sample 399 rows) ##Train SVMLinear1 (312 WAPs) SVMLinearfit1 <- readRDS("SVMLinearSampleBuilding.rds") SVMLinearfit1 #Accuracy 0.9934, Kappa 0.9896 #Apply SVMLinear1 to test set Predicted_BuildingSVMLinear1 <- predict(SVMLinearfit1, cleanvalidation) #postResample SVMLinear1 to assess the metrics of the predictions postResample(Predicted_BuildingSVMLinear1, cleanvalidation$BUILDINGID) ##Accuracy 0.9892, Kappa 0.9830 #Plot confusion matrix SVMLinear1 confusionMatrix(data = Predicted_BuildingSVMLinear1, validation$BUILDINGID) 
#SVMLinear2 BUILDING (312 WAPs, 19861 rows) ##Train SVMLinear2 (312 WAPs) SVMLinearfit2 <- readRDS("SVMLinear2SampleBuilding.rds") SVMLinearfit2 #Accuracy 0.9999, Kappa 0.9999 #Apply SVMLinear2 to validation set Predicted_BuildingSVMLinear2 <- predict(SVMLinearfit2, cleanvalidation) #postResample SVMLinear1 to assess the metrics of the predictions postResample(Predicted_BuildingSVMLinear2, cleanvalidation$BUILDINGID) ##Accuracy 0.9991, Kappa 0.9986 #Plot confusion matrix SVMLinear2 confusionMatrix(data = Predicted_BuildingSVMLinear2, cleanvalidation$BUILDINGID) #SVMLinear4 BUILDING (312 WAPs, 19389 - rows removed observations which contain values >= -30) ##Train SVMLinear3 (312 WAPs, 19389 rows) SVMLinearfit4 <- readRDS("SVMLinear4Building.rds") SVMLinearfit4 #Accuracy 0.9999, Kappa 0.9999 #Apply SVMLinear4 to validation set Predicted_BuildingSVMLinear4 <- predict(SVMLinearfit4, cleanvalidation) #postResample SVMLinear1 to assess the metrics of the predictions postResample(Predicted_BuildingSVMLinear4, cleanvalidation$BUILDINGID) ##Accuracy 0.9991, Kappa 0.9986 #Plot confusion matrix SVMLinear4 confusionMatrix(data = Predicted_BuildingSVMLinear4, cleanvalidation$BUILDINGID) #SVMLinear5 Weighted BUILDING (312 WAPs, 19861 rows) ##Train SVMLinear5 (312 WAPs) SVMLinearfit5 <- train(BUILDINGID~. 
-LONGITUDE-LATITUDE-FLOOR-SPACEID -RELATIVEPOSITION-USERID-PHONEID-TIMESTAMP-BuildingFloor, data = cleantrainset3.2, method = "svmLinear", tuneLength = 15, trControl = fitcontrol) SVMLinearfit5 #Apply SVMLinear5 to validation set Predicted_BuildingSVMLinear5 <- predict(SVMLinearfit5, cleanvalidation3.2) #postResample SVMLinear1 to assess the metrics of the predictions postResample(Predicted_BuildingSVMLinear5, cleanvalidation3.2$BUILDINGID) ##Accuracy 0.9991, Kappa 0.9986 #Plot confusion matrix SVMLinear5 confusionMatrix(data = Predicted_BuildingSVMLinear5, cleanvalidation$BUILDINGID) validationBUILDING$RealBuilding <- cleanvalidation$BUILDINGID filter(validationBUILDING, BUILDINGID == 1) %>% filter(validationBUILDING, RealBuilding == 0) ###MODELLING FLOOR ---- ###New training set with predicted BUILDING validationBUILDING <- cleanvalidation validationBUILDING$BuildingFloor <- paste0("B",Predicted_BuildingSVMLinear4,"F",cleanvalidation$FLOOR) validationBUILDING$BuildingFloor <- as.factor(validationBUILDING$BuildingFloor) #K-NN Floor (312 WAPs + Predicted Building+Floor, sample 399 rows) ##Train k-nn1 (312 WAPs) #knnFloorfit1 <- train(BuildingFloor~. -LONGITUDE-LATITUDE-SPACEID # -RELATIVEPOSITION-USERID-PHONEID-TIMESTAMP-FLOOR, # data = training1, method = "knn", preProc = c("center","scale"), # tuneLength = 15, trControl = fitcontrol) knnFloorfit1 <- readRDS("knnSampleFloor.rds") knnFloorfit1 #Apply k-nn1 to test set knnFloor1 <- predict(knnFloorfit1, validationBUILDING) #postResample k-nn1 to assess the metrics of the predictions postResample(knnFloor1, cleanvalidation$BuildingFloor) ##Accuracy 0.5877, Kappa 0.5491 #Plot confusion matrix k-nn1 confusionMatrix(data = knnFloor1, cleanvalidation$BuildingFloor) #SVMLinear Floor (312 WAPs, sample 399 rows) ##Train SVMLinear Floor (312 WAPs) #SVMLinearFloor1 <- train(BuildingFloor~. 
-LONGITUDE-LATITUDE-SPACEID # -RELATIVEPOSITION-USERID-PHONEID-TIMESTAMP-FLOOR, # data = training1, method = "svmLinear", # tuneLength = 15, trControl = fitcontrol) SVMLinearFloor1 <- readRDS("SVMLinear1SampleFloor.rds") SVMLinearFloor1 #Apply SVMLinear1 Floor to test set Predicted_FloorSVMLinear1 <- predict(SVMLinearFloor1, validationBUILDING) #postResample SVMLinear1 Floor to assess the metrics of the predictions postResample(Predicted_FloorSVMLinear1, cleanvalidation$BuildingFloor) ##Accuracy 0.8181, Kappa 0.7981 #Plot confusion matrix SVMLinear1 confusionMatrix(data = Predicted_FloorSVMLinear1, cleanvalidation$BuildingFloor) #SVMLinear Floor (312 WAPs, 19389 rows - cleantrainset3) ##Train SVMLinear Floor (312 WAPs) #SVMLinearFloor2 <- train(BuildingFloor~. -LONGITUDE-LATITUDE-SPACEID # -RELATIVEPOSITION-USERID-PHONEID-TIMESTAMP-FLOOR, # data = cleantrainset3, method = "svmLinear", # tuneLength = 15, trControl = fitcontrol) SVMLinearFloor2 <- readRDS("SVMLinear2Floor.rds") SVMLinearFloor2 #Apply SVMLinear1 Floor to test set Predicted_FloorSVMLinear2 <- predict(SVMLinearFloor2, validationBUILDING) #postResample SVMLinear1 Floor to assess the metrics of the predictions postResample(Predicted_FloorSVMLinear2, cleanvalidation$BuildingFloor) ##Accuracy 0.8830, Kappa 0.8691 #Plot confusion matrix SVMLinear2 confusionMatrix(data = Predicted_FloorSVMLinear2, cleanvalidation$BuildingFloor) ##MODELLING FLOOR for individual buildings ###Split training & validation sets per building B0_trainset3WAPs <- cleantrainset3[cleantrainset3$BUILDINGID == 0,1:312] B0_trainset3NoWAPs <- cleantrainset3[cleantrainset3$BUILDINGID == 0,313:322] B1_trainset3WAPs <- cleantrainset3[cleantrainset3$BUILDINGID == 1,1:312] B1_trainset3NoWAPs <- cleantrainset3[cleantrainset3$BUILDINGID == 1,313:322] B2_trainset3WAPs <- cleantrainset3[cleantrainset3$BUILDINGID == 2,1:312] B2_trainset3NoWAPs <- cleantrainset3[cleantrainset3$BUILDINGID == 2,313:322] B0_validation3WAPs <- 
cleanvalidation[cleanvalidation$BUILDINGID == 0,1:312] B0_validation3NoWAPs <- cleanvalidation[cleanvalidation$BUILDINGID == 0,313:322] B1_validation3WAPs <- cleanvalidation[cleanvalidation$BUILDINGID == 1,1:312] B1_validation3NoWAPs <- cleanvalidation[cleanvalidation$BUILDINGID == 1,313:322] B2_validation3WAPs <- cleanvalidation[cleanvalidation$BUILDINGID == 2,1:312] B2_validation3NoWAPs <- cleanvalidation[cleanvalidation$BUILDINGID == 2,313:322] ##Finding WAPs never detected per building ###TRAINING & VALIDATION ###BUILDING 0 B0_trainset3 <- cbind(B0_trainset3WAPs[,apply(B0_trainset3WAPs,2,mean) != -105],B0_trainset3NoWAPs) B0_validation3 <- cbind(B0_validation3WAPs[,apply(B0_validation3WAPs,2,mean) != -105],B0_validation3NoWAPs) intersectedB0 <- intersect(names(B0_trainset3),names(B0_validation3)) length(intersectedB0)#149 B0_trainset3 <- B0_trainset3[,intersectedB0] B0_trainset3$BuildingFloor <- factor(B0_trainset3$BuildingFloor) B0_validation3 <- B0_validation3[,intersectedB0] B0_validation3$BuildingFloor <- factor(B0_validation3$BuildingFloor) B1_trainset3 <- cbind(B1_trainset3WAPs[,apply(B1_trainset3WAPs,2,mean) != -105],B1_trainset3NoWAPs) B1_validation3 <- cbind(B1_validation3WAPs[,apply(B1_validation3WAPs,2,mean) != -105],B1_validation3NoWAPs) intersectedB1 <- intersect(names(B1_trainset3),names(B1_validation3)) length(intersectedB1)#156 B1_trainset3 <- B1_trainset3[,intersectedB1] B1_trainset3$BuildingFloor <- factor(B1_trainset3$BuildingFloor) B1_validation3 <- B1_validation3[,intersectedB1] B1_validation3$BuildingFloor <- factor(B1_validation3$BuildingFloor) B2_trainset3 <- cbind(B2_trainset3WAPs[,apply(B2_trainset3WAPs,2,mean) != -105],B2_trainset3NoWAPs) B2_validation3 <- cbind(B2_validation3WAPs[,apply(B2_validation3WAPs,2,mean) != -105],B2_validation3NoWAPs) intersectedB2 <- intersect(names(B2_trainset3),names(B2_validation3)) length(intersectedB2)#116 B2_trainset3 <- B2_trainset3[,intersectedB2] B2_trainset3$BuildingFloor <- 
factor(B2_trainset3$BuildingFloor) B2_validation3 <- B2_validation3[,intersectedB2] B2_validation3$BuildingFloor <- factor(B2_validation3$BuildingFloor) check <- union(intersectedB0,intersectedB1,intersectedB2) length(check) B0_validationBUILDING <- validationBUILDING[validationBUILDING$BUILDINGID == "0",] B0_validationBUILDING$BUILDINGID <- factor(B0_validationBUILDING$BUILDINGID) B0_validationBUILDING$BuildingFloor <- factor(B0_validationBUILDING$BuildingFloor) B1_validationBUILDING <- validationBUILDING[validationBUILDING$BUILDINGID == "1",] B1_validationBUILDING$BUILDINGID <- factor(B1_validationBUILDING$BUILDINGID) B2_validationBUILDING <- validationBUILDING[validationBUILDING$BUILDINGID == "2",] B2_validationBUILDING$BUILDINGID <- factor(B2_validationBUILDING$BUILDINGID) ##MODELS #SVMLinear Floor BUILDING 0 ##Train SVMLinear Floor #SVMLinearFloor2_B0 <- train(BuildingFloor~. -LONGITUDE-LATITUDE-SPACEID # -RELATIVEPOSITION-USERID-PHONEID-TIMESTAMP-FLOOR, # data = B0_trainset3, method = "svmLinear", # tuneLength = 15, trControl = fitcontrol) SVMLinearFloor2_B0 <- readRDS("SVMLinear2Floor_B0.rds") SVMLinearFloor2_B0 #Apply SVMLinear1 Floor to test set Predicted_FloorSVMLinear2_B0 <- predict(SVMLinearFloor2_B0, B0_validationBUILDING) #postResample SVMLinear1 Floor to assess the metrics of the predictions postResample(Predicted_FloorSVMLinear2_B0, B0_validation3$BuildingFloor) ##Accuracy 0.9477, Kappa 0.9265 #Plot confusion matrix SVMLinear2 confusionMatrix(data = Predicted_FloorSVMLinear2_B0, B0_validation3$BuildingFloor) #SVMLinear Floor BUILDING 0 ##Train SVMLinear Floor B0_trainset3v2 <- B0_trainset3 B0_trainset3v2$BuildingFloor <- NULL B0_validationBUILDINGv2 <- B0_validationBUILDING B0_validationBUILDINGv2$BuildingFloor <- NULL #SVMLinearFloor2_B0v2 <- train(FLOOR~. 
-LONGITUDE-LATITUDE-SPACEID # -RELATIVEPOSITION-USERID-PHONEID-TIMESTAMP, # data = B0_trainset3v2, method = "svmLinear", # tuneLength = 15, trControl = fitcontrol) SVMLinearFloor2_B0v2 <- readRDS("SVMLinear2Floor_B0.rds") SVMLinearFloor2_B0v2 #Apply SVMLinear1 Floor to test set Predicted_FloorSVMLinear2_B0v2 <- predict(SVMLinearFloor2_B0v2, B0_validationBUILDINGv2) #postResample SVMLinear1 Floor to assess the metrics of the predictions postResample(Predicted_FloorSVMLinear2_B0v2, B0_validationBUILDINGv2$FLOOR) ##Accuracy 0.9477, Kappa 0.9265 #SVMLinear Floor BUILDING 1 ##Train SVMLinear Floor #SVMLinearFloor2_B1 <- train(BuildingFloor~. -LONGITUDE-LATITUDE-SPACEID # -RELATIVEPOSITION-USERID-PHONEID-TIMESTAMP-FLOOR, # data = B1_trainset3, method = "svmLinear", # tuneLength = 15, trControl = fitcontrol) SVMLinearFloor2_B1 <- readRDS("SVMLinear2Floor_B1.rds") SVMLinearFloor2_B1 #Apply SVMLinear1 Floor to test set Predicted_FloorSVMLinear2_B1 <- predict(SVMLinearFloor2_B1, B1_validationBUILDING) #postResample SVMLinear1 Floor to assess the metrics of the predictions postResample(Predicted_FloorSVMLinear2_B1, B1_validation3$BuildingFloor) ##Accuracy 0.7948, Kappa 0.7056 #Plot confusion matrix SVMLinear2 confusionMatrix(data = Predicted_FloorSVMLinear2_B1, B1_validation3$BuildingFloor) #RF Floor BUILDING 1 ##Train RF Floor #RFFloor2_B1 <- train(BuildingFloor~. 
-LONGITUDE-LATITUDE-SPACEID # -RELATIVEPOSITION-USERID-PHONEID-TIMESTAMP-FLOOR, # data = B1_trainset3, method = "rf", ntree = 20, # tuneLength = 15, trControl = fitcontrol) RFFloor2_B1 <- readRDS("RF2Floor_B1.rds") RFFloor2_B1 #Apply RF Floor to test set Predicted_FloorRF2_B1 <- predict(RFFloor2_B1, B1_validationBUILDING) #postResample RF2 Floor to assess the metrics of the predictions postResample(Predicted_FloorRF2_B1, B1_validation3$BuildingFloor) ##Accuracy 0.8078, Kappa 0.7267 #Plot confusion matrix RF2 confusionMatrix(data = Predicted_FloorRF2_B1, B1_validation3$BuildingFloor) #Error analysis checkVal_B1F2 <- B1_validation3 checkVal_B1F2$PredFloor <- Predicted_FloorRF2_B1 checkVal_B1F2 <- filter(checkVal_B1F2, Predicted_FloorRF2_B1 == "B1F2" & BuildingFloor == "B1F1") checkVal_B1F2$BuildingFloor <- checkVal_B1F2$PredFloor checkVal_B1F2$PredFloor <- NULL checkVal_B1F2$Real_Pred <- "Predicted" checkVal_B1F2Real <- B1_validation3 checkVal_B1F2Real$Real_Pred <- "Real" checkVal_B1F2a <- rbind(checkVal_B1F2Real,checkVal_B1F2) #The plot below shows all validation points for B1 in blue and wrong predictions in pink #Real: B1F1, Predicted B1F2 ggplot(checkVal_B1F2a, aes(checkVal_B1F2a$LATITUDE,checkVal_B1F2a$LONGITUDE, colour = checkVal_B1F2a$Real_Pred)) + geom_point() #Range Longitude -7569 to -7474 #Range Latitude 4864840 to 4864901 checkRangeB1F1 <- B1_trainset3 %>% filter(LONGITUDE >= -7569 & LONGITUDE <= -7474) %>% filter(LATITUDE >= 4864840 & LATITUDE <= 4864901) %>% filter(BuildingFloor == "B1F1") checkRangeB1F1Train <- B1_trainset3 %>% filter(BuildingFloor == "B1F1") checkRangeB1F1Train$Train_error <- "Training" checkRangeB1F1Error <- checkVal_B1F2[,1:156] checkRangeB1F1Error$Train_error <- "Error" checkTrainError <- rbind(checkRangeB1F1Train,checkRangeB1F1Error) ggplot(checkRangeB1F1Train,aes(checkRangeB1F1Train$LATITUDE,checkRangeB1F1Train$LONGITUDE)) + geom_point() #The plot below shows the trainset points in B0 floor 1 and the 37/42 errors 
ggplot(checkTrainError,aes(checkTrainError$LATITUDE,checkTrainError$LONGITUDE, colour = checkTrainError$Train_error)) + geom_point() checkVal_B1F3 <- B1_validation3 checkVal_B1F3$PredFloor <- Predicted_FloorRF2_B1 checkVal_B1F3 <- filter(checkVal_B1F3, Predicted_FloorRF2_B1 == "B1F3" & BuildingFloor == "B1F1") checkVal_B1F3$BuildingFloor <- checkVal_B1F3$PredFloor checkVal_B1F3$PredFloor <- NULL checkVal_B1F3$Real_Pred <- "Predicted" checkVal_B1F3Real <- B1_validation3 checkVal_B1F3Real$Real_Pred <- "Real" checkVal_B1F3a <- rbind(checkVal_B1F3Real,checkVal_B1F3) ggplot(checkVal_B1F3a, aes(checkVal_B1F3a$LATITUDE,checkVal_B1F3a$LONGITUDE, colour = checkVal_B1F3a$Real_Pred)) + geom_point() ggplot(B1_trainset3, aes(B1_trainset3$LATITUDE,B1_trainset3$LONGITUDE)) + geom_point() #SVMLinear Floor BUILDING 2 ##Train SVMLinear Floor #SVMLinearFloor2_B2 <- train(BuildingFloor~. -LONGITUDE-LATITUDE-SPACEID # -RELATIVEPOSITION-USERID-PHONEID-TIMESTAMP-FLOOR, # data = B2_trainset3, method = "svmLinear", # tuneLength = 15, trControl = fitcontrol) SVMLinearFloor2_B2 <- readRDS("SVMLinear2Floor_B2.rds") SVMLinearFloor2_B2 #Apply SVMLinear1 Floor to test set Predicted_FloorSVMLinear2_B2 <- predict(SVMLinearFloor2_B2, B2_validationBUILDING) #postResample SVMLinear1 Floor to assess the metrics of the predictions postResample(Predicted_FloorSVMLinear2_B2, B2_validation3$BuildingFloor) ##Accuracy 0.9328, Kappa 0.9080 #Plot confusion matrix SVMLinear2 confusionMatrix(data = Predicted_FloorSVMLinear2_B2, B2_validation3$BuildingFloor) #MODELLING LONGITUDE/LATITUDE ---- ##Preparing data frames for Regression cleantrainset3.Regr <- cleantrainset3 cleantrainset3.Regr$BUILDINGID <- as.numeric(cleantrainset3.Regr$BUILDINGID) validationBUILDING.Regr <- validationBUILDING validationBUILDING.Regr$BUILDINGID <- as.numeric(validationBUILDING.Regr$BUILDINGID) B0_trainset3.Regr <- B0_trainset3 B0_trainset3.Regr$BUILDINGID <- 0 B1_trainset3.Regr <- B1_trainset3 B1_trainset3.Regr$BUILDINGID <- 1 
B2_trainset3.Regr <- B2_trainset3 B2_trainset3.Regr$BUILDINGID <- 2 B0_validation3.Regr <- data.frame(B0_validation3) B0_validation3.Regr$BUILDINGID <- 0 B1_validation3.Regr <- B1_validation3 B1_validation3.Regr$BUILDINGID <- 1 B2_validation3.Regr <- B2_validation3 B2_validation3.Regr$BUILDINGID <- 2 B0_validationBUILDING.Regr <- B0_validationBUILDING B0_validationBUILDING.Regr$BUILDINGID <- 0 B1_validationBUILDING.Regr <- B1_validationBUILDING B1_validationBUILDING.Regr$BUILDINGID <- 1 B2_validationBUILDING.Regr <- B2_validationBUILDING B2_validationBUILDING.Regr$BUILDINGID <- 2 ##Based on the variance below, we have decided to predict latitude before longitude summary(cleantrainset3$LONGITUDE)#varies from -7691 to -7301 (390 "variance") summary(cleantrainset3$LATITUDE)#varies from 4864746 to 4865017 (271 "variance") #LATITUDE ---- #K-NN Latitude (312 WAPs + Predicted Building, ) ##Train k-nn1 LATITUDE (312 WAPs) #knnLatitudefit1 <- train(LATITUDE~. -LONGITUDE-SPACEID # -RELATIVEPOSITION-USERID-PHONEID-TIMESTAMP-FLOOR-BuildingFloor, # data = cleantrainset3.Regr, method = "knn", preProc = c("center","scale"), # tuneLength = 15, trControl = fitcontrol) knnLatitudefit1 <- readRDS("knnLatitudefit1.rds") knnLatitudefit1 #Apply k-nn1 to test set knnLatitude1 <- predict(knnLatitudefit1, validationBUILDING.Regr) #postResample k-nn1 to assess the metrics of the predictions postResample(knnLatitude1, validationBUILDING.Regr$LATITUDE) ##RMSE 15.62, Rsquared 0.95, MAE 7.55 #Error analysis ErroLatitude_Knn <- knnLatitude1 - validationBUILDING.Regr$LATITUDE hist(ErroLatitude_Knn) #RF Latitude (312 WAPs + Predicted Building, ) ##Train RF1 LATITUDE (312 WAPs) #RFLatitudefit1 <- train(LATITUDE~. 
-LONGITUDE-SPACEID # -RELATIVEPOSITION-USERID-PHONEID-TIMESTAMP-FLOOR-BuildingFloor, # data = training3.Regr, method = "rf", ntree = 5,preProc = c("center","scale"), # tuneLength = 15, trControl = fitcontrol) RFLatitudefit1 <- readRDS("RFLatitude1.rds") RFLatitudefit1 #Apply RF1 to test set RFLatitude1 <- predict(RFLatitudefit1, validationBUILDING.Regr) #postResample RF1 to assess the metrics of the predictions postResample(RFLatitude1, cleanvalidation$LATITUDE) ##RMSE 11.566, Rsquared 0.97, MAE 7.67 #Error analysis ErroLatitude_RF <- RFLatitude1 - cleanvalidation$LATITUDE hist(ErroLatitude_RF) #SVMLinear Latitude (312 WAPs + Predicted Building, ) ##Train SVMLinear1 LATITUDE (312 WAPs) #SVMLinearLatitudefit1 <- train(LATITUDE~. -LONGITUDE-SPACEID # -RELATIVEPOSITION-USERID-PHONEID-TIMESTAMP-FLOOR-BuildingFloor, # data = training3.Regr, method = "svmLinear", preProc = c("center","scale"), # tuneLength = 15, trControl = fitcontrol) SVMLinearLatitudefit1 <- readRDS("SVMLinearLatitude1.rds") SVMLinearLatitudefit1 #Apply RF1 to test set SVMLinearLatitude1 <- predict(SVMLinearLatitudefit1, validationBUILDING.Regr) #postResample RF1 to assess the metrics of the predictions postResample(SVMLinearLatitude1, validationBUILDING.Regr$LATITUDE) ##RMSE 16, Rsquared 0.94, MAE 11.82 #Error analysis ErroLatitude_SVMLinear1 <- SVMLinearLatitude1 - validationBUILDING.Regr$LATITUDE hist(ErroLatitude_SVMLinear1) #RF Latitude per BUILDING ##Train RF1 LATITUDE B0 #RFLatitudefit1_B0 <- train(LATITUDE~. 
-LONGITUDE-SPACEID # -RELATIVEPOSITION-USERID-PHONEID-TIMESTAMP-FLOOR-BuildingFloor, # data = B0_trainset3.Regr, method = "rf", ntree = 5,preProc = c("center","scale"), # tuneLength = 15, trControl = fitcontrol) RFLatitudefit1_B0 <- readRDS("RFLatitudefit1_B0.rds") RFLatitudefit1_B0 #Apply RF1 to test set RFLatitude1_B0 <- predict(RFLatitudefit1_B0, B0_validationBUILDING.Regr) #postResample RF1 to assess the metrics of the predictions postResample(RFLatitude1_B0, B0_validation3.Regr$LATITUDE) ##RMSE 6.277, Rsquared 0.96, MAE 4.41 #Error analysis ErroLatitude_RF_B0 <- RFLatitude1_B0 - B0_validation3.Regr$LATITUDE hist(ErroLatitude_RF_B0) ##Train RF1 LATITUDE B1 #RFLatitudefit1_B1 <- train(LATITUDE~. -LONGITUDE-SPACEID # -RELATIVEPOSITION-USERID-PHONEID-TIMESTAMP-FLOOR-BuildingFloor, # data = B1_trainset3.Regr, method = "rf", ntree = 5,preProc = c("center","scale"), # tuneLength = 15, trControl = fitcontrol) RFLatitudefit1_B1 <- readRDS("RFLatitudefit1_B1.rds") RFLatitudefit1_B1 #Apply RF1 to test set RFLatitude1_B1 <- predict(RFLatitudefit1_B1, B1_validationBUILDING.Regr) #postResample RF1 to assess the metrics of the predictions postResample(RFLatitude1_B1, B1_validation3.Regr$LATITUDE) ##RMSE 11.99, Rsquared 0.88, MAE 8.58 #Error analysis ErrorLatitude_RF_B1 <- RFLatitude1_B1 - B1_validation3.Regr$LATITUDE hist(ErrorLatitude_RF_B1) ##Train RF1 LATITUDE B2 #RFLatitudefit1_B2 <- train(LATITUDE~. 
-LONGITUDE-SPACEID # -RELATIVEPOSITION-USERID-PHONEID-TIMESTAMP-FLOOR-BuildingFloor, # data = B2_trainset3.Regr, method = "rf", ntree = 5,preProc = c("center","scale"), # tuneLength = 15, trControl = fitcontrol) RFLatitudefit1_B2 <- readRDS("RFLatitudefit1_B2.rds") RFLatitudefit1_B2 #Apply RF1 to test set RFLatitude1_B2 <- predict(RFLatitudefit1_B2, B2_validationBUILDING.Regr) #postResample RF1 to assess the metrics of the predictions postResample(RFLatitude1_B2, B2_validation3.Regr$LATITUDE) ##RMSE 11.78, Rsquared 0.83, MAE 8.02 #Error analysis ErrorLatitude_RF_B2 <- RFLatitude1_B2 - B2_validation3.Regr$LATITUDE hist(ErrorLatitude_RF_B2) #LONGITUDE ---- #K-NN Longitude (312 WAPs + Predicted Building, ) ##Train k-nn1 LONGITUDE (312 WAPs) #knnLongitudefit1 <- train(LONGITUDE~. -LATITUDE-SPACEID # -RELATIVEPOSITION-USERID-PHONEID-TIMESTAMP-FLOOR-BuildingFloor, # data = training3.Regr, method = "knn", preProc = c("center","scale"), # tuneLength = 15, trControl = fitcontrol) knnLongitudefit1 <- readRDS("knnLongitudefit1.rds") knnLongitudefit1 #Apply k-nn1 to test set knnLongitude1 <- predict(knnLongitudefit1, validationBUILDING.Regr) #postResample k-nn1 to assess the metrics of the predictions postResample(knnLongitude1, validationBUILDING.Regr$LONGITUDE) ##RMSE 18.91, Rsquared 0.97, MAE 8.28 #Error analysis ErrorLongitude_knn <- knnLongitude1 - validationBUILDING.Regr$LONGITUDE hist(ErrorLongitude_knn) #RF Longitude (312 WAPs + Predicted Building, ) ##Train RF1 LONGITUDE (312 WAPs) #RFLongitudefit1 <- train(LONGITUDE~. 
-LATITUDE-SPACEID # -RELATIVEPOSITION-USERID-PHONEID-TIMESTAMP-FLOOR-BuildingFloor, # data = training3.Regr, method = "rf", ntree = 5, preProc = c("center","scale"), # tuneLength = 15, trControl = fitcontrol) RFLongitudefit1 <- readRDS("RFLongitudefit1.rds") RFLongitudefit1 #RFLongitudefit1 <- readRDS("RFLongitudefit1.rds") #Apply RF1 to test set RFLongitude1 <- predict(RFLongitudefit1, validationBUILDING.Regr) #postResample RF1 to assess the metrics of the predictions postResample(RFLongitude1, cleanvalidation$LONGITUDE) ##RMSE 12.55, Rsquared 0.989, MAE 7.88 #Error analysis ErrorLongitude_RF <- RFLongitude1 - validationBUILDING.Regr$LONGITUDE hist(ErrorLongitude_RF) ##Train SVMLinear1 LONGITUDE (312 WAPs) #SVMLinearLongitudefit1 <- train(LONGITUDE~. -LATITUDE-SPACEID # -RELATIVEPOSITION-USERID-PHONEID-TIMESTAMP-FLOOR-BuildingFloor, # data = training3.Regr, method = "svmLinear", preProc = c("center","scale"), # tuneLength = 15, trControl = fitcontrol) SVMLinearLongitudefit1 <- readRDS("SVMLinearLongitudefit1.rds") SVMLinearLongitudefit1 #SVMLinearLongitudefit1 <- readRDS("SVMLinearLongitudefit1.rds") #Apply SVMLinear1 to test set SVMLinearLongitude1 <- predict(SVMLinearLongitudefit1, validationBUILDING.Regr) #postResample RF1 to assess the metrics of the predictions postResample(SVMLinearLongitude1, validationBUILDING.Regr$LONGITUDE) ##RMSE 17.64, Rsquared 0.97, MAE 13.30 #Error analysis ErrorLongitude_SVM <- SVMLinearLongitude1 - validationBUILDING.Regr$LONGITUDE hist(ErrorLongitude_SVM) #RF Longitude per BUILDING ##Train RF1 LONGITUDE B0 #RFLongitudefit1_B0 <- train(LONGITUDE~. 
-LATITUDE-SPACEID # -RELATIVEPOSITION-USERID-PHONEID-TIMESTAMP-FLOOR-BuildingFloor, # data = B0_trainset3.Regr, method = "rf", ntree = 5,preProc = c("center","scale"), # tuneLength = 15, trControl = fitcontrol) RFLongitudefit1_B0 <- readRDS("RFLongitudefit1_B0.rds") RFLongitudefit1_B0 #Apply RF1 to test set RFLongitude1_B0 <- predict(RFLongitudefit1_B0, B0_validationBUILDING.Regr) #postResample RF1 to assess the metrics of the predictions postResample(RFLongitude1_B0, B0_validation3.Regr$LONGITUDE) ##RMSE 8.27, Rsquared 0.90, MAE 5.32 #Error analysis ErrorLongitude_RF_B0 <- RFLongitude1_B0 - B0_validation3.Regr$LONGITUDE hist(ErrorLongitude_RF_B0) ##Train RF1 LONGITUDE B1 #RFLongitudefit1_B1 <- train(LONGITUDE~. -LATITUDE-SPACEID # -RELATIVEPOSITION-USERID-PHONEID-TIMESTAMP-FLOOR-BuildingFloor, # data = B1_trainset3.Regr, method = "rf", ntree = 5,preProc = c("center","scale"), # tuneLength = 15) RFLongitudefit1_B1 <- readRDS("RFLongitudefit1_B1.rds") RFLongitudefit1_B1 #Apply RF1 to test set RFLongitude1_B1 <- predict(RFLongitudefit1_B1, B1_validationBUILDING.Regr) #postResample RF1 to assess the metrics of the predictions postResample(RFLongitude1_B1, B1_validation3.Regr$LONGITUDE) ##RMSE 10.59, Rsquared 0.95, MAE 7.76 #Error analysis ErrorLongitude_RF_B1 <- RFLongitude1_B1 - B1_validation3.Regr$LONGITUDE hist(ErrorLongitude_RF_B1) ##Train RF1 LONGITUDE B2 #RFLongitudefit1_B2 <- train(LONGITUDE~. 
-LATITUDE-SPACEID # -RELATIVEPOSITION-USERID-PHONEID-TIMESTAMP-FLOOR-BuildingFloor, # data = B2_trainset3.Regr, method = "rf", ntree = 5,preProc = c("center","scale"), # tuneLength = 15,trControl = fitcontrol) RFLongitudefit1_B2 <- readRDS("RFLongitudefit1_B2.rds") RFLongitudefit1_B2 #Apply RF1 to test set RFLongitude1_B2 <- predict(RFLongitudefit1_B2, B2_validationBUILDING.Regr) #postResample RF1 to assess the metrics of the predictions postResample(RFLongitude1_B2, B2_validationBUILDING.Regr$LONGITUDE) ##RMSE 12.60, Rsquared 0.84, MAE 8.37 #ERROR ANALYSIS ---- ##Create validation set with predicted LAT & LONG validationLatLong_B0 <- B0_validation3.Regr validationLatLong_B0$Real_Predicted <- "Real" validationLatLong_B0Pred <- B0_validation3.Regr validationLatLong_B0Pred$LATITUDE <- RFLatitude1_B0 validationLatLong_B0Pred$LONGITUDE <- RFLongitude1_B0 validationLatLong_B0Pred$Real_Predicted <- "Predicted" validationLatLong_B0Compl <- rbind(validationLatLong_B0,validationLatLong_B0Pred) B0_validation_predictions <- B0_validation3.Regr B0_validation_predictions$Pred_Latitude <- RFLatitude1_B0 B0_validation_predictions$Pred_Longitude <- RFLongitude1_B0 B0_validation_predictions$Error_Latitude <- B0_validation_predictions$Pred_Latitude-B0_validation_predictions$LATITUDE B0_validation_predictions$Error_Longitude <- B0_validation_predictions$Pred_Longitude-B0_validation_predictions$LONGITUDE summary(B0_validation_predictions$Error_Latitude) hist(B0_validation_predictions$Error_Latitude) B0_higherror10 <- filter(B0_validation_predictions, Error_Latitude <= -10 | Error_Latitude >= 10) summary(B0_higherror10) #F0 7, F1 22, F2 15, F3 11 B0_higherror20 <- filter(B0_validation_predictions, Error_Latitude <= -20 | Error_Latitude >= 20) summary(B0_higherror20) #F0 0, F1 2, F2 0, F3 3 ggplot(validationLatLong_B0Compl,aes(validationLatLong_B0Compl$LONGITUDE,validationLatLong_B0Compl$LATITUDE, colour = Real_Predicted)) + geom_point() ggplot(validationLatLong_B0) + 
geom_point(aes(validationLatLong_B0$LONGITUDE,validationLatLong_B0$LATITUDE)) #Check unique points (LAT/LONG) in trainset and validation LatLong <- trainset %>% select(LATITUDE,LONGITUDE) uniqueLatLong <- unique(LatLong) #692 unique combinations uniqueLatLong$Train_Val <- "Train" ggplot(uniqueLatLong,aes(uniqueLatLong$LATITUDE,uniqueLatLong$LONGITUDE))+ geom_point() LatLong_Val <- validation %>% select(LATITUDE,LONGITUDE) uniqueLatLong_Val <- unique(LatLong_Val) #1068 unique combinations uniqueLatLong_Val$Train_Val <- "Validation" ggplot(uniqueLatLong_Val,aes(uniqueLatLong_Val$LATITUDE,uniqueLatLong_Val$LONGITUDE))+ geom_point() uniqueTrain_Val <- rbind(uniqueLatLong,uniqueLatLong_Val) plot_ly(x=uniqueTrain_Val$LATITUDE,y=uniqueTrain_Val$LONGITUDE,z=uniqueTrain_Val$Train_Val, color=uniqueTrain_Val$Train_Val, type = "scatter3d",mode="markers") ggplot(uniqueTrain_Val,aes(uniqueTrain_Val$LATITUDE,uniqueTrain_Val$LONGITUDE, colour=uniqueTrain_Val$Train_Val))+ geom_point() plot_ly(x=validationLatLong_B0Compl$LATITUDE,y=validationLatLong_B0Compl$LONGITUDE, z=validationLatLong_B0Compl$FLOOR, color=validationLatLong_B0Compl$Real_Predicted, type = "scatter3d",mode="markers")
1c775b384ebb6abd3bf744dfd15be6386f4166b6
88940774e194e91582afdc6e3308b43cae14ef24
/HW2_2.R
674f00e7199794a71791f90da7b0fc7df4f1cee4
[]
no_license
garnerat/DM1_HW2
85c19a17388601475a7f8e480fb1f6309f319b4a
2fc484bc16d158b342a487523f4539becb0fd36e
refs/heads/master
2021-01-11T11:26:39.195456
2017-01-29T22:48:31
2017-01-29T22:48:31
80,256,088
0
1
null
null
null
null
UTF-8
R
false
false
2,420
r
HW2_2.R
# Simulation of Linear Regression varying n and noise (e) # sample size n=c(25, 100, 200, 500, 5000) # noise sigma= c(0.1, 0.5, 1) # initialize matrices MSE.matrix <- matrix(c(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), nrow = 5) colnames(MSE.matrix) <- c("sig = .1","sig = .5","sig = 1") rownames(MSE.matrix) <- c("n = 25", "n = 100", "n = 200", "n = 500", "n = 5000") rsq.matrix <- matrix(c(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), nrow = 5) colnames(rsq.matrix) <- c("sig = .1","sig = .5","sig = 1") rownames(rsq.matrix) <- c("n = 25", "n = 100", "n = 200", "n = 500", "n = 5000") adj.rsq.matrix <- matrix(c(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), nrow = 5) colnames(adj.rsq.matrix) <- c("sig = .1","sig = .5","sig = 1") rownames(adj.rsq.matrix) <- c("n = 25", "n = 100", "n = 200", "n = 500", "n = 5000") #list for equations equations <- list() for (i in 1:length(n)){ for (j in 1:length(sigma)){ # generate predictors set.seed(1984) x1 <- rnorm(n=n[i], mean = 2, sd = 0.4) set.seed(1985) x2 <- rnorm(n=n[i], mean = -1, sd = 0.1) x3 <- x1 * x2 # generate error set.seed(1986) e <- rnorm(n=n[i], mean = 0, sd = sigma[j]) eyx <- 5 + 1.2*x1 + 3*x2 + e # create data from for model model.df <- as.data.frame(cbind(eyx,x1,x2,x3)) # set null and full model for stepwise regression nullmodel <- lm(eyx ~ 1, data = model.df) fullmodel <- lm(eyx ~ ., data = model.df) # create a "banner" to separate model outputs print(paste("**************** n = ", n[i]," sig = ",sigma[j], " **********************", sep = "")) # stepwise regression model.stepwise = step(nullmodel, scope = list(lower = nullmodel, upper = fullmodel), direction = "both") summary.model <- summary(model.stepwise) equations[[length(equations)+1]] <- list(paste("n = ", n[i]," sig = ",sigma[j], " equation = ",summary.model$call[2], sep = "")) MSE.matrix[i,j] <- round(summary.model$sigma^2, 4) rsq.matrix[i,j] <- round(summary.model$r.squared, 4) adj.rsq.matrix[i,j] <- round(summary.model$adj.r.squared, 4) } } # print matrices print(MSE.matrix) print(rsq.matrix) 
print(adj.rsq.matrix) # user defined function line.plot from plot_func.R - creates line graphs with ggplot2 line.plot(MSE.matrix,"MSE") line.plot(rsq.matrix,"R-Squared") line.plot(adj.rsq.matrix,"Adjusted R-Squared")
f58d6f9814dfdeb1879499189e777054f0469a4d
809703480f03db55e3d2fe178efa0f1810ece44f
/initialization.R
eb66894939bbbf6fce66a84014c33401b7e79f1e
[ "MIT" ]
permissive
akulbahl/COVID-19FlightVisualizations
e59b08353d8300340336d2a8ca7026b0f0b1fc35
ed64f81ce555987824687fc72c84b700806f1f31
refs/heads/master
2023-02-24T22:08:38.953200
2021-02-02T18:58:45
2021-02-02T18:58:45
293,997,939
2
0
null
null
null
null
UTF-8
R
false
false
540
r
initialization.R
# app initialization packages = c("shiny", "shinythemes", "shinydashboard", "dashboardthemes", "shinyWidgets", "reshape2", "tidyverse", "knitr", "lubridate", "heatmaply", "ggthemes", "scales", "plotly") install_if_missing = function(p) { if (p %in% rownames(installed.packages()) == FALSE) { install.packages(p) } } invisible(sapply(packages, install_if_missing))
a6ae9a8356cde9e66c97f924f532828a047adbf8
21c8f47800b0062e6abcb5166a7e38cd8cbbefa1
/prelim/oe_analysis.R
1e40d90d54f3d91394b042a4411408e2c6b0bc28
[]
no_license
experimentalpolitics/immigration
5bdd1c0ebfdacf4e7ff8bd142fc880aa096c66f0
00c22b160ebfccc1dcc21bec7f471e8e76eca70a
refs/heads/master
2021-10-23T18:18:46.421375
2021-10-20T16:41:05
2021-10-20T16:41:05
181,782,683
3
0
null
null
null
null
UTF-8
R
false
false
17,387
r
oe_analysis.R
# NOTE(review): exploratory analysis of open-ended (OE) survey responses about
# immigration. Pipeline: (1) merge externally produced LIWC2015
# tentativeness/certainty scores into the survey data, (2) derive media
# preference (`prefer`) and treatment-exposure (`exposure`) variables,
# (3) use "folded" (extremity) versions of the closed-ended items plus LIWC
# tentativeness as an ambivalence proxy and t-test it across exposure groups,
# (4) fit bag-of-words Naive Bayes / SVM classifiers on the pooled OE text and
# use folded class probabilities and per-group F1 as a second ambivalence
# measure. Paths are resolved with here(); the LIWC CSVs come from the
# standalone LIWC2015 program.
# =================================================================== #
# Project: Sources of Misperception
# Author: Nick
# Date: 10/19/2020
# Summary: Explore/analyze OE responses using different methods.
# =================================================================== #
# cd("~/Dropbox/github-local/experimentalpolitics/immigration")

# packages
library(here)
library(readr)
library(tidyverse)
library(caret)
library(quanteda)
quanteda_options(threads = RcppParallel::defaultNumThreads()) # max threads for parallel processing
library(quanteda.textmodels)

# prep the data for LIWC program
dat <- read_csv(here("data/immigration_20191219_clean.csv"))

# write out -- !DON'T PROCESS THIS AFTER FIRST RUN! (commented out)
# dat %>%
#   select(., id, taxes_oe) %>%
#   write_csv(., "data/taxes_oe.csv")
# dat %>%
#   select(., id, jobs_oe) %>%
#   write_csv(., "data/jobs_oe.csv")

# read back in processed scores
tmp <- read_csv(here("data/LIWC2015 Results (taxes_oe).csv"))
tmp <- rename(tmp, id = `Source (A)`, tentat_taxes = tentat, certain_taxes = certain)
dat <- tmp %>%
  select(., id, tentat_taxes, certain_taxes) %>%
  left_join(dat, ., by = "id")
tmp <- read_csv(here("data/LIWC2015 Results (jobs_oe).csv"))
tmp <- rename(tmp, id = `Source (A)`, tentat_jobs = tentat, certain_jobs = certain)
# add to data and remove tmp
dat <- tmp %>%
  select(., id, tentat_jobs, certain_jobs) %>%
  left_join(dat, ., by = "id")
rm(tmp)

# create `prefer` and `exposure`
dat <- dat %>%
  mutate(.,
         prefer = (tv_fox - tv_msnbc == 0) + 2 * (tv_fox - tv_msnbc > 0),
         prefer = factor(prefer, labels = c("msnbc", "neutral", "fox")),
         exposure = ((prefer == "fox" & tweet == "msnbc") | (prefer == "msnbc" & tweet == "fox")) + 2 * (prefer == "neutral" & tweet != "control") + 3 * (prefer == tweet),
         exposure = factor(exposure, labels = c("control", "inconsistent", "neutral", "consistent")))

# create "folded" measures to get at the intensity of answer
dat$folded_taxes <- abs(as.numeric(scale(dat$taxes_pos, center = TRUE, scale = FALSE)))
dat$folded_jobs <- abs(as.numeric(scale(dat$jobs_pos, center = TRUE, scale = FALSE)))

# correlation between closed ended responses and tentativeness
cor.test(dat$taxes_pos, dat$tentat_taxes)
cor.test(dat$jobs_pos, dat$tentat_jobs)

# correlation between folded responses and tentativeness
cor.test(dat$folded_taxes, dat$tentat_taxes)
cor.test(dat$folded_jobs, dat$tentat_jobs)

# we want to understand if the association of the first pair is smaller than the association of the second pair. Since the first pair is not stat. sig. we would say that the relationship of the second pair is indeed stronger (0 < ~0.15). The sign of the second correlation is negative, meaning more "extreme" answers are less tentative.

# This first step indicates that we have a reasonable measure of ambivalence because the correlation is stronger with the folded (extremity) indicator than the positional indicator. We then want to test whether there is a difference in the measure of ambivalence between choice treated which select consistent and those which select inconsistent media.

# NOTE(review): ifelse() on a factor drops attributes and returns the
# underlying integer codes; the factor() call below relies on the remaining
# codes sorting to inconsistent < consistent. Fragile but appears to work
# here — confirm if the level order of `exposure` ever changes.
dat$exposure2 <- ifelse(dat$exposure %in% c("control", "neutral"), NA, dat$exposure) %>%
  factor(., labels = c("inconsistent", "consistent"))

dat %>%
  filter(., condition == "assigned" & !is.na(exposure2)) %>%
  t.test(tentat_taxes ~ exposure2, data = .)
# the sample is not balanced (48 incons vs 74 cons), but we might think that this finding indicates assigning does not change ambivalence

dat %>%
  filter(., condition == "choice" & !is.na(exposure2)) %>%
  t.test(tentat_taxes ~ exposure2, data = .)
# here we are seeing that there isn't a difference, meaning that whether they choose consistent or inconsistent sources ambivalence is not stat. different. Samples are very unbalanced; 21 incons and 102 consistent

# run with jobs as well
dat %>%
  filter(., condition == "assigned" & !is.na(exposure2)) %>%
  t.test(tentat_jobs ~ exposure2, data = .)
dat %>%
  filter(., condition == "choice" & !is.na(exposure2)) %>%
  t.test(tentat_jobs ~ exposure2, data = .)
# once again, no stat. sig difference here, meaning the treatment doesn't explain ambivalence nor does choice have a selection bias among ambivalent people

# "Bag-of-words" vector space model of text data.

# 1. dichotomize data (label)
dat$taxes_label <- ifelse(dat$taxes_pos >= .5, "positive", "negative")
dat$jobs_label <- ifelse(dat$jobs_pos >= .5, "positive", "negative")

# isolate the relevant data; can be rejoined later
oe_dat <- dat %>%
  select(., id, condition, taxes_label, taxes_oe, jobs_label, jobs_oe)

# check for imbalance (splits about 70-30 for each)
table(oe_dat$taxes_label)
table(oe_dat$jobs_label)

# recast data to "pool" observations -- we do this to maximize data available. Also, we believe there is a common data generating process and these can be pooled
oe_dat <- oe_dat %>%
  select(., id, condition, taxes_oe, jobs_oe, taxes_label, jobs_label) %>%
  gather(., key, value, -id, -condition)
# NOTE(review): grep() returns integer positions; comparing them against
# rownames() works only because default row names are "1","2",... — confirm
# this if the data ever acquires non-default row names.
oe_dat$question <- ifelse(rownames(oe_dat) %in% grep("taxes", oe_dat$key, fixed = TRUE), "taxes", "jobs")
oe_dat$key <- gsub("taxes_", "", oe_dat$key, fixed = TRUE)
oe_dat$key <- gsub("jobs_", "", oe_dat$key, fixed = TRUE)
oe_dat$key <- gsub("oe", "response", oe_dat$key, fixed = FALSE)
oe_dat <- oe_dat %>%
  spread(., key, value)

# 2. pre-process the text
# create corpus (note that 4 obs do not have response text)
docs <- corpus(oe_dat$response)
# assign document names that id unique responses
docnames(docs) <- paste(oe_dat$id, oe_dat$question, sep = "_")
# additional document meta data (don't want to lose this information). The `_` is to make sure you don't mix these up with features of the same name
docvars(docs, "id_") <- oe_dat$id
docvars(docs, "label_") <- oe_dat$label
docvars(docs, "question_") <- oe_dat$question
docvars(docs, "condition_") <- oe_dat$condition

# - They do not stem, or remove stopwords or other tokens. We should imagine they have reduced case and removed punctuation, but it is not clear.

# create a document-feature matrix containing n-grams
docs_dfm <- tokens(docs) %>%
  tokens_remove("\\p{P}", valuetype = "regex", padding = FALSE) %>% # punctuation
  dfm() # convert to DFM

# find number of features and top 100 features
nfeat(docs_dfm) # 2,694 features
topfeatures(docs_dfm, 100)

# - Note that the original paper "fixes vocabulary" which is intended to address the *finite sample bias* problem explored by Gentzkow, Shapiro, and Taddy (2016, NBER working paper). The idea behind this is that speakers have many phrases/words to choose from relative to the amount of speech we have on record. It could be the case that a speech contains a phrase or word that appears there but is not a substantive signal of party. However, as Gentzkow et al. explain, naive estimators don't understand the substantive signal and therefore would use such terms as though they were credible signals of party label. This could overstate the signal in the classifier; instead, fixing the size of the vocabulary is thought to prevent this. I have specifically eliminated certain terms in other work (i.e. proper nouns which might correlate with labels) and it appears that Gentzkow et al. (2016) use a LASSO to effectively eliminate "weak" terms. Petersen and Spirling "fix" the vocabulary by removing terms which appear in less than 200 speeches, which is 0.0057143 percent of their data. I am not sure we need to concern ourselves here, but in case I only include terms which appear in more than one answer.

# drops features appearing in only one document
docs_dfm <- dfm_trim(docs_dfm, min_docfreq = 2, docfreq_type = "count")
nfeat(docs_dfm) # 1,323 features (~51% reduction)

# 3. select algorithms & apply to data; 10-fold CV
# - Note that the original paper runs four algorithms over the data for each legislative session to classify party. They indicate that the best performing (highest accuracy in CV) is chosen for each session. They also create inversely proportional weights to balance the classes in each session. For our purposes, we do not have temporally segmented data. We do have very different class frequencies.

# first split the data into training and test sets by randomly pulling a sample of 50% of the data. if we want to weight by label or by question we can do so. we can also change how much of the data we use to train

# first create train and test sets by randomly holding out 50% of data
set.seed(42)
train_ids <- base::sample(docnames(docs_dfm), round(nrow(docs_dfm) * 0.5), replace = FALSE)
test_ids <- docnames(docs_dfm)[!(docnames(docs_dfm) %in% train_ids)]
length(test_ids[test_ids %in% train_ids]) # must evaluate to zero; it does

# get training set
dfmat_train <- docs_dfm[train_ids, ]
# get test set
dfmat_test <- docs_dfm[test_ids, ]

# - It is not clear to me that we need to use these four; in fact, the advice from the authors is to use Naive Bayes or some other fast, scalable option (and then to check with a few different alternatives).
# https://rpubs.com/FaiHas/197581 --perceptron
# Stochastic Gradient Descent, which is better for large data (http://deeplearning.stanford.edu/tutorial/supervised/OptimizationStochasticGradientDescent/)
# "passive-agressive" GLM with hinge-loss parameter would need to be hand-coded
# logit with specific loss, regularization parameters fit with stochastic average gradient descent would also need to be coded by hand in R

# we have decided to use Naive Bayes, Support Vector Machine(s) instead. this can be done in quanteda

# ensure compatible dimensionality of train, test sets
dfmat_matched <- dfm_match(dfmat_test, featnames(dfmat_train))

# set the seed to get reproducible results
set.seed(42)

# NB model
mod_nb <- textmodel_nb(dfmat_train, docvars(dfmat_train, "label_"))

# set the actual labels vector for evaluation, using the matched test labels
actual_class <- docvars(dfmat_matched, "label_")

# generate predictions of labels based on NB model, using the matched test data
predicted_class <- predict(mod_nb, newdata = dfmat_matched)

# store the label matrix
tab_class <- table(actual_class, predicted_class)

# print the label matrix in the console and look at F1. `caret` is the package I use here, and you need to explicitly set the positive class in order to get sensible results; the mode argument allows for F1 to print
confusionMatrix(tab_class, positive = "positive", mode = "everything")

# SVMs using different weighting schemes; note that this uses the actual class from the NB model (not model dependent)

# unweighted SVM
dfm(dfmat_train) %>%
  textmodel_svm(docvars(dfmat_train, "label_")) %>%
  predict(., newdata = dfmat_matched) %>%
  table(actual_class, .) %>%
  confusionMatrix(., positive = "positive", mode = "everything")

# proportional weight SVM
dfm(dfmat_train) %>%
  dfm_weight(scheme = "prop") %>%
  textmodel_svm(docvars(dfmat_train, "label_")) %>%
  predict(., newdata = dfmat_matched) %>%
  table(actual_class, .) %>%
  confusionMatrix(., positive = "positive", mode = "everything")

# SVM with tf-idf
dfm(dfmat_train) %>%
  dfm_tfidf() %>%
  textmodel_svm(docvars(dfmat_train, "label_")) %>%
  predict(., newdata = dfmat_matched) %>%
  table(actual_class, .) %>%
  confusionMatrix(., positive = "positive", mode = "everything")

# SVM with tf-idf weighted by docfreq
dfm(dfmat_train) %>%
  dfm_tfidf() %>%
  textmodel_svm(docvars(dfmat_train, "label_"), weight = "docfreq") %>%
  predict(., newdata = dfmat_matched) %>%
  table(actual_class, .) %>%
  confusionMatrix(., positive = "positive", mode = "everything")

# 4. accuracy (all true / all obs) where class is determined by a p >=.5
# - Note that the original authors are using balanced classes, and can get away with simple accuracy as a metric of classification error. However, there are different approaches to measuring classification error that are better for class imbalanced data, such as F1 (harmonic mean of precision, recall).

# 5. Establish ambivalence from accuracy, where low accuracy means high ambivalence.
# We will need to obtain class probabilities rather than bins. This will be done with the predict() function and setting type = "probability". We can then use that class probability to make a measure of "certainty" of language. Note that the probabilities are for the positive label, so we may want to do a "folding" type of thing here where values nearest to 0.5 are most uncertain.

# get the class probabilities for just the test set
nb_preds <- predict(mod_nb, newdata = dfmat_matched, type = "probability")

# create folded measure from the positive pred probs
tmp <- abs(nb_preds[, 2] - .5) %>%
  cbind(., t(sapply(str_split(names(.), "_"), unlist))) %>%
  as_tibble
names(tmp) <- c("folded_ml_x", "id", "question")
tmp$id <- as.numeric(tmp$id)
tmp$folded_ml_x <- as.numeric(tmp$folded_ml_x)

# add to oe data
oe_dat <- oe_dat %>%
  left_join(., tmp)
rm(tmp)

# add to total data
dat$folded_ml_jobs <- oe_dat %>%
  filter(., question == "jobs") %>%
  .[match(dat$id, .$id), "folded_ml_x"] %>%
  unlist
dat$folded_ml_taxes <- oe_dat %>%
  filter(., question == "taxes") %>%
  .[match(dat$id, .$id), "folded_ml_x"] %>%
  unlist

# correlation between closed ended responses and folded ml classifier
cor.test(dat$taxes_pos, dat$folded_ml_taxes)
cor.test(dat$jobs_pos, dat$folded_ml_jobs)

# correlation between folded responses and folded ml classifier
cor.test(dat$folded_taxes, dat$folded_ml_taxes)
cor.test(dat$folded_jobs, dat$folded_ml_jobs)

# we want to understand if the association of the first pair is smaller than the association of the second pair. Since the SECOND pair is not stat. sig. we would say that the relationship of the second pair is NOT stronger (~0.15 > 0). Assuming the same interpretation as before, this cannot support our claim around ambivalence. [CHECK INTERPRETATION!]

# do this another way - compare accuracies between groups

# add the class probabilities
tmp <- nb_preds[, 2] %>%
  cbind(., t(sapply(str_split(names(.), "_"), unlist))) %>%
  as_tibble
names(tmp) <- c("prob_ml_x", "id", "question")
tmp$id <- as.numeric(tmp$id)
tmp$prob_ml_x <- as.numeric(tmp$prob_ml_x)
oe_dat <- oe_dat %>%
  left_join(., tmp)
rm(tmp)
dat$prob_ml_jobs <- oe_dat %>%
  filter(., question == "jobs") %>%
  .[match(dat$id, .$id), "prob_ml_x"] %>%
  unlist
dat$prob_ml_taxes <- oe_dat %>%
  filter(., question == "taxes") %>%
  .[match(dat$id, .$id), "prob_ml_x"] %>%
  unlist

# re-label
dat$label_ml_taxes <- ifelse(dat$prob_ml_taxes >= .5, "positive", "negative")
dat$label_ml_jobs <- ifelse(dat$prob_ml_jobs >= .5, "positive", "negative")

# check confusion matrix on all obs
confusionMatrix(table(dat$taxes_label, dat$label_ml_taxes), positive = "positive", mode = "everything")

# now filter to subset and run again
dat %>%
  filter(., exposure == "consistent" & !is.na(label_ml_taxes)) %>%
  select(., taxes_label, label_ml_taxes) %>%
  table(.) %>%
  confusionMatrix(., positive = "positive", mode = "everything")
dat %>%
  filter(., exposure == "inconsistent" & !is.na(label_ml_taxes)) %>%
  select(., taxes_label, label_ml_taxes) %>%
  table(.) %>%
  confusionMatrix(., positive = "positive", mode = "everything")
# for taxes, it looks like there isn't a difference between accuracy (F1) from consistency or not

dat %>%
  filter(., exposure == "consistent" & !is.na(label_ml_jobs)) %>%
  select(., jobs_label, label_ml_jobs) %>%
  table(.) %>%
  confusionMatrix(., positive = "positive", mode = "everything")
dat %>%
  filter(., exposure == "inconsistent" & !is.na(label_ml_jobs)) %>%
  select(., jobs_label, label_ml_jobs) %>%
  table(.) %>%
  confusionMatrix(., positive = "positive", mode = "everything")
# for jobs there is a slight difference, but not sure it is enough
# OVERALL: if we think this is the right way to do this, it suggests again that ambivalence is not driving

# step 2 of this approach, actually look at the group differences as before
# now filter to subset and run again with specific groups
dat %>%
  filter(., condition == "assigned" & exposure == "consistent" & !is.na(label_ml_taxes)) %>%
  select(., taxes_label, label_ml_taxes) %>%
  table(.) %>%
  confusionMatrix(., positive = "positive", mode = "everything")
dat %>%
  filter(., condition == "assigned" & exposure == "inconsistent" & !is.na(label_ml_taxes)) %>%
  select(., taxes_label, label_ml_taxes) %>%
  table(.) %>%
  confusionMatrix(., positive = "positive", mode = "everything")
# the accuracy (F1) is not meaningfully different between consistent and inconsistent in assigned group

dat %>%
  filter(., condition == "choice" & exposure == "consistent" & !is.na(label_ml_taxes)) %>%
  select(., taxes_label, label_ml_taxes) %>%
  table(.) %>%
  confusionMatrix(., positive = "positive", mode = "everything")
dat %>%
  filter(., condition == "choice" & exposure == "inconsistent" & !is.na(label_ml_taxes)) %>%
  select(., taxes_label, label_ml_taxes) %>%
  table(.) %>%
  confusionMatrix(., positive = "positive", mode = "everything")
# while the accuracy is a little higher in the inconsistent group (ambivalence is lower) this is not a large difference
ea1ee27ec5345b09c27c05b56a3cb1af16112db9
980f570c0e740594473d42757816981de58b5470
/Rsrc/man/plot_sizetransition.Rd
728ce64ec6d15e9f4d971aa3fceea3c22a592d46
[ "LicenseRef-scancode-warranty-disclaimer" ]
no_license
LarryJacobson/gmacs
334f24fee6cd651f5e580d2225e1fac026d9757b
e5e999d3fdff2f392d6e48c11bdfe290bc672434
refs/heads/master
2021-01-14T12:44:45.044908
2015-01-16T01:05:01
2015-01-16T01:05:01
null
0
0
null
null
null
null
UTF-8
R
false
false
327
rd
plot_sizetransition.Rd
% Generated by roxygen2 (4.0.2): do not edit by hand \name{plot_sizetransition} \alias{plot_sizetransition} \title{Plot size transition} \usage{ plot_sizetransition(replist) } \arguments{ \item{replist}{List object created by read_admb function} } \value{ Plot of size transition matrix } \description{ Plot size transition }
fed6c65c5172ff2f8671db56e4a07681222eecd7
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/hansard/examples/mp_vote_record.Rd.R
b45cce94c78c94a3f3da4bf8151c4ac9a3224add
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
382
r
mp_vote_record.Rd.R
library(hansard) ### Name: mp_vote_record ### Title: Individual MP voting records ### Aliases: mp_vote_record hansard_mp_vote_record ### ** Examples ## Not run: ##D x <- mp_vote_record(172, lobby = "all") ##D ##D x <- mp_vote_record(172, lobby = "aye") ##D ##D x <- mp_vote_record(172, lobby = "no") ##D ##D x <- mp_vote_record(172, session = "2016/17") ## End(Not run)
e211b9337f95cb778b40538e69ca7b1d81af5550
f28950c0086eee6f8e89ea47565bcfe9341be8f4
/lib/Tree&rf.R
2e8c81ccb72c76d1e21eaae9717e9a41db5a4d51
[]
no_license
TZstatsADS/Spr2017-proj5-grp4
4c07b95b2aedf67e1569d2880cf2d235af489e53
95aeac3b53b9a8a8f05d385e880de44b57d88046
refs/heads/master
2021-01-19T06:24:23.453512
2017-04-28T19:13:40
2017-04-28T19:13:40
87,462,909
0
2
null
null
null
null
UTF-8
R
false
false
2,590
r
Tree&rf.R
# NOTE(review): decision tree + random forest on the Kaggle HR attrition
# dataset ("HR_comma_sep.csv", read from ~/Desktop/Proj5). Fits an rpart tree
# on a 75/25 split, reports AUC for the tree's positive-class probabilities,
# plots the tree, then fits a 30-tree randomForest and reports test accuracy.
# The setwd() to a user-specific path makes this script non-portable — TODO
# confirm the intended working directory before re-running.
library(randomForest)
library(randomForestSRC)
library(caret)
library(rpart)
library(rattle)
library(rpart.plot)
library("Metrics")

setwd("~/Desktop/Proj5")
data<-read.csv("HR_comma_sep.csv",header = T)

#add level to ppl left 1 means ppl left 0 means ppl stay
#data$left[data$left == 1] <- "Left" ;data$left[data$left == 0] <- "Remain"
data$left <- as.factor(data$left)

#ordered the salary
#data$salary <- ordered(data$salary, c("low", "medium", "high"))

per_train <- 0.75 # percentage of training data
smp_size <- floor(per_train * nrow(data)) # size of the sample
set.seed(111) #set seed
index <- sample(seq_len(nrow(data)), size = smp_size) # train row indices (the old comment said "red wine" — copy-paste leftover from another project)

#split the data into train and test
data.train<-data[index,]
data.test<-data[-index,]

#col index for ppl left
which(colnames(data.train)=="left")
Y.train<-data.train$left
X.train<-data.train[,-which(colnames(data.train)=="left")]
Y.test<-data.test$left
X.test<-data.test[,-which(colnames(data.train)=="left")]

# decisionTreeModel <- train(left~.,method="rpart",data=data.train)
# fancyRpartPlot(decisionTreeModel$finalModel)
# pred_tree <- predict(decisionTreeModel,X.test)
# mean(pred_tree == Y.test)

tree.model <- rpart(left ~ ., data = data.train)
tree.predict <- predict(tree.model, data.test)
#accuracy (AUC of the tree's positive-class probability column)
auc(as.numeric(Y.test) - 1, tree.predict[, 2])
# ?auc
# pred<-as.numeric(round(tree.predict)[,2])
# mean(as.numeric(Y.test) - 1 == pred)
rpart.plot(tree.model, type = 2, fallen.leaves = F, cex = 0.8, extra = 2)

# So, what can we observe?
#
# Satisfaction level appears to be the most important piece. If you’re above 0.46 you’re much more likely to stay (which is what we observed above).
# If you have low satisfaction, the number of projects becomes important. If you’re on more projects you’re more likely to remain. If you’re on fewer projects – perhaps you see the writing on the wall?
# If you’re happy, have been at the company for less than 4.5 years, and score over 81% on your last evaluation, you’re very likely to leave. And, it appears as if the “decider” is monthly hours over 216.
# In brief:
#
# If you’re successful and overworked, you leave.
# If you’re unhappy and overworked, you leave.
# If you’re unhappy and underworked, you leave.
# If you’ve been at the company for more than 6.5 years, you’re more likely to be happy working longer hours.

####random forest###
randomforest.Model <- randomForest(left~ .,data.train,ntree=30)
randomforest.predict <- predict(randomforest.Model,data.test)
mean(randomforest.predict == Y.test)
#head(data.test)
4f5607f443859aacb5e5ec2d696b8daa61942a82
c073ada8ff21dcd20094f93a944a84040b1d0bfc
/src/03_seuratPreProcess.R
cfcf9d2b9325ca795b96c090bb8c3f5a4c16589d
[]
no_license
GenomicsNX/scRNAseq-Gut-IFNB-IFNL
a71f0a6ba1b8ee3c0640430fb8c8d0936e7826d9
8fae9983cf212c6d4910a5428fb64f1ead9ecd03
refs/heads/main
2023-05-30T07:28:31.381865
2021-06-16T14:04:42
2021-06-16T14:04:42
null
0
0
null
null
null
null
UTF-8
R
false
false
11,501
r
03_seuratPreProcess.R
# NOTE(review): scRNA-seq QC/filtering pipeline for the sc_interferon project.
# For each sequencing run (rep) and sample (Int1..Int8): read the kb-python
# count matrix, draw QC plots (knee plot, nCount vs nGene, nGene density),
# filter empty barcodes (UMI count above the knee-plot inflection, >500 genes,
# genes in >1% of cells), map gene IDs to symbols, build a Seurat object,
# flag doublets with scDblFinder, save a consolidated QC figure, then drop
# high-MT cells and doublets and save the filtered object. A superseded
# emptyDrops()-based filter is kept commented out below. All paths are
# cluster-specific (DKFZ filesystem).
#-------------------------------------------------------------------------------
# Install required libraries and set the root path
#-------------------------------------------------------------------------------
library(DropletUtils)
library(Matrix)
library(tidyverse)
library(Seurat)
library(scDblFinder)
library(scico)
library(reshape2)
library(patchwork)

path = "/icgc/dkfzlsdf/analysis/B080/sharma/boulantLab/sc_interferon/"

#-------------------------------------------------------------------------------
# Run analysis over all samples and replicates
#-------------------------------------------------------------------------------
for (rep in c("HJHG3BGXF", "HJJCYBGXF")) {

  # Reading the corresponding annotation file
  anno = readRDS(paste0(path,"data/", "scINF_",rep,"_anno.RDS"))[,2:5]
  anno = unique(anno)

  for(type in paste0("Int",1:8)) {

    print(paste0("Running analysis for - ", rep, " and type - ", type))

    #---------------------------------------------------------------------------
    # Create output directory
    #---------------------------------------------------------------------------
    if( ! dir.exists(paste0(path, "results/merged_after_kb/QC/", rep))){
      dir.create(paste0(path, "results/merged_after_kb/QC/", rep), recursive = T)
    }
    if( ! dir.exists(paste0(path, "analysis/", rep, "_counts/Seurat"))){
      dir.create(paste0(path, "analysis/", rep, "_counts/Seurat"), recursive = T)
    }

    #---------------------------------------------------------------------------
    # Function to read kb-python output
    #---------------------------------------------------------------------------
    read_count_output <- function(dir, name) {
      dir <- normalizePath(dir, mustWork = TRUE)
      m <- readMM(paste0(dir, "/", name, ".mtx"))
      m <- Matrix::t(m)
      m <- as(m, "dgCMatrix")
      # The matrix read has cells in rows
      ge <- ".genes.txt"
      genes <- readLines(file(paste0(dir, "/", name, ge)))
      barcodes <- readLines(file(paste0(dir, "/", name, ".barcodes.txt")))
      colnames(m) <- barcodes
      rownames(m) <- genes
      return(m)
    }

    res_mat <- read_count_output(dir = paste0(path, "analysis/", rep, "_counts/kb/", type, "/counts_unfiltered"), name = "cells_x_genes")

    #---------------------------------------------------------------------------
    # 1. QC plot - Knee plot function
    #---------------------------------------------------------------------------
    knee_plot <- function(bc_rank) {
      knee_plt <- tibble(rank = bc_rank[["rank"]], total = bc_rank[["total"]]) %>%
        distinct() %>%
        dplyr::filter(total > 0)
      annot <- tibble(inflection = metadata(bc_rank)[["inflection"]], rank_cutoff = max(bc_rank$rank[bc_rank$total > metadata(bc_rank)[["inflection"]]]))
      p <- ggplot(knee_plt, aes(total+1, rank)) +
        theme_bw(base_size = 8) +
        geom_line() +
        geom_hline(aes(yintercept = rank_cutoff), data = annot, linetype = 2) +
        geom_vline(aes(xintercept = inflection), data = annot, linetype = 2) +
        scale_x_log10() +
        scale_y_log10() +
        annotation_logticks() +
        labs(y = "Rank", x = "Total UMIs")
      return(p)
    }

    bc_rank <- barcodeRanks(res_mat, lower = 300)
    p1 = knee_plot(bc_rank)

    #---------------------------------------------------------------------------
    # 2. QC plot - nCount vs nGene per cell
    #---------------------------------------------------------------------------
    df2 <- tibble(nCount = colSums(res_mat), nGene = colSums(res_mat > 0))
    p2 = ggplot(df2, aes(nCount+1, nGene+1)) +
      theme_bw(base_size = 8) +
      geom_bin2d(bins = 50) +
      scale_fill_scico(palette = "devon", direction = -1, end = 0.95) +
      scale_x_log10() +
      scale_y_log10() +
      annotation_logticks() +
      geom_vline(xintercept = metadata(bc_rank)[["inflection"]]) +
      geom_hline(yintercept = 500) +
      labs(x = "Total UMI counts", y = "Number of genes detected") +
      theme(legend.position = "bottom", legend.text = element_text(angle = 45), legend.text.align = 1) +
      guides(fill = guide_colourbar(title.position="left", title.vjust = 0.75))

    #---------------------------------------------------------------------------
    # 3. QC plot - distribution of nGene per cell
    #---------------------------------------------------------------------------
    p3 = ggplot(df2, aes(nGene+1)) +
      theme_bw(base_size = 8) +
      scale_x_log10() +
      #geom_vline(xintercept = 100) +
      labs(y = "Density") +
      geom_vline(xintercept = 500) +
      annotation_logticks() +
      geom_density()

    #---------------------------------------------------------------------------
    # Filtering empty barcodes based on -
    #
    # 1. Total UMI count in each cell > knee plot inflection
    # 2. Number of genes detected in each cell > 300
    #    (NOTE(review): the code below actually uses > 500, matching the QC
    #    plot cutoffs — the "300" in this comment appears stale.)
    #
    # [After step (1 + 2)]
    #
    # 3. Genes detected in at least 1% of non-empty cells
    #---------------------------------------------------------------------------
    res_mat <- res_mat[,colSums(res_mat) > metadata(bc_rank)$inflection & colSums(res_mat > 0) > 500]
    res_mat <- res_mat[rowSums(res_mat > 0) > 0.01 * ncol(res_mat),]

    rm(df2, bc_rank)

    # #-------------------------------------------------------------------------------
    # # Identify empty barcodes
    # #-------------------------------------------------------------------------------
    #
    # # Identify empty droplets
    # # Any barcode with total UMI count < 100 will be automatically considered empty and assigned a NA value
    # set.seed(123)
    # e.out = emptyDrops(res_mat, lower = 100)
    # is.cell = e.out$FDR <= 0.01
    #
    # # Check to ensure that the number of iteration in the MC simulation was sufficient or not
    # # See http://bioconductor.org/books/release/OSCA/droplet-processing.html
    # table(Significant=is.cell, Limited=e.out$Limited)
    #
    # # Should Monte Carlo simulation iterations be increased (if 0 no, if > 0 yes)
    # mc = table(Significant=is.cell, Limited=e.out$Limited)[1,2]
    #
    # # Empty drops selected empty cells
    # ed = sum(is.cell, na.rm=TRUE) / length(is.cell) * 100
    #
    # # Low expression based empty cell (if total UMI count per cell < 100)
    # # Empty drops assigns such cell NA values
    # le = sum(is.na(is.cell)) / length(is.cell) * 100
    #
    # # Non empty cells
    # ne = sum(!is.cell, na.rm=TRUE) / length(is.cell) * 100
    #
    # # Total number of barcodes
    # bc = length(is.cell)
    #
    # # Filter empty barcodes
    # res_mat = res_mat[,which(e.out$FDR <= 0.01)]
    #
    # # Saving empty droplets QC
    # saveRDS(setNames(c(mc, ed, le, ne, bc), c("monte_carlo_sim", "empty_drops", "low_expr", "non_empty", "barcode_count")),
    #         paste0(path, "analysis/", rep,"_QC/",type,"/emptydroplets/edStats.RDS"))
    # saveRDS(e.out, paste0(path, "analysis/", rep,"_QC/",type,"/emptydroplets/emptyDropsOutput.RDS"))
    #
    # rm(e.out, is.cell, mc, ed, le, ne, bc)

    #-------------------------------------------------------------------------------
    # Gene name mapping
    #-------------------------------------------------------------------------------
    tr2g <- read_tsv(paste0(path, "data/kallistoIndex/t2g.txt"), col_names = c("transcript", "gene", "gene_symbol")) %>%
      select(-transcript) %>%
      distinct()
    rownames(res_mat) <- tr2g$gene_symbol[match(rownames(res_mat), tr2g$gene)]
    rm(tr2g)

    #-------------------------------------------------------------------------------
    # Seurat pre-processing
    #-------------------------------------------------------------------------------
    # Make a Seurat object, with the following filtering criteria -
    # removing cells with less than 100 genes
    # removing genes expressed in less than 100 cells
    seu = CreateSeuratObject(res_mat, project = paste0("scINF_", rep, "_", type))
    rm(res_mat)

    # Additional study annotation
    seu[["site"]] = anno$site[anno$type == type]
    seu[["treatment"]] = anno$treatment[anno$type == type]
    seu[["replicate"]] = anno$replicate[anno$type == type]

    # Estimate mitochondrial composition per barcode
    seu[["percent.mt"]] <- PercentageFeatureSet(seu, pattern = "^MT-")

    # Identify doublets
    dbl = scDblFinder(as.SingleCellExperiment(seu))
    seu[["doublets"]] <- dbl$scDblFinder.class
    dbl = data.frame(table(dbl$scDblFinder.class))
    p4 = ggplot(data=dbl, aes(x=Var1, y=Freq)) +
      theme_bw(base_size = 8) +
      geom_bar(stat='identity', position="dodge") +
      labs(x="", y = "Counts", fill = "") +
      geom_text(aes(label = Freq), position = position_dodge(width=0.9), vjust = 0, size = 3)
    rm(dbl)

    #---------------------------------------------------------------------------
    # Final consolidated QC plots
    #---------------------------------------------------------------------------
    qcdat = melt(seu@meta.data)
    qcdat = data.frame(Treatment=paste(qcdat$site, qcdat$treatment, sep= "-"), Type = qcdat$variable, value = qcdat$value, stringsAsFactors = F)
    p5 = ggplot(qcdat, aes(x = Type, y = value)) +
      labs(x="", y="") +
      theme_bw(base_size = 8) +
      geom_violin(color = "grey60", draw_quantiles = 0.5, lwd=0.3) +
      facet_wrap(~Type, scales = "free") +
      theme(strip.background = element_blank(), strip.text = element_blank())
    p6 = ggplot(seu@meta.data, aes(x = nCount_RNA, y = nFeature_RNA, color = percent.mt)) +
      theme_bw(base_size = 8) +
      labs(x = "Total RNA counts", y = "Number of genes detected") +
      geom_point(size = 0.2) +
      scale_shape_manual(values = c(1,3)) +
      scale_x_log10() +
      scale_y_log10() +
      annotation_logticks() +
      geom_vline(xintercept = quantile(seu@meta.data$nCount_RNA, 0.99)) +
      geom_hline(yintercept = quantile(seu@meta.data$nFeature_RNA, 0.99)) +
      facet_wrap(~doublets, scales = "free") +
      theme(axis.text.x = element_text(angle = 45, hjust = 1, vjust = 1))

    # Integrated QC plot
    p1 = p1 + labs(subtitle = paste(type, unique(qcdat$Treatment),sep="-"))
    p = (p1 + p2 + p3) / (p4 + p5 + p6)
    ggsave(filename = paste0(paste0(path, "results/merged_after_kb/QC/", rep, "/"), paste(type, unique(qcdat$Treatment), "QCresults.pdf", sep="-") ), plot = p, width = 7.5, height = 6)
    rm(p, p1, p2, p3, p4, p5, p6, qcdat)

    #---------------------------------------------------------------------------
    # Filtering poor quality cells, i.e
    #
    # 1. With MT content > 20%
    # 2. Cells classified as doublets
    #---------------------------------------------------------------------------
    seufilt <- subset(seu, subset = percent.mt < 20)
    seufilt <- subset(seufilt, subset = doublets == "singlet")

    # Saving filtered objects
    saveRDS(seufilt, paste0(path, "analysis/", rep,"_counts/Seurat/",type,"_filteredCounts.RDS"))

    rm(seu, seufilt)
  }
  rm(anno)
}
46b9f4915b3fb6618f173936362cb9b05390c513
f9a03eca6377913e1b5f5c550f18cae81128551f
/misc/20180622_AlvarkDefence.R
4c0b3a3f97b6cbae5d59a2604c6fbe38f93a5c6b
[]
no_license
rintaromasuda/bleaguebydata
03ea24c82bd875f2b296bc170cd4c40e9f80ccaa
285de734f0c5e8db22588c5b6f9edad9f9c34bcc
refs/heads/master
2022-07-01T12:42:44.456004
2022-06-05T05:38:30
2022-06-05T05:38:30
112,900,747
2
0
null
2019-02-26T14:37:25
2017-12-03T04:47:40
R
UTF-8
R
false
false
792
r
20180622_AlvarkDefence.R
setwd('C:/git/bleaguebydata/analysis') library(knitr) library(dplyr) library(ggplot2) library(stringr) df <- read.csv("201718_b1_gamebygame.csv") df$POS <- df$F2GA + df$F3GA + (0.44 * df$FTA) + df$TO - df$OR df$PPP <- df$PTS / df$POS df$PPP100 <- df$PPP * 100 gsub(df$VS, "vs", "") df$VS_STR <- as.character(df$VS) df$VS_STR <- gsub("vs", "", df$VS_STR) df$ISAVR <- grepl("A東京", df$VS_STR) df[df$ISAVR, ] df$ISBREX <- grepl("栃木", df$VS_STR) summary(df$TEAM) df <- df[order(df$PPP100, decreasing = TRUE),] df.mikawa <- subset(df, TEAM == "三河") #ggplot(df.mikawa, aes(x = DAY, y = PPP100)) + # geom_bar(stat = "identity") + # geom_bar(stat = "Identity", data = subset(df.mikawa, ISAVR), fill = "red") ggplot(df.mikawa, aes(y = PPP100)) + geom_histogram(stat = "identity")
0d4fee96e22f57cda04988b45f959ff174f59038
d05f52ac50bd71f7b08f01bebc0c4488b33f9be1
/17분석스터디/17스터디 시각화 과제.R
8742d03f0ff2346dcd39a69ffb2b6eacd1ea83b7
[]
no_license
gunw00/R_project
51755c9ed54a47210f6b9551c2396e3c2f525e5a
abad45ee9682ed4260c5e4d496cf5d4fcb1591f0
refs/heads/master
2021-07-03T15:15:37.694766
2021-06-30T13:21:37
2021-06-30T13:21:37
235,510,343
0
0
null
null
null
null
UTF-8
R
false
false
1,223
r
17스터디 시각화 과제.R
# 장석건 training %>% ggplot(aes(workclass, fill=wage)) + geom_bar() training %>% ggplot(aes(capital_gain-capital_loss, fill=wage)) +geom_density(alpha =1) + xlim(-5000, 5000) training %>% ggplot(aes(hours_per_week,fill=wage)) + geom_density(alpha=.5) # 차현우 training%>%ggplot(aes(hours_per_week,fill=wage))+geom_bar() training%>%ggplot(aes(occupation,fill=wage))+geom_bar()+coord_flip() training%>%ggplot(aes(marital_status,fill=wage))+geom_bar()+coord_flip() training%>%ggplot(aes(race,fill=wage))+geom_bar() # 배경득 training %>% ggplot(aes(sex,fill=wage))+geom_bar() training %>% ggplot(aes(race,fill=sex))+geom_bar() training %>% ggplot(aes(age,fill=sex))+geom_bar() training %>% group_by(wage, sex) %>% tally() %>% mutate(tol_n = n/sum(n)) %>% ggplot(aes(wage, tol_n, fill=sex))+geom_histogram(stat='identity')+ylim(0,1) # 김성원 training %>% ggplot(aes(sex, fill=wage)) + geom_bar() training %>% ggplot(aes(age, fill=wage)) + geom_density(alpha=.6) training %>% ggplot(aes(hours_per_week, fill=sex)) + geom_density(alpha=.6) training %>% ggplot(aes(hours_per_week, fill=wage)) + geom_density(alpha=.6) training %>% ggplot(aes(workclass, fill=wage)) + geom_bar()
3959154833430020bfffa0898cf5f5c8b35b772d
cc7139789c2e524d61e11c92a033666246d2da32
/1_RawData/LearningCurve.R
31cd51d8393becb345b30e7ecc5c3661e7925794
[]
no_license
dmpe/bachelor
e99dc0de17d8cf54455dfec08c15e32f1916f74c
7611bb2556cc728e64d7be50a47c32a055b6ce9d
refs/heads/master
2021-01-10T13:00:51.218870
2015-10-02T15:52:25
2015-10-02T15:52:25
36,564,092
1
0
null
null
null
null
UTF-8
R
false
false
1,066
r
LearningCurve.R
library(stringr) library(plyr) library(xlsx) learningCurveData <- read.xlsx("1_RawData/DataSources/learningcurve.xlsx", sheetIndex = 1, startRow = 18, endRow = 58) learningCurveData <- plyr::rename(learningCurveData, c(NA. = "Country", Overall.Index = "LearningCurve_Index")) # sapply(learningCurveData, class) # factors -> to char learningCurveData$Country <- str_trim(learningCurveData$Country, side = "both") learningCurveData$Country[learningCurveData$Country == "South Korea"] <- "Korea" learningCurveData$Country[learningCurveData$Country == "Hong Kong-China"] <- "China" #' delete some columns learningCurveData <- learningCurveData[, !(colnames(learningCurveData) %in% c("Cognitive.Skills", "Educational.Attainment", "Notes", "NA..1", "NA..2"))] learningCurveData$Ranking_LearningCurve <- seq(1, 40) # learningCurveData$LearningCurve_Index <- scale(learningCurveData$LearningCurve_Index) #' mean of 24 countries z-score #' format(round(mean(subset(learningCurveData, Country %in% selectedCountries)$LearningCurve_Index), 5), nsmall = 5)
0c0369cac1a5ac1f1d22ea6cac2b1443c1e1947f
5c4c58456a2a0f1d80c5eb783d6cbc8f846f8414
/man/geovctrs-package.Rd
a0cd6e7dca77d35b30a10fb6fd649637c80027a7
[ "MIT" ]
permissive
paleolimbot/geovctrs
6b9882efe4a05d2d3614932feae68c0c6b9a37f6
12de2c88d29df5c5e770405b0d772906a2e18533
refs/heads/master
2021-05-18T01:28:15.343654
2020-07-24T18:53:27
2020-07-24T18:53:27
251,045,555
24
2
null
2020-06-27T02:37:54
2020-03-29T13:59:38
R
UTF-8
R
false
true
871
rd
geovctrs-package.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/geovctrs-package.R \docType{package} \name{geovctrs-package} \alias{geovctrs} \alias{geovctrs-package} \title{geovctrs: Efficient Data Structures for Geometry Vectors} \description{ Provides a set of data structures to efficiently encode simple geometries such as points, segments, and rectangles. Coercion functions are provided such that any vector of geometry-like objects can be used interchangeably. } \seealso{ Useful links: \itemize{ \item \url{https://paleolimbot.github.io/geovctrs} \item \url{https://github.com/paleolimbot/geovctrs} \item Report bugs at \url{https://github.com/paleolimbot/geovctrs/issues} } } \author{ \strong{Maintainer}: Dewey Dunnington \email{dewey@fishandwhistle.net} (\href{https://orcid.org/0000-0002-9415-4582}{ORCID}) } \keyword{internal}
333652745d13a05a995d0339f93a18425b93db89
59988b49b03b5a539a64425222310663a3e1bef5
/code/data_gathering_code/rebuilding_Statcast_db.R
230d625622dda9dc001c9479ccd7a4c4416c1a1c
[]
no_license
cgettings/Baseball-Data
17896a8d18b5f1f09d86e9cffd73c265c375a1b8
bfe5110ba5e03c60bfd43fae0d6637b18433bf71
refs/heads/master
2021-04-26T23:58:43.240753
2020-02-02T20:13:14
2020-02-02T20:13:14
123,887,732
2
0
null
null
null
null
UTF-8
R
false
false
24,659
r
rebuilding_Statcast_db.R
###########################################################################################- ###########################################################################################- ## ## Building Statcast database ---- ## ###########################################################################################- ###########################################################################################- #=========================================================================================# # Setting up ---- #=========================================================================================# #-----------------------------------------------------------------------------------------# # Loading libraries #-----------------------------------------------------------------------------------------# library(tidyverse) library(lubridate) library(stringdist) library(magrittr) library(dbplyr) library(DBI) library(RSQLite) library(pryr) library(useful) library(tictoc) library(glue) library(openxlsx) #-----------------------------------------------------------------------------------------# # Loading custom functions #-----------------------------------------------------------------------------------------# source("code/functions/get_savant_pitches_data.R") rate <- rate_backoff(pause_base = 1, pause_cap = 512, pause_min = 1, max_times = Inf, jitter = FALSE) insistently_get_savant_pitches_data <- insistently( get_savant_pitches_data, rate = rate, quiet = FALSE ) game_date <- NA_character_ get_game_date <- function(...) 
identity(game_date) insistently_get_savant_pitches_data_possibly <- possibly( insistently_get_savant_pitches_data, otherwise = tibble(game_date = get_game_date())) #-----------------------------------------------------------------------------------------# # Connecting to database #-----------------------------------------------------------------------------------------# statcast_db <- dbConnect(SQLite(), "data/statcast_db_rebuilt.sqlite3") #-----------------------------------------------------------------------------------------# # Setting search parameters #-----------------------------------------------------------------------------------------# ## Setting start_date to March 1, 2008 ## start_date <- as_date("2008-03-01") ## Setting end_date to today ## end_date <- today(tzone = "US/Eastern") ## setting the time span ## num_days <- (end_date - start_date) %>% as.numeric() ## sequence of single days ## every.1.days <- seq(1, num_days, 1) ## single-day spans ## single_days_all <- sort(end_date - every.1.days, decreasing = FALSE) single_days <- single_days_all[which(month(single_days_all) %in% c(3:11))] #=========================================================================================# # Downloading data ---- #=========================================================================================# ## need to break up in "home" and "away" batches, to avoid getting a "too much data" error ## now() %>% format("%r") tic() for(each_day in 1:length(single_days)) { game_date <- single_days[each_day] ## Home ## statcast_update_home <- insistently_get_savant_pitches_data_possibly( start_date = game_date, end_date = game_date, home_road = "Home") ## Road ## statcast_update_road <- insistently_get_savant_pitches_data_possibly( start_date = game_date, end_date = game_date, home_road = "Road") ## Combining home and away ## statcast_update_comb <- bind_rows( statcast_update_home, statcast_update_road ) %>% as_tibble() ## Writing to database ## dbWriteTable( statcast_db, 
"statcast_update_comb", statcast_update_comb, overwrite = FALSE, append = TRUE ) ## Pretty printing info to console if (nrow(statcast_update_home) > 0 | nrow(statcast_update_road) > 0) { cat("\n===============================\n") cat("Loop", each_day) cat("|-- ") cat(game_date %>% as.character()) cat(" --|") cat("\n-------------------------------\n") cat("Home") cat("\n- - - - - - - - - - - - - - - -\n") cat(nrow(statcast_update_home), "rows added\n") cat("Size: ") object_size(statcast_update_home) %>% print() cat("-------------------------------\n") cat("Road") cat("\n- - - - - - - - - - - - - - - -\n") cat(nrow(statcast_update_road), "rows added\n") cat("Size: ") object_size(statcast_update_road) %>% print() cat("===============================\n") } gc(verbose = FALSE) Sys.sleep(1) } now() %>% format("%r") toc() #=========================================================================================# # Cleaning ---- #=========================================================================================# statcast_db <- dbConnect(SQLite(), "data/statcast_db_rebuilt.sqlite3") statcast_db_chunk <- dbConnect(SQLite(), "data/statcast_db_rebuilt_chunk.sqlite3") #-----------------------------------------------------------------------------------------# # Setting up query #-----------------------------------------------------------------------------------------# statcast_res <- dbSendQuery( statcast_db, "SELECT * FROM statcast_update_comb" ) #-----------------------------------------------------------------------------------------# # Getting data in chunks #-----------------------------------------------------------------------------------------# while (!dbHasCompleted(statcast_res)) { statcast_update_comb_distinct_clean <- dbFetch(statcast_res, n = 5e5) %>% as_tibble() %>% select( -contains("error"), -contains("_deprecated"), # dropping deprecated columns -fielder_2 ) %>% rename(fielder_2 = fielder_2_1) %>% mutate_all(list(~case_when(. 
== "null" ~ NA_character_, TRUE ~ .))) %>% type_convert() %>% mutate( game_date = as_date(game_date), year = year(game_date), month = month(game_date), day = day(game_date), wday = wday(game_date) ) %>% mutate( obs_date_time_loc = make_datetime( year = str_c("20", substr(sv_id, 1, 2)) %>% as.integer(), month = substr(sv_id, 3, 4) %>% as.integer(), day = substr(sv_id, 5, 6) %>% as.integer(), hour = substr(sv_id, 8, 9) %>% as.integer(), min = substr(sv_id, 10, 11) %>% as.integer(), sec = substr(sv_id, 12, 13) %>% as.integer(), tz = "UTC" ) ) %>% mutate( home_team = case_when(home_team == "FLA" ~ "MIA", TRUE ~ home_team) ) %>% mutate( away_team = case_when(away_team == "FLA" ~ "MIA", TRUE ~ away_team) ) %>% mutate( ballpark = case_when( home_team == "ATL" & as.integer(game_year) < 2017L ~ "Turner Field", home_team == "ATL" & as.integer(game_year) >= 2017L ~ "SunTrust Park", home_team == "ARI" ~ "Chase Field", home_team == "BAL" ~ "Camden Yards", home_team == "BOS" ~ "Fenway Park", home_team == "CIN" ~ "Great American Ball Park", home_team == "COL" ~ "Coors Field", home_team == "CWS" ~ "U.S. 
Cellular Field", home_team == "DET" ~ "Comerica Park", home_team == "CHC" ~ "Wrigley Field", home_team == "CLE" ~ "Progressive Field", home_team == "HOU" ~ "Minute Maid Park", home_team == "KC" ~ "Kauffman Stadium", home_team == "LAA" ~ "Angel Stadium", home_team == "LAD" ~ "Dodger Stadium", home_team == "MIA" & as.integer(game_year) < 2012L ~ "Land Shark Stadium", home_team == "MIA" & as.integer(game_year) >= 2012L ~ "Marlins Park", home_team == "MIL" ~ "Miller Park", home_team == "MIN" & as.integer(game_year) < 2010L ~ "Metrodome", home_team == "MIN" & as.integer(game_year) >= 2010L ~ "Target Field", home_team == "NYM" & as.integer(game_year) < 2009L ~ "Shea Stadium", home_team == "NYM" & as.integer(game_year) >= 2009L ~ "Citi Field", home_team == "NYY" & as.integer(game_year) < 2009L ~ "Old Yankee Stadium", home_team == "NYY" & as.integer(game_year) >= 2009L ~ "Yankee Stadium", home_team == "OAK" ~ "O.co Coliseum", home_team == "PIT" ~ "PNC Park", home_team == "PHI" ~ "Citizens Bank Park", home_team == "STL" ~ "Busch Stadium", home_team == "SEA" ~ "Safeco Field", home_team == "SF" ~ "AT&T Park", home_team == "SD" ~ "PETCO Park", home_team == "TB" ~ "Tropicana Field", home_team == "TEX" ~ "Globe Life Park", home_team == "TOR" ~ "Rogers Centre", home_team == "WSH" ~ "Nationals Park" ) ) %>% mutate( obs_date_time_utc = case_when( ballpark %in% c("Busch Stadium", "Globe Life Park", "Target Field", "Kauffman Stadium", "Metrodome", "Miller Park", "Minute Maid Park", "Target Field", "U.S. 
Cellular Field", "Wrigley Field") ~ force_tz(obs_date_time_loc, tzone = "US/Central") %>% with_tz(., tzone = "UTC"), ballpark %in% c("Camden Yards", "Citi Field", "Citizens Bank Park", "Comerica Park", "Fenway Park", "Great American Ball Park", "Land Shark Stadium", "Marlins Park", "Nationals Park", "Old Yankee Stadium", "PNC Park", "Progressive Field", "Rogers Centre", "Shea Stadium", "SunTrust Park", "Tropicana Field", "Turner Field", "Yankee Stadium") ~ force_tz(obs_date_time_loc, tzone = "US/Eastern") %>% with_tz(., tzone = "UTC"), ballpark == "Coors Field" ~ force_tz(obs_date_time_loc, tzone = "US/Mountain") %>% with_tz(., tzone = "UTC"), ballpark == "Chase Field" ~ force_tz(obs_date_time_loc, tzone = "America/Phoenix") %>% with_tz(., tzone = "UTC"), ballpark %in% c("Dodger Stadium", "AT&T Park", "PETCO Park", "O.co Coliseum", "Angel Stadium", "Safeco Field") ~ force_tz(obs_date_time_loc, tzone = "US/Pacific") %>% with_tz(., tzone = "UTC") )) %>% mutate( obs_date_time_loc_chr = as.character(obs_date_time_loc), obs_date_time_utc_chr = as.character(obs_date_time_utc) ) %>% mutate( loc_date_round = round_date(obs_date_time_loc, unit = "hour"), utc_date_round = round_date(obs_date_time_utc, unit = "hour") ) %>% mutate( loc_date_round_chr = as.character(loc_date_round), utc_date_round_chr = as.character(utc_date_round) ) %>% mutate( pitch_type = case_when( pitch_type == "CH" ~ "Changeup", pitch_type == "CU" ~ "Curveball", pitch_type == "EP" ~ "Eephus", pitch_type == "FA" ~ "Fastball", pitch_type == "FC" ~ "Cutter", pitch_type == "FF" ~ "Four-seam Fastball", pitch_type == "FS" ~ "Splitter", pitch_type == "FT" ~ "Two-seam Fastball", pitch_type == "FO" ~ "Forkball", pitch_type == "IN" ~ "Intent ball", pitch_type == "KC" ~ "Knuckle ball Curve", pitch_type == "KN" ~ "Knuckle ball", pitch_type == "PO" ~ "Pitch Out", pitch_type == "SC" ~ "Screwball", pitch_type == "SI" ~ "Sinker", pitch_type == "SL" ~ "Slider" ) ) %>% mutate(batter_team = case_when( inning_topbot == "bot" 
~ home_team, inning_topbot == "top" ~ away_team )) %>% mutate(pitcher_team = case_when( inning_topbot == "bot" ~ away_team, inning_topbot == "top" ~ home_team )) %>% # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # # trigonometric operations # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # mutate( hc_x.0 = hc_x - 125.42, hc_y.0 = 198.27 - hc_y, hc_x.new.1 = hc_x.0 * cos(-.75) - hc_y.0 * sin(-.75), hc_y.new.1 = hc_x.0 * sin(-.75) + hc_y.0 * cos(-.75), horiz_angle = cart2pol(x = hc_x.new.1, y = hc_y.new.1, degrees = FALSE) %>% pull(theta), hit_radius = cart2pol(x = hc_x.new.1, y = hc_y.new.1, degrees = FALSE) %>% pull(r), horiz_angle_deg = (horiz_angle * 180) / pi, horiz_angle_c = (horiz_angle_deg - 45) * -1 ) %>% # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # # categorizing hits into angle groupings # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # mutate( horiz_angle_2 = case_when( horiz_angle_c <= 0 & horiz_angle_c >= -45 ~ "Left", horiz_angle_c > 0 & horiz_angle_c <= 45 ~ "Right") %>% factor(., levels = c("Left", "Right"), ordered = TRUE), horiz_angle_3 = case_when( horiz_angle_c < -15 ~ "Left", (horiz_angle_c >= -15) & (horiz_angle_c < 15) ~ "Center", horiz_angle_c >= 15 ~ "Right") %>% factor(., levels = c("Left", "Center", "Right"), ordered = TRUE), horiz_angle_4 = case_when( horiz_angle_c < -22.5 ~ "Left", (horiz_angle_c >= -22.5) & (horiz_angle_c < 0) ~ "Left Center", (horiz_angle_c >= 0) & (horiz_angle_c < 22.5) ~ "Right Center", horiz_angle_c >= 22.5 ~ "Right") %>% factor(., levels = c("Left", "Left Center", "Right Center", "Right"), ordered = TRUE), horiz_angle_5 = case_when( horiz_angle_c < -27 ~ "Left", (horiz_angle_c >= -27) & (horiz_angle_c < -9) ~ "Left Center", (horiz_angle_c >= -9) & (horiz_angle_c < 9) ~ "Center", (horiz_angle_c >= 9) & (horiz_angle_c < 27) ~ "Right Center", 
horiz_angle_c >= 27 ~ "Right") %>% factor(., levels = c("Left", "Left Center", "Center", "Right Center", "Right"), ordered = TRUE), horiz_angle_6 = case_when( horiz_angle_c < -30 ~ "Left", (horiz_angle_c >= -30) & (horiz_angle_c < -15) ~ "Left Center", (horiz_angle_c >= -15) & (horiz_angle_c < 0) ~ "Center Left", (horiz_angle_c >= 0) & (horiz_angle_c < 15) ~ "Center Right", (horiz_angle_c >= 15) & (horiz_angle_c < 30) ~ "Right Center", horiz_angle_c >= 30 ~ "Right") %>% factor(., levels = c("Left", "Left Center", "Center Left", "Center Right", "Right Center", "Right"), ordered = TRUE) ) gc() #-----------------------------------------------------------------------------------------# # Finishing up #-----------------------------------------------------------------------------------------# # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # # querying the number of rows # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # updated_rows <- statcast_update_comb_distinct_clean %>% select(game_date) %>% nrow() # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # # pretty printing # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # cat("---------\n") cat("Overall:\n") cat(updated_rows, "rows added\n") statcast_update_comb_distinct_clean %>% object_size() %>% print() cat("---------\n") # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # # appending to main table in database # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # dbWriteTable( statcast_db_chunk, "statcast_data", statcast_update_comb_distinct_clean, append = TRUE, overwrite = FALSE, temporary = FALSE) } dbClearResult(statcast_res) dbDisconnect(statcast_db) dbDisconnect(statcast_db_chunk) rm(statcast_update_comb_distinct_clean) gc() 
#-----------------------------------------------------------------------------------------# # Putting into real database ---- #-----------------------------------------------------------------------------------------# # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # # Connecting to databases # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # statcast_db <- dbConnect(SQLite(), "data/statcast_db_rebuilt.sqlite3") statcast_db_chunk <- dbConnect(SQLite(), "data/statcast_db_rebuilt_chunk.sqlite3") # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # # Putting into real databsse # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # statcast_res <- dbSendQuery( statcast_db_chunk, "SELECT * FROM statcast_data" ) while (!dbHasCompleted(statcast_res)) { statcast_chunk <- dbFetch(statcast_res, n = 5e5) dbWriteTable( statcast_db, "statcast_data", value = statcast_chunk, append = TRUE, temporary = FALSE) gc() } dbClearResult(statcast_res) rm(statcast_chunk) gc() dbDisconnect(statcast_db) dbDisconnect(statcast_db_chunk) #=========================================================================================# # creating indexes ---- #=========================================================================================# # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # # Connecting to databases # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # statcast_db <- dbConnect(SQLite(), "data/statcast_db_rebuilt.sqlite3") ## If building database from scratch, create indexes ## db_create_index(statcast_db, "statcast_data", "pitch_type") db_create_index(statcast_db, "statcast_data", "game_date") db_create_index(statcast_db, "statcast_data", "game_year") db_create_index(statcast_db, "statcast_data", "game_pk") db_create_index(statcast_db, 
"statcast_data", "at_bat_number") db_create_index(statcast_db, "statcast_data", "pitch_number") db_create_index(statcast_db, "statcast_data", "events") db_create_index(statcast_db, "statcast_data", "type") db_create_index(statcast_db, "statcast_data", "player_name") db_create_index(statcast_db, "statcast_data", "batter") db_create_index(statcast_db, "statcast_data", "pitcher") db_create_index(statcast_db, "statcast_data", "batter_team") db_create_index(statcast_db, "statcast_data", "pitcher_team") db_create_index(statcast_db, "statcast_data", "description") db_create_index(statcast_db, "statcast_data", "hit_location") db_create_index(statcast_db, "statcast_data", "game_type") db_create_index(statcast_db, "statcast_data", "home_team") db_create_index(statcast_db, "statcast_data", "away_team") db_create_index(statcast_db, "statcast_data", "ballpark") db_create_index(statcast_db, "statcast_data", "bb_type") db_create_index(statcast_db, "statcast_data", "balls") db_create_index(statcast_db, "statcast_data", "strikes") db_create_index(statcast_db, "statcast_data", "outs_when_up") db_create_index(statcast_db, "statcast_data", "inning") db_create_index(statcast_db, "statcast_data", "stand") db_create_index(statcast_db, "statcast_data", "p_throws") db_create_index(statcast_db, "statcast_data", "utc_date_round_chr") dbGetQuery(statcast_db, "SELECT * FROM sqlite_master WHERE type = 'index'") dbExecute(statcast_db, "VACUUM") dbDisconnect(statcast_db) # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # ---- THIS IS THE END! ---- # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
c6407bd5235a78fd8a780e583ae5f26a9dbf0d90
fc7aa3397397065a8afd74fc9a682bcf515cfce2
/Ejercicios/derivada.R
da7c6d3f6edd6670c752ab6cfae4b61a6b5cb2ad
[]
no_license
valentinaescobarg/Analisis-Numerico
21a60fb127c7e4ad8f2415761951d9428a1404a1
1531b9c874d1bf09f62a8770d58b7b370d88ee90
refs/heads/master
2021-07-10T11:51:56.411842
2020-07-12T02:31:55
2020-07-12T02:31:55
null
0
0
null
null
null
null
UTF-8
R
false
false
588
r
derivada.R
x=c(1.8, 1.9, 2, 2.1, 2.2) y=c() b=2 a=2 for (i in 0:5) y[i]=x[i]*exp(x[i]) cat(x) cat(y) f= function(x) x*exp(x) #derivar con 4.4 h=0.1 cuatro= function(x1,x2,x3,h){ valor=1/(2*h)*(-3*f(x1)+4*f(x2)-f(x3)) cat(valor) return valor } cinco= function(x1,x2,h){ valor=1/(2*h)*(f(x1)-f(x2)) cat(valor) return valor } derivar=function(x){ return exp(x)*x*exp(x) } cuatro(x[3],x[4], x[5],h) cuatro(x[3],x[2], x[1],h) cinco(2.1,1.9,h) h=0.2 cinco(2.2,1.8,h) derivar(2) error1=derivar(x[3])-cuatro(x[3],x[4], x[5],h)
d7688e82541e45d044ea4fcf81d4db40a3a76fb9
98ede6f295123fdda5f61d317e9c8a0ccb07f726
/man/simARIMA.Rd
1a901da40c4507d956f419b7b0600e874420bc84
[]
no_license
statmanrobin/ts343
51d889e6cdc4f6dff0fd78fc7f653c2aaa5f517b
1f6fba367a31b70beffd25c138f9a8c83200c204
refs/heads/master
2023-05-10T20:43:10.059506
2023-05-03T14:31:55
2023-05-03T14:31:55
169,302,851
2
1
null
null
null
null
UTF-8
R
false
true
700
rd
simARIMA.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/simARIMA.R \name{simARIMA} \alias{simARIMA} \title{Simulate data from an ARIMA model} \usage{ simARIMA(n = 100, delta = 0, phi = NULL, d = 0, theta = NULL, sderror = 1) } \arguments{ \item{n}{Length of the series to simulate} \item{delta}{Constant term (delta)} \item{phi}{AR coefficients (use c( ) to enter more than one)} \item{d}{Number of differences (default is d=0)} \item{theta}{Moving average coefficients (use c( ) to enter more than one)} \item{sderror}{Standard deviation of the error term} } \value{ A time series simulated from the ARIMA(p,d,q) model } \description{ Simulate data from an ARIMA model }
6d678284045b3370a11aa424176715ca31934ee8
2ea3e7bec58fa389e10f15a7627862dd5b89f5cf
/simple_mondRian.R
93261d2cd6f8546c28b0324a99d6e95a406b61e2
[]
no_license
borstell/mondriaRt
b8d2da2e7bde295632a3e857920118a453bc30aa
2581c1cecbf4783a29fa0b1a7672d1f6e79b424b
refs/heads/master
2022-12-27T15:01:56.987370
2020-10-04T12:02:20
2020-10-04T12:02:20
259,119,477
0
0
null
null
null
null
UTF-8
R
false
false
916
r
simple_mondRian.R
library(ggplot2) library(tidyverse) #library(patchwork) # Make color palette clrs <- c(rep("#C52F24",3), rep("#1513C6",4), rep("#FDF851",3), "#7AB3BA", "#000000", rep("#FFFFFF",5)) # Create a dataframe with random rectangles xa <- sample(0:4,15,replace=T) ya <- sample(0:4,15,replace=T) df <- data.frame(xmin=xa, ymin=ya) df <- df %>% rowwise() %>% mutate(xmax = sample(0:(4-xmin+1),1)) %>% mutate(ymax = sample(0:(4-ymin+1),1)) %>% mutate(color = sample(clrs, 1)) # Plot the Mondrian-like canvas ggplot(df) + geom_rect(aes(xmin=0, xmax=5, ymin=0, ymax=5, fill="white"), color="black", size=2) + geom_rect(aes(xmin = xmin, xmax = xmax, ymin = ymin, ymax = ymax, fill=color), color="black", size=2) + scale_fill_identity() + scale_color_identity() + xlim(0,5) + ylim(0,5) + coord_flip() + theme_void() + theme(legend.position = "none")
a24e6472961ec2142ca269faa1abb1d1da87de7e
b055dc416dad13e82736c34f62ee7f0ea96dbfbd
/R/check_input.R
864a21975abb0c03e17479e57fe084c32f5def64
[]
no_license
krashkov/pcSteiner
c8cc3aff2cc35159f124300638285c7c8a189157
b62b9901f5ac26a4c8c3f33a288a0685f971559d
refs/heads/master
2021-07-19T18:20:54.257493
2020-08-21T18:59:34
2020-08-26T12:35:33
209,090,640
2
0
null
null
null
null
UTF-8
R
false
false
3,307
r
check_input.R
check_input <- function (graph, terminals, lambda, root, depth, eps, max_iter) { # Check graph if (! is.igraph(graph)) stop("Error: Graph is not an igraph graph") if (length(V(graph)) == 0 ) stop("Error: Graph does not contain vertices") if (! is.connected(graph)) stop("Error: The graph is not connected. Run pcs.forest instead") if (is.null(V(graph)$prizes)) stop("Error: Vertices do not have prizes") if (is.null(E(graph)$costs)) stop("Error: Edges do not have costs") # Check lambda if (class(lambda) != "numeric") stop("Error: lambda is not a number") # Check names of vertices if (is.null(V(graph)$name)) { # creating name attribute V(graph)$name <- as.character(1:length(V(graph))) } else { # creating new name and realname attributes V(graph)$realname <- V(graph)$name V(graph)$name <- as.character(1:length(V(graph))) } # Mathcing names of vertices and terminals if possible if (class(terminals) == "character") { # terminals contain realname of vertices if (sum(terminals %in% V(graph)$realname) != length(terminals)) { stop("Error: Vertices names do not contain terminal names") } else { # Convert realnames of terminals to integer id's (character id's) terminal_id <- match(terminals, V(graph)$realname) terminal_name <- V(graph)$name[terminal_id] } } else if (class(terminals) == "numeric") { # terminals contains id's of vertices terminal_id <- terminals terminal_name <- V(graph)$name[terminal_id] } else stop("Error: Invalid type of terminals") # Mathcing names of vertices and root if possible if (class(root) == "character") { if (sum(root %in% V(graph)$realname) != 1) { stop("Error: Vertices names do not contain root name") } else { root_id <- match(root, V(graph)$realname) root_name <- V(graph)$name[root_id] } } else if (class(terminals) == "numeric") { root_id <- root root_name <- V(graph)$name[root_id] } else stop("Error: Invalid type of root") # Check depth if (class(depth) != "numeric") { stop("Error: depth is not a number") } # Check eps if (class(eps) != "numeric") 
stop("Error: eps is not a number") # Check max_iter if (class(max_iter) != "numeric") stop("Error: max_iter is not a number") checkInput_res <- list() checkInput_res$vert_names <- V(graph)$name checkInput_res$terminal_id <- terminal_id checkInput_res$terminal_name <- terminal_name checkInput_res$root_id <- root_id checkInput_res$root_name <- root_name return(checkInput_res) }
bf63bfae2be31ebaaa56c246fdcad557bbd04e80
7917fc0a7108a994bf39359385fb5728d189c182
/cran/paws.storage/man/efs_tag_resource.Rd
5514e4e176ad70ae0e7f0bbb7708c51e428d4d06
[ "Apache-2.0" ]
permissive
TWarczak/paws
b59300a5c41e374542a80aba223f84e1e2538bec
e70532e3e245286452e97e3286b5decce5c4eb90
refs/heads/main
2023-07-06T21:51:31.572720
2021-08-06T02:08:53
2021-08-06T02:08:53
396,131,582
1
0
NOASSERTION
2021-08-14T21:11:04
2021-08-14T21:11:04
null
UTF-8
R
false
true
810
rd
efs_tag_resource.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/efs_operations.R \name{efs_tag_resource} \alias{efs_tag_resource} \title{Creates a tag for an EFS resource} \usage{ efs_tag_resource(ResourceId, Tags) } \arguments{ \item{ResourceId}{[required] The ID specifying the EFS resource that you want to create a tag for.} \item{Tags}{[required]} } \value{ An empty list. } \description{ Creates a tag for an EFS resource. You can create tags for EFS file systems and access points using this API operation. This operation requires permissions for the \code{elasticfilesystem:TagResource} action. } \section{Request syntax}{ \preformatted{svc$tag_resource( ResourceId = "string", Tags = list( list( Key = "string", Value = "string" ) ) ) } } \keyword{internal}
f91b0ffb7937643a0e12a8b2273f302b116323cc
93ba97f1adf1432aa86a11413ce6506439b2b6e9
/man/simulate.PlackettLuce.Rd
06738d2c952031ca2e7077e646136616efe73314
[]
no_license
Felix660/PlackettLuce
2fc53b68b7c8da23ef3dca94bbb2d1a6c59a7d26
a162cfe8d19278a8aa35f4e546a4693d029a8f8e
refs/heads/master
2023-07-14T01:38:16.819262
2021-08-24T11:10:22
2021-08-24T11:10:22
null
0
0
null
null
null
null
UTF-8
R
false
true
2,682
rd
simulate.PlackettLuce.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/simulate.R \name{simulate.PlackettLuce} \alias{simulate.PlackettLuce} \title{Simulate from \code{PlackettLuce} fitted objects} \usage{ \method{simulate}{PlackettLuce}( object, nsim = 1L, seed = NULL, multinomial = FALSE, max_combinations = 20000, ... ) } \arguments{ \item{object}{an object representing a fitted model.} \item{nsim}{number of response vectors to simulate. Defaults to \code{1}.} \item{seed}{an object specifying if and how the random number generator should be initialised. Either \code{NULL} or an integer that will be used in a call to \code{set.seed} before simulating the rankings. If set, the value is saved as the \code{seed} attribute of the returned value. The default, \code{NULL}, will not change the random generator state, and return \code{.Random.seed} as the \code{seed} attribute.} \item{multinomial}{use multinomial sampling anyway? Default is \code{FALSE}. see Details.} \item{max_combinations}{a positive number. Default is \code{20000}. See Details.} \item{...}{additional optional arguments.} } \value{ A \code{data.frame} of \code{\link{rankings}} objects of the same dimension as \code{object$rankings}. } \description{ Simulate from \code{PlackettLuce} fitted objects } \details{ If \code{multinomial} is \code{FALSE} (default) and there are no tie parameters in the object (i.e. \code{length(object$ties) == 1}), then rankings are sampled by ordering exponential random variates with rate 1 scaled by the estimated item-worth parameters \code{object$coefficients} (see, Diaconis, 1988, Chapter 9D for details). In all other cases, the current implementation uses direct multinomial sampling, and will throw an error if there are more than \code{max_combinations} combinations of items that the sampler has to decide from. This is a hard-coded exit to prevent issues relating to the creation of massive objects in memory. 
If \code{length(object$ties) > 1} the user's setting for \code{multinomial} is ignored and \code{simulate.PlackettLuce} operates as if \code{multinomial} is \code{TRUE}. } \examples{ R <- matrix(c(1, 2, 0, 0, 4, 1, 2, 3, 2, 1, 1, 1, 1, 2, 3, 0, 2, 1, 1, 0, 1, 0, 3, 2), nrow = 6, byrow = TRUE) colnames(R) <- c("apple", "banana", "orange", "pear") mod <- PlackettLuce(R) simulate(mod, 5) s1 <- simulate(mod, 3, seed = 112) s2 <- simulate(mod, 2, seed = 112) identical(s1[1:2], s2[1:2]) } \references{ Diaconis (1988). \emph{Group Representations in Probability and Statistics}. Institute of Mathematical Statistics Lecture Notes 11. Hayward, CA. }
103fd29a377079bc796aa5a5447ee86abb56ed36
29585dff702209dd446c0ab52ceea046c58e384e
/plmm/R/cal_df_gamma1.R
9caf7a2935097e14561940a5e09450902d870a1a
[]
no_license
ingted/R-Examples
825440ce468ce608c4d73e2af4c0a0213b81c0fe
d0917dbaf698cb8bc0789db0c3ab07453016eab9
refs/heads/master
2020-04-14T12:29:22.336088
2016-07-21T14:01:14
2016-07-21T14:01:14
null
0
0
null
null
null
null
UTF-8
R
false
false
1,756
r
cal_df_gamma1.R
# Effective degrees of freedom, 2*tr(S) - tr(SS'), of the binned kernel
# smoother used in the plmm fit (Gaussian kernel, bandwidth h).
#
# Args:
#   yXb        numeric response vector (residual-type quantity) to be binned
#              against the smoothing covariate plmm$T_mat.
#   poly.index smoother order: 1 = local linear, 0 = Nadaraya-Watson.
#   h          kernel bandwidth.
#   nbins      number of bins handed to binning() (package sm).
#   plmm       fitted plmm object; plmm$T_mat holds the smoothing covariate.
#
# Returns: scalar 2*tr(S) - tr(SS'), where S is the smoother ("hat") matrix
# evaluated on the binned data.
cal_df_gamma1 <- function(yXb, poly.index, h, nbins, plmm){
  bing <- binning(y = yXb, x = as.vector(plmm$T_mat), nbins = nbins)
  x_til <- bing$x                    # bin centres
  M <- length(x_til)
  # C: each row repeats the per-bin observation counts.
  C <- matrix(rep(bing$x.freq, M), ncol = M, byrow = TRUE)
  # X_til[l, m] = x_til[m] - x_til[l]: pairwise differences of bin centres.
  # Row l holds the offsets of every bin centre from centre l.
  X_til <- matrix(rep(x_til, rep(M, M)), byrow = TRUE, ncol = M) -
    matrix(rep(x_til, rep(M, M)), byrow = FALSE, ncol = M)
  W_til <- exp(-0.5 * (X_til / h)^2) # Gaussian kernel weights
  WC <- W_til * C                    # count-weighted kernel weights

  ### tr(S) ###
  if (poly.index == 1) {             # local linear
    # Kernel moments S0, S1, S2 per bin (rowSums replaces the original
    # `WC %*% rep(1, M)` matrix products; numerically identical).
    s0 <- rowSums(WC)
    s1 <- rowSums(WC * X_til)
    s2 <- rowSums(WC * X_til^2)
    s0s2_s1s1 <- s0 * s2 - s1^2      # determinant of (X'WCX) per bin
    S.ll <- s2 / s0s2_s1s1           # diagonal of the smoother matrix
    tr_S <- sum(S.ll * bing$x.freq)
  } else if (poly.index == 0) {      # Nadaraya-Watson (local constant)
    S.ll <- 1 / rowSums(WC)
    tr_S <- sum(S.ll * bing$x.freq)
  }

  ### tr(SS') ###
  WWC <- W_til * WC                  # squared-kernel analogue of WC
  if (poly.index == 1) {             # local linear
    z0 <- rowSums(WWC)
    z1 <- rowSums(WWC * X_til)
    z2 <- rowSums(WWC * X_til^2)
    # e'(X'WCX)^{-1} X'W^2CX (X'WCX)^{-1} e, expanded per bin.
    a.l <- s2 / s0s2_s1s1
    b.l <- -s1 / s0s2_s1s1
    SS.ll <- a.l^2 * z0 + 2 * a.l * b.l * z1 + b.l^2 * z2
    tr_SS <- sum(SS.ll * bing$x.freq)
  } else if (poly.index == 0) {      # Nadaraya-Watson
    SS.ll <- S.ll^2 * rowSums(WWC)
    tr_SS <- sum(SS.ll * bing$x.freq)
  }

  2 * tr_S - tr_SS
}
5afdafb22db9f5664633e13c5f7f72eec8db944c
4cb402022fab6607fd908b3169298562538e411e
/IDcandles.R
f618b0a03300e25109d9c4f6af768006b6126b6a
[]
no_license
DrewArnold1/Candlestick-forex-analysis
57171467b1ed3ab37e35f39fafb6bdf1db9157e2
694842ba64c84e26f16c0ff6bcb73a42a0ff2dba
refs/heads/master
2021-01-10T17:06:50.583779
2016-01-20T19:16:47
2016-01-20T19:16:47
50,052,263
0
0
null
null
null
null
UTF-8
R
false
false
31
r
IDcandles.R
# Placeholder for candlestick-pattern identification on forex data `fx`.
# The body is intentionally empty; every call currently returns NULL.
IDcandles <- function(fx) {
  NULL
}
4fe8c23222d4c7d457ade14f121262c9a967f998
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/gaston/examples/lmm.diago.likelihood.Rd.R
6483ef42fc2ad86806bb1a746b6401a2c58daac6
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
912
r
lmm.diago.likelihood.Rd.R
library(gaston)

### Name: lmm.diago.likelihood
### Title: Likelihood of a linear mixed model
### Aliases: lmm.diago.likelihood lmm.diago.profile.likelihood
### Keywords: Eigen decomposition Likelihood

### ** Examples

# Load data (AGT genotype data shipped with gaston)
data(AGT)
x <- as.bed.matrix(AGT.gen, AGT.fam, AGT.bim)

# Compute Genetic Relationship Matrix
K <- GRM(x)

# eigen decomposition of K (reused by every lmm.diago.* call below)
eiK <- eigen(K)

# simulate a phenotype with known variance components tau = 1, sigma2 = 2
set.seed(1)
y <- 1 + lmm.simu(tau = 1, sigma2 = 2, eigenK = eiK)$y

# Likelihood on a 30x30 (tau, sigma^2) grid ...
TAU <- seq(0.5,1.5,length=30)
S2 <- seq(1,3,length=30)
lik1 <- lmm.diago.likelihood(tau = TAU, s2 = S2, Y = y, eigenK = eiK)
# ... and along a grid of heritability values h^2
H2 <- seq(0,1,length=51)
lik2 <- lmm.diago.likelihood(h2 = H2, Y = y, eigenK = eiK)

# Plotting: contour of the 2-D likelihood surface with the profile
# likelihood path overlaid, then the likelihood as a function of h^2
par(mfrow=c(1,2))
lik.contour(TAU, S2, lik1, heat = TRUE, xlab = "tau", ylab = "sigma^2")
lines(lik2$tau, lik2$sigma2)
plot(H2, exp(lik2$likelihood), type="l", xlab="h^2", ylab = "likelihood")
6436e4e442c94065520a29d4c42553ce28d051c1
3fdac81b76b7352be7894e3c33c86ca32ab75641
/Analyse01aProvenance.R
fbc7a3c17931b4b90d7447e1d87bf409b7d41be5
[]
no_license
Stan112732/Airbnb-webscraping-program
e42c9f97f5d7f357db552963d5f1e353ba1df106
9eef1551255ea4260c953b906a342cc18ce7430a
refs/heads/main
2023-07-29T19:12:26.484269
2021-09-12T12:48:57
2021-09-12T12:48:57
405,632,346
0
0
null
null
null
null
UTF-8
R
false
false
1,741
r
Analyse01aProvenance.R
# This program lets you choose a period to analyse, examine whether there
# is a seasonal differentiation by selecting a country and a year for which
# visitors passed through the studied area, and visualise the result.
# A bar chart of visitor origin by country is produced for the chosen period.
library(sqldf)
library(ggplot2)
library(dplyr)
library(forcats)

# The file saved after cleaning can also be imported for the next steps:
#read.csv("DonneNettoyee.csv")

# Plot the distribution of visitor origin (by country) for reviews written
# between DateDebut and DateFin (exclusive bounds, "YYYY-MM-DD" strings).
# NOTE(review): relies on a data frame `review_all` (with columns
# date_review, Country, reviewer_id) existing in the calling environment.
PeriodeCherche = function(DateDebut,DateFin){
  # Keep only reviews strictly inside the requested period.
  Periode<-subset(review_all, as.Date(date_review) > as.Date(DateDebut) & as.Date(date_review) < as.Date(DateFin))
  # Count visitors per country with SQL over the filtered data frame.
  sommePays<-sqldf("select Country, count(reviewer_id) as NombreVisiteur FROM Periode GROUP BY Country")
  # Compute the percentage of visitors per country, rounded to 1 decimal.
  sommePays$Percentage<-sommePays$NombreVisiteur/sum(sommePays$NombreVisiteur)*100
  sommePays$Percentage=round(sommePays$Percentage,1)
  #sommePays$Percentage<-paste(format(sommePays$NombreVisiteur/sum(sommePays$NombreVisiteur)*100,digits=1), sep='')
  # Horizontal bar chart, countries ordered by percentage, labelled bars.
  sommePays %>%
    mutate(Country = fct_reorder(Country, Percentage)) %>%
    ggplot(aes(x=Country, y=Percentage)) +
    geom_bar(stat="identity", fill="#f68060", alpha=.6, width=0.6) +
    geom_text(mapping = aes(label =format(Percentage,digits=2)),size=3,vjust=0.5)+
    ggtitle(paste("Analyse de provenance des visiteurs a la zone","entre", DateDebut ,"et" ,DateFin))+
    coord_flip() +
    xlab("") +
    theme_bw() +
    scale_y_continuous(name='Pourcentage(%)', breaks=seq(0,100,10), limits=c(0,100))
  # puts the figure on each bar
}
3c556382bd4f108c2632d0a4166812d87914e788
af222d50d6c1449590d12e3be4dd4d95b6a13220
/ScrapingPelisPlus2.R
2cc12c5c4fab2c96e1ef76ed23a2059417d9e1f4
[]
no_license
CamilaPinto98/scrapping_cuevana3_pelisplus
424f453de92ae35da86c588241f320ee7e8f82c2
f633bb9034affc25dc5b9689af63b2558eefa312
refs/heads/main
2023-06-27T17:05:06.720736
2021-07-30T16:45:52
2021-07-30T16:45:52
391,122,794
0
0
null
null
null
null
UTF-8
R
false
false
1,851
r
ScrapingPelisPlus2.R
# NOTE(review): this script was pasted out of an R Markdown document; the
# YAML header and the prose line below were left uncommented, which made the
# file invalid R. They are preserved here as comments so the script parses.
#
# ---
# title: "BIENVENIDO A NUESTRO PROYECTO DE PAGINAS WEB"
# output: html_document
# ---
#
# Database: below we show how the library is installed and data is loaded
# to analyse the pelisplus site.

### Install and load the scraping library.
# NOTE(review): install.packages() in a script re-installs on every run;
# consider running these once interactively instead.
install.packages("rvest")
install.packages("curl")
install.packages("xml2")
library("rvest")

### Target site URL.
Direccion <- "https://pelisplushd.net/"

### Read the landing page.
pelisplus <- read_html(Direccion)

### Movie posters.
Banner <- html_nodes(pelisplus, css=".posters-caps")
Banner_text <- html_text(Banner)
Banner_text

### Covers of the movies recommended on the landing page.
recomendada <- html_nodes(pelisplus, css= ".Posters-img")
recomendadasi <- html_text(recomendada)

### Movie ratings shown on the landing page.
valoracion <- html_nodes(pelisplus, css= "#default-tab-1 span")
valoracionsi <- html_text(valoracion)

### Simulate paging: print the URLs of the first 10 pages.
for(nroPagina in 1:10){
  print(nroPagina)
  print(paste("https://pelisplushd.net/" , nroPagina, sep = ""))
}

### Content categories of the site.
categorias <- html_nodes(pelisplus, css= ".nav-link")
categorias1 <- html_text(categorias)

### Browse page 2 of the full movie list.
print(paste("navegando nro pag", 2))
urlpelisplus <- paste("https://pelisplushd.net/peliculas?page=2", 2 , sep = "")
print(urlpelisplus)
pelisplus <- read_html(urlpelisplus)
peliculas <- html_nodes(pelisplus, css=".active a")

### Most-viewed movies of the day.
masvistas <- html_nodes(pelisplus, css="#load_topivews img" )
masvistas1 <- html_text(masvistas)

### Analytics site used to gauge traffic to the page.
enlace <-"https://www.similarweb.com/website/pelisplus.so/"
1bfff398d66d7860b67f192b38f4e3a0f0af693c
f289a04c54a0a6c9f024e6b75b9a08ed1ca5f128
/parser/1xbet.R
af569f0a05ab69f88aadeaab1977afad7cafdf8d
[]
no_license
pabelspy/sumo-odds
f571d589d730be408bc59562072344a975e4b328
5f3e319e49eac80faebd798ad2f1649d406ba0a2
refs/heads/master
2021-09-14T17:20:08.463625
2018-05-16T15:20:45
2018-05-16T15:20:45
null
0
0
null
null
null
null
UTF-8
R
false
false
575
r
1xbet.R
library(rvest)
library(tidyverse)

# Extract the betting-odds table from a raw 1xbet HTML page.
#
# Collects every active bet link, turns its data-* attributes into columns,
# and spreads bet names into odds columns. Returns a tibble sorted by game
# id, or NULL (invisibly) when the expected attributes are missing.
parse_1xbet <- function(raw_html) {
  wanted <- c(
    "gameid",
    "coef",
    "betname",
    "opp1",
    "opp2"
  )

  # One row per bet link; columns come from the tag's data-* attributes.
  bets <- raw_html %>%
    read_html() %>%
    html_nodes("a:not(.non).c-bets__bet") %>%
    html_attrs() %>%
    map_df(bind_rows)
  # Strip the "data-" prefix from the attribute names.
  bets <- set_names(bets, sub("data-", "", names(bets)))

  # Proceed only when every expected column was scraped.
  all_present <- is.data.frame(bets) &&
    length(setdiff(wanted, colnames(bets))) == 0
  if (all_present) {
    bets %>%
      select(one_of(wanted)) %>%
      spread(betname, coef) %>%
      mutate_at(1, as.integer) %>%
      mutate_at(4:5, as.numeric) %>%
      arrange(gameid)
  }
}
7c8eef9ba7f6ff3aa30e30a3ad0293f1772644fa
705b1d948fe80e38030266fc39bd0567c82aeafd
/Dusky_Redo.Fig4.R
860de411d418c92b6de1a54b5e20f0be9f6a10fd
[]
no_license
JuanMatiasBraccini/Git_shark_acoustic
fc44047b4c04f24e3d3bde9d6f0101f2361c603a
2b483a060399f78aff606cbb28e3f8d151fc13e0
refs/heads/master
2022-09-26T16:56:34.381428
2022-09-13T07:55:48
2022-09-13T07:55:48
191,541,988
0
0
null
null
null
null
UTF-8
R
false
false
3,087
r
Dusky_Redo.Fig4.R
#Re do Simons
# Recreates the "probability of migrating north" figure (4 panels:
# size effect and month effect, by sex) from model outputs stored in
# two Excel workbooks, writing the result to a TIFF file.
if(!exists('handl_OneDrive')) source('C:/Users/myb/OneDrive - Department of Primary Industries and Regional Development/Matias/Analyses/SOURCE_SCRIPTS/Git_other/handl_OneDrive.R')
setwd(handl_OneDrive("Analyses/Acoustic_tagging/Dusky_migration/Re.do.Fig.4"))

# Read the size-effect curves (FL.xlsx) and month-effect curves (Month.xlsx);
# each workbook holds a single sheet named "dat".
library(RODBC)
channel <- odbcConnectExcel2007("FL.xlsx")
FL<- sqlFetch(channel,"dat")
close(channel)
channel <- odbcConnectExcel2007("Month.xlsx")
Month<- sqlFetch(channel,"dat")
close(channel)

# Shade a confidence band: build a closed polygon from the lower (Ylow)
# and upper (Yhigh) envelopes along X and fill it with colour COL.
CI.pol=function(X,Ylow,Yhigh,COL,BORDER)
{
  XX=c(X,tail(X, 1),rev(X),X[1])
  YY <- c(Ylow, tail(Yhigh, 1), rev(Yhigh), Ylow[1])
  polygon(XX,YY,col=COL,border=BORDER)
}

# Plot colours: greyscale line colours and translucent band fills.
male.col="grey40"
fem.col="grey40"
col.bg.F=rgb(.1,.1,.1,alpha=0.1)
col.bg.M=rgb(.1,.1,.1,alpha=0.1)
# Alternative coloured scheme (kept for reference):
#fem.col=rgb(.6,0,0.1,alpha=0.6)
#col.bg.F=rgb(.75,0,0,alpha=0.1)
#male.col="blue"
#col.bg.M=rgb(0,0,0.75,alpha=0.1)

# Open the output device and lay out a 2x2 panel grid.
setwd(handl_OneDrive("Analyses/Acoustic_tagging/FRDC/Outputs_movement/Natal_migration/Paper"))
tiff(file="Figure_prob_movement N_S_Simon.tiff",width = 2400, height = 2400,units = "px", res = 300,compression = "lzw")
par(mfrow=c(2,2),mar=c(3,3,1.5,.1), oma=c(1,1.75,.1,1),las=1,mgp=c(1,0.8,0))

#Size effect
#females
#January (solid line + 95% band)
a=subset(FL,sex=="F" & month==1)
plot(a$length,a$median,type='l',lwd=2,col=fem.col,xlab="",ylab="",cex.axis=1.5,ylim=c(0,1))
CI.pol(X=a$length,Ylow=a$lower95,Yhigh=a$upper95,col.bg.F,"transparent")
legend("topleft",c("January","August"),lty=c(2,1),lwd=3,col=c(fem.col,fem.col),bty="n",cex=1.5)
#August (dashed line + 95% band on the same panel)
a=subset(FL,sex=="F" & month==8)
lines(a$length,a$median,lty=2,lwd=2,col=fem.col)
CI.pol(X=a$length,Ylow=a$lower95,Yhigh=a$upper95,col.bg.F,"transparent")
#mtext("Migration probability",2,las=3,cex=1.75,line=3.1)
mtext("Females",3,cex=1.5,line=0)
#males
#January
a=subset(FL,sex=="M" & month==1)
plot(a$length,a$median,type='l',lwd=2,col=male.col,xlab="",ylab="",cex.axis=1.5,ylim=c(0,1))
CI.pol(X=a$length,Ylow=a$lower95,Yhigh=a$upper95,col.bg.M,"transparent")
legend("topleft",c("January","August"),lty=c(2,1),lwd=3,col=c(male.col,male.col),bty="n",cex=1.5)
#August
a=subset(FL,sex=="M" & month==8)
lines(a$length,a$median,lty=2,lwd=2,col=male.col)
CI.pol(X=a$length,Ylow=a$lower95,Yhigh=a$upper95,col.bg.M,"transparent")
mtext("Fork length (cm) ",1,cex=1.5,line=2.35)
mtext("Males",3,cex=1.5,line=0)

#Month effect
#females (median line + 95% band over months)
a=subset(Month,Sex=="F")
plot(a$Month,a$Median,type='l',lwd=2,col=fem.col,xlab="",ylab="",cex.axis=1.5,ylim=c(0,1))
CI.pol(X=a$Month,Ylow=a$PER_Low_95,Yhigh=a$PER_Upp_95,col.bg.F,"transparent")
#mtext("Prob. of occurring north",2,las=3,cex=1.75,line=3.1)
#males
a=subset(Month,Sex=="M")
plot(a$Month,a$Median,type='l',lwd=2,col=male.col,xlab="",ylab="",cex.axis=1.5,ylim=c(0,1))
CI.pol(X=a$Month,Ylow=a$PER_Low_95,Yhigh=a$PER_Upp_95,col.bg.M,"transparent")
mtext("Month ",1,cex=1.5,line=2.35)
# Shared y-axis label across all panels, then close the device.
mtext("Prob. of migrating north",2,las=3,cex=1.5,line=0,outer=T)
dev.off()
521c883445539a9295b50a5ecc9764d47b6b28da
30346b23ba5dbf95d60c75a43433c865ce9dd6bc
/analises-yasmin-atbc-2021/scripts-modelos-nicho/processamento especie -3.R
f77c64083903e07a3e9062f0af3420feab5d4dec
[]
no_license
rbcer17/pesq-nicho-atbc2021
88f8215e34300df009b99d5c761bf79ff951f3a5
a7a07d6c19f72c499b978f9ea430067037faee61
refs/heads/master
2023-06-12T22:34:47.652751
2021-07-08T14:16:14
2021-07-08T14:16:14
null
0
0
null
null
null
null
UTF-8
R
false
false
2,902
r
processamento especie -3.R
# Occurrence-point preprocessing for niche modelling: load points and an
# environmental layer, remove duplicates, drop points outside the study
# area, and thin points to reduce sampling bias (1 point per 10 km circle).
library(rgdal)
library(raster)
library(dismo)
# Define a spatial projection for lat/long (WGS84):
longlat_WGS = CRS("+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0")

####################### 2. LOAD THE REQUIRED FILES ########################
# Load the occurrence points:
pontos_brutos = read.csv("C:/projetosR/Modelagem/analise/Crowned/Localities/GA_Verao.csv")
head(pontos_brutos)
str(pontos_brutos)
# Load the environmental layer in 'asc' format, already edited to serve as template
variavel = raster("C:/projetosR/Modelagem/analise/Crowned/Ms/Crowned/wc2.1_2.5m_bio_1.asc")
crs(variavel) = longlat_WGS
variavel
plot(variavel)

########################## 3. REMOVE DUPLICATED POINTS ############################
# number of raw points:
length(pontos_brutos[, 1]) #148
# remove duplicates (same LONGITUDE/LATITUDE pair)
pontos_unicos = pontos_brutos[!duplicated(pontos_brutos[c("LONGITUDE","LATITUDE")]), ]
# number of unique points:
length(pontos_unicos[, 1]) #55

##################### 4. REMOVE POINTS OUTSIDE THE STUDY AREA #####################
# number of unique points:
length(pontos_brutos[, 1]) #55
# keep only the lat and long columns:
names(pontos_unicos)
ocorrencia = pontos_unicos[,27:26] # lon/lat columns
head(ocorrencia)
str(ocorrencia)
# add a projection
coordinates(ocorrencia) = ~LONGITUDE+LATITUDE # column names.
crs(ocorrencia) = longlat_WGS
# extract the layer values at each point
valores = extract(variavel, ocorrencia)
head(valores)
# find the positions with no value: such a point lies outside the layer.
# NoData in '.asc' files is usually '-9999', but you can change the value
# NOTE(review): `valores` is numeric but compared to the string "-9999",
# and `i` is never used to subset the points below — confirm whether the
# out-of-area filter was intentionally skipped.
i = which(valores != "-9999")
i #lines in the point_raw
#update the points raw and create a SpatialPoints for ocorrence points.
pontos_unicos_area = pontos_unicos
# number of remaining points:
length(pontos_unicos_area[, 1]) #51

####### 5. REMOVE SAMPLING BIAS THAT CAN LEAD TO SPATIAL AUTOCORRELATION ##########
# convert the points to SpatialPoints
names(pontos_unicos_area)
ocorrencia = pontos_unicos_area[,27:26] # lon/lat columns
coordinates(ocorrencia) = ~LONGITUDE+LATITUDE # column names.
crs(ocorrencia) = longlat_WGS
# create a 10 km buffer around the points
buffer = circles(ocorrencia, d = 10000, lonlat=TRUE) # d is the circle radius in metres
plot(buffer)
class(buffer)
# convert the circles into polygons
buffer = polygons(buffer)
# rasterize the circles
buffer= rasterize(buffer, variavel)
# keep 1 point per circle
sel = gridSample(ocorrencia, buffer, n=1)
# check the number of remaining points:
length(pontos_unicos_area[,1]) #51
length(sel[,1]) #47
# save the corrected occurrence points
sel = as.data.frame(sel)
write.csv(sel, "C:/projetosR/Modelagem/analise/Crowned/Localities/gaverao_corrigido.csv", row.names = FALSE)
92c9ff475df10c91694e62d8392370bd19009a5a
afdc42af8a468f29b56269626dccbe5859d0e595
/R_src/kernapply.ts/main/main.R
984a312d1c408be6704ce9fb816392fc68818e12
[]
no_license
pengyu/pytools
feb44c3da2922dae5a51d19abe2fbc6c05a5076f
578af6b6135f1fc999c89ca0ae0ca314cbdbfc76
refs/heads/master
2021-03-12T23:49:06.527392
2013-04-16T03:15:03
2013-04-16T03:15:03
null
0
0
null
null
null
null
UTF-8
R
false
false
165
r
main.R
# Impulse-response check of a Daniell smoothing kernel: applying the kernel
# to an 11-point series that is zero except for a single unit spike returns
# the kernel weights themselves.
x=ts(c(rep(0, 5), 1, rep(0, 5)))
kernapply(x, kernel(coef='daniell', m=4))

# Same impulse experiment on a series with frequency = 2 (two observations
# per cycle); only the time attributes of the result differ.
x=ts(c(rep(0, 5), 1, rep(0, 5)), frequency=2)
kernapply(x, kernel(coef='daniell', m=4))
5b46b9c9c4a16912156e4a0ed85a1325975fd115
d3613e73615708c26d6d38e16b446afdda1b131f
/day-1/day-1.R
9c9ec0975c12d4e2b2be72518b071c2c956a2bf7
[]
no_license
sxmorgan/advent-of-code-2020
ea8a3b7b5d185387865fcddcdef1c0fe33843ec8
1730262f19861af0ba03563d77cf0a95bbef6b29
refs/heads/main
2023-02-06T10:20:42.053319
2020-12-26T12:54:22
2020-12-26T12:54:22
317,576,893
0
0
null
null
null
null
UTF-8
R
false
false
604
r
day-1.R
# Advent of Code 2020, day 1: find the entries of the expense report that
# sum to 2020 and report their product.
library(tidyverse)

# NOTE(review): the input path is machine-specific; adjust before running.
input <- read.delim('~/Desktop/advent-of-code-20/day-1/input.txt', header = FALSE, sep = '\n')

# which 2 numbers add up to 2020?
# Keep every value whose complement (2020 - value) is also present; the
# surviving vector therefore contains both members of the pair.
nums <- input %>%
  as_tibble() %>%
  mutate(diff = 2020-V1) %>%
  mutate(presence = (diff %in% V1)) %>%
  filter(presence) %>%
  magrittr::use_series(V1)
# answer
nums[1]*nums[2]

# which 3 numbers add up to 2020?
# Brute force: build all ordered triples of values and keep those summing
# to 2020; the unique V1 values are the three members of the triple.
nums <- input %>%
  as_tibble() %>%
  mutate(V2 = V1) %>%
  mutate(V3 = V1) %>%
  expand(V1, V2, V3) %>%
  filter(V1+V2+V3 == 2020) %>%
  magrittr::use_series(V1) %>%
  unique()
# answer
nums[1]*nums[2]*nums[3]
092542ae06bdbf40f134fd634059276f3d573bd9
d65acf70ee2b94ab3f66b6af503ad61b1cab30d3
/data-raw/sf_conversion.R
56069ccfe3aad8c04bc6ba15de874579051497f4
[]
no_license
crumplecup/riparian
80ebbfa7d7108cf09bfb5511ea00ed23c92bd600
755b066fd4d52bc58caf157c061101f813cda86a
refs/heads/master
2021-06-15T10:13:39.918615
2021-04-07T22:15:56
2021-04-07T22:15:56
176,836,375
0
0
null
null
null
null
UTF-8
R
false
false
974
r
sf_conversion.R
# Convert an sp SpatialPolygons* object to an sf geometry column (sfc).
#
# Walks the sp object's 'polygons' slot and rebuilds each feature as an
# sf MULTIPOLYGON, preserving the original coordinate reference system.
#
# Args:
#   sp  an sp SpatialPolygons/SpatialPolygonsDataFrame object.
# Returns: an sfc (simple-feature geometry list-column) of MULTIPOLYGONs.
sp_to_sf <- function(sp) {
  crs_ref <- sf::st_crs(sp)
  polys <- methods::slot(sp, 'polygons')
  # Preallocate output lists instead of growing them inside the loops.
  sf_list <- vector("list", length(polys))
  for (i in seq_along(polys)) {
    p <- methods::slot(polys[[i]], 'Polygons')
    sf_polys <- vector("list", length(p))
    for (j in seq_along(p)) {
      sf_polys[[j]] <- sf::st_polygon(list(methods::slot(p[[j]], 'coords')))
    }
    sf_list[[i]] <- sf::st_multipolygon(sf_polys)
  }
  sf::st_sfc(sf_list, crs = crs_ref)
}

# Convert the project layers (assumed to exist in the workspace) to sf.
random_samples <- sp_to_sf(samples)    # annual random monitoring boxes
sf_perennial <- sf::st_as_sf(prc_per)  # perennial streams (NHD)
sf_streams <- sf::st_as_sf(prc_strms)  # hydro-enforced drainage layer (WSI)
sf_lots <- sf::st_as_sf(prc_lots)      # maptaxlots in the riparian corridor
sf_buff <- sf::st_as_sf(prc_buff)      # riparian buffer for corridor

# Ship the converted layers as package data.
usethis::use_data(random_samples)
usethis::use_data(sf_perennial)
usethis::use_data(sf_streams)
usethis::use_data(sf_lots)
usethis::use_data(sf_buff)
f4854f8088812fe0ce79fe6b58a13a2e67d9507c
29585dff702209dd446c0ab52ceea046c58e384e
/repfdr/R/ztobins.R
9dfd48f3ea134d22d49190ac542c1ed3c0e314e1
[]
no_license
ingted/R-Examples
825440ce468ce608c4d73e2af4c0a0213b81c0fe
d0917dbaf698cb8bc0789db0c3ab07453016eab9
refs/heads/master
2020-04-14T12:29:22.336088
2016-07-21T14:01:14
2016-07-21T14:01:14
null
0
0
null
null
null
null
UTF-8
R
false
false
4,511
r
ztobins.R
# Bin a matrix of z-scores and estimate, per study (column), the binned
# density of z under each association status, following Efron's
# empirical-null approach (parts adapted from the locfdr package).
#
# Args:
#   zmat                 M x n.studies matrix of z-scores (rows = features).
#   n.association.status 2 (null/alternative) or 3 (negative/null/positive).
#   n.bins               number of bin breaks; defaults to floor(sqrt(M))
#                        for small M when not supplied explicitly.
#   type                 0 = natural-spline Poisson density fit (default),
#                        1 = polynomial fit.
#   df                   degrees of freedom of the density fit.
#   central.prop         central proportion of z used to estimate pi0.
#
# Returns a list with:
#   pdf.binned.z  array [study, bin, status] of per-status bin probabilities.
#   binned.z.mat  M x n.studies matrix of bin indices for each z-score.
ztobins <- function(zmat, n.association.status = 3, n.bins = 120, type = 0, df = 7, central.prop = 0.5)
{
  # Input validation.
  if (type != 0 & type != 1)
    stop("type must equal 0 or 1")
  if (central.prop < 0 | central.prop > 1)
    stop("central.prop must take value between 0 and 1")
  M <- dim(zmat)[1]
  # For small M, shrink the default number of bins to floor(sqrt(M)).
  if(M < 120^2 & missing(n.bins))
    n.bins <- floor(sqrt(M))
  n.studies <- dim(zmat)[2]
  # Output containers: bin index per z-score, and per-status bin densities.
  binned.z.mat <- array(dim = c(M, n.studies))
  dimnames(binned.z.mat) <- dimnames(zmat)
  pdf.binned.z <- array(0, dim = c(n.studies, n.bins - 1, n.association.status))
  dimnames(pdf.binned.z) <- list(sprintf("Study %i", 1:n.studies), sprintf("bin %i", 1:(n.bins - 1)), if (n.association.status == 2) c("H:0", "H:1") else c("H:-1", "H:0", "H:1"))
  for (i in 1:n.studies) {
    ### parts of the code here are adopted from locfdr function (package locfdr by Bradley Efron)
    ## density's estimation: Poisson regression of bin counts on bin centres.
    breaks <- seq(min(zmat[,i]), max(zmat[,i]), length = n.bins)
    x <- (breaks[-1] + breaks[-length(breaks)])/2
    y <- hist(zmat[,i], breaks = breaks, plot = F)$counts
    K <- length(x) # K = n.bins-1
    if (type == 0) { # spline(default)
      f <- glm(y ~ splines::ns(x, df = df), poisson)$fit
    }
    if (type == 1) { # polynomial
      f <- glm(y ~ poly(x, df = df), poisson)$fit
    }
    # Goodness-of-fit: scaled Pearson residuals over interior bins.
    D <- (y - f)/(f + 1)^0.5
    D <- sum(D[2:(K - 1)]^2)/(K - 2 - df) # deviance
    if (D > 1.5 ) # Efron's criterion
      warning(paste("In",colnames(zmat)[i], ",f(z) misfit = ", round(D, 1), ". Rerun with increased dfmax"))
    ## theoretical null: standard normal, rescaled to the fitted mass.
    f0 <- exp(-x^2/2)
    f0 <- (f0 * sum(f))/sum(f0) # (is identical to locfdr(zmat[,i],plot=0,nulltype=0)$mat[,7] )
    ## pi0's estimation from the central portion of the z distribution.
    lowCentral <- quantile(zmat[,i], (1-central.prop)/2)
    highCentral <- quantile(zmat[,i], 1-(1-central.prop)/2)
    central <- (1:K)[x > lowCentral & x < highCentral]
    p0theo <- sum(f[central])/sum(f0[central])
    if (p0theo>=1)
      stop(paste("In",colnames(zmat)[i],"the estimated fraction of nulls is 1")) # (is identical to locfdr(zmat[,i],plot=0,nulltype=0)$fp0[1,3] )
    ## fdr's first estimation
    fdr0 <- pmin((p0theo * f0)/f, 1)
    # fdr's adjustement from Efron (equals to one in central position)
    l <- log(f) # log-likelihood
    imax <- seq(l)[l == max(l)][1]
    xmax <- x[imax] # "bin" of the max loglik
    # Force fdr to 1 over the contiguous central region around the mode.
    if (sum(x <= xmax & fdr0 == 1) > 0)
      xxlo <- min(x[x <= xmax & fdr0 == 1]) else xxlo = xmax
    if (sum(x >= xmax & fdr0 == 1) > 0)
      xxhi <- max(x[x >= xmax & fdr0 == 1]) else xxhi = xmax
    if (sum(x >= xxlo & x <= xxhi) > 0)
      fdr0[x >= xxlo & x <= xxhi] <- 1
    fdr0 <- as.numeric(fdr0) # (is identical to locfdr(zmat[,i],plot=0,nulltype=0)$mat[,8] )
    ## pi1*f1(alternative) estimation
    p1f1 <- (1 - fdr0) * f
    p1f1 <- as.numeric(p1f1) # (is identical to locfdr(zmat[,i],plot=0,nulltype=0)$mat[,11] )
    ### code from repfdr: assign each z-score to a bin index in 1..(n.bins-1).
    bins <- ceiling((zmat[, i] - min(zmat[, i]))/(x[2] - x[1]))
    bins[bins == 0] <- 1
    bins[bins == n.bins] <- n.bins - 1
    binned.z.mat[, i] <- bins
    # Normalise per-status densities over the bins.
    if (n.association.status == 2) {
      pdf.binned.z[i, , 1] <- f0/sum(f0)
      pdf.binned.z[i, , 2] <- p1f1/sum(p1f1)
    }
    if (n.association.status == 3) {
      # Status 2 = null; split the alternative mass by sign of z.
      pdf.binned.z[i, , 2] <- f0/sum(f0)
      pdf.binned.z[i, , 1] <- ifelse(x < 0, p1f1, rep(0, n.bins - 1))
      pdf.binned.z[i, , 3] <- ifelse(x > 0, p1f1, rep(0, n.bins - 1))
      pdf.binned.z[i, , 1] <- pdf.binned.z[i, , 1]/sum(pdf.binned.z[i, , 1])
      pdf.binned.z[i, , 3] <- pdf.binned.z[i, , 3]/sum(pdf.binned.z[i, , 3])
    }
    if (n.association.status != 2 & n.association.status != 3)
      stop("Invalide number of hypothesis states.")
  }
  return(list(pdf.binned.z = pdf.binned.z, binned.z.mat = binned.z.mat))
}
fcf9c247bd827695d2f3e3880fa0728aaf079eb3
bc29afd23556108467937f46bff2afa06389e4d0
/Fit-DLM-Region-ARMA.R
982467251d1182dec565e567972040f596d18ac3
[ "MIT" ]
permissive
OcnAtmCouplingAndClimateForcing/Salmon-PDO-NPGO
062df5342eb45aca7c48caf359f1c72655c8a137
c0fe414ac3f9ec8a234c495e48903c6f6b8a3ae2
refs/heads/master
2020-04-11T10:15:59.037612
2020-04-05T19:11:53
2020-04-05T19:11:53
161,708,919
0
0
null
null
null
null
UTF-8
R
false
false
8,175
r
Fit-DLM-Region-ARMA.R
#==================================================================================================
#Project Name: COAST-WIDE SALMON RECRUITMENT ANALYSIS - Fit DLM-Ricker with Time-varying Components Shared Among Regions
#Creator: Curry James Cunningham, NOAA/NMFS, ABL
#Date: 8.20.18
#
#Purpose: To fit a dynamic linear Ricker model with
#
#
#==================================================================================================
#NOTES:
# a) 100,000 iter took 7 hours for single species.
#      "Thu Feb 7 23:52:17 2019" "Fri Feb 8 09:16:57 2019"
# b) No Chinook - chains=3, iter=1e4, thin=5,
#      [1] "Wed Feb 20 09:41:02 2019" [1] "Wed Feb 20 11:54:59 2019"
#==================================================================================================
require(tidyverse)
require(dplyr)
require(ggthemes)
require(cowplot)
require(viridis)
require(rstan)
require(rstantools)
require(bayesplot)
require(shinystan)
require(BEST)
require(reshape2)

#CONTROL SECTION ==========================================================
# do.est: TRUE refits the STAN model; FALSE reloads saved fits from disk.
do.est <- TRUE
n.chains <- 3
n.iter <- 1e4
n.thin <- 5

#Define Workflow Paths ====================================================
# *Assumes you are working from the Coastwide_Salmon_Analysis R project
wd <- getwd()
dir.data <- file.path(wd,"Data")
dir.figs <- file.path(wd,"Figs","DLM-Region-ARMA")
dir.output <- file.path(wd,"Output","DLM-Region-ARMA")
dir.R <- file.path(wd,"R")
#Create output directories if they do not yet exist.
dir.create(dir.figs, recursive=TRUE)
dir.create(dir.output, recursive=TRUE)

#Source Necessary Functions =====================================
# source(file.path(dir.R, "plot-model-fit.R"))
# source(file.path(dir.R, "plot-coef-ts.R"))
# source(file.path(dir.R, "plot-other-pars.R"))

#Read SR Data ===================================================
dat <- read.csv(file.path("data","AK-WCoast-Salmon-SR.csv"), header=TRUE, stringsAsFactors=FALSE)
#Add rps and ln.rps (recruits per spawner and its log).
dat$rps <- dat$rec/dat$spawn
dat$ln.rps <- log(dat$rps)

#Subset SR Data ==============================================
# Keep finite log(R/S) for brood years 1950-2010.
dat.2 <- dat %>% filter(!is.infinite(ln.rps), !is.na(ln.rps), broodYr>=1950, broodYr<=2010)

#Update Duplicate Stock Identifiers (mostly Chinook) ==========================

#Read PDO/NPGO Data =============================================
covar.dat <- read.csv(file.path(dir.data,"Covariates","covar.dat.csv"))

#Extract Metadata ===================================================
species <- unique(dat.2$species)
n.species <- length(species)
regions <- unique(dat.2$large.region)
n.regions <- length(regions)

#Specify Offsets for Covariates ================================
#From BroodYear (one lag per species; aligned with the `species` vector).
offset <- c(2,2,1,2,3)
offset.table <- cbind(species, offset)
write.csv(offset.table, file=file.path(dir.figs,"offset.table.csv"))

#Fit STAN Models ==============================================
start <- date()
s <- 5
# for(s in 1:n.species) {
# Species 2 is skipped (see NOTES: "No Chinook").
for(s in c(1,3,4,5)) {
  print(paste('###### s',s,'of',n.species))
  temp.species <- species[s]
  dat.3 <- dat.2 %>% filter(species==temp.species)
  stocks <- unique(dat.3$stock)
  n.stocks <- length(stocks)
  #Required Attributs of the species
  stock.regions <- unique(dat.3$large.region)
  n.stock.regions <- length(stock.regions)
  stock.years <- min(dat.3$broodYr):max(dat.3$broodYr)
  n.stock.years <- length(stock.years)
  #Create Data Objects (stock x year matrices, zero-padded to maxN).
  maxN <- n.stock.years
  S <- n.stocks
  N <- vector(length=n.stocks)
  R <- n.stock.regions #Number of Regions
  region <- vector(length=n.stocks)
  K <- 2 #Number of covariates PDO, NPGO
  # covars <- array(dim=c(n.stocks,100,K)) #We will start with a temporary length of 100 years then trim down to the max N
  PDO <- array(data=0, dim=c(n.stocks,maxN))
  NPGO <- array(data=0, dim=c(n.stocks,maxN))
  #Year Pointer - For Referencing Coefficient Values
  years <- array(data=0, dim=c(n.stocks,maxN))
  #Ricker Parameters
  ln_rps <- array(data=0, dim=c(n.stocks, maxN))
  spawn <- array(data=0, dim=c(n.stocks, maxN))
  p <- 1
  for(p in 1:n.stocks) {
    #Retreive Data =================
    temp.stock <- stocks[p]
    dat.input <- dat.3 %>% filter(stock==temp.stock) %>% arrange(broodYr)
    #Assign STAN Inputs ============
    N[p] <- nrow(dat.input)
    region[p] <- which(stock.regions==unique(dat.input$large.region))
    ln_rps[p, 1:N[p]] <- dat.input$ln.rps
    spawn[p, 1:N[p]] <- dat.input$spawn
    years[p,1:N[p]] <- which(stock.years %in% dat.input$broodYr )
    #Assign Covars ===============
    # Covariates are lagged from brood year by the species-specific offset.
    n <- 1
    for(n in 1:N[p]) {
      covar.year <- dat.input$broodYr[n] + offset[s]
      # covars[p,n,1] <- covar.dat$avg[covar.dat$name=='pdo' & covar.dat$Year==covar.year] #PDO
      # covars[p,n,2] <- covar.dat$avg[covar.dat$name=='npgo' & covar.dat$Year==covar.year] #NPGO
      PDO[p,n] <- covar.dat$avg[covar.dat$name=='pdo' & covar.dat$Year==covar.year] #PDO
      NPGO[p,n] <- covar.dat$avg[covar.dat$name=='npgo' & covar.dat$Year==covar.year] #NPGO
    }#next n
  }#next p
  #Determine maximum length of covariates =====================
  # maxN <- max(N)
  temp.regions <- regions[unique(region)]
  #Truncate STAN Input Objects ======================
  # ln_rps <- ln_rps[,1:maxN]
  # spawn <- spawn[,1:maxN]
  # # covars <- covars[,1:maxN,]
  # PDO <- PDO[,1:maxN]
  # NPGO <- NPGO[,1:maxN]
  #
  #Call STAN =======================================================
  if(do.est==TRUE) {
    fit <- stan(file=file.path(dir.R,"DLM-Region-ARMA.stan"),
                model_name="DLM-Region-ARMA",
                data=list("N"=N, "maxN"=maxN, "ln_rps"=ln_rps, "spawn"=spawn, "K"=K, #"covars"=covars,
                          "PDO"=PDO, "NPGO"=NPGO, "S"=S, "R"=R, "region"=region),
                # chains=3, iter=1e5, thin=50,
                chains=n.chains, iter=n.iter, thin=n.thin,
                # chains=3, iter=1e3, thin=1,
                cores=3, verbose=FALSE, seed=101)
    #Save Output
    saveRDS(fit, file=file.path(dir.output,paste0(temp.species,'-fit.rds')))
  }else {
    fit <- readRDS(file=file.path(dir.output,paste0(temp.species,'-fit.rds')))
  }
  #Save Extras (bookkeeping needed to interpret the fit later).
  extras <- NULL
  extras$stocks <- stocks
  extras$n.stocks <- n.stocks
  extras$stock.regions <- stock.regions
  extras$n.stock.regions <- n.stock.regions
  extras$stock.years <- stock.years
  extras$n.stock.years <- n.stock.years
  saveRDS(extras, file=file.path(dir.output,paste0(temp.species,'-extras.rds')))
  # fit <- readRDS(file=file.path(dir.output,paste0(temp.species,'-',temp.stock,'-fit.rds')))
  # Evaluate Output (kept as a commented-out diagnostics scratchpad):
  # fit
  # stan_trace(fit, pars=list('beta'))
  # # stan_par(fit)
  # ## extract samples as a list of arrays
  # pars <- rstan::extract(fit)
  #
  # #Quickly Plot coefficients over time.
  # coefs.pdo <- apply(pars$coef_PDO, c(2,3), quantile, probs=c(0.025,0.25,0.5,0.75,0.975))
  # coefs.npgo <- apply(pars$coef_NPGO, c(2,3), quantile, probs=c(0.025,0.25,0.5,0.75,0.975))
  #
  # dimnames(coefs.pdo)[[2]] <- stock.regions
  # dimnames(coefs.pdo)[[3]] <- stock.years
  #
  # dimnames(coefs.npgo)[[2]] <- stock.regions
  # dimnames(coefs.npgo)[[3]] <- stock.years
  #
  # list.coefs.pdo <- melt(coefs.pdo)
  # head(list.coefs.pdo)
  # names(list.coefs.pdo) <- c('quantile')
  #
  # g <- ggplot(filter(list.coefs.pdo, Var1=='50%'), aes(x=Var3, y=value, color=factor(Var2))) +
  #        geom_line()
  # g
  #
  # #Plot parameters over time
  # med.coef <- apply(pars$coef,c(2,3), median)
  # plot(med.coef[,1])
  #
  # # rstantools::bayes_R2(fit)
  # # saveRDS(fit, file=file.path(dir.figs,"fit.rds"))
  # # #Plots
  # # bayesplot::mcmc_areas(a2)
  # # bayesplot::mcmc_areas_ridges()
  # # #Shinystan
  # # sso <- shinystan::as.shinystan(fit, model_name="DLM-Ricker")
  # # shinystan::launch_shinystan(sso)
  # shinystan::launch_shinystan(fit)
  #
  # #Plot the fit
  # plot_model_fit(fit=fit, dat.input=dat.input)
  #
  # #Plot Coefficients
  # plot_coef_ts(fit=fit, dat.input=dat.input, temp.offset=offset[s])
  #
  # #Other Parameters
  # plot_other_pars(fit=fit)
  #Unload Unnecessary DLLs
  # loaded_dlls = getLoadedDLLs()
  # loaded_dlls = loaded_dlls[stringr::str_detect(names(loaded_dlls), '^file')]
  # if(length(loaded_dlls) > 5) {
  #   dll <- head(loaded_dlls)[1]
  #   dyn.unload(as.character(dll[[1]][2]))
  # }
  # }#next p - stock
}#next s - species
end <- date()
print(start)
print(end)
951767120bdfd197ed5b8f977749cbf04e0c9cd6
304dd2903689719fc3500832692c4a84901a9872
/captstone_lvm_p2.R
4305dcf438b9479ddfbd20f650ad2e0dc9368c97
[]
no_license
VijayMudivedu/capstone_project
7fa83c36487b8c373d4679719c272a8f93f1a020
f90bfb9a1b43649be5a26c27ecc0925cdcce6f8e
refs/heads/master
2020-03-27T11:21:04.015963
2018-10-28T13:18:11
2018-10-28T13:18:11
146,480,931
0
1
null
null
null
null
UTF-8
R
false
false
78,145
r
captstone_lvm_p2.R
# ---
# title:  "Hospital Ratings CMS"
# author: "Vijay Mudivedu"
# date:   '2018-10-28'
#
# End-to-end pipeline: read the CMS "Hospital Compare" CSV extracts, clean and
# reshape each measure group (safety of care, patient experience, timeliness,
# effectiveness, mortality, readmission, imaging efficiency), merge them into a
# single provider-level frame, impute missing values with mice, and model the
# overall hospital star rating with a random forest.
#
# install.packages("mice")
# install.packages("psych")
# install.packages("doParallel")
# install.packages("cowplot")
# install.packages("tidyverse")
# install.packages("randomForest")
# install.packages("caret")

library(tidyverse)
library(mice)
library(psych)
library(cowplot)
library(randomForest)
library(caret)
library(doParallel)

# All input CSVs are expected under ./ValidFiles relative to the working
# directory (do not hard-code machine-specific absolute paths).

########################################
# General Information
########################################
general_info <- read.csv(file = "ValidFiles/Hospital General Information.csv",
                         header = TRUE, check.names = TRUE,
                         na.strings = c("Not Available", ""),
                         stringsAsFactors = TRUE)

# Drop demographic columns (identifiers/contact info) that carry no signal for
# the rating, plus descriptive columns that are redundant for this analysis.
zdemographics <- c("Hospital.Name", "Address", "City", "State", "County.Name",
                   "Phone.Number", "ZIP.Code")
zdemogrphic_vars <- which(names(general_info) %in% zdemographics)
zvar1 <- c("Hospital.Type", "Hospital.Ownership", "Emergency.Services",
           "Meets.criteria.for.meaningful.use.of.EHRs")
zvar2 <- which(names(general_info) %in% zvar1)
general_info_cleaned <- general_info[, -c(zdemogrphic_vars, zvar2)]

# The target variable "Hospital.overall.rating" has missing entries; tag them
# explicitly as "Missing" before converting to a factor so they survive as a
# class of their own.
general_info_cleaned$Hospital.overall.rating[which(is.na(general_info_cleaned$Hospital.overall.rating))] <- "Missing"
general_info_cleaned$Hospital.overall.rating <- as.factor(general_info_cleaned$Hospital.overall.rating)

# Inspect whether the NAs in the rating are random: the footnote levels below
# (IQR/OQR-only reporting, CMS-suppressed data, results unavailable) induce the
# NAs across the comparison columns, so rows carrying them are stale and can be
# purged rather than imputed.
general_info_cleaned %>%
  filter(general_info_cleaned$Hospital.overall.rating.footnote %in%
           c("Data are shown only for hospitals that participate in the Inpatient Quality Reporting (IQR) and Outpatient Quality Reporting (OQR) programs",
             "Data suppressed by CMS for one or more quarters",
             "Data suppressed by CMS for one or more quarters",
             "Results are not available for this reporting period")) %>%
  group_by(Mortality.national.comparison, Readmission.national.comparison,
           Safety.of.care.national.comparison,
           Efficient.use.of.medical.imaging.national.comparison,
           Timeliness.of.care.national.comparison,
           Effectiveness.of.care.national.comparison,
           Patient.experience.national.comparison) %>%
  summarise(count_rws = n()) %>%
  t()

# Remove the footnote levels identified above.
general_info_cleaned <- general_info_cleaned %>%
  filter(!general_info_cleaned$Hospital.overall.rating.footnote %in%
           c("Data are shown only for hospitals that participate in the Inpatient Quality Reporting (IQR) and Outpatient Quality Reporting (OQR) programs",
             "Data suppressed by CMS for one or more quarters",
             "Results are not available for this reporting period"))

# Class balance of the target: the star rating is roughly Gaussian.
ggplot(data = general_info_cleaned, aes(x = Hospital.overall.rating)) +
  geom_bar(na.rm = TRUE, fill = "steelblue") +
  geom_text(stat = "count",
            aes(label = paste0(round(..count.. / sum(..count..), 3) * 100)),
            hjust = 0) +
  labs(title = "% Distrbution of the Class Variable, Hospital Ratings ") +
  theme(plot.title = element_text(hjust = 0.5), plot.background = element_blank(),
        axis.title.y = element_blank(), panel.background = element_blank(),
        axis.text.y = element_text(hjust = 1)) +
  coord_flip()

# One more footnote level ("too few measures...") also yields unusable rows;
# inspect, then drop it as well.
general_info_cleaned %>%
  filter(general_info_cleaned$Hospital.overall.rating.footnote %in%
           c("There are too few measures or measure groups reported to calculate a star rating or measure group score")) %>%
  summary()
general_info_cleaned <- general_info_cleaned %>%
  filter(!general_info_cleaned$Hospital.overall.rating.footnote %in%
           c("There are too few measures or measure groups reported to calculate a star rating or measure group score"))
summary(general_info_cleaned)

# Keep only Provider.ID + the rating for rows with a clean (NA) footnote.
z_rem_var1 <- which(names(general_info_cleaned) %in% c("Provider.ID", "Hospital.overall.rating"))
general_info_final <- general_info_cleaned[is.na(general_info_cleaned$Hospital.overall.rating.footnote), z_rem_var1]
summary(general_info_final)
dim(general_info_final)

########################################
# Complications
########################################
complications_df <- read.csv(file = "ValidFiles//Complications - Hospital.csv",
                             header = TRUE, check.names = TRUE,
                             stringsAsFactors = TRUE,
                             na.strings = c("Not Available", ""))
head(complications_df)

# Demographic columns plus the measure date window add nothing to the scores.
zdemographics <- c("Hospital.Name", "Address", "City", "State", "ZIP.Code",
                   "County.Name", "Phone.Number", "Measure.Start.Date",
                   "Measure.End.Date")
zdemogrphic_vars <- which(names(complications_df) %in% zdemographics)
complications_df_cleaned <- complications_df[, -zdemogrphic_vars]
head(complications_df_cleaned)

# NA distribution: ~35% NAs in Compared.to.National, ~65% NAs in Footnote.
round(prop.table(summary(factor(complications_df_cleaned$Compared.to.National))) * 100, 2)
round(prop.table(summary(factor(complications_df_cleaned$Footnote))) * 100, 2)

# Footnote levels with an explicit reason carry no usable Score for the
# provider, so only the blank-footnote rows are kept.
complications_df_cleaned %>%
  group_by(Footnote) %>%
  summarise(cnt_rows = n(),
            Avg_Score = mean(Score, na.rm = TRUE),
            Total_Score = sum(Score, na.rm = TRUE)) %>%
  arrange(Avg_Score)

zvar1 <- is.na(complications_df$Footnote)  # blank footnotes
zvar2 <- which(names(complications_df_cleaned) %in% "Footnote")
complications_df_cleaned_footnote_nas <- complications_df_cleaned[zvar1, -zvar2]
summary(complications_df_cleaned_footnote_nas)

# Which measure contributes the most rows / highest mean score?
library(reshape2)
complications_df_cleaned_footnote_nas %>%
  group_by(Measure.ID) %>%
  summarise(cnt_rws = n(), mean_Score = round(mean(Score, na.rm = TRUE))) %>%
  arrange(desc(cnt_rws)) %>%
  melt(value.name = c("value")) %>%
  ggplot(aes(x = Measure.ID, y = value)) +
  geom_col(fill = "steelblue") +
  geom_text(aes(label = value), hjust = 1, vjust = 0.5, angle = 90) +
  facet_wrap(facets = ~ variable, scales = "free", ncol = 3) +
  labs(title = "Complications measure") +
  theme(axis.text.x = element_text(angle = 60, vjust = 1, hjust = 1),
        plot.title = element_text(hjust = 0.5))

# Wide format: one column per Measure.ID, one row per provider.
zvar1 <- c("Provider.ID", "Measure.ID", "Score")
zvar2 <- which(names(complications_df_cleaned_footnote_nas) %in% zvar1)
complications_df_final <- complications_df_cleaned_footnote_nas[, zvar2] %>%
  spread(key = Measure.ID, value = Score)
summary(complications_df_final)

# PSI_8_POST_HIP is stale (minimal contribution) — drop it, then keep the
# significant measures: PSI_90_SAFETY, COMP_HIP_KNEE, PSI_4_SURG_COMP.
complications_df_final <- complications_df_final[, -which(names(complications_df_final) %in% "PSI_8_POST_HIP")]
zvar1 <- which(names(complications_df_final) %in%
                 c("Provider.ID", "PSI_90_SAFETY", "COMP_HIP_KNEE", "PSI_4_SURG_COMP"))
complications_df_final <- complications_df_final[, zvar1]

library(psych)
pairs.panels(x = complications_df_final[2:ncol(complications_df_final)],
             main = "Complications dataset correlation ",
             bg = rainbow(n = 12), smooth = TRUE, ellipses = TRUE, pch = 21,
             cex.cor = 0.85, cex.labels = 0.6)

########################################
# Healthcare Associated Infections (HAI)
########################################
# NOTE: path made relative for consistency with every other read.csv above
# (was a machine-specific "~/Google Drive/..." absolute path).
hai_df <- read.csv(file = "ValidFiles/Healthcare Associated Infections - Hospital.csv",
                   na.strings = c("Not Available", ""))

zdemographics <- c("Hospital.Name", "Address", "City", "State", "County.Name",
                   "Phone.Number", "ZIP.Code", "Measure.Start.Date",
                   "Measure.End.Date")
hai_df_cleaned <- hai_df[, -which(names(hai_df) %in% c(zdemographics))]

# Footnote levels with explicit reasons contribute no score → keep blank only.
hai_df_cleaned %>%
  group_by(Footnote) %>%
  summarise(count_rows = n(), score_total = sum(Score, na.rm = TRUE))
zvar1 <- which(names(hai_df_cleaned) %in% c("Footnote"))
hai_df_cleaned <- hai_df_cleaned[is.na(hai_df_cleaned$Footnote), -zvar1]
summary(hai_df_cleaned)

# Provider counts per measure (HAI_1_DOPC_Days etc. are outliers that do not
# contribute to the overall score).
hai_df_cleaned %>%
  group_by(Measure.ID) %>%
  summarise(count_rows = n(), per_mean_score = mean(Score) * 100) %>%
  ggplot(aes(x = Measure.ID, y = count_rows)) +
  geom_col(fill = "steelblue") +
  geom_text(aes(label = count_rows), vjust = -0.5, angle = 0) +
  xlab("Measure.ID") +
  labs(title = "Healthcare Associated Infections") +
  theme(axis.text.x = element_text(angle = 45, hjust = 1, vjust = 1),
        plot.title = element_text(hjust = 0.5))

# Keep the CDC Standardized Infection Ratios (SIR) only:
#   HAI-1 CLABSI, HAI-2 CAUTI, HAI-3 SSI colon, HAI-4 SSI hysterectomy,
#   HAI-5 MRSA bloodstream, HAI-6 C.diff intestinal infections.
hai_measures <- c("HAI_1_SIR", "HAI_2_SIR", "HAI_3_SIR",
                  "HAI_4_SIR", "HAI_5_SIR", "HAI_6_SIR")
hai_df_cleaned <- hai_df_cleaned[which(hai_df_cleaned$Measure.ID %in% hai_measures), ]

ggplot(data = hai_df_cleaned, aes(x = Measure.ID)) +
  geom_bar(fill = "steelblue", na.rm = TRUE) +
  xlab("Measure.ID") +
  geom_text(stat = "count", aes(label = ..count..), vjust = -.1) +
  labs(title = "Hospital Associated Infections by Measure by Hospital Count") +
  theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))

# Long → wide on Measure.ID.
zvar1 <- c("Provider.ID", "Measure.ID", "Score")
zvar2 <- which(names(hai_df_cleaned) %in% zvar1)
hai_df_final <- hai_df_cleaned[, zvar2] %>%
  spread(key = "Measure.ID", value = "Score")
summary(hai_df_final)

# HAI_1..HAI_5 show some degree of mutual correlation.
pairs.panels(x = hai_df_final[, -1], cex.cor = 0.5, cex.labels = 0.9,
             ellipses = TRUE, pch = 21, bg = rainbow(length(zvar1)),
             main = "Hospital Patient Safety Group variable correlation")

# ----- Merge: ratings + complications + HAI = "Safety of Care" group -----
gen_inf_compli_df <- merge(x = general_info_final, y = complications_df_final,
                           all = TRUE,
                           by = intersect(x = names(general_info_final),
                                          y = names(complications_df_final)))
safety_of_care_group <- merge(x = gen_inf_compli_df, y = hai_df_final,
                              all = TRUE,
                              by = intersect(x = names(gen_inf_compli_df),
                                             y = names(hai_df_final)))
head(safety_of_care_group)

########################################
# HCAHPS (patient experience survey)
########################################
hcaphs_df <- read.csv(file = "ValidFiles/HCAHPS - Hospital.csv",
                      header = TRUE, check.names = TRUE,
                      na.strings = c("Not Available", "Not Applicable", ""))
head(hcaphs_df)

hcaphs_df_cleaned <- hcaphs_df[, -which(names(hcaphs_df) %in% zdemographics)]
head(hcaphs_df_cleaned)

# Keep the linear-mean-score measures only (cleanliness, nurse/doctor
# communication, staff responsiveness, pain mgmt, medicines, discharge info,
# care transition, quietness, overall rating, willingness to recommend).
hcaphs_measures <- c("H_COMP_1_LINEAR_SCORE", "H_COMP_2_LINEAR_SCORE",
                     "H_COMP_3_LINEAR_SCORE", "H_COMP_4_LINEAR_SCORE",
                     "H_COMP_5_LINEAR_SCORE", "H_COMP_6_LINEAR_SCORE",
                     "H_COMP_7_LINEAR_SCORE", "H_HSP_RATING_LINEAR_SCORE",
                     "H_QUIET_LINEAR_SCORE", "H_RECMND_LINEAR_SCORE",
                     "H_CLEAN_LINEAR_SCORE")
hcaphs_df_measures <- hcaphs_df_cleaned %>%
  filter(hcaphs_df_cleaned$HCAHPS.Measure.ID %in% hcaphs_measures)

# Strip the common "_LINEAR_SCORE" suffix for shorter column names.
hcaphs_df_measures$HCAHPS.Measure.ID <- as.character(hcaphs_df_measures$HCAHPS.Measure.ID)
hcaphs_df_measures$HCAHPS.Measure.ID <- str_replace(string = hcaphs_df_measures$HCAHPS.Measure.ID,
                                                    pattern = "_LINEAR_SCORE",
                                                    replacement = "")
hcaphs_df_measures$HCAHPS.Measure.ID <- as.factor(hcaphs_df_measures$HCAHPS.Measure.ID)
head(hcaphs_df_measures)

zdistri_vars <- c("Provider.ID", "HCAHPS.Measure.ID", "HCAHPS.Linear.Mean.Value",
                  "Number.of.Completed.Surveys",
                  "Survey.Response.Rate.Percent.Footnote",
                  "Number.of.Completed.Surveys.Footnote")
hcaphs_df_measures_distri <- hcaphs_df_measures[, zdistri_vars]

# Keep rows whose survey-response footnote is blank (valid surveys only).
hcaphs_df_measures_distri[is.na(hcaphs_df_measures_distri$Survey.Response.Rate.Percent.Footnote), ] %>% summary()
hcaphs_df_measures_distri <- hcaphs_df_measures_distri[is.na(hcaphs_df_measures_distri$Survey.Response.Rate.Percent.Footnote), c(1, 2, 3)]

# Patient experience group, wide by measure.
patient_exp_group <- hcaphs_df_measures_distri %>%
  spread(key = HCAHPS.Measure.ID, value = HCAHPS.Linear.Mean.Value)
head(patient_exp_group)

pairs.panels(patient_exp_group[, -1], cex.labels = 0.6, cex.cor = 0.7,
             main = "Patient Experience Relationship between measures")

# ----- Merge patient experience into the master frame -----
master_df <- merge(x = safety_of_care_group, y = patient_exp_group,
                   by = intersect(names(safety_of_care_group), names(patient_exp_group)),
                   all = TRUE)
head(master_df)

########################################
# Timely and Effective Care
########################################
time_and_eff_care_df <- read.csv(file = "ValidFiles/Timely and Effective Care - Hospital.csv",
                                 header = TRUE, check.names = TRUE,
                                 stringsAsFactors = TRUE,
                                 na.strings = c("Not Available", ""))
head(time_and_eff_care_df)

zdemogrphic_vars <- which(names(time_and_eff_care_df) %in% zdemographics)
time_and_eff_care_cleaned <- time_and_eff_care_df[, -zdemogrphic_vars]
head(time_and_eff_care_cleaned)

# Footnote inspection. (FIX: was `summary(cnt_rows = n())` — summarise() is
# what computes the per-footnote row counts.)
time_and_eff_care_cleaned %>% group_by(Footnote) %>% summarise(cnt_rows = n())

# FIX: Score is read as a factor (some measures carry categorical values), so
# as.integer() alone would return factor LEVEL CODES, not the scores.
# Converting via character yields the true numeric values (non-numeric entries
# become NA, which is the intent).
time_and_eff_care_cleaned$Score <- as.integer(as.character(time_and_eff_care_cleaned$Score))

# Keep blank footnotes plus the valid "sampled data" footnote level.
time_and_eff_care_cleaned <- time_and_eff_care_cleaned[is.na(time_and_eff_care_cleaned$Footnote) |
                                                         (time_and_eff_care_cleaned$Footnote %in%
                                                            c("2 - Data submitted were based on a sample of cases/patients.")), ]
time_and_eff_care_cleaned %>% summary()

# Drop rows with no Sample information.
time_and_eff_care_cleaned <- time_and_eff_care_cleaned[!is.na(time_and_eff_care_cleaned$Sample), ]

# Validation plots: mean score and provider count per measure.
time_and_eff_care_cleaned %>%
  group_by(Measure.ID) %>%
  summarise(mean_score = round(mean(Score))) %>%
  ggplot(aes(Measure.ID, mean_score)) +
  geom_col(fill = "steelblue") +
  geom_text(aes(label = mean_score), vjust = 0.5, angle = 90, hjust = 1) +
  labs(title = "Measure.ID Vs Scores") +
  theme(axis.text.x = element_text(angle = 60, vjust = 1, hjust = 1),
        plot.title = element_text(hjust = 0.5))

ggplot(time_and_eff_care_cleaned, aes(Measure.ID)) +
  geom_bar(fill = "steelblue") +
  geom_text(stat = "count", aes(label = ..count..), vjust = 0.5, angle = 90, hjust = 0) +
  labs(title = "Timeliness and Effectiveness Measure.ID Vs Count of Providers") +
  theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1),
        plot.title = element_text(hjust = 0.5))

# Timeliness group: ED_1b, ED_2b, OP_18b, OP_3b, OP_5, OP_20, OP_21
# (ED arrival/departure/admit times, transfer, ECG, evaluation, pain mgmt).
# Effectiveness group: CAC_3, IMM_2, IMM_3/OP_27, OP_4, OP_22, OP_23, OP_29,
# OP_30, PC_01, STK_1/4/6/8, VTE_1/2/3/5/6 (immunization, stroke, VTE, etc.).
zvar1 <- which(names(time_and_eff_care_cleaned) %in% c("Provider.ID", "Measure.ID", "Score"))
timeliness_group <- time_and_eff_care_cleaned[, zvar1] %>%
  spread(key = Measure.ID, value = Score)
zvar1 <- which(names(timeliness_group) %in%
                 c("Provider.ID", "ED_1b", "ED_2b", "OP_18b", "OP_3b", "OP_5", "OP_20", "OP_21"))
zvar2 <- which(names(timeliness_group) %in%
                 c("Provider.ID", "CAC_3", "IMM_2", "IMM_3_OP_27_FAC_ADHPCT", "OP_22",
                   "OP_23", "OP_29", "OP_30", "OP_4", "PC_01", "STK_1", "STK_4",
                   "STK_6", "STK_8", "VTE_1", "VTE_2", "VTE_3", "VTE_5", "VTE_6"))
effectiveness_group <- timeliness_group[, zvar2]
timeliness_group <- timeliness_group[, zvar1]

# Timeliness measures are mostly uncorrelated; ED_1b/ED_2b share ED time.
pairs.panels(x = timeliness_group[2:length(zvar1)], cex.cor = 0.5,
             cex.labels = 0.9, ellipses = TRUE, pch = 21,
             bg = rainbow(length(zvar1)),
             main = "Timeliness Group variable correlation")

# Shorten the IMM_3_OP_27_FAC_ADHPCT column name.
zvar1 <- which(names(effectiveness_group) %in% "IMM_3_OP_27_FAC_ADHPCT")
names(effectiveness_group)[zvar1] <- "IMM_3_OP_27"

cor.plot(effectiveness_group[, -1], stars = FALSE, numbers = TRUE, colors = TRUE,
         cex = 0.5, show.legend = FALSE, xlas = 2, cex.axis = 0.6,
         main = "Effectiveness group measure correlation")

# ----- Merge timeliness and effectiveness into the master frame -----
master_df <- merge(x = master_df, y = timeliness_group,
                   by = intersect(names(master_df), names(timeliness_group)), all = TRUE)
master_df <- merge(x = master_df, y = effectiveness_group,
                   by = intersect(names(master_df), names(effectiveness_group)), all = TRUE)
summary(master_df)

########################################
# Readmissions and Deaths
########################################
readm_mort_df <- read.csv(file = "ValidFiles/Readmissions and Deaths - Hospital.csv",
                          check.names = TRUE, header = TRUE,
                          stringsAsFactors = TRUE, na.strings = c("Not Available", ""))

zdemogrphic_vars <- which(names(readm_mort_df) %in% c(zdemographics, "Measure.Name", "Compared.to.National"))
readm_mort_cleaned <- readm_mort_df[, -zdemogrphic_vars]
head(readm_mort_cleaned)

# Non-blank footnotes (too few cases, IQR/OQR-only, suppressed, unavailable,
# no qualifying cases) carry no score — inspect then keep blank only.
readm_mort_cleaned[!is.na(readm_mort_cleaned$Footnote), ] %>%
  group_by(Footnote) %>%
  summarise(cnt_rows = n(), avg_score = mean(Score, na.rm = TRUE))
readm_mort_cleaned <- readm_mort_cleaned[is.na(readm_mort_cleaned$Footnote), ]

# Total score and provider count per measure.
readm_mort_cleaned %>%
  group_by(Measure.ID) %>%
  summarise(Score = sum(Score)) %>%
  ggplot(aes(Measure.ID, Score)) +
  geom_col(fill = "steelblue") +
  geom_text(aes(label = Score), vjust = -0.2, size = 3) +
  labs(title = "Mortality Readmission Rate Score") +
  ylab(label = "Provider Count") +
  theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1),
        plot.title = element_text(hjust = 0.5))

ggplot(readm_mort_cleaned, aes(Measure.ID)) +
  geom_bar(fill = "steelblue") +
  geom_text(stat = "count", aes(label = ..count..), vjust = -.1) +
  labs(title = "Mortality Readmission Rate Count by Providers") +
  ylab(label = "Provider Count") +
  theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1),
        axis.text.y = element_blank(),
        plot.title = element_text(hjust = 0.5))

# READM_30_* = 30-day readmission rates (AMI, COPD, CABG, HF, hip/knee, PN,
# STK, hospital-wide); MORT_30_* = 30-day mortality rates.
levels(readm_mort_cleaned$Measure.ID)
zvar1 <- which(names(readm_mort_cleaned) %in% c("Provider.ID", "Measure.ID", "Score"))
readm_mort_final <- readm_mort_cleaned[, zvar1] %>%
  spread(key = Measure.ID, value = Score)

# Split into mortality and readmission groups.
zvar1 <- which(names(readm_mort_final) %in%
                 c("Provider.ID", "MORT_30_AMI", "MORT_30_CABG", "MORT_30_COPD",
                   "MORT_30_HF", "MORT_30_PN", "MORT_30_STK"))
mortality_grp <- readm_mort_final[, zvar1]
zvar1 <- which(names(readm_mort_final) %in%
                 c("MORT_30_AMI", "MORT_30_CABG", "MORT_30_COPD",
                   "MORT_30_HF", "MORT_30_PN", "MORT_30_STK"))
readmission_grp <- readm_mort_final[, -zvar1]
dim(readmission_grp)
dim(mortality_grp)

# Mortality measures: Gaussian, positively inter-correlated.
pairs.panels(mortality_grp[, -1], scale = TRUE, ellipses = TRUE, pch = 21,
             bg = rainbow(n = ncol(readmission_grp)), cex.labels = 0.6,
             cex.cor = 2, main = "Mortality Group Correlation Plot")
pairs.panels(readmission_grp[, -1], scale = TRUE, ellipses = TRUE, pch = 21,
             bg = rainbow(n = ncol(readmission_grp)), cex.labels = 0.6,
             cex.cor = 2, main = "Readmission Group Correlation Plot")

# ----- Merge mortality + readmission into the master frame -----
master_df <- merge(x = master_df, y = readm_mort_final,
                   by = intersect(x = names(master_df), y = names(readm_mort_final)),
                   all = TRUE)
head(master_df)

########################################
# Outpatient Imaging Efficiency
########################################
op_imaging_eff_df <- read.csv(file = "ValidFiles/Outpatient Imaging Efficiency - Hospital.csv",
                              header = TRUE, check.names = TRUE,
                              na.strings = c("Not Available", ""),
                              stringsAsFactors = TRUE)
head(op_imaging_eff_df)

zdemogrphic_vars <- which(names(op_imaging_eff_df) %in% c(zdemographics, "Measure.Start.Date", "Measure.End.Date"))
op_imaging_eff_cleaned <- op_imaging_eff_df[, -zdemogrphic_vars]

# All non-blank footnote levels are stale — keep blank-footnote rows only.
op_imaging_eff_cleaned %>%
  group_by(Footnote) %>%
  summarise(count_score = sum(Score, na.rm = TRUE))
op_imaging_eff_cleaned <- op_imaging_eff_cleaned[is.na(op_imaging_eff_cleaned$Footnote), ]

ggplot(op_imaging_eff_cleaned, aes(Measure.ID)) +
  geom_bar(fill = "steelblue") +
  geom_text(stat = "count", aes(label = ..count..), vjust = -0.2, angle = 0, hjust = 0.5) +
  labs(title = "Count of Providers by Imaging Efficiency measures") +
  theme(plot.title = element_text(hjust = 0.5))

op_imaging_eff_cleaned %>%
  group_by(Measure.ID) %>%
  summarise(mean_score = mean(Score)) %>%
  ggplot(aes(Measure.ID, mean_score)) +
  geom_col(fill = "steelblue") +
  geom_text(aes(label = round(mean_score, 1)), vjust = -0.1, angle = 0, hjust = 0.5) +
  labs(title = "Mean Score of Measures for Imaging Efficiency") +
  theme(plot.title = element_text(hjust = 0.5))

# Keep OP_8 (MRI lumbar), OP_10 (abdomen CT contrast), OP_11 (thorax CT
# contrast), OP_13 (cardiac imaging), OP_14 (brain+sinus CT); drop OP_9.
zvar1 <- c("Provider.ID", "Measure.ID", "Score")
zvar2 <- which(names(op_imaging_eff_cleaned) %in% zvar1)
op_imaging_eff_final <- op_imaging_eff_cleaned[, zvar2] %>%
  spread(key = Measure.ID, value = Score)
op_imaging_eff_final <- op_imaging_eff_final[, -ncol(op_imaging_eff_final)]

pairs.panels(op_imaging_eff_final[, -1], scale = TRUE, smooth = TRUE,
             density = TRUE, ellipses = TRUE, pch = 21, method = 'pearson',
             cex.labels = 1, cex.cor = 2,
             main = "Correlation between Imagining Efficiency Measures ")

# ----- Merge imaging efficiency into the final master frame -----
master_df_final <- merge(x = master_df, y = op_imaging_eff_final,
                         by = intersect(x = names(master_df), y = names(op_imaging_eff_final)),
                         all = TRUE)
head(master_df_final)

########################################
# Imputation
########################################
# Row-wise check: are there providers with ALL measures missing?
na_indices <- apply(master_df_final[, -c(1, 2)], MARGIN = 1, function(x) all(is.na(x)))
sum(na_indices)  # 0 — no rows are entirely NA

# NAs in the target (providers absent from general_info_final after the outer
# merges) become the "Missing" class.
master_df_final$Hospital.overall.rating[which(is.na(master_df_final$Hospital.overall.rating))] <- "Missing"

# Multiple imputation of the measure columns (target and ID excluded).
master_df_imputed <- mice(data = master_df_final[, -c(1, 2)], seed = 100, maxit = 5, m = 5)
master_df_imputed_final <- mice::complete(data = master_df_imputed, action = 5)
master_df_imputed_final <- cbind(Provider.ID = master_df_final$Provider.ID, master_df_imputed_final)

########################################
# Random forest on all ratings (incl. "Missing")
########################################
set.seed(100)
master_df_model <- data.frame(Hospital.overall.rating = master_df_final$Hospital.overall.rating,
                              master_df_imputed_final)

# 70/30 train/test split.
indices <- sample(seq_len(nrow(master_df_model)), 0.7 * nrow(master_df_model))
train_df <- master_df_model[indices, ]
test_df <- master_df_model[-indices, ]

set.seed(100)
doParallel::registerDoParallel(cores = 4)

# Kept for reference: resampling/tuning spec that would apply if the model
# were fit through caret::train(). randomForest() itself ignores caret args.
train_control_rf <- trainControl(method = "repeatedcv", repeats = 5, number = 5,
                                 search = "grid", sampling = "smote",
                                 allowParallel = TRUE)

# FIX: the original passed trControl/tuneGrid/metric/seed/scale to
# randomForest(), where they are silently swallowed by `...` and have no
# effect (ntree is also not a valid caret tuning parameter). Pass the tuning
# values randomForest actually understands, and seed via set.seed().
mtry_rf <- round(sqrt(ncol(train_df[, -1])))
set.seed(100)
model_rf <- randomForest(Hospital.overall.rating ~ .,
                         data = train_df,
                         mtry = mtry_rf,
                         ntree = 500,
                         na.action = na.roughfix)
model_rf

# Predict ratings for the held-out providers.
vars_predict <- setdiff(x = names(train_df[, -1]), y = "Hospital.overall.rating")
predict_rf <- stats::predict(object = model_rf, test_df[vars_predict])
confusionMatrix(predict_rf, test_df$Hospital.overall.rating)

########################################
# Random forest on known ratings only (to later predict the "Missing" ones)
########################################
set.seed(100)
master_df_model <- data.frame(Hospital.overall.rating = master_df_final$Hospital.overall.rating,
                              master_df_imputed_final)

# Drop the "Missing" class and recover the numeric star values.
ratings <- master_df_model$Hospital.overall.rating
ratings[ratings %in% "Missing"] <- NA
# FIX: as.integer() on a factor returns level codes; go through character so
# the values "1".."5" map to the integers 1..5 regardless of level ordering.
ratings <- as.integer(as.character(ratings))
master_df_model_no_Missing <- data.frame(Hospital.overall.rating = ratings,
                                         master_df_model[, -1])
master_df_model_no_Missing <- master_df_model_no_Missing[!is.na(master_df_model_no_Missing$Hospital.overall.rating), ]
master_df_model_no_Missing$Hospital.overall.rating <- as.factor(master_df_model_no_Missing$Hospital.overall.rating)

# 70/30 split on the rated providers.
set.seed(999)
indices_no_missing <- sample(seq_len(nrow(master_df_model_no_Missing)),
                             0.7 * nrow(master_df_model_no_Missing))
train_df <- master_df_model_no_Missing[indices_no_missing, ]
test_df <- master_df_model_no_Missing[-indices_no_missing, ]
############################################################################################################ ### using RandomForest Algorithm for Hospitals Ratings to predict the Missing Rating ############################################################################################################ set.seed(100) doParallel::registerDoParallel(cl = 4,cores = 4) train_control_rf <- trainControl(method = "repeatedcv", repeats = 5, number = 5, search = "grid", sampling = "smote", allowParallel = TRUE) # Tuning grid parameters of Random Forest tuning_grid_rf <- expand.grid(.mtry = round(sqrt(ncol(train_df[,-1]))),ntree = seq(100,1000,100)) # training the random forest with training dataset model_rf_no_na <- randomForest(Hospital.overall.rating ~., data = train_df[,-2], trControl = train_control_rf, tuneGrid = tuning_grid_rf, metric = "auc", na.action = na.roughfix, scale = TRUE, seed = 100) model_rf_no_na # Predicting the test_df hosptial ratings and vars_predict <- setdiff(x = names(test_df[,-c(2)]),y = "Hospital.overall.rating") predict_rf_test <- stats::predict(object = model_rf_no_na,test_df[vars_predict]) # Confusion Matrix confusionMatrix(predict_rf_test,test_df$Hospital.overall.rating) # * Random Forest Algorithm generated an accuracy of 74%. 
# =============================================================================
# Fill in the "Missing" ratings with the trained model, then start the
# measure-group construction (Safety of Care first) with a latent-variable
# (factor-analysis) model per group.
# =============================================================================

# Predicting the rating of missing Ratings using the randomForest
# Dataframe of Missing ratings
master_df_model_Missing <- data.frame(Hospital.overall.rating = ratings,master_df_model[,-c(1)])
master_df_model_Missing <- master_df_model_Missing[is.na(master_df_model_Missing$Hospital.overall.rating),]
predicted_rating_missing_Values <- predict(model_rf_no_na,master_df_model_Missing[,-1])
#assigning the predicted values to missing
master_df_model_Missing$Hospital.overall.rating <- predicted_rating_missing_Values
# combining the missing and no missing rows
predict_master_df <- rbind(master_df_model_no_Missing,master_df_model_Missing)

############################################################################################################
# Grouping the measures
############################################################################################################
# Groups identified so far are:
# Outcome Groups:
# 1. Mortality group: - This is a negative group
# 2. Saftey of Care +
# 3. Readmission -
# 4. Patient Experience +
# Process Groups:
# 5. Effectiveness of Care +
# 6. Timeliness of care -
# 7. Efficient Use of Imaging +

########################################################################
## 1 Safety of Care Group
########################################################################
# safety of care has a positive impact on the overall rating
zvar_saftey <- c("Provider.ID","COMP_HIP_KNEE","PSI_90_SAFETY","HAI_1_SIR","HAI_2_SIR","HAI_3_SIR","HAI_4_SIR","HAI_5_SIR","HAI_6_SIR")
zvar1 <- which(names(master_df_imputed_final) %in% zvar_saftey)
# scaled # winsorized
# Standardise each measure, then winsorize the tails (trim = 0.0125 per side,
# i.e. roughly +/- 3 sigma under normality) to damp outliers.
safety_of_care_group <- master_df_imputed_final[,zvar1][,-1] %>% scale() %>% winsor(trim = 0.0125)

## Applying Latent Variable Model
# - Factor Analysis technique to identify the underlying factors in the continuous variable
# Latent Variable model is used to summarize the information.
# For the Star Rating LVM assumes that each measure reflects information about the underlying unobserved dimension of quality.
# LVM is constructed for each group independently.
# - Different approaches to factor analysis to identify the factors
# - Using the psych library functions to:
# - 1. identify the factors
# - 2. compute the factors

# Method to determine the factors
zxz <- nfactors(x=safety_of_care_group,n=20, rotate="varimax",diagonal=FALSE, fm="ml",title="Number of Factors",pch=16,use="complete")
# Extracting the number of factors
# (count of factor solutions with a non-NA significance probability in VSS)
n_factors = sum(!is.na(zxz$vss.stats[,"prob"]))
# From the nfactors, it is clear that the saftey_care variable is to be factored into 4 factors.
# factoring method, fm = "pa" pa- principal axis, rotate = with max variance, minres - min resume
fm = "ml" # maximum likelihood
rotate = "varimax" # varimax rotation to obtain maximum variance between the measons
nfactors = n_factors
scores = "regression"
safety_care_fact_anal <- fa(r = safety_of_care_group,rotate = rotate,fm = fm,scores = scores,nfactors = nfactors)
# using the loadings
safety_loadings <- safety_care_fact_anal$loadings
cor.plot(safety_of_care_group,stars = FALSE,numbers = TRUE,colors = TRUE,cex = 0.5,
         show.legend = FALSE,upper = F, xlas = 2,cex.axis = 0.6,
         main = "Safety of Care measures correlation")
# computing the mean loading
safety_loading <- as.vector(apply(safety_loadings[,1:nfactors],1,FUN = mean))
# comparing with correlations
cor_matrix <- data.frame(cor(safety_of_care_group))
cor_matrix <- data.frame(sapply(cor_matrix, function(x) round(mean(x), 2)))
cor_matrix <- cbind(cor_matrix, round(safety_loading, 2))
colnames(cor_matrix) <- c("avg_correlation", "mean_factor_loading")
cor_matrix
# comparing the relation between the avg correlation, mean factor loadings, mean weights
mean_weights = round(apply(safety_care_fact_anal$weights,1,mean),3)
cbind(cor_matrix,mean_wghts = mean_weights)
# It is clear that factor loadings are proportional to mean_weights.

# ASSUMPTION: Num of Hospitals with greater than 3 measures having NAs in master_df_final is to be eliminated as per CMS
# (first column becomes "master_df_final.Provider.ID" via cbind's deparsed name)
master_df_final_safety_care_measures <- cbind(master_df_final$Provider.ID,master_df_final[,which(colnames(master_df_final) %in% colnames(safety_of_care_group))])
# filtering the indices with containing NA score measures <= 3
valid_indices <- apply(master_df_final_safety_care_measures[,-1],1,function(x) sum(is.na(x))) < 3
master_df_final_safety_care_measures <- master_df_final_safety_care_measures[valid_indices == TRUE,]
# Num of Providers with less than 3 measures having NAs in the master_df_final
table(valid_indices)

# In order to calculate scores,imputing the missing values in the scores variable
# Median-impute NAs within a single measure (column); non-NA values unchanged.
impute_na <- function(measure){
  measure[which(is.na(measure))] <- median(measure, na.rm = TRUE)
  return(measure)
}
master_df_final_safety_care_measures <- data.frame(sapply(master_df_final_safety_care_measures, impute_na))

# Computting group score of Saftety of Care
mean_weights
master_df_final_safety_care_measures$score <- 0
# using sweep function to multiply mean_weights with each hospital group measure to evaluate to the total group score from all the measures.
# =============================================================================
# Safety-of-Care group score + generic group-score helper (f_group_scores).
# A group score is the weighted mean of a hospital's standardized measures,
# with weights taken from the mean factor-analysis weights (mean_weights).
# =============================================================================

# Weighted Safety-of-Care score per hospital.
# Only the measure columns are weighted: Provider.ID and the freshly
# initialised `score` column are excluded (the original `[,-1]` kept the
# all-zero `score` column, triggering a sweep() recycling warning; the zeros
# contributed nothing, so the resulting sums are unchanged by this cleanup).
safety_measure_cols <- setdiff(colnames(master_df_final_safety_care_measures),
                               c("master_df_final.Provider.ID", "score"))
master_df_final_safety_care_measures$score <-
  apply(sweep(master_df_final_safety_care_measures[, safety_measure_cols],
              MARGIN = 2, mean_weights/length(mean_weights), '*'),
        1, sum) %>% round(digits = 3)

# selecting the score measures
saftey_group_score <- master_df_final_safety_care_measures[, c("master_df_final.Provider.ID", "score")]
colnames(saftey_group_score) <- c("Provider.ID", "saftey_score")
head(saftey_group_score)
plot(density(saftey_group_score$saftey_score),main = "Safety Group Score" )

############################################################################################################
### Generic function to convert the remaining groups to group scores
############################################################################################################
# f_group_scores(group_measure)
#   group_measure: numeric matrix/data frame of standardized, winsorized
#                  measures for ONE group (columns named after the measures).
#   Returns: data frame with columns Provider.ID and a score column (named
#            "saftey_score" for historical reasons; every caller renames
#            column 2 immediately after the call).
#   Side effects: draws an nfactors() screen and a cor.plot().
#   NOTE: reads the global `master_df_final` to recover the raw (pre-imputed)
#   measures and to apply the CMS ">= 3 missing measures" exclusion rule.
f_group_scores <- function(group_measure) {
  # 1. Determine the number of latent factors (VSS complexity scan).
  zxz <- nfactors(x=group_measure,n=20, rotate="varimax",diagonal=FALSE, fm="ml",title="Number of Factors",pch=16,use="complete")
  # Count of factor solutions with a non-NA significance probability.
  n_factors = sum(!is.na(zxz$vss.stats[,"prob"]))

  # 2. Maximum-likelihood factor analysis with varimax rotation;
  #    regression-based factor scores.
  fm = "ml"
  rotate = "varimax"
  nfactors = n_factors
  scores = "regression"
  fact_anal <- fa(r = group_measure,rotate = rotate,fm = fm,scores = scores,nfactors = nfactors)

  # Loadings and a correlation plot of the group's measures.
  safety_loadings <- fact_anal$loadings
  cor.plot(group_measure,stars = FALSE,numbers = TRUE,colors = TRUE,cex = 0.5,
           show.legend = FALSE,upper = F, xlas = 2,cex.axis = 0.6,
           main = "Group measure correlation")

  # Mean loading per measure, compared against the average correlation.
  loadings <- as.vector(apply(safety_loadings[,1:nfactors],1,FUN = mean))
  cor_matrix <- data.frame(cor(group_measure))
  cor_matrix <- data.frame(sapply(cor_matrix, function(x) round(mean(x), 2)))
  cor_matrix <- cbind(cor_matrix, round(loadings, 2))
  colnames(cor_matrix) <- c("avg_correlation", "mean_factor_loading")
  cor_matrix

  # Per-measure weights = mean of the factor-score weights across factors.
  mean_weights = round(apply(fact_anal$weights,1,mean),3)
  cbind(cor_matrix,mean_wghts = mean_weights)
  # It is clear that factor loadings are proportional to mean_weights.

  # 3. CMS rule: drop hospitals with 3 or more missing measures in this group.
  master_df_final_group_measure <- cbind(master_df_final$Provider.ID,master_df_final[,which(colnames(master_df_final) %in% colnames(group_measure))])
  valid_indices <- apply(master_df_final_group_measure[,-1],1,function(x) sum(is.na(x))) < 3
  master_df_final_group_measure <- master_df_final_group_measure[valid_indices == TRUE,]
  table(valid_indices)

  # 4. Median-impute the remaining NAs within each measure column.
  impute_na <- function(measure){
    measure[which(is.na(measure))] <- median(measure, na.rm = TRUE)
    return(measure)
  }
  master_df_final_group_measure <- data.frame(sapply(master_df_final_group_measure, impute_na))

  # 5. Weighted group score per hospital.
  # BUG FIX: the original row loop summed columns 1:ncol, which INCLUDED the
  # Provider.ID column (column 1). The ~6-digit provider number therefore
  # leaked into every group score and mean_weights was recycled out of
  # alignment with the measures. Weight only the measure columns, mirroring
  # the vectorized Safety-of-Care computation above (also O(n) instead of a
  # per-row loop).
  master_df_final_group_measure$score <-
    round(apply(sweep(master_df_final_group_measure[,-1],
                      MARGIN = 2, mean_weights/length(mean_weights), '*'),
                1, sum), 3)

  # selecting the score measures
  group_score <- master_df_final_group_measure[, c("master_df_final.Provider.ID", "score")]
  # Column 2 keeps the historical name "saftey_score"; callers rename it.
  colnames(group_score) <- c("Provider.ID", "saftey_score")
  return(group_score)
}
####----------------------- END of USER-DEFINED Function ------------------ ####

########################################################################
## 2 patient_experience Group
########################################################################
zvar_patient_exp <- c("Provider.ID","H_CLEAN","H_COMP_1","H_COMP_2","H_COMP_3","H_COMP_4","H_COMP_5","H_COMP_6","H_COMP_7","H_HSP_RATING","H_QUIET","H_RECMND")
zvar1 <- which(names(master_df_imputed_final) %in% zvar_patient_exp)
# NOTE(review): patient experience is listed as a POSITIVE group above, yet it
# is multiplied by direction = -1 here — confirm the intended sign.
direction <- -1
# scaling the patient experience group and winsoring between +/- 3Sigma
patient_experience_group <- master_df_imputed_final[,zvar1][,-1] %>% scale() %>% winsor(trim = 0.0125) * direction

## Patient experience latent model -
## Calling the user-defined function used to create the scores and latent view modelling
patient_experience_group_score <- f_group_scores(patient_experience_group)
colnames(patient_experience_group_score)[2] <- "patient_exp_score"
str(patient_experience_group_score)

########################################################################
## 3 readmission group
########################################################################
zvar_readm <- c("Provider.ID","READM_30_AMI","READM_30_CABG","READM_30_COPD","READM_30_HF","READM_30_HIP_KNEE","READM_30_HOSP_WIDE","READM_30_PN","READM_30_STK")
zvar1 <- which(names(master_df_imputed_final) %in% zvar_readm)
# multiply the measure scores with direction to indicate the type of measure.
# timeliness, readmission, mortality
# Negative-direction groups (higher raw value = worse outcome) are flipped by
# multiplying with `direction` so that larger scores always mean "better".
direction <- -1
# scaling the patient experience group and winsoring between +/- 3Sigma
# (comment carried over from copy/paste — this actually scales the READMISSION group)
readmission_group <- master_df_imputed_final[,zvar1][,-1] %>% scale() %>% winsor(trim = 0.0125) *direction

## Computing readmission group latent model
## Readmission Latent Variable Model
## Calling the user-defined function used to create the scores and latent view modelling
readmission_group_score <- f_group_scores(readmission_group)
colnames(readmission_group_score)[2] <- "readm_score"
head(readmission_group_score)

########################################################################
## 4 mortality group
########################################################################
zvar_mort <- c("Provider.ID","MORT_30_AMI","MORT_30_CABG","MORT_30_COPD","MORT_30_HF","MORT_30_PN","MORT_30_STK","PSI_4_SURG_COMP")
zvar1 <- which(names(master_df_imputed_final) %in% zvar_mort)
# scaling the patient experience group and winsoring between +/- 3Sigma
# NOTE: `direction` is silently reused from the readmission section above
# (still -1, which is correct for mortality as a negative group).
mortality_group <- master_df_imputed_final[,zvar1][,-1] %>% scale() %>% winsor(trim = 0.0125) * direction

## Computing mortality group latent model
# Calling the user-defined function used to create the scores and latent view modelling
mortality_group_score <- f_group_scores(mortality_group)
colnames(mortality_group_score)[2] <- "mort_score"
head(mortality_group_score)

########################################################################
## 5. Process Group 4% - Effectiveness Group
########################################################################
zvar_eff <- c("Provider.ID","CAC_3","IMM_2","IMM_3_OP_27","OP_22","OP_23","OP_29","OP_30","OP_4","PC_01","STK_1","STK_4","STK_6","STK_8","VTE_1","VTE_2","VTE_3","VTE_5","VTE_6")
zvar1 <- which(names(master_df_imputed_final) %in% zvar_eff)
# scaling the patient experience group and winsoring between +/- 3Sigma
# (positive group: no direction flip applied)
effectiveness_group <- master_df_imputed_final[,zvar1][,-1] %>% scale() %>% winsor(trim = 0.0125)

## Computing Effectiveness group latent model
# Calling the user-defined function used to create the scores and latent view modelling
effectiveness_group_score <- f_group_scores(effectiveness_group)
colnames(effectiveness_group_score)[2] <- "eff_score"
head(effectiveness_group_score)

########################################################################
## 6 timeliness Group
########################################################################
zvar_tim <- c("Provider.ID","ED_1b","ED_2b","OP_18b","OP_20","OP_21","OP_3b","OP_5")
zvar1 <- which(names(master_df_imputed_final) %in% zvar_tim)
# scaling the timeliness between +/1 3Sigma and winsorizing the scores
# (negative group: longer waits are worse, hence * direction)
timeliness_group <- master_df_imputed_final[,zvar1][,-1] %>% scale() %>% winsor(trim = 0.0125) * direction
dim(timeliness_group)

## Computing timeliness group latent variable model
# Calling the user-defined function used to create the scores and latent view modelling
timeliness_group_score <- f_group_scores(timeliness_group)
colnames(timeliness_group_score)[2] <- "timeliness_score"
head(timeliness_group_score)

########################################################################
## 7 medical imagine efficiency group
########################################################################
zvar_imag <- c("Provider.ID","OP_10","OP_11","OP_13","OP_14","OP_8")
zvar1 <- which(names(master_df_imputed_final) %in% zvar_imag)
# scaling the patient experience group and winsoring between +/- 3Sigma
# Imaging efficiency (positive group: no direction flip).
imaging_efficiency_group <- master_df_imputed_final[,zvar1][,-1] %>% scale() %>% winsor(trim = 0.0125)

## Computing Imagining Efficiency group latent variable model
# Calling the user-defined function used to create the scores and latent view modelling
imaging_eff_group_score <- f_group_scores(imaging_efficiency_group)
colnames(imaging_eff_group_score)[2] <- "imaging_eff_score"
head(imaging_eff_group_score)

##########################################################################
## Binding the all the groups scores and preparing the final scores table.
##########################################################################
# Full outer joins on Provider.ID: a hospital absent from a group gets NA for
# that group's score (handled by the re-proportioning loop below).
m1 <- merge(saftey_group_score, patient_experience_group_score, by="Provider.ID", all=T)
m2 <- merge(m1, readmission_group_score, by="Provider.ID", all=T)
m3 <- merge(m2, mortality_group_score, by="Provider.ID", all=T)
m4 <- merge(m3, timeliness_group_score, by="Provider.ID", all=T)
m5 <- merge(m4, effectiveness_group_score, by="Provider.ID", all=T)
group_scores <- merge(m5, imaging_eff_group_score, by="Provider.ID", all=T)
head(group_scores)
dim(group_scores)

## Computing the final scores
# Adding the weightages specified by the CMS
# - The outcome groups have 22% weightage, that is Patient Experience,Saftey of Care, Readmission, and Mortality
# - Process groups have 4% weightage, that is timeliness, effectiveness measures and imaging efficiency measures
out_grp_weight = 0.22
process_group_weight = 0.04
# Order must match the score columns of group_scores (columns 2..8).
weights <- c(safety = out_grp_weight, experience = out_grp_weight,
             readmission = out_grp_weight,mortality = out_grp_weight,
             timeliness = process_group_weight,effectivesss = process_group_weight,
             imaging_eff = process_group_weight)

# procedure for reproproportioning the wieghts with example:
# There are missing scores in group_scores. For example, for Provider.ID=10007, Saftey, readm, mortality, effectiveness and imaging scores are missing
# Therefore, patient_exp = 22%, timeliness = 4% adds to 26%, the remaining weights add up to 74%.
# Thus, the reproportioned weights will be
# c(mortality = 0.22/0.74, effectivenss = 0.04/0.74, readmission = 0.22/0.74,Safety = 0.22/0.74, imaging = 0.04/0.7 )

group_scores$final_score <- 100 # Initialise the group score
# For each hospital: if some group scores are NA, renormalise the weights of
# the available groups so they sum to 1, then take the weighted sum.
# NOTE(review): a row with ALL group scores missing would leave invalid_indices
# covering every column — confirm such rows cannot occur at this point.
for (row in 1:nrow(group_scores)) {
  if (sum(is.na(group_scores[row, -1])) > 0) { # if NAs are found in the group_scores
    invalid_indices = which(is.na(group_scores[row, -1])) # get the indices where NAs are found
    s = sum(weights[-invalid_indices]) # sum the scores' weights where the are scores are found
    reproportioned_weights <- weights[-invalid_indices]/s # reproportion the weights where indices are found
    #print(weights[-invalid_indices])
    #print(reproportioned_weights)
    print(sum(weights[-invalid_indices]*reproportioned_weights))
    # adjusting the re-proportioned to indices that have "NAs"
    group_scores$final_score[row] <- sum(group_scores[row, -1][-invalid_indices]*reproportioned_weights)
  } else {
    group_scores$final_score[row] <- sum(group_scores[row, -1]*weights)
  }
}

# The group scores are the final scores
final_scores <- group_scores[, 1:ncol(group_scores)]
dim(final_scores)

########################################################################
# Clustering the final scores
########################################################################
# - Applying K-means clustering algorithm to the final scores
# clearing the garbage collector
gc()
# with 5 clusters (one per star rating)
kmeans_clustering <- kmeans(x = final_scores$final_score,centers = 5,nstart = 100)
# Assigning clusters to the final_scores
final_scores <- as.data.frame(final_scores)
final_scores$rating_clster <- as.factor(kmeans_clustering$cluster)

# summarising the clusters
cluster_summary <- final_scores %>%
  group_by(rating_clster) %>%
  summarise(mean_score = mean(final_score) ,cluster_count = n()) %>%
  mutate(percent = cluster_count*100/sum(cluster_count))
cluster_summary
# we now need to reassign the cluster ratings according to the average rating
str(cluster_summary)
# swap the 1st element with 5, 2nd
# with 4, 3rd with 3, 4th with 2, 5th with 1
# In general, in 1:5, swap the ith element with (5-i + 1)
# Resetting the clusters
# Relabel the arbitrary k-means cluster ids so that cluster 5 = best average
# score and cluster 1 = worst (relies on cluster_summary's row order).
for (row in 1:nrow(final_scores)) {
  id = final_scores$rating_clster[row]
  final_scores$new_cluster_id[row] = 5 - which(cluster_summary$rating_clster == id) + 1
  # # swap x with 5 - which(f$cluster_id == x) + 1
}

# check the distribution of the new clusters after the swap
final_scores %>%
  group_by(new_cluster_id) %>%
  summarise(avg_score = mean(final_score)) %>%
  arrange(desc(avg_score))

# new cluster id added to the final scores data frame. after adjusting the clusters
final_scores$new_cluster_id <- as.factor(final_scores$new_cluster_id)
# columns 10 and 11 are rating_clster and new_cluster_id
summary(final_scores[,c(10,11)])
head(final_scores)

## comparing scores across star ratings ####
ggplot(final_scores, aes(x = factor(as.character(new_cluster_id)), y=final_score*100)) +
  geom_boxplot(na.rm=TRUE) +
  xlab("star rating") + ylab("final score") +
  theme_minimal() +
  ggtitle("comparing scores across star ratings") +
  scale_y_continuous(breaks=100*seq(-0.4, 0.4, 0.05))

# Distribution of ratings across different groups
melt(data = final_scores[,-1],variable.name = "measure") %>%
  ggplot(aes(new_cluster_id,value*100)) +
  geom_boxplot(na.rm = T) +
  facet_wrap(facets = ~measure,scales = "free",ncol = 3) +
  ggtitle("Distribution of ratings across different groups")+
  xlab("Star rating") + ylab("Score") +
  theme_minimal()
# 5 - Rating in the final score is contributed by the Timeliness and Imaging Efficiency
# 4 - Rating is contributed by Safety score, readmission, efficectiveness and timeliness.
# 1 - Rating is mainly due to timeliness score, Low imaging efficiency,

# star_rating_validation
# merging the general_info_final with final scores
final_scores_merged <- merge(final_scores, general_info_final, by="Provider.ID", all=T)
head(final_scores_merged)
final_scores_merged$new_cluster_id <- as.character(final_scores_merged$new_cluster_id)
str(final_scores_merged)
# imputing the missing NA records in the new_cluster_id with "Not Available"
final_scores_merged$new_cluster_id[which(is.na(final_scores_merged$new_cluster_id))] <- "Missing"
# converting the cluster id to factor
final_scores_merged$new_cluster_id <- factor(final_scores_merged$new_cluster_id)
summary(final_scores_merged$new_cluster_id)

# confusion matrix: derived star rating vs the published CMS rating
table(final_scores_merged$new_cluster_id, final_scores_merged$Hospital.overall.rating)
confusionMatrix(final_scores_merged$new_cluster_id, final_scores_merged$Hospital.overall.rating)

########################################################################
## Provider Analysis
########################################################################
head(group_scores)
# comapring provider's overall score with
ggplot(group_scores, aes(x=1, y=final_score))+geom_boxplot()
# provider's score
final_scores_merged[which(final_scores_merged$Provider.ID == 140010),]

# Average score per star-rating cluster, for every group.
final_scores_merged %>%
  group_by(new_cluster_id) %>%
  summarise(avg_final_score = mean(final_score, na.rm = T),
            avg_mort_scr = mean(mort_score, na.rm = T),
            avg_saf_scr = mean(saftey_score, na.rm = T),avg_eff_scr = mean(eff_score, na.rm = T),
            avg_read_scr = mean(readm_score, na.rm = T),avg_tim_scr = mean(timeliness_score, na.rm = T),
            avg_img_scr = mean(imaging_eff_score, na.rm=T), avg_pat_exp_scr = mean(patient_exp_score, na.rm=T))

# Viewing scores of the given provider
final_scores_merged[which(final_scores_merged$Provider.ID == 140010),]

# plotting group level scores with rating
ggplot(final_scores, aes(x=factor(as.character(new_cluster_id)), y=final_score*100)) +
  geom_boxplot(na.rm=TRUE) +
  ylab("final score") +
  scale_y_continuous(breaks = 100*seq(-0.4, 0.4, 0.05)) +
  theme(plot.title = element_text(size = 10), axis.title.x = element_blank())

# mortality
plot.m <- ggplot(final_scores_merged, aes(x=factor(as.character(new_cluster_id)), y=mort_score*100)) +
  geom_boxplot(na.rm=TRUE) +
  ylab(label = "mortality score") +
  scale_y_continuous(breaks = 100*seq(-0.4, 0.4, 0.05)) +
  theme(plot.title = element_text(size = 10), axis.title.x = element_blank(),
        axis.title.y = element_text(size = 8),axis.text.x = element_text(size = 8),
        axis.text.y = element_blank(),axis.ticks.y = element_blank())
#plot.m
# readmission
plot.r <- ggplot(final_scores_merged, aes(x=factor(as.character(new_cluster_id)), y=readm_score*100)) +
  geom_boxplot(na.rm=TRUE) +
  ylab("readmission score") +
  scale_y_continuous(breaks = 100*seq(-0.4, 0.4, 0.05)) +
  theme(plot.title = element_text(size = 10), axis.title.x = element_blank(),
        axis.title.y = element_text(size = 8),axis.text.x = element_text(size = 8),
        axis.text.y = element_blank(),axis.ticks.y = element_blank())
#plot.r
# safety
plot.s <- ggplot(final_scores_merged, aes(x=factor(as.character(new_cluster_id)), y=saftey_score*100)) +
  geom_boxplot(na.rm=TRUE) +
  ylab("safety score") +
  scale_y_continuous(breaks = 100*seq(-0.4, 0.4, 0.05)) +
  theme(plot.title = element_text(size = 10), axis.title.x = element_blank(),
        axis.title.y = element_text(size = 8),axis.text.x = element_text(size = 8),
        axis.text.y = element_blank(),axis.ticks.y = element_blank())
#plot.s
# effectiveness
plot.eff <- ggplot(final_scores_merged, aes(x=factor(as.character(new_cluster_id)), y=eff_score*100)) +
  geom_boxplot(na.rm=TRUE) +
  ylab("effectiveness score") +
  scale_y_continuous(breaks = 100*seq(-0.4, 0.4, 0.05)) +
  theme(plot.title = element_text(size = 10), axis.title.x = element_blank(),
        axis.title.y = element_text(size = 8),axis.text.x = element_text(size = 8),
        axis.text.y = element_blank(),axis.ticks.y = element_blank())
#plot.eff
# experience
plot.exp <- ggplot(final_scores_merged, aes(x=factor(as.character(new_cluster_id)), y=patient_exp_score*100)) +
  geom_boxplot(na.rm=TRUE) +
  ylab("experience score") +
  scale_y_continuous(breaks = 100*seq(-0.4, 0.4, 0.05)) +
  theme(plot.title = element_text(size = 10), axis.title.x = element_blank(),
        axis.title.y = element_text(size = 8),axis.text.x = element_text(size = 8),
        axis.text.y = element_blank(),axis.ticks.y = element_blank())
#plot.exp
# timeliness
plot.t <- ggplot(final_scores_merged, aes(x=factor(as.character(new_cluster_id)), y=timeliness_score*100)) +
  geom_boxplot(na.rm=TRUE) +
  ylab("timeliness score") +
  scale_y_continuous(breaks = 100*seq(-0.4, 0.4, 0.05)) +
  theme(plot.title = element_text(size = 10), axis.title.x = element_blank(),
        axis.title.y = element_text(size = 8),axis.text.x = element_text(size = 8),
        axis.text.y = element_blank(),axis.ticks.y = element_blank())
#plot.t
# medical
plot.med <- ggplot(final_scores_merged, aes(x=factor(as.character(new_cluster_id)), y=imaging_eff_score*100)) +
  geom_boxplot(na.rm=TRUE) +
  ylab("medical score") +
  scale_y_continuous(breaks = 100*seq(-0.4, 0.4, 0.05)) +
  theme(plot.title = element_text(size = 10), axis.title.x = element_blank(),
        axis.title.y = element_text(size = 8),axis.text.x = element_text(size = 8),
        axis.text.y = element_blank(),axis.ticks.y = element_blank())
#plot.med

# 3x3 grid of all group-score boxplots by star rating.
p.grid <- plot_grid(plot.r, plot.exp, plot.m, plot.s, plot.t, plot.eff, plot.med,
                    labels=c("readmission", "experience", "mortality", "safety", "timeliness", "effectiveness", "medical"),
                    ncol = 3, nrow = 3,label_size = 8,hjust = -2)
p.grid

# Provider 140010's row, for reference in the plots below:
#     Provider.ID saftey_score patient_exp_score readm_score mort_score timeliness_score eff_score imaging_eff_score final_score rating_clster new_cluster_id
# 947      140010         0.11          -238.337    1296.652     405.07         7970.751        NA           12100.4    1194.599             4              2
#     Hospital.overall.rating
# 947                       3
############################################################################################################
# Setting the provider yintercepts to compare
# with the national levels average
############################################################################################################
# Red reference lines mark provider 140010's group scores (see the table
# comment above) against the national per-rating distributions.
provider.grid <- plot_grid(plot.r + geom_hline(yintercept=1296.7, col="red"),
                           # BUG FIX: was `yintercept=-0.11`; the provider's safety
                           # score is +0.11 (see the table above and grid_low below).
                           plot.s + geom_hline(yintercept=0.11, col="red"),
                           # BUG FIX: was `yintercept=--238` (double negative, i.e. +238);
                           # the provider's experience score is -238.337 and grid_low
                           # below already uses -238.
                           plot.exp + geom_hline(yintercept=-238, col="red"),
                           plot.m + geom_hline(yintercept=405, col="red"),
                           plot.t + geom_hline(yintercept=7970, col="red"),
                           plot.eff + geom_hline(yintercept=3.8, col="red"),
                           plot.med + geom_hline(yintercept=12100, col="red"),
                           labels=c("readmission", "safety", "experience", "mortality", "timeliness", "effectiveness", "medical"),
                           ncol = 3, nrow = 3)
provider.grid

# Isolating the measures which are lowering the Hosptial rating
grid_low <- plot_grid(plot.s + geom_hline(yintercept=0.11, col="red")+ggtitle("Safety"),
                      plot.exp + geom_hline(yintercept=-238, col="red")+ggtitle("Experience"))
grid_low
# These measures hould be improved in order to increase the rating
# NOTE(review): readmission reference is 1296.5 here vs 1296.7 in provider.grid;
# the actual score is 1296.652 — harmless rounding inconsistency.
grid_med <- plot_grid(plot.r + geom_hline(yintercept=1296.5, col="red")+ggtitle("Readmission"),
                      plot.m + geom_hline(yintercept=405, col="red")+ggtitle("Mortality"),
                      plot.t + geom_hline(yintercept=7970, col="red")+ggtitle("Timeliness"),
                      plot.eff + geom_hline(yintercept=3.8, col="red")+ggtitle("Effectiveness"),
                      plot.med + geom_hline(yintercept=12100, col="red")+ggtitle("Medical"),
                      ncol = 3)
grid_med
# These measures have to improved to improve the rating
# - On comparing the average scores of groups with the provider's score, we find that:
# The final score is higher than the average score for rating=4 but lower than for rating=5
# Mortality score is better than average for rating=5
# Safety score is between the avg score for rating = 2 and 1
# Effectiveness score is better than average for rating=5
# Readmissions score is between avg of ratings 3 and 4
# Timeliness score is better than average for rating=5
# Medical score is better than average for rating=5
# Experience score is between avg of ratings 2 and 3
#
# - Overall,
the rating is low because of a very low safety score (lesser than avg for rating=2), # - a low experience score and (between the avg for ratings 2 and 3) and (both are 22% weightage groups) # Compare the provider's score with the overall averages mean_scores <- round(sapply(group_scores[2:9], function(x) mean(x, na.rm = T)), 2) mean_scores group_scores[which(group_scores$Provider.ID == 140010),][2:9] # Can see that the safety and experience scores of the provider are lesser than the respective averages plot.s + geom_hline(yintercept=-0.11, col="red") plot.exp + geom_hline(yintercept=-1.5, col="red") # Exploring further into safety and experience scores (measure wise) safety safety_scores <- data.frame(Provider.ID = master_df_imputed_final[,1],safety_of_care_group) head(safety_scores) # Average safety scores of the provider are avg_safety = round(safety_scores[which(safety_scores$Provider.ID == 140010),][3:ncol(safety_scores)], 3) # comparing with the average safety scores provider_safety = round(sapply(safety_scores[, -1], function(x) mean(x, na.rm = T)), 3) # Binding the both the variables head(rbind(avg_safety, provider_safety)) ######################################################################## # Hosptial Safety - -- Provider Analysis ######################################################################## # merging safety scores with hospital ratings safety_scores_merged <- merge(safety_scores, final_scores, by = "Provider.ID") plot.hai_4 <- ggplot(safety_scores_merged, aes(x = factor(as.character(new_cluster_id)), y=HAI_4_SIR*100)) + geom_boxplot(na.rm=TRUE) + xlab("star rating") + ylab("HAI_4 score") + theme_minimal() plot.hai_4 # Observation: HAI_4 score of the provider (0.00) is lower than the median scores for ratings 2, 3, 4, 5 plot.hai_5 <- ggplot(safety_scores_merged, aes(x=factor(as.character(new_cluster_id)), y=HAI_5_SIR*100)) + geom_boxplot(na.rm=TRUE) + xlab("star rating") + ylab("HAI_5 score") + theme_minimal() plot.hai_5 # - HAI_2, HAI_1, 
HAI_3, HAI_6 scores of the provider are better than avg # - But HAI_4 and HAI_5 are lower than average # - HAI_4 is Surgical Site Infection from abdominal hysterectomy (SSI: Hysterectomy) # - HAI_5 is Methicillin-resistant Staphylococcus Aureus (MRSA) Blood Laboratory-identified Events (Bloodstream infections) # - *Conclusions* - Improviing the HAI 4 and HAI_5 scores may improve the overall score rating. ######################################################################## # 2. Patient experience- -- Provider Analysis ######################################################################## # Exploring further into safety and patient experience scores (measure wise) experience_scores <- data.frame(Provider.ID = master_df_imputed_final[,1],patient_experience_group) # safety scores of the provider avg_experience = round(experience_scores[which(experience_scores$Provider.ID == 140010),][3:ncol(experience_scores)], 3) # comparing with the average safety scores provider_experience = round(sapply(experience_scores[, -1], function(x) mean(x, na.rm = T)), 3) head(rbind(avg_experience, provider_experience)) # For patient experience, H_COMP_1_LINEAR_SCORE, H_HSP_RATING_LINEAR_SCORE, # - H_COMP_7_LINEAR_SCORE, H_COMP_3_LINEAR_SCORE, H_COMP_4_LINEAR_SCORE, # - H_COMP_5_LINEAR_SCORE, H_RECMND_LINEAR_SCORE are the most imp measures # - *Observations:* # - H_HSP_RATING_LINEAR_SCORE, H_RECMND_LINEAR_SCORE, H_CLEAN_LINEAR_SCORE are better than the National avg # - H_HSP_RATING is overall hospital rating linear mean score # - H_RECMND is Recommend hospital - linear mean score # - H_CLEAN is Cleanliness - linear mean score # *Conclusions:* - Improving the H_COMP_1 H_COMP_2 H_COMP_3 H_COMP_4 H_COMP_5 H_COMP_6 H_COMP_7 H_QUIET will improve the overall rating # merging safety scores with hospital ratings experience_scores_merged <- merge(experience_scores, final_scores, by="Provider.ID") plot.hsp <- ggplot(experience_scores_merged, aes(x=factor(as.character(new_cluster_id)), 
y=H_HSP_RATING*100)) + geom_boxplot(na.rm=TRUE) + xlab("star rating") + ylab("H_HSP_RATING score") + theme_minimal() plot.hsp # Observation: HSP_RATING score of the provider (0.00) is lower than the 25th percentile of rating=4 plot.recmd <- ggplot(experience_scores_merged, aes(x=factor(as.character(new_cluster_id)), y=H_RECMND*100)) + geom_boxplot(na.rm=TRUE) + xlab("star rating") + ylab("H_RECMND score") + theme_minimal() plot.recmd # H_RECMND is near the 25th percentile of rating=4 plot.clean <- ggplot(experience_scores_merged, aes(x=factor(as.character(new_cluster_id)), y = H_CLEAN*100)) + geom_boxplot(na.rm=TRUE) + xlab("star rating") + ylab("H_CLEAN score") + theme_minimal() plot.clean # H_CLEAN of 0 is comparable to the median cleanliness score for rating=3 ######################################################################## # 3 Readmission -- Provider Analysis ######################################################################## readm_scores <- data.frame(Provider.ID = master_df_imputed_final[,1],readmission_group) # readm scores of the provider avg_readm = round(readm_scores[which(readm_scores$Provider.ID == 140010),][3:ncol(readm_scores)], 3) # comparing with the average readm scores provider_readm = round(sapply(readm_scores[, -1], function(x) mean(x, na.rm = T)), 3) rbind(avg_readm, provider_readm) # - The measures READM_30_CABG, READM_30_CABG are lower than the national average. However, Readm_30 has scores higher than the national average. 
######################################################################## # 4 Mortality group -- Provider Analysis ######################################################################## mortality_scores <- data.frame(Provider.ID = master_df_imputed_final[,1],mortality_group) # readm scores of the provider avg_mort = round(mortality_scores[which(mortality_scores$Provider.ID == 140010),][3:ncol(mortality_scores)], 3) # comparing with the average readm scores provider_mort = round(sapply(mortality_scores[, -1], function(x) mean(x, na.rm = T)), 3) rbind(avg_mort, provider_mort) # Average score is higher than the national average for the Mortality. ############################################################################################################ # - *Summary of provider analysis:* ############################################################################################################ # - The provider has low scores in the groups safety and patient-experience and readmission and this pulled the over.rating of the hosptial downs. provider.grid # The safety score of provider is -4.4 (on the scale of this plot) plot.s + geom_hline(yintercept=-4.4, col = "red") # The experience score is -1.5 (on this scale) plot.exp + geom_hline(yintercept=-1.5, col = "red") # - within safety, HAI_4 and HAI_5 scores are lower than the average scores of these measures # - within experience, H_HSP_RATING, H_RECMND, H_CLEAN are worse than avg
29e355a45110d0b7919f13389975cb80af5de1d2
a4e18c89c6c0a151c7ba753bc78379e393fa8527
/bap/bin/R/10b_knee_execute.R
c97aed55b466ccf829b186c95748f5e0bddd03d1
[ "MIT" ]
permissive
caleblareau/bap
48afa890f63283701a5901c264c7d748acf6655b
44061cdcc16d60d6a757d518d55b4dfe66afb436
refs/heads/master
2021-11-25T20:39:18.077898
2021-11-13T08:10:36
2021-11-13T08:10:36
126,544,643
27
12
MIT
2020-03-23T20:02:51
2018-03-23T22:26:00
Python
UTF-8
R
false
false
1,122
r
10b_knee_execute.R
# 10b_knee_execute.R
# Command-line helper: reads a table of values, estimates a knee threshold
# via get_density_threshold_CL() (sourced from 00_knee_CL.R, which lives in
# the same directory as this script), and writes the single estimated value
# to "<vals_file>_kneeValue.txt".
#
# Expected trailing command-line arguments (in order):
#   <vals_file> <logTransform flag (0/1)> <column_name>

options(warn=-1)
suppressMessages(suppressWarnings(library(tools)))
suppressMessages(suppressWarnings(library(data.table)))
suppressMessages(suppressWarnings(library(dplyr)))

# I/O: capture the FULL command line (trailingOnly = FALSE on purpose --
# the "--file=" entry is needed below to locate this script on disk).
args <- commandArgs(trailingOnly = FALSE)

# Parse parameters working backwards since I am keeping all of them
# to be able to source the knee call Rscript
nn <- length(args)
vals_file <- args[nn-2]
logTransform <- args[nn-1]
column_name <- args[nn]

# Grab knee call functions from figuring out where this script lives and
# then changing the file name: strip "--file=" from the matching argument
# and swap this script's name for 00_knee_CL.R (same directory), which
# defines get_density_threshold_CL().
needle <- "--file="
match <- grep(needle, args)
source(normalizePath(gsub("10b_knee_execute.R", "00_knee_CL.R", gsub(needle, "", args[match]))))

# Import data supplied by the user: pull the requested column as numeric.
vec_values <- as.numeric(fread(vals_file)[[as.character(column_name)]])

# Call knee. logTransform arrives as a "0"/"1" string on the command line,
# hence the character -> numeric -> logical conversion chain.
estimated_knee_threshold <- get_density_threshold_CL(vec_values, "bead", as.logical(as.numeric(as.character(logTransform))))

# Write the single knee value (no header, no quoting) next to the input file.
write.table(data.frame(estimated_knee_threshold), row.names = FALSE, col.names = FALSE, quote = FALSE, sep = "", file = paste0(vals_file, "_kneeValue.txt"))
21a9d1e7ab995e1cdaf4a703b6e75ad07b3fa837
f7b5423257a4e65cae9578edb9539811de6600b1
/man/tdew.Rd
a5845417ad11e1758bacc42bd02376ae71db5116
[]
no_license
cran/lue
a19c0361da6389cfbb3adba6cf372fc770de371e
40f4e907b2af0823bff2b6c4e237e932ba525a0a
refs/heads/master
2020-03-16T01:57:22.749475
2018-06-14T11:39:16
2018-06-14T11:39:16
132,453,214
2
0
null
null
null
null
UTF-8
R
false
true
282
rd
tdew.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/tdew.r \docType{data} \name{tdew} \alias{tdew} \title{Dewpoint Temperature} \format{A rasterbrick (.nc)} \usage{ tdew } \description{ Input dewpoint temperature dataset } \keyword{datasets}
131567dc3cd31607cb661d3c04a84f5893a334ed
8bfb07d4492bf8901070e678846991f497794199
/man/add_mdi.Rd
1884c2ef0df49241c9521b598bf3048ec6c7c32d
[]
no_license
DanielSprockett/reltools
5b38577e1a3257af40d47c19c39778a32214107d
f693109c03ebd6944fdb42b7bff29d18cb49341b
refs/heads/master
2021-08-23T23:04:48.558234
2017-12-07T00:49:13
2017-12-07T00:49:13
113,137,957
3
0
null
null
null
null
UTF-8
R
false
true
721
rd
add_mdi.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/microbiome_tools.R \name{add_mdi} \alias{add_mdi} \title{Adds Microbial Dysbiosis Index to a phyloseq object} \usage{ add_mdi(ps) } \arguments{ \item{ps}{A \code{phyloseq} object that contains \code{\link[phyloseq]{sample_data}}} } \value{ This function returns the input \code{phyloseq} object with a \code{MDI} column in the \code{sample_data()}. } \description{ Adds a \code{MDI} column to \code{\link[phyloseq]{sample_data}} in a \code{phyloseq} object. The Microbial Dysbiosis Index first proposed by \href{http://www.sciencedirect.com/science/article/pii/S1931312814000638}{Gevers \emph{et al.} (2014)}. } \examples{ ps <- add_mdi(ps) }
34507009dddb20edd5c30f011d8fdc46a1b7fdd1
1754bdcc779ddea0d3ce59565d5df0ef51b6233f
/man/new_difftime.Rd
3143aa92f9a62fd2ebd2ba798bb721bcf2e2edaf
[]
no_license
Vald/lubridate
357b6ff5fdde6f9be7275f262d4cbb43036f1306
52f127ebcf8aa7f6ef6535f80468223ec32a70ec
refs/heads/master
2021-01-18T07:42:55.389632
2012-01-08T01:43:12
2012-01-08T01:43:12
2,876,319
0
0
null
null
null
null
UTF-8
R
false
false
1,773
rd
new_difftime.Rd
\name{new_difftime} \alias{new_difftime} \title{Create a difftime object.} \usage{ new_difftime(...) } \arguments{ \item{...}{a list of time units to be included in the difftime and their amounts. Seconds, minutes, hours, days, and weeks are supported.} } \value{ a difftime object } \description{ new_difftime creates a difftime object with the specified number of units. Entries for different units are cumulative. difftime displays durations in various units, but these units are estimates given for convenience. The underlying object is always recorded as a fixed number of seconds. For display and creation purposes, units are converted to seconds using their most common lengths in seconds. Minutes = 60 seconds, hours = 3600 seconds, days = 86400 seconds, weeks = 604800. Units larger than weeks are not used due to their variability. } \details{ difftime objects are durations. Durations are time spans measured in seconds. They measure the exact passage of time but do not always align with measurements made in larger units of time such as hours, months and years. This is because the length of larger time units can be affected by conventions such as leap years and Daylight Savings Time. lubridate provides a second class for measuring durations, the duration class. } \examples{ new_difftime(second = 90) # Time difference of 1.5 mins new_difftime(minute = 1.5) # Time difference of 1.5 mins new_difftime(second = 3, minute = 1.5, hour = 2, day = 6, week = 1) # Time difference of 1.869201 weeks new_difftime(hour = 1, minute = -60) # Time difference of 0 secs new_difftime(day = -1) # Time difference of -1 days } \seealso{ \code{\link{duration}}, \code{\link{as.duration}} } \keyword{chron} \keyword{classes}
e1c99695e5ee9b46c0f1ae20a5dee74fb7138164
442b054ceb7a3c5c2cc4e04e11be41002109e294
/man/diffproportion.CI.Rd
91b852a18ae4f85f2880d95578b91aa5645b240b
[]
no_license
cran/LearningStats
966b2a0d973443d186af8cdbe3c695fe3bce1c82
df5f6a1dfe0bd8922b2f5501054c963cf88e05b4
refs/heads/master
2023-04-09T11:15:51.318191
2021-04-21T06:10:05
2021-04-21T06:10:05
360,260,614
0
0
null
null
null
null
UTF-8
R
false
true
1,992
rd
diffproportion.CI.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/diffproportion.CI.R \name{diffproportion.CI} \alias{diffproportion.CI} \title{Large Sample Confidence Interval for the Difference between Two Population Proportions} \usage{ diffproportion.CI(x1, x2, n1, n2, conf.level) } \arguments{ \item{x1}{a single numeric value corresponding with either the proportion estimate or the number of successes of one of the samples.} \item{x2}{a single numeric value corresponding with either the proportion estimate or the number of successes of the other sample.} \item{n1}{a single positive integer value corresponding with one sample size.} \item{n2}{a single positive integer value corresponding with the other sample size.} \item{conf.level}{a single numeric value corresponding with the confidence level of the interval; must be a value in (0,1).} } \value{ A list containing the following components: \item{estimate}{a numeric value corresponding with the difference between the two sample proportions.} \item{CI}{a numeric vector of length two containing the lower and upper bounds of the confidence interval.} Independently on the user saving those values, the function provides a summary of the result on the console. } \description{ \code{diffproportion.CI} provides a pointwise estimation and a confidence interval for the difference between two population proportions. } \details{ Counts of successes and failures must be nonnegative and hence not greater than the corresponding numbers of trials which must be positive. All finite counts should be integers. If the number of successes are given, then the proportion estimate is computed. } \examples{ #Given the sample proportion estimate diffproportion.CI(0.3,0.4,100,120,conf.level=0.95) #Given the number of successes diffproportion.CI(30,48,100,120,conf.level=0.95) #Given in one sample the number of successes and in the other the proportion estimate diffproportion.CI(0.3,48,100,120,conf.level=0.95) }
3f827e9b58360a9e7dd531c22a6f737e69024e39
79b935ef556d5b9748b69690275d929503a90cf6
/man/intensity.dppm.Rd
3d5fee432fd54d900fb698767f871fb78b95659f
[]
no_license
spatstat/spatstat.core
d0b94ed4f86a10fb0c9893b2d6d497183ece5708
6c80ceb9572d03f9046bc95c02d0ad53b6ff7f70
refs/heads/master
2022-06-26T21:58:46.194519
2022-05-24T05:37:16
2022-05-24T05:37:16
77,811,657
6
10
null
2022-03-09T02:53:21
2017-01-02T04:54:22
R
UTF-8
R
false
false
730
rd
intensity.dppm.Rd
\name{intensity.dppm} \alias{intensity.dppm} \alias{intensity.detpointprocfamily} \title{Intensity of Determinantal Point Process Model} \description{Extracts the intensity of a determinantal point process model.} \usage{ \method{intensity}{detpointprocfamily}(X, \dots) \method{intensity}{dppm}(X, \dots) } \arguments{ \item{X}{ A determinantal point process model (object of class \code{"detpointprocfamily"} or \code{"dppm"}). } \item{\dots}{Ignored.} } \value{ A numeric value (if the model is stationary), a pixel image (if the model is non-stationary) or \code{NA} if the intensity is unknown for the model. } \author{ \adrian \rolf and \ege } \keyword{spatial} \keyword{models}
3df460b90f3bb029308af9e01bdc53dd2788d869
8ae4321d27dc0a0edd9d1017dd411a659e0afcf4
/DSO_545_Assignment4.R
59c2b395db83ddb42671a057c00ed398b84837bb
[]
no_license
alok-abhishek/data_Visualization_works
e621aef8375e679ec20eb3906db9bb222a1012d5
189aded0e787a8d3cba94e1b9c77fbd851eedb70
refs/heads/master
2020-03-17T22:17:17.236181
2018-07-01T03:11:33
2018-07-01T03:11:33
null
0
0
null
null
null
null
UTF-8
R
false
false
4,763
r
DSO_545_Assignment4.R
## Assignment 4...
## Maps state- and county-level census/election data with ggplot2.

library(stringr)
library(ggmap)
library(ggplot2)
library(dplyr)
library(lubridate)
library(maps)
library(mapproj)

# set the working directory (machine-specific path; adjust when running elsewhere)
setwd("C:/Users/alok_/OneDrive/MBA Academics/Second Semester/DSO-545 Statistical Computing and Data Visualization/Assignment/Assignment_4")

# Case# 1..
states_data = read.csv("states_data.csv")

# Q1: Which 5 States saw the largest population growth (as a percentage change,
# not absolute change) between 2010 and 2016?
states_data = states_data %>%
  mutate(percentage_growth = 100* ((estimate_2016 - census_2010)/census_2010))

states_data_percent_growth_top5 = states_data %>%
  top_n(5, percentage_growth) %>%
  arrange(-percentage_growth)
print(states_data_percent_growth_top5$state)

# Q2: Add two new variables, month and year, that indicate when each state was
# added to the union. Which month saw the most states added?
states_data = states_data %>%
  mutate(admission_date_month = month(mdy(admission_date))) %>%
  mutate(admission_date_year = year(mdy(admission_date)))

# month with the most admissions: count, sort descending, keep the top row
states_data %>%
  count(admission_date_month) %>%
  arrange(-n) %>%
  slice(1:1)

# bucket admission years into four display groups for the map legend
states_data = states_data %>%
  mutate(admission_date_year_group = ifelse(admission_date_year < 1800, "Before 1800",
                                     ifelse(admission_date_year < 1850, "1800 - 1849",
                                     ifelse(admission_date_year < 1900, "1850 - 1899", "After 1900"))))

states_maps = map_data("state")

# blank outline map of the lower 48
ggplot(states_maps, aes(x = long, y = lat, group = group)) +
  geom_polygon(fill = "white", color = "black")

# lowercase state names so they match map_data's "region" key
states_data$state = tolower(states_data$state)

states_data_maps = merge(x = states_maps, y = states_data,
                         by.x = "region", by.y = "state", all.x = TRUE)

# BUG FIX: merge() only appends ".x"/".y" suffixes when BOTH inputs share a
# column name; "order" exists only in the map data, so the merged column is
# named "order", and arranging by the non-existent "order.x" errored. The
# parallel California code below already used the correct "order".
states_data_maps = arrange(states_data_maps, group, order)

states_data_maps %>%
  ggplot(aes(x = long, y = lat, group = group, fill = admission_date_year_group)) +
  geom_polygon(color = "black") +
  scale_fill_manual(values = c("red", "darkorange", "yellow", "purple"), name = "") +
  theme_void() +
  coord_map() +
  ggtitle("States by Year Admitted to Union") +
  theme(plot.title = element_text(hjust = 0.5))

# Q4: Create population_density (population / total area) and recreate the
# choropleth with colors darkred and white. (Note: use estimate_2016 rather
# than census_2016 when computing population_density.)
states_data_maps = states_data_maps %>%
  mutate(population_density = (estimate_2016/total_area))

states_data_maps %>%
  ggplot(aes(x = long, y = lat, group = group, fill = population_density)) +
  geom_polygon(color = "black") +
  scale_fill_gradient(low = "white", high = "darkred", name = "") +
  theme_void() +
  coord_map() +
  ggtitle("Population Density (persons per sq. mile) by State") +
  theme(plot.title = element_text(hjust = 0.5))

# Case# 2...
california_counties = read.csv("california_counties.csv")

# extract the formation year for the bar chart of counties formed per year
california_counties = california_counties %>%
  mutate(admission_date_year = year(ymd(date_formed)))

california_counties %>%
  ggplot(aes(x = admission_date_year, fill = "Identity")) +
  geom_bar(color = "black") +
  scale_fill_manual(values = "lightblue", guide = FALSE)

# Q6: Create percent_democrat, the percentage of each county's population that
# voted Democrat in the most recent presidential election. Which county had
# the highest percentage?
california_counties = california_counties %>%
  mutate(percent_democrat = (100*(democrat/population))) %>%
  arrange(desc(percent_democrat))

high_cal_dem = california_counties %>%
  top_n(1, percent_democrat)
print("Highest Democrates voters")
print(high_cal_dem)

# label each county by the party with more votes
california_counties = california_counties %>%
  mutate(Party = ifelse(democrat >= republican, "Democrat", "Republican"))

california_map = map_data("county") %>%
  filter(region == "california")

# lowercase county names so they match map_data's "subregion" key
california_counties$county = tolower(california_counties$county)

california_counties_maps = merge(x = california_map, y = california_counties,
                                 by.x = "subregion", by.y = "county", all.x = TRUE)
california_counties_maps = arrange(california_counties_maps, group, order)

california_counties_maps %>%
  ggplot(aes(x = long, y = lat, group = group, fill = Party)) +
  geom_polygon(color = "black") +
  scale_fill_manual(values = c("blue", "red"), name = " ") +
  theme_void() +
  coord_map() +
  ggtitle("California Voters by County") +
  theme(plot.title = element_text(hjust = 0.5))
51ab6dd8a54930c2355fc4835a0f5a43f7add30e
d3e457b28d58a36084cb85b47aae44cdcd447be2
/Fuller_2015_scripts/Sim_thresh.R
0736df9c03eebd7f19c152bae72067470f91fb51
[]
no_license
afredston/dispersal-model-old
f2701e7651d0e473db9301ed558166695a97cf7d
16573c62af67b28e449def387449460561211566
refs/heads/master
2021-06-10T07:50:50.588920
2017-01-31T17:16:38
2017-01-31T17:16:38
null
0
0
null
null
null
null
UTF-8
R
false
false
1,410
r
Sim_thresh.R
# Sim_thresh.R
# Runs simulations in which there is threshold management; MPAs are not possible.
#
# NOTE(review): this script is sourced as part of a larger pipeline and relies
# on objects defined by the caller: w, patch, world, mpa.yes, mpa.no, burn_in,
# speeds, thresholds, effort_allocate, generations_total, generations_av,
# rownumber, summaries, and the functions startUp() and longRun().

# initializing the population with no pressure (no harvesting, no climate)
init <- rep(0, w)
# rows are world, columns are time; seed 50 individuals at patch 0.55
init[which(patch == 0.55)] = 50
MPA.start = rep(c(mpa.yes, mpa.no), length.out = length(world))

# burn-in: no climate speed (s = 0), no harvest (Fharv = NA)
output <- startUp(s = 0, mpa.yes = mpa.yes, mpa.no = mpa.no, burn_in = burn_in,
                  Fharv = NA, Fthresh = NA, init = init, MPA.start = MPA.start)
init.s <- output[[1]]
MPA.start <- output[[2]]

for (q in seq_along(speeds)) {
  for (j in seq_along(thresholds)) {
    # adding harvesting (Fharv = 1) under the j-th escapement threshold
    output <- startUp(s = 0, mpa.yes = mpa.yes, mpa.no = mpa.no, burn_in = burn_in,
                      Fharv = 1, Fthresh = thresholds[j], init = init.s,
                      MPA.start = MPA.start, effort_re_allocate = effort_allocate)
    init.h <- output[[1]]
    # BUG FIX: the original read "MPA.start <- output[[2]]Fharv=1" -- stray
    # text fused onto the statement, which is a syntax error. Fharv = 1 is
    # already passed explicitly to startUp() above and longRun() below.
    MPA.start <- output[[2]]

    # adding speed: run the full simulation at climate speed speeds[q]
    output <- longRun(s = speeds[q], mpa.yes = mpa.yes, mpa.no = mpa.no,
                      Fthresh = thresholds[j], Fharv = 1, init = init.h,
                      MPA.start = MPA.start, generations_total = generations_total,
                      generations_av = generations_av,
                      effort_re_allocate = effort_allocate)

    # save output: mean population, its sd, the speed, harvest rate (1),
    # and the threshold used for this run
    pop = output[[1]]
    pop.sd = output[[2]]
    summaries[rownumber[j, q], ] <- c(pop, pop.sd, speeds[q], 1,
                                      ifelse(exists("thresholds"), thresholds[j], NA))
  }
}

write.csv(summaries, file = paste("Data/Thresh_", Sys.Date(), ".csv", sep = ""))

#------------------------------------------------------------------------#
f57151074fbc9776caf20c5a0b4a5c374a579a83
ea492f927e78f9eef5e805bb1b884830c1a76f68
/ncTemplates/man/ctd_nc.Rd
8f1276453ce2e37097908e3d3e470b992eb0974f
[]
no_license
EOGrady21/netCDF
b1f2d7041fb3e556919953b9b54963ac812e00d3
742615c0b8422ca4c260065d7118fc216fe40c46
refs/heads/master
2022-04-27T16:23:19.705034
2019-04-17T16:46:40
2019-04-17T16:46:40
null
0
0
null
null
null
null
UTF-8
R
false
true
962
rd
ctd_nc.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ctd_nc.R
\name{ctd_nc}
\alias{ctd_nc}
\title{CTD netCDF template}
\usage{
ctd_nc(obj, upcast = NULL, metadata, filename = NULL)
}
\arguments{
\item{obj}{an odf object from oce which contains mctd data}

\item{upcast}{an optional odf object from oce containing the upcast data}

\item{metadata}{a csv file following the standard template which includes all necessary metadata}

\item{filename}{the desired name for the netCDF file produced, if left NULL the default will conform to BIO naming conventions}
}
\value{
netCDF file with variables temperature, conductivity, pressure, sigma theta, oxygen, salinity, station, latitude, longitude, oxygen voltage, fluoresence, par, scan, scan2 and QC flag variables for each data variable
}
\description{
CTD netCDF template
}
\examples{
file <- list.files('.', pattern = "CTD*...*.ODF")
metadata <- ('CTD_SAMPLE_METADATA.csv')
obj <- read.odf(file[1])
upcast <- read.odf(file[2])
ctd_nc(obj, upcast, metadata)
}
565d4c38b13b7e45a2c72b65075302bdc2b2935c
fb3f24a4af741e73144fc473c3dc317a0eb87cf0
/code/analysis/fit_posteriors_preamble.R
5806e8521c0e929168cd97d56d5c649cf8e4ba17
[]
no_license
littlecanargie/CtTrajectories
26893d318dccba3e32190aa01c0933850e939f75
7c359a8a175022544124a504d7e899cbf28e67f0
refs/heads/main
2023-02-07T19:38:54.423836
2020-12-31T10:10:47
2020-12-31T10:10:47
325,772,241
0
0
null
2020-12-31T10:10:30
2020-12-31T10:10:30
null
UTF-8
R
false
false
2,275
r
fit_posteriors_preamble.R
# fit_posteriors_preamble.R
# Prepares the individual-level Ct data and the prior parameters that are
# passed to the Stan trajectory model.
# Assumes the sourcing script has already defined: ct_dat_refined (data frame
# of Ct measurements), global_pars (contains "lod"), and current_pars
# (contains "symptom_treatment") -- TODO(review): confirm these are created
# by the caller before this file is sourced.

library(tidyverse)
library(lazyeval)
library(rstan)
library(shinystan)
library(purrr)
library(data.table)

options(mc.cores=parallel::detectCores())
source('code/utilities/utils_analysis.R')

# Store the number of people we've kept:
n_indiv <- length(unique(ct_dat_refined$Person.ID))

# Store the ids of the people we've kept in the refined set:
kept_ids <- data.frame(Person.ID=sort(unique(ct_dat_refined$Person.ID)), kept=1)

# Define a pared-down dataset for passing to Stan: short column names
# (id, id_clean, t, y, adjusted), missing Ct values replaced by the limit
# of detection (lod, from global_pars), missing adjustment flags set to 0,
# then trimmed by trim_negatives() (defined in utils_analysis.R).
indiv_data <- with(as.list(global_pars),{
  ct_dat_refined %>%
    select(Person.ID, Person.ID.Clean, Date.Index, CT.Mean, Adjusted) %>%
    rename(id=Person.ID) %>%
    rename(id_clean=Person.ID.Clean) %>%
    rename(t=Date.Index) %>%
    rename(y=CT.Mean) %>%
    rename(adjusted=Adjusted) %>%
    replace_na(list(y=lod, adjusted=0)) %>%
    trim_negatives(global_pars)
})

# Useful dataframe for mapping official ID to Stan ID:
id_map <- ct_dat_refined %>%
  group_by(Person.ID) %>%
  summarise(Person.ID.Clean=first(Person.ID.Clean)) %>%
  select(Person.ID, Person.ID.Clean) %>%
  rename(id=Person.ID) %>%
  rename(id_clean=Person.ID.Clean) %>%
  mutate(id_clean=as.character(id_clean))

# Useful dataframe for mapping official ID to symptoms
# (symptomatic = 1 if the person was ever recorded as "Yes", else 0):
symptom_map <- ct_dat_refined %>%
  mutate(Symptomatic=case_when(Symptomatic=="Yes"~1, TRUE~0)) %>%
  group_by(Person.ID) %>%
  summarise(Symptomatic=max(Symptomatic)) %>%
  rename(id=Person.ID) %>%
  rename(symptomatic=Symptomatic)

# Append symptoms onto indiv_data:
indiv_data <- left_join(indiv_data, symptom_map, by="id")

# Generate list of individuals with symptoms for passing to Stan:
# one 0/1 entry per person, ordered by id.
symp <- indiv_data %>%
  group_by(id) %>%
  slice(1) %>%
  select(id, symptomatic) %>%
  arrange(id) %>%
  pull(symptomatic)

# Store key prior parameters for passing to Stan.
# dp* priors are scaled off the limit of detection (lod); wp*/wr* appear to
# be durations capped at 14 and 30 -- TODO(review): confirm units (days)
# against the Stan model definition.
prior_pars <- with(as.list(current_pars),{
  list(
    symp=symp,
    tpsd=2,
    dpmean_prior=global_pars[["lod"]]/2,
    dpsd_prior=global_pars[["lod"]]/6,
    wpmax=14,
    wpmean_prior=14/2,
    wpsd_prior=14/6,
    wrmax=30,
    wrmean_prior=30/2,
    wrsd_prior=30/6,
    sigma_max=10,
    sigma_prior_scale=5,
    lambda=0.01,
    fpmean=1/log(10) # so that 90% of mass is <1 and 99% is <2
  )
})

# Sensitivity-analysis switch: 1 when symptomatic/asymptomatic individuals
# are modeled with separate parameters ("split"), 0 otherwise.
sensitivity_pars <- with(as.list(current_pars),{
  list(
    splitsymptoms=if_else(current_pars[["symptom_treatment"]]=="split",1,0)
  )
})
15b8661264aa98a62d36ef97c65401da52109cf0
a3294d2cf003c2ebebbd177cc94aa621747df650
/NetworkEvolution.R
f1174d52a12c59523e69a3fa3baca3b9b417e64d
[]
no_license
derek-corcoran-barrios/Rscraper
77d36800899125f391337a78149f767fd36c084a
a3068a82fdb3eaa4907c55aa49bf248ee7117f5b
refs/heads/master
2020-03-23T08:33:05.498662
2018-07-17T19:32:54
2018-07-17T19:32:54
141,333,417
0
0
null
null
null
null
UTF-8
R
false
false
315
r
NetworkEvolution.R
# NetworkEvolution.R
# Attach the CRAN dependency network to package first-release dates and
# order packages chronologically.

# Bootstrap pacman (installing it on first run), then let it attach the deps.
if (!require("pacman")) install.packages("pacman")
pacman::p_load(tidyverse, sqldf, igraph)

# Pre-built inputs: the CRAN dependency network snapshot and the table of
# package release dates.
NetworkCran <- read_rds("2018_06_23NetworkCran.rds")
pageDate <- read_rds("pageDateComplete.rds")

# Join release dates onto the network, keep the three columns of interest,
# and sort packages by their first appearance on CRAN.
NetworkDate <- left_join(NetworkCran, pageDate) %>%
  dplyr::select(Package, Depends, First_date) %>%
  arrange(First_date)
7b540a649558f212d814f8e57cd7903cc395c379
196d6a47ef55f1c68584ef443dcc611d1e21835d
/tests/testthat/test_amPie.R
27725d846ae6eaf51e280f1826f6119214a22d28
[]
no_license
datastorm-open/rAmCharts
452083b44658b99423ae790f176c856eae24f4c5
5f61ad704aa77fbe2af7fc6b90d96e7873615d69
refs/heads/master
2022-10-06T04:54:36.784313
2022-09-29T07:47:42
2022-09-29T07:47:42
38,245,790
42
18
null
2020-11-19T11:11:13
2015-06-29T12:10:46
JavaScript
UTF-8
R
false
false
666
r
test_amPie.R
# Unit tests for amPie(). Each test only verifies that the chart object is
# built without errors, warnings, or messages (expect_silent) for a given
# combination of options.
testthat::context("amPie")

# Load data
data("data_pie")

# Default call on the packaged example dataset.
testthat::test_that("Reference example", {
  testthat::expect_silent({
    amPie(data = data_pie)
  })
})

# Slice values hidden on the chart.
testthat::test_that("Hide values", {
  testthat::expect_silent({
    amPie(data = data_pie, show_values = FALSE)
  })
})

# 3D rendering via a non-zero depth.
testthat::test_that("3D pie", {
  testthat::expect_silent({
    amPie(data = data_pie, depth = 10)
  })
})

# Donut chart via a non-zero inner radius.
testthat::test_that("Donut", {
  testthat::expect_silent({
    amPie(data = data_pie, inner_radius = 50)
  })
})

# Several options combined in one call.
testthat::test_that("Other parameters", {
  testthat::expect_silent({
    amPie(data = data_pie, inner_radius = 50, depth = 10, show_values = FALSE)
  })
})
b106a1536afc5454e7ec0cd0d45dc3afcecbb50c
3bee634c1dd4863f3f3af53bed237f9320a8f9e9
/run_analysis.R
aaadd37dbd6bd8cab2fcc8d34f51654a8bf03522
[]
no_license
todrews/CleanSmartphoneData
7ba675c5e0de3958cf79bcbe362a5d5bb703f05a
8f3f7e3943ee6fc9fda29912d68ed07e52bca584
refs/heads/master
2021-01-01T06:44:51.031619
2015-06-21T17:31:15
2015-06-21T17:31:15
37,817,850
0
0
null
null
null
null
UTF-8
R
false
false
2,674
r
run_analysis.R
## run_analysis.R
## Builds a tidy summary of the UCI HAR smartphone dataset: merges the test
## and train sets, keeps the mean/std measurements, labels activities, and
## writes per-subject/per-activity averages to final_stats.txt.

## read in the test data
test_data <- read.table("./UCI HAR Dataset/test/X_test.txt")
test_data <- data.matrix(test_data)
test_activity <- read.table("./UCI HAR Dataset/test/Y_test.txt")
test_subject <- read.table("./UCI HAR Dataset/test/subject_test.txt")

## create the test data set by adding the activity and the subject
all_test_data <- cbind(test_subject, test_activity, test_data)

## read in the train data
train_data <- read.table("./UCI HAR Dataset/train/X_train.txt")
train_data <- data.matrix(train_data)
train_activity <- read.table("./UCI HAR Dataset/train/Y_train.txt")
train_subject <- read.table("./UCI HAR Dataset/train/subject_train.txt")

## create the train data set by adding the train activity and subject
all_train_data <- cbind(train_subject, train_activity, train_data)

## merge the data sets by adding the train data to the bottom of the test set
all_data <- rbind(all_test_data, all_train_data)

## add the variable labels as the names: Subject, Activity, then the feature
## names from features.txt
first_labels <- c("Subject", "Activity")
column_labels_table <- read.table("./UCI HAR Dataset/features.txt")
# IMPROVED: as.character() on the whole column yields a character vector;
# the original lapply(..., as.character) produced a list (which only worked
# because names<- coerces it). Resulting column names are identical.
column_labels <- as.character(column_labels_table$V2)
all_column_labels <- c(first_labels, column_labels)
all_data_frame <- data.frame(all_data)
colnames(all_data_frame) <- all_column_labels

## extract only the mean and standard deviation measurements, along with the
## subject and activity columns (names(...) is already character, so no
## as.character() conversion is needed)
keep_cols <- grepl("mean", names(all_data_frame)) |
  grepl("std", names(all_data_frame)) |
  grepl("Subject", names(all_data_frame)) |
  grepl("Activity", names(all_data_frame))
data_mean_std <- all_data_frame[, keep_cols]

## sort the data by subject and activity
df_final <- data_mean_std[order(data_mean_std$Subject, data_mean_std$Activity), ]

## change the activity codes (1-6) to meaningful names.
## IMPROVED: a single named-vector lookup replaces six sequential
## replacements; the resulting character column is the same.
activity_names <- c("1" = "WALKING",
                    "2" = "WALKING_UPSTAIRS",
                    "3" = "WALKING_DOWNSTAIRS",
                    "4" = "SITTING",
                    "5" = "STANDING",
                    "6" = "LAYING")
df_final$Activity <- unname(activity_names[as.character(df_final$Activity)])

## compute the average of each variable for each activity and each subject
df_final_by_subject_by_activity <- aggregate(. ~ Subject + Activity,
                                             data = df_final, FUN = mean)

## present the final data ordered by subject
final_stats <- df_final_by_subject_by_activity[order(df_final_by_subject_by_activity$Subject), ]

## write the final data
write.table(final_stats, "final_stats.txt", row.names = FALSE)
a78b5facfcb38b8be2b02d158a09f909c2da28da
5857a9a943eb95eccc6bf11273ccd6468e8c9e63
/photometric_redshift_teddy/process_data.R
c6c7f22eb285f662aafe41c3a6bf05047475343b
[ "MIT" ]
permissive
Mr8ND/cdetools_applications
068f79ee159012eb9a4398b31f3489675b3cf0a9
a5ffa033bc304f74b485b783f74a047f726ec468
refs/heads/master
2020-06-11T13:03:54.303821
2020-03-29T16:01:06
2020-03-29T16:01:06
193,974,191
1
0
null
null
null
null
UTF-8
R
false
false
965
r
process_data.R
library(rhdf5)

# Folder holding the raw Teddy input files.
datadir <- "Teddy/"

#### Covariate/response extraction ------------------------------------------
# Read one Teddy file (the 7 header lines are skipped) and return a list with
#   x = columns 8:12 as a numeric matrix (the covariates)
#   y = column 7 as a vector (the response)
process_data <- function(fname) {
  raw <- read.table(fname, skip = 7, header = FALSE)
  covariates <- as.matrix(raw[, 8:12])
  response <- as.vector(raw[, 7])
  list(x = covariates, y = response)
}

#### Build the training (teddy_A) and testing (teddy_B) sets ----------------
train_data <- process_data(paste0(datadir, "teddy_A"))
x_train <- train_data$x
y_train <- train_data$y

test_data <- process_data(paste0(datadir, "teddy_B"))
x_test <- test_data$x
y_test <- test_data$y

#### Prepare the output file ------------------------------------------------
datadir_out <- 'data/'
fname <- paste0(datadir_out, "processed.hdf5")
# Start from a clean slate: h5createFile() expects the file not to exist.
if (file.exists(fname)) {
  file.remove(fname)
}

#### Write the four arrays into a fresh HDF5 file ---------------------------
h5createFile(fname)
h5write(x_train, file = fname, name = "/x_train")
h5write(y_train, file = fname, name = "/y_train")
h5write(x_test, file = fname, name = "/x_test")
h5write(y_test, file = fname, name = "/y_test")
527c901d6a3b71cc5b2d579a76135817e4e51e3e
decd025fc88fa50fb3b22f431310641b184c8f9d
/CST383/hw5/hw5_knn.R
2bfbae128ed110f09acf51a6b972b627a722e536
[]
no_license
mlisec/cst383
6697edba4fc082920ac99eb933e656c63cc8e58f
9655b73c941ef89f53b1e3c2c8d046a706dc4678
refs/heads/master
2020-06-28T03:55:27.864007
2019-08-12T16:52:27
2019-08-12T16:52:27
200,136,723
0
0
null
null
null
null
UTF-8
R
false
false
5,138
r
hw5_knn.R
# # k nearest neighbor with applications to college data # # Instructions: replace the 'YOUR CODE HERE' with your own code. # There are five place where you need to do this. # DO NOT modify the code in any other way! # # a simple kNN classifier # # return a knn classifier # dat - training data; a data frame with only numeric data # labels - a vectors of labels with length equal to the number of rows in dat knn_create = function(dat, labels) { return (list(dat=dat, labels=labels)) } # return the mode of the given vector x # (the mode is the value that appears most often) # In case of tie return any of the "most appearing" values. # # example: vmode(c("a","b","a","c")) should return "a" vmode = function(x) { # YOUR CODE HERE (1) y = table(x) y = sort(-y) z = names(y)[[1]] return(z) # HINT: there are lots of ways to do this. Consider # using R function 'table' or 'tabular' } # given a numeric vector x, and a string vector 'labels' of the # same length as x, return the vector containing the labels that # correspond to the k smallest values in vector x. # # example: # if x = c(3.1, 2.3, 0.1, 4.2) and # labels = c("a", "b", "c", "d") and # k = 2 # then smallest_labels(x, labels, k) should return c("c", "b") smallest_labels = function(x, labels, k) { # YOUR CODE HERE (2) y = as.data.frame(x, col.names = labels) z = order(y) l = c() for(i in 1:k){ l = c(l, labels[z[i]]) } return(l) # HINT: R function 'order' is very handy for this } # given two data frames x,y of numeric data, having the # same column names, return a matrix of m rows and n columns # (where nrow(x) = m and nrow(y) = n) where element # m[i,j] gives the euclidean distance between the ith row of x and # the jth row of y # # example: # if x = data.frame(a=1:2, b=2:3) # y = data.frame(a=1:3, b=2:4) # then the returned matrix should have 2 rows and 3 columns, # and the value of the first row and third column should # be the distance between the first row of x and the third # row of y. 
distances_between = function(x,y) { m = nrow(x) n = nrow(y) # YOUR CODE HERE (3) df = rbind(x, y) d = as.matrix(dist(df)) z = matrix(nrow = m, ncol = n) for(i in 1:m){ for(j in 1:n){ z[i,j] = d[i,j+m] } } return(z) # HINT: rbind the two data frames together, get an # (m+n) by (m+n) distance matrix using R function 'dist', # and then get the rows and columns you need from the matrix } # return the predicted labels of x using a k nearest neighbor classifier # knn - a knn object # x - a data frame with the same number columns as the knn object data # k - the k in knn; a positive odd number should be used # the returned value is a vector of labels, the same length as the number # of rows in x knn_predict = function(knn, x, k = 5) { # YOUR CODE HERE (4) y = distances_between(x, knn$dat) z = c() for(i in 1:nrow(y)){ s = smallest_labels(y[i,], knn$labels, k) v = vmode(s) z = c(z, v) } return(z) # HINT: first use your function distances_between to get # a matrix of the distances from each row of x to each row # of knn$dat. Next, apply your functions smallest_labels # and vmode to each row of the matrix. This will give the # most common label of the k nearest neighbors, for each # row of x. # Return your result. } # Using a knn classifier, predict which colleges are private # using the specified feature set and value k. # Return a confusion matrix showing the result of classification. 
predict_private = function(dat, features, k) { # # prepare the training and test sets # # make the college names the row names rownames(dat) = dat[,1] dat = dat[,-1] # scale all features dat[,2:18] = scale(dat[,2:18]) # randomize data before creating training and test sets set.seed(123) dat = dat[sample(1:nrow(dat)),] # number of training examples n = floor(nrow(dat)*.80) # feature vectors training and testing data fvs = dat[,features] tr_dat = fvs[1:n,] te_dat = fvs[-(1:n),] # labels for training and test data labels = dat[,"Private"] tr_labels = labels[1:n] te_labels = labels[-(1:n)] # # run the classifier and build confusion matrix # # get predictions knn = knn_create(tr_dat, tr_labels) # YOUR CODE HERE (5) predicts = knn_predict(knn, te_dat, k) # HINT: call function knn_predict and assign the result to variable 'predicts'. # Your call should provide the knn object, the test data set, and the parameter k # create confusion matrix actuals = te_labels tbl = table(actuals, predicts) return(tbl) } # # the following comments give an example of how you can test your function # (do not leave your tests in this file!) # dat = read.csv("https://raw.githubusercontent.com/grbruns/cst383/master/College.csv") features = c("PhD", "Personal") predict_private(dat, features, k=3) k = 3
66bae3e8382dd4e2796dc613914f18685ef1e8b4
f4c82fdbeed7de135dbfbf5a5da1ea8790dfdcd1
/2021.07.15-gas_prices/scripts/gas_wrangle.R
1b29a733655f6c3197eca66ea45ea8c7c7f6ab77
[ "MIT" ]
permissive
markjrieke/thedatadiary
917e2728729c2240ad76250e9179f63026fe7ac1
355425a07652bb9c19a655b657437af5157fb485
refs/heads/main
2023-08-26T09:57:32.529806
2021-11-01T18:54:22
2021-11-01T18:54:22
329,080,502
0
0
MIT
2021-10-04T14:25:03
2021-01-12T18:45:02
R
UTF-8
R
false
false
4,001
r
gas_wrangle.R
# Gas-price analysis: pull monthly CPI from FRED, inflation-adjust weekly
# EIA gasoline prices to June-2021 dollars, and render two plots comparing
# prices across the Trump and Biden presidencies.

# libraries ----
library(tidyverse)
library(lubridate)

# theme ----
# NOTE(review): dd_theme, dd_blue, dd_blue_dark, and dd_red used below are
# presumably defined by this sourced file -- not visible here, confirm
source("dd_theme_elements/dd_theme_elements.R")

# cpi
# monthly CPI (CPIAUCSL) downloaded directly from a FRED graph export URL
f_cpi <- read_csv("https://fred.stlouisfed.org/graph/fredgraph.csv?bgcolor=%23e1e9f0&chart_type=line&drp=0&fo=open%20sans&graph_bgcolor=%23ffffff&height=450&mode=fred&recession_bars=on&txtcolor=%23444444&ts=12&tts=12&width=1168&nt=0&thu=0&trc=0&show_legend=yes&show_axis_titles=yes&show_tooltip=yes&id=CPIAUCSL&scale=left&cosd=1947-01-01&coed=2021-06-01&line_color=%234572a7&link_values=false&line_style=solid&mark_type=none&mw=3&lw=2&ost=-99999&oet=99999&mma=0&fml=a&fq=Monthly&fam=avg&fgst=lin&fgsnd=2020-02-01&line_index=1&transformation=lin&vintage_date=2021-07-18&revision_date=2021-07-18&nd=1947-01-01") %>%
  rename(date = DATE,
         cpi = CPIAUCSL) %>%
  arrange(date)

# cpi as of 6/1/2021 (the last row after sorting by date)
v_cpi <- f_cpi %>%
  slice_tail(n = 1) %>%
  pull(cpi)

# get cpi adjusted to each month
# cpi_adj is the multiplier that converts each month's dollars into
# June-2021 dollars
f_cpi <- f_cpi %>%
  mutate(cpi_adj = v_cpi/cpi)

# gas prices merged w/cpi
# original data source:
# read_csv("https://www.eia.gov/dnav/pet/hist/LeafHandler.ashx?n=pet&s=emm_epm0_pte_nus_dpg&f=w")
# Weekly prices are collapsed to one observation per month (the first week
# observed in each month via floor_date + distinct), joined to CPI, and
# pivoted long so nominal ("price") and adjusted ("gas_adj") series sit in
# one gas_price column keyed by "category".
f_gas <- read_csv("2021.07.15-gas_prices/data/Weekly_U.S._All_Grades_All_Formulations_Retail_Gasoline_Prices.csv",
                  skip = 4) %>%
  rename(date = `Week of`,
         price = `Weekly U.S. All Grades All Formulations Retail Gasoline Prices Dollars per Gallon`) %>%
  mutate(date = mdy(date)) %>%
  arrange(date) %>%
  mutate(date = floor_date(date, unit = "month")) %>%
  distinct(date, .keep_all = TRUE) %>%
  left_join(f_cpi, by = "date") %>%
  mutate(gas_adj = price * cpi_adj) %>%
  select(-starts_with("cpi")) %>%
  pivot_longer(cols = c(price, gas_adj),
               values_to = "gas_price",
               names_to = "category")

# gas prices unmerged
# the raw weekly series, kept at weekly resolution for the first plot
f_gas_weekly <- read_csv("2021.07.15-gas_prices/data/Weekly_U.S._All_Grades_All_Formulations_Retail_Gasoline_Prices.csv",
                         skip = 4) %>%
  rename(date = `Week of`,
         price = `Weekly U.S. All Grades All Formulations Retail Gasoline Prices Dollars per Gallon`) %>%
  mutate(date = mdy(date)) %>%
  arrange(date)

# plots ----
# plot 1: nominal weekly prices since the start of 2021
f_gas_weekly %>%
  filter(date >= "2021-01-01") %>%
  ggplot(aes(x = date, y = price)) +
  geom_line(color = dd_blue_dark, size = 1) +
  dd_theme +
  labs(title = "Prices at the Pump",
       subtitle = "Price of Gasoline since Biden assumed the Presidency",
       caption = "US Energy Information Administration<br>Weekly US All Grades All Formulations Retail Gasoline Prices",
       x = NULL,
       y = NULL) +
  scale_y_continuous(labels = scales::dollar_format()) +
  scale_x_date(breaks = "1 month",
               labels = scales::date_format("%b"))

ggsave(file = "2021.07.15-gas_prices/p1.png",
       width = 9,
       height = 6,
       units = "in",
       dpi = 500)

# plot 2: inflation-adjusted monthly prices since 2017, with shaded
# rectangles marking the two presidencies split at inauguration day
f_gas %>%
  filter(category == "gas_adj",
         date >= "2017-01-01") %>%
  ggplot(aes(x = date, y = gas_price)) +
  geom_line(color = dd_blue_dark, size = 1) +
  annotate(geom = "rect",
           xmin = as.Date(-Inf),
           xmax = as.Date("2021-01-20"),
           ymin = -Inf,
           ymax = Inf,
           fill = dd_red,
           alpha = 0.2) +
  annotate(geom = "rect",
           xmin = as.Date("2021-01-20"),
           xmax = as.Date(Inf),
           ymin = -Inf,
           ymax = Inf,
           fill = dd_blue,
           alpha = 0.2) +
  dd_theme +
  labs(title = "Prices at the Pump",
       subtitle = "Inflation Adjusted Price of Gasoline during the <span style=color:'#D75565'>**Trump**</span> and <span style=color:'#5565D7'>**Biden**</span> Presidencies",
       x = NULL,
       y = NULL,
       caption = "US Energy Information Administration<br>Weekly US All Grades All Formulations Retail Gasoline Prices") +
  scale_y_continuous(labels = scales::dollar_format())

ggsave("2021.07.15-gas_prices/p2.png",
       width = 9,
       height = 6,
       units = "in",
       dpi = 500)

# test commit written by me
d0a968890c79318e0d12afab1dcd4a9cc258347c
c24d1fd18c1cd08287ec718158c47f12a9b801b3
/Scripts/10.LOLA/Scripts/0.LOLA_Learning.R
37d265f728dc559f385843057ca58d02c43014b7
[]
no_license
Christensen-Lab-Dartmouth/PCNS_5hmC_06112020
68b516905bd12827fe476fd7f1b4c134f3bf0b48
f89e8fbbae365369aa90e6a16827d1da96515abc
refs/heads/master
2022-11-07T19:01:36.180358
2020-06-11T13:35:14
2020-06-11T13:35:14
271,554,640
0
1
null
null
null
null
UTF-8
R
false
false
1,361
r
0.LOLA_Learning.R
#LOLA #subset to appropriate cell lines #universe is top5% #collection of interest -> filter #Load LOLA #Can figure out which databases may be most useful to you using this #http://lolaweb.databio.org/ #Go to website, laod universe and region list. Go to tables and it will give you most relevant databases with info on them library(LOLA) #Set path where LOLA region files are located dbPath = system.file("extdata", "hg19", package="LOLA") #Load the region files regionDB = loadRegionDB(dbPath) regionDB = loadRegionDB("LOLACore/hg19") #Annotation names(regionDB) #Dictinoary: dbLocation is where your region files are #collectionAnno is what you are interestedin looking at like TFBS or DHS and where the fiels are #regionGRL is your list of region (granges file) #Load data and universe #Both need to be GRANGES objects data("sample_input", package="LOLA") # load userSets data("sample_universe", package="LOLA") # load userUniverse #How you run the test # runLOLA tests for pairwise overlap between each user set and each # region set in regionDB. It then uses a Fisher’s exact test to assess # significance of the overlap. The results are a data.table with several columns: locResults = runLOLA(userSets, userUniverse, regionDB, cores=1) setwd("/Users/nasimazizgolshani/Dropbox/christensen/PCNS_Analysis/PCNS/Script/16.LOLA") library(simpleCache)
89ee602b0818fb6cf3a635059f5a0331f8ea2953
b4d3e44e7da647defaf290cc219a0011e9bab5f9
/man/read_apsim.Rd
e2f4b91650ed14225614948f28d54c0a4e47b8ab
[]
no_license
femiguez/apsimx
81313570fbbbb915ba1519ad0cd95ac0d38dc4df
7df3b0a34d273e8035dbe22d457ed3911d07d7bc
refs/heads/master
2023-08-10T13:49:57.777487
2023-05-22T14:53:51
2023-05-22T14:53:51
194,395,452
34
21
null
2023-07-28T15:27:39
2019-06-29T10:54:27
R
UTF-8
R
false
true
1,130
rd
read_apsim.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/apsim_classic.R \name{read_apsim} \alias{read_apsim} \title{Read APSIM generated .out files} \usage{ read_apsim( file = "", src.dir = ".", value = c("report", "all"), date.format = "\%d/\%m/\%Y", silent = FALSE ) } \arguments{ \item{file}{file name} \item{src.dir}{source directory where file is located} \item{value}{either \sQuote{report} (data.frame), \sQuote{user-defined} or \sQuote{all} (list)} \item{date.format}{format for adding \sQuote{Date} column} \item{silent}{whether to issue warnings or suppress them} } \value{ This function returns a data frame with APSIM output or a list if value equals \sQuote{all} } \description{ read \sQuote{output} databases created by APSIM runs (.out and .sim). One file at a time. } \details{ Read APSIM generated .out files } \examples{ \dontrun{ extd.dir <- system.file("extdata", package = "apsimx") maize.out <- read_apsim("Maize", src.dir = extd.dir, value = "report") millet.out <- read_apsim("Millet", src.dir = extd.dir, value = "report") } } \seealso{ \code{\link{read_apsim_all}} }
a8ecc280a49857fae96f4f0c4e09cc5a55a5ec38
cdf80ced11c2453eeeccaca52718ea0f9591a178
/cachematrix.R
8d40891d262f78968799586a33adc52684028153
[]
no_license
puruparu/ProgrammingAssignment2
9c8b2711c7fcdadfddaba5efceba89243f13c63b
32e9744825307eb88141d451e2f70f81abb216e4
refs/heads/master
2022-11-24T12:31:44.989513
2020-07-28T08:45:48
2020-07-28T08:45:48
282,711,922
0
0
null
2020-07-26T18:43:45
2020-07-26T18:43:44
null
UTF-8
R
false
false
978
r
cachematrix.R
## makeCacheMatrix: This function creates a special "matrix" object that can cache its inverse. ## Write a short comment describing this function makeCacheMatrix <- function(x = matrix()) { inv<-NULL set<-function(y){ x<<-y inv<<-NULL } get<-function()x setinverse<-function(inverse) inv<<-inverse getinverse<-function() inv list(set=set,get=get,setinverse=setinverse,getinverse=getinverse) } ##cacheSolve: This function computes the inverse of the special "matrix" returned by makeCacheMatrix above. ##If the inverse has already been calculated (and the matrix has not changed), then the cachesolve should retrieve the inverse from the cache. ## Write a short comment describing this function cacheSolve <- function(x, ...) { inv<-x$getinverse() if(!is.null(inv)){ message("getting cached data") return(inv) } mat<-x$get() inv<-solve(data,...) x$setinverse(inv) inv ## Return a matrix that is the inverse of 'x' }