content
large_stringlengths
0
6.46M
path
large_stringlengths
3
331
license_type
large_stringclasses
2 values
repo_name
large_stringlengths
5
125
language
large_stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
4
6.46M
extension
large_stringclasses
75 values
text
stringlengths
0
6.46M
# TrainTravel.R
#
# Builds a station-to-station travel-time edge list for NYC subway lines from
# GTFS text files (stop_times.txt, a modified stops file, trips.txt) and
# exports it as TrainTravel.csv for graphing (e.g. with networkx).
#
# NOTE(review): the original line structure of this script was lost in
# transit, which left everything after the first '#' on each physical line
# commented out. This version restores the statements with their original
# comments and fixes minor idiom issues ('=' assignment, scalar loops).

library(dplyr)

# ---- Load GTFS inputs ------------------------------------------------------
setwd("/home/ewahmed/Desktop/SubwayData/")

# Stop-by-stop schedule for every trip.
stop_times <- read.table("stop_times.txt", header = TRUE, sep = ",",
                         fill = TRUE, quote = "", row.names = NULL,
                         stringsAsFactors = FALSE)

# Stop names (pre-modified copy of stops.txt).
stops <- read.table("modifiedstops2.txt", header = TRUE, sep = ",",
                    fill = TRUE, quote = "", row.names = NULL,
                    stringsAsFactors = FALSE)

# Drop stop_headsign, pickup_type, drop_off_type and shape_dist_traveled.
stop_times <- data.frame(stop_times[, c("trip_id", "arrival_time",
                                        "departure_time", "stop_id",
                                        "stop_sequence")])

# Convert clock strings to date-times so arrival/departure can be subtracted.
stop_times$arrival_time <- as.POSIXct(stop_times$arrival_time, format = "%H:%M:%S")
stop_times$departure_time <- as.POSIXct(stop_times$departure_time, format = "%H:%M:%S")

# Attach the stop names to the timing rows.
stop_times_names <- inner_join(stop_times, stops)

# Trip metadata; columns 1 and 3 carry the trip id and the train (route).
trips <- read.table("trips.txt", header = TRUE, sep = ",", fill = TRUE,
                    quote = "", row.names = NULL, stringsAsFactors = FALSE)
trips <- data.frame(trips[, c(1, 3)])
trains_info <- inner_join(trips, stop_times_names)

# Service day is encoded in characters 10-12 of the trip id.
# (The 3 train has no trips flagged WKD in this feed.)
trains_info$day <- substr(trains_info$trip_id, 10, 12)

# Travel time: departure at stop n minus arrival at stop n-1.
trains_info$departure_time <- as.POSIXct(trains_info$departure_time, format = "%H:%M:%S")
trains_info <- mutate(trains_info, duration = departure_time - lag(arrival_time))

# Keep only weekday departures between 09:00 and 18:00.
trains_info$departure_time <- strftime(trains_info$departure_time, format = "%H:%M:%S")
trains_info <- trains_info %>%
  mutate(is_good = ifelse(departure_time <= "18:00:00" & departure_time > "09:00:00", 1, 0))
trains_info <- filter(trains_info, is_good == 1 & day == "WKD")

# Reduce to the columns we need and give them readable names.
trains_info$trip_id <- NULL
trains_info <- data.frame(trains_info[, c(1, 6, 4, 5, 8)])
colnames(trains_info) <- c("train", "station", "stop_id", "stop", "time_travel")

# Build per-train stop codes in the form B01, B02, ...
trains_info$stop <- paste0("0", trains_info$stop)
trains_info$train_stop <- paste0(trains_info$train, trains_info$stop)

# Strip the trailing N/S direction suffix from the stop ids.
trains_info$stop_id <- substr(trains_info$stop_id, 1, 3)

# ---- Extract one representative trip (row range) per line ------------------
# NOTE(review): these row indices are tied to this exact GTFS snapshot; they
# will silently select the wrong rows if the input files change.
trip_rows <- list(
  25894:25931,    # 1
  36200:36251,    # 2
  41064:41097,    # 3
  551:585,        # 4
  4701:4726,      # 5
  10454:10491,    # 6
  10763:10795,    # 6X
  19960:19980,    # 7
  20128:20138,    # 7X
  86703:86739,    # A
  91513:91549,    # B
  94855:94894,    # C
  99301:99336,    # D
  46019:46038,    # E
  49821:49865,    # F
  57467:57470,    # FS (shuttle)
  25278:25279,    # GS (shuttle)
  57899:57919,    # G
  60372:60376,    # H (shuttle)
  60819:60848,    # J
  64186:64209,    # L
  69305:69340,    # M
  73407:73438,    # N
  77073:77107,    # Q
  81046:81090,    # R
  103409:103430,  # SI
  62521:62541     # Z
)
trains_info <- do.call(rbind, lapply(trip_rows, function(rows) trains_info[rows, ]))

# The first stop of a trip has no predecessor, so its lagged duration is
# meaningless (it can even be hugely negative across trip boundaries); zero it.
trains_info[trains_info$stop == "01", ]$time_travel <- 0

# Reorder and rename so stop_id can be joined against the line-name table.
trains_info <- data.frame(trains_info[, c(1, 6, 3, 2, 5, 4)])
names(trains_info) <- c("train", "train_stop", "stop_id", "station_name",
                        "time_travel", "stop")

# ---- Line names ------------------------------------------------------------
setwd("/home/ewahmed/subway-flow/")
linenames <- read.table("new_google_data.txt", header = TRUE, sep = ",",
                        fill = TRUE, quote = "", row.names = NULL,
                        stringsAsFactors = FALSE)

# Keep stop id + line name and strip stray embedded quote characters.
# gsub is already vectorised, so no sapply wrapper is needed.
linenames <- data.frame(linenames[, c(2, 3)])
names(linenames) <- c("stop_id", "line_name")
linenames$stop_id <- gsub("\"", "", linenames$stop_id)
linenames$line_name <- gsub("\"", "", linenames$line_name)

# Attach line names; stops with no match fall back to the train id.
trains_linenames <- left_join(trains_info, linenames)
missing_name <- is.na(trains_linenames$line_name)
trains_linenames$line_name[missing_name] <- trains_linenames$train[missing_name]

# Station id: the first stop_id seen for each (station, line) pair.
firstids <- trains_linenames %>%
  group_by(station_name, line_name) %>%
  summarise(first(stop_id))
trains_linenames <- inner_join(trains_linenames, firstids)
names(trains_linenames) <- c("train", "train_stop", "stop_id", "station",
                             "time_travel", "stop", "line_name", "station_id")
seperate_linenames <- trains_linenames  # kept for downstream use (sic)

# Pair each stop with its predecessor to form (from, to) edges.
trains_linenames <- mutate(trains_linenames, train_stop2 = lag(train_stop))
trains_linenames <- mutate(trains_linenames, station2 = lag(station))
trains_linenames <- mutate(trains_linenames, station_id2 = lag(station_id))

# First stops have no "from" side; mark them NA and drop incomplete rows.
trains_linenames[trains_linenames$stop == "01", ]$station <- NA
trains_linenames <- trains_linenames[complete.cases(trains_linenames), ]

# Final edge-list layout.
trains_linenames <- data.frame(trains_linenames[, c(1, 11, 10, 8, 4, 5)])
names(trains_linenames) <- c("Train", "FromStationID", "FromStation",
                             "ToStationID", "ToStation", "TimeTravel")

# Suffix the station names with their ids (easier to graph through networkx;
# skip this step if any other graphing tool is being used).
trains_linenames$FromStation <- paste0(trains_linenames$FromStation,
                                       trains_linenames$FromStationID)
trains_linenames$ToStation <- paste0(trains_linenames$ToStation,
                                     trains_linenames$ToStationID)

# Export file as TrainTravel.csv.
write.csv(trains_linenames, "/home/ewahmed/subway-flow/TrainTravel.csv")
/TrainTravel.R
no_license
eimanahmed/Subway-Flow
R
false
false
8,475
r
# TrainTravel.R
#
# Builds a station-to-station travel-time edge list for NYC subway lines from
# GTFS text files (stop_times.txt, a modified stops file, trips.txt) and
# exports it as TrainTravel.csv for graphing (e.g. with networkx).
#
# NOTE(review): the original line structure of this script was lost in
# transit, which left everything after the first '#' on each physical line
# commented out. This version restores the statements with their original
# comments and fixes minor idiom issues ('=' assignment, scalar loops).

library(dplyr)

# ---- Load GTFS inputs ------------------------------------------------------
setwd("/home/ewahmed/Desktop/SubwayData/")

# Stop-by-stop schedule for every trip.
stop_times <- read.table("stop_times.txt", header = TRUE, sep = ",",
                         fill = TRUE, quote = "", row.names = NULL,
                         stringsAsFactors = FALSE)

# Stop names (pre-modified copy of stops.txt).
stops <- read.table("modifiedstops2.txt", header = TRUE, sep = ",",
                    fill = TRUE, quote = "", row.names = NULL,
                    stringsAsFactors = FALSE)

# Drop stop_headsign, pickup_type, drop_off_type and shape_dist_traveled.
stop_times <- data.frame(stop_times[, c("trip_id", "arrival_time",
                                        "departure_time", "stop_id",
                                        "stop_sequence")])

# Convert clock strings to date-times so arrival/departure can be subtracted.
stop_times$arrival_time <- as.POSIXct(stop_times$arrival_time, format = "%H:%M:%S")
stop_times$departure_time <- as.POSIXct(stop_times$departure_time, format = "%H:%M:%S")

# Attach the stop names to the timing rows.
stop_times_names <- inner_join(stop_times, stops)

# Trip metadata; columns 1 and 3 carry the trip id and the train (route).
trips <- read.table("trips.txt", header = TRUE, sep = ",", fill = TRUE,
                    quote = "", row.names = NULL, stringsAsFactors = FALSE)
trips <- data.frame(trips[, c(1, 3)])
trains_info <- inner_join(trips, stop_times_names)

# Service day is encoded in characters 10-12 of the trip id.
# (The 3 train has no trips flagged WKD in this feed.)
trains_info$day <- substr(trains_info$trip_id, 10, 12)

# Travel time: departure at stop n minus arrival at stop n-1.
trains_info$departure_time <- as.POSIXct(trains_info$departure_time, format = "%H:%M:%S")
trains_info <- mutate(trains_info, duration = departure_time - lag(arrival_time))

# Keep only weekday departures between 09:00 and 18:00.
trains_info$departure_time <- strftime(trains_info$departure_time, format = "%H:%M:%S")
trains_info <- trains_info %>%
  mutate(is_good = ifelse(departure_time <= "18:00:00" & departure_time > "09:00:00", 1, 0))
trains_info <- filter(trains_info, is_good == 1 & day == "WKD")

# Reduce to the columns we need and give them readable names.
trains_info$trip_id <- NULL
trains_info <- data.frame(trains_info[, c(1, 6, 4, 5, 8)])
colnames(trains_info) <- c("train", "station", "stop_id", "stop", "time_travel")

# Build per-train stop codes in the form B01, B02, ...
trains_info$stop <- paste0("0", trains_info$stop)
trains_info$train_stop <- paste0(trains_info$train, trains_info$stop)

# Strip the trailing N/S direction suffix from the stop ids.
trains_info$stop_id <- substr(trains_info$stop_id, 1, 3)

# ---- Extract one representative trip (row range) per line ------------------
# NOTE(review): these row indices are tied to this exact GTFS snapshot; they
# will silently select the wrong rows if the input files change.
trip_rows <- list(
  25894:25931,    # 1
  36200:36251,    # 2
  41064:41097,    # 3
  551:585,        # 4
  4701:4726,      # 5
  10454:10491,    # 6
  10763:10795,    # 6X
  19960:19980,    # 7
  20128:20138,    # 7X
  86703:86739,    # A
  91513:91549,    # B
  94855:94894,    # C
  99301:99336,    # D
  46019:46038,    # E
  49821:49865,    # F
  57467:57470,    # FS (shuttle)
  25278:25279,    # GS (shuttle)
  57899:57919,    # G
  60372:60376,    # H (shuttle)
  60819:60848,    # J
  64186:64209,    # L
  69305:69340,    # M
  73407:73438,    # N
  77073:77107,    # Q
  81046:81090,    # R
  103409:103430,  # SI
  62521:62541     # Z
)
trains_info <- do.call(rbind, lapply(trip_rows, function(rows) trains_info[rows, ]))

# The first stop of a trip has no predecessor, so its lagged duration is
# meaningless (it can even be hugely negative across trip boundaries); zero it.
trains_info[trains_info$stop == "01", ]$time_travel <- 0

# Reorder and rename so stop_id can be joined against the line-name table.
trains_info <- data.frame(trains_info[, c(1, 6, 3, 2, 5, 4)])
names(trains_info) <- c("train", "train_stop", "stop_id", "station_name",
                        "time_travel", "stop")

# ---- Line names ------------------------------------------------------------
setwd("/home/ewahmed/subway-flow/")
linenames <- read.table("new_google_data.txt", header = TRUE, sep = ",",
                        fill = TRUE, quote = "", row.names = NULL,
                        stringsAsFactors = FALSE)

# Keep stop id + line name and strip stray embedded quote characters.
# gsub is already vectorised, so no sapply wrapper is needed.
linenames <- data.frame(linenames[, c(2, 3)])
names(linenames) <- c("stop_id", "line_name")
linenames$stop_id <- gsub("\"", "", linenames$stop_id)
linenames$line_name <- gsub("\"", "", linenames$line_name)

# Attach line names; stops with no match fall back to the train id.
trains_linenames <- left_join(trains_info, linenames)
missing_name <- is.na(trains_linenames$line_name)
trains_linenames$line_name[missing_name] <- trains_linenames$train[missing_name]

# Station id: the first stop_id seen for each (station, line) pair.
firstids <- trains_linenames %>%
  group_by(station_name, line_name) %>%
  summarise(first(stop_id))
trains_linenames <- inner_join(trains_linenames, firstids)
names(trains_linenames) <- c("train", "train_stop", "stop_id", "station",
                             "time_travel", "stop", "line_name", "station_id")
seperate_linenames <- trains_linenames  # kept for downstream use (sic)

# Pair each stop with its predecessor to form (from, to) edges.
trains_linenames <- mutate(trains_linenames, train_stop2 = lag(train_stop))
trains_linenames <- mutate(trains_linenames, station2 = lag(station))
trains_linenames <- mutate(trains_linenames, station_id2 = lag(station_id))

# First stops have no "from" side; mark them NA and drop incomplete rows.
trains_linenames[trains_linenames$stop == "01", ]$station <- NA
trains_linenames <- trains_linenames[complete.cases(trains_linenames), ]

# Final edge-list layout.
trains_linenames <- data.frame(trains_linenames[, c(1, 11, 10, 8, 4, 5)])
names(trains_linenames) <- c("Train", "FromStationID", "FromStation",
                             "ToStationID", "ToStation", "TimeTravel")

# Suffix the station names with their ids (easier to graph through networkx;
# skip this step if any other graphing tool is being used).
trains_linenames$FromStation <- paste0(trains_linenames$FromStation,
                                       trains_linenames$FromStationID)
trains_linenames$ToStation <- paste0(trains_linenames$ToStation,
                                     trains_linenames$ToStationID)

# Export file as TrainTravel.csv.
write.csv(trains_linenames, "/home/ewahmed/subway-flow/TrainTravel.csv")
# edger_de.R -- helpers for edgeR differential-expression (DE) analyses.
#
# NOTE(review): the original line structure of this file was lost in transit
# (code and roxygen comments were run together on a few very long lines, one
# string literal was split in half). This version restores the statements
# together with their original documentation.

library(edgeR)
source('io/rnaseq.R')

#' Prepare a table of DE genes with the given FDR cutoff.
#'
#' @param lrt Must be compatible with \code{topTags}, for example the output
#'   of \code{glmLRT}.
#' @param fdr FDR cutoff, passed to \code{topTags} as \code{p.value}.
#' @param log2FC.min If not \code{NULL}, additionally require
#'   \code{abs(logFC) >= log2FC.min}.
#' @return A data frame with columns genes, logFC, ensembl, direction
#'   ('U' for up, 'D' for down), FDR and logCPM.
prepare_de_table <- function(lrt, fdr = 0.05, log2FC.min = NULL) {
  de <- as.data.frame(topTags(lrt, p.value = fdr, n = Inf))
  if (!is.null(log2FC.min)) {
    de <- de[abs(de$logFC) >= log2FC.min, ]
  }
  # topTags rownames are integer indices into the original lrt table; use
  # them to map back to the Ensembl ids.
  de$ensembl <- rownames(lrt$table)[as.integer(rownames(de))]
  de$direction <- ifelse(de$logFC > 0, 'U', 'D')
  de <- de[, c("genes", "logFC", "ensembl", "direction", "FDR", "logCPM")]
  de
}

#' Get lists of DE genes for all possible Venn segments for an arbitrary
#' number of comparisons.
#'
#' The input arguments are DGELRT objects or anything else that can be passed
#' into \code{prepare_de_table}. If \code{background} is supplied, only
#' features that are NOT included in the background are counted.
#'
#' @param ... One DGELRT-like object per comparison.
#' @param background Optional comparison whose features are excluded.
#' @param fdr FDR cutoff forwarded to \code{prepare_de_table}.
#' @param log2FC.min Optional minimum absolute logFC.
#' @param id.key Column used to identify features (default 'ensembl').
#' @return A named list: one entry per Venn block (binary-string name such as
#'   '0011') holding the cbind-ed DE tables, plus a 'contrasts' entry.
venn_edger_de_lists <- function(..., background = NULL, fdr = 0.05,
                                log2FC.min = NULL, id.key = 'ensembl') {
  de <- lapply(list(...), function(x) {
    prepare_de_table(x, fdr = fdr, log2FC.min = log2FC.min)
  })
  if (!is.null(background)) {
    print("BACKGROUND")
    de.ref <- prepare_de_table(background, fdr = fdr)
    # Keep only features that do NOT appear in the background comparison.
    de <- lapply(de, function(x) {
      these.ids <- setdiff(x[[id.key]], de.ref[[id.key]])
      x[x[[id.key]] %in% these.ids, ]
    })
  }
  ids <- lapply(de, function(x) { x[[id.key]] })
  blocks.ids <- do.call(venn_sets, ids)

  # Recover the original topTags rows for one Venn block. `bin` is a binary
  # index string, e.g. '0011'; a '1' at position k means comparison k
  # participates in the block. (The character comparison against 1 relies on
  # R coercing 1 to "1".)
  get_de <- function(bin) {
    these.ids <- blocks.ids[[bin]]
    idx.in <- which(strsplit(bin, "")[[1]] == 1)
    tmp <- lapply(idx.in, function(x) {
      aa <- de[[x]][de[[x]][[id.key]] %in% these.ids, ]
      aa[order(aa[, 'FDR']), ]
    })
    do.call(cbind, tmp)
  }

  blocks <- lapply(names(blocks.ids), get_de)
  names(blocks) <- names(blocks.ids)
  blocks$contrasts <- names(de)
  blocks
}

#' Paired analysis without replicates.
#'
#' Carry out analysis based on a paired study, where there are no replicates
#' available, e.g. cancer vs healthy tissue in individual patients. In this
#' case, we need to 'lump' individuals together based on a simpler grouping
#' (cancer vs healthy) to estimate dispersion, then transfer this estimate
#' across for the purpose of computing differential expression whilst
#' honouring the paired structure.
#'
#' @param data Numeric matrix containing gene counts.
#' @param groups Vector or factor giving the group each sample belongs to.
#' @param groups.lumped Vector or factor giving the lumped groups per sample.
#' @param contrasts List of characters containing valid formulae based on the
#'   design matrix \code{~0 + groups}; \code{makeContrasts} is called on it.
#' @param gene.symbols Optional gene symbols; looked up via biomart if NULL.
#' @param output.dir If not NULL, one CSV of DE results is written per contrast.
#' @param fdr FDR cutoff used when writing the per-contrast CSVs.
#' @return Named list of glmLRT results, one per contrast.
grouped_analysis <- function(data, groups, groups.lumped, contrasts,
                             gene.symbols = NULL, output.dir = NULL,
                             fdr = 0.05) {
  groups <- factor(groups)
  groups.lumped <- factor(groups.lumped)
  if (is.null(gene.symbols)) {
    ens.map <- biomart_annotation(index.by = 'ensembl_gene_id')
    gene.symbols <- ens.map[rownames(data), "hgnc_symbol"]
  }

  # Estimate dispersion on the lumped (simplified) grouping.
  y.lumped <- DGEList(counts = data, group = groups.lumped)
  y.lumped <- calcNormFactors(y.lumped)
  design <- model.matrix(~groups.lumped)
  y.lumped <- estimateDisp(y.lumped, design)

  # Store the values for later use. This is one of the approaches recommended
  # by the edgeR authors when no replicates are available.
  dispersion.trended.lumped <- y.lumped$trended.dispersion
  dispersion.common.lumped <- y.lumped$common.dispersion
  dispersion.tagwise.lumped <- y.lumped$tagwise.dispersion

  # Re-initialise with the correct (paired) groupings and transfer the
  # dispersion estimated above.
  y <- DGEList(data, genes = gene.symbols)
  y <- calcNormFactors(y)
  design <- model.matrix(~0 + groups)
  colnames(design) <- levels(groups)
  y$common.dispersion <- dispersion.common.lumped
  y$trended.dispersion <- dispersion.trended.lumped
  y$tagwise.dispersion <- dispersion.tagwise.lumped

  fit.glm <- glmFit(y, design)
  contrasts[['levels']] <- design
  contrasts.made <- do.call(makeContrasts, contrasts)
  lrt <- list()
  for (t in colnames(contrasts.made)) {
    lrt[[t]] <- glmLRT(fit.glm, contrast = contrasts.made[, t])
    if (!is.null(output.dir)) {
      # Save the results for this contrast.
      output.fn <- file.path(output.dir, paste0(t, '.csv'))
      write.csv(prepare_de_table(lrt[[t]], fdr = fdr), file = output.fn)
    }
  }
  lrt
}

#' Filter the genes (rows in data) based on prevalence, in order to remove
#' genes with consistently low expression.
#'
#' @param data Count matrix (genes x samples).
#' @param cpm.min The minimum CPM required to 'pass'.
#' @param nsamples.min The minimum number of samples that must pass in order
#'   to keep a gene.
#' @param unless.cpm.gte If supplied, this acts as an override: if any single
#'   CPM value is >= this value, the gene is retained even if it would
#'   otherwise be earmarked for removal.
#' @return A reduced data frame (returned invisibly, as in the original).
filter_genes <- function(data, cpm.min = 1, nsamples.min = 3,
                         unless.cpm.gte = NULL) {
  y <- DGEList(counts = data)
  keep <- rowSums(cpm(y) > cpm.min) >= nsamples.min
  if (!is.null(unless.cpm.gte)) {
    keep <- keep | (rowSums(cpm(y) >= unless.cpm.gte) > 0)
  }
  data <- data[keep, ]
}

#' Generate a Venn-like DE CSV and write to disk.
#'
#' @param blocks A list with names given as a binary representation of the
#'   Venn region, e.g. '1101' means "in groups 1, 2 and 4 but not 3".
#' @param outfile Path of the CSV to write.
export_de_list <- function(blocks, outfile) {
  # Only keep names that are in binary format.
  idx <- names(blocks)[grep('^[01]+$', names(blocks))]
  idx <- idx[order(idx, decreasing = TRUE)]
  # Check the format: all names should have the same length.
  ns <- vapply(idx, nchar, integer(1))
  if (!all(ns == ns[1])) {
    stop("Unequal block names. Expecting them to have the same format, e.g. 
`011`.")
  }
  n <- ns[[1]]
  message(sprintf("Exporting %i way DE comparison to %s.", n, outfile))
  csv.data <- data.frame(blocks[[idx[1]]])
  if (ncol(csv.data) %% n != 0) {
    stop(sprintf("Unequal number of rows detected (%i / %i)", ncol(csv.data), n))
  } else {
    csv.ncol <- as.integer(ncol(csv.data) / n)
    message(sprintf("Detected %i columns per block.", csv.ncol))
  }
  block.colnames <- colnames(csv.data)[1:csv.ncol]
  for (i in seq(2, 2^n - 1)) {
    # Build this block: participating comparisons contribute their columns,
    # non-participating positions are padded with empty strings so every row
    # has the full width.
    this.data <- blocks[[idx[i]]]
    this.nrow <- nrow(this.data)
    this.block <- list()
    k <- 1
    l <- 1
    for (j in strsplit(idx[i], "")[[1]]) {
      if (j == '1') {
        this.block[[k]] <- this.data[, (csv.ncol * (l - 1) + 1):(csv.ncol * l)]
        l <- l + 1
      } else {
        this.block[[k]] <- data.frame(rep.col(rep.row("", this.nrow), csv.ncol))
        colnames(this.block[[k]]) <- block.colnames
      }
      k <- k + 1
    }
    this.csvdata <- do.call(cbind, c(this.block, list(deparse.level = 0)))
    csv.data <- rbind(csv.data, this.csvdata)
  }
  write.csv(csv.data, file = outfile, row.names = FALSE)
}
/R/differential_expression/edger_de.R
no_license
gaberosser/qmul-bioinf
R
false
false
7,044
r
# edger_de.R -- helpers for edgeR differential-expression (DE) analyses.
#
# NOTE(review): the original line structure of this file was lost in transit
# (code and roxygen comments were run together on a few very long lines, one
# string literal was split in half). This version restores the statements
# together with their original documentation.

library(edgeR)
source('io/rnaseq.R')

#' Prepare a table of DE genes with the given FDR cutoff.
#'
#' @param lrt Must be compatible with \code{topTags}, for example the output
#'   of \code{glmLRT}.
#' @param fdr FDR cutoff, passed to \code{topTags} as \code{p.value}.
#' @param log2FC.min If not \code{NULL}, additionally require
#'   \code{abs(logFC) >= log2FC.min}.
#' @return A data frame with columns genes, logFC, ensembl, direction
#'   ('U' for up, 'D' for down), FDR and logCPM.
prepare_de_table <- function(lrt, fdr = 0.05, log2FC.min = NULL) {
  de <- as.data.frame(topTags(lrt, p.value = fdr, n = Inf))
  if (!is.null(log2FC.min)) {
    de <- de[abs(de$logFC) >= log2FC.min, ]
  }
  # topTags rownames are integer indices into the original lrt table; use
  # them to map back to the Ensembl ids.
  de$ensembl <- rownames(lrt$table)[as.integer(rownames(de))]
  de$direction <- ifelse(de$logFC > 0, 'U', 'D')
  de <- de[, c("genes", "logFC", "ensembl", "direction", "FDR", "logCPM")]
  de
}

#' Get lists of DE genes for all possible Venn segments for an arbitrary
#' number of comparisons.
#'
#' The input arguments are DGELRT objects or anything else that can be passed
#' into \code{prepare_de_table}. If \code{background} is supplied, only
#' features that are NOT included in the background are counted.
#'
#' @param ... One DGELRT-like object per comparison.
#' @param background Optional comparison whose features are excluded.
#' @param fdr FDR cutoff forwarded to \code{prepare_de_table}.
#' @param log2FC.min Optional minimum absolute logFC.
#' @param id.key Column used to identify features (default 'ensembl').
#' @return A named list: one entry per Venn block (binary-string name such as
#'   '0011') holding the cbind-ed DE tables, plus a 'contrasts' entry.
venn_edger_de_lists <- function(..., background = NULL, fdr = 0.05,
                                log2FC.min = NULL, id.key = 'ensembl') {
  de <- lapply(list(...), function(x) {
    prepare_de_table(x, fdr = fdr, log2FC.min = log2FC.min)
  })
  if (!is.null(background)) {
    print("BACKGROUND")
    de.ref <- prepare_de_table(background, fdr = fdr)
    # Keep only features that do NOT appear in the background comparison.
    de <- lapply(de, function(x) {
      these.ids <- setdiff(x[[id.key]], de.ref[[id.key]])
      x[x[[id.key]] %in% these.ids, ]
    })
  }
  ids <- lapply(de, function(x) { x[[id.key]] })
  blocks.ids <- do.call(venn_sets, ids)

  # Recover the original topTags rows for one Venn block. `bin` is a binary
  # index string, e.g. '0011'; a '1' at position k means comparison k
  # participates in the block. (The character comparison against 1 relies on
  # R coercing 1 to "1".)
  get_de <- function(bin) {
    these.ids <- blocks.ids[[bin]]
    idx.in <- which(strsplit(bin, "")[[1]] == 1)
    tmp <- lapply(idx.in, function(x) {
      aa <- de[[x]][de[[x]][[id.key]] %in% these.ids, ]
      aa[order(aa[, 'FDR']), ]
    })
    do.call(cbind, tmp)
  }

  blocks <- lapply(names(blocks.ids), get_de)
  names(blocks) <- names(blocks.ids)
  blocks$contrasts <- names(de)
  blocks
}

#' Paired analysis without replicates.
#'
#' Carry out analysis based on a paired study, where there are no replicates
#' available, e.g. cancer vs healthy tissue in individual patients. In this
#' case, we need to 'lump' individuals together based on a simpler grouping
#' (cancer vs healthy) to estimate dispersion, then transfer this estimate
#' across for the purpose of computing differential expression whilst
#' honouring the paired structure.
#'
#' @param data Numeric matrix containing gene counts.
#' @param groups Vector or factor giving the group each sample belongs to.
#' @param groups.lumped Vector or factor giving the lumped groups per sample.
#' @param contrasts List of characters containing valid formulae based on the
#'   design matrix \code{~0 + groups}; \code{makeContrasts} is called on it.
#' @param gene.symbols Optional gene symbols; looked up via biomart if NULL.
#' @param output.dir If not NULL, one CSV of DE results is written per contrast.
#' @param fdr FDR cutoff used when writing the per-contrast CSVs.
#' @return Named list of glmLRT results, one per contrast.
grouped_analysis <- function(data, groups, groups.lumped, contrasts,
                             gene.symbols = NULL, output.dir = NULL,
                             fdr = 0.05) {
  groups <- factor(groups)
  groups.lumped <- factor(groups.lumped)
  if (is.null(gene.symbols)) {
    ens.map <- biomart_annotation(index.by = 'ensembl_gene_id')
    gene.symbols <- ens.map[rownames(data), "hgnc_symbol"]
  }

  # Estimate dispersion on the lumped (simplified) grouping.
  y.lumped <- DGEList(counts = data, group = groups.lumped)
  y.lumped <- calcNormFactors(y.lumped)
  design <- model.matrix(~groups.lumped)
  y.lumped <- estimateDisp(y.lumped, design)

  # Store the values for later use. This is one of the approaches recommended
  # by the edgeR authors when no replicates are available.
  dispersion.trended.lumped <- y.lumped$trended.dispersion
  dispersion.common.lumped <- y.lumped$common.dispersion
  dispersion.tagwise.lumped <- y.lumped$tagwise.dispersion

  # Re-initialise with the correct (paired) groupings and transfer the
  # dispersion estimated above.
  y <- DGEList(data, genes = gene.symbols)
  y <- calcNormFactors(y)
  design <- model.matrix(~0 + groups)
  colnames(design) <- levels(groups)
  y$common.dispersion <- dispersion.common.lumped
  y$trended.dispersion <- dispersion.trended.lumped
  y$tagwise.dispersion <- dispersion.tagwise.lumped

  fit.glm <- glmFit(y, design)
  contrasts[['levels']] <- design
  contrasts.made <- do.call(makeContrasts, contrasts)
  lrt <- list()
  for (t in colnames(contrasts.made)) {
    lrt[[t]] <- glmLRT(fit.glm, contrast = contrasts.made[, t])
    if (!is.null(output.dir)) {
      # Save the results for this contrast.
      output.fn <- file.path(output.dir, paste0(t, '.csv'))
      write.csv(prepare_de_table(lrt[[t]], fdr = fdr), file = output.fn)
    }
  }
  lrt
}

#' Filter the genes (rows in data) based on prevalence, in order to remove
#' genes with consistently low expression.
#'
#' @param data Count matrix (genes x samples).
#' @param cpm.min The minimum CPM required to 'pass'.
#' @param nsamples.min The minimum number of samples that must pass in order
#'   to keep a gene.
#' @param unless.cpm.gte If supplied, this acts as an override: if any single
#'   CPM value is >= this value, the gene is retained even if it would
#'   otherwise be earmarked for removal.
#' @return A reduced data frame (returned invisibly, as in the original).
filter_genes <- function(data, cpm.min = 1, nsamples.min = 3,
                         unless.cpm.gte = NULL) {
  y <- DGEList(counts = data)
  keep <- rowSums(cpm(y) > cpm.min) >= nsamples.min
  if (!is.null(unless.cpm.gte)) {
    keep <- keep | (rowSums(cpm(y) >= unless.cpm.gte) > 0)
  }
  data <- data[keep, ]
}

#' Generate a Venn-like DE CSV and write to disk.
#'
#' @param blocks A list with names given as a binary representation of the
#'   Venn region, e.g. '1101' means "in groups 1, 2 and 4 but not 3".
#' @param outfile Path of the CSV to write.
export_de_list <- function(blocks, outfile) {
  # Only keep names that are in binary format.
  idx <- names(blocks)[grep('^[01]+$', names(blocks))]
  idx <- idx[order(idx, decreasing = TRUE)]
  # Check the format: all names should have the same length.
  ns <- vapply(idx, nchar, integer(1))
  if (!all(ns == ns[1])) {
    stop("Unequal block names. Expecting them to have the same format, e.g. 
`011`.")
  }
  n <- ns[[1]]
  message(sprintf("Exporting %i way DE comparison to %s.", n, outfile))
  csv.data <- data.frame(blocks[[idx[1]]])
  if (ncol(csv.data) %% n != 0) {
    stop(sprintf("Unequal number of rows detected (%i / %i)", ncol(csv.data), n))
  } else {
    csv.ncol <- as.integer(ncol(csv.data) / n)
    message(sprintf("Detected %i columns per block.", csv.ncol))
  }
  block.colnames <- colnames(csv.data)[1:csv.ncol]
  for (i in seq(2, 2^n - 1)) {
    # Build this block: participating comparisons contribute their columns,
    # non-participating positions are padded with empty strings so every row
    # has the full width.
    this.data <- blocks[[idx[i]]]
    this.nrow <- nrow(this.data)
    this.block <- list()
    k <- 1
    l <- 1
    for (j in strsplit(idx[i], "")[[1]]) {
      if (j == '1') {
        this.block[[k]] <- this.data[, (csv.ncol * (l - 1) + 1):(csv.ncol * l)]
        l <- l + 1
      } else {
        this.block[[k]] <- data.frame(rep.col(rep.row("", this.nrow), csv.ncol))
        colnames(this.block[[k]]) <- block.colnames
      }
      k <- k + 1
    }
    this.csvdata <- do.call(cbind, c(this.block, list(deparse.level = 0)))
    csv.data <- rbind(csv.data, this.csvdata)
  }
  write.csv(csv.data, file = outfile, row.names = FALSE)
}
# NOTE(review): the original line structure of this file was lost in transit,
# leaving the function definitions buried inside roxygen comment text. This
# restores the Fro() generic and the Fro.GBN method; the roxygen header for
# Fro.CI (whose definition follows below) is kept at the end of this section.

#' Frobenius norm
#'
#' \code{Fro} returns the Frobenius norm between a Bayesian network and its
#' update after parameter variation.
#'
#' The details depend on the class the method \code{Fro} is applied to.
#'
#' @seealso \code{\link{KL.GBN}}, \code{\link{KL.CI}}, \code{\link{Fro.CI}},
#'   \code{\link{Fro.GBN}}, \code{\link{Jeffreys.GBN}}, \code{\link{Jeffreys.CI}}
#' @param x object of class \code{GBN} or \code{CI}.
#' @param ... parameters specific to the class used.
#' @export
#'
#' @return A dataframe whose columns depend on the class of the object.
Fro <- function(x, ...) {
  UseMethod("Fro", x)
}

#' Frobenius norm for \code{GBN}
#'
#' \code{Fro.GBN} returns the Frobenius norm between an object of class
#' \code{GBN} and its update after a standard parameter variation.
#'
#' Computation of the Frobenius norm between a Bayesian network and the
#' additively perturbed Bayesian network, where the perturbation is either to
#' the mean vector or to the covariance matrix. The Frobenius norm is not
#' computed for perturbations of the mean since it is always equal to zero.
#'
#' @param x object of class \code{GBN}.
#' @param entry a vector of length 2 indicating the entry of the covariance
#'   matrix to vary.
#' @param delta numeric vector, including the variation parameters that act
#'   additively.
#' @param log boolean value. If \code{TRUE}, the logarithm of the Frobenius
#'   norm is returned. Set by default to \code{TRUE}.
#' @param ... additional arguments for compatibility.
#'
#' @return A list of class \code{fro} containing a dataframe (first column
#'   the variations performed, second column the corresponding Frobenius
#'   norm) and a ggplot of the norm against the variation.
#'
#' @examples Fro(synthetic_gbn,c(3,3),seq(-1,1,0.1))
#'
#' @seealso \code{\link{KL.GBN}}, \code{\link{KL.CI}}, \code{\link{Fro.CI}},
#'   \code{\link{Jeffreys.GBN}}, \code{\link{Jeffreys.CI}}
#'
#' @export
#' @importFrom ggplot2 ggplot
#' @importFrom ggplot2 geom_line
#' @importFrom ggplot2 geom_point
#' @importFrom ggplot2 labs
#' @importFrom ggplot2 aes
Fro.GBN <- function(x, entry, delta, log = TRUE, ...) {
  gbn <- x
  fro <- numeric(length(delta))
  # Symmetric additive perturbation matrix for the chosen covariance entry.
  D <- matrix(0, length(gbn$mean), length(gbn$mean))
  for (i in seq_along(fro)) {
    D[entry[1], entry[2]] <- delta[i]
    D[entry[2], entry[1]] <- delta[i]
    # Only variations that keep the covariance positive semi-definite are
    # valid; invalid ones are recorded as NA.
    if (is.psd(round(gbn$covariance + D, 2))) {
      fro[i] <- sum(diag(t(D) %*% D))
    } else {
      fro[i] <- NA
    }
  }
  fro <- data.frame(Variation = delta, Frobenius = fro)
  if (log == TRUE) {
    fro[, -1] <- log(fro[, -1])
  }
  Variation <- fro$Variation
  Frobenius <- fro$Frobenius
  # A single variation can only be drawn as a point; otherwise draw a line.
  if (nrow(fro) == 1) {
    plot <- suppressWarnings(
      ggplot(data = fro, mapping = aes(x = Variation, y = Frobenius)) +
        geom_point(na.rm = TRUE) +
        labs(x = "delta", y = "Frobenius", title = "Frobenius norm") +
        theme_minimal()
    )
  } else {
    plot <- suppressWarnings(
      ggplot(data = fro, mapping = aes(x = Variation, y = Frobenius)) +
        geom_line(na.rm = TRUE) +
        labs(x = "delta", y = "Frobenius", title = "Frobenius norm") +
        theme_minimal()
    )
  }
  out <- list(Frobenius = fro, plot = plot)
  attr(out, 'class') <- 'fro'
  return(out)
}

#' Frobenius norm for \code{CI}
#'
#' \code{Fro.CI} returns the Frobenius norm between an object of class
#' \code{CI} and its update after a model-preserving parameter variation.
#'
#' Computation of the Frobenius norm between a Bayesian network and its
#' updated version after a model-preserving variation.
#'
#' @param x object of class \code{CI}.
#' @param type character string. Type of model-preserving co-variation:
#'   either \code{"total"}, \code{"partial"}, \code{row}, \code{column} or
#'   \code{all}. If \code{all} the Frobenius norm is computed for every type
#'   of co-variation matrix.
#' @param entry a vector of length 2 indicating the entry of the covariance
#'   matrix to vary.
#'@param delta numeric vector with positive elements, including the variation parameters that act multiplicatively. #'@param log boolean value. If \code{TRUE}, the logarithm of the Frobenius norm is returned. Set by default to \code{TRUE}. #'@param ... additional arguments for compatibility. #' #' #'@return A dataframe including in the first column the variations performed, and in the following columns the corresponding Frobenius norms for the chosen model-preserving co-variations. #' #'@seealso \code{\link{KL.GBN}}, \code{\link{KL.CI}}, \code{\link{Fro.GBN}}, \code{\link{Jeffreys.GBN}}, \code{\link{Jeffreys.CI}} #'@references C. Görgen & M. Leonelli (2020), Model-preserving sensitivity analysis for families of Gaussian distributions. Journal of Machine Learning Research, 21: 1-32. #' #'@examples Fro(synthetic_ci,"total",c(1,1),seq(0.9,1.1,0.01)) #'@examples Fro(synthetic_ci,"partial",c(1,4),seq(0.9,1.1,0.01)) #'@examples Fro(synthetic_ci,"column",c(1,2),seq(0.9,1.1,0.01)) #'@examples Fro(synthetic_ci,"row",c(3,2),seq(0.9,1.1,0.01)) #' #'@export #' Fro.CI <- function(x, type, entry, delta, log = TRUE, ...){ ci <- x fro <- numeric(length(delta)) if(type == "total"){ for(i in 1:length(delta)){ Delta <- variation_mat(ci,entry,delta[i]) Cov <- total_covar_matrix(ci,entry,delta[i]) if(is.psd(round(Cov*Delta*ci$covariance,2))){ fro[i] <- sum(diag(t(ci$covariance-Cov*Delta*ci$covariance)%*%(ci$covariance-Cov*Delta*ci$covariance))) } else{fro[i]<- NA} } } if(type == "partial"){ for(i in 1:length(delta)){ Delta <- variation_mat(ci,entry,delta[i]) Cov <- partial_covar_matrix(ci,entry,delta[i]) if(is.psd(round(Cov*Delta*ci$covariance,2))){ fro[i] <- sum(diag(t(ci$covariance-Cov*Delta*ci$covariance)%*%(ci$covariance-Cov*Delta*ci$covariance))) } else{fro[i]<- NA} } } if(type == "row"){ for(i in 1:length(delta)){ Delta <- variation_mat(ci,entry,delta[i]) Cov <- row_covar_matrix(ci, entry, delta[i]) if(is.psd(round(Cov*Delta*ci$covariance,2))){ fro[i] <- 
sum(diag(t(ci$covariance-Cov*Delta*ci$covariance)%*%(ci$covariance-Cov*Delta*ci$covariance))) } else{fro[i]<- NA} } } if(type == "column"){ for(i in 1:length(delta)){ Delta <- variation_mat(ci,entry,delta[i]) Cov <- col_covar_matrix(ci,entry,delta[i]) if(is.psd(round(Cov*Delta*ci$covariance,2))){ fro[i] <- sum(diag(t(ci$covariance-Cov*Delta*ci$covariance)%*%(ci$covariance-Cov*Delta*ci$covariance))) } else{fro[i]<- NA} } } if(type == "all"){ fro <- matrix(0,length(delta),4) for(i in 1:length(delta)){ Delta <- variation_mat(ci,entry,delta[i]) Cov_col <- col_covar_matrix(ci,entry,delta[i]) Cov_row <- row_covar_matrix(ci,entry,delta[i]) Cov_par <- partial_covar_matrix(ci,entry,delta[i]) Cov_tot <- total_covar_matrix(ci,entry,delta[i]) if(is.psd(round(Cov_tot*Delta*ci$covariance,2))){ fro[i,1] <- sum(diag(t(ci$covariance-Cov_tot*Delta*ci$covariance)%*%(ci$covariance-Cov_tot*Delta*ci$covariance))) } else{fro[i,1]<- NA} if(is.psd(round(Cov_par*Delta*ci$covariance,2))){ fro[i,2] <- sum(diag(t(ci$covariance-Cov_par*Delta*ci$covariance)%*%(ci$covariance-Cov_par*Delta*ci$covariance))) } else{fro[i,2]<- NA} if(is.psd(round(Cov_row*Delta*ci$covariance,2))){ fro[i,3] <- sum(diag(t(ci$covariance-Cov_row*Delta*ci$covariance)%*%(ci$covariance-Cov_row*Delta*ci$covariance))) } else{fro[i,3]<- NA} if(is.psd(round(Cov_col*Delta*ci$covariance,2))){ fro[i,4] <- sum(diag(t(ci$covariance-Cov_col*Delta*ci$covariance)%*%(ci$covariance-Cov_col*Delta*ci$covariance))) } else{fro[i,4]<- NA} } } if(type == "all") { Fro_data <- data.frame(Variation = delta, Total = fro[,1], Partial = fro[,2], Row_based = fro[,3], Column_based = fro[,4]) } else{ Fro_data <- data.frame(Variation = delta, Frobenius= fro) } if(log == TRUE){Fro_data[,-1] <- log(Fro_data[,-1])} if(type == "all"){ ci <- gather(Fro_data, "scheme", "value", - Variation) scheme <- ci$scheme value <- ci$value Variation <- ci$Variation if(nrow(Fro_data) == 1){ plot <- ggplot(data = ci, mapping = aes(x = Variation, y = value)) + 
geom_point(aes(color = scheme)) + labs( x = "delta", y = "Frobenius", title = "Frobenius Norm") + theme_minimal() } else{ plot <- ggplot(data = ci, mapping = aes(x = Variation, y = value)) + geom_line(aes(color = scheme)) + labs(x = "delta", y = "Frobenius", title = "Frobenius Norm") + theme_minimal() } } else{ Variation <- Fro_data$Variation Frobenius <- Fro_data$Frobenius if(nrow(Fro_data) == 1){ plot <- suppressWarnings(ggplot(data = Fro_data, mapping = aes(x = Variation, y = Frobenius)) + geom_point( na.rm = T) + labs(x = "delta", y = "Frobenius", title = "Frobenius norm") + theme_minimal()) }else{ plot <- suppressWarnings(ggplot(data = Fro_data, mapping = aes(x = Variation, y = Frobenius)) + geom_line(na.rm = T ) + labs(x = "delta", y = "Frobenius", title = "Frobenius norm") + theme_minimal()) } } out <- list(Frobenius = Fro_data, plot = plot) attr(out,'class') <- 'fro' return(out) } is.psd <- function (x, tol = 1e-08) { if (!is.square.matrix(x)) stop("argument x is not a square matrix") if (!is.symmetric.matrix(x)) stop("argument x is not a symmetric matrix") if (!is.numeric(x)) stop("argument x is not a numeric matrix") eigenvalues <- eigen(x, only.values = TRUE)$values n <- nrow(x) for (i in 1:n) { if (abs(eigenvalues[i]) < tol) { eigenvalues[i] <- 0 } } if (any(eigenvalues < 0)) { return(FALSE) } return(TRUE) } is.square.matrix <- function (x) { if (!is.matrix(x)) stop("argument x is not a matrix") return(nrow(x) == ncol(x)) } is.symmetric.matrix <- function (x) { if (!is.matrix(x)) { stop("argument x is not a matrix") } if (!is.numeric(x)) { stop("argument x is not a numeric matrix") } if (!is.square.matrix(x)) stop("argument x is not a square numeric matrix") return(sum(x == t(x)) == (nrow(x)^2)) }
/R/frobenius.R
no_license
manueleleonelli/bnmonitor
R
false
false
9,970
r
#' Frobenius norm #' #' \code{Fro} returns the Frobenius norm between a Bayesian network and its update after parameter variation. #' #' The details depend on the class the method \code{Fro} is applied to. #' #' @seealso \code{\link{KL.GBN}}, \code{\link{KL.CI}}, \code{\link{Fro.CI}}, \code{\link{Fro.GBN}}, \code{\link{Jeffreys.GBN}}, \code{\link{Jeffreys.CI}} #' @param x object of class \code{GBN} or \code{CI}. #' @param ... parameters specific to the class used. #' @export #' #'@return A dataframe whose columns depend of the class of the object. #' Fro <- function (x, ...) { UseMethod("Fro", x) } #' Frobenius norm for \code{GBN} #' #' \code{Fro.GBN} returns the Frobenius norm between between an object of class \code{GBN} and its update after a standard parameter variation. #' #' Computation of the Frobenius norm between a Bayesian network and the additively perturbed Bayesian network, where the perturbation is either to the mean vector or to the covariance matrix. The Frobenius norm is not computed for perturbations of the mean since it is always equal to zero. #' #'@param x object of class \code{GBN}. #'@param entry a vector of length 2 indicating the entry of the covariance matrix to vary. #'@param delta numeric vector, including the variation parameters that act additively. #'@param log boolean value. If \code{TRUE}, the logarithm of the Frobenius norm is returned. Set by default to \code{TRUE}. #'@param ... additional arguments for compatibility. #' #'@return A dataframe including in the first column the variations performed and in the second column the corresponding Frobenius norm. 
#' #'@examples Fro(synthetic_gbn,c(3,3),seq(-1,1,0.1)) #' #' @seealso \code{\link{KL.GBN}}, \code{\link{KL.CI}}, \code{\link{Fro.CI}}, \code{\link{Jeffreys.GBN}}, \code{\link{Jeffreys.CI}} #' #' @export #'@importFrom ggplot2 ggplot #'@importFrom ggplot2 geom_line #'@importFrom ggplot2 geom_point #'@importFrom ggplot2 labs #'@importFrom ggplot2 aes #' Fro.GBN <- function(x,entry,delta, log = TRUE, ...){ gbn <- x fro <- numeric(length(delta)) D <- matrix(0,length(gbn$mean),length(gbn$mean)) for(i in 1:length(fro)){ D[entry[1],entry[2]]<- delta[i] D[entry[2],entry[1]]<- delta[i] if(is.psd(round(gbn$covariance+D,2))){ fro[i] <- sum(diag(t(D)%*%D)) } else{fro[i]<-NA} } fro <- data.frame(Variation = delta,Frobenius=fro) if(log == TRUE){fro[,-1] <- log(fro[,-1])} Variation <- fro$Variation Frobenius <- fro$Frobenius if(nrow(fro)==1){ plot <- suppressWarnings(ggplot(data = fro, mapping = aes(x = Variation, y = Frobenius)) + geom_point( na.rm = T) + labs(x = "delta", y = "Frobenius", title = "Frobenius norm") + theme_minimal()) }else{ plot <- suppressWarnings(ggplot(data = fro, mapping = aes(x = Variation, y = Frobenius)) + geom_line( na.rm = T) + labs(x = "delta", y = "Frobenius", title = "Frobenius norm") + theme_minimal()) } out <- list(Frobenius = fro, plot = plot) attr(out,'class') <- 'fro' return(out) } #' Frobenius norm for \code{CI} #' #' \code{Fro.CI} returns the Frobenius norm between an object of class \code{CI} and its update after a model-preserving parameter variation. #' #' Computation of the Frobenius norm between a Bayesian network and its updated version after a model-preserving variation. #' #'@param x object of class \code{CI}. #'@param type character string. Type of model-preserving co-variation: either \code{"total"}, \code{"partial"}, \code{row}, \code{column} or \code{all}. If \code{all} the Frobenius norm is computed for every type of co-variation matrix. #'@param entry a vector of length 2 indicating the entry of the covariance matrix to vary. 
#'@param delta numeric vector with positive elements, including the variation parameters that act multiplicatively. #'@param log boolean value. If \code{TRUE}, the logarithm of the Frobenius norm is returned. Set by default to \code{TRUE}. #'@param ... additional arguments for compatibility. #' #' #'@return A dataframe including in the first column the variations performed, and in the following columns the corresponding Frobenius norms for the chosen model-preserving co-variations. #' #'@seealso \code{\link{KL.GBN}}, \code{\link{KL.CI}}, \code{\link{Fro.GBN}}, \code{\link{Jeffreys.GBN}}, \code{\link{Jeffreys.CI}} #'@references C. Görgen & M. Leonelli (2020), Model-preserving sensitivity analysis for families of Gaussian distributions. Journal of Machine Learning Research, 21: 1-32. #' #'@examples Fro(synthetic_ci,"total",c(1,1),seq(0.9,1.1,0.01)) #'@examples Fro(synthetic_ci,"partial",c(1,4),seq(0.9,1.1,0.01)) #'@examples Fro(synthetic_ci,"column",c(1,2),seq(0.9,1.1,0.01)) #'@examples Fro(synthetic_ci,"row",c(3,2),seq(0.9,1.1,0.01)) #' #'@export #' Fro.CI <- function(x, type, entry, delta, log = TRUE, ...){ ci <- x fro <- numeric(length(delta)) if(type == "total"){ for(i in 1:length(delta)){ Delta <- variation_mat(ci,entry,delta[i]) Cov <- total_covar_matrix(ci,entry,delta[i]) if(is.psd(round(Cov*Delta*ci$covariance,2))){ fro[i] <- sum(diag(t(ci$covariance-Cov*Delta*ci$covariance)%*%(ci$covariance-Cov*Delta*ci$covariance))) } else{fro[i]<- NA} } } if(type == "partial"){ for(i in 1:length(delta)){ Delta <- variation_mat(ci,entry,delta[i]) Cov <- partial_covar_matrix(ci,entry,delta[i]) if(is.psd(round(Cov*Delta*ci$covariance,2))){ fro[i] <- sum(diag(t(ci$covariance-Cov*Delta*ci$covariance)%*%(ci$covariance-Cov*Delta*ci$covariance))) } else{fro[i]<- NA} } } if(type == "row"){ for(i in 1:length(delta)){ Delta <- variation_mat(ci,entry,delta[i]) Cov <- row_covar_matrix(ci, entry, delta[i]) if(is.psd(round(Cov*Delta*ci$covariance,2))){ fro[i] <- 
sum(diag(t(ci$covariance-Cov*Delta*ci$covariance)%*%(ci$covariance-Cov*Delta*ci$covariance))) } else{fro[i]<- NA} } } if(type == "column"){ for(i in 1:length(delta)){ Delta <- variation_mat(ci,entry,delta[i]) Cov <- col_covar_matrix(ci,entry,delta[i]) if(is.psd(round(Cov*Delta*ci$covariance,2))){ fro[i] <- sum(diag(t(ci$covariance-Cov*Delta*ci$covariance)%*%(ci$covariance-Cov*Delta*ci$covariance))) } else{fro[i]<- NA} } } if(type == "all"){ fro <- matrix(0,length(delta),4) for(i in 1:length(delta)){ Delta <- variation_mat(ci,entry,delta[i]) Cov_col <- col_covar_matrix(ci,entry,delta[i]) Cov_row <- row_covar_matrix(ci,entry,delta[i]) Cov_par <- partial_covar_matrix(ci,entry,delta[i]) Cov_tot <- total_covar_matrix(ci,entry,delta[i]) if(is.psd(round(Cov_tot*Delta*ci$covariance,2))){ fro[i,1] <- sum(diag(t(ci$covariance-Cov_tot*Delta*ci$covariance)%*%(ci$covariance-Cov_tot*Delta*ci$covariance))) } else{fro[i,1]<- NA} if(is.psd(round(Cov_par*Delta*ci$covariance,2))){ fro[i,2] <- sum(diag(t(ci$covariance-Cov_par*Delta*ci$covariance)%*%(ci$covariance-Cov_par*Delta*ci$covariance))) } else{fro[i,2]<- NA} if(is.psd(round(Cov_row*Delta*ci$covariance,2))){ fro[i,3] <- sum(diag(t(ci$covariance-Cov_row*Delta*ci$covariance)%*%(ci$covariance-Cov_row*Delta*ci$covariance))) } else{fro[i,3]<- NA} if(is.psd(round(Cov_col*Delta*ci$covariance,2))){ fro[i,4] <- sum(diag(t(ci$covariance-Cov_col*Delta*ci$covariance)%*%(ci$covariance-Cov_col*Delta*ci$covariance))) } else{fro[i,4]<- NA} } } if(type == "all") { Fro_data <- data.frame(Variation = delta, Total = fro[,1], Partial = fro[,2], Row_based = fro[,3], Column_based = fro[,4]) } else{ Fro_data <- data.frame(Variation = delta, Frobenius= fro) } if(log == TRUE){Fro_data[,-1] <- log(Fro_data[,-1])} if(type == "all"){ ci <- gather(Fro_data, "scheme", "value", - Variation) scheme <- ci$scheme value <- ci$value Variation <- ci$Variation if(nrow(Fro_data) == 1){ plot <- ggplot(data = ci, mapping = aes(x = Variation, y = value)) + 
geom_point(aes(color = scheme)) + labs( x = "delta", y = "Frobenius", title = "Frobenius Norm") + theme_minimal() } else{ plot <- ggplot(data = ci, mapping = aes(x = Variation, y = value)) + geom_line(aes(color = scheme)) + labs(x = "delta", y = "Frobenius", title = "Frobenius Norm") + theme_minimal() } } else{ Variation <- Fro_data$Variation Frobenius <- Fro_data$Frobenius if(nrow(Fro_data) == 1){ plot <- suppressWarnings(ggplot(data = Fro_data, mapping = aes(x = Variation, y = Frobenius)) + geom_point( na.rm = T) + labs(x = "delta", y = "Frobenius", title = "Frobenius norm") + theme_minimal()) }else{ plot <- suppressWarnings(ggplot(data = Fro_data, mapping = aes(x = Variation, y = Frobenius)) + geom_line(na.rm = T ) + labs(x = "delta", y = "Frobenius", title = "Frobenius norm") + theme_minimal()) } } out <- list(Frobenius = Fro_data, plot = plot) attr(out,'class') <- 'fro' return(out) } is.psd <- function (x, tol = 1e-08) { if (!is.square.matrix(x)) stop("argument x is not a square matrix") if (!is.symmetric.matrix(x)) stop("argument x is not a symmetric matrix") if (!is.numeric(x)) stop("argument x is not a numeric matrix") eigenvalues <- eigen(x, only.values = TRUE)$values n <- nrow(x) for (i in 1:n) { if (abs(eigenvalues[i]) < tol) { eigenvalues[i] <- 0 } } if (any(eigenvalues < 0)) { return(FALSE) } return(TRUE) } is.square.matrix <- function (x) { if (!is.matrix(x)) stop("argument x is not a matrix") return(nrow(x) == ncol(x)) } is.symmetric.matrix <- function (x) { if (!is.matrix(x)) { stop("argument x is not a matrix") } if (!is.numeric(x)) { stop("argument x is not a numeric matrix") } if (!is.square.matrix(x)) stop("argument x is not a square numeric matrix") return(sum(x == t(x)) == (nrow(x)^2)) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/substoptimes.R \name{substoptimes} \alias{substoptimes} \title{Subset the stop_times table} \usage{ substoptimes(SIRIdf, GTFSstop_times., GTFSroutes., GTFStrips., GTFScalendar.) } \arguments{ \item{SIRIdf}{A SIRI data.frame to use as reference} \item{GTFSstop_times.}{The Stop times table from the GTFS, default name is GTFSstop_times} \item{GTFSroutes.}{The Routes table from the GTFS, default name is GTFSroutes} \item{GTFStrips.}{The trips table from the GTFS, default name is GTFStrips} \item{GTFScalendar.}{The Calendar table from the GTFS, default name is TFScalendar} } \value{ A \code{\link[base]{data.frame}} with the exact same columns as the GTFSstop_times table, with only the rows relevant to the SIRI data present } \description{ Creates a simple subset of the GTFS to reference the SIRI data frame } \details{ creates a smaller subset of the GTFS stop_times table to use in further analysis of the SIRI data. the function is a part of STG and should not be used on it's own. } \section{Warning}{ Do Not use this function on it's own, it is meant to be used only as part of the STG process } \references{ Bogin, D., Levy, N. and Ben-Elia E. (2018) \emph{Spatial and Temporal Estimation of the Service Reliability of Public Transportation Using Big Data and Open Source Tools} } \seealso{ \code{\link{STG}} } \keyword{internal} \keyword{misc}
/man/substoptimes.Rd
no_license
cran/SIRItoGTFS
R
false
true
1,482
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/substoptimes.R \name{substoptimes} \alias{substoptimes} \title{Subset the stop_times table} \usage{ substoptimes(SIRIdf, GTFSstop_times., GTFSroutes., GTFStrips., GTFScalendar.) } \arguments{ \item{SIRIdf}{A SIRI data.frame to use as reference} \item{GTFSstop_times.}{The Stop times table from the GTFS, default name is GTFSstop_times} \item{GTFSroutes.}{The Routes table from the GTFS, default name is GTFSroutes} \item{GTFStrips.}{The trips table from the GTFS, default name is GTFStrips} \item{GTFScalendar.}{The Calendar table from the GTFS, default name is TFScalendar} } \value{ A \code{\link[base]{data.frame}} with the exact same columns as the GTFSstop_times table, with only the rows relevant to the SIRI data present } \description{ Creates a simple subset of the GTFS to reference the SIRI data frame } \details{ creates a smaller subset of the GTFS stop_times table to use in further analysis of the SIRI data. the function is a part of STG and should not be used on it's own. } \section{Warning}{ Do Not use this function on it's own, it is meant to be used only as part of the STG process } \references{ Bogin, D., Levy, N. and Ben-Elia E. (2018) \emph{Spatial and Temporal Estimation of the Service Reliability of Public Transportation Using Big Data and Open Source Tools} } \seealso{ \code{\link{STG}} } \keyword{internal} \keyword{misc}
% Generated by roxygen2 (4.1.0): do not edit by hand % Please edit documentation in R/rm_abbreviation.R \docType{data} \name{rm_abbreviation} \alias{rm_abbreviation} \title{Abbreviations} \format{\preformatted{Classes 'regexr', 'character' atomic [1:1] ([A-Za-z][\\.]\\s*)\{1,\}([A-Za-z][\\.]) ..- attr(*, "subs")=List of 2 .. ..$ let_per_1:Classes 'subcom', 'character' atomic [1:1] ([A-Za-z][\\.]\\s*)\{1,\} .. .. .. ..- attr(*, "comment")= chr "Letter folowed by period and optional space (1 or more times)" .. ..$ let_per_2:Classes 'subcom', 'character' atomic [1:1] ([A-Za-z][\\.]) .. .. .. ..- attr(*, "comment")= chr "Ending letter folowed by period" ..- attr(*, "comments")=List of 2 .. ..$ let_per_1: chr "Letter folowed by period and optional space (1 or more times)" .. ..$ let_per_2: chr "Ending letter folowed by period" }} \usage{ rm_abbreviation } \description{ Find abbreviates that contain capitals or or lower case. Must have at minimum 1 letter followed by a period folowed by another letter and period. May contain additional letters and spaces. } \section{Regex}{ TRUE } \examples{ input <- c("I want $2.33 at 2:30 p.m. to go to A.n.p.", "She will send it A.S.A.P. (e.g. as soon as you can) said I.", "Hello world.", "In the U. S. A.") regmatches(input, gregexpr(rm_abbreviation, input, perl = TRUE)) gsub(rm_abbreviation, "", input, perl = TRUE) strsplit(input, rm_abbreviation, perl = TRUE) } \keyword{datasets}
/inst/sample/man/rm_abbreviation.Rd
no_license
trinker/regextools
R
false
false
1,466
rd
% Generated by roxygen2 (4.1.0): do not edit by hand % Please edit documentation in R/rm_abbreviation.R \docType{data} \name{rm_abbreviation} \alias{rm_abbreviation} \title{Abbreviations} \format{\preformatted{Classes 'regexr', 'character' atomic [1:1] ([A-Za-z][\\.]\\s*)\{1,\}([A-Za-z][\\.]) ..- attr(*, "subs")=List of 2 .. ..$ let_per_1:Classes 'subcom', 'character' atomic [1:1] ([A-Za-z][\\.]\\s*)\{1,\} .. .. .. ..- attr(*, "comment")= chr "Letter folowed by period and optional space (1 or more times)" .. ..$ let_per_2:Classes 'subcom', 'character' atomic [1:1] ([A-Za-z][\\.]) .. .. .. ..- attr(*, "comment")= chr "Ending letter folowed by period" ..- attr(*, "comments")=List of 2 .. ..$ let_per_1: chr "Letter folowed by period and optional space (1 or more times)" .. ..$ let_per_2: chr "Ending letter folowed by period" }} \usage{ rm_abbreviation } \description{ Find abbreviates that contain capitals or or lower case. Must have at minimum 1 letter followed by a period folowed by another letter and period. May contain additional letters and spaces. } \section{Regex}{ TRUE } \examples{ input <- c("I want $2.33 at 2:30 p.m. to go to A.n.p.", "She will send it A.S.A.P. (e.g. as soon as you can) said I.", "Hello world.", "In the U. S. A.") regmatches(input, gregexpr(rm_abbreviation, input, perl = TRUE)) gsub(rm_abbreviation, "", input, perl = TRUE) strsplit(input, rm_abbreviation, perl = TRUE) } \keyword{datasets}
## R function to cache inverse of a Matrix ## This function accepts a matrix and creates inverse of it. ## Part of assignment for programming week 3 by Johns Hopkins makeCacheMatrix <- function(x = matrix()) { makeCacheMatrix <- function(x= matrix()){ inv <- NULL ## hold value of inv matrix set<- function(y){ ## assign valueof matrix to parent environment x<<-y inv<<- NULL ## inv to NULL if new matrix } get<- function() x ## returns matrix argument setInverse <- function(inverse) inv <<- inverse ## inv is sent to parent environment getInverse<- function() inv ## get inv value list(set = set, get = get, setInverse=setInverse, getInverse=getInverse) } ## Returns inverse of a matrix cacheSolve <- function(x, ...) { ## Return a matrix that is the inverse of 'x' inv <- x$getInverse() if(!is.null(inv)) { message("caching!!....") return(inv) } data <- x$get() inv <- solve(data, ...) x$setInverse(inv) inv } }
/cachematrix.R
no_license
Scikit-Man/ProgrammingAssignment2
R
false
false
1,405
r
## R function to cache inverse of a Matrix ## This function accepts a matrix and creates inverse of it. ## Part of assignment for programming week 3 by Johns Hopkins makeCacheMatrix <- function(x = matrix()) { makeCacheMatrix <- function(x= matrix()){ inv <- NULL ## hold value of inv matrix set<- function(y){ ## assign valueof matrix to parent environment x<<-y inv<<- NULL ## inv to NULL if new matrix } get<- function() x ## returns matrix argument setInverse <- function(inverse) inv <<- inverse ## inv is sent to parent environment getInverse<- function() inv ## get inv value list(set = set, get = get, setInverse=setInverse, getInverse=getInverse) } ## Returns inverse of a matrix cacheSolve <- function(x, ...) { ## Return a matrix that is the inverse of 'x' inv <- x$getInverse() if(!is.null(inv)) { message("caching!!....") return(inv) } data <- x$get() inv <- solve(data, ...) x$setInverse(inv) inv } }
# --------------------------------------------------------------- # # This script prepare the data for the modelling in Stan # # Latency split analysis # # --------------------------------------------------------------- # rm(list=ls()) setwd("~/git_local/serial-integration-analysis/") rm(list=ls()) # --------------------------------------------------------------- # # dataset exp 1 d <- read.table("./data/exp1_data",sep="\t",header=T) d$id <- paste(d$id,"_xp1",sep="") dxy <- data.frame(D = d$tSteps, sacXresp=d$sacXresp, sacYresp= d$sacYresp, xs = d$xs, ys = d$ys, meanXpos = abs(d$tarX), id=d$id, rt=d$sacOnset, vpcode=d$vpcode, block=d$block, trial3=d$trial3,session=d$session) d$xR <- ifelse(d$side==1, d$xNear, d$xFar) d$xL <- ifelse(d$side==-1, d$xNear, d$xFar) d$yR <- ifelse(d$side==1, d$yNear,d$yFar) d$yL <- ifelse(d$side==-1, d$yNear, d$yFar) d$xyR <- sqrt(d$yR^2 + d$xR^2) d$xyL <- sqrt(d$yL^2 + d$xL^2) d$xRmean <- ifelse(d$side==1, d$meanXnear, d$meanXfar) d$xLmean <- ifelse(d$side==-1, d$meanXnear, d$meanXfar) dpe <- data.frame(D = d$tSteps, resp=-d$resp+1, xDiff = d$xyR-d$xyL, xDiffMean = d$xRmean-abs(d$xLmean), vpcode=d$vpcode, block=d$block, trial3=d$trial3, session=d$session, id=d$id, rt=d$sacOnset) dxy_1 <- dxy dpe_1 <- dpe rm(dxy, dpe) # --------------------------------------------------------------- # # dataset exp 2 d <- read.table("./data/exp2_data",sep="\t",header=T) d$id <- paste(d$id,"_xp2",sep="") dxy <- data.frame(D = d$tSteps, sacXresp=d$sacXresp, sacYresp= d$sacYresp, xs = d$xs, ys = d$ys, meanXpos = abs(d$tarX), id=d$id, rt=d$sacOnset, vpcode=d$vpcode, block=d$block, trial3=d$trial3,session=d$session) dpe <- data.frame(D = d$tSteps, resp=-d$resp+1, xDiff = d$LumDiff, xDiffMean = d$meanLumDiff, vpcode=d$vpcode, block=d$block, trial3=d$trial3, session=d$session, id=d$id, rt=d$sacOnset) dxy_2 <- dxy dpe_2 <- dpe rm(dxy, dpe) # --------------------------------------------------------------- # # equalize values of perceptual task stimuli str(dpe_1) 
str(dpe_2) dpe_1$xDiff <- scale(dpe_1$xDiff, center=F) * 2.5 # just to have perceptual stimuli in the same scale dpe_2$xDiff <- scale(dpe_2$xDiff, center=F) * 2.5 dpe_1$xDiffMean <- scale(dpe_1$xDiffMean, center=F) * 2.5 dpe_2$xDiffMean <- scale(dpe_2$xDiffMean, center=F) * 2.5 # --------------------------------------------------------------- # # merge database dpe <- rbind(dpe_1, dpe_2) dxy <- rbind(dxy_1, dxy_2) rm(dpe_1,dpe_2,dxy_1,dxy_2) # --------------------------------------------------------------- # # compute latency bins dpe$bin <- NA dxy$bin <- NA for(i in unique(dpe$id)){ # quartiles dpe$bin[dpe$id==i] <- cut(dpe$rt[dpe$id==i],breaks=quantile(dpe$rt[dpe$id==i]),labels=1:4,include.lowest=T) dxy$bin[dxy$id==i] <- cut(dxy$rt[dxy$id==i],breaks=quantile(dxy$rt[dxy$id==i]),labels=1:4,include.lowest=T) } any(is.na(dpe$bin)) any(is.na(dxy$bin)) ## saveRDS(dxy, paste("./data/rt_split/raw_data_saccade_RTbin.RDS")) saveRDS(dpe, paste("./data/rt_split/raw_data_perception_RTbin.RDS")) unique(dxy$bin) # --------------------------------------------------------------- # # make stan data for each bin # trial_label <- paste(dpe$vpcode, dpe$vp,dpe$session,dpe$block,dpe$trial3,sep="_") # dpe$trial <- as.numeric(factor(trial_label, labels=1:length(unique(trial_label)))) # count_uniques <- function(x){ length(unique(x))} # trial_count_table <- tapply(dpe$trial, list(dpe$bin, dpe$id), count_uniques) for(bin_i in 1:4){ for(id_i in 1:length(unique(dpe$id))){ id_label <- unique(dpe$id)[id_i] d_sac_i <- dxy[dxy$id==id_label & dxy$bin==bin_i,] d_per_i <- dpe[dpe$id==id_label & dpe$bin==bin_i,] trial_label <- paste(d_sac_i$vpcode, d_sac_i$vp,d_sac_i$session,d_sac_i$block,d_sac_i$trial3,sep="_") d_sac_i$trial <- as.numeric(factor(trial_label, labels=1:length(unique(trial_label)))) d_per_i$trial <- as.numeric(factor(trial_label, labels=1:length(unique(trial_label)))) # build data matrix # range(d_sac_i$D) n_time_bin <- 100 bin_width <- 900/n_time_bin # saccade data X_h <- 
matrix(NA,nrow=100, ncol=max(d_sac_i$trial)) X_v <- matrix(NA,nrow=100, ncol=max(d_sac_i$trial)) S_h <- rep(NA, max(d_sac_i$trial)) S_v <- rep(NA, max(d_sac_i$trial)) # perception data xDiff <- matrix(NA,nrow=100, ncol=max(d_sac_i$trial)) choice <- rep(NA, max(d_sac_i$trial)) # populate for(i in sort(unique(d_sac_i$trial))){ D_di <- d_sac_i$D[d_sac_i$trial==i] x_di <- d_sac_i$xs[d_sac_i$trial==i] + d_sac_i$meanXpos[d_sac_i$trial==i] y_di <- d_sac_i$ys[d_sac_i$trial==i] S_h[i] <- unique(d_sac_i$sacXresp[d_sac_i$trial==i]) # maybe add sanity check that this is a scalar? S_v[i] <- unique(d_sac_i$sacYresp[d_sac_i$trial==i]) xDiff_di <- d_per_i$xDiff[d_per_i$trial==i] # choice[i, id_i] <- 1-unique(d_per_i$resp[d_per_i$trial==i]) choice[i] <- unique(d_per_i$resp[d_per_i$trial==i]) # already inverted above # expand D_i <- seq(-900,0) x_i <- rep(0, length(D_i)) y_i <- rep(0, length(D_i)) xDiff_i <- rep(0, length(D_i)) current <- 1 for(t_i in 1:length(D_i)){ if(D_i[t_i] >= D_di[current]){ current <- current + 1 } if(current > 1){ if(D_i[t_i]>=D_di[current-1]){ x_i[t_i] <- x_di[current-1] y_i[t_i] <- y_di[current-1] xDiff_i[t_i] <- xDiff_di[current-1] } } } # # sanity check # plot(D_i, x_i, type="l") # sanity check # points(D_di, x_di) #this is discrete bin for(t_i in 1:n_time_bin){ index_t <- (1+(t_i-1)*bin_width):(t_i*bin_width) X_h[ t_i, i] <- mean(x_i[index_t],na.rm=T) X_v[t_i, i] <- mean(y_i[index_t],na.rm=T) xDiff[ t_i, i] <- mean(xDiff_i[index_t],na.rm=T) } } # make data list for fitting with Stan # saccade d_stan <- list(Xh = X_h, Xv = X_v, Yh= S_h, Yv=S_v, K=n_time_bin, N=max(d_sac_i$trial)) str(d_stan) # perception d_prc <- list(X = xDiff, Y = choice, K=n_time_bin, N=max(d_sac_i$trial)) str(d_prc) saveRDS(d_stan, paste("./stan_data_rtsplit/",id_label,"_bin",bin_i,"saccade.RDS",sep="")) saveRDS(d_prc, paste("./stan_data_rtsplit/",id_label,"_bin",bin_i,"perception.RDS",sep="")) } }
/prepare_data_latency_split.R
no_license
mattelisi/serial-int
R
false
false
6,394
r
# --------------------------------------------------------------- # # This script prepare the data for the modelling in Stan # # Latency split analysis # # --------------------------------------------------------------- # rm(list=ls()) setwd("~/git_local/serial-integration-analysis/") rm(list=ls()) # --------------------------------------------------------------- # # dataset exp 1 d <- read.table("./data/exp1_data",sep="\t",header=T) d$id <- paste(d$id,"_xp1",sep="") dxy <- data.frame(D = d$tSteps, sacXresp=d$sacXresp, sacYresp= d$sacYresp, xs = d$xs, ys = d$ys, meanXpos = abs(d$tarX), id=d$id, rt=d$sacOnset, vpcode=d$vpcode, block=d$block, trial3=d$trial3,session=d$session) d$xR <- ifelse(d$side==1, d$xNear, d$xFar) d$xL <- ifelse(d$side==-1, d$xNear, d$xFar) d$yR <- ifelse(d$side==1, d$yNear,d$yFar) d$yL <- ifelse(d$side==-1, d$yNear, d$yFar) d$xyR <- sqrt(d$yR^2 + d$xR^2) d$xyL <- sqrt(d$yL^2 + d$xL^2) d$xRmean <- ifelse(d$side==1, d$meanXnear, d$meanXfar) d$xLmean <- ifelse(d$side==-1, d$meanXnear, d$meanXfar) dpe <- data.frame(D = d$tSteps, resp=-d$resp+1, xDiff = d$xyR-d$xyL, xDiffMean = d$xRmean-abs(d$xLmean), vpcode=d$vpcode, block=d$block, trial3=d$trial3, session=d$session, id=d$id, rt=d$sacOnset) dxy_1 <- dxy dpe_1 <- dpe rm(dxy, dpe) # --------------------------------------------------------------- # # dataset exp 2 d <- read.table("./data/exp2_data",sep="\t",header=T) d$id <- paste(d$id,"_xp2",sep="") dxy <- data.frame(D = d$tSteps, sacXresp=d$sacXresp, sacYresp= d$sacYresp, xs = d$xs, ys = d$ys, meanXpos = abs(d$tarX), id=d$id, rt=d$sacOnset, vpcode=d$vpcode, block=d$block, trial3=d$trial3,session=d$session) dpe <- data.frame(D = d$tSteps, resp=-d$resp+1, xDiff = d$LumDiff, xDiffMean = d$meanLumDiff, vpcode=d$vpcode, block=d$block, trial3=d$trial3, session=d$session, id=d$id, rt=d$sacOnset) dxy_2 <- dxy dpe_2 <- dpe rm(dxy, dpe) # --------------------------------------------------------------- # # equalize values of perceptual task stimuli str(dpe_1) 
str(dpe_2) dpe_1$xDiff <- scale(dpe_1$xDiff, center=F) * 2.5 # just to have perceptual stimuli in the same scale dpe_2$xDiff <- scale(dpe_2$xDiff, center=F) * 2.5 dpe_1$xDiffMean <- scale(dpe_1$xDiffMean, center=F) * 2.5 dpe_2$xDiffMean <- scale(dpe_2$xDiffMean, center=F) * 2.5 # --------------------------------------------------------------- # # merge database dpe <- rbind(dpe_1, dpe_2) dxy <- rbind(dxy_1, dxy_2) rm(dpe_1,dpe_2,dxy_1,dxy_2) # --------------------------------------------------------------- # # compute latency bins dpe$bin <- NA dxy$bin <- NA for(i in unique(dpe$id)){ # quartiles dpe$bin[dpe$id==i] <- cut(dpe$rt[dpe$id==i],breaks=quantile(dpe$rt[dpe$id==i]),labels=1:4,include.lowest=T) dxy$bin[dxy$id==i] <- cut(dxy$rt[dxy$id==i],breaks=quantile(dxy$rt[dxy$id==i]),labels=1:4,include.lowest=T) } any(is.na(dpe$bin)) any(is.na(dxy$bin)) ## saveRDS(dxy, paste("./data/rt_split/raw_data_saccade_RTbin.RDS")) saveRDS(dpe, paste("./data/rt_split/raw_data_perception_RTbin.RDS")) unique(dxy$bin) # --------------------------------------------------------------- # # make stan data for each bin # trial_label <- paste(dpe$vpcode, dpe$vp,dpe$session,dpe$block,dpe$trial3,sep="_") # dpe$trial <- as.numeric(factor(trial_label, labels=1:length(unique(trial_label)))) # count_uniques <- function(x){ length(unique(x))} # trial_count_table <- tapply(dpe$trial, list(dpe$bin, dpe$id), count_uniques) for(bin_i in 1:4){ for(id_i in 1:length(unique(dpe$id))){ id_label <- unique(dpe$id)[id_i] d_sac_i <- dxy[dxy$id==id_label & dxy$bin==bin_i,] d_per_i <- dpe[dpe$id==id_label & dpe$bin==bin_i,] trial_label <- paste(d_sac_i$vpcode, d_sac_i$vp,d_sac_i$session,d_sac_i$block,d_sac_i$trial3,sep="_") d_sac_i$trial <- as.numeric(factor(trial_label, labels=1:length(unique(trial_label)))) d_per_i$trial <- as.numeric(factor(trial_label, labels=1:length(unique(trial_label)))) # build data matrix # range(d_sac_i$D) n_time_bin <- 100 bin_width <- 900/n_time_bin # saccade data X_h <- 
matrix(NA,nrow=100, ncol=max(d_sac_i$trial)) X_v <- matrix(NA,nrow=100, ncol=max(d_sac_i$trial)) S_h <- rep(NA, max(d_sac_i$trial)) S_v <- rep(NA, max(d_sac_i$trial)) # perception data xDiff <- matrix(NA,nrow=100, ncol=max(d_sac_i$trial)) choice <- rep(NA, max(d_sac_i$trial)) # populate for(i in sort(unique(d_sac_i$trial))){ D_di <- d_sac_i$D[d_sac_i$trial==i] x_di <- d_sac_i$xs[d_sac_i$trial==i] + d_sac_i$meanXpos[d_sac_i$trial==i] y_di <- d_sac_i$ys[d_sac_i$trial==i] S_h[i] <- unique(d_sac_i$sacXresp[d_sac_i$trial==i]) # maybe add sanity check that this is a scalar? S_v[i] <- unique(d_sac_i$sacYresp[d_sac_i$trial==i]) xDiff_di <- d_per_i$xDiff[d_per_i$trial==i] # choice[i, id_i] <- 1-unique(d_per_i$resp[d_per_i$trial==i]) choice[i] <- unique(d_per_i$resp[d_per_i$trial==i]) # already inverted above # expand D_i <- seq(-900,0) x_i <- rep(0, length(D_i)) y_i <- rep(0, length(D_i)) xDiff_i <- rep(0, length(D_i)) current <- 1 for(t_i in 1:length(D_i)){ if(D_i[t_i] >= D_di[current]){ current <- current + 1 } if(current > 1){ if(D_i[t_i]>=D_di[current-1]){ x_i[t_i] <- x_di[current-1] y_i[t_i] <- y_di[current-1] xDiff_i[t_i] <- xDiff_di[current-1] } } } # # sanity check # plot(D_i, x_i, type="l") # sanity check # points(D_di, x_di) #this is discrete bin for(t_i in 1:n_time_bin){ index_t <- (1+(t_i-1)*bin_width):(t_i*bin_width) X_h[ t_i, i] <- mean(x_i[index_t],na.rm=T) X_v[t_i, i] <- mean(y_i[index_t],na.rm=T) xDiff[ t_i, i] <- mean(xDiff_i[index_t],na.rm=T) } } # make data list for fitting with Stan # saccade d_stan <- list(Xh = X_h, Xv = X_v, Yh= S_h, Yv=S_v, K=n_time_bin, N=max(d_sac_i$trial)) str(d_stan) # perception d_prc <- list(X = xDiff, Y = choice, K=n_time_bin, N=max(d_sac_i$trial)) str(d_prc) saveRDS(d_stan, paste("./stan_data_rtsplit/",id_label,"_bin",bin_i,"saccade.RDS",sep="")) saveRDS(d_prc, paste("./stan_data_rtsplit/",id_label,"_bin",bin_i,"perception.RDS",sep="")) } }
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22810536108214e+146, 1.53892691178843e-220, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L))) result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist) str(result)
/CNull/inst/testfiles/communities_individual_based_sampling_alpha/AFL_communities_individual_based_sampling_alpha/communities_individual_based_sampling_alpha_valgrind_files/1615770083-test.R
no_license
akhikolla/updatedatatype-list2
R
false
false
362
r
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22810536108214e+146, 1.53892691178843e-220, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L))) result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist) str(result)
library(mstR) ### Name: testListMST ### Title: Testing the format of the MST input lists ### Aliases: testListMST ### ** Examples # Creation and test of a 'start' list start <- list(theta = 0) testListMST(start, type = "start") # Creation and test of a 'test' list test <- list(method = "WL", moduleSelect = "MFI", constantPatt = "fixed4") testListMST(test, type = "test") # Creation and test of a 'final' list (with mistake) final <- list(method = "MAP") testListMST(final, type = "final") # Creation of cut-off scores for ability levels: cut score 0 between modules 3 and 4 # and cut scores -1 and 1 between modules 5, 6 and 7 cut <- matrix(NA, 7, 2) cut[3,] <- c(-Inf, 0) cut[4,] <- c(0, Inf) cut[5,] <- c(-Inf, -1) cut[6,] <- c(-1, 1) cut[7,] <- c(1, Inf) test <- list(method = "WL", constantPatt = "fixed4", cutoff = cut) testListMST(test, "test")
/data/genthat_extracted_code/mstR/examples/testListMST.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
883
r
library(mstR) ### Name: testListMST ### Title: Testing the format of the MST input lists ### Aliases: testListMST ### ** Examples # Creation and test of a 'start' list start <- list(theta = 0) testListMST(start, type = "start") # Creation and test of a 'test' list test <- list(method = "WL", moduleSelect = "MFI", constantPatt = "fixed4") testListMST(test, type = "test") # Creation and test of a 'final' list (with mistake) final <- list(method = "MAP") testListMST(final, type = "final") # Creation of cut-off scores for ability levels: cut score 0 between modules 3 and 4 # and cut scores -1 and 1 between modules 5, 6 and 7 cut <- matrix(NA, 7, 2) cut[3,] <- c(-Inf, 0) cut[4,] <- c(0, Inf) cut[5,] <- c(-Inf, -1) cut[6,] <- c(-1, 1) cut[7,] <- c(1, Inf) test <- list(method = "WL", constantPatt = "fixed4", cutoff = cut) testListMST(test, "test")
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/repl_control.R
\name{echoExpressionCallback}
\alias{echoExpressionCallback}
\title{Echo the input expression (task-callback test helper)}
\usage{
echoExpressionCallback(expr, value, status, visible, data)
}
\arguments{
\item{expr}{s-language expression}

\item{value}{result of the expression evaluation}

\item{status}{logical indicating success or not}

\item{visible}{was the output printed}

\item{data}{data object that is accessible to the callback (passed in from addTaskCallback)}
}
\value{
A logical. Under \code{\link{addTaskCallback}} semantics, returning
\code{TRUE} keeps the callback registered; \code{FALSE} removes it.
}
\description{
This just echos the input expression. It is here for testing out the
taskCallback code.
}
/man/echoExpressionCallback.Rd
no_license
djacobs7/remembr
R
false
true
768
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/repl_control.R \name{echoExpressionCallback} \alias{echoExpressionCallback} \title{This just echos the input expression. It is here for testing out the taskCallback code} \usage{ echoExpressionCallback(expr, value, status, visible, data) } \arguments{ \item{expr}{s-language expression} \item{value}{result of the expression evaluation} \item{status}{logical indicating success or not} \item{visible}{was the output printed} \item{data}{data object that is accessible to the callback ( passed in from addTaskCallback)} } \value{ I guess this indicates if the callback succeeded? } \description{ This just echos the input expression. It is here for testing out the taskCallback code }
# Paul GOUJON & Stephane LOUIS # UTC - SY09 - TP4 # logistic regression validation # The aim of that function is to test the logistic model we learned from # training on a test ensemble. It takes as input a test individuals matrix, a # beta vector (corresponding to the beta we learned before) and returns a prob # matrix containing the posteriori probabilities and a ztst vector containing # the labels it gave to each individual source("postPr.R"); log.val = function (beta, Xtst) { # check beta dimension, add coordinate at the origin if not the same as Xapp if (nrow(beta) != ncol(Xtst)) { individuals = cbind(Xtst, matrix(1, nrow=nrow(Xtst), ncol=1)); } else { individuals = Xtst; } prob = post.pr(beta, individuals); ztst = matrix(1, nrow=nrow(Xtst), ncol=1); ztst[which(prob < 0.5),1] = 2; l = list(prob, ztst); names(l) = c("prob", "zpred"); return (l); }
/spam/log.R
permissive
goujonpa/sy09TP4
R
false
false
931
r
# Paul GOUJON & Stephane LOUIS # UTC - SY09 - TP4 # logistic regression validation # The aim of that function is to test the logistic model we learned from # training on a test ensemble. It takes as input a test individuals matrix, a # beta vector (corresponding to the beta we learned before) and returns a prob # matrix containing the posteriori probabilities and a ztst vector containing # the labels it gave to each individual source("postPr.R"); log.val = function (beta, Xtst) { # check beta dimension, add coordinate at the origin if not the same as Xapp if (nrow(beta) != ncol(Xtst)) { individuals = cbind(Xtst, matrix(1, nrow=nrow(Xtst), ncol=1)); } else { individuals = Xtst; } prob = post.pr(beta, individuals); ztst = matrix(1, nrow=nrow(Xtst), ncol=1); ztst[which(prob < 0.5),1] = 2; l = list(prob, ztst); names(l) = c("prob", "zpred"); return (l); }
##' .. content for \description{} (no empty lines) .. ##' ##' .. content for \details{} .. ##' ##' @title ##' @return ##' @author whtns ##' @export tabulate_liu_scnas <- function() { liu_scnas <- tibble::tribble( ~id, ~seqnames, ~gene_id, ~copy_number, ~study, "RB13", "2", "4613", 83L, "Liu et al.", "RB14", "2", "4613", NA, "Liu et al.", "RB15", "2", "4613", 25L, "Liu et al.", "RB22", "2", "4613", 14L, "Liu et al.", "RB215", "2", "4613", NA, "Liu et al.", "RB222", "2", "4613", 141L, "Liu et al.", "RB224", "2", "4613", 29L, "Liu et al.", "RB659", "2", "4613", 19L, "Liu et al.", "RBsjd2", "2", "4613", 29L, "Liu et al.", "RBsjd3", "2", "4613", 30L, "Liu et al.", "RBsjd7", "2", "4613", 246L, "Liu et al." ) %>% # dplyr::mutate(gene_id = "gene") %>% identity() }
/R/tabulate_liu_scnas.R
no_license
cobriniklab/rb_exome
R
false
false
1,070
r
##' .. content for \description{} (no empty lines) .. ##' ##' .. content for \details{} .. ##' ##' @title ##' @return ##' @author whtns ##' @export tabulate_liu_scnas <- function() { liu_scnas <- tibble::tribble( ~id, ~seqnames, ~gene_id, ~copy_number, ~study, "RB13", "2", "4613", 83L, "Liu et al.", "RB14", "2", "4613", NA, "Liu et al.", "RB15", "2", "4613", 25L, "Liu et al.", "RB22", "2", "4613", 14L, "Liu et al.", "RB215", "2", "4613", NA, "Liu et al.", "RB222", "2", "4613", 141L, "Liu et al.", "RB224", "2", "4613", 29L, "Liu et al.", "RB659", "2", "4613", 19L, "Liu et al.", "RBsjd2", "2", "4613", 29L, "Liu et al.", "RBsjd3", "2", "4613", 30L, "Liu et al.", "RBsjd7", "2", "4613", 246L, "Liu et al." ) %>% # dplyr::mutate(gene_id = "gene") %>% identity() }
test_that("ifelse_censor_linter skips allowed usages", { expect_lint("ifelse(x > 2, x, y)", NULL, ifelse_censor_linter()) expect_lint("ifelse(x > 2, x, y)", NULL, ifelse_censor_linter()) }) test_that("ifelse_censor_linter blocks simple disallowed usages", { expect_lint( "ifelse(x < 0, 0, x)", rex::rex("pmax(x, y) is preferable to ifelse(x < y, y, x)"), ifelse_censor_linter() ) # other equivalents to base::ifelse() expect_lint( "if_else(x < 0, 0, x)", rex::rex("pmax(x, y) is preferable to if_else(x < y, y, x)"), ifelse_censor_linter() ) expect_lint( "fifelse(x < 0, 0, x)", rex::rex("pmax(x, y) is preferable to fifelse(x < y, y, x)"), ifelse_censor_linter() ) # other equivalents for censoring expect_lint( "ifelse(x <= 0, 0, x)", rex::rex("pmax(x, y) is preferable to ifelse(x <= y, y, x)"), ifelse_censor_linter() ) expect_lint( "ifelse(x > 0, x, 0)", rex::rex("pmax(x, y) is preferable to ifelse(x > y, x, y)"), ifelse_censor_linter() ) expect_lint( "ifelse(x >= 0, x, 0)", rex::rex("pmax(x, y) is preferable to ifelse(x >= y, x, y)"), ifelse_censor_linter() ) # pairwise min/max (similar to censoring) expect_lint( "ifelse(x < y, x, y)", rex::rex("pmin(x, y) is preferable to ifelse(x < y, x, y)"), ifelse_censor_linter() ) expect_lint( "ifelse(x >= y, y, x)", rex::rex("pmin(x, y) is preferable to ifelse(x >= y, y, x)"), ifelse_censor_linter() ) # more complicated expression still matches lines <- trim_some(" ifelse(2 + p + 104 + 1 > ncols, ncols, 2 + p + 104 + 1 ) ") expect_lint( lines, rex::rex("pmin(x, y) is preferable to ifelse(x > y, y, x)"), ifelse_censor_linter() ) }) # TODO(michaelchirico): how easy would it be to strip parens when considering lint? # e.g. ifelse(x < (kMaxIndex - 1), x, kMaxIndex - 1)
/tests/testthat/test-ifelse_censor_linter.R
permissive
russHyde/lintr
R
false
false
1,922
r
test_that("ifelse_censor_linter skips allowed usages", { expect_lint("ifelse(x > 2, x, y)", NULL, ifelse_censor_linter()) expect_lint("ifelse(x > 2, x, y)", NULL, ifelse_censor_linter()) }) test_that("ifelse_censor_linter blocks simple disallowed usages", { expect_lint( "ifelse(x < 0, 0, x)", rex::rex("pmax(x, y) is preferable to ifelse(x < y, y, x)"), ifelse_censor_linter() ) # other equivalents to base::ifelse() expect_lint( "if_else(x < 0, 0, x)", rex::rex("pmax(x, y) is preferable to if_else(x < y, y, x)"), ifelse_censor_linter() ) expect_lint( "fifelse(x < 0, 0, x)", rex::rex("pmax(x, y) is preferable to fifelse(x < y, y, x)"), ifelse_censor_linter() ) # other equivalents for censoring expect_lint( "ifelse(x <= 0, 0, x)", rex::rex("pmax(x, y) is preferable to ifelse(x <= y, y, x)"), ifelse_censor_linter() ) expect_lint( "ifelse(x > 0, x, 0)", rex::rex("pmax(x, y) is preferable to ifelse(x > y, x, y)"), ifelse_censor_linter() ) expect_lint( "ifelse(x >= 0, x, 0)", rex::rex("pmax(x, y) is preferable to ifelse(x >= y, x, y)"), ifelse_censor_linter() ) # pairwise min/max (similar to censoring) expect_lint( "ifelse(x < y, x, y)", rex::rex("pmin(x, y) is preferable to ifelse(x < y, x, y)"), ifelse_censor_linter() ) expect_lint( "ifelse(x >= y, y, x)", rex::rex("pmin(x, y) is preferable to ifelse(x >= y, y, x)"), ifelse_censor_linter() ) # more complicated expression still matches lines <- trim_some(" ifelse(2 + p + 104 + 1 > ncols, ncols, 2 + p + 104 + 1 ) ") expect_lint( lines, rex::rex("pmin(x, y) is preferable to ifelse(x > y, y, x)"), ifelse_censor_linter() ) }) # TODO(michaelchirico): how easy would it be to strip parens when considering lint? # e.g. ifelse(x < (kMaxIndex - 1), x, kMaxIndex - 1)
library(LMest) ### Name: est_lm_basic ### Title: Estimate basic LM model ### Aliases: est_lm_basic ### ** Examples # Example of drug consumption data # load data data(data_drug) data_drug = as.matrix(data_drug) S = data_drug[,1:5]-1 yv = data_drug[,6] # fit of the Basic LM model k = 3 out = est_lm_basic(S,yv,k,mod=1) summary(out) ## Not run: ##D # Example based on criminal data ##D # load criminal data ##D data(data_criminal_sim) ##D out = long2wide(data_criminal_sim,"id","time","sex", ##D c("y1","y2","y3","y4","y5","y6","y7","y8","y9","y10"),aggr=T,full=999) ##D XX = out$XX ##D YY = out$YY ##D freq = out$freq ##D # fit basic LM model with increasing number of states to select the most suitable ##D Res0 = vector("list",7) ##D for(k in 1:7){ ##D Res0[[k]] = est_lm_basic(YY,freq,k,mod=1,tol=10^-4) ##D save(list = ls(),file="example_criminal_temp.RData") ##D } ##D out1 = Res0[[6]] ## End(Not run)
/data/genthat_extracted_code/LMest/examples/est_lm_basic.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
925
r
library(LMest) ### Name: est_lm_basic ### Title: Estimate basic LM model ### Aliases: est_lm_basic ### ** Examples # Example of drug consumption data # load data data(data_drug) data_drug = as.matrix(data_drug) S = data_drug[,1:5]-1 yv = data_drug[,6] # fit of the Basic LM model k = 3 out = est_lm_basic(S,yv,k,mod=1) summary(out) ## Not run: ##D # Example based on criminal data ##D # load criminal data ##D data(data_criminal_sim) ##D out = long2wide(data_criminal_sim,"id","time","sex", ##D c("y1","y2","y3","y4","y5","y6","y7","y8","y9","y10"),aggr=T,full=999) ##D XX = out$XX ##D YY = out$YY ##D freq = out$freq ##D # fit basic LM model with increasing number of states to select the most suitable ##D Res0 = vector("list",7) ##D for(k in 1:7){ ##D Res0[[k]] = est_lm_basic(YY,freq,k,mod=1,tol=10^-4) ##D save(list = ls(),file="example_criminal_temp.RData") ##D } ##D out1 = Res0[[6]] ## End(Not run)
# Calculate the historical payoff for a portfolio of FTR's. # # # Written by Adrian Dragulescu on 1-Sep-2005 historical.path.Payoff <- function(hdata, data, Position, options){ probFun <- function(xHist, x){ # Calculate the probability of the bid using prior realized payoffs y <- array(0, dim=length(x)) y[x>max(xHist)] <- 1 ind <- (x<=max(xHist))&(x>=min(xHist)) if (any(ind)){ y[ind] <- approx(sort(xHist), seq(0, 1, length=length(xHist)), x[ind])$y} return(y) } if ("PEAK" %in% unique(Position$Bucket)){ ind.PEAK <- select.onpeak(hdata[,1:4])$ind} if ("OFFPEAK" %in% unique(Position$Bucket)){ ind.OFFPEAK <- select.offpeak(hdata[,1:4])$ind} if ("FLAT" %in% unique(Position$Bucket)){ ind.FLAT <- 1:nrow(hdata)} dates <- as.Date(paste(hdata$year, hdata$month, hdata$day, sep="-")) yyyymm <- format(dates, "%b.%y") yyyymm <- factor(yyyymm, levels=unique(yyyymm)) bid.Probability=std=mu=q95=q05 <- array(NA, dim=length(options$paths)) hpayoff <- matrix(NA, nrow=length(options$paths), ncol=length(levels(yyyymm)), dimnames=list(NULL, levels(yyyymm))) for (r in 1:nrow(Position)){ # Loop over unique positions ind.POI <- which(options$uNodes == Position[r,"Source"]) ind.POW <- which(options$uNodes == Position[r,"Sink"]) ind <- switch(as.character(Position$Bucket[r]), FLAT = ind.FLAT, PEAK = ind.PEAK, OFFPEAK = ind.OFFPEAK) hspread <- hdata[,ind.POW+4] - hdata[,ind.POI+4] hp <- tapply(hspread[ind], yyyymm[ind], mean, na.rm=T) ind <- which(options$paths %in% options$uPaths[r]) bid.Probability[ind] <- probFun(unique(hp[is.finite(hp)]), data$BidPrice[ind]) mu[ind] <- mean(hp, na.rm=T); std[ind] <- sd(hp, na.rm=T) hpayoff[ind,] <- rep(hp, each=length(ind)) q95[ind] <- round(quantile(hspread, probs=c(0.95), na.rm=T),2) q05[ind] <- round(quantile(hspread, probs=c(0.05), na.rm=T),2) } #----------------------------------------------Aggregate positions----- Position$Source <- as.character(Position$Source) Position$Sink <- as.character(Position$Sink) Position$Bucket <- 
as.character(Position$Bucket) sPosition <- data.frame(Symbols = c(Position$Sink, Position$Source), MW = c(Position$MW, -Position$MW), Bucket = c(Position$Bucket, Position$Bucket)) for (m in as.character(unique(yyyymm))){ } OutT <- data.frame(path=options$paths, Bid.Probability=round(bid.Probability,2), Hist.Mean=round(mu,2), Hist.Std=round(std,2), Sharpe=round((mu-data$BidPrice)/std,2), q05, q95, Bid.Price=data$BidPrice, round(hpayoff, 2)) filename <- paste(save$dir, "hist.analysis.csv", sep="") write.csv(OutT, file=filename) return(OutT) } # historical.portfolio.Payoff <- function(hdata, data, save, options){ # dates <- as.Date(paste(hdata$year, hdata$month, hdata$day, sep="-")) # yyyymm <- format(dates, "%b.%y") # yyyymm <- factor(yyyymm, levels=unique(yyyymm)) # last.hdate <- dates[length(dates)] # bom <- as.Date(paste(format(last.hdate, "%Y-%m"), "-01", sep="")) # last.1m <- seq(bom, by="-1 month", length.out=2)[2] # last.3m <- seq(bom, by="-1 month", length.out=4)[4] # last.6m <- seq(bom, by="-1 month", length.out=7)[7] # } # TMM<-hdata # TMM$DateTime<-ISOdatetime(TMM[,"year"],TMM[,"month"],TMM[,"day"],TMM[,"hour"],0,0) # DTime <- intersect(TMM$DateTime,fhours) # Dindex <- which (TMM$DateTime %in% DTime) # RMM<-TMM[Dindex,] # classes <- c("OFFPEAK", "PEAK", "FLAT") # for (c in classes) # { # ind <- grep(c, uPaths) # if (length(ind)==0){next} # ind.Nodes <- unique(c(as.character(Position[ind,"Source"]), as.character(Position[ind, "Sink"]))) # ind <- which(uNodes %in% as.numeric(ind.Nodes)) # HMM <- RMM[,c(1:4,4+ind)] # if (c=="OFFPEAK"){ HMM <- select.offpeak(HMM) } # if (c=="PEAK"){ HMM <- select.onpeak(HMM) } # hist.prices[[c]] <- HMM[,-c(1:4,length(HMM))] # } # realized.prices.FTR = list() # realized.prices.FTR$OFFPEAK <- apply(hist.prices$OFFPEAK,2,sum,na.rm = T) # realized.prices.FTR$PEAK <- apply(hist.prices$PEAK,2,sum,na.rm = T) # for (index in 1:dim(Position)[1]) # { # FTRClass <- as.character(Position[index,"Class"]) # ind.Source <- which( 
names(realized.prices.FTR[[FTRClass]]) == # paste("NEPOOL","SMD","DA",as.character(Position[index,"Source"]),"CongComp",sep="_")) # ind.Sink <- which( names(realized.prices.FTR[[FTRClass]]) == # paste("NEPOOL","SMD","DA",as.character(Position[index,"Sink"]),"CongComp",sep="_")) # Position[index,"Realized.Price"] <- realized.prices.FTR[[FTRClass]][ind.Sink] - realized.prices.FTR[[FTRClass]][ind.Source] # }
/R Extension/RMG/Energy/CEAR/historical.path.Payoff.R
no_license
uhasan1/QLExtension-backup
R
false
false
4,819
r
# Calculate the historical payoff for a portfolio of FTR's. # # # Written by Adrian Dragulescu on 1-Sep-2005 historical.path.Payoff <- function(hdata, data, Position, options){ probFun <- function(xHist, x){ # Calculate the probability of the bid using prior realized payoffs y <- array(0, dim=length(x)) y[x>max(xHist)] <- 1 ind <- (x<=max(xHist))&(x>=min(xHist)) if (any(ind)){ y[ind] <- approx(sort(xHist), seq(0, 1, length=length(xHist)), x[ind])$y} return(y) } if ("PEAK" %in% unique(Position$Bucket)){ ind.PEAK <- select.onpeak(hdata[,1:4])$ind} if ("OFFPEAK" %in% unique(Position$Bucket)){ ind.OFFPEAK <- select.offpeak(hdata[,1:4])$ind} if ("FLAT" %in% unique(Position$Bucket)){ ind.FLAT <- 1:nrow(hdata)} dates <- as.Date(paste(hdata$year, hdata$month, hdata$day, sep="-")) yyyymm <- format(dates, "%b.%y") yyyymm <- factor(yyyymm, levels=unique(yyyymm)) bid.Probability=std=mu=q95=q05 <- array(NA, dim=length(options$paths)) hpayoff <- matrix(NA, nrow=length(options$paths), ncol=length(levels(yyyymm)), dimnames=list(NULL, levels(yyyymm))) for (r in 1:nrow(Position)){ # Loop over unique positions ind.POI <- which(options$uNodes == Position[r,"Source"]) ind.POW <- which(options$uNodes == Position[r,"Sink"]) ind <- switch(as.character(Position$Bucket[r]), FLAT = ind.FLAT, PEAK = ind.PEAK, OFFPEAK = ind.OFFPEAK) hspread <- hdata[,ind.POW+4] - hdata[,ind.POI+4] hp <- tapply(hspread[ind], yyyymm[ind], mean, na.rm=T) ind <- which(options$paths %in% options$uPaths[r]) bid.Probability[ind] <- probFun(unique(hp[is.finite(hp)]), data$BidPrice[ind]) mu[ind] <- mean(hp, na.rm=T); std[ind] <- sd(hp, na.rm=T) hpayoff[ind,] <- rep(hp, each=length(ind)) q95[ind] <- round(quantile(hspread, probs=c(0.95), na.rm=T),2) q05[ind] <- round(quantile(hspread, probs=c(0.05), na.rm=T),2) } #----------------------------------------------Aggregate positions----- Position$Source <- as.character(Position$Source) Position$Sink <- as.character(Position$Sink) Position$Bucket <- 
as.character(Position$Bucket) sPosition <- data.frame(Symbols = c(Position$Sink, Position$Source), MW = c(Position$MW, -Position$MW), Bucket = c(Position$Bucket, Position$Bucket)) for (m in as.character(unique(yyyymm))){ } OutT <- data.frame(path=options$paths, Bid.Probability=round(bid.Probability,2), Hist.Mean=round(mu,2), Hist.Std=round(std,2), Sharpe=round((mu-data$BidPrice)/std,2), q05, q95, Bid.Price=data$BidPrice, round(hpayoff, 2)) filename <- paste(save$dir, "hist.analysis.csv", sep="") write.csv(OutT, file=filename) return(OutT) } # historical.portfolio.Payoff <- function(hdata, data, save, options){ # dates <- as.Date(paste(hdata$year, hdata$month, hdata$day, sep="-")) # yyyymm <- format(dates, "%b.%y") # yyyymm <- factor(yyyymm, levels=unique(yyyymm)) # last.hdate <- dates[length(dates)] # bom <- as.Date(paste(format(last.hdate, "%Y-%m"), "-01", sep="")) # last.1m <- seq(bom, by="-1 month", length.out=2)[2] # last.3m <- seq(bom, by="-1 month", length.out=4)[4] # last.6m <- seq(bom, by="-1 month", length.out=7)[7] # } # TMM<-hdata # TMM$DateTime<-ISOdatetime(TMM[,"year"],TMM[,"month"],TMM[,"day"],TMM[,"hour"],0,0) # DTime <- intersect(TMM$DateTime,fhours) # Dindex <- which (TMM$DateTime %in% DTime) # RMM<-TMM[Dindex,] # classes <- c("OFFPEAK", "PEAK", "FLAT") # for (c in classes) # { # ind <- grep(c, uPaths) # if (length(ind)==0){next} # ind.Nodes <- unique(c(as.character(Position[ind,"Source"]), as.character(Position[ind, "Sink"]))) # ind <- which(uNodes %in% as.numeric(ind.Nodes)) # HMM <- RMM[,c(1:4,4+ind)] # if (c=="OFFPEAK"){ HMM <- select.offpeak(HMM) } # if (c=="PEAK"){ HMM <- select.onpeak(HMM) } # hist.prices[[c]] <- HMM[,-c(1:4,length(HMM))] # } # realized.prices.FTR = list() # realized.prices.FTR$OFFPEAK <- apply(hist.prices$OFFPEAK,2,sum,na.rm = T) # realized.prices.FTR$PEAK <- apply(hist.prices$PEAK,2,sum,na.rm = T) # for (index in 1:dim(Position)[1]) # { # FTRClass <- as.character(Position[index,"Class"]) # ind.Source <- which( 
names(realized.prices.FTR[[FTRClass]]) == # paste("NEPOOL","SMD","DA",as.character(Position[index,"Source"]),"CongComp",sep="_")) # ind.Sink <- which( names(realized.prices.FTR[[FTRClass]]) == # paste("NEPOOL","SMD","DA",as.character(Position[index,"Sink"]),"CongComp",sep="_")) # Position[index,"Realized.Price"] <- realized.prices.FTR[[FTRClass]][ind.Sink] - realized.prices.FTR[[FTRClass]][ind.Source] # }
# clean workspace rm(nl201505) rm(nl201506) rm(nl201506sf) rm(nl201507) rm(nl201508) rm(nl201509) rm(nl201610) rm(nl201611) rm(nl201612) rm(nl201701) rm(nl201702) rm(nl201703) rm(nl201704) rm(nl201705) # Left with claims, portfolios, vcsn ### Reduce datasets to only information needed for histograms: # Claims: claims <- claims[,c("claimID", "portfolioID", "lossDate")] # Properties: portfolios <- portfolios[,c("portfolioID","vcsnLongitude.x", "vcsnLatitude.x")] names(portfolios)[2:3] <- c("vcsnLongitude", "vcsnLatitude") # Merge the property info to the claims claimPortfolioVcsnID <- merge(claims, portfolios, by = "portfolioID", all.x = TRUE) # tidy workspace: rm(portfolios) rm(claims) #drop if no property ID? #if you're on a slow computer: create subsets #claim0004 <- filter(claimPortfolioVcsnID, # claimPortfolioVcsnID$lossDate > "1999-12-31", # claimPortfolioVcsnID$lossDate < "2005-01-01") #claim0508 <- filter(claimPortfolioVcsnID, # claimPortfolioVcsnID$lossDate > "2004-12-31", # claimPortfolioVcsnID$lossDate < "2009-01-01") claim0912 <- filter(claimPortfolioVcsnID, claimPortfolioVcsnID$lossDate > "2008-12-31", claimPortfolioVcsnID$lossDate < "2013-01-01") claim1316 <- filter(claimPortfolioVcsnID, claimPortfolioVcsnID$lossDate > "2012-12-01", claimPortfolioVcsnID$lossDate < "2017-01-01") #vcsn0004 <- filter(vcsn, # vcsn$vcsnDay > "1999-12-31", # vcsn$vcsnDay < "2005-01-01") #vcsn0508 <- filter(vcsn, # vcsn$vcsnDay > "2004-12-31", # vcsn$vcsnDay < "2009-01-01") vcsn0912 <- filter(vcsn, vcsn$vcsnDay > "2008-12-31", vcsn$vcsnDay < "2013-01-01") vcsn1316 <- filter(vcsn, vcsn$vcsnDay > "2012-12-31", vcsn$vcsnDay < "2017-01-01") #add rainfall to claim info (subsets) claimPortfolioSpatialVCS0004 <- merge(claim0004, vcsn0004, by = c("vcsnLongitude", "vcsnLatitude")) claimPortfolioSpatialVCS0508 <- merge(claim0508, vcsn0508, by = c("vcsnLongitude", "vcsnLatitude")) claimPortfolioSpatialVCS0912 <- merge(claim0912, vcsn0912, by = c("vcsnLongitude", "vcsnLatitude")) 
claimPortfolioSpatialVCS1316 <- merge(claim1316, vcsn1316, by = c("vcsnLongitude", "vcsnLatitude")) # Add rainfall to claim info #claimPortfolioSpatialVCS <- merge(claimPortfolioSpatial06, vcsn, by = c("vcsnLongitude", "vcsnLatitude")) claimPortfolioSpatialVCS <- claimPortfolioSpatialVCS0004 # Add "offset" - days from loss date claimPortfolioSpatialVCS <- mutate(claimPortfolioSpatialVCS, lossDate-vcsnDay) names(claimPortfolioSpatialVCS)[10] <- c("offsetRaw") claimPortfolioSpatialVCS$offset <- as.double(claimPortfolioSpatialVCS$offsetRaw) # Keep only claims from 1999 or 2000 # claimPortfolioSpatialVCS <- filter(claimPortfolioSpatialVCS, lossDate >= "2000-01-01") # claimPortfolioSpatialVCS <- filter(claimPortfolioSpatialVCS, lossDate <= "2004-12-01") # Keep if offset is fewer than 10 or greater than -10 claimPortfolioSpatialVCS <- filter(claimPortfolioSpatialVCS, offsetRaw <= 10) claimPortfolioSpatialVCS <- filter(claimPortfolioSpatialVCS, offsetRaw >= -10) #EQC 08 histograms library(ggplot2) ggplot(data=claimPortfolioSpatialVCS) + geom_col(claimPortfolioSpatialVCS, mapping = aes( x = claimPortfolioSpatialVCS$offset, y = claimPortfolioSpatialVCS$rain ) ) claimPortfolioSpatialVCS <- claimPortfolioSpatialVCS0508 # Add "offset" - days from loss date claimPortfolioSpatialVCS <- mutate(claimPortfolioSpatialVCS, lossDate-vcsnDay) names(claimPortfolioSpatialVCS)[10] <- c("offsetRaw") claimPortfolioSpatialVCS$offset <- as.double(claimPortfolioSpatialVCS$offsetRaw) # Keep only claims from 1999 or 2000 # claimPortfolioSpatialVCS <- filter(claimPortfolioSpatialVCS, lossDate >= "2000-01-01") # claimPortfolioSpatialVCS <- filter(claimPortfolioSpatialVCS, lossDate <= "2004-12-01") # Keep if offset is fewer than 10 or greater than -10 claimPortfolioSpatialVCS <- filter(claimPortfolioSpatialVCS, offsetRaw <= 10) claimPortfolioSpatialVCS <- filter(claimPortfolioSpatialVCS, offsetRaw >= -10) #EQC 08 histograms library(ggplot2) ggplot(data=claimPortfolioSpatialVCS) + 
geom_col(claimPortfolioSpatialVCS, mapping = aes( x = claimPortfolioSpatialVCS$offset, y = claimPortfolioSpatialVCS$rain ) ) claimPortfolioSpatialVCS <- claimPortfolioSpatialVCS0912 # Add "offset" - days from loss date claimPortfolioSpatialVCS <- mutate(claimPortfolioSpatialVCS, lossDate-vcsnDay) names(claimPortfolioSpatialVCS)[10] <- c("offsetRaw") claimPortfolioSpatialVCS$offset <- as.double(claimPortfolioSpatialVCS$offsetRaw) # Keep only claims from 1999 or 2000 # claimPortfolioSpatialVCS <- filter(claimPortfolioSpatialVCS, lossDate >= "2000-01-01") # claimPortfolioSpatialVCS <- filter(claimPortfolioSpatialVCS, lossDate <= "2004-12-01") # Keep if offset is fewer than 10 or greater than -10 claimPortfolioSpatialVCS <- filter(claimPortfolioSpatialVCS, offsetRaw <= 10) claimPortfolioSpatialVCS <- filter(claimPortfolioSpatialVCS, offsetRaw >= -10) #EQC 08 histograms library(ggplot2) ggplot(data=claimPortfolioSpatialVCS) + geom_col(claimPortfolioSpatialVCS, mapping = aes( x = claimPortfolioSpatialVCS$offset, y = claimPortfolioSpatialVCS$rain ) ) claimPortfolioSpatialVCS <- claimPortfolioSpatialVCS1316 # Add "offset" - days from loss date claimPortfolioSpatialVCS <- mutate(claimPortfolioSpatialVCS, lossDate-vcsnDay) names(claimPortfolioSpatialVCS)[10] <- c("offsetRaw") claimPortfolioSpatialVCS$offset <- as.double(claimPortfolioSpatialVCS$offsetRaw) # Keep only claims from 1999 or 2000 # claimPortfolioSpatialVCS <- filter(claimPortfolioSpatialVCS, lossDate >= "2000-01-01") # claimPortfolioSpatialVCS <- filter(claimPortfolioSpatialVCS, lossDate <= "2004-12-01") # Keep if offset is fewer than 10 or greater than -10 claimPortfolioSpatialVCS <- filter(claimPortfolioSpatialVCS, offsetRaw <= 10) claimPortfolioSpatialVCS <- filter(claimPortfolioSpatialVCS, offsetRaw >= -10) #EQC 08 histograms library(ggplot2) ggplot(data=claimPortfolioSpatialVCS) + geom_col(claimPortfolioSpatialVCS, mapping = aes( x = claimPortfolioSpatialVCS$offset, y = claimPortfolioSpatialVCS$rain ) )
/Archive/EQC-03-analysis-histograms.R
no_license
SallyFreanOwen/insurance-and-climate
R
false
false
6,702
r
# Clean workspace: drop the monthly raw tables; keep claims, portfolios, vcsn.
rm(nl201505, nl201506, nl201506sf, nl201507, nl201508, nl201509,
   nl201610, nl201611, nl201612,
   nl201701, nl201702, nl201703, nl201704, nl201705)

### Reduce datasets to only information needed for histograms ----

# Claims: keep identifiers and the loss date.
claims <- claims[, c("claimID", "portfolioID", "lossDate")]

# Properties: keep portfolio ID plus the VCSN grid coordinates.
portfolios <- portfolios[, c("portfolioID", "vcsnLongitude.x", "vcsnLatitude.x")]
names(portfolios)[2:3] <- c("vcsnLongitude", "vcsnLatitude")

# Attach the property (grid point) info to every claim.
claimPortfolioVcsnID <- merge(claims, portfolios, by = "portfolioID", all.x = TRUE)

# Tidy workspace.
rm(portfolios, claims)
# NOTE(review): claims with no matching property keep NA coordinates; they
# drop out in the coordinate merges below -- confirm that is intended.

# Helper: rows of `df` whose `date_col` lies strictly between `after` and
# `before` (both "YYYY-MM-DD" strings; dates compare correctly as strings
# in this format).
date_window <- function(df, date_col, after, before) {
  filter(df, df[[date_col]] > after, df[[date_col]] < before)
}

# Four-year claim windows.  BUG FIX (1): the 2000-04 and 2005-08 subsets were
# commented out in the original but still used in the merges below, which
# errored on undefined objects -- they are now created unconditionally.
claim0004 <- date_window(claimPortfolioVcsnID, "lossDate", "1999-12-31", "2005-01-01")
claim0508 <- date_window(claimPortfolioVcsnID, "lossDate", "2004-12-31", "2009-01-01")
claim0912 <- date_window(claimPortfolioVcsnID, "lossDate", "2008-12-31", "2013-01-01")
# BUG FIX (2): the lower bound was "2012-12-01", which double-counted December
# 2012 claims (already inside claim0912) and disagreed with the matching
# vcsn1316 window; corrected to "2012-12-31".
claim1316 <- date_window(claimPortfolioVcsnID, "lossDate", "2012-12-31", "2017-01-01")

# Matching rainfall (VCSN) windows.
vcsn0004 <- date_window(vcsn, "vcsnDay", "1999-12-31", "2005-01-01")
vcsn0508 <- date_window(vcsn, "vcsnDay", "2004-12-31", "2009-01-01")
vcsn0912 <- date_window(vcsn, "vcsnDay", "2008-12-31", "2013-01-01")
vcsn1316 <- date_window(vcsn, "vcsnDay", "2012-12-31", "2017-01-01")

# Add rainfall to claim info: join each claim to every VCSN day observed at
# its grid point within the same window.
join_rain <- function(claimDF, vcsnDF) {
  merge(claimDF, vcsnDF, by = c("vcsnLongitude", "vcsnLatitude"))
}
claimPortfolioSpatialVCS0004 <- join_rain(claim0004, vcsn0004)
claimPortfolioSpatialVCS0508 <- join_rain(claim0508, vcsn0508)
claimPortfolioSpatialVCS0912 <- join_rain(claim0912, vcsn0912)
claimPortfolioSpatialVCS1316 <- join_rain(claim1316, vcsn1316)

# Helper replacing four copy-pasted analysis sections: compute the offset
# (days from VCSN day to loss date), keep |offset| <= 10 days, and plot the
# total rainfall observed at each offset.
plot_offset_rain <- function(df) {
  df <- mutate(df, offsetRaw = lossDate - vcsnDay)
  df$offset <- as.double(df$offsetRaw)
  # Keep if offset is fewer than 10 or greater than -10.
  df <- filter(df, offsetRaw <= 10, offsetRaw >= -10)
  # Original called geom_col(df, mapping = ...), passing the data frame as
  # the positional `mapping` argument; data belongs in ggplot() instead.
  ggplot(df, aes(x = offset, y = rain)) + geom_col()
}

library(ggplot2)
# One rainfall-vs-offset histogram per four-year window; print() ensures the
# plots render when the script is source()d.
print(plot_offset_rain(claimPortfolioSpatialVCS0004))
print(plot_offset_rain(claimPortfolioSpatialVCS0508))
print(plot_offset_rain(claimPortfolioSpatialVCS0912))
print(plot_offset_rain(claimPortfolioSpatialVCS1316))
#' Tourist arrivals to Sri Lanka by purpose of visit #' #' @description Tourist arrivals to Sri Lanka by purpose of visit. #' There was a civil war in the country from July 1983 to May 2009. #' Sri Lanka Easter bombings happened on 21 April 2019. From 2011 to 2019, data cover #' the entire island. #' On 26 December 2004, Sri Lanka became a victim of the Indian Ocean Tsunami. #' @format Time series of class tsibble #' \describe{ #' \item{Year}{Year} #' \item{Purpose}{Purpose of visit} #' \item{Arrivals}{Tourist arrivals} #'} #' #' @source Annual Statistical Reports, Sri Lanka Tourism Development Authority #' #' @author Priyanga Dilini Talagala #' #' @examples #' library(fable) #' library(ggplot2) #' head(touristsl_purpose) #' autoplot(touristsl_purpose) + #' geom_point(aes(shape= Purpose)) + #' scale_shape_manual(values = 1:11) #' "touristsl_purpose"
/R/touristsl_purpose.R
no_license
pridiltal/datahut
R
false
false
863
r
#' Tourist arrivals to Sri Lanka by purpose of visit #' #' @description Tourist arrivals to Sri Lanka by purpose of visit. #' There was a civil war in the country from July 1983 to May 2009. #' Sri Lanka Easter bombings happened on 21 April 2019. From 2011 to 2019, data cover #' the entire island. #' On 26 December 2004, Sri Lanka became a victim of the Indian Ocean Tsunami. #' @format Time series of class tsibble #' \describe{ #' \item{Year}{Year} #' \item{Purpose}{Purpose of visit} #' \item{Arrivals}{Tourist arrivals} #'} #' #' @source Annual Statistical Reports, Sri Lanka Tourism Development Authority #' #' @author Priyanga Dilini Talagala #' #' @examples #' library(fable) #' library(ggplot2) #' head(touristsl_purpose) #' autoplot(touristsl_purpose) + #' geom_point(aes(shape= Purpose)) + #' scale_shape_manual(values = 1:11) #' "touristsl_purpose"
\name{PH.Louis.ICsurv} \alias{PH.Louis.ICsurv} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Calculating the Hessian matrix using Louis's Method (1982) } \description{ Calculates the negative of the Hessian of the log of the observed data likelihood, obtained via Louis's method, evaluated at the last step of the EM algorithm described in Wang et al. (2014+). This is a support function for \code{\link{PH.ICsurv.EM}}. } \usage{ PH.Louis.ICsurv(b, g, bLi, bRi, d1, d2, d3, Xp) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{b}{estimates of the regression coefficients obtained at the convergence of the EM algorithm.} \item{g}{estimates of the spline coefficients obtained at the convergence of the EM algorithm.} \item{bLi}{an I-spline basis matrix of dimension c(length(knots)+order-2, length(x)), corresponding to the left end points of the observed intervals.} \item{bRi}{an I-spline basis matrix of dimension c(length(knots)+order-2, length(x)), corresponding to the right end points of the observed intervals.} \item{d1}{vector indicating whether an observation is left-censored (1) or not (0).} \item{d2}{vector indicating whether an observation is interval-censored (1) or not (0).} \item{d3}{vector indicating whether an observation is right-censored (1) or not (0).} \item{Xp}{design matrix of predictor variables (in columns), should be specified without an intercept term.} } \details{ To obtain the Hessian matrix of the observed likelihood evaluated at the last step of the EM algorithm. } \value{ Hessian matrix. } \references{ Louis, T. (1982). Finding the observed information matrix when using the EM algorithm. Journal of the Royal Statistical Society, Series B 44, 226-233. Wang, L., McMahan, C., and Hudgens, M. (2014+). A flexible and computationally efficient method for fitting the proportional hazards model to interval censored data. Submitted. }
/man/PH.Louis.ICsurv.Rd
no_license
cran/ICsurv
R
false
false
1,997
rd
\name{PH.Louis.ICsurv} \alias{PH.Louis.ICsurv} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Calculating the Hessian matrix using Louis's Method (1982) } \description{ Calculates the negative of the Hessian of the log of the observed data likelihood, obtained via Louis's method, evaluated at the last step of the EM algorithm described in Wang et al. (2014+). This is a support function for \code{\link{PH.ICsurv.EM}}. } \usage{ PH.Louis.ICsurv(b, g, bLi, bRi, d1, d2, d3, Xp) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{b}{estimates of the regression coefficients obtained at the convergence of the EM algorithm.} \item{g}{estimates of the spline coefficients obtained at the convergence of the EM algorithm.} \item{bLi}{an I-spline basis matrix of dimension c(length(knots)+order-2, length(x)), corresponding to the left end points of the observed intervals.} \item{bRi}{an I-spline basis matrix of dimension c(length(knots)+order-2, length(x)), corresponding to the right end points of the observed intervals.} \item{d1}{vector indicating whether an observation is left-censored (1) or not (0).} \item{d2}{vector indicating whether an observation is interval-censored (1) or not (0).} \item{d3}{vector indicating whether an observation is right-censored (1) or not (0).} \item{Xp}{design matrix of predictor variables (in columns), should be specified without an intercept term.} } \details{ To obtain the Hessian matrix of the observed likelihood evaluated at the last step of the EM algorithm. } \value{ Hessian matrix. } \references{ Louis, T. (1982). Finding the observed information matrix when using the EM algorithm. Journal of the Royal Statistical Society, Series B 44, 226-233. Wang, L., McMahan, C., and Hudgens, M. (2014+). A flexible and computationally efficient method for fitting the proportional hazards model to interval censored data. Submitted. }
/Quize/Quiz1.R
no_license
TrentLin/Getting-and-Cleaning-Data
R
false
false
1,182
r
# Histogram of household Global Active Power (course project plot 1).
# NOTE(review): the household power dataset typically encodes missing values
# as "?" -- na.strings below assumes that; confirm against the actual file.
df <- read.table("C:/Users/Vahidu/Desktop/Vahid/household_power_consumption.txt",
                 header = TRUE, sep = ";", na.strings = "?",
                 stringsAsFactors = FALSE)
# Column 3 is the global active power reading; coerce to numeric in case the
# column was read as character.
GlobalAP <- as.numeric(df[, 3])
# BUG FIX: the original title() call was `title(main = "Gobal Active Power)`,
# an unterminated string literal (syntax error) with a typo in "Global".
hist(GlobalAP, col = "red", xlab = "Global Active Power (kilowatts)",
     main = "Global Active Power")
/Project1.R
no_license
vahiduz/test_2
R
false
false
224
r
# Histogram of household Global Active Power (course project plot 1).
# NOTE(review): the household power dataset typically encodes missing values
# as "?" -- na.strings below assumes that; confirm against the actual file.
df <- read.table("C:/Users/Vahidu/Desktop/Vahid/household_power_consumption.txt",
                 header = TRUE, sep = ";", na.strings = "?",
                 stringsAsFactors = FALSE)
# Column 3 is the global active power reading; coerce to numeric in case the
# column was read as character.
GlobalAP <- as.numeric(df[, 3])
# BUG FIX: the original title() call was `title(main = "Gobal Active Power)`,
# an unterminated string literal (syntax error) with a typo in "Global".
hist(GlobalAP, col = "red", xlab = "Global Active Power (kilowatts)",
     main = "Global Active Power")
seed <- 169 log.wt <- 0.0 penalty <- 2.8115950178536287e-8 intervals.send <- c() intervals.recv <- c(56, 112, 225, 450, 900, 1800, 3600, 7200, 14400, 28800, 57600, 115200, 230400, 460800, 921600, 1843200, 3686400, 7372800, 14745600, 29491200, 58982400) dev.null <- 358759.0022669336 df.null <- 35567 dev.resid <- 225263.01231970207 df.resid <- 35402 df <- 165 coefs <- c(6.371808011299801, 5.632257261417825, 5.708373604524864, 5.390068064002022, 5.1399910727295435, 4.910277774496641, 4.736032640108778, 4.67304086915771, 4.3888490564350775, 4.247044340022233, 4.385743411941944, 4.170064053529501, 4.0334792215540185, 3.9749410623567627, 3.7892094092813986, 3.5488256513477916, 3.188260845884309, 2.9453757646007106, 2.4849483622722186, 2.040984971936788, 1.5757254516608628, 0.9457696018243895, 1.0209481883218323, 0.422832081509851, -9.566043264087223e-2, -1.1200773139869924, -0.34071912217378286, 0.8757225397215108, 1.0336274153688483, -1.4248348245596045, -2.17085735398045, -2.524461358515056, -0.20525992838526547, 0.7666590609643258, 1.2555379690935118, -1.1175120771166616, -0.4790673540225199, -1.6322166973342542, -5.066716050768051e-2, -0.6821997364795933, 0.9010566215718945, 1.0607690878183194, -1.0574180023829234, -1.8958171343592882, -0.6870692503481938, -0.7752242391974409, -0.6869418418841062, 0.28345493181318154, 8.009237523251764e-3, -0.7198277863789128, 0.401935654112183, 0.7983270696238858, -2.5278608417162576, 1.7412518072726935, 0.7729455613423319, 0.9210585643793081, -1.7149162261007693, -7.128920931840632e-2, 1.3240950836436564e-2, 1.5843967250059576, 1.1024987417727037, 0.9227060624923057, -1.5627813126859915, -0.7797508103148233, -0.5926526069416876, -0.11762406956507981, 0.7204991081582656, -0.6555894954215954, -1.0349396508089717, -0.6881428549479272, -1.8844584370298914, -0.3783275576466822, 0.6289103747356857, 1.0573624288215528, 0.6758982492943774, -0.7536931027930147, -1.1317155042493634, -1.2023831693289362, 2.857836564091531e-2, 
0.764034443296256, 1.1572394252352938, 0.10547718155217777, 0.16913463359261974, -2.0616683010949353, -0.4083253247161117, 0.3986026576755187, 1.1776369385635506, 0.48883242392978865, 0.7655627262915718, -2.5948017970913786, 0.5238372717748094, 0.86762640408084, 0.6999171963121575, 0.46508112324116907, -0.2673776492712253, 1.2482288456809363, -0.8666019033581797, 0.5921976221029461, -7.662648261507161e-2, -0.27470428797348706, 0.46422966163171575, -4.6883971185697516e-2, 0.7311637944878415, -6.510680824511222e-2, 0.42197002520691596, 0.8331636191606978, 1.064226073232587, -1.0595739231678305, 4.6132906089845264e-2, -0.8226093901965728, 0.48555992373494666, 0.5554183973603396, 1.549417129129145, -0.5248924555212591, -0.10010048544845936, -0.8178323739084311, 0.7284494158586274, -0.35142943234692736, 0.45192854307332797, 0.6011594888815536, -0.41194498821542186, -0.4070105725563019, -1.8247466867032534, -0.38207593283775404, 0.4190212906813303, 0.9444909022770658, -8.281942157123753e-2, 1.0800971493099276, -0.9166228374986084, -0.4077680143707719, 0.4168973141377952, 0.9358896866245332, 0.9078272304338337, 0.3953421761997959, 0.12049094778548358, 0.8734903477194355, -0.39557917370348417, 1.0558774457060818, 0.7238952343385673, 0.9664837929018406, 0.6780034757083508, -0.7491993837304733, -1.5265528539831992, 0.7669218101645427, 0.6113731932007961, 0.550613698046351, -0.2568679815411744, -0.43443008008053335, -2.046528658666498, 1.3876449493690939, 0.11651315646887439, 1.206148811974097, -0.28002006733745816, -7.30974528308602e-2, -2.6582890119708012e-2, -1.2258017106545054, -1.043151680803022, 0.7849171151145801, 1.2508870327364312, -0.28493682787576347, 1.561421940602749, -0.27861390519132084, -0.206237228828563, 0.1533580045072034, 1.1532484221579204)
/analysis/boot/boot169.R
no_license
patperry/interaction-proc
R
false
false
3,764
r
seed <- 169 log.wt <- 0.0 penalty <- 2.8115950178536287e-8 intervals.send <- c() intervals.recv <- c(56, 112, 225, 450, 900, 1800, 3600, 7200, 14400, 28800, 57600, 115200, 230400, 460800, 921600, 1843200, 3686400, 7372800, 14745600, 29491200, 58982400) dev.null <- 358759.0022669336 df.null <- 35567 dev.resid <- 225263.01231970207 df.resid <- 35402 df <- 165 coefs <- c(6.371808011299801, 5.632257261417825, 5.708373604524864, 5.390068064002022, 5.1399910727295435, 4.910277774496641, 4.736032640108778, 4.67304086915771, 4.3888490564350775, 4.247044340022233, 4.385743411941944, 4.170064053529501, 4.0334792215540185, 3.9749410623567627, 3.7892094092813986, 3.5488256513477916, 3.188260845884309, 2.9453757646007106, 2.4849483622722186, 2.040984971936788, 1.5757254516608628, 0.9457696018243895, 1.0209481883218323, 0.422832081509851, -9.566043264087223e-2, -1.1200773139869924, -0.34071912217378286, 0.8757225397215108, 1.0336274153688483, -1.4248348245596045, -2.17085735398045, -2.524461358515056, -0.20525992838526547, 0.7666590609643258, 1.2555379690935118, -1.1175120771166616, -0.4790673540225199, -1.6322166973342542, -5.066716050768051e-2, -0.6821997364795933, 0.9010566215718945, 1.0607690878183194, -1.0574180023829234, -1.8958171343592882, -0.6870692503481938, -0.7752242391974409, -0.6869418418841062, 0.28345493181318154, 8.009237523251764e-3, -0.7198277863789128, 0.401935654112183, 0.7983270696238858, -2.5278608417162576, 1.7412518072726935, 0.7729455613423319, 0.9210585643793081, -1.7149162261007693, -7.128920931840632e-2, 1.3240950836436564e-2, 1.5843967250059576, 1.1024987417727037, 0.9227060624923057, -1.5627813126859915, -0.7797508103148233, -0.5926526069416876, -0.11762406956507981, 0.7204991081582656, -0.6555894954215954, -1.0349396508089717, -0.6881428549479272, -1.8844584370298914, -0.3783275576466822, 0.6289103747356857, 1.0573624288215528, 0.6758982492943774, -0.7536931027930147, -1.1317155042493634, -1.2023831693289362, 2.857836564091531e-2, 
0.764034443296256, 1.1572394252352938, 0.10547718155217777, 0.16913463359261974, -2.0616683010949353, -0.4083253247161117, 0.3986026576755187, 1.1776369385635506, 0.48883242392978865, 0.7655627262915718, -2.5948017970913786, 0.5238372717748094, 0.86762640408084, 0.6999171963121575, 0.46508112324116907, -0.2673776492712253, 1.2482288456809363, -0.8666019033581797, 0.5921976221029461, -7.662648261507161e-2, -0.27470428797348706, 0.46422966163171575, -4.6883971185697516e-2, 0.7311637944878415, -6.510680824511222e-2, 0.42197002520691596, 0.8331636191606978, 1.064226073232587, -1.0595739231678305, 4.6132906089845264e-2, -0.8226093901965728, 0.48555992373494666, 0.5554183973603396, 1.549417129129145, -0.5248924555212591, -0.10010048544845936, -0.8178323739084311, 0.7284494158586274, -0.35142943234692736, 0.45192854307332797, 0.6011594888815536, -0.41194498821542186, -0.4070105725563019, -1.8247466867032534, -0.38207593283775404, 0.4190212906813303, 0.9444909022770658, -8.281942157123753e-2, 1.0800971493099276, -0.9166228374986084, -0.4077680143707719, 0.4168973141377952, 0.9358896866245332, 0.9078272304338337, 0.3953421761997959, 0.12049094778548358, 0.8734903477194355, -0.39557917370348417, 1.0558774457060818, 0.7238952343385673, 0.9664837929018406, 0.6780034757083508, -0.7491993837304733, -1.5265528539831992, 0.7669218101645427, 0.6113731932007961, 0.550613698046351, -0.2568679815411744, -0.43443008008053335, -2.046528658666498, 1.3876449493690939, 0.11651315646887439, 1.206148811974097, -0.28002006733745816, -7.30974528308602e-2, -2.6582890119708012e-2, -1.2258017106545054, -1.043151680803022, 0.7849171151145801, 1.2508870327364312, -0.28493682787576347, 1.561421940602749, -0.27861390519132084, -0.206237228828563, 0.1533580045072034, 1.1532484221579204)
# Boxplots of diamond price within 0.25-carat bins (plot object stored in p,
# not printed).
p <- ggplot(data = diamonds, mapping = aes(x = carat, y = price)) +
  geom_boxplot(mapping = aes(group = cut_width(carat, 0.25)))
/ggplot2/Layers/Geoms/geom_boxplot/example10.R
no_license
plotly/ssim_baselines
R
false
false
100
r
# Boxplots of diamond price within 0.25-carat bins (plot object stored in p,
# not printed).
p <- ggplot(data = diamonds, mapping = aes(x = carat, y = price)) +
  geom_boxplot(mapping = aes(group = cut_width(carat, 0.25)))
# caret custom-model definition for Bayesian Additive Regression Trees,
# backed by the bartMachine package.  Supports classification and regression.
modelInfo <- list(
  label = "Bayesian Additive Regression Trees",
  library = "bartMachine",
  loop = NULL,
  type = c("Classification", "Regression"),
  # Tuning parameters exposed to caret's search.
  parameters = data.frame(
    parameter = c("num_trees", "k", "alpha", "beta", "nu"),
    class = rep("numeric", 5),
    label = c("#Trees", "Prior Boundary",
              "Base Terminal Node Hyperparameter",
              "Power Terminal Node Hyperparameter",
              "Degrees of Freedom")
  ),
  # Candidate hyperparameter grid: regular grid or random draws.
  grid = function(x, y, len = NULL, search = "grid") {
    if (search == "grid") {
      out <- expand.grid(num_trees = 50,
                         k = (1:len) + 1,
                         alpha = seq(.9, .99, length = len),
                         beta = seq(1, 3, length = len),
                         nu = (1:len) + 1)
    } else {
      out <- data.frame(num_trees = sample(10:100, replace = TRUE, size = len),
                        k = runif(len, min = 0, max = 5),
                        alpha = runif(len, min = .9, max = 1),
                        beta = runif(len, min = 0, max = 4),
                        nu = runif(len, min = 0, max = 5))
    }
    # k and nu apply only to regression; blank them out for classification.
    if (is.factor(y)) {
      out$k <- NA
      out$nu <- NA
    }
    out <- out[!duplicated(out), ]
  },
  # Fit one model for a single row of tuning parameters.
  fit = function(x, y, wts, param, lev, last, classProbs, ...) {
    if (!is.data.frame(x)) x <- as.data.frame(x)
    # Classification fits omit k and nu (see grid above).
    out <- if (is.factor(y)) {
      bartMachine(X = x, y = y, num_trees = param$num_trees,
                  alpha = param$alpha, beta = param$beta, ...)
    } else {
      bartMachine(X = x, y = y, num_trees = param$num_trees, k = param$k,
                  alpha = param$alpha, beta = param$beta, nu = param$nu, ...)
    }
    out
  },
  # Class labels for classification, numeric predictions for regression.
  predict = function(modelFit, newdata, submodels = NULL) {
    if (!is.data.frame(newdata)) newdata <- as.data.frame(newdata)
    out <- if (is.factor(modelFit$y)) {
      predict(modelFit, newdata, type = "class")
    } else {
      predict(modelFit, newdata)
    }
  },
  # Two-column class-probability data frame named after the outcome levels.
  prob = function(modelFit, newdata, submodels = NULL) {
    if (!is.data.frame(newdata)) newdata <- as.data.frame(newdata)
    out <- predict(modelFit, newdata, type = "prob")
    out <- data.frame(y1 = 1 - out, y2 = out)
    colnames(out) <- modelFit$y_levels
    out
  },
  predictors = function(x, ...) colnames(x$X),
  # Variable importance: lower bound (approx. 95%) on the average inclusion
  # proportions; predictors never selected get an importance of zero.
  varImp = function(object, ...) {
    imps <- investigate_var_importance(object, plot = FALSE)
    imps <- imps$avg_var_props - 1.96 * imps$sd_var_props
    missing_x <- !(colnames(object$X) %in% names(imps))
    if (any(missing_x)) {
      imps2 <- rep(0, sum(missing_x))
      names(imps2) <- colnames(object$X)[missing_x]
      imps <- c(imps, imps2)
    }
    out <- data.frame(Overall = as.vector(imps))
    rownames(out) <- names(imps)
    out
  },
  levels = function(x) x$y_levels,
  tags = c("Tree-Based Model", "Implicit Feature Selection", "Bayesian Model"),
  # Rank candidate models: more trees first.
  sort = function(x) x[order(-x[, "num_trees"]), ]
)
/models/files/bartMachine.R
no_license
terrytangyuan/caret
R
false
false
4,453
r
# caret custom-model definition for Bayesian Additive Regression Trees,
# backed by the bartMachine package.  Supports classification and regression.
modelInfo <- list(
  label = "Bayesian Additive Regression Trees",
  library = "bartMachine",
  loop = NULL,
  type = c("Classification", "Regression"),
  # Tuning parameters exposed to caret's search.
  parameters = data.frame(
    parameter = c("num_trees", "k", "alpha", "beta", "nu"),
    class = rep("numeric", 5),
    label = c("#Trees", "Prior Boundary",
              "Base Terminal Node Hyperparameter",
              "Power Terminal Node Hyperparameter",
              "Degrees of Freedom")
  ),
  # Candidate hyperparameter grid: regular grid or random draws.
  grid = function(x, y, len = NULL, search = "grid") {
    if (search == "grid") {
      out <- expand.grid(num_trees = 50,
                         k = (1:len) + 1,
                         alpha = seq(.9, .99, length = len),
                         beta = seq(1, 3, length = len),
                         nu = (1:len) + 1)
    } else {
      out <- data.frame(num_trees = sample(10:100, replace = TRUE, size = len),
                        k = runif(len, min = 0, max = 5),
                        alpha = runif(len, min = .9, max = 1),
                        beta = runif(len, min = 0, max = 4),
                        nu = runif(len, min = 0, max = 5))
    }
    # k and nu apply only to regression; blank them out for classification.
    if (is.factor(y)) {
      out$k <- NA
      out$nu <- NA
    }
    out <- out[!duplicated(out), ]
  },
  # Fit one model for a single row of tuning parameters.
  fit = function(x, y, wts, param, lev, last, classProbs, ...) {
    if (!is.data.frame(x)) x <- as.data.frame(x)
    # Classification fits omit k and nu (see grid above).
    out <- if (is.factor(y)) {
      bartMachine(X = x, y = y, num_trees = param$num_trees,
                  alpha = param$alpha, beta = param$beta, ...)
    } else {
      bartMachine(X = x, y = y, num_trees = param$num_trees, k = param$k,
                  alpha = param$alpha, beta = param$beta, nu = param$nu, ...)
    }
    out
  },
  # Class labels for classification, numeric predictions for regression.
  predict = function(modelFit, newdata, submodels = NULL) {
    if (!is.data.frame(newdata)) newdata <- as.data.frame(newdata)
    out <- if (is.factor(modelFit$y)) {
      predict(modelFit, newdata, type = "class")
    } else {
      predict(modelFit, newdata)
    }
  },
  # Two-column class-probability data frame named after the outcome levels.
  prob = function(modelFit, newdata, submodels = NULL) {
    if (!is.data.frame(newdata)) newdata <- as.data.frame(newdata)
    out <- predict(modelFit, newdata, type = "prob")
    out <- data.frame(y1 = 1 - out, y2 = out)
    colnames(out) <- modelFit$y_levels
    out
  },
  predictors = function(x, ...) colnames(x$X),
  # Variable importance: lower bound (approx. 95%) on the average inclusion
  # proportions; predictors never selected get an importance of zero.
  varImp = function(object, ...) {
    imps <- investigate_var_importance(object, plot = FALSE)
    imps <- imps$avg_var_props - 1.96 * imps$sd_var_props
    missing_x <- !(colnames(object$X) %in% names(imps))
    if (any(missing_x)) {
      imps2 <- rep(0, sum(missing_x))
      names(imps2) <- colnames(object$X)[missing_x]
      imps <- c(imps, imps2)
    }
    out <- data.frame(Overall = as.vector(imps))
    rownames(out) <- names(imps)
    out
  },
  levels = function(x) x$y_levels,
  tags = c("Tree-Based Model", "Implicit Feature Selection", "Bayesian Model"),
  # Rank candidate models: more trees first.
  sort = function(x) x[order(-x[, "num_trees"]), ]
)
############################################################################
# Chapter     : 7
# Description : Classifying Flickr Data using Random Forests and SVM
############################################################################

library(httr)
library(plyr)
library(dplyr)
library(rlist)
library(pipeR)
library(stringr)
library(ggplot2)
library(reshape2)
library(corrplot)
library(jsonlite)
library(lubridate)
library(pROC)
library(e1071)
library(caret)

############################################################################
# Flickr APP Auth
############################################################################

# App Credentials
api_key <- "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
secret <- "XXXXXXXXXXXXXXXXXXXXXXXX"

# Create connection object
flickr.app <- oauth_app("Flickr Sample App", api_key, secret)
flickr.endpoint <- oauth_endpoint(
  request = "https://www.flickr.com/services/oauth/request_token",
  authorize = "https://www.flickr.com/services/oauth/authorize",
  access = "https://www.flickr.com/services/oauth/access_token"
)

# connect using OAuth
tok <- oauth1.0_token(flickr.endpoint, flickr.app, cache = FALSE)

############################################################################
# Utility methods
############################################################################

# Get the first entry of a space-separated string as a number; any parse
# warning or error yields 0.
get_first <- function(y) {
  tryCatch(
    as.numeric(strsplit(y, " ")[[1]][1]),
    warning = function(w) 0,  # handle errors by using 0
    error = function(e) 0     # handle errors by using 0
  )
}

# Fetch the public photos of a user as a data frame
# (flickr.people.getpublicphotos).
# NOTE(review): the format string interpolates only api_key and user_id;
# the oauth token argument is passed to sprintf but never used -- confirm.
getPhotosFromFlickr <- function(api_key, token, user_id) {
  GET(url = sprintf(
    "https://api.flickr.com/services/rest/?method=flickr.people.getpublicphotos&api_key=%s&user_id=%s&format=json&nojsoncallback=1",
    api_key,
    user_id,
    token$credentials$oauth_token
  )) %>>%
    content(as = "text") %>>%
    jsonlite::fromJSON() %>>%
    (.$photos$photo) %>>%
    (data.frame(., stringsAsFactors = FALSE))
}

# Fetch EXIF data for every photo in the data frame, one API call per row
# (flickr.photos.getExif).  Returns a list of parsed JSON responses.
getEXIF <- function(api_key, photosDF) {
  lapply(
    1:nrow(photosDF),
    function(photo) {
      exif <- GET(url = sprintf(
        "https://api.flickr.com/services/rest/?method=flickr.photos.getExif&api_key=%s&photo_id=%s&secret=%s&format=json&nojsoncallback=1",
        api_key,
        photosDF[photo, "id"],
        photosDF[photo, "secret"]
      )) %>>%
        content(as = "text") %>>%
        jsonlite::fromJSON()
    }
  )
}

# Fetch the view count for every photo in the data frame
# (flickr.photos.getInfo).  Returns a numeric vector.
getViewCounts <- function(api_key, photosDF) {
  photos.tagData <- lapply(
    1:nrow(photosDF),
    function(photo) {
      tag <- GET(url = sprintf(
        "https://api.flickr.com/services/rest/?method=flickr.photos.getInfo&api_key=%s&photo_id=%s&secret=%s&format=json&nojsoncallback=1",
        api_key,
        photosDF[photo, "id"],
        photosDF[photo, "secret"]
      )) %>>%
        content(as = "text") %>>%
        jsonlite::fromJSON()
    }
  )
  # Image View Count
  as.numeric(photos.tagData %>>% list.map(unlist(.$photo$views)) %>>% as.character)
}

# Pull the ISO speed out of each photo's EXIF payload.
extractISO <- function(photos.exifData) {
  as.numeric(
    photos.exifData %>>%
      list.map(as.numeric(
        as.data.frame(.$photo$exif)[which(.$photo$exif["label"] == "ISO Speed"), "raw"]
      ))
  )
}

# Pull the camera make/manufacturer out of each photo's EXIF payload;
# photos without a Make tag become NA.
extractMakes <- function(photos.exifData) {
  make_list <- photos.exifData %>>%
    list.map(
      unlist(as.data.frame(.$photo$exif)[which(.$photo$exif["label"] == "Make"), "raw"])[1] %>>%
        as.character
    ) %>>%
    as.character
  make_list <- ifelse(make_list == "character(0)", NA, make_list)
}

# Pull the focal length out of each photo's EXIF payload, keeping only the
# leading number (via get_first); missing values become NA then 0.
extractFocalLength <- function(photos.exifData) {
  focal_list <- photos.exifData %>>%
    list.map(
      unlist(as.data.frame(.$photo$exif)[which(.$photo$exif["label"] == "Focal Length"), "raw"])[1]
    ) %>>%
    as.character
  focal_list <- ifelse(focal_list == "NULL", NA, focal_list)
  focal_list <- unlist(lapply(focal_list, get_first))
}

# Pull the white balance setting out of each photo's EXIF payload.
extractWB <- function(photos.exifData) {
  whiteBalance_list <- photos.exifData %>>%
    list.map(
      unlist(as.data.frame(.$photo$exif)[which(.$photo$exif["label"] == "White Balance"), "raw"])[1] %>>%
        as.character
    ) %>>%
    as.character
  whiteBalance_list <- ifelse(whiteBalance_list == "character(0)", NA, whiteBalance_list)
}

# Pull the metering mode out of each photo's EXIF payload.
extractMeteringMode <- function(photos.exifData) {
  meteringMode_list <- photos.exifData %>>%
    list.map(
      unlist(as.data.frame(.$photo$exif)[which(.$photo$exif["label"] == "Metering Mode"), "raw"])[1] %>>%
        as.character
    ) %>>%
    as.character
  meteringMode_list <- ifelse(meteringMode_list == "character(0)", NA, meteringMode_list)
}

# Normalise free-text camera makes to canonical lower-case brand names.
mapCamMakes <- function(x) {
  if (!is.na(str_match(tolower(x), "canon"))) {
    "canon"
  } else if (!is.na(str_match(tolower(x), "nikon"))) {
    "nikon"
  } else if (!is.na(str_match(tolower(x), "fuji"))) {
    "fujifilm"
  } else if (!is.na(str_match(tolower(x), "leica"))) {
    "leica"
  } else if (!is.na(str_match(tolower(x), "olympus"))) {
    "olympus"
  } else {
    tolower(x)
  }
}

# Assemble one user's photos with EXIF-derived attributes and view counts.
getUserPhotos <- function(api_key, token, user_id) {
  # get user's photos in a dataframe
  photosDF <- getPhotosFromFlickr(api_key, token, user_id)
  # get exif for each photo in dataframe
  photos.exifData <- getEXIF(api_key, photosDF)
  # Image ISO
  iso_list <- extractISO(photos.exifData)
  # Image Manufacturer/Make
  make_list <- extractMakes(photos.exifData)
  # Image Focal Length
  focal_list <- extractFocalLength(photos.exifData)
  # Image White Balance
  whiteBalance_list <- extractWB(photos.exifData)
  # Image Metering Mode
  meteringMode_list <- extractMeteringMode(photos.exifData)
  # Add attributes to main data frame
  photosDF$iso <- iso_list
  photosDF$make <- make_list
  photosDF$focal_length <- focal_list
  photosDF$white_balance <- whiteBalance_list
  photosDF$metering_mode <- meteringMode_list
  # get view counts
  photosDF$views <- getViewCounts(api_key, photosDF)
  as.data.frame(photosDF)
}

# Typecast attribute columns to factors for use by the classifiers.
prepareClassifierDF <- function(classifyDF) {
  # convert white balance to factor and then encode numeric
  classifyDF$white_balance <- as.factor(classifyDF$white_balance)
  # convert metering mode to factor
  classifyDF$metering_mode <- as.factor(classifyDF$metering_mode)
  # convert make_clean to factor
  classifyDF$make_clean <- as.factor(classifyDF$make_clean)
  as.data.frame(classifyDF)
}

############################################################################
# Prepare Dataset
############################################################################

# Negative class: photos which have not featured on the Explore page,
# collected from a hand-picked list of user IDs.
mortal_userIDS <- c('XXXXXXXXXXX', 'XXXXXXXXXXX', 'XXXXXXXXXXX', 'XXXXXXXXXXX')
neg_interesting_df <- lapply(mortal_userIDS, getUserPhotos,
                             api_key = api_key, token = tok) %>>%
  (do.call(rbind, .))
neg_interesting_df <- na.omit(neg_interesting_df)
neg_interesting_df$make_clean <- sapply(neg_interesting_df$make, mapCamMakes)
neg_interesting_df$is_interesting <- 0

# Positive class: photos from the Explore page.
# NOTE(review): `interesting` is assumed to exist in the session (built in an
# earlier chapter script) -- confirm before running standalone.
pos_interesting_df <- na.omit(interesting)
pos_interesting_df$is_interesting <- 1

# prepare overall dataset
classifyDF <- rbind(pos_interesting_df[, colnames(neg_interesting_df)],
                    neg_interesting_df)
# convert attributes to proper data types
classifyDF <- prepareClassifierDF(classifyDF)
# convert is_interesting to factor -> class target
classifyDF$is_interesting <- as.factor(classifyDF$is_interesting)
# restrict columns
req_cols <- c('is_interesting', 'iso', 'focal_length', 'white_balance',
              'metering_mode', 'views', 'make_clean')
classifyDF <- classifyDF[, req_cols]

############################################################################
# Prepare Train and Test Data Sets
############################################################################

# 60/40 train-test split with a fixed seed for reproducibility.
set.seed(42)
samp <- sample(nrow(classifyDF), 0.6 * nrow(classifyDF))
train <- classifyDF[samp, ]
test <- classifyDF[-samp, ]

############################################################################
# Random Forest based Classifier
############################################################################

# train model (caret picks rf by default for the formula interface here)
rfModel <- train(is_interesting ~ ., train,
                 preProcess = c("scale"),
                 tuneLength = 8,
                 trControl = trainControl(method = "cv"))

# Prediction
predictedProb <- predict(rfModel, test[, -1], type = "prob")

# Draw ROC curve.
resultROC <- roc(test$is_interesting, predictedProb$"1")
plot(resultROC, print.thres = "best",
     print.thres.best.method = "closest.topleft")

# to get threshold and accuracy
resultCoords <- coords(resultROC, "best",
                       best.method = "closest.topleft",
                       ret = c("threshold", "accuracy"))
print(resultCoords)

# Confusion Matrix
confusionMatrix(test$is_interesting, predict(rfModel, test[, -1]))

############################################################################
# SVM based Classifier
############################################################################

# standard svm
svm_model <- svm(is_interesting ~ ., data = train)
summary(svm_model)

# get model performance
pred <- predict(svm_model, test[, -1])
confusionMatrix(test$is_interesting, pred)

# Tune SVM and cross validate
svm_tune <- tune(svm, is_interesting ~ ., data = train,
                 ranges = list(cost = 10^(-1:2), gamma = c(.5, 1, 2)))
summary(svm_tune)

# get model performance
pred <- predict(svm_tune$best.model, test[, -1])
confusionMatrix(test$is_interesting, pred)
/Chapter 7 - Analyzing Flickr/Code/B06056_07_04.R
permissive
survivor114jg/learning-social-media-analytics-with-r
R
false
false
10,943
r
############################################################################
# Chapter : 7
# Description : Classifying Flickr Data using Random Forests and SVM
############################################################################

library(httr)
library(plyr)
library(dplyr)
library(rlist)
library(pipeR)
library(stringr)
library(ggplot2)
library(reshape2)
library(corrplot)
library(jsonlite)
library(lubridate)
library(pROC)
library(e1071)
library(caret)

############################################################################
# Flickr APP Auth
############################################################################

# App Credentials (redacted placeholders)
api_key <- "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
secret <- "XXXXXXXXXXXXXXXXXXXXXXXX"

# Create connection object
flickr.app <- oauth_app("Flickr Sample App",api_key,secret)
flickr.endpoint <- oauth_endpoint(
  request = "https://www.flickr.com/services/oauth/request_token"
  , authorize = "https://www.flickr.com/services/oauth/authorize"
  , access = "https://www.flickr.com/services/oauth/access_token"
)

# connect using OAuth (interactive browser handshake; cache = F avoids
# writing a .httr-oauth token file)
tok <- oauth1.0_token(
  flickr.endpoint
  , flickr.app
  , cache = F
)

############################################################################
# Utility methods
############################################################################

# Get first entry from a space separated string, coerced to numeric.
# Returns 0 on any parse warning/error (e.g. non-numeric EXIF values).
get_first <- function(y){
  tryCatch(
    as.numeric(strsplit(y," ")[[1]][1])
    , warning = function(w) {
      0 #handle errors by using 0
    }, error = function(e) {
      0 #handle errors by using 0
    })
}

# Extract public photos of given user_id as a data frame.
# Uses flickr.people.getpublicphotos.
# NOTE(review): the sprintf format has only two %s placeholders but three
# values are supplied; the oauth_token argument is silently unused — confirm
# whether the URL was meant to include it.
getPhotosFromFlickr <- function(api_key, token, user_id){
  GET(url=sprintf(
    "https://api.flickr.com/services/rest/?method=flickr.people.getpublicphotos&api_key=%s&user_id=%s&format=json&nojsoncallback=1"
    , api_key
    , user_id
    , token$credentials$oauth_token
  )
  ) %>>%
    content( as = "text" ) %>>%
    jsonlite::fromJSON () %>>%
    ( .$photos$photo ) %>>%
    ( data.frame( .
                  ,stringsAsFactors=F
    ))
}

# Get EXIF data for each photo in a given dataframe (one API call per row).
# Uses flickr.photos.getExif; returns a list of parsed JSON responses.
getEXIF <- function(api_key, photosDF){
  lapply(
    1:nrow(photosDF)
    ,function(photo){
      exif <- GET(url=sprintf(
        "https://api.flickr.com/services/rest/?method=flickr.photos.getExif&api_key=%s&photo_id=%s&secret=%s&format=json&nojsoncallback=1"
        , api_key
        , photosDF[photo,"id"]
        , photosDF[photo,"secret"]
      )
      ) %>>%
        content( as = "text" ) %>>%
        jsonlite::fromJSON ()
    }
  )
}

# Get view counts for each photo in given dataframe (one API call per row).
# Uses flickr.photos.getInfo; returns a numeric vector of view counts.
getViewCounts <- function(api_key, photosDF){
  photos.tagData <- lapply(
    1:nrow(photosDF)
    ,function(photo){
      tag <- GET(url=sprintf(
        "https://api.flickr.com/services/rest/?method=flickr.photos.getInfo&api_key=%s&photo_id=%s&secret=%s&format=json&nojsoncallback=1"
        , api_key
        , photosDF[photo,"id"]
        , photosDF[photo,"secret"]
      )
      ) %>>%
        content( as = "text" ) %>>%
        jsonlite::fromJSON ()
    }
  )
  # Image View Count
  as.numeric(photos.tagData %>>%
               list.map(unlist(.$photo$views)) %>>%
               as.character)
}

# Extract ISO from EXIF data as a numeric vector (NA where missing).
extractISO<-function(photos.exifData){
  as.numeric(photos.exifData %>>%
               list.map(as.numeric(
                 as.data.frame(.$photo$exif)[
                   which(.$photo$exif["label"]=="ISO Speed"),
                   "raw"])
               )
  )
}

# Extract Make/Manufacturer from EXIF; "character(0)" (no match) becomes NA.
extractMakes<-function(photos.exifData){
  make_list<-photos.exifData %>>%
    list.map(unlist(
      as.data.frame(
        .$photo$exif)[
          which(.$photo$exif["label"]=="Make"),
          "raw"]
    )[1] %>>%
      as.character
    )%>>%
    as.character
  make_list <- ifelse(make_list=="character(0)",NA,make_list)
}

# Extract Focal Length from EXIF; parses the leading number of strings like
# "50 mm" via get_first, NA where missing.
extractFocalLength<-function(photos.exifData){
  focal_list <- photos.exifData %>>%
    list.map(
      unlist(
        as.data.frame(.$photo$exif)[
          which(.$photo$exif["label"]=="Focal Length"),
          "raw"]
      )[1]
    )%>>%
    as.character
  focal_list <- ifelse(focal_list=="NULL", NA, focal_list)
  focal_list <- unlist(lapply(focal_list, get_first))
}

# Extract White Balance from EXIF; "character(0)" (no match) becomes NA.
extractWB<-function(photos.exifData){
  whiteBalance_list <- photos.exifData %>>%
    list.map(
      unlist(
        as.data.frame(.$photo$exif)[
          which(.$photo$exif["label"]=="White Balance"),
          "raw"]
      )[1] %>>%
        as.character
    ) %>>%
    as.character
  whiteBalance_list <- ifelse(whiteBalance_list=="character(0)", NA, whiteBalance_list)
}

# Extract Metering Mode from EXIF; "character(0)" (no match) becomes NA.
extractMeteringMode <- function(photos.exifData){
  meteringMode_list <- photos.exifData %>>%
    list.map(
      unlist(
        as.data.frame(.$photo$exif)[
          which(.$photo$exif["label"]=="Metering Mode"),
          "raw"]
      )[1] %>>%
        as.character
    )%>>%
    as.character
  meteringMode_list <- ifelse(meteringMode_list=="character(0)", NA, meteringMode_list)
}

# Clean up camera makes: collapse vendor-name variants to one lowercase
# canonical label; unknown makes are simply lowercased.
mapCamMakes <- function(x) {
  if(!is.na(str_match(tolower(x),"canon"))){
    "canon"
  }else if(!is.na(str_match(tolower(x),"nikon"))){
    "nikon"
  }else if(!is.na(str_match(tolower(x),"fuji"))){
    "fujifilm"
  }else if(!is.na(str_match(tolower(x),"leica")) ){
    "leica"
  }else if(!is.na(str_match(tolower(x),"olympus"))){
    "olympus"
  }else {
    tolower(x)
  }
}

# Get Photos for given user_id, enriched with EXIF attributes and view
# counts, as a single data frame.
getUserPhotos <- function(api_key, token, user_id){
  #get user's photos in a dataframe
  photosDF <- getPhotosFromFlickr(api_key,token,user_id)

  # get exif for each photo in dataframe
  photos.exifData <- getEXIF(api_key,photosDF)

  # Image ISO
  iso_list <- extractISO(photos.exifData)

  # Image Manufacturer/Make
  make_list <- extractMakes(photos.exifData)

  # Image Focal Length
  focal_list <- extractFocalLength(photos.exifData)

  # Image White Balance
  whiteBalance_list<-extractWB(photos.exifData)

  # Image Metering Mode
  meteringMode_list <- extractMeteringMode(photos.exifData)

  # Add attributes to main data frame
  photosDF$iso <- iso_list
  photosDF$make <- make_list
  photosDF$focal_length <- focal_list
  photosDF$white_balance <- whiteBalance_list
  photosDF$metering_mode <- meteringMode_list

  # get view counts
  photosDF$views <- getViewCounts(api_key,photosDF)

  as.data.frame(photosDF)
}

# Typecast dataframes for use by classifier: categorical EXIF attributes
# become factors. (The original comment mentioned "encode numeric", but no
# numeric encoding happens here — the factors are used directly.)
prepareClassifierDF <- function(classifyDF){
  # convert white balance to factor
  classifyDF$white_balance <- as.factor(classifyDF$white_balance)
  # convert metering mode to factor
  classifyDF$metering_mode <- as.factor(classifyDF$metering_mode)
  # convert make_clean to factor
  classifyDF$make_clean <- as.factor(classifyDF$make_clean)
  as.data.frame(classifyDF)
}


############################################################################
# Prepare Dataset
############################################################################

# collect photos which have not featured on
# Explore page ("negative" class, label 0)
# get user_id specific data (redacted user ids)
mortal_userIDS <- c('XXXXXXXXXXX',
                    'XXXXXXXXXXX',
                    'XXXXXXXXXXX',
                    'XXXXXXXXXXX')
neg_interesting_df <- lapply(mortal_userIDS, getUserPhotos, api_key=api_key,token=tok) %>>%
  ( do.call(rbind, .) )

neg_interesting_df <- na.omit(neg_interesting_df)
neg_interesting_df$make_clean <- sapply(neg_interesting_df$make, mapCamMakes)
neg_interesting_df$is_interesting <- 0

# Photos from Explore page ("positive" class, label 1).
# NOTE(review): `interesting` is not defined in this file — it is presumably
# built by an earlier script in this chapter; verify before running standalone.
pos_interesting_df <- na.omit(interesting)
pos_interesting_df$is_interesting <- 1

# prepare overall dataset: align positive rows to the negative columns,
# then stack both classes
classifyDF <- rbind(pos_interesting_df[,colnames(neg_interesting_df)],
                    neg_interesting_df)

# convert attributes to proper data types
classifyDF <- prepareClassifierDF(classifyDF)

# convert is_interesting to factor -> class target
classifyDF$is_interesting <- as.factor(classifyDF$is_interesting)

# restrict columns to the modelling features + target
req_cols <- c('is_interesting', 'iso', 'focal_length',
              'white_balance', 'metering_mode',
              'views', 'make_clean')
classifyDF <- classifyDF[,req_cols]


############################################################################
# Prepare Train and Test Data Sets
############################################################################

# train - test split (60/40), seeded for reproducibility
set.seed(42)
samp <- sample(nrow(classifyDF), 0.6 * nrow(classifyDF))
train <- classifyDF[samp, ]
test <- classifyDF[-samp, ]


############################################################################
# Random Forest based Classifier
############################################################################

# train model with scaling preprocessing and 8-point tuning grid under CV
rfModel <- train(is_interesting ~ ., train,
                 preProcess = c("scale"),
                 tuneLength = 8,
                 trControl = trainControl(method = "cv"))

# Prediction: class probabilities on held-out data (column 1 is the target)
predictedProb <- predict(rfModel, test[,-1], type="prob")

# Draw ROC curve from the probability of the "interesting" class.
resultROC <- roc(test$is_interesting, predictedProb$"1")
plot(resultROC, print.thres="best", print.thres.best.method="closest.topleft")

#to get threshold and accuracy at the closest-to-top-left operating point
resultCoords <- coords(resultROC, "best",
                       best.method="closest.topleft",
                       ret=c("threshold", "accuracy"))
print(resultCoords)

# Confusion Matrix
confusionMatrix(test$is_interesting, predict(rfModel, test[,-1]))


############################################################################
# SVM based Classifier
############################################################################

# standard svm with default hyperparameters
svm_model <- svm(is_interesting ~ ., data=train)
summary(svm_model)

# get model performance
pred <- predict(svm_model,test[,-1])
confusionMatrix(test$is_interesting, pred)

# Tune SVM over a cost/gamma grid and cross validate
svm_tune <- tune(svm, is_interesting ~ ., data=train,
                 ranges=list(cost=10^(-1:2), gamma=c(.5,1,2)))
summary(svm_tune)

# get model performance of the tuned model
pred <- predict(svm_tune$best.model,test[,-1])
confusionMatrix(test$is_interesting, pred)
### Bird's-eye (perspective) plot of a two-variable function with persp
f <- function(x,y) x^2 - y^2
x <- seq(-3, 3, length=51) # partition of the x domain
y <- seq(-3, 3, length=51) # partition of the y domain
z <- outer(x, y, f)        # z values on the full x-y grid

### Basic perspective plot
persp(x, y, z, col="lightblue")

### Perspective plot with an explicit viewing direction
persp(x, y, z, theta=30, phi=30, expand=0.5, col="royalblue",
      main = expression(z==x^2-y^2))

### 3-D scatter plot (using the scatterplot3d package)
## install.packages("scatterplot3d") # needed the first time only
require(scatterplot3d) # load the package
# Climate data; column names below are Japanese for
# wind speed / solar radiation / temperature.
kikou <- read.csv("kikou2016.csv", fileEncoding="sjis")
dat <- subset(kikou, select=c("風速", "日射量", "気温"))
scatterplot3d(dat, pch=4, color="orchid")
/docs/autumn/example/graph-plot3d.r
no_license
noboru-murata/sda
R
false
false
765
r
### Bird's-eye (perspective) plot of a two-variable function with persp
f <- function(x,y) x^2 - y^2
x <- seq(-3, 3, length=51) # partition of the x domain
y <- seq(-3, 3, length=51) # partition of the y domain
z <- outer(x, y, f)        # z values on the full x-y grid

### Basic perspective plot
persp(x, y, z, col="lightblue")

### Perspective plot with an explicit viewing direction
persp(x, y, z, theta=30, phi=30, expand=0.5, col="royalblue",
      main = expression(z==x^2-y^2))

### 3-D scatter plot (using the scatterplot3d package)
## install.packages("scatterplot3d") # needed the first time only
require(scatterplot3d) # load the package
# Climate data; column names below are Japanese for
# wind speed / solar radiation / temperature.
kikou <- read.csv("kikou2016.csv", fileEncoding="sjis")
dat <- subset(kikou, select=c("風速", "日射量", "気温"))
scatterplot3d(dat, pch=4, color="orchid")
#[export]
# Column sums of a matrix, delegating to Rfast's compiled C++ routines.
# x: numeric matrix; indices: optional column subset; na.rm: drop NAs.
# NOTE(review): the parallel branch calls a different entry point that
# takes only `x` — `indices` and `na.rm` are ignored when parallel = TRUE.
colsums <- function(x,indices = NULL,parallel = FALSE,na.rm = FALSE) {
	if(parallel){
		.Call(Rfast_col_sums_p,x)
	}else{
		.Call(Rfast_col_sums,x,indices,na.rm)
	}
}

#[export]
# Row sums of a matrix, same contract and caveats as colsums above:
# `indices` and `na.rm` are honored only when parallel = FALSE.
rowsums <- function(x,indices = NULL,parallel = FALSE,na.rm = FALSE) {
	if(parallel){
		.Call(Rfast_row_sums_p,x)
	}else{
		.Call(Rfast_row_sums,x,indices,na.rm)
	}
}
/R/sum.R
no_license
cran/Rfast
R
false
false
380
r
#[export]
# Column sums of a matrix, delegating to Rfast's compiled C++ routines.
# x: numeric matrix; indices: optional column subset; na.rm: drop NAs.
# NOTE(review): the parallel branch calls a different entry point that
# takes only `x` — `indices` and `na.rm` are ignored when parallel = TRUE.
colsums <- function(x,indices = NULL,parallel = FALSE,na.rm = FALSE) {
	if(parallel){
		.Call(Rfast_col_sums_p,x)
	}else{
		.Call(Rfast_col_sums,x,indices,na.rm)
	}
}

#[export]
# Row sums of a matrix, same contract and caveats as colsums above:
# `indices` and `na.rm` are honored only when parallel = FALSE.
rowsums <- function(x,indices = NULL,parallel = FALSE,na.rm = FALSE) {
	if(parallel){
		.Call(Rfast_row_sums_p,x)
	}else{
		.Call(Rfast_row_sums,x,indices,na.rm)
	}
}
# Lista 1 - Exercicio 3 - item b  (List 1, Exercise 3, item b)
# Model: Z_t = 0.3*Z_{t-1} - 0.585*Z_{t-2} + a_t  ==> AR(p) with p = 2 ==> AR(2)

# Fix the RNG seed so the simulated series is reproducible.
set.seed(666)
# Simulate 1000 observations from the AR(2) process with phi = (0.3, -0.585).
Z_t <- arima.sim(model = list(ar = c(.3, -.585)), n = 1000)

# Stack the three diagnostic plots vertically on one device.
par(mfrow=c(3,1))
ts.plot(Z_t)
# Sample autocorrelation function (ACF).
ar.acf <- acf(Z_t, type = "correlation", plot = T)
# Sample partial autocorrelation function (PACF).
ar.pacf <- acf(Z_t, type = "partial", plot = T)
/MAE5870/lista1/3b_sim.R
no_license
kayaman/tsa
R
false
false
304
r
# Lista 1 - Exercicio 3 - item b  (List 1, Exercise 3, item b)
# Model: Z_t = 0.3*Z_{t-1} - 0.585*Z_{t-2} + a_t  ==> AR(p) with p = 2 ==> AR(2)

# Fix the RNG seed so the simulated series is reproducible.
set.seed(666)
# Simulate 1000 observations from the AR(2) process with phi = (0.3, -0.585).
Z_t <- arima.sim(model = list(ar = c(.3, -.585)), n = 1000)

# Stack the three diagnostic plots vertically on one device.
par(mfrow=c(3,1))
ts.plot(Z_t)
# Sample autocorrelation function (ACF).
ar.acf <- acf(Z_t, type = "correlation", plot = T)
# Sample partial autocorrelation function (PACF).
ar.pacf <- acf(Z_t, type = "partial", plot = T)
# Exercise 2: using built-in string functions

# The lyric we will slice apart and mutate.
lyric <- "I like to eat apples and bananas"

# First 13 characters of the lyric: everything before the fruit words.
intro <- substr(lyric, 1, 13)

# Characters 15 through the end: the fruit words.
# nchar() gives the total length, so this works for lyrics of any length.
fruits <- substr(lyric, 15, nchar(lyric))

# Replace every "a" in the fruit words with "ee" (gsub replaces all matches).
fruits_e <- gsub("a", "ee", fruits)

# Replace every "a" in the fruit words with "o".
fruits_o <- gsub("a", "o", fruits)

# Recombine the intro with the "ee" ending and print it.
# BUG FIX: the exercise asks to print this variable, but the original
# script only assigned it without printing.
lyric_e <- paste(intro, fruits_e)
print(lyric_e)

# Print the intro combined with the "o" ending, without a new variable.
print(paste(intro, fruits_o))
/exercise-2/exercise.R
permissive
yinchuqian/ch6-functions
R
false
false
1,364
r
# Exercise 2: using built-in string functions

# The lyric we will slice apart and mutate.
lyric <- "I like to eat apples and bananas"

# First 13 characters of the lyric: everything before the fruit words.
intro <- substr(lyric, 1, 13)

# Characters 15 through the end: the fruit words.
# nchar() gives the total length, so this works for lyrics of any length.
fruits <- substr(lyric, 15, nchar(lyric))

# Replace every "a" in the fruit words with "ee" (gsub replaces all matches).
fruits_e <- gsub("a", "ee", fruits)

# Replace every "a" in the fruit words with "o".
fruits_o <- gsub("a", "o", fruits)

# Recombine the intro with the "ee" ending and print it.
# BUG FIX: the exercise asks to print this variable, but the original
# script only assigned it without printing.
lyric_e <- paste(intro, fruits_e)
print(lyric_e)

# Print the intro combined with the "o" ending, without a new variable.
print(paste(intro, fruits_o))
#plot 2: Global Active Power vs. time for 1-2 Feb 2007, saved as plot2.png

##step 2.1: read the raw data; header row present, ';' separated, first two
##columns (Date, Time) as character and the remaining 7 as numeric.
##"?" entries are treated as NA.
##NOTE(review): `na = "?"` relies on R's partial argument matching for
##`na.strings` — it works, but spelling it out would be safer.
mData <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", colClasses = c("character","character", rep("numeric",7)), na = "?")

##Setting dates and subsetting the data
##parse the Date column from day/month/year text into Date objects
mData$Date <- as.Date(mData$Date, format = "%d/%m/%Y")

##keep only the two target days, 2007-02-01 and 2007-02-02
tData <- subset(mData, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))

##Build a combined DateTime column on tData.
##Date is already a Date object, so pasting it with the Time string gives a
##"YYYY-MM-DD HH:MM:SS" string that as.POSIXct can parse directly.
DateTime <- paste(as.Date(tData$Date), tData$Time)
tData$DateTime <- as.POSIXct(DateTime)

print ("Plotting commences")

##step 2.2: line plot ("l") of Global Active Power over time; no title,
##empty x label, y label as required by the assignment
plot(tData$Global_active_power~tData$DateTime, type="l", xlab="", ylab="Global Active Power (kilowatts)")

##step 3.3: copy the screen device to a 480x480 PNG and close it
dev.copy(png, file="plot2.png", height=480, width=480)
dev.off()
print ("plot2.png is now available")
/plot2.R
no_license
ThaddyTeo/ExData_Plotting1
R
false
false
1,520
r
#plot 2: Global Active Power vs. time for 1-2 Feb 2007, saved as plot2.png

##step 2.1: read the raw data; header row present, ';' separated, first two
##columns (Date, Time) as character and the remaining 7 as numeric.
##"?" entries are treated as NA.
##NOTE(review): `na = "?"` relies on R's partial argument matching for
##`na.strings` — it works, but spelling it out would be safer.
mData <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", colClasses = c("character","character", rep("numeric",7)), na = "?")

##Setting dates and subsetting the data
##parse the Date column from day/month/year text into Date objects
mData$Date <- as.Date(mData$Date, format = "%d/%m/%Y")

##keep only the two target days, 2007-02-01 and 2007-02-02
tData <- subset(mData, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))

##Build a combined DateTime column on tData.
##Date is already a Date object, so pasting it with the Time string gives a
##"YYYY-MM-DD HH:MM:SS" string that as.POSIXct can parse directly.
DateTime <- paste(as.Date(tData$Date), tData$Time)
tData$DateTime <- as.POSIXct(DateTime)

print ("Plotting commences")

##step 2.2: line plot ("l") of Global Active Power over time; no title,
##empty x label, y label as required by the assignment
plot(tData$Global_active_power~tData$DateTime, type="l", xlab="", ylab="Global Active Power (kilowatts)")

##step 3.3: copy the screen device to a 480x480 PNG and close it
dev.copy(png, file="plot2.png", height=480, width=480)
dev.off()
print ("plot2.png is now available")
# Infer a gene regulatory network from transcription-factor expression data
# with the GRENITS Bioconductor package, then inspect convergence and plot
# the inferred network.
# NOTE(review): biocLite() is the legacy Bioconductor installer (pre-3.8);
# modern installs use BiocManager::install("GRENITS").
source("https://bioconductor.org/biocLite.R")
biocLite("GRENITS")
library(GRENITS)

# Expression matrix: drop the first column (gene names) and use it as rownames.
# NOTE(review): `Pilot_TF` is not defined in this file — presumably loaded
# earlier in the session; verify before running standalone.
TF <- data.matrix(Pilot_TF)[,-1]
rownames(TF) <- Pilot_TF[,1]

# plot.ts( t(TF), plot.type = "single", col = 1:5, xlim = c(0,65),
#          main = "Circadian Clock Network \n ODE simulated data",
#          xlab = "Time (h)", ylab = "Expression")
# legend("topright", rownames(TF), lty = 1, col = 1:5)

# Run the linear-model MCMC network inference; results land in output.folder.
output.folder <- paste("./TF_LinearNet", sep="")
LinearNet(output.folder, TF)
analyse.output(output.folder)
dir(output.folder)

# Compare posterior link probabilities between the two MCMC chains to check
# convergence (points should hug the diagonal).
chain1 <- read.chain(output.folder,1)
chain2 <- read.chain(output.folder,2)
gamma1 <- colMeans(chain1$gamma)
gamma2 <- colMeans(chain2$gamma)
plot(x = gamma1, y = gamma2, xlab = "chain1", ylab = "chain2",
     main = "Convergence plot for link probabilities",
     xlim = c(0,1), ylim = c(0,1), cex = 1.5, cex.lab = 1.6, cex.main = 1.6)
lines(c(0,1), c(0,1), col = "red")

# Read the posterior link-probability matrix and threshold it at 0.8 to get
# a binary adjacency matrix.
prob.file <- paste(output.folder, "/NetworkProbability_Matrix.txt", sep = "")
prob.mat <- read.table(prob.file)
print(prob.mat)
inferred.net <- 1*(prob.mat > 0.8)
print(inferred.net)

library(network)
inferred.net <- network(inferred.net)
par(mfrow = c(1,2), cex = 1.76, cex.lab = 1.3, cex.main = 1.4)

# Plot cut off: number of links retained as a function of threshold.
prob.vec <- sort(as.vector(as.matrix(prob.mat)), T)
# Remove self interaction (last 5 elements): 4:0 - length(prob.vec) builds
# negative indices that drop the five smallest entries.
prob.vec <- prob.vec[4:0 - length(prob.vec)]
plot(x = prob.vec, y = 1:length(prob.vec), xlim = c(0,1),
     main = "Connections included vs threshold",
     xlab = "Probability threshold", ylab = "Connections included")
lines(c(0.8,0.8), c(0, 30), col = "red", lty = 2, lwd = 2)

# Plot Network
plot(inferred.net, label = network.vertex.names(inferred.net),
     main = "A. thaliana Inferred Network", mode = "circle",
     vertex.cex=7, arrowhead.cex = 2,vertex.col="green")

# List the individual links whose posterior probability exceeds 0.8.
prob.list.file <- paste(output.folder, "/NetworkProbability_List.txt", sep = "")
prob.list <- read.table(prob.list.file, header = T)
above.08 <- (prob.list[,3] > 0.8)
print(prob.list[above.08,])
/GRENITS.R
no_license
gg0027/Network
R
false
false
1,971
r
# Infer a gene regulatory network from transcription-factor expression data
# with the GRENITS Bioconductor package, then inspect convergence and plot
# the inferred network.
# NOTE(review): biocLite() is the legacy Bioconductor installer (pre-3.8);
# modern installs use BiocManager::install("GRENITS").
source("https://bioconductor.org/biocLite.R")
biocLite("GRENITS")
library(GRENITS)

# Expression matrix: drop the first column (gene names) and use it as rownames.
# NOTE(review): `Pilot_TF` is not defined in this file — presumably loaded
# earlier in the session; verify before running standalone.
TF <- data.matrix(Pilot_TF)[,-1]
rownames(TF) <- Pilot_TF[,1]

# plot.ts( t(TF), plot.type = "single", col = 1:5, xlim = c(0,65),
#          main = "Circadian Clock Network \n ODE simulated data",
#          xlab = "Time (h)", ylab = "Expression")
# legend("topright", rownames(TF), lty = 1, col = 1:5)

# Run the linear-model MCMC network inference; results land in output.folder.
output.folder <- paste("./TF_LinearNet", sep="")
LinearNet(output.folder, TF)
analyse.output(output.folder)
dir(output.folder)

# Compare posterior link probabilities between the two MCMC chains to check
# convergence (points should hug the diagonal).
chain1 <- read.chain(output.folder,1)
chain2 <- read.chain(output.folder,2)
gamma1 <- colMeans(chain1$gamma)
gamma2 <- colMeans(chain2$gamma)
plot(x = gamma1, y = gamma2, xlab = "chain1", ylab = "chain2",
     main = "Convergence plot for link probabilities",
     xlim = c(0,1), ylim = c(0,1), cex = 1.5, cex.lab = 1.6, cex.main = 1.6)
lines(c(0,1), c(0,1), col = "red")

# Read the posterior link-probability matrix and threshold it at 0.8 to get
# a binary adjacency matrix.
prob.file <- paste(output.folder, "/NetworkProbability_Matrix.txt", sep = "")
prob.mat <- read.table(prob.file)
print(prob.mat)
inferred.net <- 1*(prob.mat > 0.8)
print(inferred.net)

library(network)
inferred.net <- network(inferred.net)
par(mfrow = c(1,2), cex = 1.76, cex.lab = 1.3, cex.main = 1.4)

# Plot cut off: number of links retained as a function of threshold.
prob.vec <- sort(as.vector(as.matrix(prob.mat)), T)
# Remove self interaction (last 5 elements): 4:0 - length(prob.vec) builds
# negative indices that drop the five smallest entries.
prob.vec <- prob.vec[4:0 - length(prob.vec)]
plot(x = prob.vec, y = 1:length(prob.vec), xlim = c(0,1),
     main = "Connections included vs threshold",
     xlab = "Probability threshold", ylab = "Connections included")
lines(c(0.8,0.8), c(0, 30), col = "red", lty = 2, lwd = 2)

# Plot Network
plot(inferred.net, label = network.vertex.names(inferred.net),
     main = "A. thaliana Inferred Network", mode = "circle",
     vertex.cex=7, arrowhead.cex = 2,vertex.col="green")

# List the individual links whose posterior probability exceeds 0.8.
prob.list.file <- paste(output.folder, "/NetworkProbability_List.txt", sep = "")
prob.list <- read.table(prob.list.file, header = T)
above.08 <- (prob.list[,3] > 0.8)
print(prob.list[above.08,])
####################################################################
## test subject-level breaks from panel residuals
##
## written by Jong Hee Park 03/2009
## modified and integrated with other codes by JHP 07/2011
## fixed a starting.id and ending.id
######################################################################
##
## For each subject's residual series, fits change-point models with
## 0..max.break breaks via MCMCresidualBreakAnalysis and returns a
## NC x (max.break+1) matrix of log marginal likelihoods (attribute
## "model.prob" holds the per-subject posterior model probabilities;
## attribute "psout" holds state probabilities when ps.out = TRUE).
"testpanelSubjectBreak" <-
  function(subject.id, time.id, resid, max.break=2, minimum = 10,
           mcmc=1000, burnin=1000, thin=1, verbose=0,
           b0, B0, c0, d0, a = NULL, b = NULL,
           seed = NA, Time = NULL, ps.out = FALSE){

    ## seeds
    seeds <- form.seeds(seed)
    lecuyer <- seeds[[1]]
    seed.array <- seeds[[2]]
    lecuyer.stream <- seeds[[3]]

    ## Data
    N <- length(subject.id)

    ## groupinfo matrix
    ## col1: subj ID, col2: offset (first time C indexing), col3: #time periods
    if (min(subject.id) != 1){
      stop("subject.id should start 1!")
    }
    if (min(time.id) != 1){
      stop("time.id should start 1!")
    }
    if (is.null(Time)){
      ## NOTE(review): rep(N, 1) yields the single value N, not a length-N
      ## vector; looks suspicious but left unchanged pending confirmation.
      Time <- rep(N, 1)
    }
    NC <- length(unique(subject.id))
    time.list <- as.numeric(table(subject.id))

    ## Make a residual list: one ts per subject, carved out of `resid`
    ## by consecutive runs of subject.id.
    resid.list <- as.list(rep(NA, NC))
    start <- 1; end <- 0
    for (i in 1:NC){
      end <- start + time.list[i] - 1
      resid.list[[i]] <- ts(resid[start:end], start=Time[start])
      start <- end + 1
    }

    ## Do the break analysis
    BFout <- matrix(NA, NC, max.break + 1)
    ## BUG FIX: the original branch was inverted — it set psout <- NULL when
    ## ps.out was TRUE, so the later psout[...] <- probstate assignment
    ## subscripted NULL and failed. Allocate the array when ps.out is TRUE.
    if (ps.out == TRUE){
      psout <- array(NA, c(max(time.list), sum(2:(max.break+1)), NC))
    } else {
      psout <- NULL
    }
    for (i in 1:NC){
      residual <- resid.list[[i]]
      nk <- length(residual)
      out <- as.list(rep(NA, max.break))
      ## subjects with too few observations are skipped (row stays NA)
      if(nk > minimum){
        for (k in 0:max.break){
          out[[k+1]] <- MCMCresidualBreakAnalysis(residual, m=k,
                                                  b0=b0, B0=B0, c0=c0, d0=d0, a=a, b=b,
                                                  burnin=burnin, mcmc=mcmc, thin=thin, verbose=verbose,
                                                  marginal.likelihood="Chib95")
          if (ps.out ==TRUE&k>0){
            ## columns 1..2 hold the k=1 model, next 3 the k=2 model, etc.
            if(k==1){
              start <- 1
            }
            else{
              start <- sum(2:k)+1
            }
            probstate <- attr(out[[k+1]], "prob.state")
            psout[1:length(probstate[,1]), start:(start+k), i] <- probstate
          }
          ## if no convergence diagnostic
          BFout[i, k+1] <- attr(out[[k+1]], "logmarglike")
        }
      }
      if (verbose > 0){
        cat("\n ------------------------------------------------------------- ")
        cat("\n Break analysis for subject=", i, "is just finished! \n")
      }
    }
    if (ps.out ==TRUE){
      attr(BFout, "psout") <- psout
    }
    ## convert log marginal likelihoods to posterior model probabilities
    model.prob.mat <- matrix(NA, NC, max.break + 1)
    for (i in 1:NC){
      model.prob <- exp(BFout[i, ])/sum(exp(BFout[i, ]))
      winner <- which.max(model.prob)
      if (verbose > 0){
        cat("\nPr(no residual break) for subject", i, "=", model.prob[1])
      }
      model.prob.mat[i,] <- model.prob
    }
    attr(BFout, "model.prob") <- model.prob.mat
    return(BFout)
  }
/MCMCpack/R/testpanelSubjectBreak.R
no_license
ingted/R-Examples
R
false
false
3,219
r
####################################################################
## test subject-level breaks from panel residuals
##
## written by Jong Hee Park 03/2009
## modified and integrated with other codes by JHP 07/2011
## fixed a starting.id and ending.id
######################################################################
##
## For each subject's residual series, fits change-point models with
## 0..max.break breaks via MCMCresidualBreakAnalysis and returns a
## NC x (max.break+1) matrix of log marginal likelihoods (attribute
## "model.prob" holds the per-subject posterior model probabilities;
## attribute "psout" holds state probabilities when ps.out = TRUE).
"testpanelSubjectBreak" <-
  function(subject.id, time.id, resid, max.break=2, minimum = 10,
           mcmc=1000, burnin=1000, thin=1, verbose=0,
           b0, B0, c0, d0, a = NULL, b = NULL,
           seed = NA, Time = NULL, ps.out = FALSE){

    ## seeds
    seeds <- form.seeds(seed)
    lecuyer <- seeds[[1]]
    seed.array <- seeds[[2]]
    lecuyer.stream <- seeds[[3]]

    ## Data
    N <- length(subject.id)

    ## groupinfo matrix
    ## col1: subj ID, col2: offset (first time C indexing), col3: #time periods
    if (min(subject.id) != 1){
      stop("subject.id should start 1!")
    }
    if (min(time.id) != 1){
      stop("time.id should start 1!")
    }
    if (is.null(Time)){
      ## NOTE(review): rep(N, 1) yields the single value N, not a length-N
      ## vector; looks suspicious but left unchanged pending confirmation.
      Time <- rep(N, 1)
    }
    NC <- length(unique(subject.id))
    time.list <- as.numeric(table(subject.id))

    ## Make a residual list: one ts per subject, carved out of `resid`
    ## by consecutive runs of subject.id.
    resid.list <- as.list(rep(NA, NC))
    start <- 1; end <- 0
    for (i in 1:NC){
      end <- start + time.list[i] - 1
      resid.list[[i]] <- ts(resid[start:end], start=Time[start])
      start <- end + 1
    }

    ## Do the break analysis
    BFout <- matrix(NA, NC, max.break + 1)
    ## BUG FIX: the original branch was inverted — it set psout <- NULL when
    ## ps.out was TRUE, so the later psout[...] <- probstate assignment
    ## subscripted NULL and failed. Allocate the array when ps.out is TRUE.
    if (ps.out == TRUE){
      psout <- array(NA, c(max(time.list), sum(2:(max.break+1)), NC))
    } else {
      psout <- NULL
    }
    for (i in 1:NC){
      residual <- resid.list[[i]]
      nk <- length(residual)
      out <- as.list(rep(NA, max.break))
      ## subjects with too few observations are skipped (row stays NA)
      if(nk > minimum){
        for (k in 0:max.break){
          out[[k+1]] <- MCMCresidualBreakAnalysis(residual, m=k,
                                                  b0=b0, B0=B0, c0=c0, d0=d0, a=a, b=b,
                                                  burnin=burnin, mcmc=mcmc, thin=thin, verbose=verbose,
                                                  marginal.likelihood="Chib95")
          if (ps.out ==TRUE&k>0){
            ## columns 1..2 hold the k=1 model, next 3 the k=2 model, etc.
            if(k==1){
              start <- 1
            }
            else{
              start <- sum(2:k)+1
            }
            probstate <- attr(out[[k+1]], "prob.state")
            psout[1:length(probstate[,1]), start:(start+k), i] <- probstate
          }
          ## if no convergence diagnostic
          BFout[i, k+1] <- attr(out[[k+1]], "logmarglike")
        }
      }
      if (verbose > 0){
        cat("\n ------------------------------------------------------------- ")
        cat("\n Break analysis for subject=", i, "is just finished! \n")
      }
    }
    if (ps.out ==TRUE){
      attr(BFout, "psout") <- psout
    }
    ## convert log marginal likelihoods to posterior model probabilities
    model.prob.mat <- matrix(NA, NC, max.break + 1)
    for (i in 1:NC){
      model.prob <- exp(BFout[i, ])/sum(exp(BFout[i, ]))
      winner <- which.max(model.prob)
      if (verbose > 0){
        cat("\nPr(no residual break) for subject", i, "=", model.prob[1])
      }
      model.prob.mat[i,] <- model.prob
    }
    attr(BFout, "model.prob") <- model.prob.mat
    return(BFout)
  }
# Auto-generated fuzz/valgrind regression test for the CNull internal
# routine; replays a recorded argument list and prints the result structure.
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.99939506918721e-241, 1.97274569258757e-154, 5.49464572566663e+109, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
# ::: reaches the unexported compiled entry point directly.
result <- do.call(CNull:::communities_individual_based_sampling_beta_interleaved_matrices,testlist)
str(result)
/CNull/inst/testfiles/communities_individual_based_sampling_beta_interleaved_matrices/AFL_communities_individual_based_sampling_beta_interleaved_matrices/communities_individual_based_sampling_beta_interleaved_matrices_valgrind_files/1615839802-test.R
no_license
akhikolla/updatedatatype-list2
R
false
false
363
r
# Auto-generated fuzz/valgrind regression test for the CNull internal
# routine; replays a recorded argument list and prints the result structure.
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.99939506918721e-241, 1.97274569258757e-154, 5.49464572566663e+109, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
# ::: reaches the unexported compiled entry point directly.
result <- do.call(CNull:::communities_individual_based_sampling_beta_interleaved_matrices,testlist)
str(result)
/R/Segunda Clase de st.R
no_license
sebas-prog/Series-de-tiempo-1
R
false
false
601
r
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{eval_pipe}
\alias{eval_pipe}
\title{Runs expressions from a list in an independently scoped environment.}
\usage{
eval_pipe(data = NULL, pipeline = list())
}
\arguments{
\item{data}{A list of environments (or data frames; a data frame is
promoted to an environment). This creates a scoped context for evaluating
expressions. This scope \emph{skips} .GlobalEnv.}

\item{pipeline}{A quoted list of expressions which are evaluated in the
scoped context provided by the data.}
}
\value{
An internally generated list of expression results.
}
\description{
Runs expressions from a list in an independently scoped environment.
}
\examples{
data <- replicate(8,new.env())
data[[2]]$z <- pi
pipe=quote(list({a <- 3+3},
{b <- a*2},
{q <-a*b*z}
))
o <- eval_pipe(data,pipe)
}
/package_dir/man/eval_pipe.Rd
no_license
sakrejda/data-integrator
R
false
false
830
rd
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{eval_pipe}
\alias{eval_pipe}
\title{Runs expressions from a list in an independently scoped environment.}
\usage{
eval_pipe(data = NULL, pipeline = list())
}
\arguments{
\item{data}{A list of environments (or data frames; a data frame is
promoted to an environment). This creates a scoped context for evaluating
expressions. This scope \emph{skips} .GlobalEnv.}

\item{pipeline}{A quoted list of expressions which are evaluated in the
scoped context provided by the data.}
}
\value{
An internally generated list of expression results.
}
\description{
Runs expressions from a list in an independently scoped environment.
}
\examples{
data <- replicate(8,new.env())
data[[2]]$z <- pi
pipe=quote(list({a <- 3+3},
{b <- a*2},
{q <-a*b*z}
))
o <- eval_pipe(data,pipe)
}
# Build the package's bundled datasets from the MacLeish field station
# helpers (get_whately / get_orchard are defined elsewhere in this package).
whately <- get_whately()
orchard <- get_orchard()
# Serialize into data/ with xz compression so they ship with the package.
save(whately, file = "data/whately.rda", compress = "xz")
save(orchard, file = "data/orchard.rda", compress = "xz")
/data-raw/macleish.R
no_license
nicholasjhorton/macleish
R
false
false
167
r
# Build the package's bundled datasets from the MacLeish field station
# helpers (get_whately / get_orchard are defined elsewhere in this package).
whately <- get_whately()
orchard <- get_orchard()
# Serialize into data/ with xz compression so they ship with the package.
save(whately, file = "data/whately.rda", compress = "xz")
save(orchard, file = "data/orchard.rda", compress = "xz")
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/EMM_functions_cpp.R
\name{score.linker.cpp}
\alias{score.linker.cpp}
\title{Calculate -log10(p) by score test (fast, for limited cases)}
\usage{
score.linker.cpp(y, Ws, Gammas, gammas.diag = TRUE, Gu, Ge, P0,
  chi0.mixture = 0.5)
}
\arguments{
\item{y}{A \eqn{n \times 1} vector. A vector of phenotypic values should be used. NA is allowed.}

\item{Ws}{A list of low rank matrices (ZW; \eqn{n \times k} matrix). This forms linear kernel \eqn{ZKZ' = ZW \Gamma (ZW)'}. For example, Ws = list(A.part = ZW.A, D.part = ZW.D)}

\item{Gammas}{A list of matrices for weighting SNPs (Gamma; \eqn{k \times k} matrix). This forms linear kernel \eqn{ZKZ' = ZW \Gamma (ZW)'}. For example, if there is no weighting, Gammas = lapply(Ws, function(x) diag(ncol(x)))}

\item{gammas.diag}{If each Gamma is the diagonal matrix, please set this argument TRUE. The calculation time can be saved.}

\item{Gu}{A \eqn{n \times n} matrix. You should assign \eqn{ZKZ'}, where K is covariance (relationship) matrix and Z is its design matrix.}

\item{Ge}{A \eqn{n \times n} matrix. You should assign identity matrix I (diag(n)).}

\item{P0}{A \eqn{n \times n} matrix. The Moore-Penrose generalized inverse of \eqn{SV0S}, where \eqn{S = X(X'X)^{-1}X'} and \eqn{V0 = \sigma^2_u Gu + \sigma^2_e Ge}. \eqn{\sigma^2_u} and \eqn{\sigma^2_e} are estimators of the null model.}

\item{chi0.mixture}{RAINBOW assumes the statistic \eqn{l1' F l1} follows the mixture of \eqn{\chi^2_0} and \eqn{\chi^2_r}, where l1 is the first derivative of the log-likelihood and F is the Fisher information. And r is the degree of freedom. chi0.mixture determines the proportion of \eqn{\chi^2_0}}
}
\value{
-log10(p) calculated by score test
}
\description{
Calculate -log10(p) by score test (fast, for limited cases)
}
/man/score.linker.cpp.Rd
permissive
quanrd/RAINBOWR
R
false
true
1,843
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/EMM_functions_cpp.R \name{score.linker.cpp} \alias{score.linker.cpp} \title{Calculte -log10(p) by score test (fast, for limited cases)} \usage{ score.linker.cpp(y, Ws, Gammas, gammas.diag = TRUE, Gu, Ge, P0, chi0.mixture = 0.5) } \arguments{ \item{y}{A \eqn{n \times 1} vector. A vector of phenotypic values should be used. NA is allowed.} \item{Ws}{A list of low rank matrices (ZW; \eqn{n \times k} matrix). This forms linear kernel \eqn{ZKZ' = ZW \Gamma (ZW)'}. For example, Ws = list(A.part = ZW.A, D.part = ZW.D)} \item{Gammas}{A list of matrices for weighting SNPs (Gamma; \eqn{k \times k} matrix). This forms linear kernel \eqn{ZKZ' = ZW \Gamma (ZW)'}. For example, if there is no weighting, Gammas = lapply(Ws, function(x) diag(ncol(x)))} \item{gammas.diag}{If each Gamma is the diagonal matrix, please set this argument TRUE. The calculation time can be saved.} \item{Gu}{A \eqn{n \times n} matrix. You should assign \eqn{ZKZ'}, where K is covariance (relationship) matrix and Z is its design matrix.} \item{Ge}{A \eqn{n \times n} matrix. You should assign identity matrix I (diag(n)).} \item{P0}{A \eqn{n \times n} matrix. The Moore-Penrose generalized inverse of \eqn{SV0S}, where \eqn{S = X(X'X)^{-1}X'} and \eqn{V0 = \sigma^2_u Gu + \sigma^2_e Ge}. \eqn{\sigma^2_u} and \eqn{\sigma^2_e} are estimators of the null model.} \item{chi0.mixture}{RAINBOW assumes the statistic \eqn{l1' F l1} follows the mixture of \eqn{\chi^2_0} and \eqn{\chi^2_r}, where l1 is the first derivative of the log-likelihood and F is the Fisher information. And r is the degree of freedom. chi0.mixture determins the proportion of \eqn{\chi^2_0}} } \value{ -log10(p) calculated by score test } \description{ Calculte -log10(p) by score test (fast, for limited cases) }
envelope.selectedmodgof<- function(Y, fun=NULL, nrank=1,nsim=99,dimyx=c(128,128),...){ x<- Y # the argument should be named Y in order to regeister as an envelope S3 method simu.model <- x$best.model cual <- class(simu.model) bw <- x$best.sigma # envueltas para un HPC if("ecespa.minconfit"%in%cual & !is.na(bw) ){ if(is.null(fun)) fun<- Kinhom lambda <- density.ppp(x$pp, sigma=bw, dimyx=dimyx) simu.model$lambda <- lambda result <- envelope(x$pp, fun, sigma=bw, simulate=expression(rIPCP(simu.model)), savefuns =TRUE,nrank=nrank, nsim=nsim,...) } # envueltas para un PC if("ecespa.minconfit"%in%cual & is.na(bw) ){ if(is.null(fun)) fun<- Kest lambda <- predict(ppm(x$pp), type = "trend") simu.model$lambda <- lambda result <- envelope(x$pp, fun, simulate=expression(rIPCP(simu.model)), savefuns =TRUE,nrank=nrank, nsim=nsim,...) } # envueltas para un HPP if("im"%in%cual & !is.na(bw) ){ if(is.null(fun)) fun<- Kinhom lambda <- density.ppp(x$pp, sigma=bw, dimyx=dimyx) result <- envelope(x$pp, fun, sigma=bw, simulate=expression(rpoispp(lambda)), savefuns =TRUE,nrank=nrank, nsim=nsim,...) } if("im"%in%cual & is.na(bw)){ if(is.null(fun)) fun<- Kest result <- envelope(x$pp, fun, savefuns =TRUE,nrank=nrank, nsim=nsim,...) } return(result) }
/selectspm/R/envelope.selectedmodgof.R
no_license
albrizre/spatstat.revdep
R
false
false
1,395
r
envelope.selectedmodgof<- function(Y, fun=NULL, nrank=1,nsim=99,dimyx=c(128,128),...){ x<- Y # the argument should be named Y in order to regeister as an envelope S3 method simu.model <- x$best.model cual <- class(simu.model) bw <- x$best.sigma # envueltas para un HPC if("ecespa.minconfit"%in%cual & !is.na(bw) ){ if(is.null(fun)) fun<- Kinhom lambda <- density.ppp(x$pp, sigma=bw, dimyx=dimyx) simu.model$lambda <- lambda result <- envelope(x$pp, fun, sigma=bw, simulate=expression(rIPCP(simu.model)), savefuns =TRUE,nrank=nrank, nsim=nsim,...) } # envueltas para un PC if("ecespa.minconfit"%in%cual & is.na(bw) ){ if(is.null(fun)) fun<- Kest lambda <- predict(ppm(x$pp), type = "trend") simu.model$lambda <- lambda result <- envelope(x$pp, fun, simulate=expression(rIPCP(simu.model)), savefuns =TRUE,nrank=nrank, nsim=nsim,...) } # envueltas para un HPP if("im"%in%cual & !is.na(bw) ){ if(is.null(fun)) fun<- Kinhom lambda <- density.ppp(x$pp, sigma=bw, dimyx=dimyx) result <- envelope(x$pp, fun, sigma=bw, simulate=expression(rpoispp(lambda)), savefuns =TRUE,nrank=nrank, nsim=nsim,...) } if("im"%in%cual & is.na(bw)){ if(is.null(fun)) fun<- Kest result <- envelope(x$pp, fun, savefuns =TRUE,nrank=nrank, nsim=nsim,...) } return(result) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/atlantisfmsy_check.R \name{atlantis_checkmodule} \alias{atlantis_checkmodule} \title{Test if Atlantis modules are on.} \usage{ atlantis_checkmodule(model_path, exe_name, batch_file = NULL) } \arguments{ \item{model_path}{The directory of the calibrated model (containing all the parameters files and one bach file. Forcing files can be stored in a direct parent directory of model_path). \strong{WARNING:} Only working if the forcing folder is in the main model directory \code{model_path} or if it is in the direct parent directory. If not please either modify this package or modify the path structure of your Atlantis input forcing parameters file.} \item{exe_name}{The name of the atlantis executable you used (ex: atlantismain, atlantisNew).} \item{batch_file}{The name of the batch/shell file with extension you are using to run your model. If not provided, the function will search for the unique batch file in your \code{folder_path}. \strong{Default:} NULL.} } \value{ \code{test} A binary variable (1) all the modules are on (0) at least one module is off. } \description{ Test if at least physics, biology and fishery modules are on in the calibrated model. It looks inside the run parameters file and check if \code{flag_fisheries_on} = 1, \code{flag_skip_biol} = 0, and \code{flag_skip_phys} = 0. } \examples{ atlantis_checkmodule("C:/Atlantis/AtlantisEEC/AtlantisEECF_v3", "atlantismain", "runAtlantis.bat") atlantis_checkmodule("/home/Atlantis/AtlantisEEC/AtlantisEECF_v3", "atlantisNew", "runAtlantis.sh") } \seealso{ \code{\link{atlantis_paraselect}} for parameters file selection, and \code{\link{atlantis_openfile}} to open a parameters file and select a parameter. }
/man/atlantis_checkmodule.Rd
no_license
rgirardi/atlantisfmsy
R
false
true
1,822
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/atlantisfmsy_check.R \name{atlantis_checkmodule} \alias{atlantis_checkmodule} \title{Test if Atlantis modules are on.} \usage{ atlantis_checkmodule(model_path, exe_name, batch_file = NULL) } \arguments{ \item{model_path}{The directory of the calibrated model (containing all the parameters files and one bach file. Forcing files can be stored in a direct parent directory of model_path). \strong{WARNING:} Only working if the forcing folder is in the main model directory \code{model_path} or if it is in the direct parent directory. If not please either modify this package or modify the path structure of your Atlantis input forcing parameters file.} \item{exe_name}{The name of the atlantis executable you used (ex: atlantismain, atlantisNew).} \item{batch_file}{The name of the batch/shell file with extension you are using to run your model. If not provided, the function will search for the unique batch file in your \code{folder_path}. \strong{Default:} NULL.} } \value{ \code{test} A binary variable (1) all the modules are on (0) at least one module is off. } \description{ Test if at least physics, biology and fishery modules are on in the calibrated model. It looks inside the run parameters file and check if \code{flag_fisheries_on} = 1, \code{flag_skip_biol} = 0, and \code{flag_skip_phys} = 0. } \examples{ atlantis_checkmodule("C:/Atlantis/AtlantisEEC/AtlantisEECF_v3", "atlantismain", "runAtlantis.bat") atlantis_checkmodule("/home/Atlantis/AtlantisEEC/AtlantisEECF_v3", "atlantisNew", "runAtlantis.sh") } \seealso{ \code{\link{atlantis_paraselect}} for parameters file selection, and \code{\link{atlantis_openfile}} to open a parameters file and select a parameter. }
x <- c(0.593, 0.142, 0.329, 0.691, 0.231, 0.793, 0.519, 0.392, 0.418) t.test(x, alternative="greater", mu=0.3)
/HypothesisTesting.R
no_license
JanviDattani/Data-Science-with-R
R
false
false
112
r
x <- c(0.593, 0.142, 0.329, 0.691, 0.231, 0.793, 0.519, 0.392, 0.418) t.test(x, alternative="greater", mu=0.3)
data = read.table("household_power_consumption.txt", sep=";", header = TRUE)##Reading all the data data$Date = as.Date(data[,1], format = "%d/%m/%Y")##Setting the first column as a data datas = data[data$Date >= "2007-02-01" & data$Date <= "2007-02-02", ]##extracted the two dates of importance times = strptime(paste(datas[,1], datas[,2]), format = "%Y-%m-%d %H:%M:%S")##extracting the first two columns and setting time to a date/time column png(file = "plot3.png") plot(times, datas$Sub_metering_1, ylab = "Energy Sub Metering", type = "l", xlab="", col ="black", ylim = range(c(0,40))) #par(new=TRUE) lines(times, datas$Sub_metering_2, col="red", type = "l", xlab="", ylab="",ylim = range(c(0,40))) #par(new=TRUE) lines(times, datas$Sub_metering_3, col="blue", type = "l", xlab="", ylab="",ylim = range(c(0,40))) legend(x="topright", lty=c(1,1,1), col=c("black","red", "blue"), legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")) dev.off()
/plot3.R
no_license
davijeo89/ExData_Plotting1
R
false
false
956
r
data = read.table("household_power_consumption.txt", sep=";", header = TRUE)##Reading all the data data$Date = as.Date(data[,1], format = "%d/%m/%Y")##Setting the first column as a data datas = data[data$Date >= "2007-02-01" & data$Date <= "2007-02-02", ]##extracted the two dates of importance times = strptime(paste(datas[,1], datas[,2]), format = "%Y-%m-%d %H:%M:%S")##extracting the first two columns and setting time to a date/time column png(file = "plot3.png") plot(times, datas$Sub_metering_1, ylab = "Energy Sub Metering", type = "l", xlab="", col ="black", ylim = range(c(0,40))) #par(new=TRUE) lines(times, datas$Sub_metering_2, col="red", type = "l", xlab="", ylab="",ylim = range(c(0,40))) #par(new=TRUE) lines(times, datas$Sub_metering_3, col="blue", type = "l", xlab="", ylab="",ylim = range(c(0,40))) legend(x="topright", lty=c(1,1,1), col=c("black","red", "blue"), legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")) dev.off()
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/predict.R \name{predict.model_list} \alias{predict.model_list} \title{Make predictions using the best-performing model from tuning} \usage{ \method{predict}{model_list}(object, newdata, prepdata, ...) } \arguments{ \item{object}{model_list object, as from `tune_models`} \item{newdata}{data on which to make predictions. If missing, out-of-fold predictions from training will be returned If you want new predictions on training data using the final model, pass the training data to this argument, but know that you're getting over-fit predictions that very likely overestimate model performance relative to what will be achieved on new data. Should have the same structure as the input to `prep_data`,`tune_models` or `train_models`. `predict` will try to figure out if the data need to be sent through `prep_data` before making predictions; this can be overriden by setting `prepdata = FALSE`, but this should rarely be needed.} \item{prepdata}{Logical, this should rarely be set by the user. By default, if `newdata` hasn't been prepped, it will be prepped by `prep_data` before predictions are made. Set this to TRUE to force already-prepped data through `prep_data` again, or set to FALSE to prevent `newdata` from being sent through `prep_data`.} \item{...}{Unused.} } \value{ A tibble data frame: newdata with an additional column for the predictions in "predicted_TARGET" where TARGET is the name of the variable being predicted. If classification, the new column will contain predicted probabilities. The tibble will have child class "predicted_df" and attribute "model_info" that contains information about the model used to make predictions. } \description{ Make predictions using the best-performing model from tuning } \details{ The model and hyperparameter values with the best out-of-fold performance in model training according to the selected metric is used to make predictions. 
Prepping data inside `predict` has the advantage of returning your predictions with the newdata in its original format. } \examples{ # Tune models using only the first 40 rows to keep computation fast models <- machine_learn(pima_diabetes[1:40, ], patient_id, outcome = diabetes) # Make prediction on the next 10 rows. This uses the best-performing model from # tuning cross validation, and it also prepares the new data in the same way as # the training data was prepared. predictions <- predict(models, newdata = pima_diabetes[41:50, ]) predictions plot(predictions) } \seealso{ \code{\link{plot.predicted_df}}, \code{\link{tune_models}}, \code{\link{prep_data}} }
/man/predict.model_list.Rd
permissive
hughvnguyen/healthcareai-r
R
false
true
2,664
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/predict.R \name{predict.model_list} \alias{predict.model_list} \title{Make predictions using the best-performing model from tuning} \usage{ \method{predict}{model_list}(object, newdata, prepdata, ...) } \arguments{ \item{object}{model_list object, as from `tune_models`} \item{newdata}{data on which to make predictions. If missing, out-of-fold predictions from training will be returned If you want new predictions on training data using the final model, pass the training data to this argument, but know that you're getting over-fit predictions that very likely overestimate model performance relative to what will be achieved on new data. Should have the same structure as the input to `prep_data`,`tune_models` or `train_models`. `predict` will try to figure out if the data need to be sent through `prep_data` before making predictions; this can be overriden by setting `prepdata = FALSE`, but this should rarely be needed.} \item{prepdata}{Logical, this should rarely be set by the user. By default, if `newdata` hasn't been prepped, it will be prepped by `prep_data` before predictions are made. Set this to TRUE to force already-prepped data through `prep_data` again, or set to FALSE to prevent `newdata` from being sent through `prep_data`.} \item{...}{Unused.} } \value{ A tibble data frame: newdata with an additional column for the predictions in "predicted_TARGET" where TARGET is the name of the variable being predicted. If classification, the new column will contain predicted probabilities. The tibble will have child class "predicted_df" and attribute "model_info" that contains information about the model used to make predictions. } \description{ Make predictions using the best-performing model from tuning } \details{ The model and hyperparameter values with the best out-of-fold performance in model training according to the selected metric is used to make predictions. 
Prepping data inside `predict` has the advantage of returning your predictions with the newdata in its original format. } \examples{ # Tune models using only the first 40 rows to keep computation fast models <- machine_learn(pima_diabetes[1:40, ], patient_id, outcome = diabetes) # Make prediction on the next 10 rows. This uses the best-performing model from # tuning cross validation, and it also prepares the new data in the same way as # the training data was prepared. predictions <- predict(models, newdata = pima_diabetes[41:50, ]) predictions plot(predictions) } \seealso{ \code{\link{plot.predicted_df}}, \code{\link{tune_models}}, \code{\link{prep_data}} }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/coxed.R \name{coxed} \alias{coxed} \title{Expected durations and marginal changes in expected duration from the Cox proportional hazards model} \usage{ coxed(cox.model, newdata = NULL, newdata2 = NULL, bootstrap = FALSE, method = "npsf", k = -1, B = 200, confidence = "studentized", level = 0.95, id = NULL, ...) } \arguments{ \item{cox.model}{The output from a Cox proportional hazards model estimated with the \code{\link[survival]{coxph}} function in the \code{survival} package or with the \code{\link[rms]{cph}} function in the \code{\link[rms]{rms}} package} \item{newdata}{An optional data frame in which to look for variables with which to predict. If omitted, the fitted values are used} \item{newdata2}{An optional data frame that can only be specified if \code{newdata} is not omitted, and must have the same dimensions as \code{newdata}. If specified, marginal changes are calculated by subtracting the expected durations for \code{newdata2} from the expected durations for \code{newdata}} \item{bootstrap}{Should bootstrapped standard errors and confidence intervals be calculated?} \item{method}{If "npsf" (the default), expected durations are calculated using the non-parametric step function approach described in Kropko and Harden (2018). If "gam", expected durations are calculated using the GAM method} \item{k}{The number of knots in the GAM smoother. The default is -1, which employs the \code{\link[mgcv]{choose.k}} function from the \code{\link{mgcv}} package to choose the number of knots} \item{B}{Number of bootstrap simulation iterations} \item{confidence}{If "studentized" (the default), bootstrapped CIs are calculated from the tails of a normal distribution where the mean and standard deviation are the point estimate and boostrapped SE of each duration estimate. If "empirical", bootstrapped confidence intervals are calculated empirically. 
If "bca", bootstrapped confidence intervals are calculated using the bias-correction and acceleration method described by DiCiccio and Efron (1996).} \item{level}{The level of the confidence interval to calculate (default is .95 for a 95 percent confidence interval)} \item{id}{Cluster variable if bootstrapping is to be done by clusters of observations rather than individual observations. If the data are coded with time-varying covariates (using the \code{time2} argument in the \code{\link[survival]{Surv}} function), this variable must be the ID variable in the \emph{data that are used to estimate the Cox PH model}, and not the ID variable in new data.} \item{...}{Additional arguments to be passed to the \code{\link[coxed]{bootcov2}} function, an adaptation of the \code{\link[rms]{bootcov}} function in the \code{\link{rms}} package} } \value{ \code{coxed} returns an object of \code{\link[base]{class}} "coxedExpdur" or "coxedMargin", which is a list containing some of the following components, depending on the implementation of \code{coxed}: \tabular{ll}{ \code{exp.dur} \tab A vector of predicted mean durations for the estimation sample if \code{newdata} is omitted, or else for the specified new data. If \code{bootstrap} is \code{TRUE} bootstrapped standard errors are also provided, as well as the confidence interval requested by \code{level}. \cr \code{mean} \tab The mean of the predicted durations. If \code{bootstrap} is \code{TRUE} bootstrapped standard errors are also provided, as well as the confidence interval requested by \code{level}. \cr \code{median} \tab The median of the predicted durations. If \code{bootstrap} is \code{TRUE} bootstrapped standard errors are also provided, as well as the confidence interval requested by \code{level}. \cr \code{baseline.functions} \tab The estimated cumulative baseline hazard function and survivor function. 
\cr \code{gam.model} \tab Output from the \code{\link[mgcv]{gam}} function in which the durations are fit against the exponentiated linear predictors from the Cox model.\cr \code{gam.data} \tab Fitted values and confidence intervals from the GAM model.\cr \code{exp.dur1} \tab A vector of predicted mean durations for the observations in \code{newdata1} when calculating marginal effects. \cr \code{exp.dur2} \tab A vector of predicted mean durations for the observations in \code{newdata2} when calculating marginal effects. \cr \code{mean1} \tab The mean of the predicted mean durations for the observations in \code{newdata1} when calculating marginal effects. \cr \code{mean2} \tab The mean of the predicted mean durations for the observations in \code{newdata2} when calculating marginal effects. \cr \code{median1} \tab The median of the predicted mean durations for the observations in \code{newdata1} when calculating marginal effects. \cr \code{median2} \tab The median of the predicted mean durations for the observations in \code{newdata2} when calculating marginal effects. \cr \code{diff} \tab A vector of the difference between the predicted mean durations for each observation under the covariate profile in \code{newdata2} and the covariate profile in \code{newdata1}.\cr \code{mean.diff} \tab The mean of the differences in duration across observations. \cr \code{median.diff} \tab The median of the differences in duration across observations. \cr } } \description{ \code{coxed()} returns expected durations for every observation in the data used to fit the model, or in new data, or returns the mean or median of these durations, or differences in duration for two pre-defined covariate profiles. Standard errors and confidence intervals for all quantities produced by \code{coxed()} are calculated via bootstrapping. 
} \details{ The \code{coxed} function generates expected durations for individual observations and/or marginal changes in expected duration given a change in a covariate from the Cox proportional hazards model. Specifically, the methods can compute (1) the expected duration for each observation used to fit the Cox model, given the covariates, (2) the expected duration for a "new" observation with a covariate profile set by the analyst, or (3) the first difference, or change, in expected duration given two new data frames. There are two different methods, described in Kropko and Harden (2018), of generating duration-based quantities in the package. The first method calculates expected durations by using a nonparametric estimate of the baseline hazard and survivor functions (see \code{\link[coxed]{coxed.npsf}} for details). The second method employs a generalized additive model (GAM) to map the model's estimated linear predictor values to duration times (see \code{\link[coxed]{coxed.gam}} for details). Both methods are also implemented for data structures with time-varying covariates (see \code{\link[coxed]{coxed.npsf.tvc}} and \code{\link[coxed]{coxed.gam.tvc}}). 
} \examples{ mv.surv <- Surv(martinvanberg$formdur, event = rep(1, nrow(martinvanberg))) mv.cox <- coxph(mv.surv ~ postel + prevdef + cont + ident + rgovm + pgovno + tpgovno + minority, method = "breslow", data = martinvanberg) summary(mv.cox) # NPSF method ed1 <- coxed(mv.cox, method="npsf") ed1$baseline.functions ed1$exp.dur summary(ed1, stat="mean") summary(ed1, stat="median") \dontrun{ed1 <- coxed(mv.cox, method="npsf", bootstrap = TRUE) ed1$exp.dur summary(ed1, stat="mean") summary(ed1, stat="median") } me <- coxed(mv.cox, method="npsf", bootstrap = FALSE, newdata = dplyr::mutate(martinvanberg, pgovno=1), newdata2 = dplyr::mutate(martinvanberg, pgovno=6)) summary(me, stat="mean") # GAM method ed2 <- coxed(mv.cox, method="gam") summary(ed2$gam.data) summary(ed2$gam.model) ed2$exp.dur summary(ed2, stat="mean") \dontrun{me <- coxed(mv.cox, method="gam", bootstrap = TRUE, newdata = dplyr::mutate(martinvanberg, pgovno=1), newdata2 = dplyr::mutate(martinvanberg, pgovno=6)) summary(me, stat="mean") summary(me, stat="median") } #Plotting the GAM fit \dontrun{ggplot(ed2$gam.data, aes(x=rank.xb, y=y)) + geom_point() + geom_line(aes(x=rank.xb, y=gam_fit)) + geom_ribbon(aes(ymin=gam_fit_95lb, ymax=gam_fit_95ub), alpha=.5) + xlab("Cox model LP rank (smallest to largest)") + ylab("Duration") } #Time-varying covariates bs.surv <- Surv(time = boxsteffensmeier$start, time2 = boxsteffensmeier$te, event = boxsteffensmeier$cut_hi) bs.cox <- coxph(bs.surv ~ ec + dem + south + iv, data = boxsteffensmeier, method = "breslow") summary(bs.cox) ed1 <- coxed(bs.cox, method="npsf", id=boxsteffensmeier$caseid) ed1$exp.dur summary(ed1, stat="mean") } \references{ Kropko, J. and Harden, J. J. (2018). Beyond the Hazard Ratio: Generating Expected Durations from the Cox Proportional Hazards Model. \emph{British Journal of Political Science} \url{https://doi.org/10.1017/S000712341700045X} DiCiccio, T. J. and B. Efron. (1996). Bootstrap Confidence Intervals. \emph{Statistical Science}. 
11(3): 189–212. \url{https://doi.org/10.1214/ss/1032280214} } \seealso{ \code{\link[survival]{coxph}}, \code{\link[rms]{cph}}, \code{\link[coxed]{bootcov2}}, \code{\link[coxed]{coxed.gam}}, \code{\link[coxed]{coxed.gam.tvc}}, \code{\link[coxed]{coxed.npsf}}, \code{\link[coxed]{coxed.npsf.tvc}} } \author{ Jonathan Kropko <jkropko@virginia.edu> and Jeffrey J. Harden <jharden2@nd.edu> }
/man/coxed.Rd
no_license
alexa-woodward/coxed
R
false
true
9,334
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/coxed.R \name{coxed} \alias{coxed} \title{Expected durations and marginal changes in expected duration from the Cox proportional hazards model} \usage{ coxed(cox.model, newdata = NULL, newdata2 = NULL, bootstrap = FALSE, method = "npsf", k = -1, B = 200, confidence = "studentized", level = 0.95, id = NULL, ...) } \arguments{ \item{cox.model}{The output from a Cox proportional hazards model estimated with the \code{\link[survival]{coxph}} function in the \code{survival} package or with the \code{\link[rms]{cph}} function in the \code{\link[rms]{rms}} package} \item{newdata}{An optional data frame in which to look for variables with which to predict. If omitted, the fitted values are used} \item{newdata2}{An optional data frame that can only be specified if \code{newdata} is not omitted, and must have the same dimensions as \code{newdata}. If specified, marginal changes are calculated by subtracting the expected durations for \code{newdata2} from the expected durations for \code{newdata}} \item{bootstrap}{Should bootstrapped standard errors and confidence intervals be calculated?} \item{method}{If "npsf" (the default), expected durations are calculated using the non-parametric step function approach described in Kropko and Harden (2018). If "gam", expected durations are calculated using the GAM method} \item{k}{The number of knots in the GAM smoother. The default is -1, which employs the \code{\link[mgcv]{choose.k}} function from the \code{\link{mgcv}} package to choose the number of knots} \item{B}{Number of bootstrap simulation iterations} \item{confidence}{If "studentized" (the default), bootstrapped CIs are calculated from the tails of a normal distribution where the mean and standard deviation are the point estimate and boostrapped SE of each duration estimate. If "empirical", bootstrapped confidence intervals are calculated empirically. 
If "bca", bootstrapped confidence intervals are calculated using the bias-correction and acceleration method described by DiCiccio and Efron (1996).} \item{level}{The level of the confidence interval to calculate (default is .95 for a 95 percent confidence interval)} \item{id}{Cluster variable if bootstrapping is to be done by clusters of observations rather than individual observations. If the data are coded with time-varying covariates (using the \code{time2} argument in the \code{\link[survival]{Surv}} function), this variable must be the ID variable in the \emph{data that are used to estimate the Cox PH model}, and not the ID variable in new data.} \item{...}{Additional arguments to be passed to the \code{\link[coxed]{bootcov2}} function, an adaptation of the \code{\link[rms]{bootcov}} function in the \code{\link{rms}} package} } \value{ \code{coxed} returns an object of \code{\link[base]{class}} "coxedExpdur" or "coxedMargin", which is a list containing some of the following components, depending on the implementation of \code{coxed}: \tabular{ll}{ \code{exp.dur} \tab A vector of predicted mean durations for the estimation sample if \code{newdata} is omitted, or else for the specified new data. If \code{bootstrap} is \code{TRUE} bootstrapped standard errors are also provided, as well as the confidence interval requested by \code{level}. \cr \code{mean} \tab The mean of the predicted durations. If \code{bootstrap} is \code{TRUE} bootstrapped standard errors are also provided, as well as the confidence interval requested by \code{level}. \cr \code{median} \tab The median of the predicted durations. If \code{bootstrap} is \code{TRUE} bootstrapped standard errors are also provided, as well as the confidence interval requested by \code{level}. \cr \code{baseline.functions} \tab The estimated cumulative baseline hazard function and survivor function. 
\cr \code{gam.model} \tab Output from the \code{\link[mgcv]{gam}} function in which the durations are fit against the exponentiated linear predictors from the Cox model.\cr \code{gam.data} \tab Fitted values and confidence intervals from the GAM model.\cr \code{exp.dur1} \tab A vector of predicted mean durations for the observations in \code{newdata1} when calculating marginal effects. \cr \code{exp.dur2} \tab A vector of predicted mean durations for the observations in \code{newdata2} when calculating marginal effects. \cr \code{mean1} \tab The mean of the predicted mean durations for the observations in \code{newdata1} when calculating marginal effects. \cr \code{mean2} \tab The mean of the predicted mean durations for the observations in \code{newdata2} when calculating marginal effects. \cr \code{median1} \tab The median of the predicted mean durations for the observations in \code{newdata1} when calculating marginal effects. \cr \code{median2} \tab The median of the predicted mean durations for the observations in \code{newdata2} when calculating marginal effects. \cr \code{diff} \tab A vector of the difference between the predicted mean durations for each observation under the covariate profile in \code{newdata2} and the covariate profile in \code{newdata1}.\cr \code{mean.diff} \tab The mean of the differences in duration across observations. \cr \code{median.diff} \tab The median of the differences in duration across observations. \cr } } \description{ \code{coxed()} returns expected durations for every observation in the data used to fit the model, or in new data, or returns the mean or median of these durations, or differences in duration for two pre-defined covariate profiles. Standard errors and confidence intervals for all quantities produced by \code{coxed()} are calculated via bootstrapping. 
} \details{ The \code{coxed} function generates expected durations for individual observations and/or marginal changes in expected duration given a change in a covariate from the Cox proportional hazards model. Specifically, the methods can compute (1) the expected duration for each observation used to fit the Cox model, given the covariates, (2) the expected duration for a "new" observation with a covariate profile set by the analyst, or (3) the first difference, or change, in expected duration given two new data frames. There are two different methods, described in Kropko and Harden (2018), of generating duration-based quantities in the package. The first method calculates expected durations by using a nonparametric estimate of the baseline hazard and survivor functions (see \code{\link[coxed]{coxed.npsf}} for details). The second method employs a generalized additive model (GAM) to map the model's estimated linear predictor values to duration times (see \code{\link[coxed]{coxed.gam}} for details). Both methods are also implemented for data structures with time-varying covariates (see \code{\link[coxed]{coxed.npsf.tvc}} and \code{\link[coxed]{coxed.gam.tvc}}). 
} \examples{ mv.surv <- Surv(martinvanberg$formdur, event = rep(1, nrow(martinvanberg))) mv.cox <- coxph(mv.surv ~ postel + prevdef + cont + ident + rgovm + pgovno + tpgovno + minority, method = "breslow", data = martinvanberg) summary(mv.cox) # NPSF method ed1 <- coxed(mv.cox, method="npsf") ed1$baseline.functions ed1$exp.dur summary(ed1, stat="mean") summary(ed1, stat="median") \dontrun{ed1 <- coxed(mv.cox, method="npsf", bootstrap = TRUE) ed1$exp.dur summary(ed1, stat="mean") summary(ed1, stat="median") } me <- coxed(mv.cox, method="npsf", bootstrap = FALSE, newdata = dplyr::mutate(martinvanberg, pgovno=1), newdata2 = dplyr::mutate(martinvanberg, pgovno=6)) summary(me, stat="mean") # GAM method ed2 <- coxed(mv.cox, method="gam") summary(ed2$gam.data) summary(ed2$gam.model) ed2$exp.dur summary(ed2, stat="mean") \dontrun{me <- coxed(mv.cox, method="gam", bootstrap = TRUE, newdata = dplyr::mutate(martinvanberg, pgovno=1), newdata2 = dplyr::mutate(martinvanberg, pgovno=6)) summary(me, stat="mean") summary(me, stat="median") } #Plotting the GAM fit \dontrun{ggplot(ed2$gam.data, aes(x=rank.xb, y=y)) + geom_point() + geom_line(aes(x=rank.xb, y=gam_fit)) + geom_ribbon(aes(ymin=gam_fit_95lb, ymax=gam_fit_95ub), alpha=.5) + xlab("Cox model LP rank (smallest to largest)") + ylab("Duration") } #Time-varying covariates bs.surv <- Surv(time = boxsteffensmeier$start, time2 = boxsteffensmeier$te, event = boxsteffensmeier$cut_hi) bs.cox <- coxph(bs.surv ~ ec + dem + south + iv, data = boxsteffensmeier, method = "breslow") summary(bs.cox) ed1 <- coxed(bs.cox, method="npsf", id=boxsteffensmeier$caseid) ed1$exp.dur summary(ed1, stat="mean") } \references{ Kropko, J. and Harden, J. J. (2018). Beyond the Hazard Ratio: Generating Expected Durations from the Cox Proportional Hazards Model. \emph{British Journal of Political Science} \url{https://doi.org/10.1017/S000712341700045X} DiCiccio, T. J. and B. Efron. (1996). Bootstrap Confidence Intervals. \emph{Statistical Science}. 
11(3): 189–212. \url{https://doi.org/10.1214/ss/1032280214} } \seealso{ \code{\link[survival]{coxph}}, \code{\link[rms]{cph}}, \code{\link[coxed]{bootcov2}}, \code{\link[coxed]{coxed.gam}}, \code{\link[coxed]{coxed.gam.tvc}}, \code{\link[coxed]{coxed.npsf}}, \code{\link[coxed]{coxed.npsf.tvc}} } \author{ Jonathan Kropko <jkropko@virginia.edu> and Jeffrey J. Harden <jharden2@nd.edu> }
###################### # # # More on rtweet # # # ###################### # Load packages library(rtweet) ## Resolving User Profile Information ---- # You have a collection of ids and need to resolve basic profile information (such as screen names) for these users. recent_rtweeters <- lookup_users() ## Crawling Followers to Approximate Primary Influence ---- rtweet::get_followers() ## Analyzing Friendship Relationships such as Friends of Friends ---- # Problem: You want to create a graph that facilitates the analysis of interesting relationships amongst users, # such as friends of friends. # Solution: Systematically harvest all of the friendships for users of interest, # and load the data into igraph which offers native graph operations.
/data-analysis/more_on_twitter_api.R
no_license
papaemman/Social-Network-Analysis-AUTh
R
false
false
802
r
###################### # # # More on rtweet # # # ###################### # Load packages library(rtweet) ## Resolving User Profile Information ---- # You have a collection of ids and need to resolve basic profile information (such as screen names) for these users. recent_rtweeters <- lookup_users() ## Crawling Followers to Approximate Primary Influence ---- rtweet::get_followers() ## Analyzing Friendship Relationships such as Friends of Friends ---- # Problem: You want to create a graph that facilitates the analysis of interesting relationships amongst users, # such as friends of friends. # Solution: Systematically harvest all of the friendships for users of interest, # and load the data into igraph which offers native graph operations.
##' @title plotLmCor ##' @description This function would generate a list including a countData and colData as input data for testing ##' @param data Default dataset to use for plot. ##' @param mapping Default list of aesthetic mappings to use for plot ##' @return a ggplot2 object ##' @examples ##' require(ggplot2) ##' print(plotLmCor(mpg,aes(cty,hwy))) ##' @import ggplot2 ##' @importFrom ggpubr stat_cor ##' @importFrom stats lm ##' @export plotLmCor <- function(data,mapping) { p<-ggplot(data,mapping)+geom_point()+ stat_smooth(method=lm) + stat_cor(method = "pearson") return(p) } ##' @title plotLmCor2 ##' @description This function would generate a list including a countData and colData as input data for testing ##' @param data Default dataset to use for plot. ##' @param x Default variable from data for aesthetic mappings to use for plot ##' @param y Default variable from data for aesthetic mappings to use for plot ##' @return a ggplot2 object ##' @examples ##' require(ggplot2) ##' print(plotLmCor2(mpg,cty,hwy)) ##' @import ggplot2 ##' @importFrom ggpubr stat_cor ##' @importFrom stats lm ##' @export plotLmCor2 <- function(data,x,y){ p <- ggplot(data,aes_(substitute(x),substitute(y)))+ geom_point()+ stat_smooth(method=lm) + stat_cor(method = "pearson") return(p) }
/R/plotLmCor.R
no_license
Feng-Zhang/bioPlots
R
false
false
1,314
r
##' @title plotLmCor ##' @description This function would generate a list including a countData and colData as input data for testing ##' @param data Default dataset to use for plot. ##' @param mapping Default list of aesthetic mappings to use for plot ##' @return a ggplot2 object ##' @examples ##' require(ggplot2) ##' print(plotLmCor(mpg,aes(cty,hwy))) ##' @import ggplot2 ##' @importFrom ggpubr stat_cor ##' @importFrom stats lm ##' @export plotLmCor <- function(data,mapping) { p<-ggplot(data,mapping)+geom_point()+ stat_smooth(method=lm) + stat_cor(method = "pearson") return(p) } ##' @title plotLmCor2 ##' @description This function would generate a list including a countData and colData as input data for testing ##' @param data Default dataset to use for plot. ##' @param x Default variable from data for aesthetic mappings to use for plot ##' @param y Default variable from data for aesthetic mappings to use for plot ##' @return a ggplot2 object ##' @examples ##' require(ggplot2) ##' print(plotLmCor2(mpg,cty,hwy)) ##' @import ggplot2 ##' @importFrom ggpubr stat_cor ##' @importFrom stats lm ##' @export plotLmCor2 <- function(data,x,y){ p <- ggplot(data,aes_(substitute(x),substitute(y)))+ geom_point()+ stat_smooth(method=lm) + stat_cor(method = "pearson") return(p) }
# Total emissions from PM2.5 decreased in the United States from 1999 to 2008 #Using the base plotting system #Loading Data# setwd("C:/Users/User/Desktop/Rdirectory/Data/zips/unzip") NEI <- readRDS("summarySCC_PM25.rds") SCC <- readRDS("Source_Classification_Code.rds") #Observe the variables# head(NEI) head(SCC) #Plotting total emissions of PM2.5 in United States between 1999 - 2008# TotalEmissionPY <- tapply(NEI$Emissions, NEI$year, sum) ## Create plot1 par("mar"=c(5.1, 4.5, 4.1, 2.1)) png(filename = "plot1.png", width = 480, height = 480, units = "px") plot(TotalEmissionPY, x = rownames(TotalEmissionPY), type = "n", axes = FALSE, ylab = expression("Total PM"[2.5] * " Emission (in tons)"), xlab = "Year", main = expression("Total PM"[2.5] * " Emission (1999 - 2008)")) points(TotalEmissionPY, x = rownames(TotalEmissionPY), pch = 20, col = "red") lines(TotalEmissionPY, x = rownames(TotalEmissionPY), col = "green") axis(2) axis(side = 1, at = seq(1999, 2008, by = 3)) box() dev.off()
/Codes/plot1.R
no_license
ktkt2009/ExData_Plotting2
R
false
false
1,021
r
# Total emissions from PM2.5 decreased in the United States from 1999 to 2008 #Using the base plotting system #Loading Data# setwd("C:/Users/User/Desktop/Rdirectory/Data/zips/unzip") NEI <- readRDS("summarySCC_PM25.rds") SCC <- readRDS("Source_Classification_Code.rds") #Observe the variables# head(NEI) head(SCC) #Plotting total emissions of PM2.5 in United States between 1999 - 2008# TotalEmissionPY <- tapply(NEI$Emissions, NEI$year, sum) ## Create plot1 par("mar"=c(5.1, 4.5, 4.1, 2.1)) png(filename = "plot1.png", width = 480, height = 480, units = "px") plot(TotalEmissionPY, x = rownames(TotalEmissionPY), type = "n", axes = FALSE, ylab = expression("Total PM"[2.5] * " Emission (in tons)"), xlab = "Year", main = expression("Total PM"[2.5] * " Emission (1999 - 2008)")) points(TotalEmissionPY, x = rownames(TotalEmissionPY), pch = 20, col = "red") lines(TotalEmissionPY, x = rownames(TotalEmissionPY), col = "green") axis(2) axis(side = 1, at = seq(1999, 2008, by = 3)) box() dev.off()
rm(list=ls()) #clear out environment library(GEOquery) #require package gset <- getGEO("GSE42414", GSEMatrix=TRUE) #load the NCBI database data using internet
/Refine_FullLaurenti.R
no_license
bwcont/Bioinform
R
false
false
162
r
rm(list=ls()) #clear out environment library(GEOquery) #require package gset <- getGEO("GSE42414", GSEMatrix=TRUE) #load the NCBI database data using internet
library(tidyverse) library(caret) library(e1071) # for skewness library(zoo) # for na.aggregate which replaces N/A with the mean for the respective group library(randomForest) library(rpart) library(elasticnet) # for lasso and ridge and elasticnet regression library(pls) # for principle component analysis library(fastICA) # for Independent Component regression library(monomvn) # for Bayesian ridge regression #Read in CSV file from my hard drive #Data available in my GitHub at https://github.com/nordicbychris/NYCPropertyMLEdxProject20190613.git nycproperties <- read_csv("C:/RCoding/nyc-property-sales/nyc-rolling-sales.csv") colnames(nycproperties) dim(nycproperties) # Pre-processing and wrangling # ---------------------------- # # Convert to numeric or date for graphing and analysis nycproperties$`SALE PRICE` <- as.numeric(as.character((nycproperties$`SALE PRICE`))) class(nycproperties$`SALE PRICE`) nycproperties$`LAND SQUARE FEET` <- as.numeric(as.character((nycproperties$`LAND SQUARE FEET`))) class(nycproperties$`LAND SQUARE FEET`) nycproperties$`GROSS SQUARE FEET` <- as.numeric(as.character((nycproperties$`GROSS SQUARE FEET`))) class(nycproperties$`GROSS SQUARE FEET`) nycproperties$`SALE DATE` <- as.Date(as.character((nycproperties$`SALE DATE`))) class(nycproperties$`SALE DATE`) # Create new column for BuildingAge and fill with data nycproperties[c("BuildingAge")] <- 2019 - nycproperties$`YEAR BUILT` # Replace NA values with the mean for the column/group for the numeric columns checknumeric <- sapply(nycproperties, is.numeric) nycproperties[checknumeric] <- lapply(nycproperties[checknumeric], na.aggregate) # Make a new data frame of all the living spaces (not condos) since these have complete data for multiple variables livingspaces <- nycproperties %>% filter(str_detect(`BUILDING CLASS AT TIME OF SALE` ,"A") | str_detect(`BUILDING CLASS AT TIME OF SALE` ,"B") | str_detect(`BUILDING CLASS AT TIME OF SALE` ,"C") | str_detect(`BUILDING CLASS AT TIME OF SALE` ,"D") 
| str_detect(`BUILDING CLASS AT TIME OF SALE` ,"L")) # Remove zero values and/or extreme outliers livingspaces <- livingspaces %>% filter((`SALE PRICE` > 20 & `SALE PRICE` < 100000000) & BuildingAge > 0 & BuildingAge < 500 & `TOTAL UNITS` > 0 & `TOTAL UNITS`< 500 & `GROSS SQUARE FEET`> 0 & `LAND SQUARE FEET` > 0 ) # Data exploration and visualization # ---------------------------------- # Histogram to show price distribution hist(livingspaces$`SALE PRICE`) # Repeat histogram with a smaller part of the data set livingspaces %>% filter(`SALE PRICE` > 100000 & `SALE PRICE` < 5000000) %>% ggplot(aes(`SALE PRICE`)) + geom_histogram(binwidth = 100000, fill = "lightsteelblue") # Note the peak at the mean value due to replacement of NA with the mean # Histograms of the predictors livingspaces %>% ggplot(aes(BuildingAge)) + geom_histogram(fill = "lightsteelblue") livingspaces %>% ggplot(aes(`GROSS SQUARE FEET`)) + geom_histogram(fill = "lightsteelblue") livingspaces %>% ggplot(aes(`LAND SQUARE FEET`)) + geom_histogram(fill = "lightsteelblue") livingspaces %>% ggplot(aes(`TOTAL UNITS`)) + geom_histogram(fill = "lightsteelblue") # Measure how skewed the sale price data is skewness(livingspaces$`SALE PRICE`, type = 1) # Skewnewss of predictors skewness(livingspaces$BuildingAge, type = 1) skewness(livingspaces$`GROSS SQUARE FEET`, type = 1) skewness(livingspaces$`LAND SQUARE FEET`, type = 1) skewness(livingspaces$`TOTAL UNITS`, type = 1) # All skewed except for BuildingAge. Something to be considered in the analysis later. 
# Dot plot of all sale prices divided into the category borough livingspaces %>% ggplot(aes(BOROUGH, `SALE PRICE`)) + geom_point(color = "lightblue") + ggtitle("Sale prices by NYC borough") + coord_flip() # Plot of the frequency in each borough livingspaces %>% ggplot(aes(BOROUGH)) + geom_bar(fill = "lightsteelblue") + ylab("Property count") + ggtitle("Frequency by NYC borough") # Note that the number of points for Manhattan has dropped # The mean sale price sorted by Borough livingspaces %>% group_by(BOROUGH) %>% summarise(n = n(), avg = mean(`SALE PRICE`), se = sd(`SALE PRICE`)/sqrt(n())) %>% ggplot(aes(x = BOROUGH, y = avg, ymin = avg - 2*se, ymax = avg + 2*se)) + geom_point() + geom_errorbar() + xlab("Borough") + ylab("Mean sale price") + ggtitle("Sale prices by NYC borough") # Note that Manhattan prices are well above the average for the remaining boroughs # Does the sale price vary according to sale date? i.e. are prices rising? livingspaces %>% ggplot(aes(`SALE DATE`,`SALE PRICE`)) + geom_point() + geom_smooth(na.rm = TRUE, color = "red", size = 0.1, method = lm) + ggtitle("Sale prices by date of sale") # No major change in price over time, so can be excluded from model # Does the sale price vary according to building age? livingspaces %>% ggplot(aes(BuildingAge,`SALE PRICE`)) + geom_point() + geom_smooth(color = "red", size = 0.1, method = lm) + xlab("Building Age (years)") + ylab("Sale Price") + ggtitle("Sale prices by age of building") # Buildings around 100 years old tend to have higher sale price # Does the sale price vary according to number of units? livingspaces %>% ggplot(aes(`TOTAL UNITS`,`SALE PRICE`)) + geom_point() + geom_smooth(color = "red", size = 0.1, method = lm) + xlab("Number of Units") + ylab("Sale Price") + ggtitle("Sale prices by number of units") # Relationship looks good for higher sale prices (i.e. some prices are probably invalid) # Does the sale price vary according to the lot area of the property? 
livingspaces %>% ggplot(aes(`LAND SQUARE FEET`,`SALE PRICE`)) + geom_point() + geom_smooth(color = "red", size = 0.1, method = lm) + xlab("Land Square Feet") + ylab("Sale Price") + ggtitle("Sale prices by area of lot") # For reasonable-sized lots the relationship looks good # Does the sale price vary according to the actual size of the living space (i.e gross square feet)? livingspaces %>% ggplot(aes(`GROSS SQUARE FEET`,`SALE PRICE`)) + geom_point() + geom_smooth(color = "red", size = 0.1, method = lm) + xlab("Gross Square Feet") + ylab("Sale Price") + ggtitle("Sale prices by living area") # Same as for lot size, but again the sale prices close to zero seem to affect the relationship. # Consider these outliers for the model # Data partitioning and further pre-processing # -------------------------------------------- # Remove unnecessary columns livingspaces <- livingspaces[, !(colnames(livingspaces) %in% c("X1", "BLOCK", "NEIGHBORHOOD", "BUILDING CLASS CATEGORY", "TAX CLASS AT PRESENT", "LOT", "EASE-MENT", "BUILDING CLASS AT PRESENT", "ADDRESS", "APARTMENT NUMBER", "ZIP CODE", "RESIDENTIAL UNITS", "COMMERCIAL UNITS", "TAX CLASS AT TIME OF SALE", "YEAR BUILT", "BUILDING CLASS AT TIME OF SALE", "SALE DATE"))] # Create data partition with 10% in the test/validation set set.seed(1) test_index <- createDataPartition(y = livingspaces$`SALE PRICE`, times = 1, p = 0.1, list = FALSE) livingtrain <- livingspaces[-test_index,] livingtest <- livingspaces[test_index,] rm(test_index) # Training the models and using them to predict sale price in the test set # ------------------------------------------------------------------------ # Definition of RMSE RMSE <- function(true_price, predicted_price){ sqrt(mean((true_price - predicted_price)^2)) } # Note that due to the skewed price data the RMSE comes out very large # These data should be log transformed but the model fits do not work on log transformed data # RMSE does decrease by using models other than linear regression # Model 
1: Just predict that the sale price will be the mean mu_hat <- mean(livingtrain$`SALE PRICE`) meanRMSE <- RMSE(livingtest$`SALE PRICE`, mu_hat) rmse_results <- data_frame(Model = "Mean sale price", RMSE = meanRMSE) rmse_results # Model 2: Use linear regression of only the Gross Square Feet lmGross <- train(`SALE PRICE` ~ `GROSS SQUARE FEET`, data=livingtrain, method = "lm") lmGross predictedmodel2 <- predict(lmGross, livingtest) lmGrossRMSE <- RMSE(livingtest$`SALE PRICE`, predictedmodel2) rmse_results <- bind_rows(rmse_results, data_frame(Model="Linreg Gross Area", RMSE = lmGrossRMSE )) rmse_results %>% knitr::kable() # Model 3: Use linear regression with all predictors lmAll <- train(`SALE PRICE` ~ ., data=livingtrain, method = "lm") lmAll predictedmodel3 <- predict(lmAll, livingtest) lmAllRMSE <- RMSE(livingtest$`SALE PRICE`, predictedmodel3) rmse_results <- bind_rows(rmse_results, data_frame(Model="Linreg All Predictors", RMSE = lmAllRMSE )) rmse_results %>% knitr::kable() # Model 4: Use linear regression but exclude the predictor 'Borough' lmnoBorough <- train(`SALE PRICE` ~ BuildingAge + `TOTAL UNITS` + `LAND SQUARE FEET` + `GROSS SQUARE FEET`, data=livingtrain, method = "lm") lmnoBorough predictedmodel4 <- predict(lmnoBorough, livingtest) lmnoBoroughRMSE <- RMSE(livingtest$`SALE PRICE`, predictedmodel4) rmse_results <- bind_rows(rmse_results, data_frame(Model="Linreg without Borough", RMSE = lmnoBoroughRMSE )) rmse_results %>% knitr::kable() # Model 5: Use lasso regression # Tune for fraction (default uses 0.1, 0.5, 0.9) fitLasso <- train(`SALE PRICE` ~ ., data=livingtrain, method = "lasso", tuneGrid = data.frame(fraction = seq(0.1, 1, 0.1))) fitLasso predictedmodel5 <- predict(fitLasso, livingtest) lassoRMSE <- RMSE(livingtest$`SALE PRICE`, predictedmodel5) rmse_results <- bind_rows(rmse_results, data_frame(Model="Lasso regression", RMSE = lassoRMSE )) rmse_results %>% knitr::kable() # Model 6: Use principal component analysis # Tune for ncomp (default uses 
1, 2, 3) fitPCR <- train(`SALE PRICE` ~ ., data=livingtrain, method = "pcr", tuneGrid = data.frame(ncomp = seq(1, 5, 0.5))) fitPCR predictedmodel6 <- predict(fitPCR, livingtest) PCRRMSE <- RMSE(livingtest$`SALE PRICE`, predictedmodel6) rmse_results <- bind_rows(rmse_results, data_frame(Model="Principal component analysis", RMSE = PCRRMSE )) rmse_results %>% knitr::kable() # Model 7: Elasticnet regression fitEnet <- train(`SALE PRICE` ~ ., data=livingtrain, method = "enet") fitEnet predictedmodel7 <- predict(fitEnet, livingtest) EnetRMSE <- RMSE(livingtest$`SALE PRICE`, predictedmodel7) rmse_results <- bind_rows(rmse_results, data_frame(Model="Elasticnet regression", RMSE = EnetRMSE )) rmse_results %>% knitr::kable() # Model 8: Ridge regression fitRidge <- train(`SALE PRICE` ~ ., data=livingtrain, method = "ridge", tuneGrid = data.frame(lambda = seq(0, 0.1, 0.01))) fitRidge predictedmodel8 <- predict(fitRidge, livingtest) RidgeRMSE <- RMSE(livingtest$`SALE PRICE`, predictedmodel8) rmse_results <- bind_rows(rmse_results, data_frame(Model="Ridge regression", RMSE = RidgeRMSE )) rmse_results %>% knitr::kable() # Model 9: Independent Component Regression fitICR <- train(`SALE PRICE` ~ ., data=livingtrain, method = "icr", tuneGrid = data.frame(n.comp = seq(1, 5, 0.5))) fitICR predictedmodel9 <- predict(fitICR, livingtest) ICRRMSE <- RMSE(livingtest$`SALE PRICE`, predictedmodel9) rmse_results <- bind_rows(rmse_results, data_frame(Model="ICR regression", RMSE = ICRRMSE )) rmse_results %>% knitr::kable() # Model 10: Bayesian ridge regression # Note: Longer running time and outputs iteration of t and m in the R Console fitBridge <- train(`SALE PRICE` ~ ., data=livingtrain, method = "bridge") fitBridge predictedmodel10 <- predict(fitBridge, livingtest) BridgeRMSE <- RMSE(livingtest$`SALE PRICE`, predictedmodel10) rmse_results <- bind_rows(rmse_results, data_frame(Model="Bayesian Ridge regression", RMSE = BridgeRMSE )) # The final list of models sorted by increasing RMSE 
rmse_results %>% arrange(RMSE) %>% knitr::kable()
/NYCProperty.R
no_license
nordicbychris/NYCPropertyMLEdxProject20190613
R
false
false
12,675
r
library(tidyverse) library(caret) library(e1071) # for skewness library(zoo) # for na.aggregate which replaces N/A with the mean for the respective group library(randomForest) library(rpart) library(elasticnet) # for lasso and ridge and elasticnet regression library(pls) # for principle component analysis library(fastICA) # for Independent Component regression library(monomvn) # for Bayesian ridge regression #Read in CSV file from my hard drive #Data available in my GitHub at https://github.com/nordicbychris/NYCPropertyMLEdxProject20190613.git nycproperties <- read_csv("C:/RCoding/nyc-property-sales/nyc-rolling-sales.csv") colnames(nycproperties) dim(nycproperties) # Pre-processing and wrangling # ---------------------------- # # Convert to numeric or date for graphing and analysis nycproperties$`SALE PRICE` <- as.numeric(as.character((nycproperties$`SALE PRICE`))) class(nycproperties$`SALE PRICE`) nycproperties$`LAND SQUARE FEET` <- as.numeric(as.character((nycproperties$`LAND SQUARE FEET`))) class(nycproperties$`LAND SQUARE FEET`) nycproperties$`GROSS SQUARE FEET` <- as.numeric(as.character((nycproperties$`GROSS SQUARE FEET`))) class(nycproperties$`GROSS SQUARE FEET`) nycproperties$`SALE DATE` <- as.Date(as.character((nycproperties$`SALE DATE`))) class(nycproperties$`SALE DATE`) # Create new column for BuildingAge and fill with data nycproperties[c("BuildingAge")] <- 2019 - nycproperties$`YEAR BUILT` # Replace NA values with the mean for the column/group for the numeric columns checknumeric <- sapply(nycproperties, is.numeric) nycproperties[checknumeric] <- lapply(nycproperties[checknumeric], na.aggregate) # Make a new data frame of all the living spaces (not condos) since these have complete data for multiple variables livingspaces <- nycproperties %>% filter(str_detect(`BUILDING CLASS AT TIME OF SALE` ,"A") | str_detect(`BUILDING CLASS AT TIME OF SALE` ,"B") | str_detect(`BUILDING CLASS AT TIME OF SALE` ,"C") | str_detect(`BUILDING CLASS AT TIME OF SALE` ,"D") 
| str_detect(`BUILDING CLASS AT TIME OF SALE` ,"L")) # Remove zero values and/or extreme outliers livingspaces <- livingspaces %>% filter((`SALE PRICE` > 20 & `SALE PRICE` < 100000000) & BuildingAge > 0 & BuildingAge < 500 & `TOTAL UNITS` > 0 & `TOTAL UNITS`< 500 & `GROSS SQUARE FEET`> 0 & `LAND SQUARE FEET` > 0 ) # Data exploration and visualization # ---------------------------------- # Histogram to show price distribution hist(livingspaces$`SALE PRICE`) # Repeat histogram with a smaller part of the data set livingspaces %>% filter(`SALE PRICE` > 100000 & `SALE PRICE` < 5000000) %>% ggplot(aes(`SALE PRICE`)) + geom_histogram(binwidth = 100000, fill = "lightsteelblue") # Note the peak at the mean value due to replacement of NA with the mean # Histograms of the predictors livingspaces %>% ggplot(aes(BuildingAge)) + geom_histogram(fill = "lightsteelblue") livingspaces %>% ggplot(aes(`GROSS SQUARE FEET`)) + geom_histogram(fill = "lightsteelblue") livingspaces %>% ggplot(aes(`LAND SQUARE FEET`)) + geom_histogram(fill = "lightsteelblue") livingspaces %>% ggplot(aes(`TOTAL UNITS`)) + geom_histogram(fill = "lightsteelblue") # Measure how skewed the sale price data is skewness(livingspaces$`SALE PRICE`, type = 1) # Skewnewss of predictors skewness(livingspaces$BuildingAge, type = 1) skewness(livingspaces$`GROSS SQUARE FEET`, type = 1) skewness(livingspaces$`LAND SQUARE FEET`, type = 1) skewness(livingspaces$`TOTAL UNITS`, type = 1) # All skewed except for BuildingAge. Something to be considered in the analysis later. 
# Dot plot of all sale prices divided into the category borough livingspaces %>% ggplot(aes(BOROUGH, `SALE PRICE`)) + geom_point(color = "lightblue") + ggtitle("Sale prices by NYC borough") + coord_flip() # Plot of the frequency in each borough livingspaces %>% ggplot(aes(BOROUGH)) + geom_bar(fill = "lightsteelblue") + ylab("Property count") + ggtitle("Frequency by NYC borough") # Note that the number of points for Manhattan has dropped # The mean sale price sorted by Borough livingspaces %>% group_by(BOROUGH) %>% summarise(n = n(), avg = mean(`SALE PRICE`), se = sd(`SALE PRICE`)/sqrt(n())) %>% ggplot(aes(x = BOROUGH, y = avg, ymin = avg - 2*se, ymax = avg + 2*se)) + geom_point() + geom_errorbar() + xlab("Borough") + ylab("Mean sale price") + ggtitle("Sale prices by NYC borough") # Note that Manhattan prices are well above the average for the remaining boroughs # Does the sale price vary according to sale date? i.e. are prices rising? livingspaces %>% ggplot(aes(`SALE DATE`,`SALE PRICE`)) + geom_point() + geom_smooth(na.rm = TRUE, color = "red", size = 0.1, method = lm) + ggtitle("Sale prices by date of sale") # No major change in price over time, so can be excluded from model # Does the sale price vary according to building age? livingspaces %>% ggplot(aes(BuildingAge,`SALE PRICE`)) + geom_point() + geom_smooth(color = "red", size = 0.1, method = lm) + xlab("Building Age (years)") + ylab("Sale Price") + ggtitle("Sale prices by age of building") # Buildings around 100 years old tend to have higher sale price # Does the sale price vary according to number of units? livingspaces %>% ggplot(aes(`TOTAL UNITS`,`SALE PRICE`)) + geom_point() + geom_smooth(color = "red", size = 0.1, method = lm) + xlab("Number of Units") + ylab("Sale Price") + ggtitle("Sale prices by number of units") # Relationship looks good for higher sale prices (i.e. some prices are probably invalid) # Does the sale price vary according to the lot area of the property? 
livingspaces %>% ggplot(aes(`LAND SQUARE FEET`,`SALE PRICE`)) + geom_point() + geom_smooth(color = "red", size = 0.1, method = lm) + xlab("Land Square Feet") + ylab("Sale Price") + ggtitle("Sale prices by area of lot") # For reasonable-sized lots the relationship looks good # Does the sale price vary according to the actual size of the living space (i.e gross square feet)? livingspaces %>% ggplot(aes(`GROSS SQUARE FEET`,`SALE PRICE`)) + geom_point() + geom_smooth(color = "red", size = 0.1, method = lm) + xlab("Gross Square Feet") + ylab("Sale Price") + ggtitle("Sale prices by living area") # Same as for lot size, but again the sale prices close to zero seem to affect the relationship. # Consider these outliers for the model # Data partitioning and further pre-processing # -------------------------------------------- # Remove unnecessary columns livingspaces <- livingspaces[, !(colnames(livingspaces) %in% c("X1", "BLOCK", "NEIGHBORHOOD", "BUILDING CLASS CATEGORY", "TAX CLASS AT PRESENT", "LOT", "EASE-MENT", "BUILDING CLASS AT PRESENT", "ADDRESS", "APARTMENT NUMBER", "ZIP CODE", "RESIDENTIAL UNITS", "COMMERCIAL UNITS", "TAX CLASS AT TIME OF SALE", "YEAR BUILT", "BUILDING CLASS AT TIME OF SALE", "SALE DATE"))] # Create data partition with 10% in the test/validation set set.seed(1) test_index <- createDataPartition(y = livingspaces$`SALE PRICE`, times = 1, p = 0.1, list = FALSE) livingtrain <- livingspaces[-test_index,] livingtest <- livingspaces[test_index,] rm(test_index) # Training the models and using them to predict sale price in the test set # ------------------------------------------------------------------------ # Definition of RMSE RMSE <- function(true_price, predicted_price){ sqrt(mean((true_price - predicted_price)^2)) } # Note that due to the skewed price data the RMSE comes out very large # These data should be log transformed but the model fits do not work on log transformed data # RMSE does decrease by using models other than linear regression # Model 
1: Just predict that the sale price will be the mean mu_hat <- mean(livingtrain$`SALE PRICE`) meanRMSE <- RMSE(livingtest$`SALE PRICE`, mu_hat) rmse_results <- data_frame(Model = "Mean sale price", RMSE = meanRMSE) rmse_results # Model 2: Use linear regression of only the Gross Square Feet lmGross <- train(`SALE PRICE` ~ `GROSS SQUARE FEET`, data=livingtrain, method = "lm") lmGross predictedmodel2 <- predict(lmGross, livingtest) lmGrossRMSE <- RMSE(livingtest$`SALE PRICE`, predictedmodel2) rmse_results <- bind_rows(rmse_results, data_frame(Model="Linreg Gross Area", RMSE = lmGrossRMSE )) rmse_results %>% knitr::kable() # Model 3: Use linear regression with all predictors lmAll <- train(`SALE PRICE` ~ ., data=livingtrain, method = "lm") lmAll predictedmodel3 <- predict(lmAll, livingtest) lmAllRMSE <- RMSE(livingtest$`SALE PRICE`, predictedmodel3) rmse_results <- bind_rows(rmse_results, data_frame(Model="Linreg All Predictors", RMSE = lmAllRMSE )) rmse_results %>% knitr::kable() # Model 4: Use linear regression but exclude the predictor 'Borough' lmnoBorough <- train(`SALE PRICE` ~ BuildingAge + `TOTAL UNITS` + `LAND SQUARE FEET` + `GROSS SQUARE FEET`, data=livingtrain, method = "lm") lmnoBorough predictedmodel4 <- predict(lmnoBorough, livingtest) lmnoBoroughRMSE <- RMSE(livingtest$`SALE PRICE`, predictedmodel4) rmse_results <- bind_rows(rmse_results, data_frame(Model="Linreg without Borough", RMSE = lmnoBoroughRMSE )) rmse_results %>% knitr::kable() # Model 5: Use lasso regression # Tune for fraction (default uses 0.1, 0.5, 0.9) fitLasso <- train(`SALE PRICE` ~ ., data=livingtrain, method = "lasso", tuneGrid = data.frame(fraction = seq(0.1, 1, 0.1))) fitLasso predictedmodel5 <- predict(fitLasso, livingtest) lassoRMSE <- RMSE(livingtest$`SALE PRICE`, predictedmodel5) rmse_results <- bind_rows(rmse_results, data_frame(Model="Lasso regression", RMSE = lassoRMSE )) rmse_results %>% knitr::kable() # Model 6: Use principal component analysis # Tune for ncomp (default uses 
1, 2, 3) fitPCR <- train(`SALE PRICE` ~ ., data=livingtrain, method = "pcr", tuneGrid = data.frame(ncomp = seq(1, 5, 0.5))) fitPCR predictedmodel6 <- predict(fitPCR, livingtest) PCRRMSE <- RMSE(livingtest$`SALE PRICE`, predictedmodel6) rmse_results <- bind_rows(rmse_results, data_frame(Model="Principal component analysis", RMSE = PCRRMSE )) rmse_results %>% knitr::kable() # Model 7: Elasticnet regression fitEnet <- train(`SALE PRICE` ~ ., data=livingtrain, method = "enet") fitEnet predictedmodel7 <- predict(fitEnet, livingtest) EnetRMSE <- RMSE(livingtest$`SALE PRICE`, predictedmodel7) rmse_results <- bind_rows(rmse_results, data_frame(Model="Elasticnet regression", RMSE = EnetRMSE )) rmse_results %>% knitr::kable() # Model 8: Ridge regression fitRidge <- train(`SALE PRICE` ~ ., data=livingtrain, method = "ridge", tuneGrid = data.frame(lambda = seq(0, 0.1, 0.01))) fitRidge predictedmodel8 <- predict(fitRidge, livingtest) RidgeRMSE <- RMSE(livingtest$`SALE PRICE`, predictedmodel8) rmse_results <- bind_rows(rmse_results, data_frame(Model="Ridge regression", RMSE = RidgeRMSE )) rmse_results %>% knitr::kable() # Model 9: Independent Component Regression fitICR <- train(`SALE PRICE` ~ ., data=livingtrain, method = "icr", tuneGrid = data.frame(n.comp = seq(1, 5, 0.5))) fitICR predictedmodel9 <- predict(fitICR, livingtest) ICRRMSE <- RMSE(livingtest$`SALE PRICE`, predictedmodel9) rmse_results <- bind_rows(rmse_results, data_frame(Model="ICR regression", RMSE = ICRRMSE )) rmse_results %>% knitr::kable() # Model 10: Bayesian ridge regression # Note: Longer running time and outputs iteration of t and m in the R Console fitBridge <- train(`SALE PRICE` ~ ., data=livingtrain, method = "bridge") fitBridge predictedmodel10 <- predict(fitBridge, livingtest) BridgeRMSE <- RMSE(livingtest$`SALE PRICE`, predictedmodel10) rmse_results <- bind_rows(rmse_results, data_frame(Model="Bayesian Ridge regression", RMSE = BridgeRMSE )) # The final list of models sorted by increasing RMSE 
rmse_results %>% arrange(RMSE) %>% knitr::kable()
shade_before <- function(x, y, boundary, ...){ # Shades area under the curve from boundary to values of x that are lower. # TODO: BUG: Only printing graph with dots, specifying lines not working lower <- x[1] shade_between(x, y, lower, boundary) }
/plot.convenience/R/shade_before.R
no_license
gpawlik/con-functions-R
R
false
false
264
r
shade_before <- function(x, y, boundary, ...){ # Shades area under the curve from boundary to values of x that are lower. # TODO: BUG: Only printing graph with dots, specifying lines not working lower <- x[1] shade_between(x, y, lower, boundary) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dic.fit.R \name{pgammaOff1} \alias{pgammaOff1} \title{Function that calculates pgamma with a offset of 1 (i.e., 1 is equivalent to 0)} \usage{ pgammaOff1(x, replace0 = FALSE, ...) } \arguments{ \item{x}{value to calculate pgamma at} \item{replace0}{should we replace 0 with epsilon} \item{...}{other parameters to pgamma} } \value{ pgamma offset } \description{ Function that calculates pgamma with a offset of 1 (i.e., 1 is equivalent to 0) }
/man/pgammaOff1.Rd
no_license
nickreich/coarseDataTools
R
false
true
524
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dic.fit.R \name{pgammaOff1} \alias{pgammaOff1} \title{Function that calculates pgamma with a offset of 1 (i.e., 1 is equivalent to 0)} \usage{ pgammaOff1(x, replace0 = FALSE, ...) } \arguments{ \item{x}{value to calculate pgamma at} \item{replace0}{should we replace 0 with epsilon} \item{...}{other parameters to pgamma} } \value{ pgamma offset } \description{ Function that calculates pgamma with a offset of 1 (i.e., 1 is equivalent to 0) }
require(lubridate) require(dplyr) require(magrittr) require(ggplot2) require(reshape2) require(stargazer) require(readr) require(tidyr) require(broom) require(lme4) require(arm) require(rms) require(mfx) #win #setwd("C:/Users/fh/Documents/GitHub/parlbias/data") #mac setwd("~/GitHub/parlbias/data") ### READ IN DATA ft<-readRDS("ft.rds") ### REGRESSION MODELS #define basic models m1<-lm(m1f<-as.formula(secs~copartisan),data=ft) m2<-lm(m2f<-as.formula(secs~copartisan+timeofday),data=ft) m3<-lm(m3f<-as.formula(secs~copartisan+timeofday+female+debatetype),data=ft) m4<-lm(m4f<-as.formula(secs~copartisan+timeofday+female+debatetype+coarseparty),data=ft) m5<-lm(m5f<-as.formula(secs~copartisan+timeofday+female+debatetype+coarseparty+chairparty),data=ft) stargazer(m1,m2,m3,m4,m5,type="text",omit=c("factor","debate"),omit.stat=c("f","ser")) stargazer(m1,m2,m3,m4,m5,type="text",omit=c("debate"),omit.stat=c("f","ser")) summary(mx<-lm(secs~copartisan+timeofday+female+factor(coarseparty)+factor(chairparty)+debate,data=ft)) #quick robustness checks summary(lm(m5f,data=ft)) summary(lm(m5f,data=sample_frac(ft,.5,replace=F))) summary(lm(m5f,data=subset(ft,timeofday>11))) summary(lm(m5f,data=subset(ft,pm==0))) # identify followups ft$followup<-0 for (i in 5:nrow(ft)){ if (ft$chair[i]==0 & ft$fullname[i]==ft$fullname[i-2] & !(ft$fullname[i]==ft$fullname[i-4])){ ft$followup[i]<-1 } } table(ft$followup) ggplot(subset(ft,copartisan<2),aes(x=secs)) + geom_density() + facet_grid(copartisan~followup) + theme_bw() summary(lm(m5f,data=subset(ft,followup==0))) #conclusion: data is robust to the exclusion of followups, but they do not (solely) explain the bimodal distribution #collect effect estimates in data frame effectests<-data.frame(est=rep(NA,10),se=rep(NA,10),method=c(rep("OLS",5),rep("Logit",5)),model=rep(1:5,2)) effectests[1,1:2]<-tidy(m1)[2,2:3] effectests[2,1:2]<-tidy(m2)[2,2:3] effectests[3,1:2]<-tidy(m3)[2,2:3] effectests[4,1:2]<-tidy(m4)[2,2:3] effectests[5,1:2]<-tidy(m5)[2,2:3] 
#what is the effect size in standardized terms? get cohen's d cohensd <- effectests$est / sd(ft$secs) barplot(cohensd) #get average number of speeches lengths<-ft %>% group_by(debate,chairname) %>% summarise(speeches=sum(chair)) %>% filter(speeches>0) meanspeechesperchair<-mean(lengths$speeches) maxspeechesperchair<-max(lengths$speeches) #how many seconds does 2.8 seconds add up to over the average debate per chair? effectests$est[4]*maxspeechesperchair summary(m1.logit<-glm(secsgt60~copartisan,data=ft,family=binomial())) summary(m2.logit<-glm(secsgt60~copartisan+timeofday,data=ft,family=binomial())) summary(m3.logit<-glm(secsgt60~copartisan+timeofday+female+debatetype,data=ft,family=binomial())) summary(m4.logit<-glm(secsgt60~copartisan+timeofday+female+debatetype+factor(coarseparty),data=ft,family=binomial())) summary(m5.logit<-glm(secsgt60~copartisan+timeofday+female+debatetype+factor(coarseparty)+factor(chairparty),data=ft,family=binomial())) effectests[6,1:2]<-logitmfx(m1.logit$formula,data=ft)$mfxest[1,1:2]*100 effectests[7,1:2]<-logitmfx(m2.logit$formula,data=ft)$mfxest[1,1:2]*100 effectests[8,1:2]<-logitmfx(m3.logit$formula,data=ft)$mfxest[1,1:2]*100 effectests[9,1:2]<-logitmfx(m4.logit$formula,data=ft)$mfxest[1,1:2]*100 effectests[10,1:2]<-logitmfx(m5.logit$formula,data=ft)$mfxest[1,1:2]*100 #magnitude check sd(ft$secs,na.rm=T) 3/sd(ft$secs,na.rm=T) #for the main models, cluster observations by presiding chair m1rob<-robcov(ols(m1f,data=ft,x=T,y=T),cluster=ft$chairname) m2rob<-robcov(ols(m2f,data=ft,x=T,y=T),cluster=ft$chairname) m3rob<-robcov(ols(m3f,data=ft,x=T,y=T),cluster=ft$chairname) m4rob<-robcov(ols(m4f,data=ft,x=T,y=T),cluster=ft$chairname) m5rob<-robcov(ols(m5f,data=ft,x=T,y=T),cluster=ft$chairname) m5intposdifltmedian<-robcov(ols(m5f,data=subset(ft,intposdif<=2.34),x=T,y=T),cluster=subset(ft,intposdif<=2.34)$chairname) m5ordposdifltmedian<-robcov(ols(m5f,data=subset(ft,ordposdif<=2),x=T,y=T),cluster=subset(ft,ordposdif<=2)$chairname) 
m5cobloc<-ols(m5f,data=subset(ft,cobloc==1),x=T,y=T) #setup up varying slopes model to test how the bias varies by mlm3<-lmer(secs~copartisan+timeofday+female+(1|coarseparty)+(1+copartisan|chairparty),data=ft) ranef(mlm3) fixef(mlm3) partyranefs<-data.frame(party=rownames(ranef(mlm3)$chairparty),coef=fixef(mlm3)[2]+ranef(mlm3)$chairparty[,2],se=se.ranef(mlm3)$chairparty[,2]) partyranefs partyranefs<- ft %>% group_by(chairparty) %>% summarise(ordpos=mean(chairordpos,na.rm=T)) %>% rename(party=chairparty) %>% left_join(partyranefs,.,by="party") partyranefs #varying slopes by chairman name mlm3_chairs<-lmer(secs~copartisan+timeofday+female+(1|coarseparty)+(1+copartisan|chairname),data=ft) ranef(mlm3_chairs) fixef(mlm3_chairs) chairranefs<-data.frame(chair=rownames(ranef(mlm3_chairs)$chairname),coef=fixef(mlm3)[2]+ranef(mlm3_chairs)$chairname[,2],se=se.ranef(mlm3_chairs)$chairname[,2]) chairranefs<-ft %>% group_by(chairname,chairparty) %>% summarise(ordpos=mean(chairordpos)) %>% rename(party=chairparty,chair=chairname) %>% left_join(chairranefs,.,by="chair") %>% arrange(.,ordpos,-coef) %>% mutate(chairorder=1:20,chairparty=paste(chair," ","(",party,")",sep="")) #robustness check 1: only leadership parties m1robrs<-robcov(ols(m1f,data=subset(ft,leadshipparty==1),x=T,y=T),cluster=subset(ft,leadshipparty==1)$chairname) m2robrs<-robcov(ols(m2f,data=subset(ft,leadshipparty==1),x=T,y=T),cluster=subset(ft,leadshipparty==1)$chairname) m3robrs<-robcov(ols(m3f,data=subset(ft,leadshipparty==1),x=T,y=T),cluster=subset(ft,leadshipparty==1)$chairname) m4robrs<-robcov(ols(m4f,data=subset(ft,leadshipparty==1),x=T,y=T),cluster=subset(ft,leadshipparty==1)$chairname) m5robrs<-robcov(ols(m5f,data=subset(ft,leadshipparty==1),x=T,y=T),cluster=subset(ft,leadshipparty==1)$chairname) #robustness check 2: debate fixed effects m1fdfe<-as.formula(secs~copartisan+debate) m2fdfe<-as.formula(secs~copartisan+timeofday+debate) m3fdfe<-as.formula(secs~copartisan+timeofday+female+debate) 
m4fdfe<-as.formula(secs~copartisan+timeofday+female+coarseparty+debate) m5fdfe<-as.formula(secs~copartisan+timeofday+female+coarseparty+chairparty+debate) m1robdfe<-robcov(ols(m1fdfe,data=ft,x=T,y=T),cluster=ft$chairname) m2robdfe<-robcov(ols(m2fdfe,data=ft,x=T,y=T),cluster=ft$chairname) m3robdfe<-robcov(ols(m3fdfe,data=ft,x=T,y=T),cluster=ft$chairname) m4robdfe<-robcov(ols(m4fdfe,data=ft,x=T,y=T),cluster=ft$chairname) m5robdfe<-robcov(ols(m5fdfe,data=ft,x=T,y=T),cluster=ft$chairname) #robustness check 3: exclude prime ministers m1robexpm<-robcov(ols(m1f,data=subset(ft,pm==0),x=T,y=T),cluster=subset(ft,pm==0)$chairname) m2robexpm<-robcov(ols(m2f,data=subset(ft,pm==0),x=T,y=T),cluster=subset(ft,pm==0)$chairname) m3robexpm<-robcov(ols(m3f,data=subset(ft,pm==0),x=T,y=T),cluster=subset(ft,pm==0)$chairname) m4robexpm<-robcov(ols(m4f,data=subset(ft,pm==0),x=T,y=T),cluster=subset(ft,pm==0)$chairname) m5robexpm<-robcov(ols(m5f,data=subset(ft,pm==0),x=T,y=T),cluster=subset(ft,pm==0)$chairname) #get list of chairmen by debate allchairmen<-ftall %>% filter(chair==1) %>% group_by(chairname,debate) %>% summarise(remarks=length(chairname)) seatvec<-c(16,rep(22,2),rep(47,7),rep(8,3),rep(18,2),47,23,16,23,16,9,47,44,34,17,17,13,45,rep(47,3),45,rep(47,4),rep(22,4),37,25,22,25,22,16,14,rep(46,2)) #careful! here I manually type in the seats for each chairman's party by debate allchairmen$seats<-seatvec allchairmen<-allchairmen %>% group_by(chairname) %>% summarise(remarks=sum(remarks),debates=length(debate),avgseats=mean(seats)) %>% mutate(president=ifelse(chairname %in% c("Thor Pedersen","Mogens Lykketoft","Pia Kjaersgaard"),1,0)) #can we predict chairman activity? 
summary(remarksm1<-lm(remarks~president+debates,data=allchairmen)) summary(remarksm2<-lm(remarks~president+debates+avgseats,data=allchairmen)) summary(remarksm3<-lm(remarks~president+avgseats,data=allchairmen)) allchairmen$exest<-NA allchairmen$exse<-NA for (i in 1:nrow(allchairmen)){ exm<-robcov(ols(m5f,data=subset(ft,chairname!=allchairmen$chairname[i]),x=T,y=T),cluster=subset(ft,chairname!=allchairmen$chairname[i])$chairname) allchairmen[i,6:7]<-c(exm$coefficients[2],sqrt(diag(exm$var))[2]) } ### TABLES checkmarks<-c("Speaker party FE & & & & $\\checkmark$ & $\\checkmark$ \\\\", "Chair party FE & & & & & $\\checkmark$ \\\\") #, "Debate FE & & & & $\\checkmark$ & $\\checkmark$ \\\\") covarlabs<-c("Copartisan","Time of day","Gender (female)","Debate type (Opening)","Intercept") regtab1<-stargazer(m1rob,m2rob,m3rob,m4rob,m5rob,style="apsr",omit=c("coarse","chair"), dep.var.labels="Speaking time (seconds)",dep.var.labels.include=T,font.size="footnotesize", label="parlbias_regtab1",column.sep.width="-5pt",star.cutoffs=c(.1,.05,.01),align=T,title="OLS models of speaking time",digits=2, covariate.labels=covarlabs) regtab1<-c(regtab1[1:24],checkmarks,regtab1[25:length(regtab1)]) regtab1 writeLines(regtab1,con="../tables/parlbias_regtab1.txt") regtab1logit<-stargazer(m1.logit,m2.logit,m3.logit,m4.logit,m5.logit,style="apsr",omit=c("coarse","chair","debate"), dep.var.labels="Dummy: speaking time exceeds 60 seconds",dep.var.labels.include=T,font.size="footnotesize", label="parlbias_regtab1logit",column.sep.width="-5pt",covariate.labels=covarlabs,star.cutoffs=c(.1,.05,.01), align=T,title="Logit models of exceeding standard speaking time",digits=2) regtab1logit regtab1logit<-c(regtab1logit[1:22],checkmarks,regtab1logit[23:length(regtab1logit)]) writeLines(regtab1logit,con="../tables/parlbias_regtab1logit.txt") modscolumnlabels<-c("Full","Distance$\\leq$median (interval)","Distance$\\leq$median (ordinal)","Same bloc") modscheckmarks<-c("Speaker party FE & $\\checkmark$ & 
$\\checkmark$ & $\\checkmark$ & $\\checkmark$ \\\\", "Chair party FE & $\\checkmark$ & $\\checkmark$ & $\\checkmark$ & $\\checkmark$ \\\\") #, "Debate FE & $\\checkmark$ & $\\checkmark$ & $\\checkmark$ & $\\checkmark$ \\\\") regtabmods<-stargazer(m5rob,m5intposdifltmedian,m5ordposdifltmedian,m5cobloc,style="apsr",omit=c("coarse","chair","debate"), dep.var.labels="Speaking time (seconds)",dep.var.labels.include=T,font.size="footnotesize", label="parlbias_regtabmods",column.sep.width="-5pt",covariate.labels=covarlabs,star.cutoffs=c(.1,.05,.01), align=T,title="Tests of political moderators",digits=2,column.labels=modscolumnlabels) regtabmods regtabmods<-c(regtabmods[1:23],modscheckmarks,regtabmods[24:length(regtabmods)]) regtabmods writeLines(regtabmods,con="../tables/parlbias_regtabmods.txt") modeffectests<-data.frame(est=rep(NA,4),se=rep(NA,4),model=modscolumnlabels) #get coef and se on copartisan for each model modeffectms<-list(m5rob,m5intposdifltmedian,m5ordposdifltmedian,m5cobloc) for (i in 1:4){ modeffectests[i,1]<-coef(modeffectms[[i]])["copartisan"] modeffectests[i,2]<-sqrt(diag(modeffectms[[i]]$var))["copartisan"] } modeffectests #sub in <= for latex symbols modeffectests$model<-modeffectests$model %>% as.character() %>% gsub("\\$\\\\leq\\$","<=",.) 
#robustness check 1: restricted sample regtabrs<-stargazer(m1robrs,m2robrs,m3robrs,m4robrs,m5robrs,style="apsr",omit=c("coarse","chair"), dep.var.labels="Speaking time (seconds)",dep.var.labels.include=T,font.size="footnotesize", label="parlbias_regtabrs",column.sep.width="-5pt",star.cutoffs=c(.1,.05,.01),align=T,title="Results for only members of leadership parties",digits=2, covariate.labels=covarlabs) regtabrs<-c(regtabrs[1:24],checkmarks,regtabrs[25:length(regtabrs)]) regtabrs writeLines(regtabrs,con="../tables/parlbias_regtabrs.txt") #robustness check 2: debate-specific fixed effects dfecheckmarks<-c("Speaker party FE & & & $\\checkmark$ & & $\\checkmark$ \\\\", "Chair party FE & & & & & $\\checkmark$ \\\\", "Debate FE & $\\checkmark$ & $\\checkmark$ & $\\checkmark$ & $\\checkmark$ & $\\checkmark$ \\\\") dfecovarlabs<-c("Copartisan","Time of day","Gender (female)","Intercept") regtabdfe<-stargazer(m1robdfe,m2robdfe,m3robdfe,m4robdfe,m5robdfe,style="apsr",omit=c("coarse","chair","debate"), dep.var.labels="Speaking time (seconds)",dep.var.labels.include=T,font.size="footnotesize", label="parlbias_regtabdfe",column.sep.width="-5pt",star.cutoffs=c(.1,.05,.01),align=T,title="Results with debate-specific fixed effects",digits=2, covariate.labels=dfecovarlabs) regtabdfe<-c(regtabdfe[1:22],dfecheckmarks,regtabdfe[23:length(regtabdfe)]) regtabdfe writeLines(regtabdfe,con="../tables/parlbias_regtabdfe.txt") #robustness check 3: excluding pm's regtabexpm<-stargazer(m1robexpm,m2robexpm,m3robexpm,m4robexpm,m5robexpm,style="apsr",omit=c("coarse","chair"), dep.var.labels="Speaking time (seconds)",dep.var.labels.include=T,font.size="footnotesize", label="parlbias_regtabexpm",column.sep.width="-5pt",star.cutoffs=c(.1,.05,.01),align=T,title="Results excluding prime ministers",digits=2, covariate.labels=covarlabs) regtabexpm<-c(regtabexpm[1:24],checkmarks,regtabexpm[25:length(regtabexpm)]) regtabexpm writeLines(regtabexpm,con="../tables/parlbias_regtabexpm.txt") #table for reg 
predicting remarks presided over remarksregtab<-stargazer(remarksm1,remarksm2,remarksm3,style="apsr", dep.var.labels="Number of remarks enforced",dep.var.labels.include=T,font.size="footnotesize", label="parlbias_remarksregtab",column.sep.width="-5pt",star.cutoffs=c(.1,.05,.01), align=T,title="Model predicting number of remarks enforced by chairmen",digits=2, covariate.labels=c("Head chairman","No. of debates in leadership","No. of party seats")) writeLines(remarksregtab,con="../tables/parlbias_remarksregtab.txt") ### SUMMARY STATS TABLES ftall$type<-NA ftall$type[ftall$secs<150 & ftall$secs>10 & !is.na(ftall$copartisan)]<-"Brief remark" ftall$type[ftall$secs>=150]<-"Spokesperson speech" ftall$type[ftall$secs>=150 & ftall$pm==1]<-"PM speech" require(dplyr) totaln<-nrow(subset(ftall,chair==0 & year(starttime)>2000 & !is.na(type))) totalsecs<-sum(subset(ftall,chair==0 & year(starttime)>2000 & !is.na(type))$secs) options(digits=2) sumstatstab<- ftall %>% filter(chair==0 & year(starttime)>2000) %>% filter(!is.na(type)) %>% group_by(type) %>% summarise(count=n(),nshare=100*(count/totaln),secshare=100*sum(secs)/totalsecs) sumstatstab #add total line at bottom sumstatstab[4,]<-c("Total",colSums(sumstatstab[,2:4])) class(sumstatstab$nshare)<-"numeric" class(sumstatstab$secshare)<-"numeric" sumstatstab$nshare<-as.character(format(sumstatstab$nshare,digits=2)) sumstatstab$secshare<-as.character(format(sumstatstab$secshare,digits=2)) sumstatstab str(sumstatstab) sumstatstabtex<-stargazer(sumstatstab,summary=F,digits=2,title="Types of speeches in opening and closing debates in the Folketing",label="parlbias_sumstatstab",font.size="footnotesize",align=T,colnames=T,rownames=F) sumstatstabtex[12]<-"\\multicolumn{1}{l}{Type} & \\multicolumn{1}{r}{Number} & \\multicolumn{1}{r}{Share (numeric)} & \\multicolumn{1}{r}{Share (time-weighted)} \\\\ " #awkward left alignment of labels sumstatstabtex<-gsub("column\\{1\\}\\{c\\}\\{","column{1}{l}{",sumstatstabtex) 
writeLines(sumstatstabtex,con="../tables/parlbias_sumstatstab.txt") options(digits=7) ### PLOTS #reorder factor levels effectests<-within(effectests,method<-factor(method,levels=c("OLS","Logit"))) #plot estimates ggplot(effectests,aes(x=est,y=reorder(model,rep(5:1,2)))) + geom_point(size=2.5) + facet_grid(.~method,scales="free_x") + geom_errorbarh(aes(xmin=est-1.96*se,xmax=est+1.96*se),height=0,size=.5) + geom_errorbarh(aes(xmin=est-1.65*se,xmax=est+1.65*se),height=0,size=1.2) + expand_limits(x=0) + geom_vline(xintercept=0,linetype="dashed") + xlab("Estimate (seconds/percentage points)") + ylab("Model") + theme_bw() + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), strip.background = element_blank(), panel.border = element_rect(colour = "black")) ggsave(file="../figures/parlbias_effectplot.pdf",height=4,width=9) ggsave(file="../figures/parlbias_effectplot.png",height=4,width=9) ggplot(modeffectests,aes(x=est,y=reorder(model,c(4,3,2,1)))) + geom_point(size=2.5) + geom_errorbarh(aes(xmin=est-1.96*se,xmax=est+1.96*se),height=0,size=.5) + geom_errorbarh(aes(xmin=est-1.65*se,xmax=est+1.65*se),height=0,size=1.2) + expand_limits(x=0) + geom_vline(xintercept=0,linetype="dashed") + xlab("Estimate (seconds)") + ylab("Model") + theme_bw() + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), strip.background = element_blank(), panel.border = element_rect(colour = "black")) ggsave(file="../figures/parlbias_modeffectplot.pdf",height=4,width=9) ggsave(file="../figures/parlbias_modeffectplot.png",height=4,width=9) #factors for copartisanship ft$copartisan_factor<-factor(ft$copartisan,labels=c("Non-copartisan","Co-partisan")) ft$copartisan_factor<-factor(ft$copartisan_factor,levels(ft$copartisan_factor)[c(2,1)]) ft<-ft %>% mutate(copartisancobloc=factor(copartisan+cobloc,labels=c("Non-copartisan, other bloc","Non-copartisan, same bloc","Co-partisan"))) ggplot(subset(ft,chair==0 & !is.na(copartisancobloc)),aes(x=secs)) + 
geom_density(fill="gray",alpha=.5,adjust=1.5) + facet_grid(.~copartisancobloc) + geom_vline(xintercept=60,linetype="dashed") + xlab("Speech duration (seconds)") + ylab("") + scale_y_continuous(breaks=c(0,.005,.01,.015),labels=c("0",".005",".01",".015")) + scale_x_continuous(breaks=c(0,30,60,90,120),labels=c("0","30","60","90","120")) + theme_bw() + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), strip.background = element_blank(), panel.border = element_rect(colour = "black")) ggsave(file="../figures/parlbias_dens.pdf",height=5,width=9) ggsave(file="../figures/parlbias_dens.png",height=5,width=9) ## varying coefficients by party ggplot(subset(partyranefs,party!="Other"),aes(x=coef,y=reorder(party,-ordpos))) + geom_point(size=2.5) + geom_errorbarh(aes(xmin=coef-1.96*se,xmax=coef+1.96*se),height=0,size=.5) + geom_errorbarh(aes(xmin=coef-1.65*se,xmax=coef+1.65*se),height=0,size=1.2) + expand_limits(x=0) + geom_vline(xintercept=0,linetype="dashed") + geom_vline(xintercept=fixef(mlm3)[2],linetype="dashed",color="grey40") + xlab("Estimate (seconds)") + ylab("Chairman's party") + theme_bw() + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), strip.background = element_blank(), panel.border = element_rect(colour = "black")) ggsave(file="../figures/parlbias_partyranefs.pdf",height=4,width=9) ggsave(file="../figures/parlbias_partyranefs.png",height=4,width=9) ## varying coefficients by chairman ggplot(chairranefs,aes(x=coef,y=reorder(chairparty,-chairorder))) + geom_point(size=2.5) + geom_errorbarh(aes(xmin=coef-1.96*se,xmax=coef+1.96*se),height=0,size=.5) + geom_errorbarh(aes(xmin=coef-1.65*se,xmax=coef+1.65*se),height=0,size=1.2) + expand_limits(x=0) + geom_vline(xintercept=0,linetype="dashed") + geom_vline(xintercept=fixef(mlm3)[2],linetype="dashed",color="grey40") + xlab("Estimate (seconds)") + ylab("Chairman") + theme_bw() + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), 
strip.background = element_blank(), panel.border = element_rect(colour = "black")) ggsave(file="../figures/parlbias_chairranefs.pdf",height=6,width=9) ggsave(file="../figures/parlbias_chairranefs.png",height=6,width=9) # estimate coefficient excluding each chairman ggplot(allchairmen,aes(x=exest,y=chairname)) + geom_point(size=2.5) + geom_errorbarh(aes(xmin=exest-1.96*exse,xmax=exest+1.96*exse),height=0,size=.5) + geom_errorbarh(aes(xmin=exest-1.65*exse,xmax=exest+1.65*exse),height=0,size=1.2) + expand_limits(x=0) + geom_vline(xintercept=0,linetype="dashed") + geom_vline(xintercept=m5rob$coefficients[2],linetype="dashed",color="grey40") + xlab("Estimate excluding chairman (seconds)") + ylab("Chairman") + theme_bw() + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), strip.background = element_blank(), panel.border = element_rect(colour = "black")) ggsave(file="../figures/parlbias_exchairests.pdf",height=6,width=9) ggsave(file="../figures/parlbias_exchairests.png",height=6,width=9) ## plot predicted remarks presided over seatsprdf1<-data.frame(expand.grid(debates=1:9,president=0:1,avgseats=21)) %>% bind_cols(.,as.data.frame(predict(remarksm2,newdata=.,se.fit=T))) %>% dplyr::select(debates,president,fit,se.fit)
/analysis/parlbias_analysis.R
no_license
fghjorth/parlbias
R
false
false
21,049
r
require(lubridate) require(dplyr) require(magrittr) require(ggplot2) require(reshape2) require(stargazer) require(readr) require(tidyr) require(broom) require(lme4) require(arm) require(rms) require(mfx) #win #setwd("C:/Users/fh/Documents/GitHub/parlbias/data") #mac setwd("~/GitHub/parlbias/data") ### READ IN DATA ft<-readRDS("ft.rds") ### REGRESSION MODELS #define basic models m1<-lm(m1f<-as.formula(secs~copartisan),data=ft) m2<-lm(m2f<-as.formula(secs~copartisan+timeofday),data=ft) m3<-lm(m3f<-as.formula(secs~copartisan+timeofday+female+debatetype),data=ft) m4<-lm(m4f<-as.formula(secs~copartisan+timeofday+female+debatetype+coarseparty),data=ft) m5<-lm(m5f<-as.formula(secs~copartisan+timeofday+female+debatetype+coarseparty+chairparty),data=ft) stargazer(m1,m2,m3,m4,m5,type="text",omit=c("factor","debate"),omit.stat=c("f","ser")) stargazer(m1,m2,m3,m4,m5,type="text",omit=c("debate"),omit.stat=c("f","ser")) summary(mx<-lm(secs~copartisan+timeofday+female+factor(coarseparty)+factor(chairparty)+debate,data=ft)) #quick robustness checks summary(lm(m5f,data=ft)) summary(lm(m5f,data=sample_frac(ft,.5,replace=F))) summary(lm(m5f,data=subset(ft,timeofday>11))) summary(lm(m5f,data=subset(ft,pm==0))) # identify followups ft$followup<-0 for (i in 5:nrow(ft)){ if (ft$chair[i]==0 & ft$fullname[i]==ft$fullname[i-2] & !(ft$fullname[i]==ft$fullname[i-4])){ ft$followup[i]<-1 } } table(ft$followup) ggplot(subset(ft,copartisan<2),aes(x=secs)) + geom_density() + facet_grid(copartisan~followup) + theme_bw() summary(lm(m5f,data=subset(ft,followup==0))) #conclusion: data is robust to the exclusion of followups, but they do not (solely) explain the bimodal distribution #collect effect estimates in data frame effectests<-data.frame(est=rep(NA,10),se=rep(NA,10),method=c(rep("OLS",5),rep("Logit",5)),model=rep(1:5,2)) effectests[1,1:2]<-tidy(m1)[2,2:3] effectests[2,1:2]<-tidy(m2)[2,2:3] effectests[3,1:2]<-tidy(m3)[2,2:3] effectests[4,1:2]<-tidy(m4)[2,2:3] effectests[5,1:2]<-tidy(m5)[2,2:3] 
#what is the effect size in standardized terms? get cohen's d cohensd <- effectests$est / sd(ft$secs) barplot(cohensd) #get average number of speeches lengths<-ft %>% group_by(debate,chairname) %>% summarise(speeches=sum(chair)) %>% filter(speeches>0) meanspeechesperchair<-mean(lengths$speeches) maxspeechesperchair<-max(lengths$speeches) #how many seconds does 2.8 seconds add up to over the average debate per chair? effectests$est[4]*maxspeechesperchair summary(m1.logit<-glm(secsgt60~copartisan,data=ft,family=binomial())) summary(m2.logit<-glm(secsgt60~copartisan+timeofday,data=ft,family=binomial())) summary(m3.logit<-glm(secsgt60~copartisan+timeofday+female+debatetype,data=ft,family=binomial())) summary(m4.logit<-glm(secsgt60~copartisan+timeofday+female+debatetype+factor(coarseparty),data=ft,family=binomial())) summary(m5.logit<-glm(secsgt60~copartisan+timeofday+female+debatetype+factor(coarseparty)+factor(chairparty),data=ft,family=binomial())) effectests[6,1:2]<-logitmfx(m1.logit$formula,data=ft)$mfxest[1,1:2]*100 effectests[7,1:2]<-logitmfx(m2.logit$formula,data=ft)$mfxest[1,1:2]*100 effectests[8,1:2]<-logitmfx(m3.logit$formula,data=ft)$mfxest[1,1:2]*100 effectests[9,1:2]<-logitmfx(m4.logit$formula,data=ft)$mfxest[1,1:2]*100 effectests[10,1:2]<-logitmfx(m5.logit$formula,data=ft)$mfxest[1,1:2]*100 #magnitude check sd(ft$secs,na.rm=T) 3/sd(ft$secs,na.rm=T) #for the main models, cluster observations by presiding chair m1rob<-robcov(ols(m1f,data=ft,x=T,y=T),cluster=ft$chairname) m2rob<-robcov(ols(m2f,data=ft,x=T,y=T),cluster=ft$chairname) m3rob<-robcov(ols(m3f,data=ft,x=T,y=T),cluster=ft$chairname) m4rob<-robcov(ols(m4f,data=ft,x=T,y=T),cluster=ft$chairname) m5rob<-robcov(ols(m5f,data=ft,x=T,y=T),cluster=ft$chairname) m5intposdifltmedian<-robcov(ols(m5f,data=subset(ft,intposdif<=2.34),x=T,y=T),cluster=subset(ft,intposdif<=2.34)$chairname) m5ordposdifltmedian<-robcov(ols(m5f,data=subset(ft,ordposdif<=2),x=T,y=T),cluster=subset(ft,ordposdif<=2)$chairname) 
m5cobloc<-ols(m5f,data=subset(ft,cobloc==1),x=T,y=T) #setup up varying slopes model to test how the bias varies by mlm3<-lmer(secs~copartisan+timeofday+female+(1|coarseparty)+(1+copartisan|chairparty),data=ft) ranef(mlm3) fixef(mlm3) partyranefs<-data.frame(party=rownames(ranef(mlm3)$chairparty),coef=fixef(mlm3)[2]+ranef(mlm3)$chairparty[,2],se=se.ranef(mlm3)$chairparty[,2]) partyranefs partyranefs<- ft %>% group_by(chairparty) %>% summarise(ordpos=mean(chairordpos,na.rm=T)) %>% rename(party=chairparty) %>% left_join(partyranefs,.,by="party") partyranefs #varying slopes by chairman name mlm3_chairs<-lmer(secs~copartisan+timeofday+female+(1|coarseparty)+(1+copartisan|chairname),data=ft) ranef(mlm3_chairs) fixef(mlm3_chairs) chairranefs<-data.frame(chair=rownames(ranef(mlm3_chairs)$chairname),coef=fixef(mlm3)[2]+ranef(mlm3_chairs)$chairname[,2],se=se.ranef(mlm3_chairs)$chairname[,2]) chairranefs<-ft %>% group_by(chairname,chairparty) %>% summarise(ordpos=mean(chairordpos)) %>% rename(party=chairparty,chair=chairname) %>% left_join(chairranefs,.,by="chair") %>% arrange(.,ordpos,-coef) %>% mutate(chairorder=1:20,chairparty=paste(chair," ","(",party,")",sep="")) #robustness check 1: only leadership parties m1robrs<-robcov(ols(m1f,data=subset(ft,leadshipparty==1),x=T,y=T),cluster=subset(ft,leadshipparty==1)$chairname) m2robrs<-robcov(ols(m2f,data=subset(ft,leadshipparty==1),x=T,y=T),cluster=subset(ft,leadshipparty==1)$chairname) m3robrs<-robcov(ols(m3f,data=subset(ft,leadshipparty==1),x=T,y=T),cluster=subset(ft,leadshipparty==1)$chairname) m4robrs<-robcov(ols(m4f,data=subset(ft,leadshipparty==1),x=T,y=T),cluster=subset(ft,leadshipparty==1)$chairname) m5robrs<-robcov(ols(m5f,data=subset(ft,leadshipparty==1),x=T,y=T),cluster=subset(ft,leadshipparty==1)$chairname) #robustness check 2: debate fixed effects m1fdfe<-as.formula(secs~copartisan+debate) m2fdfe<-as.formula(secs~copartisan+timeofday+debate) m3fdfe<-as.formula(secs~copartisan+timeofday+female+debate) 
m4fdfe<-as.formula(secs~copartisan+timeofday+female+coarseparty+debate) m5fdfe<-as.formula(secs~copartisan+timeofday+female+coarseparty+chairparty+debate) m1robdfe<-robcov(ols(m1fdfe,data=ft,x=T,y=T),cluster=ft$chairname) m2robdfe<-robcov(ols(m2fdfe,data=ft,x=T,y=T),cluster=ft$chairname) m3robdfe<-robcov(ols(m3fdfe,data=ft,x=T,y=T),cluster=ft$chairname) m4robdfe<-robcov(ols(m4fdfe,data=ft,x=T,y=T),cluster=ft$chairname) m5robdfe<-robcov(ols(m5fdfe,data=ft,x=T,y=T),cluster=ft$chairname) #robustness check 3: exclude prime ministers m1robexpm<-robcov(ols(m1f,data=subset(ft,pm==0),x=T,y=T),cluster=subset(ft,pm==0)$chairname) m2robexpm<-robcov(ols(m2f,data=subset(ft,pm==0),x=T,y=T),cluster=subset(ft,pm==0)$chairname) m3robexpm<-robcov(ols(m3f,data=subset(ft,pm==0),x=T,y=T),cluster=subset(ft,pm==0)$chairname) m4robexpm<-robcov(ols(m4f,data=subset(ft,pm==0),x=T,y=T),cluster=subset(ft,pm==0)$chairname) m5robexpm<-robcov(ols(m5f,data=subset(ft,pm==0),x=T,y=T),cluster=subset(ft,pm==0)$chairname) #get list of chairmen by debate allchairmen<-ftall %>% filter(chair==1) %>% group_by(chairname,debate) %>% summarise(remarks=length(chairname)) seatvec<-c(16,rep(22,2),rep(47,7),rep(8,3),rep(18,2),47,23,16,23,16,9,47,44,34,17,17,13,45,rep(47,3),45,rep(47,4),rep(22,4),37,25,22,25,22,16,14,rep(46,2)) #careful! here I manually type in the seats for each chairman's party by debate allchairmen$seats<-seatvec allchairmen<-allchairmen %>% group_by(chairname) %>% summarise(remarks=sum(remarks),debates=length(debate),avgseats=mean(seats)) %>% mutate(president=ifelse(chairname %in% c("Thor Pedersen","Mogens Lykketoft","Pia Kjaersgaard"),1,0)) #can we predict chairman activity? 
summary(remarksm1<-lm(remarks~president+debates,data=allchairmen)) summary(remarksm2<-lm(remarks~president+debates+avgseats,data=allchairmen)) summary(remarksm3<-lm(remarks~president+avgseats,data=allchairmen)) allchairmen$exest<-NA allchairmen$exse<-NA for (i in 1:nrow(allchairmen)){ exm<-robcov(ols(m5f,data=subset(ft,chairname!=allchairmen$chairname[i]),x=T,y=T),cluster=subset(ft,chairname!=allchairmen$chairname[i])$chairname) allchairmen[i,6:7]<-c(exm$coefficients[2],sqrt(diag(exm$var))[2]) } ### TABLES checkmarks<-c("Speaker party FE & & & & $\\checkmark$ & $\\checkmark$ \\\\", "Chair party FE & & & & & $\\checkmark$ \\\\") #, "Debate FE & & & & $\\checkmark$ & $\\checkmark$ \\\\") covarlabs<-c("Copartisan","Time of day","Gender (female)","Debate type (Opening)","Intercept") regtab1<-stargazer(m1rob,m2rob,m3rob,m4rob,m5rob,style="apsr",omit=c("coarse","chair"), dep.var.labels="Speaking time (seconds)",dep.var.labels.include=T,font.size="footnotesize", label="parlbias_regtab1",column.sep.width="-5pt",star.cutoffs=c(.1,.05,.01),align=T,title="OLS models of speaking time",digits=2, covariate.labels=covarlabs) regtab1<-c(regtab1[1:24],checkmarks,regtab1[25:length(regtab1)]) regtab1 writeLines(regtab1,con="../tables/parlbias_regtab1.txt") regtab1logit<-stargazer(m1.logit,m2.logit,m3.logit,m4.logit,m5.logit,style="apsr",omit=c("coarse","chair","debate"), dep.var.labels="Dummy: speaking time exceeds 60 seconds",dep.var.labels.include=T,font.size="footnotesize", label="parlbias_regtab1logit",column.sep.width="-5pt",covariate.labels=covarlabs,star.cutoffs=c(.1,.05,.01), align=T,title="Logit models of exceeding standard speaking time",digits=2) regtab1logit regtab1logit<-c(regtab1logit[1:22],checkmarks,regtab1logit[23:length(regtab1logit)]) writeLines(regtab1logit,con="../tables/parlbias_regtab1logit.txt") modscolumnlabels<-c("Full","Distance$\\leq$median (interval)","Distance$\\leq$median (ordinal)","Same bloc") modscheckmarks<-c("Speaker party FE & $\\checkmark$ & 
$\\checkmark$ & $\\checkmark$ & $\\checkmark$ \\\\", "Chair party FE & $\\checkmark$ & $\\checkmark$ & $\\checkmark$ & $\\checkmark$ \\\\") #, "Debate FE & $\\checkmark$ & $\\checkmark$ & $\\checkmark$ & $\\checkmark$ \\\\") regtabmods<-stargazer(m5rob,m5intposdifltmedian,m5ordposdifltmedian,m5cobloc,style="apsr",omit=c("coarse","chair","debate"), dep.var.labels="Speaking time (seconds)",dep.var.labels.include=T,font.size="footnotesize", label="parlbias_regtabmods",column.sep.width="-5pt",covariate.labels=covarlabs,star.cutoffs=c(.1,.05,.01), align=T,title="Tests of political moderators",digits=2,column.labels=modscolumnlabels) regtabmods regtabmods<-c(regtabmods[1:23],modscheckmarks,regtabmods[24:length(regtabmods)]) regtabmods writeLines(regtabmods,con="../tables/parlbias_regtabmods.txt") modeffectests<-data.frame(est=rep(NA,4),se=rep(NA,4),model=modscolumnlabels) #get coef and se on copartisan for each model modeffectms<-list(m5rob,m5intposdifltmedian,m5ordposdifltmedian,m5cobloc) for (i in 1:4){ modeffectests[i,1]<-coef(modeffectms[[i]])["copartisan"] modeffectests[i,2]<-sqrt(diag(modeffectms[[i]]$var))["copartisan"] } modeffectests #sub in <= for latex symbols modeffectests$model<-modeffectests$model %>% as.character() %>% gsub("\\$\\\\leq\\$","<=",.) 
### Robustness tables + model of enforced remarks + summary statistics ----

#robustness check 1: restricted sample (only members of leadership parties)
regtabrs<-stargazer(m1robrs,m2robrs,m3robrs,m4robrs,m5robrs,style="apsr",omit=c("coarse","chair"),
                    dep.var.labels="Speaking time (seconds)",dep.var.labels.include=T,font.size="footnotesize",
                    label="parlbias_regtabrs",column.sep.width="-5pt",star.cutoffs=c(.1,.05,.01),align=T,title="Results for only members of leadership parties",digits=2,
                    covariate.labels=covarlabs)
regtabrs<-c(regtabrs[1:24],checkmarks,regtabrs[25:length(regtabrs)]) # FE rows spliced at hard-coded position
regtabrs
writeLines(regtabrs,con="../tables/parlbias_regtabrs.txt")

#robustness check 2: debate-specific fixed effects
dfecheckmarks<-c("Speaker party FE & & & $\\checkmark$ & & $\\checkmark$ \\\\",
                 "Chair party FE & & & & & $\\checkmark$ \\\\",
                 "Debate FE & $\\checkmark$ & $\\checkmark$ & $\\checkmark$ & $\\checkmark$ & $\\checkmark$ \\\\")
dfecovarlabs<-c("Copartisan","Time of day","Gender (female)","Intercept") # no debate-type label: absorbed by debate FE
regtabdfe<-stargazer(m1robdfe,m2robdfe,m3robdfe,m4robdfe,m5robdfe,style="apsr",omit=c("coarse","chair","debate"),
                     dep.var.labels="Speaking time (seconds)",dep.var.labels.include=T,font.size="footnotesize",
                     label="parlbias_regtabdfe",column.sep.width="-5pt",star.cutoffs=c(.1,.05,.01),align=T,title="Results with debate-specific fixed effects",digits=2,
                     covariate.labels=dfecovarlabs)
regtabdfe<-c(regtabdfe[1:22],dfecheckmarks,regtabdfe[23:length(regtabdfe)])
regtabdfe
writeLines(regtabdfe,con="../tables/parlbias_regtabdfe.txt")

#robustness check 3: excluding pm's
regtabexpm<-stargazer(m1robexpm,m2robexpm,m3robexpm,m4robexpm,m5robexpm,style="apsr",omit=c("coarse","chair"),
                      dep.var.labels="Speaking time (seconds)",dep.var.labels.include=T,font.size="footnotesize",
                      label="parlbias_regtabexpm",column.sep.width="-5pt",star.cutoffs=c(.1,.05,.01),align=T,title="Results excluding prime ministers",digits=2,
                      covariate.labels=covarlabs)
regtabexpm<-c(regtabexpm[1:24],checkmarks,regtabexpm[25:length(regtabexpm)])
regtabexpm
writeLines(regtabexpm,con="../tables/parlbias_regtabexpm.txt")

#table for reg predicting remarks presided over
remarksregtab<-stargazer(remarksm1,remarksm2,remarksm3,style="apsr",
                         dep.var.labels="Number of remarks enforced",dep.var.labels.include=T,font.size="footnotesize",
                         label="parlbias_remarksregtab",column.sep.width="-5pt",star.cutoffs=c(.1,.05,.01),
                         align=T,title="Model predicting number of remarks enforced by chairmen",digits=2,
                         covariate.labels=c("Head chairman","No. of debates in leadership","No. of party seats"))
writeLines(remarksregtab,con="../tables/parlbias_remarksregtab.txt")

### SUMMARY STATS TABLES

# Classify speeches by length; rows failing every condition stay NA and are dropped below
ftall$type<-NA
ftall$type[ftall$secs<150 & ftall$secs>10 & !is.na(ftall$copartisan)]<-"Brief remark"
ftall$type[ftall$secs>=150]<-"Spokesperson speech"
ftall$type[ftall$secs>=150 & ftall$pm==1]<-"PM speech" # overwrites spokesperson label for PMs
require(dplyr)
# Denominators: non-chair speeches after 2000 with a valid type
totaln<-nrow(subset(ftall,chair==0 & year(starttime)>2000 & !is.na(type)))
totalsecs<-sum(subset(ftall,chair==0 & year(starttime)>2000 & !is.na(type))$secs)
options(digits=2)
sumstatstab<- ftall %>%
  filter(chair==0 & year(starttime)>2000) %>%
  filter(!is.na(type)) %>%
  group_by(type) %>%
  summarise(count=n(),nshare=100*(count/totaln),secshare=100*sum(secs)/totalsecs)
sumstatstab
#add total line at bottom (coerces columns to character, hence the class fixes below)
sumstatstab[4,]<-c("Total",colSums(sumstatstab[,2:4]))
class(sumstatstab$nshare)<-"numeric"
class(sumstatstab$secshare)<-"numeric"
sumstatstab$nshare<-as.character(format(sumstatstab$nshare,digits=2))
sumstatstab$secshare<-as.character(format(sumstatstab$secshare,digits=2))
sumstatstab
str(sumstatstab)
sumstatstabtex<-stargazer(sumstatstab,summary=F,digits=2,title="Types of speeches in opening and closing debates in the Folketing",label="parlbias_sumstatstab",font.size="footnotesize",align=T,colnames=T,rownames=F)
# Replace the header row (hard-coded line 12 of the stargazer output)
sumstatstabtex[12]<-"\\multicolumn{1}{l}{Type} & \\multicolumn{1}{r}{Number} & \\multicolumn{1}{r}{Share (numeric)} & \\multicolumn{1}{r}{Share (time-weighted)} \\\\ "
#awkward left alignment of labels
sumstatstabtex<-gsub("column\\{1\\}\\{c\\}\\{","column{1}{l}{",sumstatstabtex)
writeLines(sumstatstabtex,con="../tables/parlbias_sumstatstab.txt")
options(digits=7) # restore default precision changed above for the summary table

### PLOTS

#reorder factor levels so OLS panel comes before Logit
effectests<-within(effectests,method<-factor(method,levels=c("OLS","Logit")))

#plot estimates: coefficient plot for main models, faceted by estimation method;
# thin bars = 95% CI, thick bars = 90% CI (same convention in all plots below)
ggplot(effectests,aes(x=est,y=reorder(model,rep(5:1,2)))) +
  geom_point(size=2.5) +
  facet_grid(.~method,scales="free_x") +
  geom_errorbarh(aes(xmin=est-1.96*se,xmax=est+1.96*se),height=0,size=.5) +
  geom_errorbarh(aes(xmin=est-1.65*se,xmax=est+1.65*se),height=0,size=1.2) +
  expand_limits(x=0) +
  geom_vline(xintercept=0,linetype="dashed") +
  xlab("Estimate (seconds/percentage points)") + ylab("Model") +
  theme_bw() +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        strip.background = element_blank(), panel.border = element_rect(colour = "black"))
ggsave(file="../figures/parlbias_effectplot.pdf",height=4,width=9)
ggsave(file="../figures/parlbias_effectplot.png",height=4,width=9)

# Coefficient plot for the moderator models
ggplot(modeffectests,aes(x=est,y=reorder(model,c(4,3,2,1)))) +
  geom_point(size=2.5) +
  geom_errorbarh(aes(xmin=est-1.96*se,xmax=est+1.96*se),height=0,size=.5) +
  geom_errorbarh(aes(xmin=est-1.65*se,xmax=est+1.65*se),height=0,size=1.2) +
  expand_limits(x=0) +
  geom_vline(xintercept=0,linetype="dashed") +
  xlab("Estimate (seconds)") + ylab("Model") +
  theme_bw() +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        strip.background = element_blank(), panel.border = element_rect(colour = "black"))
ggsave(file="../figures/parlbias_modeffectplot.pdf",height=4,width=9)
ggsave(file="../figures/parlbias_modeffectplot.png",height=4,width=9)

#factors for copartisanship (relevel so "Co-partisan" is the first level)
ft$copartisan_factor<-factor(ft$copartisan,labels=c("Non-copartisan","Co-partisan"))
ft$copartisan_factor<-factor(ft$copartisan_factor,levels(ft$copartisan_factor)[c(2,1)])
# copartisan+cobloc takes values 0/1/2, mapped onto the three labels below
ft<-ft %>% mutate(copartisancobloc=factor(copartisan+cobloc,labels=c("Non-copartisan, other bloc","Non-copartisan, same bloc","Co-partisan")))

# Density of speech durations by relationship to the chair; dashed line = 60s limit
ggplot(subset(ft,chair==0 & !is.na(copartisancobloc)),aes(x=secs)) +
  geom_density(fill="gray",alpha=.5,adjust=1.5) +
  facet_grid(.~copartisancobloc) +
  geom_vline(xintercept=60,linetype="dashed") +
  xlab("Speech duration (seconds)") + ylab("") +
  scale_y_continuous(breaks=c(0,.005,.01,.015),labels=c("0",".005",".01",".015")) +
  scale_x_continuous(breaks=c(0,30,60,90,120),labels=c("0","30","60","90","120")) +
  theme_bw() +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        strip.background = element_blank(), panel.border = element_rect(colour = "black"))
ggsave(file="../figures/parlbias_dens.pdf",height=5,width=9)
ggsave(file="../figures/parlbias_dens.png",height=5,width=9)

## varying coefficients by party (grey dashed line = pooled fixed effect from mlm3)
ggplot(subset(partyranefs,party!="Other"),aes(x=coef,y=reorder(party,-ordpos))) +
  geom_point(size=2.5) +
  geom_errorbarh(aes(xmin=coef-1.96*se,xmax=coef+1.96*se),height=0,size=.5) +
  geom_errorbarh(aes(xmin=coef-1.65*se,xmax=coef+1.65*se),height=0,size=1.2) +
  expand_limits(x=0) +
  geom_vline(xintercept=0,linetype="dashed") +
  geom_vline(xintercept=fixef(mlm3)[2],linetype="dashed",color="grey40") +
  xlab("Estimate (seconds)") + ylab("Chairman's party") +
  theme_bw() +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        strip.background = element_blank(), panel.border = element_rect(colour = "black"))
ggsave(file="../figures/parlbias_partyranefs.pdf",height=4,width=9)
ggsave(file="../figures/parlbias_partyranefs.png",height=4,width=9)

## varying coefficients by chairman
ggplot(chairranefs,aes(x=coef,y=reorder(chairparty,-chairorder))) +
  geom_point(size=2.5) +
  geom_errorbarh(aes(xmin=coef-1.96*se,xmax=coef+1.96*se),height=0,size=.5) +
  geom_errorbarh(aes(xmin=coef-1.65*se,xmax=coef+1.65*se),height=0,size=1.2) +
  expand_limits(x=0) +
  geom_vline(xintercept=0,linetype="dashed") +
  geom_vline(xintercept=fixef(mlm3)[2],linetype="dashed",color="grey40") +
  xlab("Estimate (seconds)") + ylab("Chairman") +
  theme_bw() +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        strip.background = element_blank(), panel.border = element_rect(colour = "black"))
ggsave(file="../figures/parlbias_chairranefs.pdf",height=6,width=9)
ggsave(file="../figures/parlbias_chairranefs.png",height=6,width=9)

# estimate coefficient excluding each chairman (leave-one-out estimates from above;
# grey dashed line = full-sample estimate from m5rob)
ggplot(allchairmen,aes(x=exest,y=chairname)) +
  geom_point(size=2.5) +
  geom_errorbarh(aes(xmin=exest-1.96*exse,xmax=exest+1.96*exse),height=0,size=.5) +
  geom_errorbarh(aes(xmin=exest-1.65*exse,xmax=exest+1.65*exse),height=0,size=1.2) +
  expand_limits(x=0) +
  geom_vline(xintercept=0,linetype="dashed") +
  geom_vline(xintercept=m5rob$coefficients[2],linetype="dashed",color="grey40") +
  xlab("Estimate excluding chairman (seconds)") + ylab("Chairman") +
  theme_bw() +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        strip.background = element_blank(), panel.border = element_rect(colour = "black"))
ggsave(file="../figures/parlbias_exchairests.pdf",height=6,width=9)
ggsave(file="../figures/parlbias_exchairests.png",height=6,width=9)

## plot predicted remarks presided over: predictions from remarksm2 over a grid
# of debate counts for head chairmen vs. ordinary chairmen, at avgseats=21
seatsprdf1<-data.frame(expand.grid(debates=1:9,president=0:1,avgseats=21)) %>%
  bind_cols(.,as.data.frame(predict(remarksm2,newdata=.,se.fit=T))) %>%
  dplyr::select(debates,president,fit,se.fit)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/chk-environment.R \name{chk_environment} \alias{chk_environment} \alias{vld_environment} \title{Check Environment} \usage{ chk_environment(x, x_name = NULL) vld_environment(x) } \arguments{ \item{x}{The object to check.} \item{x_name}{A string of the name of object x or NULL.} } \value{ The \code{chk_} function throws an informative error if the test fails. The \code{vld_} function returns a flag indicating whether the test was met. } \description{ Checks if environment using \code{is.environment(x)} } \section{Functions}{ \itemize{ \item \code{vld_environment}: Validate Environment }} \examples{ # chk_environment chk_environment(.GlobalEnv) try(chk_environment(1)) # vld_environment vld_environment(1) vld_environment(list(1)) vld_environment(.GlobalEnv) vld_environment(environment()) } \seealso{ Other chk_is: \code{\link{chk_array}()}, \code{\link{chk_atomic}()}, \code{\link{chk_function}()}, \code{\link{chk_list}()}, \code{\link{chk_matrix}()}, \code{\link{chk_numeric}()}, \code{\link{chk_s3_class}()}, \code{\link{chk_s4_class}()}, \code{\link{chk_vector}()}, \code{\link{chk_whole_numeric}()} } \concept{chk_is}
/man/chk_environment.Rd
permissive
krlmlr/chk
R
false
true
1,216
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/chk-environment.R \name{chk_environment} \alias{chk_environment} \alias{vld_environment} \title{Check Environment} \usage{ chk_environment(x, x_name = NULL) vld_environment(x) } \arguments{ \item{x}{The object to check.} \item{x_name}{A string of the name of object x or NULL.} } \value{ The \code{chk_} function throws an informative error if the test fails. The \code{vld_} function returns a flag indicating whether the test was met. } \description{ Checks if environment using \code{is.environment(x)} } \section{Functions}{ \itemize{ \item \code{vld_environment}: Validate Environment }} \examples{ # chk_environment chk_environment(.GlobalEnv) try(chk_environment(1)) # vld_environment vld_environment(1) vld_environment(list(1)) vld_environment(.GlobalEnv) vld_environment(environment()) } \seealso{ Other chk_is: \code{\link{chk_array}()}, \code{\link{chk_atomic}()}, \code{\link{chk_function}()}, \code{\link{chk_list}()}, \code{\link{chk_matrix}()}, \code{\link{chk_numeric}()}, \code{\link{chk_s3_class}()}, \code{\link{chk_s4_class}()}, \code{\link{chk_vector}()}, \code{\link{chk_whole_numeric}()} } \concept{chk_is}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/check.R \name{check} \alias{check} \alias{check_built} \title{Build and check a package, cleaning up automatically on success.} \usage{ check( pkg = ".", document = NULL, build_args = NULL, ..., manual = FALSE, cran = TRUE, remote = FALSE, incoming = remote, force_suggests = FALSE, run_dont_test = FALSE, args = "--timings", env_vars = c(NOT_CRAN = "true"), quiet = FALSE, check_dir = tempdir(), cleanup = TRUE, vignettes = TRUE, error_on = c("never", "error", "warning", "note") ) check_built( path = NULL, cran = TRUE, remote = FALSE, incoming = remote, force_suggests = FALSE, run_dont_test = FALSE, manual = FALSE, args = "--timings", env_vars = NULL, check_dir = tempdir(), quiet = FALSE, error_on = c("never", "error", "warning", "note") ) } \arguments{ \item{pkg}{The package to use, can be a file path to the package or a package object. See \code{\link[=as.package]{as.package()}} for more information.} \item{document}{By default (\code{NULL}) will document if your installed roxygen2 version matches the version declared in the \code{DESCRIPTION} file. Use \code{TRUE} or \code{FALSE} to override the default.} \item{build_args}{Additional arguments passed to \verb{R CMD build}} \item{...}{Additional arguments passed on to \code{\link[pkgbuild:build]{pkgbuild::build()}}.} \item{manual}{If \code{FALSE}, don't build and check manual (\code{--no-manual}).} \item{cran}{if \code{TRUE} (the default), check using the same settings as CRAN uses.} \item{remote}{Sets \verb{_R_CHECK_CRAN_INCOMING_REMOTE_} env var. If \code{TRUE}, performs a number of CRAN incoming checks that require remote access.} \item{incoming}{Sets \verb{_R_CHECK_CRAN_INCOMING_} env var. If \code{TRUE}, performs a number of CRAN incoming checks.} \item{force_suggests}{Sets \verb{_R_CHECK_FORCE_SUGGESTS_}. 
If \code{FALSE} (the default), check will proceed even if all suggested packages aren't found.} \item{run_dont_test}{Sets \code{--run-donttest} so that tests surrounded in \verb{\donttest{}} are also tested. When \code{cran = TRUE}, this only affects R 3.6 and earlier; in R 4.0.0 code in \verb{\donttest{}} is always run as part of CRAN submission.} \item{args}{Character vector of arguments to pass to \verb{R CMD check}. Pass each argument as a single element of this character vector (do not use spaces to delimit arguments like you would in the shell). For example, to skip running of examples and tests, use \code{args = c("--no-examples", "--no-tests")} and not \code{args = "--no-examples --no-tests"}. (Note that instead of the \code{--output} option you should use the \code{check_dir} argument, because \code{--output} cannot deal with spaces and other special characters on Windows.)} \item{env_vars}{Environment variables set during \verb{R CMD check}} \item{quiet}{if \code{TRUE} suppresses output from this function.} \item{check_dir}{the directory in which the package is checked. \code{args = "--output=/foo/bar"} can be used to change the check directory.} \item{cleanup}{Deprecated.} \item{vignettes}{If \code{FALSE}, do not build or check vignettes, equivalent to using \verb{args = '--ignore-vignettes'} and \verb{build_args = '--no-build-vignettes'}.} \item{error_on}{Whether to throw an error on \verb{R CMD check} failures. Note that the check is always completed (unless a timeout happens), and the error is only thrown after completion. If \code{"never"}, then no errors are thrown. If \code{"error"}, then only \code{ERROR} failures generate errors. If \code{"warning"}, then \code{WARNING} failures generate errors as well. If \code{"note"}, then any check failure generates an error. Its default can be modified with the \code{RCMDCHECK_ERROR_ON} environment variable. 
If that is not set, then \code{"never"} is used.} \item{path}{Path to built package.} } \value{ An object containing errors, warnings, and notes. } \description{ \code{check} automatically builds and checks a source package, using all known best practices. \code{check_built} checks an already built package. } \details{ Passing \verb{R CMD check} is essential if you want to submit your package to CRAN: you must not have any ERRORs or WARNINGs, and you want to ensure that there are as few NOTEs as possible. If you are not submitting to CRAN, at least ensure that there are no ERRORs or WARNINGs: these typically represent serious problems. \code{check} automatically builds a package before calling \code{check_built} as this is the recommended way to check packages. Note that this process runs in an independent realisation of R, so nothing in your current workspace will affect the process. } \section{Environment variables}{ Devtools does its best to set up an environment that combines best practices with how check works on CRAN. This includes: \itemize{ \item The standard environment variables set by devtools: \code{\link[=r_env_vars]{r_env_vars()}}. Of particular note for package tests is the \code{NOT_CRAN} env var which lets you know that your tests are not running on CRAN, and hence can take a reasonable amount of time. \item Debugging flags for the compiler, set by \code{\link{compiler_flags}(FALSE)}. \item If \code{aspell} is found \verb{_R_CHECK_CRAN_INCOMING_USE_ASPELL_} is set to \code{TRUE}. If no spell checker is installed, a warning is issued.) \item env vars set by arguments \code{incoming}, \code{remote} and \code{force_suggests} } } \seealso{ \code{\link[=release]{release()}} if you want to send the checked package to CRAN. }
/man/check.Rd
permissive
rmsharp/devtools
R
false
true
5,629
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/check.R \name{check} \alias{check} \alias{check_built} \title{Build and check a package, cleaning up automatically on success.} \usage{ check( pkg = ".", document = NULL, build_args = NULL, ..., manual = FALSE, cran = TRUE, remote = FALSE, incoming = remote, force_suggests = FALSE, run_dont_test = FALSE, args = "--timings", env_vars = c(NOT_CRAN = "true"), quiet = FALSE, check_dir = tempdir(), cleanup = TRUE, vignettes = TRUE, error_on = c("never", "error", "warning", "note") ) check_built( path = NULL, cran = TRUE, remote = FALSE, incoming = remote, force_suggests = FALSE, run_dont_test = FALSE, manual = FALSE, args = "--timings", env_vars = NULL, check_dir = tempdir(), quiet = FALSE, error_on = c("never", "error", "warning", "note") ) } \arguments{ \item{pkg}{The package to use, can be a file path to the package or a package object. See \code{\link[=as.package]{as.package()}} for more information.} \item{document}{By default (\code{NULL}) will document if your installed roxygen2 version matches the version declared in the \code{DESCRIPTION} file. Use \code{TRUE} or \code{FALSE} to override the default.} \item{build_args}{Additional arguments passed to \verb{R CMD build}} \item{...}{Additional arguments passed on to \code{\link[pkgbuild:build]{pkgbuild::build()}}.} \item{manual}{If \code{FALSE}, don't build and check manual (\code{--no-manual}).} \item{cran}{if \code{TRUE} (the default), check using the same settings as CRAN uses.} \item{remote}{Sets \verb{_R_CHECK_CRAN_INCOMING_REMOTE_} env var. If \code{TRUE}, performs a number of CRAN incoming checks that require remote access.} \item{incoming}{Sets \verb{_R_CHECK_CRAN_INCOMING_} env var. If \code{TRUE}, performs a number of CRAN incoming checks.} \item{force_suggests}{Sets \verb{_R_CHECK_FORCE_SUGGESTS_}. 
If \code{FALSE} (the default), check will proceed even if all suggested packages aren't found.} \item{run_dont_test}{Sets \code{--run-donttest} so that tests surrounded in \verb{\donttest{}} are also tested. When \code{cran = TRUE}, this only affects R 3.6 and earlier; in R 4.0.0 code in \verb{\donttest{}} is always run as part of CRAN submission.} \item{args}{Character vector of arguments to pass to \verb{R CMD check}. Pass each argument as a single element of this character vector (do not use spaces to delimit arguments like you would in the shell). For example, to skip running of examples and tests, use \code{args = c("--no-examples", "--no-tests")} and not \code{args = "--no-examples --no-tests"}. (Note that instead of the \code{--output} option you should use the \code{check_dir} argument, because \code{--output} cannot deal with spaces and other special characters on Windows.)} \item{env_vars}{Environment variables set during \verb{R CMD check}} \item{quiet}{if \code{TRUE} suppresses output from this function.} \item{check_dir}{the directory in which the package is checked compatibility. \code{args = "--output=/foo/bar"} can be used to change the check directory.} \item{cleanup}{Deprecated.} \item{vignettes}{If \code{FALSE}, do not build or check vignettes, equivalent to using \verb{args = '--ignore-vignettes' and }build_args = '--no-build-vignettes'.} \item{error_on}{Whether to throw an error on \verb{R CMD check} failures. Note that the check is always completed (unless a timeout happens), and the error is only thrown after completion. If \code{"never"}, then no errors are thrown. If \code{"error"}, then only \code{ERROR} failures generate errors. If \code{"warning"}, then \code{WARNING} failures generate errors as well. If \code{"note"}, then any check failure generated an error. Its default can be modified with the \code{RCMDCHECK_ERROR_ON} environment variable. 
If that is not set, then \code{"never"} is used.} \item{path}{Path to built package.} } \value{ An object containing errors, warnings, and notes. } \description{ \code{check} automatically builds and checks a source package, using all known best practices. \code{check_built} checks an already built package. } \details{ Passing \verb{R CMD check} is essential if you want to submit your package to CRAN: you must not have any ERRORs or WARNINGs, and you want to ensure that there are as few NOTEs as possible. If you are not submitting to CRAN, at least ensure that there are no ERRORs or WARNINGs: these typically represent serious problems. \code{check} automatically builds a package before calling \code{check_built} as this is the recommended way to check packages. Note that this process runs in an independent realisation of R, so nothing in your current workspace will affect the process. } \section{Environment variables}{ Devtools does its best to set up an environment that combines best practices with how check works on CRAN. This includes: \itemize{ \item The standard environment variables set by devtools: \code{\link[=r_env_vars]{r_env_vars()}}. Of particular note for package tests is the \code{NOT_CRAN} env var which lets you know that your tests are not running on CRAN, and hence can take a reasonable amount of time. \item Debugging flags for the compiler, set by \code{\link{compiler_flags}(FALSE)}. \item If \code{aspell} is found \verb{_R_CHECK_CRAN_INCOMING_USE_ASPELL_} is set to \code{TRUE}. If no spell checker is installed, a warning is issued.) \item env vars set by arguments \code{incoming}, \code{remote} and \code{force_suggests} } } \seealso{ \code{\link[=release]{release()}} if you want to send the checked package to CRAN. }
#' Default parameters for the Q-Predictions algorithm
#'
#' Builds the named list of tuning parameters that controls action selection,
#' Q-value weighting, memory handling and status output.
#'
#' @return A named list of default algorithm parameters.
#' @export
Get.Def.Par.QPredictions <- function(){
  # --- Action selection & learning ---
  action.policy <- "epsilon.greedy" # currently the only implemented policy
  weighting.policy <- "SARSA"
  weighting.factor <- 0.1 # learning rate "a" of the Q-learning update
  end.start <- 0.5 # states after this fraction of a path are treated as end points
  gamma <- NA # discount factor; sensibly (?) set to the delta of the game

  # --- epsilon-greedy exploration parameters ---
  epsilon.start <- 0.1
  epsilon.decay <- NA
  epsilon.min <- 0.001
  epsilon.decay.type <- "rate" # may be "rate" or "linear"

  # --- Incorporating new information / updating status ---
  mem.selection <- "all" # kept for backwards compatibility; no other option exists
  mem.type <- "game.encoded" # game.encoded or game.encoded.rounds
  replay.every <- 100 # update the prediction model after this many rounds

  # --- Output ---
  show.current.status <- 50

  # Same named list the nlist() shorthand produced, spelled out explicitly.
  q.param <- list(action.policy = action.policy,
                  weighting.policy = weighting.policy,
                  weighting.factor = weighting.factor,
                  end.start = end.start,
                  gamma = gamma,
                  epsilon.start = epsilon.start,
                  epsilon.decay = epsilon.decay,
                  epsilon.min = epsilon.min,
                  epsilon.decay.type = epsilon.decay.type,
                  mem.selection = mem.selection,
                  mem.type = mem.type,
                  show.current.status = show.current.status,
                  replay.every = replay.every)
  return(q.param)
}
#Q-Predictions
#' Q-Predictions is rather similar to Q-learning but we use a multilayer approach:\itemize{
#' \item We assume that even though the algorithm doesn't have complete information at runtime it may be used for training
#' \item A predictive neural net is used to calculate the next move of the opponent to use monte carlo studies.
#' \item Assumption: We have a single starting state.
#' }
#'
#' @param game.object Game Object as defined by \code{Get.Game.Object.<NAME>}.
#' @param algo.par Algorithm parameters. If \code{NULL}, the function \code{Get.Def.Par.QPredictions()} is called.
#' @param model.par Model parameters.
#' @export
Setup.QPredictions <- function(game.object, algo.par=NULL, model.par){
  restore.point("Setup.QPredictions")
  if(is.null(algo.par)){
    # NOTE(review): algo.par is defaulted here but not used afterwards in this
    # function -- presumably the model setup relies only on game.par; confirm.
    algo.par <- Get.Def.Par.QPredictions()
  }
  game.par <- game.object$game.par(game.object)
  if(is.null(model.par$name)){
    stop("name parameter of model.par missing!")
  }
  # Delegate actual model construction to the model-specific setup function
  model <- model.par$setup(model.par, game.par)
  return(model)
}

#' Set changeable model variables
#'
#' Returns a list with the following items \itemize{
#' \item \strong{epsilon} Specifies how often the Algorithm tries a random move. Initialized with \code{epsilon.start} of \code{model.par}.
#' \item \strong{memory.net} A net based on the next state. Each Node holds the following information:\itemize{
#' \item \strong{encoding} Game state encoded for prediction algorithm.
#' \item \strong{depth} Current depth of tree. In the case of the Prisoners Dilemma equal to the round.
#' \item \strong{visited} How often have we visited this node?
#' \item \strong{Q.values} Q.values of this node.
#' \item \strong{precursors} All possibilities to get to this node
#' \item \strong{successors} All nodes to go to. Is a list itself with one entry for each action. Here a list with the following items exist: Number of Reached node. Number of visits.
#' }
#' }
#'
#' @param game.object A Game Object as defined by \code{Get.Game.Object.<Name>}. Necessary in the case of memory initialisation.
#' @param algo.par Parameters of QPredictions specification. Have to be specified and should be identical to the algo.par as given to \code{Setup.QPredictions()}.
#' @param memory.init Which type of initialization should take place? If \code{NULL}, the option \code{none} is used. The following types are supported \itemize{
#' \item \strong{none} No initialization takes place. Memory is an empty list.
#' \item \strong{self.play} The other strategies play against themselves - to understand possible secret handshakes. The following \code{memory.param} are expected: \itemize{
#' \item \strong{no} How often should the other strategies play against themselves?
#' }
#' }
#' If combinations of different memories are needed, one can use the function \code{Extend.Memory.QPredictions()}
#' @param memory.param Parameters necessary for the chosen \code{memory.init}.
#'@export
Initialise.QPredictions <- function(game.object=NULL, algo.par, memory.init=NULL, memory.param = NULL){
  if(is.null(memory.init)){
    memory.init <- "none"
  }
  # Mutable algorithm state: exploration rate plus the (possibly empty) memory net
  algo.var <- list()
  algo.var$epsilon <- algo.par$epsilon.start
  algo.var$memory.net <- list()
  if (memory.init != "none") {
    algo.var <- Extend.Memory.QPredictions(algo.var, algo.par=algo.par, game.object=game.object, memory.type=memory.init, memory.param=memory.param)
  }
  return(algo.var)
}

#' Extend Memory by specified experiences
#'
#' Returns modified algo.var, where memory has been extended as specified.
#'
#' @param algo.var A variable algorithm object, where to be modified variables are saved. Given by \code{Initialise.QPredictions()}
#' @param game.object A game object as defined by \code{Get.Game.Object.<Name>}.
#' @param memory.type Which type of extension should take place? The following types are supported \itemize{
#' \item \strong{self.play} The other strategies play against themselves - to understand possible secret handshakes. If I am myself part of the other strategies, the "self" strategy is ignored. The following \code{memory.param} are expected: \itemize{
#' \item \strong{no} How often should the other strategies play against themselves?
#' }
#' \item \strong{solid.foundation} Not only self.play, but also a random initialisation with increasing defect probabilities. The following \code{memory.param} are expected: \itemize{
#' \item \strong{self.no} How often should the other strategies play against themselves?
#' \item \strong{rep.no} How often should a random strategy be played? The defection probability is linearly increased.
#' }
#' }
#' If combinations of different memories are needed, one can use the function multiple times.
#' @param memory.param Parameters necessary for the chosen \code{memory.type}.
#'@export
Extend.Memory.QPredictions <- function(algo.var, algo.par=NULL, game.object, memory.type, memory.param=NULL){
  restore.point("Extend.Memory.QPredictions")
  if(memory.type == "self.play"){
    # Generate memory.param$no self-play runs, provided the game supports it;
    # unsupported games contribute NULL, which unlist() silently drops.
    new.mem <- unlist(lapply(1:memory.param$no,FUN=function(x){
      if(!is.null(game.object$supports) && any(game.object$supports == "memory.self.play")){
        return(game.object$memory.self.play(game.object, algo.par))
      }
    }), recursive=FALSE)
  } else if (memory.type== "solid.foundation"){
    # Self-play runs ...
    self.mem <- unlist(lapply(1:memory.param$self.no,FUN=function(x){
      if(!is.null(game.object$supports) && any(game.object$supports == "memory.self.play")){
        return(game.object$memory.self.play(game.object, algo.par))
      }
    }), recursive=FALSE)
    # ... plus random play with defection probability rising linearly from 0 to 1
    def.arr <- seq(0,1,length.out = memory.param$rep.no)
    rand.mem <- unlist(lapply(def.arr,FUN=function(x){
      if(!is.null(game.object$supports) && any(game.object$supports == "memory.random.play")){
        algo.par$def.prob <- x # local copy only; caller's algo.par is unchanged
        return(game.object$memory.random.play(game.object, algo.par))
      }
    }), recursive=FALSE)
    new.mem <- c(self.mem, rand.mem)
  } else {
    stop(paste0("memory.type ",memory.type," not supported."))
  }
  # Fold the new experience paths into the memory net
  algo.var$memory.net <- Update.Net.QPredictions(algo.var$memory.net, new.mem, game.object, algo.par)
  return(algo.var)
}

#'Calculate Expected Value based on action
#'
#'Needed to calculate the next Q value. If converged, should result in the same value as the respective Q.value itself.
#'Also outputs edge value.
#'
#' @export
Calc.Reward.QPredictions.expectedQ <- function(net, pointer, action, option="ignore", mode){
  restore.point("Calc.Reward.QPredictions.expectedQ")
  if(option!="ignore") {
    stop("Not yet implemented. 
 Use option ignore in Calc.Reward.QPredictions.expectedQ")
  }
  suc.vec <- net[[pointer]]$successors[[action]]
  if(option=="ignore" && length(suc.vec)==0){ #No successors known
    return(NA)
  }
  # Weight each successor by its relative visit frequency
  suc.weights <- net[[pointer]]$successors.visited[[action]]
  rel.weights <- suc.weights/sum(suc.weights)
  if(mode=="max"){
    # Best known Q-value of each successor; NA if the successor has none yet
    Q.values <- sapply(suc.vec,FUN=function(x){
      if(all(is.na(net[[x]]$Q.values))){
        return(NA)
      } else {
        return(max(net[[x]]$Q.values, na.rm=TRUE))
      }
    })
    expQ <- sum((Q.values)*rel.weights) # NA successors propagate NA here
    return(expQ)
  } else {
    stop("Not implemented yet.")
  }
}

#'Calculate Expected immediate Reward based on action
#'
#'Returns expected Reward based on historic data. Only used internally to update net as input for q values.
#'
#' @export
Calc.Reward.QPredictions.expectedReward <- function(net, pointer, action, mode="history"){
  restore.point("Calc.Reward.QPredictions.expectedReward")
  if(mode!="history") {
    stop("Not yet implemented. Use option history in Calc.Reward.QPredictions.expectedReward")
  }
  suc.vec <- net[[pointer]]$successors[[action]]
  if(length(suc.vec)==0){ #No successors known
    return(NA)
  }
  # Visit-frequency-weighted average of the historically observed edge rewards
  suc.weights <- net[[pointer]]$successors.visited[[action]]
  rel.weights <- suc.weights/sum(suc.weights)
  edge.rewards <- net[[pointer]]$edge.rewards[[action]]
  if(mode=="history"){
    expR <- sum(edge.rewards*rel.weights)
    return(expR)
  } else {
    stop("Not implemented yet.")
  }
}

#'Calc.Reward.QPredictions
#'
#' Computes the updated Q-value for taking \code{action} at node \code{pointer},
#' blending the current Q-value with the new estimate via the weighting factor.
#'
#' NOTE(review): this function reads \code{algo.par} (gamma, weighting.factor)
#' from the calling/global environment rather than taking it as an argument --
#' confirm that algo.par is always in scope when this is called.
#'
#' @export
Calc.Reward.QPredictions <- function(net, pointer, action, mode, mode.par, end.state=NULL, end.reward=NULL){
  restore.point("Calc.Reward.QPredictions")
  if(!is.na(net[[pointer]]$Q.values[action])){
    Q.current <- net[[pointer]]$Q.values[action]
  } else { #Here optimistic evaluation might take place, but we path through
    Q.current <- NA
  }
  if(!is.null(end.state)){ #there are no relevant following Q.states
    Q.next <- end.state/(1-algo.par$gamma) #Expected value of repeating end.state infinitely often but with discontinuity probability of gamma
    reward <- end.reward
  } else {
    if(mode=="max"){
      Q.next <- Calc.Reward.QPredictions.expectedQ(net=net, pointer=pointer,action=action, mode=mode)
      if(length(Q.next)>1){
        stop("Q to big")
      }
      if(is.infinite(Q.next)){
        stop("is infinite")
      }
      reward <- Calc.Reward.QPredictions.expectedReward(net=net, pointer=pointer,action=action)
    } else {
      stop("weighting policy unknown")
    }
  }
  # Standard Q-learning style update: reward + discounted next-state value,
  # mixed with the old estimate by weighting.factor (unless no old estimate exists)
  Q.update <- (reward + algo.par$gamma * Q.next)
  if(is.na(Q.current)){
    res <- Q.update
  } else {
    res <- (1-algo.par$weighting.factor) * Q.current + algo.par$weighting.factor * Q.update
  }
  return(res)
}

#' Calculates Endstate value
#'
#' Currently no option is supported. Endstate value is calculated as average reward from end.pointer.pos to end of mem.path.
#'
#'@export
Calc.Endstate.Value.QPredictions <- function(net, end.pointer.pos.net, end.pointer.pos.path, mem.path, option=NULL){
  restore.point("Calc.Endstate.Value.QPredictions")
  if(is.null(option)){
    option <- "SARSA"
  }
  if(option=="SARSA"){
    # Mean reward over the tail of the path from the endstate position onwards
    res <- mean(unlist(lapply(mem.path[end.pointer.pos.path:length(mem.path)],FUN=function(x){x$reward})))
  } else {
    stop("Endstate-Value can't be calculated due to incorrect option.")
  }
  return(res)
}

#'Internal Function
#'
#'Expects one or several Paths.
#' #'@export Update.Net.QPredictions <- function(net, new.mem, game.object, algo.par){ init.net.node <- function(state, precursors, round=1){ visited <- 0 successors <- rep( list(list()), game.object$game.par(game.object)$output.nodes) successors.visited <- rep( list(list()), game.object$game.par(game.object)$output.nodes) edge.rewards <- rep( list(list()), game.object$game.par(game.object)$output.nodes) Q.values <- rep(NA, game.object$game.par(game.object)$output.nodes) round <- round return(nlist(state, visited, precursors, successors, successors.visited, edge.rewards, Q.values, round)) } restore.point("Update.Net.QPredictions") if(length(net)==0){ #initialise root net[[1]] <- init.net.node(state=new.mem[[1]]$state, precursors=0) } #Identify start/end points starts <- which(sapply(new.mem,FUN=function(x){x$start})) ends <- which(sapply(new.mem,FUN=function(x){x$done})) no.paths <- length(starts) #Update net for(i in 1:no.paths){ net.pointer.path <- c(1) end.pointer.pos <- ceiling(algo.par$end.start*(ends[i]-starts[i]))+1+starts[i] for(j in starts[i]:ends[i]){ #restore.point("within.for.update") x <- new.mem[[j]] if(j==starts[i]){ net.pointer <- 1 } net[[net.pointer]]$visited <- net[[net.pointer]]$visited+1 if(length(net[[net.pointer]]$successors[[x$action]])==0){ #first time visit if(game.object$full.encoding){ #Cycles are not possible no.next <- integer(0) } else { if(algo.par$mem.type=="game.encoded.round"){ could.be.state <- which(sapply(1:length(net),FUN=function(y){ net[[y]]$round==(x$round+1) })) } else { could.be.state <- 1:length(net) } if(length(could.be.state)==0){ no.next <- integer(0) } else { no.next <- which(sapply(could.be.state,FUN=function(y){ identical(x$next.state,net[[y]]$state) })) } } if(length(no.next)==0){ no.next <- length(net)+1 net[[no.next]] <- init.net.node(state=x$next.state, precursors=net.pointer, round=x$round+1) } else { ### Here we have to check whether the precursor is already identical to the net.pointer 
net[[no.next]]$precursors[length(net[[no.next]]$precursors)+1] <- net.pointer } net[[net.pointer]]$successors[[x$action]] <- no.next net[[net.pointer]]$successors.visited[[x$action]] <- 1 net[[net.pointer]]$edge.rewards[[x$action]] <- x$reward } else { #There are already known states #Check whether it is one of the states we have seen. pos.no.next <- which(sapply(net[[net.pointer]]$successors[[x$action]],FUN=function(y){ identical(x$next.state,net[[y]]$state) })) no.next <- net[[net.pointer]]$successors[[x$action]][pos.no.next] if(length(no.next)==0){ #first time visit but already others there if(game.object$full.encoding){ #Cycles are not possible no.next <- integer(0) } else { if(algo.par$mem.type=="game.encoded.round"){ could.be.state <- which(sapply(1:length(net),FUN=function(y){ net[[y]]$round==(x$round+1) })) } else { could.be.state <- 1:length(net) } if(length(could.be.state)==0){ no.next <- integer(0) } else { no.next <- which(sapply(could.be.state,FUN=function(y){ identical(x$next.state,net[[y]]$state) })) } } if(length(no.next)==0){ no.next <- length(net)+1 net[[no.next]] <- init.net.node(state=x$next.state, precursors=net.pointer, round=x$round+1) } else { net[[no.next]]$precursors[length(net[[no.next]]$precursors)+1] <- net.pointer } net[[net.pointer]]$successors[[x$action]] <- c(net[[net.pointer]]$successors[[x$action]], no.next) net[[net.pointer]]$successors.visited[[x$action]] <- c(net[[net.pointer]]$successors.visited[[x$action]],1) net[[net.pointer]]$edge.rewards[[x$action]] <- c(net[[net.pointer]]$edge.rewards[[x$action]],x$reward) } else { # we have already visited the next state net[[net.pointer]]$edge.rewards[[x$action]][pos.no.next] <- (net[[net.pointer]]$edge.rewards[[x$action]][pos.no.next]*net[[net.pointer]]$successors.visited[[x$action]][pos.no.next]+x$reward)/(net[[net.pointer]]$successors.visited[[x$action]][pos.no.next]+1) net[[net.pointer]]$successors.visited[[x$action]][pos.no.next] <- 
net[[net.pointer]]$successors.visited[[x$action]][pos.no.next]+1 } } if(j==end.pointer.pos){ net.pointer.end <- net.pointer } net.pointer <- no.next if(j==ends[i]){ net[[net.pointer]]$visited <- net[[net.pointer]]$visited+1 } net.pointer.path <- c(net.pointer.path,net.pointer) } restore.point("before.while.Update.Net") #net has now been filled with new nodes (if necessary) #We may now update the paths #If there are cycles this algo does not update perfectly but still halts already.updated <- rep(FALSE,length(net)) #We go again through the path, this time from the end to the start #end states could be several with the later ones not as good #Determine Endstate value end.state.value <- Calc.Endstate.Value.QPredictions(net=net, end.pointer.pos.net=net.pointer.end, end.pointer.pos.path=end.pointer.pos-starts[i]+1, mem.path=new.mem[starts[i]:ends[i]]) end.reward <- end.state.value end.pos <- net.pointer.path[length(net.pointer.path)] net[[end.pos]]$Q.values <- rep(Calc.Reward.QPredictions(net=net, pointer=net.pointer, action=new.mem[[j]]$action, mode=algo.par$weighting.policy, mode.par=list(weighting.factor=algo.par$weighting.factor, gamma=algo.par$gamma), end.state=end.state.value, end.reward=end.reward),length(net[[net.pointer]]$Q.values)) end.state.value <- NULL already.updated[end.pos] <- TRUE for(j in ends[i]:starts[i]){ net.pointer <- net.pointer.path[j-(starts[i])+1] if(!already.updated[net.pointer]){ net[[net.pointer]]$Q.values[new.mem[[j]]$action] <- Calc.Reward.QPredictions(net=net, pointer=net.pointer, action=new.mem[[j]]$action, mode=algo.par$weighting.policy, mode.par=list(weighting.factor=algo.par$weighting.factor, gamma=algo.par$gamma), end.state=end.state.value, end.reward=end.reward) already.updated[net.pointer] <- TRUE } } backlog <- integer(0) net.pointer <- net.pointer.path[length(net.pointer.path)] #Start again from end.node while(TRUE){ restore.point("inside.Updating.While") if(!already.updated[net.pointer]){ 
a.length=length(net[[net.pointer]]$successors) net[[net.pointer]]$Q.values <- sapply(1:a.length,FUN=function(x){ Calc.Reward.QPredictions(net=net, pointer=net.pointer, action=x, mode=algo.par$weighting.policy, mode.par=list(weighting.factor=algo.par$weighting.factor, gamma=algo.par$gamma), end.state=NULL) }) if(all(is.na(net[[net.pointer]]$Q.values) | is.infinite(net[[net.pointer]]$Q.values))){ stop("NA as Q Values in updating or infinite!") } already.updated[net.pointer] <- TRUE } if(length(backlog)==0){ to.check <- which(sapply(net[[net.pointer]]$precursors,FUN=function(x){ return(!(x %in% backlog) && !(already.updated[x])) })) to.check.no <- net[[net.pointer]]$precursors[to.check] if(length(to.check.no)==0){ break } if(length(to.check)==1){ net.pointer <- to.check.no } else { net.pointer <- to.check.no[1] backlog <- c(backlog, to.check.no[-1]) } } else { to.check <- which(sapply(net[[net.pointer]]$precursors,FUN=function(x){ return(!(x %in% backlog) && !(already.updated[x])) })) if(length(to.check)==0){ #do nothing } else { to.check.no <- net[[net.pointer]]$precursors[to.check] backlog <- c(backlog,to.check.no) } net.pointer <- backlog[1] backlog <- backlog[-1] } } } return(net) } #' Train model of Q Pathing #' #' As QPredictions updates all new paths directly Replay only trains the prediction model. 
#' #' @export Replay.QPredictions <- function(model, model.par, algo.par, algo.var, game.object){ restore.point("Replay.QPredictions") no.actions <- game.object$game.par(game.object)$output.nodes x_train.raw <- lapply(1:length(algo.var$memory.net), FUN=function(x){ restore.point("x.train.raw.replay.QPredictions") rel.a <- sapply(1:length(algo.var$memory.net[[x]]$Q.values), FUN=function(y){ if(!is.na(algo.var$memory.net[[x]]$Q.values[y])){ res <- rep(0,length(algo.var$memory.net[[x]]$Q.values)+1) res[y] <- 1 res[length(algo.var$memory.net[[x]]$Q.values)+1] <- algo.var$memory.net[[x]]$Q.values[y] return(res) } else { return(NULL) } }) rel.a.vec.tmp <- unlist(rel.a, recursive=FALSE) if(is.null(rel.a.vec.tmp)){ return(NULL) } else { rel.a.vec <- t(rel.a.vec.tmp) } restore.point("inside.x.train.replay") n.row <- nrow(rel.a.vec) state.info <- matrix(rep(algo.var$memory.net[[x]]$state,n.row),nrow=n.row, byrow=TRUE) x.res <- cbind(state.info,rel.a.vec) return(x.res) }) x_train <- do.call(rbind,x_train.raw) y_train <- as.matrix(x_train[,ncol(x_train)]) x_train <- x_train[,-ncol(x_train)] if(is.null(model.par$single.dimensional) || !(model.par$single.dimensional)){ stop("Model has to support single.dimensional and has it enabled.") } #Setup necessary precision if(!is.null(model.par$enforce.increasing.precision)&&model.par$enforce.increasing.precision==TRUE){ prec.repeat <- TRUE } else { prec.repeat <- FALSE } #Main Part -> Training of model model.train <- model.par$train(model, model.par, x_train, y_train) model <- model.train$model fit.obj <- model.train$fit.obj restore.point("before.pre.training") #If model is not trained enough, repeat Training until ready if(prec.repeat && is.null(algo.var$cur.loss)){ algo.var$cur.loss <- mean(fit.obj$metrics$loss) } else if (prec.repeat) { counter <- 0 while(mean(fit.obj$metrics$loss)>algo.var$cur.loss){ counter <- counter+1 writeLines(paste0("Loss was only ",round(mean(fit.obj$metrics$loss),5), " but ",round(algo.var$cur.loss,5), " 
needed","\n",collapse="")) model.train <- model.par$train(model, model.par, x_train, y_train) model <- model.train$model fit.obj <- model.train$fit.obj if(counter>model.par$give.up.precision){ break } } } if(prec.repeat){ algo.var$cur.loss <- mean(fit.obj$metrics$loss) } return(nlist(model, algo.var)) } #' Generates best guesses based on Experience #'@export Hybrid.Predict.Action.Values.QPredictions <- function(net, no.actions, net.pointer, model, model.par, state){ restore.point("Hybrid.Predict.Action.Values.QPredictions") if(is.null(net.pointer)){ #we are flying blind rel.a <- unlist(sapply(1:no.actions, FUN=function(y){ a.vec <- rep(0,no.actions) a.vec[y] <- 1 return(a.vec) })) state.info <- matrix(rep(state,no.actions),nrow=no.actions, byrow=TRUE) x.res <- cbind(state.info,rel.a) return(as.vector(model.par$predict(model,model.par,x.res))) } else { act.vals <- net[[net.pointer]]$Q.values if(any(is.na(act.vals))){ #Build info for unknown quantities unknown <- t(unlist(sapply((1:no.actions)[is.na(act.vals)], FUN=function(y){ a.vec <- rep(0,no.actions) a.vec[y] <- 1 return(a.vec) }))) state.info <- matrix(rep(state,nrow(unknown)),nrow=nrow(unknown), byrow=TRUE) x.res <- cbind(state.info,unknown) pred.vals <- as.vector(model.par$predict(model,model.par,x.res)) act.vals[is.na(act.vals)] <- pred.vals } return(act.vals) } } #' Determines which action to take #' #' @export Act.QPredictions <- function(state, model, model.par, algo.var, game.object, eval.only=FALSE, net.pointer){ restore.point("Act.QPredictions") if(eval.only){ no.actions <- game.object$game.par(game.object)$output.nodes act.values <- Hybrid.Predict.Action.Values.QPredictions(net=algo.var$memory.net, no.actions=no.actions, net.pointer=net.pointer, model=model, model.par=model.par, state=state) return(which.is.max(act.values)) } if(algo.par$action.policy=="epsilon.greedy"){ if(runif(1) <= algo.var$epsilon){ game.par <- game.object$game.par(game.object) return(sample(1:game.par$output.nodes,1)) } else { 
no.actions <- game.object$game.par(game.object)$output.nodes act.values <- Hybrid.Predict.Action.Values.QPredictions(net=algo.var$memory.net, no.actions=no.actions, net.pointer=net.pointer, model=model, model.par=model.par, state=state) return(which.is.max(act.values)) } } stop("Wrong action policy specified in algo.par.") } #' Train a model based on Q-Learning #' #' @export Train.QPredictions <- function(model, model.par, algo.par, algo.var, game.object, episodes, eval.only=FALSE, start.w.training=TRUE){ restore.point("Train.QPredictions") score.array <- NA if(is.null(algo.var$analysis)){ algo.var$analysis <- list() algo.var$analysis$score <- NA } if(length(algo.var$memory.net)>0 && start.w.training && !eval.only){ replay.res <- Replay.QPredictions(model, model.par, algo.par, algo.var, game.object) model <- replay.res$model algo.var <- replay.res$algo.var } for(i in 1:episodes){ #restore.point("within.Train.QPredictions") state <- game.object$start.state(game.object) mem <- list() start<-TRUE net.pointer <- 1 score <- 0 while(TRUE){ restore.point("within.Train.QPredictions.II") vis.state <- t(game.object$state.2.array(game.state=state, game.object=game.object)) # not a real state but what the algorithm sees. 
Could be a lot smaller than the real game state [but might depend on encoding] action <- Act.QPredictions(state=vis.state, model=model, model.par=model.par, algo.var=algo.var, game.object=game.object, eval.only=eval.only, net.pointer=net.pointer) next.state.full <- game.object$state.transition(game.state=state,action=action,game.object=game.object) next.state <- next.state.full$next.state reward <- next.state.full$reward score <- score+reward done <- next.state.full$game.finished vis.next.state <- t(game.object$state.2.array(game.state=next.state, game.object=game.object)) round <- next.state.full$next.state$round-1 #Update net.pointer if(is.null(net.pointer) || length(algo.var$memory.net[[net.pointer]]$successors[[action]])==0){ net.pointer <- NULL } else { net.pointer <- algo.var$memory.net[[net.pointer]]$successors[[action]][which(sapply(algo.var$memory.net[[net.pointer]]$successors[[action]], FUN=function(x){ identical(vis.next.state,algo.var$memory.net[[x]]$state) }))] if(length(net.pointer)==0){ net.pointer <- NULL } } mem[[length(mem)+1]] <- list(state=vis.state, action=action, next.state=vis.next.state, reward=reward, done=done, start=start, round=round) if(done){ break } state <- next.state start <- FALSE } #Update Memory if(!eval.only){ algo.var$memory.net <- Update.Net.QPredictions(net=algo.var$memory.net, new.mem=mem, game.object=game.object, algo.par=algo.par) #Update Prediction Model if(i%%algo.par$replay.every==0){ replay.res <- Replay.QPredictions(model, model.par, algo.par, algo.var, game.object) model <- replay.res$model algo.var <- replay.res$algo.var } } if(algo.par$action.policy=="epsilon.greedy" && algo.var$epsilon > algo.par$epsilon.min && i!=episodes){ if(algo.par$epsilon.decay.type=="linear"){ algo.var$epsilon <- seq(algo.par$epsilon.start,algo.par$epsilon.min,length.out=episodes)[i+1] } else if(algo.par$epsilon.decay.type=="rate"){ decay.par <- (algo.par$epsilon.min/algo.par$epsilon.start)^(1/episodes) algo.var$epsilon <- 
decay.par*algo.var$epsilon } else { algo.var$epsilon <- algo.par$epsilon.decay*algo.var$epsilon } } score.array[i] <- score #Not used for learning, only for analysis if(is.na(algo.var$analysis$score[1])){ algo.var$analysis$score[1] <- score.array[i] } else { algo.var$analysis$score[length(algo.var$analysis$score)+1] <- score.array[i] } if(i%%algo.par$show.current.status == 0){ output.message <- paste0(c("episode: ",i," with avg score of ",round(mean(score.array[(i-algo.par$show.current.status+1):i]),2)),collapse="") print(output.message) } if(eval.only){ eval.mess <- paste0(c("eval: ",i," with score of ",round(score.array[i],2)),collapse="") print(eval.mess) } out.save <<- list(model=model, algo.var=algo.var) } return(list(model=model, algo.var=algo.var)) }
/R/QPredictions.R
no_license
NiklasPaluszkiewicz/RLR
R
false
false
28,515
r
#' Delivers some default Parameters of Q-Predictions #' #' @export Get.Def.Par.QPredictions <- function(){ #How to choose Actions & Learning action.policy <- "epsilon.greedy" #"epsilon.greedy" weighting.policy <- "SARSA" weighting.factor <- 0.1 #a from Q learning end.start <- 0.5 #all after this percentage are classified as end points gamma <- NA #It is sensible (?) to set it to delta of game #Relevant parameters of epsilon.greedy epsilon.start <- 0.1 epsilon.decay <- NA epsilon.min <- 0.001 epsilon.decay.type <- "rate" #may be "rate" or "linear" # Incorporating new information and Updating Status mem.selection <- "all" #currently no other options are available - relevant for backwards compatibility mem.type <- "game.encoded" #game.encoded or game.encoded.rounds replay.every <- 100 # After How many rounds should the prediction model be updated? # Output show.current.status <- 50 q.param <- nlist(action.policy,weighting.policy, weighting.factor, end.start, gamma, epsilon.start, epsilon.decay, epsilon.min, epsilon.decay.type,mem.selection, mem.type, show.current.status, replay.every) return(q.param) } #Q-Predictions #' Q-Predictions is rather similar to Q-learning but we use a multilayer approach:\itemize{ #' \item We assume that even though the algorithm doesn't have complete information at runtime it may be used for training #' \item A predictive neural net is used to calculate the next move of the opponent to use monte carlo studies. #' \item Assumption: We have a single starting state. #' } #' #' @param game.object Game Object as defined by \code{Get.Game.Object.<NAME>}. #' @param model.par Model parameters. If \code{NULL}, the function \code{Get.Def.Par.QPredictions()} is called. 
#' @export Setup.QPredictions <- function(game.object, algo.par=NULL, model.par){ restore.point("Setup.QPredictions") if(is.null(algo.par)){ algo.par <- Get.Def.Par.QPredictions() } game.par <- game.object$game.par(game.object) if(is.null(model.par$name)){ stop("name parameter of model.par missing!") } model <- model.par$setup(model.par, game.par) return(model) } #' Set changeable model variables #' #' Returns a list with the following items \itemize{ #' \item \strong{epsilon} Specifies how often the Algorithm tries a random move. Initialized with \code{epsilon.start} of \code{model.par}. #' \item \strong{memory.net} A net based on the next state. Each Node holds the following information:\itemize{ #' \item \strong{encoding} Game state encoded for prediction algorithm. #' \item \strong{depth} Current depth of tree. In the case of the Prisoners Dilemma equal to the round. #' \item \strong{visited} How often have we visited this node? #' \item \strong{Q.values} Q.values of this node. #' \item \strong{precursors} All possibilites to get to this node #' \item \strong{successors} All nodes to go to. Is a list itself with one entry for each action. Here a list with the following items exist: Number of Reached node. Number of visits. #' } #' } #' #' @param game.object A Game Object as defined by \code{Get.Game.Object.<Name>}. Necessary in the case of memory intitialisation. #' @param model.par Parameters of QPredictions specification. Have to be specified and should be identical to the model.par as given to \code{Setup.QPredictions()}. #' @param memory.init Which type of initialization should take place? It \code{NULL}, the option \code{none} is used. The following types are supported \itemize{ #' \item \strong{none} No initialization takes place. Memory is an empty list. #' \item \strong{self.play} The other strategies play against themselves - to understand possible secret handshakes. 
The following \code{memory.param} are expected: \itemize{ #' \item \strong{no} How often should the other strategies play against themselves? #' } #' } #' If combinations of different memories are needed, one can use the function \code{Extend.Memory.QPredictions()} #' @param memory.param Parameters necessary for the chosen \code{memory.init}. #'@export Initialise.QPredictions <- function(game.object=NULL, algo.par, memory.init=NULL, memory.param = NULL){ if(is.null(memory.init)){ memory.init <- "none" } algo.var <- list() algo.var$epsilon <- algo.par$epsilon.start algo.var$memory.net <- list() if (memory.init != "none") { algo.var <- Extend.Memory.QPredictions(algo.var, algo.par=algo.par, game.object=game.object, memory.type=memory.init, memory.param=memory.param) } return(algo.var) } #' Extend Memory by specified experiences #' #' Returns modified algo.var, where memory has been extended as specified. #' #' @param algo.var A variable algorithm object, where to be modified variables are saved. Given by \code{Initialise.QPredictions()} #' @param game.object A game object as defined by \code{Get.Game.Object.<Name>}. #' @param memory.init Which type of extension should take place? The following types are supported \itemize{ #' \item \strong{self.play} The other strategies play against themselves - to understand possible secret handshakes. If I am myself part of the other strategies, the "self" strategy is ignored. The following \code{memory.param} are expected: \itemize{ #' \item \strong{no} How often should the other strategies play against themselves? #' } #' \item \strong{solid.foundation} Not only self.play, but also a random initialisation with increasing defect probabilities.The following \code{memory.param} are expected: \itemize{ #' \item \strong{self.no} How often should the other strategies play against themselves? #' \item \strong{rep.no} How often should a random strategy be played? The defection probability is linearly increased. 
#' } #' } #' If combinations of different memories are needed, one can use the function multiple times. #' @param memory.param Parameters necessary for the chosen \code{memory.type}. #'@export Extend.Memory.QPredictions <- function(algo.var, algo.par=NULL, game.object, memory.type, memory.param=NULL){ restore.point("Extend.Memory.QPredictions") if(memory.type == "self.play"){ new.mem <- unlist(lapply(1:memory.param$no,FUN=function(x){ if(!is.null(game.object$supports) && any(game.object$supports == "memory.self.play")){ return(game.object$memory.self.play(game.object, algo.par)) } }), recursive=FALSE) } else if (memory.type== "solid.foundation"){ self.mem <- unlist(lapply(1:memory.param$self.no,FUN=function(x){ if(!is.null(game.object$supports) && any(game.object$supports == "memory.self.play")){ return(game.object$memory.self.play(game.object, algo.par)) } }), recursive=FALSE) def.arr <- seq(0,1,length.out = memory.param$rep.no) rand.mem <- unlist(lapply(def.arr,FUN=function(x){ if(!is.null(game.object$supports) && any(game.object$supports == "memory.random.play")){ algo.par$def.prob <- x return(game.object$memory.random.play(game.object, algo.par)) } }), recursive=FALSE) new.mem <- c(self.mem, rand.mem) } else { stop(paste0("memory.type ",memory.type," not supported.")) } algo.var$memory.net <- Update.Net.QPredictions(algo.var$memory.net, new.mem, game.object, algo.par) return(algo.var) } #'Calculate Expected Value based on action #' #'Needed to calculate the next Q value. If converged, should result in the same value as the respective Q.value itself. #'Also outputs edge value. #' #' @export Calc.Reward.QPredictions.expectedQ <- function(net, pointer, action, option="ignore", mode){ restore.point("Calc.Reward.QPredictions.expectedQ") if(option!="ignore") { stop("Not yet implemented. 
Use option ignore in Calc.Reward.QPredictions.expectedQ") } suc.vec <- net[[pointer]]$successors[[action]] if(option=="ignore" && length(suc.vec)==0){ #No successors known return(NA) } suc.weights <- net[[pointer]]$successors.visited[[action]] rel.weights <- suc.weights/sum(suc.weights) if(mode=="max"){ Q.values <- sapply(suc.vec,FUN=function(x){ if(all(is.na(net[[x]]$Q.values))){ return(NA) } else { return(max(net[[x]]$Q.values, na.rm=TRUE)) } }) expQ <- sum((Q.values)*rel.weights) return(expQ) } else { stop("Not implemented yet.") } } #'Calculate Expected immediate Reward based on action #' #'Returns expected Reward based on historic data. Only used inuternally to update net as input for q values. #' #' @export Calc.Reward.QPredictions.expectedReward <- function(net, pointer, action, mode="history"){ restore.point("Calc.Reward.QPredictions.expectedReward") if(mode!="history") { stop("Not yet implemented. Use option history in Calc.Reward.QPredictions.expectedReward") } suc.vec <- net[[pointer]]$successors[[action]] if(length(suc.vec)==0){ #No successors known return(NA) } suc.weights <- net[[pointer]]$successors.visited[[action]] rel.weights <- suc.weights/sum(suc.weights) edge.rewards <- net[[pointer]]$edge.rewards[[action]] if(mode=="history"){ expR <- sum(edge.rewards*rel.weights) return(expR) } else { stop("Not implemented yet.") } } #'Calc.Reward.QPredictions #' #' @export Calc.Reward.QPredictions <- function(net, pointer, action, mode, mode.par, end.state=NULL, end.reward=NULL){ restore.point("Calc.Reward.QPredictions") if(!is.na(net[[pointer]]$Q.values[action])){ Q.current <- net[[pointer]]$Q.values[action] } else { #Here optimistic evaluation might take place, but we path through Q.current <- NA } if(!is.null(end.state)){ #there are no relevant following Q.states Q.next <- end.state/(1-algo.par$gamma) #Expected value of repeating end.state infinitely often but with discontinuity probability of gamma reward <- end.reward } else { if(mode=="max"){ Q.next <- 
Calc.Reward.QPredictions.expectedQ(net=net, pointer=pointer,action=action, mode=mode) if(length(Q.next)>1){ stop("Q to big") } if(is.infinite(Q.next)){ stop("is infinite") } reward <- Calc.Reward.QPredictions.expectedReward(net=net, pointer=pointer,action=action) } else { stop("weighting policy unknown") } } Q.update <- (reward + algo.par$gamma * Q.next) if(is.na(Q.current)){ res <- Q.update } else { res <- (1-algo.par$weighting.factor) * Q.current + algo.par$weighting.factor * Q.update } return(res) } #' Calculates Endstate value #' #' Currently no option ist supported. Endstate value is calculated as average from end.pointer.pos to end of mem.path. #' #'@export Calc.Endstate.Value.QPredictions <- function(net, end.pointer.pos.net, end.pointer.pos.path, mem.path, option=NULL){ restore.point("Calc.Endstate.Value.QPredictions") if(is.null(option)){ option <- "SARSA" } if(option=="SARSA"){ res <- mean(unlist(lapply(mem.path[end.pointer.pos.path:length(mem.path)],FUN=function(x){x$reward}))) } else { stop("Endstate-Value can't be calculated due to incorrect option.") } return(res) } #'Internal Function #' #'Expects one or several Paths. 
#' #'@export Update.Net.QPredictions <- function(net, new.mem, game.object, algo.par){ init.net.node <- function(state, precursors, round=1){ visited <- 0 successors <- rep( list(list()), game.object$game.par(game.object)$output.nodes) successors.visited <- rep( list(list()), game.object$game.par(game.object)$output.nodes) edge.rewards <- rep( list(list()), game.object$game.par(game.object)$output.nodes) Q.values <- rep(NA, game.object$game.par(game.object)$output.nodes) round <- round return(nlist(state, visited, precursors, successors, successors.visited, edge.rewards, Q.values, round)) } restore.point("Update.Net.QPredictions") if(length(net)==0){ #initialise root net[[1]] <- init.net.node(state=new.mem[[1]]$state, precursors=0) } #Identify start/end points starts <- which(sapply(new.mem,FUN=function(x){x$start})) ends <- which(sapply(new.mem,FUN=function(x){x$done})) no.paths <- length(starts) #Update net for(i in 1:no.paths){ net.pointer.path <- c(1) end.pointer.pos <- ceiling(algo.par$end.start*(ends[i]-starts[i]))+1+starts[i] for(j in starts[i]:ends[i]){ #restore.point("within.for.update") x <- new.mem[[j]] if(j==starts[i]){ net.pointer <- 1 } net[[net.pointer]]$visited <- net[[net.pointer]]$visited+1 if(length(net[[net.pointer]]$successors[[x$action]])==0){ #first time visit if(game.object$full.encoding){ #Cycles are not possible no.next <- integer(0) } else { if(algo.par$mem.type=="game.encoded.round"){ could.be.state <- which(sapply(1:length(net),FUN=function(y){ net[[y]]$round==(x$round+1) })) } else { could.be.state <- 1:length(net) } if(length(could.be.state)==0){ no.next <- integer(0) } else { no.next <- which(sapply(could.be.state,FUN=function(y){ identical(x$next.state,net[[y]]$state) })) } } if(length(no.next)==0){ no.next <- length(net)+1 net[[no.next]] <- init.net.node(state=x$next.state, precursors=net.pointer, round=x$round+1) } else { ### Here we have to check whether the precursor is already identical to the net.pointer 
net[[no.next]]$precursors[length(net[[no.next]]$precursors)+1] <- net.pointer } net[[net.pointer]]$successors[[x$action]] <- no.next net[[net.pointer]]$successors.visited[[x$action]] <- 1 net[[net.pointer]]$edge.rewards[[x$action]] <- x$reward } else { #There are already known states #Check whether it is one of the states we have seen. pos.no.next <- which(sapply(net[[net.pointer]]$successors[[x$action]],FUN=function(y){ identical(x$next.state,net[[y]]$state) })) no.next <- net[[net.pointer]]$successors[[x$action]][pos.no.next] if(length(no.next)==0){ #first time visit but already others there if(game.object$full.encoding){ #Cycles are not possible no.next <- integer(0) } else { if(algo.par$mem.type=="game.encoded.round"){ could.be.state <- which(sapply(1:length(net),FUN=function(y){ net[[y]]$round==(x$round+1) })) } else { could.be.state <- 1:length(net) } if(length(could.be.state)==0){ no.next <- integer(0) } else { no.next <- which(sapply(could.be.state,FUN=function(y){ identical(x$next.state,net[[y]]$state) })) } } if(length(no.next)==0){ no.next <- length(net)+1 net[[no.next]] <- init.net.node(state=x$next.state, precursors=net.pointer, round=x$round+1) } else { net[[no.next]]$precursors[length(net[[no.next]]$precursors)+1] <- net.pointer } net[[net.pointer]]$successors[[x$action]] <- c(net[[net.pointer]]$successors[[x$action]], no.next) net[[net.pointer]]$successors.visited[[x$action]] <- c(net[[net.pointer]]$successors.visited[[x$action]],1) net[[net.pointer]]$edge.rewards[[x$action]] <- c(net[[net.pointer]]$edge.rewards[[x$action]],x$reward) } else { # we have already visited the next state net[[net.pointer]]$edge.rewards[[x$action]][pos.no.next] <- (net[[net.pointer]]$edge.rewards[[x$action]][pos.no.next]*net[[net.pointer]]$successors.visited[[x$action]][pos.no.next]+x$reward)/(net[[net.pointer]]$successors.visited[[x$action]][pos.no.next]+1) net[[net.pointer]]$successors.visited[[x$action]][pos.no.next] <- 
net[[net.pointer]]$successors.visited[[x$action]][pos.no.next]+1 } } if(j==end.pointer.pos){ net.pointer.end <- net.pointer } net.pointer <- no.next if(j==ends[i]){ net[[net.pointer]]$visited <- net[[net.pointer]]$visited+1 } net.pointer.path <- c(net.pointer.path,net.pointer) } restore.point("before.while.Update.Net") #net has now been filled with new nodes (if necessary) #We may now update the paths #If there are cycles this algo does not update perfectly but still halts already.updated <- rep(FALSE,length(net)) #We go again through the path, this time from the end to the start #end states could be several with the later ones not as good #Determine Endstate value end.state.value <- Calc.Endstate.Value.QPredictions(net=net, end.pointer.pos.net=net.pointer.end, end.pointer.pos.path=end.pointer.pos-starts[i]+1, mem.path=new.mem[starts[i]:ends[i]]) end.reward <- end.state.value end.pos <- net.pointer.path[length(net.pointer.path)] net[[end.pos]]$Q.values <- rep(Calc.Reward.QPredictions(net=net, pointer=net.pointer, action=new.mem[[j]]$action, mode=algo.par$weighting.policy, mode.par=list(weighting.factor=algo.par$weighting.factor, gamma=algo.par$gamma), end.state=end.state.value, end.reward=end.reward),length(net[[net.pointer]]$Q.values)) end.state.value <- NULL already.updated[end.pos] <- TRUE for(j in ends[i]:starts[i]){ net.pointer <- net.pointer.path[j-(starts[i])+1] if(!already.updated[net.pointer]){ net[[net.pointer]]$Q.values[new.mem[[j]]$action] <- Calc.Reward.QPredictions(net=net, pointer=net.pointer, action=new.mem[[j]]$action, mode=algo.par$weighting.policy, mode.par=list(weighting.factor=algo.par$weighting.factor, gamma=algo.par$gamma), end.state=end.state.value, end.reward=end.reward) already.updated[net.pointer] <- TRUE } } backlog <- integer(0) net.pointer <- net.pointer.path[length(net.pointer.path)] #Start again from end.node while(TRUE){ restore.point("inside.Updating.While") if(!already.updated[net.pointer]){ 
a.length=length(net[[net.pointer]]$successors) net[[net.pointer]]$Q.values <- sapply(1:a.length,FUN=function(x){ Calc.Reward.QPredictions(net=net, pointer=net.pointer, action=x, mode=algo.par$weighting.policy, mode.par=list(weighting.factor=algo.par$weighting.factor, gamma=algo.par$gamma), end.state=NULL) }) if(all(is.na(net[[net.pointer]]$Q.values) | is.infinite(net[[net.pointer]]$Q.values))){ stop("NA as Q Values in updating or infinite!") } already.updated[net.pointer] <- TRUE } if(length(backlog)==0){ to.check <- which(sapply(net[[net.pointer]]$precursors,FUN=function(x){ return(!(x %in% backlog) && !(already.updated[x])) })) to.check.no <- net[[net.pointer]]$precursors[to.check] if(length(to.check.no)==0){ break } if(length(to.check)==1){ net.pointer <- to.check.no } else { net.pointer <- to.check.no[1] backlog <- c(backlog, to.check.no[-1]) } } else { to.check <- which(sapply(net[[net.pointer]]$precursors,FUN=function(x){ return(!(x %in% backlog) && !(already.updated[x])) })) if(length(to.check)==0){ #do nothing } else { to.check.no <- net[[net.pointer]]$precursors[to.check] backlog <- c(backlog,to.check.no) } net.pointer <- backlog[1] backlog <- backlog[-1] } } } return(net) } #' Train model of Q Pathing #' #' As QPredictions updates all new paths directly Replay only trains the prediction model. 
#'
#' Rebuilds a supervised training set from the whole memory net (one row per
#' known Q value: [state, one-hot(action)] -> Q) and refits the prediction
#' model on it.
#'
#' @param model Prediction model object as expected by \code{model.par$train}.
#' @param model.par List of model functions/settings; must have
#'   \code{single.dimensional} enabled and provide \code{train}.
#' @param algo.par Algorithm parameters (kept for a uniform interface).
#' @param algo.var Algorithm state; \code{memory.net} supplies the training
#'   data and \code{cur.loss} tracks the best mean loss reached so far.
#' @param game.object Game definition; only the number of output nodes is read.
#' @return A list (\code{nlist}) with the updated \code{model} and \code{algo.var}.
#' @export
Replay.QPredictions <- function(model, model.par, algo.par, algo.var, game.object){
  restore.point("Replay.QPredictions")
  no.actions <- game.object$game.par(game.object)$output.nodes
  # Build one feature block per net node; the target Q value is appended as
  # the last column and split off into y_train below.
  x_train.raw <- lapply(1:length(algo.var$memory.net), FUN=function(x){
    restore.point("x.train.raw.replay.QPredictions")
    rel.a <- sapply(1:length(algo.var$memory.net[[x]]$Q.values), FUN=function(y){
      if(!is.na(algo.var$memory.net[[x]]$Q.values[y])){
        # one-hot action vector with the known Q value in the extra last slot
        res <- rep(0,length(algo.var$memory.net[[x]]$Q.values)+1)
        res[y] <- 1
        res[length(algo.var$memory.net[[x]]$Q.values)+1] <- algo.var$memory.net[[x]]$Q.values[y]
        return(res)
      } else {
        # unknown Q value -> contributes no training row
        return(NULL)
      }
    })
    rel.a.vec.tmp <- unlist(rel.a, recursive=FALSE)
    if(is.null(rel.a.vec.tmp)){
      # node has no known Q values at all; skip it
      return(NULL)
    } else {
      # NOTE(review): when several Q values are known, sapply() returns a
      # matrix and unlist()/t() collapse it to a single row — confirm this
      # matches the layout model.par$train expects.
      rel.a.vec <- t(rel.a.vec.tmp)
    }
    restore.point("inside.x.train.replay")
    n.row <- nrow(rel.a.vec)
    state.info <- matrix(rep(algo.var$memory.net[[x]]$state,n.row),nrow=n.row, byrow=TRUE)
    x.res <- cbind(state.info,rel.a.vec)
    return(x.res)
  })
  x_train <- do.call(rbind,x_train.raw)
  # last column holds the target Q value; the rest is the feature matrix
  y_train <- as.matrix(x_train[,ncol(x_train)])
  x_train <- x_train[,-ncol(x_train)]
  if(is.null(model.par$single.dimensional) || !(model.par$single.dimensional)){
    stop("Model has to support single.dimensional and has it enabled.")
  }
  # Setup necessary precision: optionally force the mean loss to keep improving
  if(!is.null(model.par$enforce.increasing.precision)&&model.par$enforce.increasing.precision==TRUE){
    prec.repeat <- TRUE
  } else {
    prec.repeat <- FALSE
  }
  # Main part -> training of the model
  model.train <- model.par$train(model, model.par, x_train, y_train)
  model <- model.train$model
  fit.obj <- model.train$fit.obj
  restore.point("before.pre.training")
  # If the model is not trained well enough, repeat training until the mean
  # loss beats the previously recorded one (or we give up after
  # model.par$give.up.precision attempts).
  if(prec.repeat && is.null(algo.var$cur.loss)){
    algo.var$cur.loss <- mean(fit.obj$metrics$loss)
  } else if (prec.repeat) {
    counter <- 0
    while(mean(fit.obj$metrics$loss)>algo.var$cur.loss){
      counter <- counter+1
      writeLines(paste0("Loss was only ",round(mean(fit.obj$metrics$loss),5), " but ",round(algo.var$cur.loss,5), " needed","\n",collapse=""))
      model.train <- model.par$train(model, model.par, x_train, y_train)
      model <- model.train$model
      fit.obj <- model.train$fit.obj
      if(counter>model.par$give.up.precision){
        break
      }
    }
  }
  if(prec.repeat){
    algo.var$cur.loss <- mean(fit.obj$metrics$loss)
  }
  return(nlist(model, algo.var))
}

#' Generates best guesses based on Experience
#'
#' Returns one value per action: Q values known in the memory net are used
#' directly, unknown ones are filled in by the prediction model.
#' @export
Hybrid.Predict.Action.Values.QPredictions <- function(net, no.actions, net.pointer, model, model.par, state){
  restore.point("Hybrid.Predict.Action.Values.QPredictions")
  if(is.null(net.pointer)){
    # we are flying blind: the state is unknown to the net, so every action
    # value must come from the prediction model
    rel.a <- unlist(sapply(1:no.actions, FUN=function(y){
      a.vec <- rep(0,no.actions)
      a.vec[y] <- 1
      return(a.vec)
    }))
    # NOTE(review): rel.a has length no.actions^2 while state.info has
    # no.actions rows — confirm cbind() produces the intended input layout
    # for no.actions > 1.
    state.info <- matrix(rep(state,no.actions),nrow=no.actions, byrow=TRUE)
    x.res <- cbind(state.info,rel.a)
    return(as.vector(model.par$predict(model,model.par,x.res)))
  } else {
    act.vals <- net[[net.pointer]]$Q.values
    if(any(is.na(act.vals))){
      # Build info for unknown quantities: one-hot rows for NA actions only
      unknown <- t(unlist(sapply((1:no.actions)[is.na(act.vals)], FUN=function(y){
        a.vec <- rep(0,no.actions)
        a.vec[y] <- 1
        return(a.vec)
      })))
      state.info <- matrix(rep(state,nrow(unknown)),nrow=nrow(unknown), byrow=TRUE)
      x.res <- cbind(state.info,unknown)
      pred.vals <- as.vector(model.par$predict(model,model.par,x.res))
      act.vals[is.na(act.vals)] <- pred.vals
    }
    return(act.vals)
  }
}

#' Determines which action to take
#'
#' Greedy action in evaluation mode; epsilon-greedy during training.
#' NOTE(review): algo.par is read from the calling/global environment — it is
#' not a parameter of this function; confirm this is intended.
#' @export
Act.QPredictions <- function(state, model, model.par, algo.var, game.object, eval.only=FALSE, net.pointer){
  restore.point("Act.QPredictions")
  if(eval.only){
    # evaluation: always exploit the best known/predicted action value
    no.actions <- game.object$game.par(game.object)$output.nodes
    act.values <- Hybrid.Predict.Action.Values.QPredictions(net=algo.var$memory.net, no.actions=no.actions, net.pointer=net.pointer, model=model, model.par=model.par, state=state)
    return(which.is.max(act.values))
  }
  if(algo.par$action.policy=="epsilon.greedy"){
    if(runif(1) <= algo.var$epsilon){
      # explore: uniformly random action
      game.par <- game.object$game.par(game.object)
      return(sample(1:game.par$output.nodes,1))
    } else {
      # exploit: best known/predicted action value
      no.actions <- game.object$game.par(game.object)$output.nodes
      act.values <- Hybrid.Predict.Action.Values.QPredictions(net=algo.var$memory.net, no.actions=no.actions, net.pointer=net.pointer, model=model, model.par=model.par, state=state)
      return(which.is.max(act.values))
    }
  }
  stop("Wrong action policy specified in algo.par.")
}

#' Train a model based on Q-Learning
#'
#' Plays \code{episodes} full games, records each transition, feeds the
#' episode into the memory net (\code{Update.Net.QPredictions}) and retrains
#' the prediction model every \code{algo.par$replay.every} episodes. With
#' \code{eval.only=TRUE} no learning takes place; scores are only reported.
#'
#' @param episodes Number of games to play.
#' @param eval.only If TRUE, act greedily and do not update net or model.
#' @param start.w.training If TRUE and a memory net exists, train the model
#'   once before the first episode.
#' @return A list with the trained \code{model} and the updated \code{algo.var}.
#' @export
Train.QPredictions <- function(model, model.par, algo.par, algo.var, game.object, episodes, eval.only=FALSE, start.w.training=TRUE){
  restore.point("Train.QPredictions")
  score.array <- NA
  if(is.null(algo.var$analysis)){
    algo.var$analysis <- list()
    algo.var$analysis$score <- NA
  }
  if(length(algo.var$memory.net)>0 && start.w.training && !eval.only){
    replay.res <- Replay.QPredictions(model, model.par, algo.par, algo.var, game.object)
    model <- replay.res$model
    algo.var <- replay.res$algo.var
  }
  for(i in 1:episodes){
    #restore.point("within.Train.QPredictions")
    state <- game.object$start.state(game.object)
    mem <- list()
    start<-TRUE
    net.pointer <- 1
    score <- 0
    # play one episode to the end, recording every transition in mem
    while(TRUE){
      restore.point("within.Train.QPredictions.II")
      vis.state <- t(game.object$state.2.array(game.state=state, game.object=game.object)) # not a real state but what the algorithm sees. Could be a lot smaller than the real game state [but might depend on encoding]
      action <- Act.QPredictions(state=vis.state, model=model, model.par=model.par, algo.var=algo.var, game.object=game.object, eval.only=eval.only, net.pointer=net.pointer)
      next.state.full <- game.object$state.transition(game.state=state,action=action,game.object=game.object)
      next.state <- next.state.full$next.state
      reward <- next.state.full$reward
      score <- score+reward
      done <- next.state.full$game.finished
      vis.next.state <- t(game.object$state.2.array(game.state=next.state, game.object=game.object))
      # NOTE(review): `round` shadows base::round inside this loop body
      round <- next.state.full$next.state$round-1
      # Update net.pointer: follow the successor edge whose stored state
      # matches the observed next state; NULL means we left known territory
      if(is.null(net.pointer) || length(algo.var$memory.net[[net.pointer]]$successors[[action]])==0){
        net.pointer <- NULL
      } else {
        net.pointer <- algo.var$memory.net[[net.pointer]]$successors[[action]][which(sapply(algo.var$memory.net[[net.pointer]]$successors[[action]], FUN=function(x){
          identical(vis.next.state,algo.var$memory.net[[x]]$state)
        }))]
        if(length(net.pointer)==0){
          net.pointer <- NULL
        }
      }
      mem[[length(mem)+1]] <- list(state=vis.state, action=action, next.state=vis.next.state, reward=reward, done=done, start=start, round=round)
      if(done){
        break
      }
      state <- next.state
      start <- FALSE
    }
    # Update memory with the finished episode
    if(!eval.only){
      algo.var$memory.net <- Update.Net.QPredictions(net=algo.var$memory.net, new.mem=mem, game.object=game.object, algo.par=algo.par)
      # Update prediction model every replay.every episodes
      if(i%%algo.par$replay.every==0){
        replay.res <- Replay.QPredictions(model, model.par, algo.par, algo.var, game.object)
        model <- replay.res$model
        algo.var <- replay.res$algo.var
      }
    }
    # Epsilon decay (skipped on the last episode); three schedules supported
    if(algo.par$action.policy=="epsilon.greedy" && algo.var$epsilon > algo.par$epsilon.min && i!=episodes){
      if(algo.par$epsilon.decay.type=="linear"){
        algo.var$epsilon <- seq(algo.par$epsilon.start,algo.par$epsilon.min,length.out=episodes)[i+1]
      } else if(algo.par$epsilon.decay.type=="rate"){
        # multiplicative rate chosen so epsilon.min is reached after `episodes`
        decay.par <- (algo.par$epsilon.min/algo.par$epsilon.start)^(1/episodes)
        algo.var$epsilon <- decay.par*algo.var$epsilon
      } else {
        algo.var$epsilon <- algo.par$epsilon.decay*algo.var$epsilon
      }
    }
    score.array[i] <- score #Not used for learning, only for analysis
    if(is.na(algo.var$analysis$score[1])){
      algo.var$analysis$score[1] <- score.array[i]
    } else {
      algo.var$analysis$score[length(algo.var$analysis$score)+1] <- score.array[i]
    }
    if(i%%algo.par$show.current.status == 0){
      output.message <- paste0(c("episode: ",i," with avg score of ",round(mean(score.array[(i-algo.par$show.current.status+1):i]),2)),collapse="")
      print(output.message)
    }
    if(eval.only){
      eval.mess <- paste0(c("eval: ",i," with score of ",round(score.array[i],2)),collapse="")
      print(eval.mess)
    }
    # NOTE(review): global checkpoint via `<<-` — side effect outside the
    # function; confirm this crash-recovery hook is intended.
    out.save <<- list(model=model, algo.var=algo.var)
  }
  return(list(model=model, algo.var=algo.var))
}
\name{ExCluster}
\alias{ExCluster}
\title{
ExCluster is the main function in this package, which clusters exon bins within
each gene to determine significantly differentially expressed exon clusters.
}
\description{
The ExCluster function takes an input of normalized exon bin read counts per
sample, and only accepts data from exactly two conditions at once, requiring
two biological replicates per condition. ExCluster also requires GFF
annotations to annotate exon bins.
}
\usage{
ExCluster(exon.Counts, cond.Nums, annot.GFF, GFF.File, out.Dir, result.Filename,
combine.Exons=TRUE, plot.Results=FALSE, FDR.cutoff=0.05)
}
\arguments{
\item{exon.Counts}{exon.Counts must be assigned a data frame of normalized read
counts per exon bin, which are attained from running the processCounts function.
For example, if the results of processCounts are assigned to the normCounts
variable, exon.Counts is assigned as: exon.Counts=normCounts
This input is required.
}
\item{cond.Nums}{cond.Nums must be assigned an array of sample numbers
corresponding, in exact order, to each BAM file counted and passed into the
exon.Counts argument. The length of cond.Nums must also match the number of
columns in exon.Counts exactly. For example, analyzing data with 3 biological
replicates per each of two conditions, in that order, would be denoted as:
cond.Nums=c(1,1,1,2,2,2)
cond.Nums must be given at least 2 biological replicates per condition, and
exactly 2 conditions -- otherwise, the ExCluster package will throw an error
describing the problem. This is a required input.
}
\item{annot.GFF}{If this argument is specified, it must be given a GFF
annotation data frame, which is generated from the GFF_convert function.
For example, it can be specified as annot.GFF=GFF, assuming your GFF data frame
is assigned to the GFF variable. Either annot.GFF or GFF.File must be
specified. If both are specified, annot.GFF will take priority.
}
\item{GFF.File}{If this argument is specified, it must be given a full file
path to a GFF file, including file name and extension. For example, this may
look like: /Users/username/path/to/file.gff
This argument is not required, but either annot.GFF or GFF.File must be
specified.
}
\item{out.Dir}{The out.Dir argument should be assigned a character string
specifying a full folder path where results may be written. This should be a
folder specific to the data being analyzed, to avoid writing results from
different analyses to the same folder. This argument is not required, but it is
STRONGLY recommended that you write out the results of the ExCluster analysis,
as this portion of the package can take well over 2 hours to complete.
An example path for out.Dir would be: /Users/username/Documents/RNA-seq/project_A/
}
\item{result.Filename}{result.Filename should be given a character string
specifying the specific filename for the ExCluster results table to be written
to, within the out.Dir folder. File extension specification is not necessary,
as '.txt' will be added to the result.Filename regardless. This argument is not
required, although it may be helpful to name your results by including both
condition names, so the specific comparison made is easily identifiable at a
later point in time. By default, result.Filename will be assigned the value
"ExClust_Results". An example filename that may be helpful could be:
"ExClust_res_GeneA_shRNA_vs_scramble"
}
\item{combine.Exons}{combine.Exons should be assigned a logical value of either
TRUE or FALSE. This denotes whether exon bins which are always co-expressed in
the same transcripts should be combined into 'super-exons'. Doing this can be
helpful to increase exon read depth, and it greatly reduces computation time.
However, this should only be done in a standard RNA-seq analysis, when no
instances of aberrant splicing are predicted.
If one suspects aberrant splicing in one of your conditions, this argument should not be set. By default combine.Exons=FALSE } \item{plot.Results}{plot.Results should be assigned a logical value of either TRUE or FALSE. This determines whether or not the ExCluster function should automatically run the plotExonlog2FC function, and plot exon bin log2FCs for each significantly differentially expressed gene. It is generally helpful to run this alongside your analysis, as it saves time. However, your ExCluster results can be saved and read back into R at a later date, from which you can run the plotExonlog2FC function separately. By default plot.Results is set to FALSE. } \item{FDR.cutoff}{The FDR.cutoff argument should be assigned a value between 0.01 and 0.2. Using FDR cutoffs outside these bounds is not recommended. This number determines which false discovery rate cutoff will be used to discover significant genes by ExCluster. However, this parameter is only used if plot.Results is specified. 
By default FDR.cutoff=0.05
}
}
\examples{
# specify the path to the normCounts file in the ExCluster package
countsPath<- system.file("extdata","normCounts.txt",package="ExCluster")
# now read in the normCounts.txt file
normCounts <- read.table(file=countsPath,header=TRUE,row.names=1,
stringsAsFactors=FALSE)
# now grab the path to the sub-sampled example GFF file
GFF_file <- system.file("extdata","sub_gen.v23.ExClust.gff3",package="ExCluster")
# assign condition numbers to your samples (we have 4 samples, 2 replicates per condition)
condNums <- c(1,1,2,2)
# now we run ExCluster, assigning its output to the ExClustResults variable
# we are not writing out the ExClustResults table, nor are we plotting exons
# we also use combine.Exons=TRUE, since we are conducting a standard analysis
ExClust_Results <- ExCluster(exon.Counts=normCounts,cond.Nums=condNums,
GFF.File=GFF_file)
}
\value{
This is the main function of the ExCluster package, and returns a data frame of
exon bin log2FCs, log2 read variances, exon clustering per gene, p-values,
FDRs, and normalized exon counts. The results of the ExCluster function should
typically be assigned to a variable, such as 'ExClustResults'.
}
\references{
Charrad M., Ghazzali N., Boiteau V., Niknafs A. (2014). NbClust: An R Package
for Determining the Relevant Number of Clusters in a Data Set. Journal of
Statistical Software, 61(6), 1-36.
}
\author{
R. Matthew Tanner
}
\note{
The ExCluster package uses a scatter-distance index function to optimally cut
hierarchically clustered exons within each gene. The code for the functions
required was adapted, in part or in whole, from the NbClust R package
(Charrad et al., 2014). These sections of the code are explicitly specified,
and the authors of NbClust provide no warranty for the use and functionality
of said code.
}
/man/ExCluster.Rd
no_license
RMTbioinfo/ExCluster
R
false
false
6,747
rd
\name{ExCluster} \alias{ExCluster} \title{ ExCluster is the main function in this package, which clusters exon bins within each gene to determine signficiantly differentially expressed exon clusters. } \description{ The ExCluster function takes an input of normalized exon bin read counts per sample, and only accepts data from exactly two conditions at once, requiring two biological replicates per condition. ExCluster also requires GFF annotations to annotate exon bins } \usage{ ExCluster(exon.Counts, cond.Nums, annot.GFF, GFF.File, out.Dir, result.Filename, combine.Exons=TRUE, plot.Results=FALSE, FDR.cutoff=0.05) } \arguments{ \item{exon.Counts}{exon.Counts must be assigned a data frame of normalized read counts per exon bin, which are attained from running the processCounts function. For example, if the results of processCounts are assigned to the normCounts variable, exon.Counts is assigned as: exon.Counts=normCounts This input is required. } \item{cond.Nums}{cond.Nums must be assigned an array of sample numbers corresponding, in exact order, to each BAM file counted and passed into the exon.Counts argument. The length of cond.Nums must also match the number of columns in exon.Counts exactly. For example, analyzing data with 3 biological replicates per each of two conditions, in that order, would be denoted as: cond.Nums=c(1,1,1,2,2,2) cond.Nums must be given at least 2 biological replicates per condition, and exactly 2 conditions -- otherwise, the ExCluster package will throw an error describing the problem. This is a required input. } \item{annot.GFF}{If this argument is specified, it must be given a GFF annotation data frame, which is generated from the GFF_convert function. For example, it can be specified as annot.GFF=GFF, assuming your GFF data frame is assigned to the GFF variable. Either annot.GFF or GFF.File must be specified. If both are specified, annot.GFF will take priority. 
} \item{GFF.File}{If this argument is specified, it must be given a full file path to a GFF file, including file name and extension. For example, this may look like: /Users/username/path/to/file.gff This argument is not required, but either annot.GFF or GFF.File must be specified. } \item{out.Dir}{The out.Dir argument should be assigned a character string specifying a full folder path where results may be written. This should be a specific folder to the data being analyzed, to avoid writing results from different analyses to the same folder.This argument is not required, but it is STRONGLY recommended that you write out the results of the ExCluster analysis, as this portion of the package can take well over 2 hours to complete. An example path for out.Dir would be: /Users/username/Documents/RNA-seq/project_A/ } \item{result.Filename}{result.Filename should be given a character string specifying the specific filename for the ExCluster results table to be written to, within the out.Dir folder. File extension specification is not necessary, as '.txt.' will be added to the result.Filename regardless. This argument is not required, although it may be helpful to name your results by including both condition names, so the specific comparison made is easily identifiable at a later point in time. By default, result.Filename will be assigned the value "ExClust_Results". An example filename that may be helpful could be: "ExClust_res_GeneA_shRNA_vs_scramble" } \item{combine.Exons}{combine.Exons should be assigned a logical value of either TRUE or FALSE. This denotes whether exon bins which are always co-expressed in the same transcripts should be combined into 'super-exons'. Doing this can be helpful to increase exon read depth, and it greatly reduces computation time. However, this should only be done in a standard RNA-seq analysis, when no instances of abberant splicing are predicted. 
If one suspects aberrant splicing in one of your conditions, this argument should not be set. By default combine.Exons=FALSE } \item{plot.Results}{plot.Results should be assigned a logical value of either TRUE or FALSE. This determines whether or not the ExCluster function should automatically run the plotExonlog2FC function, and plot exon bin log2FCs for each significantly differentially expressed gene. It is generally helpful to run this alongside your analysis, as it saves time. However, your ExCluster results can be saved and read back into R at a later date, from which you can run the plotExonlog2FC function separately. By default plot.Results is set to FALSE. } \item{FDR.cutoff}{The FDR.cutoff argument should be assigned a value between 0.01 and 0.2. Using FDR cutoffs outside these bounds is not recommended. This number determines which false discovery rate cutoff will be used to discover significant genes by ExCluster. However, this parameter is only used if plot.Results is specified. 
By default FDR.cutoff=0.05 } } \examples{ # specify the path to the normCounts file in the ExCluster package countsPath<- system.file("extdata","normCounts.txt",package="ExCluster") # now read in the normCounts.txt file normCounts <- read.table(file=countsPath,header=TRUE,row.names=1, stringsAsFactors=FALSE) # now grab the path to the sub-sampled example GFF file GFF_file <- system.file("extdata","sub_gen.v23.ExClust.gff3",package="ExCluster") # assign condition numbers to your samples (we have 4 samples, 2 replicates per condition) condNums <- c(1,1,2,2) # now we run ExCluster, assigning its output to the ExClustResults variable # we are not writing out the ExClustResults table, nor are we plotting exons # we also use combine.Exons=TRUE, since we are conducting a standard analysis ExClust_Results <- ExCluster(exon.Counts=normCounts,cond.Nums=condNums, GFF.File=GFF_file) } \value{ This is the main function of the ExCluster package, and returns a data frame of exon bin log2FCs, log2 read variances, exon clustering per gene, p-values, FDRs, and normalized exon counts. The results of the ExCluster function should typically be assigned to a variable, such as 'ExClustResults'. } \references{ Charrad M., Ghazzali N., Boiteau V., Niknafs A. (2014). NbClust: An R Package for Determining the Relevant Number of Clusters in a Data Set. Journal of Statistical Software, 61(6), 1-36. } \author{ R. Matthew Tanner } \note{ The ExCluster packages uses a scatter-distance index function to optimally cut hierarchically clustered exons within each gene. The code for the functions required were adapted in part, or in whole, from the NbClust R package (Charrad et al., 2014). These sections of the code are explicity specified, and the authors of NbClust provide no warranty for the use and fucntionality of said code. }
# Getting and Cleaning Data Project John Hopkins Coursera
# Author: tfvip2008

setwd("C:/Users/user/Desktop/Coursera/03.DataScienceFoundationsusingRSpecialization/3.GettingandCleaningData/CourseProject")

# Pipeline:
#   1. Merge the training and the test sets into one data set.
#   2. Keep only the mean and standard deviation measurements.
#   3. Use descriptive activity names for the activities.
#   4. Label the data set with descriptive variable names.
#   5. Produce an independent tidy data set with the average of each
#      variable for each activity and each subject.

# melt()/dcast() come from reshape2 (install.packages("reshape2") if missing)
library(reshape2)

base_dir <- getwd()
zip_url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"

# Fetch and unpack the raw data (creates the "UCI HAR Dataset" folder)
download.file(zip_url, file.path(base_dir, "HAR.zip"))
unzip(zipfile = "HAR.zip")

# Activity code -> name lookup, and the full feature list
activityLabels <- read.table(file.path(base_dir, "UCI HAR Dataset/activity_labels.txt"),
                             col.names = c("classLabels", "activityName"))
features <- read.table(file.path(base_dir, "UCI HAR Dataset/features.txt"),
                       col.names = c("index", "featureNames"))

# Step 2: indices and cleaned names of the mean/std measurements
keep_idx <- grep("mean|std", features[, "featureNames"])
keep_names <- gsub("[()]", "", features[keep_idx, "featureNames"])

# Step 4: read one split (train or test) and label its columns
load_split <- function(x_file, y_file, subject_file) {
  x <- read.table(x_file)[, keep_idx]
  colnames(x) <- keep_names
  y <- read.table(y_file, col.names = c("Activity"))
  subj <- read.table(subject_file, col.names = c("SubjectID"))
  cbind(subj, y, x)
}

train_set <- load_split(file.path(base_dir, "UCI HAR Dataset/train/X_train.txt"),
                        file.path(base_dir, "UCI HAR Dataset/train/Y_train.txt"),
                        file.path(base_dir, "UCI HAR Dataset/train/subject_train.txt"))
test_set <- load_split(file.path(base_dir, "UCI HAR Dataset/test/X_test.txt"),
                       file.path(base_dir, "UCI HAR Dataset/test/Y_test.txt"),
                       file.path(base_dir, "UCI HAR Dataset/test/subject_test.txt"))

# Step 1: merge the two splits
full_set <- rbind(train_set, test_set)

# Step 3: descriptive activity names
full_set$Activity <- factor(full_set$Activity,
                            levels = activityLabels$classLabels,
                            labels = activityLabels$activityName)
full_set$SubjectID <- as.factor(full_set$SubjectID)

# Step 5: long form, then mean of every variable per subject/activity
long_form <- melt(full_set, id = c("SubjectID", "Activity"))
tidy_means <- dcast(long_form, SubjectID + Activity ~ variable, mean)

# write data to .txt file
write.table(tidy_means, "./tidy_dataset.txt", row.names = FALSE, quote = FALSE)
/run_analysis.R
no_license
ritayuehu/Coursera-JHU-Getting-and-Cleaning-Data-Course-Project
R
false
false
3,298
r
# Getting and Cleaning Data Project John Hopkins Coursera
# Author: tfvip2008

# NOTE(review): hard-coded absolute path — the script only runs as-is on the
# original author's machine.
setwd("C:/Users/user/Desktop/Coursera/03.DataScienceFoundationsusingRSpecialization/3.GettingandCleaningData/CourseProject")

# 1. Merges the training and the test sets to create one data set.
# 2. Extracts only the measurements on the mean and standard deviation for each measurement.
# 3. Uses descriptive activity names to name the activities in the data set
# 4. Appropriately labels the data set with descriptive variable names.
# 5. From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.

# Load packages and get the data
# install.packages("reshape2")
library(reshape2)

path <- getwd()
url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
# Downloads the raw zip on every run (no caching) and unpacks it here.
download.file(url, file.path(path, "HAR.zip"))
unzip(zipfile = "HAR.zip")
# the folder name is UCI HAR Dataset

# 4. Appropriately labels the data set with descriptive variable names.
# Load activity labels + features
activityLabels <- read.table(file.path(path, "UCI HAR Dataset/activity_labels.txt"),
                             col.names = c("classLabels", "activityName"))
features <- read.table(file.path(path, "UCI HAR Dataset/features.txt"),
                       col.names = c("index", "featureNames"))

# 2. Extracts only the measurements on the mean and standard deviation for each measurement.
# NOTE(review): the pattern "mean|std" also matches meanFreq() features —
# confirm those are intended to be kept.
features_sub <- grep("mean|std", features[, "featureNames"])
col_sub <- features[features_sub, "featureNames"]
col_sub <- gsub("[()]", "", col_sub)  # strip parentheses from feature names

# 4. Appropriately labels the data set with descriptive variable names.
# Load train datasets
x_train <- read.table(file.path(path, "UCI HAR Dataset/train/X_train.txt"))[, features_sub]
colnames(x_train) <- col_sub
y_train <- read.table(file.path(path, "UCI HAR Dataset/train/Y_train.txt"), col.names = c("Activity"))
sub_train <- read.table(file.path(path, "UCI HAR Dataset/train/subject_train.txt"), col.names = c("SubjectID"))
train <- cbind(sub_train, y_train, x_train)

# Load test datasets
x_test <- read.table(file.path(path, "UCI HAR Dataset/test/X_test.txt"))[, features_sub]
colnames(x_test) <- col_sub
y_test <- read.table(file.path(path, "UCI HAR Dataset/test/Y_test.txt"), col.names = c("Activity"))
sub_test <- read.table(file.path(path, "UCI HAR Dataset/test/subject_test.txt"), col.names = c("SubjectID"))
test <- cbind(sub_test, y_test, x_test)

# 1. Merges the training and the test sets to create one data set.
mrg <- rbind(train, test)

# 3. Uses descriptive activity names to name the activities in the data set
mrg$Activity <- factor(mrg$Activity
                       , levels = activityLabels$classLabels
                       , labels = activityLabels$activityName)
mrg$SubjectID <- as.factor(mrg$SubjectID)

# 5. From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
# melt to long form, then average every variable per subject/activity pair
meltedData <- melt(mrg, id = c("SubjectID", "Activity"))
tidyData <- dcast(meltedData, SubjectID + Activity ~ variable, mean)

# write data to .txt file
write.table(tidyData, "./tidy_dataset.txt", row.names = FALSE, quote = FALSE)
# Shared fixture for all tests in this file.
# NOTE(review): assumes ps_c.rds lives in the testthat working directory.
ps <- readRDS("ps_c.rds")

# Each call supplies one invalid argument combination and must raise an error:
# missing ps, missing ci, ci values that fail validation (0.05/0.94),
# non-numeric ci, and a ci vector of length other than 2.
test_that("plot_standard_error() - Tests for argument errors", {
  expect_error(plot_standard_error(ps = NULL, ci = c(0.05, 0.95)))
  expect_error(plot_standard_error(ps = ps, ci = NULL))
  expect_error(plot_standard_error(ps = ps, ci = c(0.05, 0.94)))
  expect_error(plot_standard_error(ps = ps, ci = c("a", "b")))
  expect_error(plot_standard_error(ps = ps, ci = c(0.05, 0.5, 0.95)))
})

# Visual regression test (vdiffr snapshot comparison), currently disabled:
# test_that("plot_standard_error() - Tests for plots matching previous ones", {
#   p <- plot_standard_error(ps = ps,
#                            ci = c(0.05, 0.95),
#                            facet_back_alpha = 50)
#   vdiffr::expect_doppelganger("plot-standard-error", p)
#   # Use following command to add new plots
#   # vdiffr::manage_cases()
# })
/tests/testthat/test-plot-standard-error.R
no_license
aaronmberger-nwfsc/pacifichakemse-1
R
false
false
775
r
# Shared fixture for all tests in this file.
# NOTE(review): assumes ps_c.rds lives in the testthat working directory.
ps <- readRDS("ps_c.rds")

# Each call supplies one invalid argument combination and must raise an error:
# missing ps, missing ci, ci values that fail validation (0.05/0.94),
# non-numeric ci, and a ci vector of length other than 2.
test_that("plot_standard_error() - Tests for argument errors", {
  expect_error(plot_standard_error(ps = NULL, ci = c(0.05, 0.95)))
  expect_error(plot_standard_error(ps = ps, ci = NULL))
  expect_error(plot_standard_error(ps = ps, ci = c(0.05, 0.94)))
  expect_error(plot_standard_error(ps = ps, ci = c("a", "b")))
  expect_error(plot_standard_error(ps = ps, ci = c(0.05, 0.5, 0.95)))
})

# Visual regression test (vdiffr snapshot comparison), currently disabled:
# test_that("plot_standard_error() - Tests for plots matching previous ones", {
#   p <- plot_standard_error(ps = ps,
#                            ci = c(0.05, 0.95),
#                            facet_back_alpha = 50)
#   vdiffr::expect_doppelganger("plot-standard-error", p)
#   # Use following command to add new plots
#   # vdiffr::manage_cases()
# })
\name{V3.TMAX.RAW.URL} \alias{V3.TMAX.RAW.URL} \docType{data} \title{ A url for the GHCN V3 Tmax data, unadjusted } \description{This url points to the unadjusted Tmax (maximum) dataset for GHCN V3 } \usage{V3.TMAX.RAW.URL} \format{ The format is: chr "ftp://ftp.ncdc.noaa.gov/pub/data/ghcn/v3/ghcnm.tmax.latest.qcu.tar.gz" } \source{\url{ftp://ftp.ncdc.noaa.gov/pub/data/ghcn/v3/} } \references{\url{ftp://ftp.ncdc.noaa.gov/pub/data/ghcn/v3/README} } \examples{ data(V3.TMAX.RAW.URL) print(V3.TMAX.RAW.URL) } \keyword{datasets}
/man/V3.TMAX.RAW.URL.Rd
no_license
cran/RghcnV3
R
false
false
573
rd
\name{V3.TMAX.RAW.URL} \alias{V3.TMAX.RAW.URL} \docType{data} \title{ A url for the GHCN V3 Tmax data, unadjusted } \description{This url points to the unadjusted Tmax (maximum) dataset for GHCN V3 } \usage{V3.TMAX.RAW.URL} \format{ The format is: chr "ftp://ftp.ncdc.noaa.gov/pub/data/ghcn/v3/ghcnm.tmax.latest.qcu.tar.gz" } \source{\url{ftp://ftp.ncdc.noaa.gov/pub/data/ghcn/v3/} } \references{\url{ftp://ftp.ncdc.noaa.gov/pub/data/ghcn/v3/README} } \examples{ data(V3.TMAX.RAW.URL) print(V3.TMAX.RAW.URL) } \keyword{datasets}
#' Daily angler effort from instantaneous counts.
#'
#' Expands instantaneous angler/boat counts to daily effort following
#' Malvestuto (1978): counts are scaled by length of day over the number of
#' count periods, then divided by the sampling probabilities of the chosen
#' time period and lake section.
#'
#' @param count data.frame of instantaneous counts; must contain the columns
#'   site, Date, time.1, Month, weekday, day_type, time.period plus count
#'   columns whose names contain "bank_ang", "boats", "boat_ang" (or
#'   "boats_ang"), "spr.angl", "bow.angl" and "spec.angl".
#' @param LOD.info data.frame with columns Month and LOD (length of day).
#' @param num.time.periods number of count periods per day (default 2).
#' @param num.lake.sections number of lake sections (default 1; currently
#'   informational only).
#' @param timeprob data.frame with columns time.period and prob.time;
#'   defaults to two equally likely time periods.
#' @param sectionprob data.frame with columns sections and prob.sections;
#'   defaults to a single section with probability 1.
#'
#' @return data.frame, one row per count (duplicated site/Date rows are
#'   averaged with a warning), with adjusted effort per angler type and the
#'   mean number of anglers per boat.
daily.angler.effort <- function(count, LOD.info, num.time.periods="", num.lake.sections="",
                                timeprob="", sectionprob=""){
  require(stringr)   # str_detect(), fixed()
  require(reshape2)  # melt(), dcast()

  # Resolve "" sentinel defaults. identical() keeps these checks safe when a
  # data.frame is supplied (the old `x == ""` comparison errored on those).
  if(identical(num.time.periods, "")){
    num.time.periods <- 2
  }
  if(identical(num.lake.sections, "")){
    num.lake.sections <- 1
  }
  if(identical(timeprob, "")){
    # BUGFIX: the default frames are now built inside the if-branches; the old
    # code rebuilt them unconditionally from loose vectors and crashed when a
    # caller actually supplied timeprob/sectionprob.
    timeprob <- data.frame(time.period=c(1,2), prob.time=c(0.5,0.5))
  }
  if(identical(sectionprob, "")){
    sectionprob <- data.frame(sections=1, prob.sections=1)
    count$lake.section <- 1
  }

  # Per-row expansion factors. BUGFIX: uses the LOD.info argument instead of
  # the undefined global `lod.dat`; match() replaces the old row-by-row loop.
  count.info <- data.frame(
    LOD = LOD.info$LOD[match(count$Month, LOD.info$Month)],
    timeprob = timeprob$prob.time[match(count$time.period, timeprob$time.period)],
    sectionprob = sectionprob$prob.sections[match(count$lake.section, sectionprob$sections)]
  )

  mean.func <- function(x) mean(x, na.rm=TRUE)  # mean with NAs removed

  # Harmonise column naming so all boat angler columns share one pattern.
  names(count) <- gsub("boats_ang", "boat_ang", names(count))

  # Row-wise mean over all columns whose name contains `pattern`;
  # all-NA rows (mean -> NaN) are treated as zero counts.
  count.mean <- function(pattern){
    m <- apply(count[str_detect(names(count), fixed(pattern))], 1, mean.func)
    m[is.nan(m)] <- 0
    m
  }
  bank_anglers <- count.mean("bank_ang")
  boats        <- count.mean("boats")
  boat_anglers <- count.mean("boat_ang")
  spr_anglers  <- count.mean("spr.angl")
  bow_anglers  <- count.mean("bow.angl")
  spec_anglers <- count.mean("spec.angl")

  # Anglers per boat; stays 0 where no boats were counted (avoids 0/0).
  ang_boat <- rep(0, length(boat_anglers))
  ang_boat[boats > 0] <- boat_anglers[boats > 0]/boats[boats > 0]

  # (The old code also computed a "non_ang" mean that was never used; dropped.)
  tot_anglers <- bank_anglers + boat_anglers + spr_anglers + bow_anglers + spec_anglers

  # Adjusted effort (Malvestuto 1978): scale by LOD over the number of count
  # periods, then divide by the sampling probabilities.
  adjust <- function(x){
    (x*(count.info$LOD/num.time.periods))/(count.info$timeprob*count.info$sectionprob)
  }

  output <- data.frame(
    count[, c("site","Date","time.1","Month","weekday","day_type","time.period")],
    TotalAngEffort = adjust(tot_anglers),
    BankAngEffort  = adjust(bank_anglers),
    BoatAngEffort  = adjust(boat_anglers),
    SprAngEffort   = adjust(spr_anglers),
    BowAngEffort   = adjust(bow_anglers),
    SpecAngEffort  = adjust(spec_anglers),
    Angl_boat      = ang_boat
  )

  # Collapse duplicated site/Date combinations to their mean effort.
  if(any(duplicated(paste(output$site, output$Date, sep='_')))){
    warning("Duplicated dates in count data, mean taken", call. = FALSE)
    eff.cols <- c("TotalAngEffort","BankAngEffort","BoatAngEffort",
                  "SprAngEffort","BowAngEffort","SpecAngEffort","Angl_boat")
    mean.output <- dcast(melt(output, id.var=c("site","Date"), measure.var=eff.cols),
                         site + Date ~ variable, mean)
    id.var <- output[!duplicated(paste(output$site, output$Date)),
                     c("site","Date","time.1","Month","weekday","day_type","time.period")]
    output <- merge(id.var, mean.output, by=c("site","Date"), all=TRUE)
  }
  return(output)
}
/R/daily.angler.effort.R
no_license
Ilfenominot/creelr
R
false
false
4,550
r
'daily.angler.effort'<-function(count,LOD.info,num.time.periods="",num.lake.sections="", timeprob="", sectionprob=""){ require(stringr) require(reshape2) if(num.time.periods==""){ num.time.periods<-2 } if(num.lake.sections==""){ num.lake.sections<-1 } if(timeprob==""){ time.period<-c(1,2) prob.time=c(0.5,0.5) } timeprob<-data.frame(time.period,prob.time) if(sectionprob==""){ sections<-c(1) prob.sections=c(1) count$lake.section<-1 } sectionprob<-data.frame(sections,prob.sections) count.info<-as.data.frame(matrix(0,nrow(count),3)) names(count.info)<-c("LOD","timeprob","sectionprob") for(i in 1:nrow(count)){ count.info$LOD[i]<-lod.dat$LOD[which(count$Month[i]==lod.dat$Month)] count.info$timeprob[i]<-timeprob$prob.time[which(count$time.period[i]==timeprob$time.period)] count.info$sectionprob[i]<-sectionprob$prob.sections[which(count$lake.section[i]==sectionprob$sections)] } mean.func<-function(x) mean(x,na.rm=TRUE) #mean with remove NA's names_count<-names(count) names_count<-gsub("boats_ang","boat_ang",names_count) names(count)<-names_count bank_anglers<-apply(count[str_detect(names(count), fixed("bank_ang"))],1, function(x) mean(x,na.rm=TRUE)) bank_anglers[is.nan(bank_anglers)]<-0 # correct for missing data boats<-apply(count[str_detect(names(count), fixed("boats"))],1, mean.func) boats[is.nan(boats)]<-0 boat_anglers<-apply(count[str_detect(names(count), fixed("boat_ang"))],1, function(x) mean(x,na.rm=TRUE)) boat_anglers[is.nan(boat_anglers)]<-0 spr_anglers<-apply(count[str_detect(names(count), fixed("spr.angl"))],1, function(x) mean(x,na.rm=TRUE)) spr_anglers[is.nan(spr_anglers)]<-0 # correct for missing data bow_anglers<-apply(count[str_detect(names(count), fixed("bow.angl"))],1, function(x) mean(x,na.rm=TRUE)) bow_anglers[is.nan(bow_anglers)]<-0 # correct for missing data spec_anglers<-apply(count[str_detect(names(count), fixed("spec.angl"))],1, function(x) mean(x,na.rm=TRUE)) spec_anglers[is.nan(spec_anglers)]<-0 ang_boat <-rep(0,length(boat_anglers)) 
ang_boat[boats>0]<-boat_anglers[boats>0]/boats[boats>0] non_ang<-apply(count[str_detect(names(count), fixed("non_ang"))],1, mean.func) tot_anglers<-apply(cbind(bank_anglers,boat_anglers,spr_anglers,bow_anglers,spec_anglers),1,sum) adj_tot_angl.a<-tot_anglers*(count.info$LOD/num.time.periods) # total angler adjusted effort (Malvestuto 1978) adj_tot_angler_e<- adj_tot_angl.a/(count.info$timeprob*count.info$sectionprob) adj_bank_angl.a<-bank_anglers*(count.info$LOD/num.time.periods) # total angler adjusted effort (Malvestuto 1978) adj_bank_angler_e<-adj_bank_angl.a/(count.info$timeprob*count.info$sectionprob) adj_boat_anglers.a<-boat_anglers*(count.info$LOD/num.time.periods) # total angler adjusted effort (Malvestuto 1978) adj_boat_angler_e<- adj_boat_anglers.a/(count.info$timeprob*count.info$sectionprob) adj_spr_anglers.a<-spr_anglers*(count.info$LOD/num.time.periods) # total angler adjusted effort (Malvestuto 1978) adj_spr_angler_e<- adj_spr_anglers.a/(count.info$timeprob*count.info$sectionprob) adj_bow_anglers.a<-bow_anglers*(count.info$LOD/num.time.periods) # total angler adjusted effort (Malvestuto 1978) adj_bow_angler_e<- adj_bow_anglers.a/(count.info$timeprob*count.info$sectionprob) adj_spec_anglers.a<-spec_anglers*(count.info$LOD/num.time.periods) # total angler adjusted effort (Malvestuto 1978) adj_spec_angler_e<- adj_spec_anglers.a/(count.info$timeprob*count.info$sectionprob) output<-data.frame(count[,c("site","Date","time.1","Month","weekday","day_type","time.period")], TotalAngEffort=adj_tot_angler_e,BankAngEffort=adj_bank_angler_e,BoatAngEffort=adj_boat_angler_e, SprAngEffort=adj_spr_angler_e,BowAngEffort=adj_bow_angler_e,SpecAngEffort=adj_spec_angler_e,Angl_boat=ang_boat) if(any(duplicated(paste(output$site,output$Date,sep='_')))){ warning("Duplicated dates in count data, mean taken", call. 
= FALSE) mean.output<-dcast(melt(output,id.var=c("site","Date"),measure.var=c("TotalAngEffort","BankAngEffort","BoatAngEffort","SprAngEffort","BowAngEffort","SpecAngEffort","Angl_boat")),site+Date~variable,mean) id.var<-output[!duplicated(paste(output$site,output$Date)),c("site","Date","time.1","Month","weekday","day_type","time.period")] output<-merge(id.var,mean.output,by=c("site","Date"),all=TRUE) } return(output) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/positionIncrement.R \name{resolvePositions} \alias{resolvePositions} \title{This function computes the position differences and optionally reconciles in the system.} \usage{ resolvePositions( accWisePos, instName, resources, date, ctypes, reconcile, tradeType, session ) } \arguments{ \item{accWisePos}{A list with account wise positions.} \item{instName}{The name of the institution to be used for recons trade guid.} \item{resources}{The resources data frame.} \item{date}{The date of comparison.} \item{ctypes}{The resource ctypes to be compared.} \item{reconcile}{Should the differences be reconciled.} \item{tradeType}{The ctype of the recons trades.} \item{session}{The rdecaf session.} } \description{ This is the description }
/man/resolvePositions.Rd
no_license
beatnaut/remaputils
R
false
true
836
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/positionIncrement.R \name{resolvePositions} \alias{resolvePositions} \title{This function computes the position differences and optionally reconciles in the system.} \usage{ resolvePositions( accWisePos, instName, resources, date, ctypes, reconcile, tradeType, session ) } \arguments{ \item{accWisePos}{A list with account wise positions.} \item{instName}{The name of the institution to be used for recons trade guid.} \item{resources}{The resources data frame.} \item{date}{The date of comparison.} \item{ctypes}{The resource ctypes to be compared.} \item{reconcile}{Should the differences be reconciled.} \item{tradeType}{The ctype of the recons trades.} \item{session}{The rdecaf session.} } \description{ This is the description }
library(bigstatsr) ### Name: big_transpose ### Title: Transposition ### Aliases: big_transpose ### ** Examples X <- FBM(10, 5, init = rnorm(50)) X[] Xt <- big_transpose(X) identical(t(X[]), Xt[]) X <- big_attachExtdata() Xt <- big_transpose(X) identical(t(X[]), Xt[])
/data/genthat_extracted_code/bigstatsr/examples/big_transpose.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
277
r
library(bigstatsr) ### Name: big_transpose ### Title: Transposition ### Aliases: big_transpose ### ** Examples X <- FBM(10, 5, init = rnorm(50)) X[] Xt <- big_transpose(X) identical(t(X[]), Xt[]) X <- big_attachExtdata() Xt <- big_transpose(X) identical(t(X[]), Xt[])
#* Wait 5 seconds and then return the current time #* @serializer json #* @param wait_time #* @get /wait function(req, res, wait_time = 10) { time_diff(wait_time) }
/plumber/plumber.R
permissive
fdrennan/rk8s
R
false
false
171
r
#* Wait 5 seconds and then return the current time #* @serializer json #* @param wait_time #* @get /wait function(req, res, wait_time = 10) { time_diff(wait_time) }
#Package Dependancies########################################################################################## library(readxl) library(dplyr) library(stringr) library(openxlsx) library(getPass) library(odbc) library(DBI) library(RODBC) library(lubridate) library(tidyr) options(scipen=999) #gets rid of scientific notation #Database Connections########################################################################################## dbicon <- DBI::dbConnect(odbc::odbc(), driver = "SQL Server", server = "", database = "EDW_Sandbox" ) dbitran <- DBI::dbConnect(odbc::odbc(), driver = "SQL Server", server = "", database = "AccountTransactions" ) # Find all customers with an ACH In Transaction greater than 500 since 2019-10-1 trans = " select distinct a.[AccountNumber] as 'Account Number' FROM [EDWLoansandDeposits].[dbo].[Accounts] a inner join [AccountTransactions].[dbo].[AccountTransactions] t on a.Account_ID = t.Account_ID and t.[ACHInAmount] >= 500 and t.PostingDate <= dateadd(day,90,a.AccountOpenDate) where t.PostingDate > '2019-10-1' " trans_df = DBI::dbGetQuery(dbitran, trans) # Find eligible reffered friends and corresponding referrers cust_sql = " DECLARE @today date = getdate() select distinct pr.ProductName as 'Product Name' ,a.[AccountNumber] as 'Account Number' ,cc.CostCenter as 'RF Cost Center' ,AVG(d.[AverageLedgerBalanceMTD]) as 'AverageLedgerBalanceMTD' ,min(a.[AccountOpenDate]) as 'Date Opened' ,c.CustomerSinceDate 'Customer Since Date' ,c.[CustomerName] as 'Customer Name' ,a.[PrimaryCustomer_ID] as 'EDW Customer ID' ,g.[Referral_Code] as 'Referral Code' ,g.[CustomerName] as 'Referrer' ,g.[IBSCustomerNumber] as 'Referrer ID' ,a2.[AccountNumber] as 'Refferer Account Number' ,cc2.CostCenter as 'Referrer Cost Center' FROM [EDWLoansandDeposits].[dbo].[Accounts] a inner join [EDWLoansandDeposits].[dbo].[AccountPromotions] ac on a.Account_ID = ac.Account_ID left join [EDWReportingDatamart].[dbo].[vw_dimServicingCostCenter] cc on a.[ServicingCostCenter_ID] = 
cc.CostCenter_ID and cc.IsCurrentYN = 1 inner join [EDWLoansandDeposits].[dbo].[DepositsMonthly] d on a.Account_ID = d.Account_ID inner join [EDW_Sandbox].[dbo].[Grow_Seattle_refer_a_friend_eligible_Cust_201909] g on g.[Referral_Code] = ac.[AccountPromotionCode] and ac.Promotion_ID = 1 inner join [EDWLoansandDeposits].[dbo].[Customers] c on a.[PrimaryCustomer_ID] = c.[Customer_ID] and dateadd(day, 6570, c.[BirthDate]) <= dateadd(day,90,a.AccountOpenDate) inner join [EDWLoansandDeposits].[dbo].[Products] pr on a.Product_ID = pr.Product_ID and pr.Product_ID in ('178', '230', '250', '283') inner join [EDWLoansandDeposits].[dbo].[Customers] c2 on g.[IBSCustomerNumber] = c2.[IBSCustomerNumber] and dateadd(day, 6570, c.[BirthDate]) <= dateadd(day,90,a.AccountOpenDate) inner join [EDWLoansandDeposits].[dbo].[Accounts] a2 on a2.[PrimaryCustomer_ID] = c2.[Customer_ID] and a2.Product_ID in ('178', '179', '183', '185', '192', '199', '204', '205', '207', '208', '217', '220', '230', '233', '234', '237', '245', '247', '250', '257', '260', '283') left join [EDWReportingDatamart].[dbo].[vw_dimServicingCostCenter] cc2 on a2.[ServicingCostCenter_ID] = cc2.CostCenter_ID and cc2.IsCurrentYN = 1 where (a.AccountOpenDate between '2019-10-7' AND '2019-12-31') and datediff(dd, a.AccountOpenDate, @today) >=90 and (a.[ClosedDate] > dateadd(day,90,a.AccountOpenDate) OR a.ClosedDate is null) group by pr.ProductName ,a.[AccountNumber] ,c.CustomerSinceDate ,c.[CustomerName] ,a.[PrimaryCustomer_ID] ,g.[Referral_Code] ,g.[CustomerName] ,g.[IBSCustomerNumber] ,cc.CostCenter ,a2.[AccountNumber] ,cc2.CostCenter " cust_df = DBI::dbGetQuery(dbicon, cust_sql) # Generate Payouts for Referred Friends #### referred_friends_to_pay = inner_join(cust_df, trans_df, by= c("Account Number")) # Remove Dupes referred_friends_to_pay = referred_friends_to_pay %>% group_by(`EDW Customer ID`) %>%filter(row_number() < 2) #Split DataFrame referrers_to_pay = referred_friends_to_pay[, c(9:13)] referred_friends_to_pay = 
referred_friends_to_pay[, c(1:8)] # Check for prexisting personal checking account rf_ids = c(unique(referred_friends_to_pay$`EDW Customer ID`)) rf_ids_string = paste("'",as.character(rf_ids),"'",collapse=", ",sep="") rf_aod = c(unique(referred_friends_to_pay$`Date Opened`)) rf_aod_string = paste("'",as.character(rf_aod),"'",collapse=", ",sep="") prex_check_sql = " DECLARE @today date = getdate() select distinct a.[PrimaryCustomer_ID] as 'EDW Customer ID' ,a.[AccountNumber] as 'Account Number' ,min(a.[AccountOpenDate]) as 'Date Opened' FROM [EDWLoansandDeposits].[dbo].[Accounts] a inner join [EDWLoansandDeposits].[dbo].[Customers] c on a.[PrimaryCustomer_ID] = c.[Customer_ID] inner join [EDWLoansandDeposits].[dbo].[Products] pr on a.Product_ID = pr.Product_ID where a.AccountOpenDate between dateadd(day,-182, @today) and @today -- *checks to see if checking accounts were opened on any day other than the day of the qualifying acct within 6 months of today and a.[PrimaryCustomer_ID] in (%s) and a.[AccountOpenDate] not in (%s) and pr.ProductCategory_ID in ('5','6','7', '11', '14', '12') group by a.[AccountNumber] ,a.[PrimaryCustomer_ID] " prex_check_sql_str = sprintf(prex_check_sql, rf_ids_string, rf_aod_string) prex_check = DBI::dbGetQuery(dbicon, prex_check_sql_str) # Remove Ineligble Customers Based on Previous Relationship referred_friends_to_pay = anti_join(referred_friends_to_pay, prex_check, by=c("EDW Customer ID")) # Add date and payment due variables referred_friends_to_pay$`Amount Due` = "100" referred_friends_to_pay$`AsOfDate` = today() # Verify Customers Were Not Paid Already paid_freinds_sql = " select distinct [EDW Customer ID] from [EDW_Sandbox].[dbo].[Grow Seattle Referred Friend Payment HST] " paid_friends = DBI::dbGetQuery(dbicon, paid_freinds_sql) # Remove Already Paid Customers referred_friends_to_pay = anti_join(referred_friends_to_pay, paid_friends, by=c("EDW Customer ID")) #Write Referred Friend History Table to Sandbox #dbWriteTable(dbicon, name 
= "Grow Seattle Referred Friend Payment HST", referred_friends_to_pay, row.names = FALSE, append=TRUE) # Generate Payouts for Referrers #### # Summarize referrers_to_pay_sum = referrers_to_pay %>% group_by(`Referral Code`, `Referrer`,`Referrer ID`, `Refferer Account Number`, `Referrer Cost Center`) %>% summarise( `Referred Friends This Month` = n()) referrers_to_pay_sum$`Amount Due` = referrers_to_pay_sum$`Referred Friends This Month` * 50 referrers_to_pay_sum$`AsOfDate` = today() # Verify Referrer Payment Eligibility paid_referrers_sql = " select distinct [Referrer ID] ,[Sum RF To Date] from [EDW_Sandbox].[dbo].[Grow Seattle Referrers Payment HST] " paid_referrers = DBI::dbGetQuery(dbicon, paid_referrers_sql) ineligible_referrers = filter(paid_referrers, `Sum RF To Date` == 20) eligible_referrers = filter(paid_referrers, `Sum RF To Date` < 20) # Find the headroom eligible_referrers$headroom = 20 - eligible_referrers$`Sum RF To Date` # Remove Ineligible Referrers referrers_to_pay_sum = anti_join(referrers_to_pay_sum, ineligible_referrers, by=c("Referrer ID")) # Isolate Customers in this period that have already been paid already_been_paid = inner_join(referrers_to_pay_sum, eligible_referrers, by=c("Referrer ID")) # Isolate Customers in this period that are newly eligible new_tobe_paid = anti_join(referrers_to_pay_sum, eligible_referrers, by=c("Referrer ID")) new_tobe_paid$`Sum RF To Date` = new_tobe_paid$`Referred Friends This Month` # Logic to control for Referral Cap already_been_paid$`Sum RF To Date` = ifelse( already_been_paid$`Referred Friends This Month` <= already_been_paid$headroom, (already_been_paid$`Sum RF To Date` + already_been_paid$`Referred Friends This Month`), ifelse(already_been_paid$`Referred Friends This Month` > already_been_paid$headroom, (already_been_paid$`Sum RF To Date` + already_been_paid$headroom), already_been_paid$`Referred Friends This Month`)) # Drop Headroom column already_been_paid = already_been_paid[c(1:9)] # Rejoin Dataframes 
final_refferers = rbind(new_tobe_paid, already_been_paid, by=c("Referrer ID")) #Write Referred Friend History Table to Sandbox #dbWriteTable(dbicon, name = "Grow Seattle Referrers Payment HST", final_refferers, row.names = FALSE, append=TRUE)
/umpqua_r_code_samples/ReferAFriendMonth2-5_PIISafe.R
no_license
zgod89/Portfolio
R
false
false
8,689
r
#Package Dependancies########################################################################################## library(readxl) library(dplyr) library(stringr) library(openxlsx) library(getPass) library(odbc) library(DBI) library(RODBC) library(lubridate) library(tidyr) options(scipen=999) #gets rid of scientific notation #Database Connections########################################################################################## dbicon <- DBI::dbConnect(odbc::odbc(), driver = "SQL Server", server = "", database = "EDW_Sandbox" ) dbitran <- DBI::dbConnect(odbc::odbc(), driver = "SQL Server", server = "", database = "AccountTransactions" ) # Find all customers with an ACH In Transaction greater than 500 since 2019-10-1 trans = " select distinct a.[AccountNumber] as 'Account Number' FROM [EDWLoansandDeposits].[dbo].[Accounts] a inner join [AccountTransactions].[dbo].[AccountTransactions] t on a.Account_ID = t.Account_ID and t.[ACHInAmount] >= 500 and t.PostingDate <= dateadd(day,90,a.AccountOpenDate) where t.PostingDate > '2019-10-1' " trans_df = DBI::dbGetQuery(dbitran, trans) # Find eligible reffered friends and corresponding referrers cust_sql = " DECLARE @today date = getdate() select distinct pr.ProductName as 'Product Name' ,a.[AccountNumber] as 'Account Number' ,cc.CostCenter as 'RF Cost Center' ,AVG(d.[AverageLedgerBalanceMTD]) as 'AverageLedgerBalanceMTD' ,min(a.[AccountOpenDate]) as 'Date Opened' ,c.CustomerSinceDate 'Customer Since Date' ,c.[CustomerName] as 'Customer Name' ,a.[PrimaryCustomer_ID] as 'EDW Customer ID' ,g.[Referral_Code] as 'Referral Code' ,g.[CustomerName] as 'Referrer' ,g.[IBSCustomerNumber] as 'Referrer ID' ,a2.[AccountNumber] as 'Refferer Account Number' ,cc2.CostCenter as 'Referrer Cost Center' FROM [EDWLoansandDeposits].[dbo].[Accounts] a inner join [EDWLoansandDeposits].[dbo].[AccountPromotions] ac on a.Account_ID = ac.Account_ID left join [EDWReportingDatamart].[dbo].[vw_dimServicingCostCenter] cc on a.[ServicingCostCenter_ID] = 
cc.CostCenter_ID and cc.IsCurrentYN = 1 inner join [EDWLoansandDeposits].[dbo].[DepositsMonthly] d on a.Account_ID = d.Account_ID inner join [EDW_Sandbox].[dbo].[Grow_Seattle_refer_a_friend_eligible_Cust_201909] g on g.[Referral_Code] = ac.[AccountPromotionCode] and ac.Promotion_ID = 1 inner join [EDWLoansandDeposits].[dbo].[Customers] c on a.[PrimaryCustomer_ID] = c.[Customer_ID] and dateadd(day, 6570, c.[BirthDate]) <= dateadd(day,90,a.AccountOpenDate) inner join [EDWLoansandDeposits].[dbo].[Products] pr on a.Product_ID = pr.Product_ID and pr.Product_ID in ('178', '230', '250', '283') inner join [EDWLoansandDeposits].[dbo].[Customers] c2 on g.[IBSCustomerNumber] = c2.[IBSCustomerNumber] and dateadd(day, 6570, c.[BirthDate]) <= dateadd(day,90,a.AccountOpenDate) inner join [EDWLoansandDeposits].[dbo].[Accounts] a2 on a2.[PrimaryCustomer_ID] = c2.[Customer_ID] and a2.Product_ID in ('178', '179', '183', '185', '192', '199', '204', '205', '207', '208', '217', '220', '230', '233', '234', '237', '245', '247', '250', '257', '260', '283') left join [EDWReportingDatamart].[dbo].[vw_dimServicingCostCenter] cc2 on a2.[ServicingCostCenter_ID] = cc2.CostCenter_ID and cc2.IsCurrentYN = 1 where (a.AccountOpenDate between '2019-10-7' AND '2019-12-31') and datediff(dd, a.AccountOpenDate, @today) >=90 and (a.[ClosedDate] > dateadd(day,90,a.AccountOpenDate) OR a.ClosedDate is null) group by pr.ProductName ,a.[AccountNumber] ,c.CustomerSinceDate ,c.[CustomerName] ,a.[PrimaryCustomer_ID] ,g.[Referral_Code] ,g.[CustomerName] ,g.[IBSCustomerNumber] ,cc.CostCenter ,a2.[AccountNumber] ,cc2.CostCenter " cust_df = DBI::dbGetQuery(dbicon, cust_sql) # Generate Payouts for Referred Friends #### referred_friends_to_pay = inner_join(cust_df, trans_df, by= c("Account Number")) # Remove Dupes referred_friends_to_pay = referred_friends_to_pay %>% group_by(`EDW Customer ID`) %>%filter(row_number() < 2) #Split DataFrame referrers_to_pay = referred_friends_to_pay[, c(9:13)] referred_friends_to_pay = 
referred_friends_to_pay[, c(1:8)] # Check for prexisting personal checking account rf_ids = c(unique(referred_friends_to_pay$`EDW Customer ID`)) rf_ids_string = paste("'",as.character(rf_ids),"'",collapse=", ",sep="") rf_aod = c(unique(referred_friends_to_pay$`Date Opened`)) rf_aod_string = paste("'",as.character(rf_aod),"'",collapse=", ",sep="") prex_check_sql = " DECLARE @today date = getdate() select distinct a.[PrimaryCustomer_ID] as 'EDW Customer ID' ,a.[AccountNumber] as 'Account Number' ,min(a.[AccountOpenDate]) as 'Date Opened' FROM [EDWLoansandDeposits].[dbo].[Accounts] a inner join [EDWLoansandDeposits].[dbo].[Customers] c on a.[PrimaryCustomer_ID] = c.[Customer_ID] inner join [EDWLoansandDeposits].[dbo].[Products] pr on a.Product_ID = pr.Product_ID where a.AccountOpenDate between dateadd(day,-182, @today) and @today -- *checks to see if checking accounts were opened on any day other than the day of the qualifying acct within 6 months of today and a.[PrimaryCustomer_ID] in (%s) and a.[AccountOpenDate] not in (%s) and pr.ProductCategory_ID in ('5','6','7', '11', '14', '12') group by a.[AccountNumber] ,a.[PrimaryCustomer_ID] " prex_check_sql_str = sprintf(prex_check_sql, rf_ids_string, rf_aod_string) prex_check = DBI::dbGetQuery(dbicon, prex_check_sql_str) # Remove Ineligble Customers Based on Previous Relationship referred_friends_to_pay = anti_join(referred_friends_to_pay, prex_check, by=c("EDW Customer ID")) # Add date and payment due variables referred_friends_to_pay$`Amount Due` = "100" referred_friends_to_pay$`AsOfDate` = today() # Verify Customers Were Not Paid Already paid_freinds_sql = " select distinct [EDW Customer ID] from [EDW_Sandbox].[dbo].[Grow Seattle Referred Friend Payment HST] " paid_friends = DBI::dbGetQuery(dbicon, paid_freinds_sql) # Remove Already Paid Customers referred_friends_to_pay = anti_join(referred_friends_to_pay, paid_friends, by=c("EDW Customer ID")) #Write Referred Friend History Table to Sandbox #dbWriteTable(dbicon, name 
= "Grow Seattle Referred Friend Payment HST", referred_friends_to_pay, row.names = FALSE, append=TRUE) # Generate Payouts for Referrers #### # Summarize referrers_to_pay_sum = referrers_to_pay %>% group_by(`Referral Code`, `Referrer`,`Referrer ID`, `Refferer Account Number`, `Referrer Cost Center`) %>% summarise( `Referred Friends This Month` = n()) referrers_to_pay_sum$`Amount Due` = referrers_to_pay_sum$`Referred Friends This Month` * 50 referrers_to_pay_sum$`AsOfDate` = today() # Verify Referrer Payment Eligibility paid_referrers_sql = " select distinct [Referrer ID] ,[Sum RF To Date] from [EDW_Sandbox].[dbo].[Grow Seattle Referrers Payment HST] " paid_referrers = DBI::dbGetQuery(dbicon, paid_referrers_sql) ineligible_referrers = filter(paid_referrers, `Sum RF To Date` == 20) eligible_referrers = filter(paid_referrers, `Sum RF To Date` < 20) # Find the headroom eligible_referrers$headroom = 20 - eligible_referrers$`Sum RF To Date` # Remove Ineligible Referrers referrers_to_pay_sum = anti_join(referrers_to_pay_sum, ineligible_referrers, by=c("Referrer ID")) # Isolate Customers in this period that have already been paid already_been_paid = inner_join(referrers_to_pay_sum, eligible_referrers, by=c("Referrer ID")) # Isolate Customers in this period that are newly eligible new_tobe_paid = anti_join(referrers_to_pay_sum, eligible_referrers, by=c("Referrer ID")) new_tobe_paid$`Sum RF To Date` = new_tobe_paid$`Referred Friends This Month` # Logic to control for Referral Cap already_been_paid$`Sum RF To Date` = ifelse( already_been_paid$`Referred Friends This Month` <= already_been_paid$headroom, (already_been_paid$`Sum RF To Date` + already_been_paid$`Referred Friends This Month`), ifelse(already_been_paid$`Referred Friends This Month` > already_been_paid$headroom, (already_been_paid$`Sum RF To Date` + already_been_paid$headroom), already_been_paid$`Referred Friends This Month`)) # Drop Headroom column already_been_paid = already_been_paid[c(1:9)] # Rejoin Dataframes 
final_refferers = rbind(new_tobe_paid, already_been_paid, by=c("Referrer ID")) #Write Referred Friend History Table to Sandbox #dbWriteTable(dbicon, name = "Grow Seattle Referrers Payment HST", final_refferers, row.names = FALSE, append=TRUE)
curve_fitting <- data.frame( x=c(0.000000,0.111111,0.222222,0.333333,0.444444,0.555556,0.666667,0.777778,0.888889,1.000000), t=c(0.349486,0.830839,1.007332,0.971507,0.133066,0.166823,-0.848307,-0.445686,-0.563567,0.261502)) gp <- function(beta, sigma) { kernel <- function(x1, x2) exp(-(x1-x2)^2/(2*sigma^2)); K <- outer(curve_fitting$x, curve_fitting$x, kernel); C_N <- K + diag(dim(curve_fitting)[1])/beta m <- function(x) (outer(x, curve_fitting$x, kernel) %*% solve(C_N) %*% curve_fitting$t) plot(m, xlim=c(0,1), ylim=c(-1,1), xlab="", ylab="") par(new=T) plot(curve_fitting, xlim=c(0,1), ylim=c(-1,1), xlab=paste("beta=",beta,", sigma=",sigma), ylab="") } par(mfrow=c(2,2))
/misc/r/gp.R
no_license
amumu/nokuno
R
false
false
706
r
curve_fitting <- data.frame( x=c(0.000000,0.111111,0.222222,0.333333,0.444444,0.555556,0.666667,0.777778,0.888889,1.000000), t=c(0.349486,0.830839,1.007332,0.971507,0.133066,0.166823,-0.848307,-0.445686,-0.563567,0.261502)) gp <- function(beta, sigma) { kernel <- function(x1, x2) exp(-(x1-x2)^2/(2*sigma^2)); K <- outer(curve_fitting$x, curve_fitting$x, kernel); C_N <- K + diag(dim(curve_fitting)[1])/beta m <- function(x) (outer(x, curve_fitting$x, kernel) %*% solve(C_N) %*% curve_fitting$t) plot(m, xlim=c(0,1), ylim=c(-1,1), xlab="", ylab="") par(new=T) plot(curve_fitting, xlim=c(0,1), ylim=c(-1,1), xlab=paste("beta=",beta,", sigma=",sigma), ylab="") } par(mfrow=c(2,2))
# lectures données genevieve Sigma_gen <- read.table("~/simulations/Data_genevieve/Data/erdos/Sigma_small.txt", quote="\"", comment.char="") adjmat_gen<- read.table("~/simulations/Data_genevieve/Data/erdos/adjmat_small.txt", quote="\"", comment.char="") adjmat_cond <- read.table("~/simulations/Data_genevieve/Data/erdos/adjmat_cond_small.txt", quote="\"", comment.char="") adjmat_marg <- read.table("~/simulations/Data_genevieve/Data/erdos/adjmat_marg_small.txt", quote="\"", comment.char="") library(EMtree) library(tidyverse) library(ggraph) library(tidygraph) p=30 adjmat_cond=matrix(unlist(adjmat_cond),ncol(adjmat_cond),ncol(adjmat_cond)) adjmat_gen=matrix(unlist(adjmat_gen),ncol(adjmat_gen),ncol(adjmat_gen)) adjmat_marg=matrix(unlist(adjmat_marg),ncol(adjmat_marg),ncol(adjmat_marg)) Sigma_gen=matrix(unlist(Sigma_gen),ncol(Sigma_gen),ncol(Sigma_gen)) image(solve(Sigma_gen), label=1:30) draw_network((adjmat_marg), nodes_label = 1:29, layout="nicely", curv=0.01, size=5, pal="black") K=solve(Sigma_gen) Koh=K[]
/R/codes/missingActor/use_genData.R
no_license
Rmomal/these
R
false
false
1,029
r
# lectures données genevieve Sigma_gen <- read.table("~/simulations/Data_genevieve/Data/erdos/Sigma_small.txt", quote="\"", comment.char="") adjmat_gen<- read.table("~/simulations/Data_genevieve/Data/erdos/adjmat_small.txt", quote="\"", comment.char="") adjmat_cond <- read.table("~/simulations/Data_genevieve/Data/erdos/adjmat_cond_small.txt", quote="\"", comment.char="") adjmat_marg <- read.table("~/simulations/Data_genevieve/Data/erdos/adjmat_marg_small.txt", quote="\"", comment.char="") library(EMtree) library(tidyverse) library(ggraph) library(tidygraph) p=30 adjmat_cond=matrix(unlist(adjmat_cond),ncol(adjmat_cond),ncol(adjmat_cond)) adjmat_gen=matrix(unlist(adjmat_gen),ncol(adjmat_gen),ncol(adjmat_gen)) adjmat_marg=matrix(unlist(adjmat_marg),ncol(adjmat_marg),ncol(adjmat_marg)) Sigma_gen=matrix(unlist(Sigma_gen),ncol(Sigma_gen),ncol(Sigma_gen)) image(solve(Sigma_gen), label=1:30) draw_network((adjmat_marg), nodes_label = 1:29, layout="nicely", curv=0.01, size=5, pal="black") K=solve(Sigma_gen) Koh=K[]
#' Formatuje jak PLN #' #' @param suffix co ma byc po kwocie np. znak 'dolara' domyslnie ' zł' #' #' @return zwraca character vector dla kwot #' #' @export #' #' PLN <- function(x, suffix = " z\U0142", accuracy = 2, scale = 1, prefix = "", big.mark = " ", largest_with_cents = 1000*1000, decimal_mark = ",", negative_parens = FALSE, trim = TRUE, ...) { scales::dollar(x, accuracy = accuracy, scale = scale, prefix = prefix, suffix = suffix, big.mark = big.mark, decimal.mark = decimal_mark, largest_with_cents = largest_with_cents, negative_parens, trim = trim, ...) } ##dodajSeparatorTYS ##dodajSeparatorTYS <- scales::dollar_format(prefix = "", suffix = "", big.mark = " ", decimal.mark = ",")
/R/PLN.R
no_license
kiwimic/kiwiR
R
false
false
998
r
#' Formatuje jak PLN #' #' @param suffix co ma byc po kwocie np. znak 'dolara' domyslnie ' zł' #' #' @return zwraca character vector dla kwot #' #' @export #' #' PLN <- function(x, suffix = " z\U0142", accuracy = 2, scale = 1, prefix = "", big.mark = " ", largest_with_cents = 1000*1000, decimal_mark = ",", negative_parens = FALSE, trim = TRUE, ...) { scales::dollar(x, accuracy = accuracy, scale = scale, prefix = prefix, suffix = suffix, big.mark = big.mark, decimal.mark = decimal_mark, largest_with_cents = largest_with_cents, negative_parens, trim = trim, ...) } ##dodajSeparatorTYS ##dodajSeparatorTYS <- scales::dollar_format(prefix = "", suffix = "", big.mark = " ", decimal.mark = ",")
## ---------------------------------------------------------------------------
## EM algorithm for a Bernoulli mixture model on handwritten-digit images.
## The helper functions read several GLOBALS set in the "Experiment" section:
##   M   - number of mixture components
##   D   - image dimension (pixels per image)
##   N   - number of training images per class
##   N0  - number of test images per class
##   dif - convergence tolerance for the EM loop
## ---------------------------------------------------------------------------

# Load the digit arrays; provides training.data and test.data.
# Assumed layout: class x sample x rows x cols -- TODO confirm against digits.RData.
load("digits.RData")
num.class = dim(training.data)[1] # Number of classes
num.training = dim(training.data)[2] # Number of training data per class
d = prod(dim(training.data)[3:4]) # Dimension of each training image (rowsxcolumns)
num.test = dim(test.data)[2] # Number of test data
dim(training.data) = c(num.class * num.training, d) # Reshape training data to 2d matrix
dim(test.data) = c(num.class * num.test, d) # Same for test.
training.label = rep(0:9, num.training) # Labels of training data.
test.label = rep(0:9, num.test) # Labels of test data

## Calculate log-likelihood and gamma
# x: N x D binary data matrix; mu: M x D matrix of Bernoulli means;
# pi: length-M mixture weights; N: number of rows of x.
# Returns l (N x M per-observation/component likelihoods) and gamma0
# (responsibilities, computed with a max-shift in log space for stability).
sl = function(x,mu,pi,N){
  l = matrix(0,nrow = N,ncol = M)
  for(i in seq(N)){
    for(m in seq(M)){l[i,m] = pi[m] * prod(mu[m,]^x[i,]) * prod((1 - mu[m,])^(1 - x[i,]))}
  }
  max0 = max(log(l))
  gamma0 = exp(log(l) - max0) / rowSums(exp(log(l) - max0)) # Gamma is the weight of specific likelihood to the sum of likelihood
  return(list(l = l, gamma0 = gamma0))
}

## Calculate mu
# M-step update of the Bernoulli means; the +1/+2 terms are smoothing
# pseudo-counts that keep every mu[m,j] strictly inside (0,1).
smiu = function(gamma,x){
  mu = matrix(0, nrow = M, ncol = D)
  for(m in seq(M)){
    for (j in seq(D)){mu[m, j] = (1 + sum(gamma[,m] * x[,j])) / (2 + sum(gamma[, m]))}
  }
  return(mu)
}

## Calculate pi
# M-step update of the mixture weights, again with add-one smoothing.
spi = function(gamma){
  pi = vector(length = M)
  for(m in seq(M)){pi[m] = (1 + sum(gamma[, m])) / (M + N)}
  return(pi)
}

## EM algorithm
# Fits the M-component Bernoulli mixture to the images of one digit class.
#   dif     - convergence tolerance on the squared parameter step
#   class   - digit class (0-9) to fit
#   dataset - stacked 2d image matrix (rows interleaved by class)
#   plot    - if TRUE, render each fitted component mean as a greyscale image
# Returns mu, pi, the iteration count, and the log-likelihood trace.
ema = function(dif,class,dataset,plot=logical()) {
  index = rep(class + 1,N) + 10 * rep(0:(N - 1),each = 1) # Add indices to a particular class
  data = dataset[index, ] # Pick out the data
  muem = matrix(0,nrow = M,ncol = D); piem = rep(0,M)
  logl = vector(); count = 0
  ##Initial value (First iteration)
  group = as.factor(sample(seq(M), dim(data)[1], replace = T))
  gamma_init = model.matrix(~ group - 1) # Construct 0,1 matrix
  munew = smiu(gamma_init,data); pinew = spi(gamma_init) # Update new miu and pi values
  dist = sum((pinew-piem)^2) + sum((munew-muem)^2)
  ##Iteration
  while(dist > dif){
    muem = munew; piem = pinew
    lnew = sl(data, muem, piem, N)$l; gammanew = sl(data, muem, piem, N)$gamma0 # Update gamma and log-likelihood (Step Expectation)
    munew = smiu(gammanew, data); pinew = spi(gammanew) # Update miu and pi (Step Maximization)
    dist = sum((pinew - piem)^2)+sum((munew - muem)^2) # Calculate step size
    ss = gammanew * log(lnew)
    loglik = sum(ss) + sum(log(6*muem*(1-muem))) + log(prod(1:(2*M-1))*prod(piem)) # Calculate log-lik
    logl = c(logl,loglik) # Record log-lik
    count=count+1 # Record number of steps
  }
  ##Plot (transfer boolean values to black or white in a picture)
  if(plot == T) {
    plt = t(muem)
    dim(plt) = c(sqrt(D),sqrt(D),M)
    for(i in 1:M){
      # NOTE(review): the [,20:1] column flip assumes 20x20 images -- confirm
      # this matches sqrt(D) for the loaded data.
      image(t(1 - round(plt[,,i],3))[,20:1], col=gray(seq(0, 1, length.out=256)),axes=FALSE, asp=1)
    }
  }
  #Report the final outcome
  return(list(mu = round(muem,2),pi = piem,iterations = count,log_likelihood = logl))
}

## Calculating the test error
# Fits one mixture per digit class (via assign/get on names out0..out9),
# then evaluates pairwise classification error on the test set under two
# decision rules: max component likelihood and full mixture likelihood.
emerror=function(dataset) {
  # Build EM model from traning data
  for (i in 0:9) { assign(paste("out",i,sep=""), ema(dif,i,training.data,plot=F)) }
  ratio0 = matrix(0,10,10); ratio1 = matrix(0,10,10)
  for(class1 in 0:8){
    for(class2 in ((class1+1):9)){
      miu1 = get(paste("out", class1, sep = ""))$mu
      miu2 = get(paste("out", class2, sep = ""))$mu
      pi1 = get(paste("out", class1, sep = ""))$pi
      pi2 = get(paste("out", class2, sep = ""))$pi
      index10 = rep(class1 + 1, N0) + 10 * rep(0:(N0 - 1), each = 1)
      index20 = rep(class2 + 1, N0) + 10 * rep(0:(N0 - 1), each = 1)
      test1 = test.data[index10,]
      test2 = test.data[index20,]
      # Calculate and compare log-likelihood
      l11=sl(test1, miu1, pi1, N0)$l
      l12=sl(test1, miu2, pi2, N0)$l
      l21=sl(test2, miu1, pi1, N0)$l
      l22=sl(test2, miu2, pi2, N0)$l
      ##likelihood standard
      error11 = length(which(t(l11%*%pi1) - t(l12%*%pi2) < 0))
      error12 = length(which(t(l22%*%pi2) - t(l21%*%pi1) < 0))
      ##max standard
      error01 = length(which(apply(l11,1,max) - apply(l12,1,max) < 0))
      error02 = length(which(apply(l22,1,max) - apply(l21,1,max) < 0))
      ##building matrices
      ratio0[class1 + 1,class2 + 1]=(error01 + error02)/(2 * N0)
      ratio1[class1 + 1,class2 + 1]=(error11 + error12)/(2 * N0)
    }
  }
  ratio0[lower.tri(ratio0)] = t(ratio0)[lower.tri(ratio0)] # Symmetric matrix
  ratio1[lower.tri(ratio1)] = t(ratio1)[lower.tri(ratio1)] # Symmetric matrix
  row.names(ratio0) = seq(0,9,1); colnames(ratio0) = seq(0,9,1)
  row.names(ratio1) = seq(0,9,1); colnames(ratio1) = seq(0,9,1)
  return(list(MaxLikelihood_Standard = ratio0, MixtureLikelihood_Standard = ratio1))
}

## Experiment
D = d
N = num.training
N0 = num.test
dif = 10^-6

# NOTE(review): every run below fits the SAME class (the literal 2) with a
# different number of components M (which ema reads from the global
# environment) -- presumably intentional for digit "2"; confirm.
M = 2
result2 = ema(10^-6,2,training.data, plot = T)
cat("Number of iterations:",result2$iterations,"\n")
result2$log_likelihood
mu2 = result2$mu; pi2 = result2$pi

M = 3
result3 = ema(10^-6,2,training.data, plot = T)
cat("Number of iterations:",result3$iterations,"\n")
result3$log_likelihood
mu3 = result3$mu; pi3 = result3$pi

M = 5
result5 = ema(10^-6,2,training.data, plot = T)
cat("Number of iterations:",result5$iterations,"\n")
result5$log_likelihood
mu5 = result5$mu; pi5 = result5$pi

M = 8
result8 = ema(10^-6,2,training.data, plot = T)
cat("Number of iterations:", result8$iterations, "\n")
result8$log_likelihood
mu8 = result8$mu; pi8 = result8$pi

## Final result
result = list(mu2 = mu2, pi2 = pi2, mu3 = mu3, pi3 = pi3, mu5 = mu5, pi5 = pi5, mu8 = mu8, pi8 = pi8)
/Rcode.R
no_license
destinationyyf/Digits-Recognition
R
false
false
5,549
r
## ---------------------------------------------------------------------------
## EM algorithm for a Bernoulli mixture model on handwritten-digit images.
## The helper functions read several GLOBALS set in the "Experiment" section:
##   M   - number of mixture components
##   D   - image dimension (pixels per image)
##   N   - number of training images per class
##   N0  - number of test images per class
##   dif - convergence tolerance for the EM loop
## ---------------------------------------------------------------------------

# Load the digit arrays; provides training.data and test.data.
# Assumed layout: class x sample x rows x cols -- TODO confirm against digits.RData.
load("digits.RData")
num.class = dim(training.data)[1] # Number of classes
num.training = dim(training.data)[2] # Number of training data per class
d = prod(dim(training.data)[3:4]) # Dimension of each training image (rowsxcolumns)
num.test = dim(test.data)[2] # Number of test data
dim(training.data) = c(num.class * num.training, d) # Reshape training data to 2d matrix
dim(test.data) = c(num.class * num.test, d) # Same for test.
training.label = rep(0:9, num.training) # Labels of training data.
test.label = rep(0:9, num.test) # Labels of test data

## Calculate log-likelihood and gamma
# x: N x D binary data matrix; mu: M x D matrix of Bernoulli means;
# pi: length-M mixture weights; N: number of rows of x.
# Returns l (N x M per-observation/component likelihoods) and gamma0
# (responsibilities, computed with a max-shift in log space for stability).
sl = function(x,mu,pi,N){
  l = matrix(0,nrow = N,ncol = M)
  for(i in seq(N)){
    for(m in seq(M)){l[i,m] = pi[m] * prod(mu[m,]^x[i,]) * prod((1 - mu[m,])^(1 - x[i,]))}
  }
  max0 = max(log(l))
  gamma0 = exp(log(l) - max0) / rowSums(exp(log(l) - max0)) # Gamma is the weight of specific likelihood to the sum of likelihood
  return(list(l = l, gamma0 = gamma0))
}

## Calculate mu
# M-step update of the Bernoulli means; the +1/+2 terms are smoothing
# pseudo-counts that keep every mu[m,j] strictly inside (0,1).
smiu = function(gamma,x){
  mu = matrix(0, nrow = M, ncol = D)
  for(m in seq(M)){
    for (j in seq(D)){mu[m, j] = (1 + sum(gamma[,m] * x[,j])) / (2 + sum(gamma[, m]))}
  }
  return(mu)
}

## Calculate pi
# M-step update of the mixture weights, again with add-one smoothing.
spi = function(gamma){
  pi = vector(length = M)
  for(m in seq(M)){pi[m] = (1 + sum(gamma[, m])) / (M + N)}
  return(pi)
}

## EM algorithm
# Fits the M-component Bernoulli mixture to the images of one digit class.
#   dif     - convergence tolerance on the squared parameter step
#   class   - digit class (0-9) to fit
#   dataset - stacked 2d image matrix (rows interleaved by class)
#   plot    - if TRUE, render each fitted component mean as a greyscale image
# Returns mu, pi, the iteration count, and the log-likelihood trace.
ema = function(dif,class,dataset,plot=logical()) {
  index = rep(class + 1,N) + 10 * rep(0:(N - 1),each = 1) # Add indices to a particular class
  data = dataset[index, ] # Pick out the data
  muem = matrix(0,nrow = M,ncol = D); piem = rep(0,M)
  logl = vector(); count = 0
  ##Initial value (First iteration)
  group = as.factor(sample(seq(M), dim(data)[1], replace = T))
  gamma_init = model.matrix(~ group - 1) # Construct 0,1 matrix
  munew = smiu(gamma_init,data); pinew = spi(gamma_init) # Update new miu and pi values
  dist = sum((pinew-piem)^2) + sum((munew-muem)^2)
  ##Iteration
  while(dist > dif){
    muem = munew; piem = pinew
    lnew = sl(data, muem, piem, N)$l; gammanew = sl(data, muem, piem, N)$gamma0 # Update gamma and log-likelihood (Step Expectation)
    munew = smiu(gammanew, data); pinew = spi(gammanew) # Update miu and pi (Step Maximization)
    dist = sum((pinew - piem)^2)+sum((munew - muem)^2) # Calculate step size
    ss = gammanew * log(lnew)
    loglik = sum(ss) + sum(log(6*muem*(1-muem))) + log(prod(1:(2*M-1))*prod(piem)) # Calculate log-lik
    logl = c(logl,loglik) # Record log-lik
    count=count+1 # Record number of steps
  }
  ##Plot (transfer boolean values to black or white in a picture)
  if(plot == T) {
    plt = t(muem)
    dim(plt) = c(sqrt(D),sqrt(D),M)
    for(i in 1:M){
      # NOTE(review): the [,20:1] column flip assumes 20x20 images -- confirm
      # this matches sqrt(D) for the loaded data.
      image(t(1 - round(plt[,,i],3))[,20:1], col=gray(seq(0, 1, length.out=256)),axes=FALSE, asp=1)
    }
  }
  #Report the final outcome
  return(list(mu = round(muem,2),pi = piem,iterations = count,log_likelihood = logl))
}

## Calculating the test error
# Fits one mixture per digit class (via assign/get on names out0..out9),
# then evaluates pairwise classification error on the test set under two
# decision rules: max component likelihood and full mixture likelihood.
emerror=function(dataset) {
  # Build EM model from traning data
  for (i in 0:9) { assign(paste("out",i,sep=""), ema(dif,i,training.data,plot=F)) }
  ratio0 = matrix(0,10,10); ratio1 = matrix(0,10,10)
  for(class1 in 0:8){
    for(class2 in ((class1+1):9)){
      miu1 = get(paste("out", class1, sep = ""))$mu
      miu2 = get(paste("out", class2, sep = ""))$mu
      pi1 = get(paste("out", class1, sep = ""))$pi
      pi2 = get(paste("out", class2, sep = ""))$pi
      index10 = rep(class1 + 1, N0) + 10 * rep(0:(N0 - 1), each = 1)
      index20 = rep(class2 + 1, N0) + 10 * rep(0:(N0 - 1), each = 1)
      test1 = test.data[index10,]
      test2 = test.data[index20,]
      # Calculate and compare log-likelihood
      l11=sl(test1, miu1, pi1, N0)$l
      l12=sl(test1, miu2, pi2, N0)$l
      l21=sl(test2, miu1, pi1, N0)$l
      l22=sl(test2, miu2, pi2, N0)$l
      ##likelihood standard
      error11 = length(which(t(l11%*%pi1) - t(l12%*%pi2) < 0))
      error12 = length(which(t(l22%*%pi2) - t(l21%*%pi1) < 0))
      ##max standard
      error01 = length(which(apply(l11,1,max) - apply(l12,1,max) < 0))
      error02 = length(which(apply(l22,1,max) - apply(l21,1,max) < 0))
      ##building matrices
      ratio0[class1 + 1,class2 + 1]=(error01 + error02)/(2 * N0)
      ratio1[class1 + 1,class2 + 1]=(error11 + error12)/(2 * N0)
    }
  }
  ratio0[lower.tri(ratio0)] = t(ratio0)[lower.tri(ratio0)] # Symmetric matrix
  ratio1[lower.tri(ratio1)] = t(ratio1)[lower.tri(ratio1)] # Symmetric matrix
  row.names(ratio0) = seq(0,9,1); colnames(ratio0) = seq(0,9,1)
  row.names(ratio1) = seq(0,9,1); colnames(ratio1) = seq(0,9,1)
  return(list(MaxLikelihood_Standard = ratio0, MixtureLikelihood_Standard = ratio1))
}

## Experiment
D = d
N = num.training
N0 = num.test
dif = 10^-6

# NOTE(review): every run below fits the SAME class (the literal 2) with a
# different number of components M (which ema reads from the global
# environment) -- presumably intentional for digit "2"; confirm.
M = 2
result2 = ema(10^-6,2,training.data, plot = T)
cat("Number of iterations:",result2$iterations,"\n")
result2$log_likelihood
mu2 = result2$mu; pi2 = result2$pi

M = 3
result3 = ema(10^-6,2,training.data, plot = T)
cat("Number of iterations:",result3$iterations,"\n")
result3$log_likelihood
mu3 = result3$mu; pi3 = result3$pi

M = 5
result5 = ema(10^-6,2,training.data, plot = T)
cat("Number of iterations:",result5$iterations,"\n")
result5$log_likelihood
mu5 = result5$mu; pi5 = result5$pi

M = 8
result8 = ema(10^-6,2,training.data, plot = T)
cat("Number of iterations:", result8$iterations, "\n")
result8$log_likelihood
mu8 = result8$mu; pi8 = result8$pi

## Final result
result = list(mu2 = mu2, pi2 = pi2, mu3 = mu3, pi3 = pi3, mu5 = mu5, pi5 = pi5, mu8 = mu8, pi8 = pi8)
# Graph 1
# Tutorial-style ggplot2 script: builds the same scatter plot of pie sales
# over time in several ways (single expression, stepwise, layer objects).
# NOTE(review): `sales` must already exist in the workspace with columns
# sdate, pies and daywk -- it is not created here; confirm where it is loaded.
library(ggplot2)
salesg = sales
ggplot(data = salesg, aes(x=sdate,y=pies)) + geom_point() + labs( title ='Sales data of Pies', x='Date', y='Sales')

#Stepwise
# Build the plot incrementally, each step adding one layer to the previous.
g1= ggplot(data = salesg, aes(x=sdate,y=pies))
g2= g1+ geom_point()
g3= g2 + labs( title ='Sales data of Pies', x='Date', y='Sales')
g3 #run

# Same plot, but keeping the layers as separate objects and combining at the end.
g1= ggplot(data = salesg, aes(x=sdate,y=pies))
g2= geom_point()
g3= labs( title ='Sales data of Pies', x='Date', y='Sales')
g=g1+g2+g3 #run
g

#Part 2
# Customized points (blue triangles) plus a dotted red linear-regression smoother.
g2b = geom_point(pch=17, color='blue', size=2)
g3b = geom_smooth(method='lm', color='red', linetype=3)
gA = g1 + g2b + g3b
gA
names(salesg)

#Facegrid
# Map day-of-week to both shape and colour of the points.
f1 = ggplot(salesg, aes(x=sdate, y=pies, shape=daywk, col=daywk))
f2 = geom_point(size=3)
f= f1+f2
f
/GRAPH/GG/ggp2.R
no_license
bidyan24/analytics
R
false
false
725
r
# Graph 1
# Tutorial-style ggplot2 script: builds the same scatter plot of pie sales
# over time in several ways (single expression, stepwise, layer objects).
# NOTE(review): `sales` must already exist in the workspace with columns
# sdate, pies and daywk -- it is not created here; confirm where it is loaded.
library(ggplot2)
salesg = sales
ggplot(data = salesg, aes(x=sdate,y=pies)) + geom_point() + labs( title ='Sales data of Pies', x='Date', y='Sales')

#Stepwise
# Build the plot incrementally, each step adding one layer to the previous.
g1= ggplot(data = salesg, aes(x=sdate,y=pies))
g2= g1+ geom_point()
g3= g2 + labs( title ='Sales data of Pies', x='Date', y='Sales')
g3 #run

# Same plot, but keeping the layers as separate objects and combining at the end.
g1= ggplot(data = salesg, aes(x=sdate,y=pies))
g2= geom_point()
g3= labs( title ='Sales data of Pies', x='Date', y='Sales')
g=g1+g2+g3 #run
g

#Part 2
# Customized points (blue triangles) plus a dotted red linear-regression smoother.
g2b = geom_point(pch=17, color='blue', size=2)
g3b = geom_smooth(method='lm', color='red', linetype=3)
gA = g1 + g2b + g3b
gA
names(salesg)

#Facegrid
# Map day-of-week to both shape and colour of the points.
f1 = ggplot(salesg, aes(x=sdate, y=pies, shape=daywk, col=daywk))
f2 = geom_point(size=3)
f= f1+f2
f
##############
# Laura B. Balzer
# Wrapper R code for simulation studies -
#"Two-Stage TMLE to Reduce Bias and Improve Efficiency in Cluster Randomized Trials"
#
# Runs nReps simulated cluster-randomized trials, records estimates from
# several estimators (t-test, GEE, CARE, mixed models, augmented GEE, TMLE),
# and summarizes bias / coverage / rejection rates.

rm(list=ls())

library('nbpMatching')
library('MASS')
library('SuperLearner')
library('ltmle')

# load functions to generate the data
source('MainFunction.R')
source('GenData.R')
# load functions to do estimation
source('Estimators.R')
# the following code is adapted from or taken directly from
# https://github.com/LauraBalzer/SEARCH_Analysis_Adults
source('Stage2_Functions_sim.R')
source('Adapt_Functions.R')

set.seed(1)

# whether there is an intervention or not
effect <- T
# do.complex = T for main simulation study
# do.complex = F for supplementary simulation study
do.complex <- T
# dropM = T explore performance of GEEs, mixed models, & CARE when not adjusting for M
# (only applicable to the complex DGP)
dropM <- T
# number of clusters
J <- 30
# number of participants per cluster
n <- c(100, 150, 200)
# set number of simulation repetitions
nReps <- 500
# SuperLearner library for individual-level TMLE to control for differential missingness
SL.library <- c('SL.mean', 'SL.glm', 'SL.gam')

# a bunch of data frames to save the results
# (".b" = matches broken at analysis; ".p" = pair-matched analysis)
truth<- data.frame(matrix(NA, nrow=nReps, ncol=7) )
aug.RR<- data.frame(matrix(NA, nrow=nReps, ncol=9))
colnames(aug.RR) <- c("psi", "psi.hat", "se", "tstat", "CI.lo", "CI.hi", "pval", "cover", "reject")
ttest.b <- ttest.p <- gee.b <- gee.p <- care.b <- care.p <- mixed.b <- mixed.p <- aug.p <- aug.RR
tmle.RR.b <- tmle.RR.p <- tmle.RD.b <- tmle.RD.p <- data.frame(matrix(NA, nrow=nReps, ncol=19))

# calculate the true values of the population-means and effects
pop.truth <- get.truth.pop(do.complex=do.complex, effect=effect, n=n, J=5000)

# for nReps
for(i in 1:nReps){
  out<- getTrialData(do.complex=do.complex, effect=effect, n=n, J=J, pop.truth=pop.truth, SL.library=SL.library, dropM=dropM, verbose=F)
  truth[i,]<- out$truth
  ttest.b[i,] <- out$ttest.b
  ttest.p[i,] <- out$ttest.p
  gee.b[i,] <- out$gee.b
  gee.p[i,] <- out$gee.p
  care.b[i,] <- out$care.b
  care.p[i,] <- out$care.p
  mixed.b[i,] <- out$mixed.b
  mixed.p[i,] <- out$mixed.p
  # NOTE(review): aug.p is initialized above but never filled in this loop
  # (only aug.RR is recorded); the pair-matched augmented-GEE row in P below
  # therefore summarizes the untouched template -- confirm whether an
  # out$aug.p component exists and should be stored here.
  aug.RR[i,] <- out$aug.RR
  tmle.RR.b[i,] <- out$tmle.RR.b
  tmle.RR.p[i,] <- out$tmle.RR.p
  tmle.RD.b[i,] <- out$tmle.RD.b
  tmle.RD.p[i,] <- out$tmle.RD.p
  print(i)
}
colnames(truth)<- colnames(out$truth)
colnames(tmle.RR.b) <- colnames(tmle.RR.p) <- colnames(tmle.RD.b) <- colnames(tmle.RD.p) <- colnames(out$tmle.RD.b)

# quick function to summarize the simulation results
# For each estimator: mean of the recorded summaries plus Monte-Carlo bias
# and SE; RD (risk difference) estimators use additive bias, RR (risk ratio)
# estimators use ratio bias and the SE of the log estimate.
make.pretty <- function(ttest, care, mixed, gee, augRR, tmleRR, tmleRD){
  make.pretty.mini <- function(est, RD=T){
    these <- c("psi", "psi.hat", "se", "tstat", "CI.lo", "CI.hi", "pval", "cover", "reject")
    if(RD){
      bias<- mean(est$psi.hat - est$psi)
      se.mc <- sqrt(var(est$psi.hat))
    }else{
      bias<- mean(est$psi.hat/est$psi)
      se.mc <- sqrt(var(log(est$psi.hat)) )
    }
    c(colMeans(est[,these]), bias=bias, se.mc=se.mc )
  }
  rbind(ttest.RD = make.pretty.mini(ttest),
        CARE.RD = make.pretty.mini(care),
        tmle.RD = make.pretty.mini(tmleRD),
        mixed.RR = make.pretty.mini(mixed, RD=F),
        gee.RR = make.pretty.mini(gee, RD=F),
        aug.RR = make.pretty.mini(augRR, RD=F),
        tmle.RR = make.pretty.mini(tmleRR, RD=F) )
}

# Results breaking the matches used for randomization
B <- make.pretty(ttest.b, care.b, mixed.b, gee.b, aug.RR, tmle.RR.b, tmle.RD.b)
round(B, 2)
# Results keeping the matches used for randomization
P <- make.pretty(ttest.p, care.p, mixed.p, gee.p, aug.p, tmle.RR.p, tmle.RD.p)
round(P,2)

colMeans(truth, na.rm=T)

# Save everything under a self-describing, date-stamped filename.
file.name <- paste('MAIN', do.complex, 'J', J, 'effect', effect, ifelse(!do.complex | dropM, 'noM', 'wM'), 'reps', nReps, format(Sys.time(),"%d%b%Y"), 'Rdata', sep='.' )

save(truth, J, n, ttest.b, care.b, mixed.b, gee.b, aug.RR,tmle.RD.b, tmle.RR.b, ttest.p, care.p, mixed.p, gee.p, aug.p, tmle.RD.p, tmle.RR.p, SL.library, file=file.name)
/StartHere.R
permissive
LauraBalzer/TwoStageTMLE
R
false
false
4,126
r
##############
# Laura B. Balzer
# Wrapper R code for simulation studies -
#"Two-Stage TMLE to Reduce Bias and Improve Efficiency in Cluster Randomized Trials"
#
# Runs nReps simulated cluster-randomized trials, records estimates from
# several estimators (t-test, GEE, CARE, mixed models, augmented GEE, TMLE),
# and summarizes bias / coverage / rejection rates.

rm(list=ls())

library('nbpMatching')
library('MASS')
library('SuperLearner')
library('ltmle')

# load functions to generate the data
source('MainFunction.R')
source('GenData.R')
# load functions to do estimation
source('Estimators.R')
# the following code is adapted from or taken directly from
# https://github.com/LauraBalzer/SEARCH_Analysis_Adults
source('Stage2_Functions_sim.R')
source('Adapt_Functions.R')

set.seed(1)

# whether there is an intervention or not
effect <- T
# do.complex = T for main simulation study
# do.complex = F for supplementary simulation study
do.complex <- T
# dropM = T explore performance of GEEs, mixed models, & CARE when not adjusting for M
# (only applicable to the complex DGP)
dropM <- T
# number of clusters
J <- 30
# number of participants per cluster
n <- c(100, 150, 200)
# set number of simulation repetitions
nReps <- 500
# SuperLearner library for individual-level TMLE to control for differential missingness
SL.library <- c('SL.mean', 'SL.glm', 'SL.gam')

# a bunch of data frames to save the results
# (".b" = matches broken at analysis; ".p" = pair-matched analysis)
truth<- data.frame(matrix(NA, nrow=nReps, ncol=7) )
aug.RR<- data.frame(matrix(NA, nrow=nReps, ncol=9))
colnames(aug.RR) <- c("psi", "psi.hat", "se", "tstat", "CI.lo", "CI.hi", "pval", "cover", "reject")
ttest.b <- ttest.p <- gee.b <- gee.p <- care.b <- care.p <- mixed.b <- mixed.p <- aug.p <- aug.RR
tmle.RR.b <- tmle.RR.p <- tmle.RD.b <- tmle.RD.p <- data.frame(matrix(NA, nrow=nReps, ncol=19))

# calculate the true values of the population-means and effects
pop.truth <- get.truth.pop(do.complex=do.complex, effect=effect, n=n, J=5000)

# for nReps
for(i in 1:nReps){
  out<- getTrialData(do.complex=do.complex, effect=effect, n=n, J=J, pop.truth=pop.truth, SL.library=SL.library, dropM=dropM, verbose=F)
  truth[i,]<- out$truth
  ttest.b[i,] <- out$ttest.b
  ttest.p[i,] <- out$ttest.p
  gee.b[i,] <- out$gee.b
  gee.p[i,] <- out$gee.p
  care.b[i,] <- out$care.b
  care.p[i,] <- out$care.p
  mixed.b[i,] <- out$mixed.b
  mixed.p[i,] <- out$mixed.p
  # NOTE(review): aug.p is initialized above but never filled in this loop
  # (only aug.RR is recorded); the pair-matched augmented-GEE row in P below
  # therefore summarizes the untouched template -- confirm whether an
  # out$aug.p component exists and should be stored here.
  aug.RR[i,] <- out$aug.RR
  tmle.RR.b[i,] <- out$tmle.RR.b
  tmle.RR.p[i,] <- out$tmle.RR.p
  tmle.RD.b[i,] <- out$tmle.RD.b
  tmle.RD.p[i,] <- out$tmle.RD.p
  print(i)
}
colnames(truth)<- colnames(out$truth)
colnames(tmle.RR.b) <- colnames(tmle.RR.p) <- colnames(tmle.RD.b) <- colnames(tmle.RD.p) <- colnames(out$tmle.RD.b)

# quick function to summarize the simulation results
# For each estimator: mean of the recorded summaries plus Monte-Carlo bias
# and SE; RD (risk difference) estimators use additive bias, RR (risk ratio)
# estimators use ratio bias and the SE of the log estimate.
make.pretty <- function(ttest, care, mixed, gee, augRR, tmleRR, tmleRD){
  make.pretty.mini <- function(est, RD=T){
    these <- c("psi", "psi.hat", "se", "tstat", "CI.lo", "CI.hi", "pval", "cover", "reject")
    if(RD){
      bias<- mean(est$psi.hat - est$psi)
      se.mc <- sqrt(var(est$psi.hat))
    }else{
      bias<- mean(est$psi.hat/est$psi)
      se.mc <- sqrt(var(log(est$psi.hat)) )
    }
    c(colMeans(est[,these]), bias=bias, se.mc=se.mc )
  }
  rbind(ttest.RD = make.pretty.mini(ttest),
        CARE.RD = make.pretty.mini(care),
        tmle.RD = make.pretty.mini(tmleRD),
        mixed.RR = make.pretty.mini(mixed, RD=F),
        gee.RR = make.pretty.mini(gee, RD=F),
        aug.RR = make.pretty.mini(augRR, RD=F),
        tmle.RR = make.pretty.mini(tmleRR, RD=F) )
}

# Results breaking the matches used for randomization
B <- make.pretty(ttest.b, care.b, mixed.b, gee.b, aug.RR, tmle.RR.b, tmle.RD.b)
round(B, 2)
# Results keeping the matches used for randomization
P <- make.pretty(ttest.p, care.p, mixed.p, gee.p, aug.p, tmle.RR.p, tmle.RD.p)
round(P,2)

colMeans(truth, na.rm=T)

# Save everything under a self-describing, date-stamped filename.
file.name <- paste('MAIN', do.complex, 'J', J, 'effect', effect, ifelse(!do.complex | dropM, 'noM', 'wM'), 'reps', nReps, format(Sys.time(),"%d%b%Y"), 'Rdata', sep='.' )

save(truth, J, n, ttest.b, care.b, mixed.b, gee.b, aug.RR,tmle.RD.b, tmle.RR.b, ttest.p, care.p, mixed.p, gee.p, aug.p, tmle.RD.p, tmle.RR.p, SL.library, file=file.name)
# Decision Tree - Classification
# Predict whether a Titanic passenger is likely to survive from a small set
# of input variables, using an rpart classification tree.

#import data from online site (requires network access)
path = 'https://raw.githubusercontent.com/thomaspernet/data_csv_r/master/data/titanic_csv.csv'
titanic <- read.csv(path)
head(titanic)
names(titanic)

# Keep only columns 2,3,5,6,7 of the CSV (the first must be `survived`,
# the tree's response; confirm the remaining predictors against names(titanic)).
data = titanic[,c(2,3,5,6,7)] #select few columns only
head(data)

#load libraries
library(rpart)
library(rpart.plot)

#Decision Tree: classification tree for survived ~ all other columns
fit <- rpart(survived~., data = data, method = 'class')
fit
rpart.plot(fit, extra = 106, cex=.8, nn=TRUE) #plot

printcp(fit) #select complexity parameter

# Prune at cp = 0.014 to get a smaller, less over-fitted tree
prunetree2 = prune(fit, cp=.014)
rpart.plot(prunetree2, cex=.8, nn=TRUE)
prunetree2
nrow(data)

#Predict class category or probabilities for two randomly chosen passengers.
# FIX: the original called dplyr::sample_n() but library(dplyr) was never
# loaded, so the script errored here; use base-R row sampling instead.
(testdata = data[sample(nrow(data), 2), ])
predict(prunetree2, newdata=testdata, type='class')
predict(prunetree2, newdata=testdata, type='prob')

#Use decision trees for predicting
#customer is likely to buy a product or not with probabilities
#customer is likely to default on payment or not with probabilities
#Student is likely to get selected, cricket team likely to win etc
#Imp steps
#select columns for prediction
#load libraries, create model
#prune the tree with cp value
#plot the graph
#predict for new
/titanicDecisionTree.R
no_license
aiyyomacha/analytics1
R
false
false
1,226
r
# Decision Tree - Classification
# Predict whether a Titanic passenger is likely to survive from a small set
# of input variables, using an rpart classification tree.

#import data from online site (requires network access)
path = 'https://raw.githubusercontent.com/thomaspernet/data_csv_r/master/data/titanic_csv.csv'
titanic <- read.csv(path)
head(titanic)
names(titanic)

# Keep only columns 2,3,5,6,7 of the CSV (the first must be `survived`,
# the tree's response; confirm the remaining predictors against names(titanic)).
data = titanic[,c(2,3,5,6,7)] #select few columns only
head(data)

#load libraries
library(rpart)
library(rpart.plot)

#Decision Tree: classification tree for survived ~ all other columns
fit <- rpart(survived~., data = data, method = 'class')
fit
rpart.plot(fit, extra = 106, cex=.8, nn=TRUE) #plot

printcp(fit) #select complexity parameter

# Prune at cp = 0.014 to get a smaller, less over-fitted tree
prunetree2 = prune(fit, cp=.014)
rpart.plot(prunetree2, cex=.8, nn=TRUE)
prunetree2
nrow(data)

#Predict class category or probabilities for two randomly chosen passengers.
# FIX: the original called dplyr::sample_n() but library(dplyr) was never
# loaded, so the script errored here; use base-R row sampling instead.
(testdata = data[sample(nrow(data), 2), ])
predict(prunetree2, newdata=testdata, type='class')
predict(prunetree2, newdata=testdata, type='prob')

#Use decision trees for predicting
#customer is likely to buy a product or not with probabilities
#customer is likely to default on payment or not with probabilities
#Student is likely to get selected, cricket team likely to win etc
#Imp steps
#select columns for prediction
#load libraries, create model
#prune the tree with cp value
#plot the graph
#predict for new
# Getting & Cleaning Data course project: build a tidy summary of the
# UCI HAR (Human Activity Recognition) smartphone dataset.
# Expects the unzipped "UCI HAR Dataset" folder in the working directory.
library(data.table)
library(dplyr)

#Read data
#Training
trainSubject <- read.table("UCI HAR Dataset/train/subject_train.txt", header = FALSE)
trainLabel <- read.table("UCI HAR Dataset/train/y_train.txt", header = FALSE)
trainSet <- read.table("UCI HAR Dataset/train/X_train.txt", header = FALSE)
#Testing
testSubject <- read.table("UCI HAR Dataset/test/subject_test.txt", header = FALSE)
testLabel <- read.table("UCI HAR Dataset/test/y_test.txt", header = FALSE)
testSet <- read.table("UCI HAR Dataset/test/X_test.txt", header = FALSE)

#1.Merges the training and the test sets to create one data set.
feature <- read.table("UCI HAR Dataset/features.txt")
subject <- rbind(trainSubject,testSubject)
label <- rbind(trainLabel,testLabel)
set <- rbind(trainSet,testSet)
colnames(subject) <- "subject"
colnames(label) <- "label"
colnames(set) <- t(feature[2])  # feature names become measurement column names
alldata <- cbind(set,label,subject)

#2.Extracts only the measurements on the mean and standard deviation for each measurement.
colWithMeanStd <- grep(".*Mean.*|.*Std.*", names(alldata), ignore.case=TRUE)
# keep the matched measurement columns plus the trailing label and subject columns
dataExtracted <- alldata[,c(colWithMeanStd,ncol(alldata)-1,ncol(alldata))]

#3.Uses descriptive activity names to name the activities in the data set
activityLabels <- read.table("UCI HAR Dataset/activity_labels.txt", header = FALSE)
for (i in 1:6) {
  dataExtracted$label[which(dataExtracted$label == i)] <- as.character(activityLabels$V2[i])
}

#4.Appropriately labels the data set with descriptive variable names
names(dataExtracted)
names(dataExtracted)<-gsub("^t", "time", names(dataExtracted))
names(dataExtracted)<-gsub("^f", "frequency", names(dataExtracted))
names(dataExtracted)<-gsub("Acc", "Accelerometer", names(dataExtracted))
names(dataExtracted)<-gsub("Gyro", "Gyroscope", names(dataExtracted))
names(dataExtracted)<-gsub("Mag", "Magnitude", names(dataExtracted))
names(dataExtracted)<-gsub("BodyBody", "Body", names(dataExtracted))

#5.From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
tidyData <- aggregate(. ~subject + label, dataExtracted, mean)
# NOTE(review): `row.name` is a partial match of write.table's `row.names`
# argument; it works via partial matching but should be spelled out.
write.table(tidyData, file = "Data.txt",row.name=FALSE)
/run_analysis.R
no_license
Edison9489/Getting-and-Cleaning-Data-Course-Project
R
false
false
2,177
r
# Getting & Cleaning Data course project: build a tidy summary of the
# UCI HAR (Human Activity Recognition) smartphone dataset.
# Expects the unzipped "UCI HAR Dataset" folder in the working directory.
library(data.table)
library(dplyr)

#Read data
#Training
trainSubject <- read.table("UCI HAR Dataset/train/subject_train.txt", header = FALSE)
trainLabel <- read.table("UCI HAR Dataset/train/y_train.txt", header = FALSE)
trainSet <- read.table("UCI HAR Dataset/train/X_train.txt", header = FALSE)
#Testing
testSubject <- read.table("UCI HAR Dataset/test/subject_test.txt", header = FALSE)
testLabel <- read.table("UCI HAR Dataset/test/y_test.txt", header = FALSE)
testSet <- read.table("UCI HAR Dataset/test/X_test.txt", header = FALSE)

#1.Merges the training and the test sets to create one data set.
feature <- read.table("UCI HAR Dataset/features.txt")
subject <- rbind(trainSubject,testSubject)
label <- rbind(trainLabel,testLabel)
set <- rbind(trainSet,testSet)
colnames(subject) <- "subject"
colnames(label) <- "label"
colnames(set) <- t(feature[2])  # feature names become measurement column names
alldata <- cbind(set,label,subject)

#2.Extracts only the measurements on the mean and standard deviation for each measurement.
colWithMeanStd <- grep(".*Mean.*|.*Std.*", names(alldata), ignore.case=TRUE)
# keep the matched measurement columns plus the trailing label and subject columns
dataExtracted <- alldata[,c(colWithMeanStd,ncol(alldata)-1,ncol(alldata))]

#3.Uses descriptive activity names to name the activities in the data set
activityLabels <- read.table("UCI HAR Dataset/activity_labels.txt", header = FALSE)
for (i in 1:6) {
  dataExtracted$label[which(dataExtracted$label == i)] <- as.character(activityLabels$V2[i])
}

#4.Appropriately labels the data set with descriptive variable names
names(dataExtracted)
names(dataExtracted)<-gsub("^t", "time", names(dataExtracted))
names(dataExtracted)<-gsub("^f", "frequency", names(dataExtracted))
names(dataExtracted)<-gsub("Acc", "Accelerometer", names(dataExtracted))
names(dataExtracted)<-gsub("Gyro", "Gyroscope", names(dataExtracted))
names(dataExtracted)<-gsub("Mag", "Magnitude", names(dataExtracted))
names(dataExtracted)<-gsub("BodyBody", "Body", names(dataExtracted))

#5.From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
tidyData <- aggregate(. ~subject + label, dataExtracted, mean)
# NOTE(review): `row.name` is a partial match of write.table's `row.names`
# argument; it works via partial matching but should be spelled out.
write.table(tidyData, file = "Data.txt",row.name=FALSE)
#' Conditional quantile functions for the skewed BB1 copula.
#'
#' Inverts the conditional CDF by Newton-Raphson, starting from the plain
#' BB1 conditional quantile (skew parameter ignored) and using step-halving
#' to keep iterates inside (0,1). Vectorized over p and the conditioning value.
#'
#' @param p vector of conditional probabilities in (0,1)
#' @param u,v conditioning values in (0,1)
#' @param cparsk copula parameter vector, third component in [0,1]
#' @param eps tolerance for convergence
#' @param mxiter maximum number of Newton-Raphson iterations
#' @param iprint print the iteration trace? (logical; was `F`, now spelled
#'   `FALSE` since `T`/`F` are reassignable bindings)
#' @rdname bb1sk
#' @export
qcondbb1sk21=function(p,u,cparsk, eps=1.e-8,mxiter=30,iprint=FALSE)
{ iter=0; diff=1.;
  v=qcondbb1(p,u,cparsk[1:2]) # starting point
  while(iter<mxiter & max(abs(diff))>eps)
  { num=pcondbb1sk21(v,u,cparsk)-p;
    den=dbb1sk(u,v,cparsk);
    diff=num/den;   # Newton step
    v=v-diff;
    # step-halving until all components are back inside (0,1)
    while(min(v)<0. | max(v)>1.) { diff=diff/2.; v=v+diff;}
    iter=iter+1;
    if(iprint) cat(iter,v,"\n")
  }
  v
}

#' @rdname bb1sk
#' @export
qcondbb1sk12=function(p,v,cparsk, eps=1.e-8,mxiter=30,iprint=FALSE)
{ iter=0; diff=1.;
  u=qcondbb1(p,v,cparsk[1:2]) # starting point
  while(iter<mxiter & max(abs(diff))>eps)
  { num=pcondbb1sk12(u,v,cparsk)-p;
    den=dbb1sk(u,v,cparsk);
    diff=num/den;   # Newton step
    u=u-diff;
    # step-halving until all components are back inside (0,1)
    while(min(u)<0. | max(u)>1.) { diff=diff/2.; u=u+diff;}
    iter=iter+1;
    if(iprint) cat(iter,u,"\n")
  }
  u
}

#' @rdname bb1sk
#' @export
# Default conditioning direction: condition on the second variable (2|1).
qcondbb1sk <- qcondbb1sk21
/R/bb1sk-qcond.r
permissive
vincenzocoia/copsupp
R
false
false
1,241
r
#' Conditional quantile functions for the skewed BB1 copula.
#'
#' Inverts the conditional CDF by Newton-Raphson, starting from the plain
#' BB1 conditional quantile (skew parameter ignored) and using step-halving
#' to keep iterates inside (0,1). Vectorized over p and the conditioning value.
#'
#' @param p vector of conditional probabilities in (0,1)
#' @param u,v conditioning values in (0,1)
#' @param cparsk copula parameter vector, third component in [0,1]
#' @param eps tolerance for convergence
#' @param mxiter maximum number of Newton-Raphson iterations
#' @param iprint print the iteration trace? (logical; was `F`, now spelled
#'   `FALSE` since `T`/`F` are reassignable bindings)
#' @rdname bb1sk
#' @export
qcondbb1sk21=function(p,u,cparsk, eps=1.e-8,mxiter=30,iprint=FALSE)
{ iter=0; diff=1.;
  v=qcondbb1(p,u,cparsk[1:2]) # starting point
  while(iter<mxiter & max(abs(diff))>eps)
  { num=pcondbb1sk21(v,u,cparsk)-p;
    den=dbb1sk(u,v,cparsk);
    diff=num/den;   # Newton step
    v=v-diff;
    # step-halving until all components are back inside (0,1)
    while(min(v)<0. | max(v)>1.) { diff=diff/2.; v=v+diff;}
    iter=iter+1;
    if(iprint) cat(iter,v,"\n")
  }
  v
}

#' @rdname bb1sk
#' @export
qcondbb1sk12=function(p,v,cparsk, eps=1.e-8,mxiter=30,iprint=FALSE)
{ iter=0; diff=1.;
  u=qcondbb1(p,v,cparsk[1:2]) # starting point
  while(iter<mxiter & max(abs(diff))>eps)
  { num=pcondbb1sk12(u,v,cparsk)-p;
    den=dbb1sk(u,v,cparsk);
    diff=num/den;   # Newton step
    u=u-diff;
    # step-halving until all components are back inside (0,1)
    while(min(u)<0. | max(u)>1.) { diff=diff/2.; u=u+diff;}
    iter=iter+1;
    if(iprint) cat(iter,u,"\n")
  }
  u
}

#' @rdname bb1sk
#' @export
# Default conditioning direction: condition on the second variable (2|1).
qcondbb1sk <- qcondbb1sk21
# Model-audit script: fit a C5.0 classifier to an OpenML dataset with mlr,
# hash the (task, learner) pair as an identifier, cross-validate, and record
# session info. Requires network access for the OpenML download.
#:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)

#:# config
# fixed seed so the CV folds (and hence the audit results) are reproducible
set.seed(1)

#:# data
dataset <- getOMLDataSet(data.name = "fri_c1_250_10")
head(dataset$data)

#:# preprocessing
head(dataset$data)

#:# model
# binary classification task; C5.0 learner predicting class probabilities
task = makeClassifTask(id = "task", data = dataset$data, target = "binaryClass")
lrn = makeLearner("classif.C50", par.vals = list(), predict.type = "prob")

#:# hash
#:# 617f67a5f65dec5116ed2b1f02b2b4b4
# digest of the (task, learner) pair, used as the model's identifier
hash <- digest(list(task, lrn))
hash

#:# audit
# 5-fold cross-validation over several performance measures
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr
ACC

#:# session info
# capture sessionInfo() to a file for reproducibility
sink(paste0("sessionInfo.txt"))
sessionInfo()
sink()
/models/openml_fri_c1_250_10/classification_binaryClass/617f67a5f65dec5116ed2b1f02b2b4b4/code.R
no_license
pysiakk/CaseStudies2019S
R
false
false
690
r
# Model-audit script: fit a C5.0 classifier to an OpenML dataset with mlr,
# hash the (task, learner) pair as an identifier, cross-validate, and record
# session info. Requires network access for the OpenML download.
#:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)

#:# config
# fixed seed so the CV folds (and hence the audit results) are reproducible
set.seed(1)

#:# data
dataset <- getOMLDataSet(data.name = "fri_c1_250_10")
head(dataset$data)

#:# preprocessing
head(dataset$data)

#:# model
# binary classification task; C5.0 learner predicting class probabilities
task = makeClassifTask(id = "task", data = dataset$data, target = "binaryClass")
lrn = makeLearner("classif.C50", par.vals = list(), predict.type = "prob")

#:# hash
#:# 617f67a5f65dec5116ed2b1f02b2b4b4
# digest of the (task, learner) pair, used as the model's identifier
hash <- digest(list(task, lrn))
hash

#:# audit
# 5-fold cross-validation over several performance measures
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr
ACC

#:# session info
# capture sessionInfo() to a file for reproducibility
sink(paste0("sessionInfo.txt"))
sessionInfo()
sink()
# Getting & Cleaning Data course project: download the UCI HAR dataset if
# needed and produce a tidy data set of per-subject, per-activity averages.
library(dplyr)

filename <- "getdata_projectfiles_UCI HAR Dataset.zip"

# Checking if archieve already exists.
if (!file.exists(filename)){
  fileURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
  download.file(fileURL, filename, method="curl")
}

# Checking if folder exists
if (!file.exists("UCI HAR Dataset")) {
  unzip(filename)
}

#Assignning all data frames
features <- read.table("UCI HAR Dataset/features.txt", col.names = c("n","functions"))
activities <- read.table("UCI HAR Dataset/activity_labels.txt", col.names = c("code", "activity"))
subject_test <- read.table("UCI HAR Dataset/test/subject_test.txt", col.names = "subject")
x_test <- read.table("UCI HAR Dataset/test/X_test.txt", col.names = features$functions)
y_test <- read.table("UCI HAR Dataset/test/y_test.txt", col.names = "code")
subject_train <- read.table("UCI HAR Dataset/train/subject_train.txt", col.names = "subject")
x_train <- read.table("UCI HAR Dataset/train/X_train.txt", col.names = features$functions)
y_train <- read.table("UCI HAR Dataset/train/y_train.txt", col.names = "code")

#Step 1: Merges the training and the test sets to create one data set.
X <- rbind(x_train, x_test)
Y <- rbind(y_train, y_test)
Subject <- rbind(subject_train, subject_test)
Merged_Data <- cbind(Subject, Y, X)

#Step 2: Extracts only the measurements on the mean and standard deviation for each measurement.
TidyData <- Merged_Data %>% select(subject, code, contains("mean"), contains("std"))

#Step 3: Uses descriptive activity names to name the activities in the data set.
TidyData$code <- activities[TidyData$code, 2]

#Step 4: Appropriately labels the data set with descriptive variable names.
names(TidyData)[2] = "activity"
names(TidyData)<-gsub("Acc", "Accelerometer", names(TidyData))
names(TidyData)<-gsub("Gyro", "Gyroscope", names(TidyData))
names(TidyData)<-gsub("BodyBody", "Body", names(TidyData))
names(TidyData)<-gsub("Mag", "Magnitude", names(TidyData))
names(TidyData)<-gsub("^t", "Time", names(TidyData))
names(TidyData)<-gsub("^f", "Frequency", names(TidyData))
names(TidyData)<-gsub("tBody", "TimeBody", names(TidyData))
names(TidyData)<-gsub("-mean()", "Mean", names(TidyData), ignore.case = TRUE)
names(TidyData)<-gsub("-std()", "STD", names(TidyData), ignore.case = TRUE)
names(TidyData)<-gsub("-freq()", "Frequency", names(TidyData), ignore.case = TRUE)
names(TidyData)<-gsub("angle", "Angle", names(TidyData))
names(TidyData)<-gsub("gravity", "Gravity", names(TidyData))

#Step 5: From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
# FIX: funs() is deprecated in dplyr (>= 0.8.0); pass the function directly.
FinalData <- TidyData %>% group_by(subject, activity) %>% summarise_all(mean)
# FIX: spell out `row.names` instead of relying on partial matching of `row.name`.
write.table(FinalData, "FinalData.txt", row.names=FALSE)
/run_analysis.R
no_license
vishal2600/data-cleaning
R
false
false
2,819
r
library(dplyr) filename <- "getdata_projectfiles_UCI HAR Dataset.zip" # Checking if archieve already exists. if (!file.exists(filename)){ fileURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip" download.file(fileURL, filename, method="curl") } # Checking if folder exists if (!file.exists("UCI HAR Dataset")) { unzip(filename) } #Assignning all data frames features <- read.table("UCI HAR Dataset/features.txt", col.names = c("n","functions")) activities <- read.table("UCI HAR Dataset/activity_labels.txt", col.names = c("code", "activity")) subject_test <- read.table("UCI HAR Dataset/test/subject_test.txt", col.names = "subject") x_test <- read.table("UCI HAR Dataset/test/X_test.txt", col.names = features$functions) y_test <- read.table("UCI HAR Dataset/test/y_test.txt", col.names = "code") subject_train <- read.table("UCI HAR Dataset/train/subject_train.txt", col.names = "subject") x_train <- read.table("UCI HAR Dataset/train/X_train.txt", col.names = features$functions) y_train <- read.table("UCI HAR Dataset/train/y_train.txt", col.names = "code") #Step 1: Merges the training and the test sets to create one data set. X <- rbind(x_train, x_test) Y <- rbind(y_train, y_test) Subject <- rbind(subject_train, subject_test) Merged_Data <- cbind(Subject, Y, X) #Step 2: Extracts only the measurements on the mean and standard deviation for each measurement. TidyData <- Merged_Data %>% select(subject, code, contains("mean"), contains("std")) #Step 3: Uses descriptive activity names to name the activities in the data set. TidyData$code <- activities[TidyData$code, 2] #Step 4: Appropriately labels the data set with descriptive variable names. 
# Step 4 (continued): expand abbreviated column names into descriptive ones.
names(TidyData)[2] <- "activity"
names(TidyData) <- gsub("Acc", "Accelerometer", names(TidyData))
names(TidyData) <- gsub("Gyro", "Gyroscope", names(TidyData))
names(TidyData) <- gsub("BodyBody", "Body", names(TidyData))
names(TidyData) <- gsub("Mag", "Magnitude", names(TidyData))
names(TidyData) <- gsub("^t", "Time", names(TidyData))
names(TidyData) <- gsub("^f", "Frequency", names(TidyData))
names(TidyData) <- gsub("tBody", "TimeBody", names(TidyData))
# BUG FIX: in a regex, "()" is an empty capture group, so the original
# patterns "-mean()"/"-std()"/"-freq()" only matched "-mean"/"-std"/"-freq"
# and left the literal "()" behind in the column names. Escape the
# parentheses so they are matched literally and removed.
names(TidyData) <- gsub("-mean\\(\\)", "Mean", names(TidyData), ignore.case = TRUE)
names(TidyData) <- gsub("-std\\(\\)", "STD", names(TidyData), ignore.case = TRUE)
names(TidyData) <- gsub("-freq\\(\\)", "Frequency", names(TidyData), ignore.case = TRUE)
names(TidyData) <- gsub("angle", "Angle", names(TidyData))
names(TidyData) <- gsub("gravity", "Gravity", names(TidyData))

# Step 5: From the data set in step 4, creates a second, independent tidy
# data set with the average of each variable for each activity and each
# subject. funs() is deprecated (dplyr >= 0.8); summarise_all() accepts the
# function directly.
FinalData <- TidyData %>%
  group_by(subject, activity) %>%
  summarise_all(mean)

# row.names (not the partially matched "row.name") suppresses row numbers.
write.table(FinalData, "FinalData.txt", row.names = FALSE)
# Update R itself via the installr package, then re-install every package
# from the current user library into the new installation.

# installing/loading the package:
# requireNamespace() tests availability; library() then makes a missing
# package a hard error instead of require()'s silent FALSE return.
if (!requireNamespace("installr", quietly = TRUE)) {
  install.packages("installr")
}
library(installr)  # load / install+load installr

# Launch the (interactive) updater that downloads and installs the newest R.
updateR()

## get packages installed
# FALSE spelled out: the T/F aliases are reassignable and should be avoided.
packs <- as.data.frame(installed.packages(.libPaths()[1]), stringsAsFactors = FALSE)

## and now re-install packages using install.packages()
install.packages(packs$Package)
/resources/analytics/R_resources/updating_R.r
no_license
aceri/portfolio
R
false
false
366
r
# Update R itself via the installr package, then re-install every package
# from the current user library into the new installation.

# installing/loading the package:
# requireNamespace() tests availability; library() then makes a missing
# package a hard error instead of require()'s silent FALSE return.
if (!requireNamespace("installr", quietly = TRUE)) {
  install.packages("installr")
}
library(installr)  # load / install+load installr

# Launch the (interactive) updater that downloads and installs the newest R.
updateR()

## get packages installed
# FALSE spelled out: the T/F aliases are reassignable and should be avoided.
packs <- as.data.frame(installed.packages(.libPaths()[1]), stringsAsFactors = FALSE)

## and now re-install packages using install.packages()
install.packages(packs$Package)
# Random problem size: p variables, n <= p observations (high-dimensional
# regime), with standard-normal data.
p <- sample(5:50, 1)
n <- sample(4:p, 1)
datamat <- matrix(rnorm(p * n), p, n)

test_that("checking output with uncentered data", {
  # Recompute the shrinkage estimator by hand with an identity target:
  # Sigmahat = (1 - lambda) * S + lambda * I.
  sample_covariance_matrix <- cov(t(datamat))
  trace_sigma_hat <- sum(diag(sample_covariance_matrix))
  data_centered <- datamat - rowMeans(datamat)
  q <- sum(colSums(data_centered^2)^2) / (n - 1)
  # Unbiased estimator of tr(Sigma^2) when the mean is estimated.
  trace_sigma_squared_hat <- (n - 1) / (n * (n - 2) * (n - 3)) *
    ((n - 1) * (n - 2) * sum(sample_covariance_matrix^2) +
       trace_sigma_hat^2 - n * q)
  lambda_hat <- (trace_sigma_hat^2 + trace_sigma_squared_hat) /
    (n * trace_sigma_squared_hat + trace_sigma_hat^2 -
       2 * trace_sigma_hat * (n - 1) + p * (n - 1))
  # Shrinkage intensity is clipped to [0, 1].
  lambda_hat <- max(0, min(lambda_hat, 1))
  target <- diag(p)
  ans <- shrinkcovmat(datamat, target = "identity", centered = FALSE)
  expect_equal(ans$Sigmahat,
               (1 - lambda_hat) * sample_covariance_matrix + lambda_hat * target)
  expect_equal(ans$lambdahat, lambda_hat)
  expect_equal(ans$Sigmasample, sample_covariance_matrix)
  expect_equal(ans$Target, target)
})

test_that("checking output with centered data", {
  # With a known zero mean, the MLE of the covariance divides by n (not n - 1).
  sample_covariance_matrix <- tcrossprod(datamat) / n
  trace_sigma_hat <- sum(diag(sample_covariance_matrix))
  trace_sigma_squared_hat <- 0
  # U-statistic for tr(Sigma^2): sum of squared cross-products over all
  # pairs of distinct columns.
  for (i in 1:(n - 1)) {
    trace_sigma_squared_hat <- sum(crossprod(
      datamat[, i], datamat[, (i + 1):n]
    )^2) + trace_sigma_squared_hat
  }
  trace_sigma_squared_hat <- 2 * trace_sigma_squared_hat / n / (n - 1)
  lambda_hat <- (trace_sigma_hat^2 + trace_sigma_squared_hat) /
    ((n + 1) * trace_sigma_squared_hat + trace_sigma_hat^2 -
       2 * trace_sigma_hat * n + p * n)
  lambda_hat <- max(0, min(lambda_hat, 1))
  target <- diag(p)
  ans <- shrinkcovmat(datamat, target = "identity", centered = TRUE)
  expect_equal(ans$Sigmahat,
               (1 - lambda_hat) * sample_covariance_matrix + lambda_hat * target)
  expect_equal(ans$lambdahat, lambda_hat)
  expect_equal(ans$Sigmasample, sample_covariance_matrix)
  expect_equal(ans$Target, target)
})

test_that("checking centered argument", {
  # The strings "TRUE"/"FALSE" are accepted as equivalents of the logicals;
  # any other string must raise an error.
  expect_equal(
    shrinkcovmat(datamat, target = "identity", centered = "TRUE"),
    shrinkcovmat(datamat, target = "identity", centered = TRUE)
  )
  expect_equal(
    shrinkcovmat(datamat, target = "identity", centered = "FALSE"),
    shrinkcovmat(datamat, target = "identity", centered = FALSE)
  )
  expect_error(shrinkcovmat(datamat, target = "identity", centered = "iraklis"))
})

test_that("checking sample size requirements", {
  # Uncentered estimation needs more than 3 columns; centered needs more
  # than 1 column.
  expect_error(
    shrinkcovmat(datamat[, 1:3], target = "identity", centered = FALSE),
    "The number of columns should be greater than 3"
  )
  expect_error(
    shrinkcovmat(datamat[, 1:2], target = "identity", centered = FALSE),
    "The number of columns should be greater than 3"
  )
  expect_error(
    shrinkcovmat(datamat[, 1], target = "identity", centered = FALSE),
    "The number of columns should be greater than 3"
  )
  expect_error(
    shrinkcovmat(datamat[, 1], target = "identity", centered = TRUE),
    "The number of columns should be greater than 1"
  )
})
/tests/testthat/test_shrinkage_identity.R
no_license
AnestisTouloumis/ShrinkCovMat
R
false
false
3,056
r
# Random problem size: p variables, n <= p observations (high-dimensional
# regime), with standard-normal data.
p <- sample(5:50, 1)
n <- sample(4:p, 1)
datamat <- matrix(rnorm(p * n), p, n)

test_that("checking output with uncentered data", {
  # Recompute the shrinkage estimator by hand with an identity target:
  # Sigmahat = (1 - lambda) * S + lambda * I.
  sample_covariance_matrix <- cov(t(datamat))
  trace_sigma_hat <- sum(diag(sample_covariance_matrix))
  data_centered <- datamat - rowMeans(datamat)
  q <- sum(colSums(data_centered^2)^2) / (n - 1)
  # Unbiased estimator of tr(Sigma^2) when the mean is estimated.
  trace_sigma_squared_hat <- (n - 1) / (n * (n - 2) * (n - 3)) *
    ((n - 1) * (n - 2) * sum(sample_covariance_matrix^2) +
       trace_sigma_hat^2 - n * q)
  lambda_hat <- (trace_sigma_hat^2 + trace_sigma_squared_hat) /
    (n * trace_sigma_squared_hat + trace_sigma_hat^2 -
       2 * trace_sigma_hat * (n - 1) + p * (n - 1))
  # Shrinkage intensity is clipped to [0, 1].
  lambda_hat <- max(0, min(lambda_hat, 1))
  target <- diag(p)
  ans <- shrinkcovmat(datamat, target = "identity", centered = FALSE)
  expect_equal(ans$Sigmahat,
               (1 - lambda_hat) * sample_covariance_matrix + lambda_hat * target)
  expect_equal(ans$lambdahat, lambda_hat)
  expect_equal(ans$Sigmasample, sample_covariance_matrix)
  expect_equal(ans$Target, target)
})

test_that("checking output with centered data", {
  # With a known zero mean, the MLE of the covariance divides by n (not n - 1).
  sample_covariance_matrix <- tcrossprod(datamat) / n
  trace_sigma_hat <- sum(diag(sample_covariance_matrix))
  trace_sigma_squared_hat <- 0
  # U-statistic for tr(Sigma^2): sum of squared cross-products over all
  # pairs of distinct columns.
  for (i in 1:(n - 1)) {
    trace_sigma_squared_hat <- sum(crossprod(
      datamat[, i], datamat[, (i + 1):n]
    )^2) + trace_sigma_squared_hat
  }
  trace_sigma_squared_hat <- 2 * trace_sigma_squared_hat / n / (n - 1)
  lambda_hat <- (trace_sigma_hat^2 + trace_sigma_squared_hat) /
    ((n + 1) * trace_sigma_squared_hat + trace_sigma_hat^2 -
       2 * trace_sigma_hat * n + p * n)
  lambda_hat <- max(0, min(lambda_hat, 1))
  target <- diag(p)
  ans <- shrinkcovmat(datamat, target = "identity", centered = TRUE)
  expect_equal(ans$Sigmahat,
               (1 - lambda_hat) * sample_covariance_matrix + lambda_hat * target)
  expect_equal(ans$lambdahat, lambda_hat)
  expect_equal(ans$Sigmasample, sample_covariance_matrix)
  expect_equal(ans$Target, target)
})

test_that("checking centered argument", {
  # The strings "TRUE"/"FALSE" are accepted as equivalents of the logicals;
  # any other string must raise an error.
  expect_equal(
    shrinkcovmat(datamat, target = "identity", centered = "TRUE"),
    shrinkcovmat(datamat, target = "identity", centered = TRUE)
  )
  expect_equal(
    shrinkcovmat(datamat, target = "identity", centered = "FALSE"),
    shrinkcovmat(datamat, target = "identity", centered = FALSE)
  )
  expect_error(shrinkcovmat(datamat, target = "identity", centered = "iraklis"))
})

test_that("checking sample size requirements", {
  # Uncentered estimation needs more than 3 columns; centered needs more
  # than 1 column.
  expect_error(
    shrinkcovmat(datamat[, 1:3], target = "identity", centered = FALSE),
    "The number of columns should be greater than 3"
  )
  expect_error(
    shrinkcovmat(datamat[, 1:2], target = "identity", centered = FALSE),
    "The number of columns should be greater than 3"
  )
  expect_error(
    shrinkcovmat(datamat[, 1], target = "identity", centered = FALSE),
    "The number of columns should be greater than 3"
  )
  expect_error(
    shrinkcovmat(datamat[, 1], target = "identity", centered = TRUE),
    "The number of columns should be greater than 1"
  )
})
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data-movie.R \docType{data} \name{movie_263} \alias{movie_263} \title{Final Destination} \format{ igraph object } \source{ https://dataverse.harvard.edu/dataset.xhtml?persistentId=doi:10.7910/DVN/T4HBA3 https://www.imdb.com/title/tt0195714 } \usage{ movie_263 } \description{ Interactions of characters in the movie "Final Destination" (2000) } \details{ The networks were built with a movie script parser. Even after multiple manual checks, the data set can still contain minor errors (e.g. typos in character names or wrongly parsed names). This may require some additional manual checks before using the data. Please report any such issues (https://github.com/schochastics/networkdata/issues/) } \references{ Kaminski, Jermain; Schober, Michael; Albaladejo, Raymond; Zastupailo, Oleksandr; Hidalgo, César, 2018, Moviegalaxies - Social Networks in Movies, https://doi.org/10.7910/DVN/T4HBA3, Harvard Dataverse, V3 } \keyword{datasets}
/man/movie_263.Rd
permissive
schochastics/networkdata
R
false
true
1,017
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data-movie.R \docType{data} \name{movie_263} \alias{movie_263} \title{Final Destination} \format{ igraph object } \source{ https://dataverse.harvard.edu/dataset.xhtml?persistentId=doi:10.7910/DVN/T4HBA3 https://www.imdb.com/title/tt0195714 } \usage{ movie_263 } \description{ Interactions of characters in the movie "Final Destination" (2000) } \details{ The networks were built with a movie script parser. Even after multiple manual checks, the data set can still contain minor errors (e.g. typos in character names or wrongly parsed names). This may require some additional manual checks before using the data. Please report any such issues (https://github.com/schochastics/networkdata/issues/) } \references{ Kaminski, Jermain; Schober, Michael; Albaladejo, Raymond; Zastupailo, Oleksandr; Hidalgo, César, 2018, Moviegalaxies - Social Networks in Movies, https://doi.org/10.7910/DVN/T4HBA3, Harvard Dataverse, V3 } \keyword{datasets}
#!/applications/R/R-3.4.0/bin/Rscript

# Profile mean coverage around peaks and random loci
# Usage via Condor submission system on node7:
# csmit -m 20G -c 1 "./peak_Profiles_DNAmeth_commandArgs.R 2000 2kb 20 /home/ajt200/BS_Seq/Stroud_2013/WT_rep2/wig/bed/GSM980986_WT_rep2_CG.wig.bed.gr.tab.bed CGmeth"

# Source functions to be used in this script
source("/projects/ajt200/Rfunctions/covMatrix_DNAmethMatrix_target_ranLoc_R3.4.0.R")

library(EnrichedHeatmap)
library(genomation)
library(regioneR)

# Command-line arguments (see usage line above):
#   1: flank size in bp, 2: flank label for file names, 3: window size,
#   4: path to per-base coverage BED file, 5: library name for outputs.
args <- commandArgs(trailingOnly = T)
flankSize <- as.numeric(args[1])
flankName <- as.character(args[2])
winSize <- as.numeric(args[3])
covDatPath <- as.character(args[4])
libName <- as.character(args[5])

# Output directories, created via the shell if missing.
matDir <- "./matrices/"
plotDir <- "./plots/"
system(paste0("[ -d ", matDir, " ] || mkdir ", matDir))
system(paste0("[ -d ", plotDir, " ] || mkdir ", plotDir))

# Chromosome definitions
# (5 chromosomes with these lengths; presumably Arabidopsis thaliana
# TAIR coordinates — confirm against the source data.)
chrs <- c("Chr1", "Chr2", "Chr3", "Chr4", "Chr5")
chrStart <- c(rep(1, 5))
chrLens <- c(30427671, 19698289, 23459830, 18585056, 26975502)
centromeres <- c(15086045, 3607929, 13587786, 3956021, 11725024)
pericenStart <- c(11330001, 990001, 10200001, 990001, 8890001)
pericenEnd <- c(18480000, 7540000, 16860000, 6850000, 15650000)
genome <- toGRanges(data.frame(chrs, chrStart, chrLens))
# Mask = the chromosome arms (everything outside the pericentromeric
# windows), so random loci below are drawn only from pericentromeres.
mask <- toGRanges(data.frame(rep(chrs, 2),
                             c(chrStart, pericenEnd),
                             c(pericenStart, chrLens)))

# Import peaks as GRanges object
load(paste0("/home/ajt200/analysis/REC8_pooled/peaks/PeakRanger1.18/ranger/MYC_Rep1_input_p0.001_q0.01/REC8_HA_Rep2/control_filtered/",
            "REC8_HA_Rep2_ChIP_rangerPeaksGR_peri_control_filtered_mergedOverlaps_noMinWidth.RData"))
peaksGR <- rangerPeaksGR_peri_control_filtered_mergedOverlaps
# Strand is irrelevant for coverage profiling; sort for reproducible order.
strand(peaksGR) <- "*"
peaksGR <- sortSeqlevels(peaksGR)
peaksGR <- sort(peaksGR)
print("***********peaks***********")
print(peaksGR)

# Generate GRanges object containing random loci of same number
# and size distribution as peaksGR (fixed seed for reproducibility)
set.seed(374592)
ranLocGR <- randomizeRegions(peaksGR,
                             genome = genome,
                             mask = mask,
                             per.chromosome = TRUE,
                             allow.overlaps = TRUE)

# Confirm ranLocGR does not contain loci in masked regions
stopifnot(sum(countOverlaps(mask, ranLocGR)) == 0)

# Specify locations of normalised per base coverage files
libPath <- system(paste0("ls ", covDatPath), intern = T)

# Import coverage files as GRanges objects and assign to library names
# (column 4 of the BED file holds the coverage value).
covGR <- readGeneric(libPath, meta.col = list(coverage = 4))
# Harmonise seqlevel naming ("chr1" -> "Chr1") with the peak object.
seqlevels(covGR) <- sub("chr", "Chr", seqlevels(covGR))
assign(paste0(libName), covGR)

# Define matrix and column mean DNA methylation outfile (mean profiles):
# element 1 = around peaks ("feature"), element 2 = around random loci.
outDF <- list(paste0(matDir, libName,
                     "_norm_cov_feature_smoothed_target_and_", flankName,
                     "_flank_dataframe.txt"),
              paste0(matDir, libName,
                     "_norm_cov_ranLoc_smoothed_target_and_", flankName,
                     "_flank_dataframe.txt"))
outDFcolMeans <- list(paste0(matDir, libName,
                             "_norm_cov_feature_smoothed_target_and_", flankName,
                             "_flank_dataframe_colMeans.txt"),
                      paste0(matDir, libName,
                             "_norm_cov_ranLoc_smoothed_target_and_", flankName,
                             "_flank_dataframe_colMeans.txt"))

# Run DNAmethMatrix() function on each coverage GRanges object to obtain matrices
## containing normalised coverage values around target and random loci
DNAmethMatrix(signal = covGR,
              feature = peaksGR,
              ranLoc = ranLocGR,
              featureSize = mean(width(peaksGR)),
              flankSize = flankSize,
              winSize = winSize,
              DNAmethOutDF = outDF,
              DNAmethOutDFcolMeans = outDFcolMeans)

print(paste0(libName, " profile calculation complete"))
/peaks/PeakRanger1.18/ranger/MYC_Rep1_input_p0.001_q0.01/REC8_HA_Rep2/control_filtered/peri/heatmap_analysis_01/peak_Profiles_DNAmeth_commandArgs.R
no_license
ajtock/REC8_pooled
R
false
false
3,880
r
#!/applications/R/R-3.4.0/bin/Rscript

# Profile mean coverage around peaks and random loci
# Usage via Condor submission system on node7:
# csmit -m 20G -c 1 "./peak_Profiles_DNAmeth_commandArgs.R 2000 2kb 20 /home/ajt200/BS_Seq/Stroud_2013/WT_rep2/wig/bed/GSM980986_WT_rep2_CG.wig.bed.gr.tab.bed CGmeth"

# Source functions to be used in this script
source("/projects/ajt200/Rfunctions/covMatrix_DNAmethMatrix_target_ranLoc_R3.4.0.R")

library(EnrichedHeatmap)
library(genomation)
library(regioneR)

# Command-line arguments (see usage line above):
#   1: flank size in bp, 2: flank label for file names, 3: window size,
#   4: path to per-base coverage BED file, 5: library name for outputs.
args <- commandArgs(trailingOnly = T)
flankSize <- as.numeric(args[1])
flankName <- as.character(args[2])
winSize <- as.numeric(args[3])
covDatPath <- as.character(args[4])
libName <- as.character(args[5])

# Output directories, created via the shell if missing.
matDir <- "./matrices/"
plotDir <- "./plots/"
system(paste0("[ -d ", matDir, " ] || mkdir ", matDir))
system(paste0("[ -d ", plotDir, " ] || mkdir ", plotDir))

# Chromosome definitions
# (5 chromosomes with these lengths; presumably Arabidopsis thaliana
# TAIR coordinates — confirm against the source data.)
chrs <- c("Chr1", "Chr2", "Chr3", "Chr4", "Chr5")
chrStart <- c(rep(1, 5))
chrLens <- c(30427671, 19698289, 23459830, 18585056, 26975502)
centromeres <- c(15086045, 3607929, 13587786, 3956021, 11725024)
pericenStart <- c(11330001, 990001, 10200001, 990001, 8890001)
pericenEnd <- c(18480000, 7540000, 16860000, 6850000, 15650000)
genome <- toGRanges(data.frame(chrs, chrStart, chrLens))
# Mask = the chromosome arms (everything outside the pericentromeric
# windows), so random loci below are drawn only from pericentromeres.
mask <- toGRanges(data.frame(rep(chrs, 2),
                             c(chrStart, pericenEnd),
                             c(pericenStart, chrLens)))

# Import peaks as GRanges object
load(paste0("/home/ajt200/analysis/REC8_pooled/peaks/PeakRanger1.18/ranger/MYC_Rep1_input_p0.001_q0.01/REC8_HA_Rep2/control_filtered/",
            "REC8_HA_Rep2_ChIP_rangerPeaksGR_peri_control_filtered_mergedOverlaps_noMinWidth.RData"))
peaksGR <- rangerPeaksGR_peri_control_filtered_mergedOverlaps
# Strand is irrelevant for coverage profiling; sort for reproducible order.
strand(peaksGR) <- "*"
peaksGR <- sortSeqlevels(peaksGR)
peaksGR <- sort(peaksGR)
print("***********peaks***********")
print(peaksGR)

# Generate GRanges object containing random loci of same number
# and size distribution as peaksGR (fixed seed for reproducibility)
set.seed(374592)
ranLocGR <- randomizeRegions(peaksGR,
                             genome = genome,
                             mask = mask,
                             per.chromosome = TRUE,
                             allow.overlaps = TRUE)

# Confirm ranLocGR does not contain loci in masked regions
stopifnot(sum(countOverlaps(mask, ranLocGR)) == 0)

# Specify locations of normalised per base coverage files
libPath <- system(paste0("ls ", covDatPath), intern = T)

# Import coverage files as GRanges objects and assign to library names
# (column 4 of the BED file holds the coverage value).
covGR <- readGeneric(libPath, meta.col = list(coverage = 4))
# Harmonise seqlevel naming ("chr1" -> "Chr1") with the peak object.
seqlevels(covGR) <- sub("chr", "Chr", seqlevels(covGR))
assign(paste0(libName), covGR)

# Define matrix and column mean DNA methylation outfile (mean profiles):
# element 1 = around peaks ("feature"), element 2 = around random loci.
outDF <- list(paste0(matDir, libName,
                     "_norm_cov_feature_smoothed_target_and_", flankName,
                     "_flank_dataframe.txt"),
              paste0(matDir, libName,
                     "_norm_cov_ranLoc_smoothed_target_and_", flankName,
                     "_flank_dataframe.txt"))
outDFcolMeans <- list(paste0(matDir, libName,
                             "_norm_cov_feature_smoothed_target_and_", flankName,
                             "_flank_dataframe_colMeans.txt"),
                      paste0(matDir, libName,
                             "_norm_cov_ranLoc_smoothed_target_and_", flankName,
                             "_flank_dataframe_colMeans.txt"))

# Run DNAmethMatrix() function on each coverage GRanges object to obtain matrices
## containing normalised coverage values around target and random loci
DNAmethMatrix(signal = covGR,
              feature = peaksGR,
              ranLoc = ranLocGR,
              featureSize = mean(width(peaksGR)),
              flankSize = flankSize,
              winSize = winSize,
              DNAmethOutDF = outDF,
              DNAmethOutDFcolMeans = outDFcolMeans)

print(paste0(libName, " profile calculation complete"))
# Load the lasso-selected feature table and split it 70/30 into training
# and test sets (e1071 is loaded for the SVM fitting that follows).
library(dplyr)
library(e1071)

## read the data — keep columns 2 through 24 (drop the first column)
model_data <- read.csv("10foldCrossValidated_Lasso_Features.csv")[, 2:24]

# Reproducible 70/30 row split.
set.seed(26)
sample <- sample.int(n = nrow(model_data),
                     size = floor(0.70 * nrow(model_data)),
                     replace = FALSE)
training <- model_data[sample, ]
test <- model_data[-sample, ]

# training$fraud = as.factor(training$fraud)
# test$fraud = as.factor(test$fraud)

# Empty placeholder frames (presumably filled by later fold-wise code;
# not used in this visible chunk).
train_1 <- data.frame()
test_1 <- data.frame()
set.seed(1)
/SVM.R
no_license
nikhilscg/Application-Fraud
R
false
false
441
r
# Load the lasso-selected feature table and split it 70/30 into training
# and test sets (e1071 is loaded for the SVM fitting that follows).
library(dplyr)
library(e1071)

## read the data — keep columns 2 through 24 (drop the first column)
model_data <- read.csv("10foldCrossValidated_Lasso_Features.csv")[, 2:24]

# Reproducible 70/30 row split.
set.seed(26)
sample <- sample.int(n = nrow(model_data),
                     size = floor(0.70 * nrow(model_data)),
                     replace = FALSE)
training <- model_data[sample, ]
test <- model_data[-sample, ]

# training$fraud = as.factor(training$fraud)
# test$fraud = as.factor(test$fraud)

# Empty placeholder frames (presumably filled by later fold-wise code;
# not used in this visible chunk).
train_1 <- data.frame()
test_1 <- data.frame()
set.seed(1)
# Fit an elastic-net model (alpha = 0.9, lasso-leaning) to the
# haematopoietic training set with 10-fold cross-validated glmnet, and
# append the fitted regularisation path to a log file.
library(glmnet)

# header = TRUE spelled out: the original "head=T" relied on partial
# argument matching and the fragile T alias.
mydata <- read.table("./TrainingSet/LassoBIC/haematopoietic.csv",
                     header = TRUE, sep = ",")

# Column 1 is the response; columns 4..ncol are the predictors.
x <- as.matrix(mydata[, 4:ncol(mydata)])
y <- as.matrix(mydata[, 1])

set.seed(123)
# Renamed from "glm" to avoid shadowing stats::glm.
cv_fit <- cv.glmnet(x, y, nfolds = 10, type.measure = "mse", alpha = 0.9,
                    family = "gaussian", standardize = FALSE)

# Append the glmnet path summary to the model log; sink() restores stdout.
sink('./Model/EN/Lasso/haematopoietic/haematopoietic_090.txt', append = TRUE)
print(cv_fit$glmnet.fit)
sink()
/Model/EN/Lasso/haematopoietic/haematopoietic_090.R
no_license
leon1003/QSMART
R
false
false
375
r
# Fit an elastic-net model (alpha = 0.9, lasso-leaning) to the
# haematopoietic training set with 10-fold cross-validated glmnet, and
# append the fitted regularisation path to a log file.
library(glmnet)

# header = TRUE spelled out: the original "head=T" relied on partial
# argument matching and the fragile T alias.
mydata <- read.table("./TrainingSet/LassoBIC/haematopoietic.csv",
                     header = TRUE, sep = ",")

# Column 1 is the response; columns 4..ncol are the predictors.
x <- as.matrix(mydata[, 4:ncol(mydata)])
y <- as.matrix(mydata[, 1])

set.seed(123)
# Renamed from "glm" to avoid shadowing stats::glm.
cv_fit <- cv.glmnet(x, y, nfolds = 10, type.measure = "mse", alpha = 0.9,
                    family = "gaussian", standardize = FALSE)

# Append the glmnet path summary to the model log; sink() restores stdout.
sink('./Model/EN/Lasso/haematopoietic/haematopoietic_090.txt', append = TRUE)
print(cv_fit$glmnet.fit)
sink()
### Jinliang Yang
### Updates: June 24th, 2016
# Build an input table of paired-end WGBS fastq files and their BAM outputs,
# then submit a bismark job through farmeR (align = FALSE: alignment itself
# is skipped on this run).

library("farmeR")

# Paired-end reads: *_1 and *_2 fastq files for each SRA run.
fq1 <- list.files(path="/group/jrigrp4/BS_teo20/WGBS/", pattern="sra_1.fastq$",
                  full.names = TRUE)
fq2 <- list.files(path="/group/jrigrp4/BS_teo20/WGBS/", pattern="sra_2.fastq$",
                  full.names = TRUE)
# Existing BAM files from previous bismark runs.
bamfiles <- list.files(path="/group/jrigrp4/BS_teo20/WGBS/BSM", pattern="bam$",
                       full.names = TRUE)

# Only samples 1 and 4 are processed here.
# NOTE(review): the second column is named "fa2" (not "fq2") — presumably
# the name run_bismark() expects; confirm against the farmeR documentation.
inputdf <- data.frame(fq1 = fq1[c(1,4)], fa2 = fq2[c(1,4)], outbase = "test",
                      bam = bamfiles[c(1,4)])

# Submit: N = 1 job, notify by email, run on the "bigmemm" partition with
# 8 slots (runinfo semantics per farmeR).
run_bismark(inputdf,
            genome = "/home/jolyang/dbcenter/AGP/AGPv2",
            outdir = "/home/jolyang/Documents/Github/methylation/largedata/bismark",
            N = 1, align = FALSE,
            email = "yangjl0930@gmail.com",
            runinfo = c(TRUE, "bigmemm", 8))
/profiling/0.SRA_align/0.B.2_alignment.R
no_license
RILAB/methylation
R
false
false
802
r
### Jinliang Yang
### Updates: June 24th, 2016
# Build an input table of paired-end WGBS fastq files and their BAM outputs,
# then submit a bismark job through farmeR (align = FALSE: alignment itself
# is skipped on this run).

library("farmeR")

# Paired-end reads: *_1 and *_2 fastq files for each SRA run.
fq1 <- list.files(path="/group/jrigrp4/BS_teo20/WGBS/", pattern="sra_1.fastq$",
                  full.names = TRUE)
fq2 <- list.files(path="/group/jrigrp4/BS_teo20/WGBS/", pattern="sra_2.fastq$",
                  full.names = TRUE)
# Existing BAM files from previous bismark runs.
bamfiles <- list.files(path="/group/jrigrp4/BS_teo20/WGBS/BSM", pattern="bam$",
                       full.names = TRUE)

# Only samples 1 and 4 are processed here.
# NOTE(review): the second column is named "fa2" (not "fq2") — presumably
# the name run_bismark() expects; confirm against the farmeR documentation.
inputdf <- data.frame(fq1 = fq1[c(1,4)], fa2 = fq2[c(1,4)], outbase = "test",
                      bam = bamfiles[c(1,4)])

# Submit: N = 1 job, notify by email, run on the "bigmemm" partition with
# 8 slots (runinfo semantics per farmeR).
run_bismark(inputdf,
            genome = "/home/jolyang/dbcenter/AGP/AGPv2",
            outdir = "/home/jolyang/Documents/Github/methylation/largedata/bismark",
            N = 1, align = FALSE,
            email = "yangjl0930@gmail.com",
            runinfo = c(TRUE, "bigmemm", 8))
# Auto-extracted example code for tuber::list_videocats (YouTube API
# wrapper); the example itself is wrapped in "Not run" because it needs
# an OAuth token.
library(tuber)

### Name: list_videocats
### Title: List of Categories That Can be Associated with Videos
### Aliases: list_videocats

### ** Examples

## Not run: 
##D 
##D # Set API token via yt_oauth() first
##D 
##D list_videocats(c(region_code = "JP"))
##D list_videocats() # Will throw an error asking for a valid filter with valid region_code
## End(Not run)
/data/genthat_extracted_code/tuber/examples/list_videocats.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
370
r
# Auto-extracted example code for tuber::list_videocats (YouTube API
# wrapper); the example itself is wrapped in "Not run" because it needs
# an OAuth token.
library(tuber)

### Name: list_videocats
### Title: List of Categories That Can be Associated with Videos
### Aliases: list_videocats

### ** Examples

## Not run: 
##D 
##D # Set API token via yt_oauth() first
##D 
##D list_videocats(c(region_code = "JP"))
##D list_videocats() # Will throw an error asking for a valid filter with valid region_code
## End(Not run)
# Auto-extracted example code for DMMF::MapChecker (raster layer checks);
# the example is wrapped in "Not run".
library(DMMF)

### Name: MapChecker
### Title: Map checker for raster layers
### Aliases: MapChecker

### ** Examples

## Not run: 
##D ## Load example data for test running B_Checker function
##D data(Potato.Convex)
##D attach(Potato.Convex)
##D ## Run B_Checker function with original DEM which has internal sinks.
##D DEM <- s.map$DEM_original
##D MapCheck <- MapChecker( DEM )
##D ## Check maps
##D par(mfrow=c(2,2))
##D plot(DEM)
##D plot(MapCheck$boundary)
##D plot(MapCheck$sink)
##D plot(MapCheck$stand)
## End(Not run)
/data/genthat_extracted_code/DMMF/examples/MapChecker.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
532
r
# Auto-extracted example code for DMMF::MapChecker (raster layer checks);
# the example is wrapped in "Not run".
library(DMMF)

### Name: MapChecker
### Title: Map checker for raster layers
### Aliases: MapChecker

### ** Examples

## Not run: 
##D ## Load example data for test running B_Checker function
##D data(Potato.Convex)
##D attach(Potato.Convex)
##D ## Run B_Checker function with original DEM which has internal sinks.
##D DEM <- s.map$DEM_original
##D MapCheck <- MapChecker( DEM )
##D ## Check maps
##D par(mfrow=c(2,2))
##D plot(DEM)
##D plot(MapCheck$boundary)
##D plot(MapCheck$sink)
##D plot(MapCheck$stand)
## End(Not run)
context("Factories of 'compboost'")

test_that("polynomial factory works", {
  # Data X and response y:
  X.linear = 1:10
  X.cubic = X.linear^3
  y = 3 * X.linear + rnorm(10, 0, 2)

  # Constructing data sources/targets and factories must be silent
  # (no printing, no warnings).
  expect_silent({ data.source = InMemoryData$new(as.matrix(X.linear), "my_variable") })
  expect_silent({ data.target.lin = InMemoryData$new() })
  expect_silent({ data.target.cub = InMemoryData$new() })
  expect_silent({ linear.factory = BaselearnerPolynomial$new(data.source, data.target.lin,
    1, FALSE) })
  expect_silent({ cubic.factory = BaselearnerPolynomial$new(data.source, data.target.cub,
    3, FALSE) })

  # Reference design matrices from intercept-free lm() fits.
  mod.linear = lm(y ~ 0 + X.linear)
  mod.cubic = lm(y ~ 0 + X.cubic)
  expect_equal(
    linear.factory$getData(),
    as.matrix(mod.linear$model[["X.linear"]])
  )
  expect_equal(
    cubic.factory$getData(),
    as.matrix(mod.cubic$model[["X.cubic"]])
  )
  # transformData() applied to the raw source must reproduce the stored data.
  expect_equal(
    linear.factory$getData(),
    linear.factory$transformData(data.source$getData())
  )
  expect_equal(
    cubic.factory$getData(),
    cubic.factory$transformData(data.source$getData())
  )
})

test_that("custom factory works", {
  # R-level hooks for the custom base learner: identity data transform,
  # an rpart tree as the model, and its prediction wrapper.
  instantiateDataFun = function (X) {
    return(X)
  }
  trainFun = function (y, X) {
    X = data.frame(y = y, x = as.numeric(X))
    return(rpart::rpart(y ~ x, data = X))
  }
  predictFun = function (model, newdata) {
    newdata = data.frame(x = as.numeric(newdata))
    return(as.matrix(predict(model, newdata)))
  }
  # A tree has no parameter vector; NA stands in as the "parameter".
  extractParameter = function (model) {
    return(as.matrix(NA))
  }

  X = matrix(1:10, ncol = 1)
  y = sin(as.numeric(X)) + rnorm(10, 0, 0.6)
  expect_silent({ data.source = InMemoryData$new(X, "variable_1") })
  expect_silent({ data.target = InMemoryData$new() })
  expect_silent({ custom.factory = BaselearnerCustom$new(data.source, data.target,
    instantiateDataFun, trainFun, predictFun, extractParameter) })

  expect_equal(
    custom.factory$getData(),
    instantiateDataFun(X)
  )
  expect_equal(
    custom.factory$getData(),
    custom.factory$transformData(data.source$getData())
  )
})

test_that("custom cpp factory works", {
  # Compiling the bundled C++ example emits compiler output, hence
  # expect_output rather than expect_silent.
  expect_output(Rcpp::sourceCpp(code = getCustomCppExample()))

  set.seed(pi)
  X = matrix(1:10, ncol = 1)
  y = 3 * as.numeric(X) + rnorm(10, 0, 2)
  expect_silent({ data.source = InMemoryData$new(X, "my_variable_name") })
  expect_silent({ data.target = InMemoryData$new() })
  # The *FunSetter() functions are defined by the C++ example sourced above.
  expect_silent({ custom.cpp.factory = BaselearnerCustomCpp$new(data.source, data.target,
    dataFunSetter(), trainFunSetter(), predictFunSetter()) })

  expect_equal(custom.cpp.factory$getData(), X)
  expect_equal(
    custom.cpp.factory$getData(),
    custom.cpp.factory$transformData(data.source$getData())
  )
})
/fuzzedpackages/compboost/tests/testthat/test_factory.R
no_license
akhikolla/testpackages
R
false
false
2,698
r
context("Factories of 'compboost'")

test_that("polynomial factory works", {
  # Data X and response y:
  X.linear = 1:10
  X.cubic = X.linear^3
  y = 3 * X.linear + rnorm(10, 0, 2)

  # Constructing data sources/targets and factories must be silent
  # (no printing, no warnings).
  expect_silent({ data.source = InMemoryData$new(as.matrix(X.linear), "my_variable") })
  expect_silent({ data.target.lin = InMemoryData$new() })
  expect_silent({ data.target.cub = InMemoryData$new() })
  expect_silent({ linear.factory = BaselearnerPolynomial$new(data.source, data.target.lin,
    1, FALSE) })
  expect_silent({ cubic.factory = BaselearnerPolynomial$new(data.source, data.target.cub,
    3, FALSE) })

  # Reference design matrices from intercept-free lm() fits.
  mod.linear = lm(y ~ 0 + X.linear)
  mod.cubic = lm(y ~ 0 + X.cubic)
  expect_equal(
    linear.factory$getData(),
    as.matrix(mod.linear$model[["X.linear"]])
  )
  expect_equal(
    cubic.factory$getData(),
    as.matrix(mod.cubic$model[["X.cubic"]])
  )
  # transformData() applied to the raw source must reproduce the stored data.
  expect_equal(
    linear.factory$getData(),
    linear.factory$transformData(data.source$getData())
  )
  expect_equal(
    cubic.factory$getData(),
    cubic.factory$transformData(data.source$getData())
  )
})

test_that("custom factory works", {
  # R-level hooks for the custom base learner: identity data transform,
  # an rpart tree as the model, and its prediction wrapper.
  instantiateDataFun = function (X) {
    return(X)
  }
  trainFun = function (y, X) {
    X = data.frame(y = y, x = as.numeric(X))
    return(rpart::rpart(y ~ x, data = X))
  }
  predictFun = function (model, newdata) {
    newdata = data.frame(x = as.numeric(newdata))
    return(as.matrix(predict(model, newdata)))
  }
  # A tree has no parameter vector; NA stands in as the "parameter".
  extractParameter = function (model) {
    return(as.matrix(NA))
  }

  X = matrix(1:10, ncol = 1)
  y = sin(as.numeric(X)) + rnorm(10, 0, 0.6)
  expect_silent({ data.source = InMemoryData$new(X, "variable_1") })
  expect_silent({ data.target = InMemoryData$new() })
  expect_silent({ custom.factory = BaselearnerCustom$new(data.source, data.target,
    instantiateDataFun, trainFun, predictFun, extractParameter) })

  expect_equal(
    custom.factory$getData(),
    instantiateDataFun(X)
  )
  expect_equal(
    custom.factory$getData(),
    custom.factory$transformData(data.source$getData())
  )
})

test_that("custom cpp factory works", {
  # Compiling the bundled C++ example emits compiler output, hence
  # expect_output rather than expect_silent.
  expect_output(Rcpp::sourceCpp(code = getCustomCppExample()))

  set.seed(pi)
  X = matrix(1:10, ncol = 1)
  y = 3 * as.numeric(X) + rnorm(10, 0, 2)
  expect_silent({ data.source = InMemoryData$new(X, "my_variable_name") })
  expect_silent({ data.target = InMemoryData$new() })
  # The *FunSetter() functions are defined by the C++ example sourced above.
  expect_silent({ custom.cpp.factory = BaselearnerCustomCpp$new(data.source, data.target,
    dataFunSetter(), trainFunSetter(), predictFunSetter()) })

  expect_equal(custom.cpp.factory$getData(), X)
  expect_equal(
    custom.cpp.factory$getData(),
    custom.cpp.factory$transformData(data.source$getData())
  )
})
# Convert a simulated lineage tree (adjacency-matrix form, loaded from
# "true_tree_rands_gil<id>.RData") into an ape "phylo" object, prune dead
# leaves, and save helper variables for downstream analysis.
#
# Args:
#   tree_string: character vector of tree identifiers.
#   tr:          index into tree_string selecting which tree to process.
# Returns: the (pruned) phylo object.
# Side effects: writes "phytree_variables<id>.RData" to the working dir.
#
# NOTE(review): the loaded .RData is expected to provide `tree` (square
# matrix; upper triangle = parent->child edges, diagonal = node state with
# 3 meaning dead) and `children` (list of descendant leaves per node) —
# confirm against the simulation code that produces these files.
make_phytree_array <- function(tree_string, tr) {
  load(paste("true_tree_rands_gil", tree_string[tr], ".RData", sep = ""))

  # Upper triangle holds parent -> child edges; leaves have no outgoing edges.
  utri <- tree
  utri[lower.tri(utri, diag = TRUE)] <- 0
  Nleaves <- sum(rowSums(utri) == 0)
  Nnodes <- ncol(tree)

  # create orig_node_names, element i is original name of node i:
  # ape requires tips numbered 1..Nleaves, internal nodes after them.
  new_node_names <- seq_len(ncol(tree))
  orig_node_names <- c()
  orig_node_names[1:Nleaves] <- new_node_names[rowSums(utri) == 0]
  internal_nodes <- new_node_names[rowSums(utri) != 0]
  orig_node_names[(Nleaves + 1):Nnodes] <- internal_nodes

  # Preallocated edge matrix: each internal node contributes two edges.
  phyTree <- list()
  phyTree$edge <- matrix(0, nrow = 2 * (Nnodes - Nleaves), ncol = 2)
  count <- 1
  for (i in seq_len(Nnodes - Nleaves)) {
    # Non-empty columns in this node's row: ind[1] is the node itself
    # (diagonal entry), ind[2] and ind[3] are its two children.
    ind <- which(tree[internal_nodes[i], ] != 0)
    phyTree$edge[count, ] <- c(which(orig_node_names == internal_nodes[i]),
                               which(orig_node_names == ind[2]))
    count <- count + 1
    phyTree$edge[count, ] <- c(which(orig_node_names == internal_nodes[i]),
                               which(orig_node_names == ind[3]))
    count <- count + 1
  }
  phyTree$tip.label <- orig_node_names[1:Nleaves]
  phyTree$node.label <- orig_node_names[(Nleaves + 1):Nnodes]
  phyTree$Nnode <- Nnodes - Nleaves
  class(phyTree) <- "phylo"

  # find which nodes are alive (leaves whose diagonal state is <= 2)
  alive_leaves <- new_node_names[rowSums(utri) == 0 & diag(tree) <= 2]
  alive_leaves_new <- sapply(alive_leaves, function(x) which(orig_node_names == x))

  # find children of each node in new numbering
  # (seq_along() instead of 1:length() so an empty list is handled safely)
  children_new <- list()
  for (i in seq_along(children)) {
    if (length(children[[i]]) != 0) {
      children_new[[i]] <- sapply(children[[i]],
                                  function(x) which(orig_node_names == x))
    }
  }

  # keep only the leaves, dropping empty entries and sorting each set
  children_new_alive <- lapply(children_new,
                               function(x) intersect(x, alive_leaves_new))
  children_new_alive <- children_new_alive[sapply(children_new_alive,
                                                  function(x) length(x) != 0)]
  children_new_alive <- lapply(children_new_alive, sort)
  children_alive <- lapply(children, function(x) intersect(x, alive_leaves))
  children_alive <- children_alive[sapply(children_alive,
                                          function(x) length(x) > 1)]
  children_alive <- lapply(children_alive, sort)

  # find dead leaves (state 3) and prune them from the phylo object
  dead_leaves <- new_node_names[diag(tree) == 3]
  dead_leaves_new <- sapply(dead_leaves, function(x) which(orig_node_names == x))
  if (length(dead_leaves) != 0) {
    # prune tree (ape::drop.tip)
    phyTree <- drop.tip(phyTree, tip = dead_leaves_new,
                        trim.internal = TRUE, subtree = FALSE)
  }

  # save outputs to a file
  save(alive_leaves, children_alive, phyTree, orig_node_names,
       file = paste("phytree_variables", tree_string[tr], ".RData", sep = ""))
  return(phyTree)
}
/make_phytree_array_cluster.R
no_license
anne-marie-lyne/Compare_tree_reconstruction_microsatellites
R
false
false
2,770
r
#script with function to make phylo object
#
# make_phytree_array: build an ape-style "phylo" object from a simulated
# tree stored as an adjacency matrix, prune dead leaves, and save the
# results to disk.
#
# Arguments:
#   tree_string: character vector of simulation identifiers; element `tr`
#                selects which "true_tree_rands_gil<id>.RData" file to load.
#   tr:          index into tree_string.
#
# The loaded .RData file is expected to supply at least `tree` (a square
# matrix whose upper triangle encodes parent->child links and whose diagonal
# encodes node status) and `children` (a list of child node ids per node).
# NOTE(review): diag(tree) <= 2 is treated as "alive" and == 3 as "dead"
# below -- confirm against the simulation code that wrote the .RData file.
# Requires ape::drop.tip when dead leaves exist.
#
# Side effects: saves alive_leaves, children_alive, phyTree and
# orig_node_names to "phytree_variables<id>.RData".
# Returns: the (possibly pruned) phylo object.
make_phytree_array = function(tree_string,tr){
  load(paste("true_tree_rands_gil",tree_string[tr],".RData",sep=""))
  # keep the strict upper triangle: rows with no entries have no children,
  # i.e. they are leaves
  utri = tree
  utri[lower.tri(utri,diag=TRUE)] = 0
  Nleaves = sum(rowSums(utri)==0)
  Nnodes = ncol(tree)
  #create orig_node_names, element i is original name of node i
  # (phylo convention: tips get numbers 1..Nleaves, internal nodes follow)
  new_node_names = 1:ncol(tree)
  orig_node_names = c()
  orig_node_names[1:Nleaves] = new_node_names[rowSums(utri)==0]
  internal_nodes = new_node_names[rowSums(utri)!=0]
  orig_node_names[(Nleaves+1):Nnodes] = internal_nodes
  phyTree = list()
  # each internal node contributes two edges (binary tree assumption)
  phyTree$edge = matrix(0,nrow=2*(Nnodes-Nleaves),ncol=2)
  count = 1
  for(i in 1:(Nnodes-Nleaves)){
    #find row internal_nodes[i]
    #find children (these are off-diagonal columns which aren't empty)
    #find new number of these element (which elements of orig_node_names)
    ind = which(tree[internal_nodes[i],]!=0)
    # NOTE(review): ind[2] and ind[3] are taken as the two children, which
    # presumes ind[1] is the diagonal entry (the node itself) and the tree
    # is strictly binary -- verify.
    phyTree$edge[count,] = c(which(orig_node_names==internal_nodes[i]),
                             which(orig_node_names==ind[2]))
    count = count + 1
    phyTree$edge[count,] = c(which(orig_node_names==internal_nodes[i]),
                             which(orig_node_names==ind[3]))
    count = count + 1
  }
  phyTree$tip.label = orig_node_names[1:Nleaves]
  phyTree$node.label = orig_node_names[(Nleaves+1):Nnodes]
  phyTree$Nnode = Nnodes-Nleaves
  class(phyTree) = "phylo"
  #find which nodes are alive
  alive_leaves = new_node_names[rowSums(utri)==0 & diag(tree)<=2]
  alive_leaves_new = sapply(alive_leaves,function(x) which(orig_node_names==x))
  #find children of each node in new numbering
  children_new = list()
  for(i in 1:length(children)){
    if(length(children[[i]])!=0){
      children_new[[i]] = sapply(children[[i]],function(x) which(orig_node_names==x))
    }
  }
  #keep only the leaves
  children_new_alive = lapply(children_new,function(x) intersect(x,alive_leaves_new))
  children_new_alive = children_new_alive[sapply(children_new_alive,function(x) length(x)!=0)]
  children_new_alive = lapply(children_new_alive,sort)
  children_alive = lapply(children,function(x) intersect(x,alive_leaves))
  children_alive = children_alive[sapply(children_alive,function(x) length(x)>1)]
  children_alive = lapply(children_alive,sort)
  #find dead leaves
  dead_leaves = new_node_names[diag(tree)==3]
  dead_leaves_new = sapply(dead_leaves,function(x) which(orig_node_names==x))
  if(length(dead_leaves)!=0){
    #prune tree
    phyTree = drop.tip(phyTree, tip=dead_leaves_new, trim.internal = TRUE, subtree = FALSE)
  }
  #save outputs to a file
  save(alive_leaves,children_alive,phyTree,orig_node_names,
       file=paste("phytree_variables",tree_string[tr],".RData",sep=""))
  return(phyTree)
}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/create_bins.R \name{create_bins} \alias{create_bins} \title{Create Bins from a vector of numbers.} \usage{ create_bins(x, nbins, bins = NULL) } \arguments{ \item{x}{Vector of numbers.} \item{nbins}{Number of bins to be generated. If the 'bins' parameter is provided, nbins is not used.} \item{bins}{List containing the break intervals and the bins' labels. If the 'bins' parameter is not provided, nbins is used and bins are generated automatically. Bins contain two vectors: breaks and labels.} } \value{ Vector of bins, as strings, associated with the numbers in x. } \description{ Create Bins from a vector of numbers. } \examples{ #auto generate the bins data<-data.frame(x =floor(exp(rnorm(200000 * 1.3)))) data$bin <- create_bins(data$x, nbins=6) #pre-define the bins data<-data.frame(x =floor(exp(rnorm(200000 * 1.3)))) data$bin <- create_bins(data$x, nbins=NULL, bins=list(breaks=c(-1,0,1,35,61,92,130), labels=c("0","1","2 to 35","36 to 61","62 to 92","93 to 130"))) }
/man/create_bins.Rd
no_license
mohtad/mapR
R
false
true
1,052
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/create_bins.R \name{create_bins} \alias{create_bins} \title{Create Bins from a vector of numbers.} \usage{ create_bins(x, nbins, bins = NULL) } \arguments{ \item{x}{Vector of numbers.} \item{nbins}{Number of bins to be generated. If the 'bins' parameter is provided, nbins is not used.} \item{bins}{List containing the break intervals and the bins' labels. If the 'bins' parameter is not provided, nbins is used and bins are generated automatically. Bins contain two vectors: breaks and labels.} } \value{ Vector of bins, as strings, associated with the numbers in x. } \description{ Create Bins from a vector of numbers. } \examples{ #auto generate the bins data<-data.frame(x =floor(exp(rnorm(200000 * 1.3)))) data$bin <- create_bins(data$x, nbins=6) #pre-define the bins data<-data.frame(x =floor(exp(rnorm(200000 * 1.3)))) data$bin <- create_bins(data$x, nbins=NULL, bins=list(breaks=c(-1,0,1,35,61,92,130), labels=c("0","1","2 to 35","36 to 61","62 to 92","93 to 130"))) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utils.R \name{augmented_diag} \alias{augmented_diag} \title{Returns an offset diagonal matrix.} \usage{ augmented_diag(d, offset) } \arguments{ \item{d}{Dimension of the (square) matrix} \item{offset}{Offset from the diagonal} } \value{ A matrix with an offset diagonal of ones. } \description{ Returns an offset diagonal matrix. } \examples{ d <- 10 offset <- 1 augmented_diag(d, offset) # zeroes everywhere except 1s on the +1 diag }
/man/augmented_diag.Rd
no_license
cran/ntwk
R
false
true
517
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utils.R \name{augmented_diag} \alias{augmented_diag} \title{Returns an offset diagonal matrix.} \usage{ augmented_diag(d, offset) } \arguments{ \item{d}{Dimension of the (square) matrix} \item{offset}{Offset from the diagonal} } \value{ A matrix with an offset diagonal of ones. } \description{ Returns an offset diagonal matrix. } \examples{ d <- 10 offset <- 1 augmented_diag(d, offset) # zeroes everywhere except 1s on the +1 diag }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_nba_season_team_rosters.R \name{get_nba_season_team_rosters} \alias{get_nba_season_team_rosters} \title{Query the commonteamroster endpoint from nba stats} \usage{ get_nba_season_team_rosters(seasons = NULL, teamids = NULL, sleep = TRUE) } \arguments{ \item{seasons}{numeric seasons to grab (e.g. 2000)} \item{teamids}{character that looks like a numeric but could have leading zeroes, teamids to grab, usually the TEAM_ID field in a boxscore} \item{sleep}{logical sleep between API calls} } \value{ list of team rosters; each entry will have players and coaches if available } \description{ Query the commonteamroster endpoint from nba stats }
/man/get_nba_season_team_rosters.Rd
permissive
jimtheflash/nbastatstools
R
false
true
731
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_nba_season_team_rosters.R \name{get_nba_season_team_rosters} \alias{get_nba_season_team_rosters} \title{Query the commonteamroster endpoint from nba stats} \usage{ get_nba_season_team_rosters(seasons = NULL, teamids = NULL, sleep = TRUE) } \arguments{ \item{seasons}{numeric seasons to grab (e.g. 2000)} \item{teamids}{character that looks like a numeric but could have leading zeroes, teamids to grab, usually the TEAM_ID field in a boxscore} \item{sleep}{logical sleep between API calls} } \value{ list of team rosters; each entry will have players and coaches if available } \description{ Query the commonteamroster endpoint from nba stats }
#!/usr/bin/Rscript --vanilla

# Install the non-CRAN 'documentation' package, setting an HTTP proxy first
# on FVAFR machines so the GitHub download can get through.

# Names of every package currently installed in the library paths.
pkgs_present <- rownames(installed.packages())

# machines at FVAFR need to set the proxy correctly.
if (grepl("FVAFR-", Sys.info()["nodename"])) {
  if (!("httr" %in% pkgs_present)) {
    install.packages("httr")
  }
  httr::set_config(httr::use_proxy(url = "10.127.255.17", port = 8080))
}

# documentation is a package not on CRAN, it is provided via github.
if (!("devtools" %in% pkgs_present)) {
  install.packages("devtools")
}
devtools::install_github("fvafrCU/documentation")
/listings/install_documentation.r
permissive
outheis/programmierleitfaden
R
false
false
516
r
#!/usr/bin/Rscript --vanilla

# Install the non-CRAN 'documentation' package, setting an HTTP proxy first
# on FVAFR machines so the GitHub download can get through.

# Names of every package currently installed in the library paths.
pkgs_present <- rownames(installed.packages())

# machines at FVAFR need to set the proxy correctly.
if (grepl("FVAFR-", Sys.info()["nodename"])) {
  if (!("httr" %in% pkgs_present)) {
    install.packages("httr")
  }
  httr::set_config(httr::use_proxy(url = "10.127.255.17", port = 8080))
}

# documentation is a package not on CRAN, it is provided via github.
if (!("devtools" %in% pkgs_present)) {
  install.packages("devtools")
}
devtools::install_github("fvafrCU/documentation")
\name{ggally_cor} \alias{ggally_cor} \title{Correlation from the Scatter Plot} \usage{ ggally_cor(data, mapping, corAlignPercent = 0.6, corSize = 3, ...) } \arguments{ \item{data}{data set being used} \item{mapping}{aesthetics being used} \item{corAlignPercent}{right-align position of the numbers. Default is 60 percent across the horizontal} \item{corSize}{size of text} \item{...}{other arguments being supplied to geom_text} } \description{ Estimate correlation from the given data. } \examples{ data(tips, package="reshape") ggally_cor(tips, mapping = ggplot2::aes_string(x = "total_bill", y = "tip")) ggally_cor(tips, mapping = ggplot2::aes_string(x = "total_bill", y = "tip", size = 15, colour = "red")) ggally_cor(tips, mapping = ggplot2::aes_string(x = "total_bill", y = "tip", color = "sex"), corSize = 5) } \author{ Barret Schloerke \email{schloerke@gmail.com} } \keyword{hplot}
/man/ggally_cor.Rd
no_license
lselzer/ggally
R
false
false
911
rd
\name{ggally_cor} \alias{ggally_cor} \title{Correlation from the Scatter Plot} \usage{ ggally_cor(data, mapping, corAlignPercent = 0.6, corSize = 3, ...) } \arguments{ \item{data}{data set being used} \item{mapping}{aesthetics being used} \item{corAlignPercent}{right-align position of the numbers. Default is 60 percent across the horizontal} \item{corSize}{size of text} \item{...}{other arguments being supplied to geom_text} } \description{ Estimate correlation from the given data. } \examples{ data(tips, package="reshape") ggally_cor(tips, mapping = ggplot2::aes_string(x = "total_bill", y = "tip")) ggally_cor(tips, mapping = ggplot2::aes_string(x = "total_bill", y = "tip", size = 15, colour = "red")) ggally_cor(tips, mapping = ggplot2::aes_string(x = "total_bill", y = "tip", color = "sex"), corSize = 5) } \author{ Barret Schloerke \email{schloerke@gmail.com} } \keyword{hplot}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_qcew_data.R \name{export_named_df} \alias{export_named_df} \title{Export the downloaded table into a csv file.} \usage{ export_named_df(path_data = "./", year, named_df) } \arguments{ \item{year}{filename for the .csv file. Note that you need to pass in the correct year here, since the function has no idea what the actual year corresponding to this data is} \item{named_df}{data frame with name} \item{path_data}{where the download happens: default current directory} } \value{ NIL. Exports csv file "./singlefile/$agglvl_code/$year.csv" } \description{ Export the downloaded table into a csv file. } \note{ generates the appropriate directory if it does not already exist. our data does not have any headers; this simplifies the process of joining all the csv files with cat or similar command line tools so that we have a single file across all aggregation level codes. }
/man/export_named_df.Rd
no_license
mp3201/entrydatar
R
false
true
971
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_qcew_data.R \name{export_named_df} \alias{export_named_df} \title{Export the downloaded table into a csv file.} \usage{ export_named_df(path_data = "./", year, named_df) } \arguments{ \item{year}{filename for the .csv file. Note that you need to pass in the correct year here, since the function has no idea what the actual year corresponding to this data is} \item{named_df}{data frame with name} \item{path_data}{where the download happens: default current directory} } \value{ NIL. Exports csv file "./singlefile/$agglvl_code/$year.csv" } \description{ Export the downloaded table into a csv file. } \note{ generates the appropriate directory if it does not already exist. our data does not have any headers; this simplifies the process of joining all the csv files with cat or similar command line tools so that we have a single file across all aggregation level codes. }
c DCNF-Autarky [version 0.0.1]. c Copyright (c) 2018-2019 Swansea University. c c Input Clause Count: 1792163 c Performing E1-Autarky iteration. c Remaining clauses count after E-Reduction: 1792163 c c Input Parameter (command line, file): c input filename QBFLIB/Miller-Marin/fpu/fpu-10Xh-error01-uniform-depth-25.qdimacs c output filename /tmp/dcnfAutarky.dimacs c autarky level 1 c conformity level 0 c encoding type 2 c no.of var 671009 c no.of clauses 1792163 c no.of taut cls 0 c c Output Parameters: c remaining no.of clauses 1792163 c c QBFLIB/Miller-Marin/fpu/fpu-10Xh-error01-uniform-depth-25.qdimacs 671009 1792163 E1 [] 0 3666 666991 1792163 NONE
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Miller-Marin/fpu/fpu-10Xh-error01-uniform-depth-25/fpu-10Xh-error01-uniform-depth-25.R
no_license
arey0pushpa/dcnf-autarky
R
false
false
687
r
c DCNF-Autarky [version 0.0.1]. c Copyright (c) 2018-2019 Swansea University. c c Input Clause Count: 1792163 c Performing E1-Autarky iteration. c Remaining clauses count after E-Reduction: 1792163 c c Input Parameter (command line, file): c input filename QBFLIB/Miller-Marin/fpu/fpu-10Xh-error01-uniform-depth-25.qdimacs c output filename /tmp/dcnfAutarky.dimacs c autarky level 1 c conformity level 0 c encoding type 2 c no.of var 671009 c no.of clauses 1792163 c no.of taut cls 0 c c Output Parameters: c remaining no.of clauses 1792163 c c QBFLIB/Miller-Marin/fpu/fpu-10Xh-error01-uniform-depth-25.qdimacs 671009 1792163 E1 [] 0 3666 666991 1792163 NONE
#' Build the default stock portfolio.
#'
#' BUG FIX: the original body had every construction statement commented out
#' but still executed `return(portefeuille)`, so the function always failed
#' with "object 'portefeuille' not found". The construction is restored.
#'
#' @return A 2 x 22 character matrix: row 1 holds the Euronext Paris ticker
#'   symbols, row 2 the number of shares per ticker (coerced to character,
#'   because a matrix holds a single atomic type).
getPortefeuille <- function() {
  # Euronext Paris tickers making up the portfolio, in fixed order.
  action <- c("AC.PA", "ACA.PA", "AI.PA", "AIR.PA", "BN.PA", "BNP.PA",
              "CA.PA", "CAP.PA", "CS.PA", "DG.PA", "EI.PA", "EN.PA",
              "ENGI.PA", "FP.PA", "FR.PA", "GLE.PA", "KER.PA", "LHN.PA",
              "LI.PA", "LR.PA", "MC.PA", "ML.PA")
  # Share counts, one per ticker (original values were simply 1..22).
  nombre <- seq_along(action)
  portefeuille <- matrix(nrow = 2, ncol = length(action))
  portefeuille[1, ] <- action
  portefeuille[2, ] <- nombre  # coerced to character alongside the tickers
  return(portefeuille)
}
/R/getPortefeuille.R
no_license
fcluzeau/stockfr
R
false
false
456
r
#' Build the default stock portfolio.
#'
#' BUG FIX: the original body had every construction statement commented out
#' but still executed `return(portefeuille)`, so the function always failed
#' with "object 'portefeuille' not found". The construction is restored.
#'
#' @return A 2 x 22 character matrix: row 1 holds the Euronext Paris ticker
#'   symbols, row 2 the number of shares per ticker (coerced to character,
#'   because a matrix holds a single atomic type).
getPortefeuille <- function() {
  # Euronext Paris tickers making up the portfolio, in fixed order.
  action <- c("AC.PA", "ACA.PA", "AI.PA", "AIR.PA", "BN.PA", "BNP.PA",
              "CA.PA", "CAP.PA", "CS.PA", "DG.PA", "EI.PA", "EN.PA",
              "ENGI.PA", "FP.PA", "FR.PA", "GLE.PA", "KER.PA", "LHN.PA",
              "LI.PA", "LR.PA", "MC.PA", "ML.PA")
  # Share counts, one per ticker (original values were simply 1..22).
  nombre <- seq_along(action)
  portefeuille <- matrix(nrow = 2, ncol = length(action))
  portefeuille[1, ] <- action
  portefeuille[2, ] <- nombre  # coerced to character alongside the tickers
  return(portefeuille)
}
#' Returns the shardkey (not implemented yet)
#' @importFrom jsonlite fromJSON
#' @param df a disk.frame
#' @export
# TODO make this work
shardkey <- function(df) {
  # Path to the chunk metadata stored alongside the disk.frame.
  meta_path <- file.path(attr(df, "path"), ".metadata", "meta.json")

  # Lazily create the metadata file on first access.
  if (!file.exists(meta_path)) {
    add_meta(df)
  }

  meta_info <- jsonlite::fromJSON(meta_path)
  list(
    shardkey = meta_info$shardkey,
    shardchunks = meta_info$shardchunks
  )
}
/R/shardkey.r
permissive
jingmouren/disk.frame
R
false
false
390
r
#' Returns the shardkey (not implemented yet)
#' @importFrom jsonlite fromJSON
#' @param df a disk.frame
#' @export
# TODO make this work
shardkey <- function(df) {
  # Path to the chunk metadata stored alongside the disk.frame.
  meta_path <- file.path(attr(df, "path"), ".metadata", "meta.json")

  # Lazily create the metadata file on first access.
  if (!file.exists(meta_path)) {
    add_meta(df)
  }

  meta_info <- jsonlite::fromJSON(meta_path)
  list(
    shardkey = meta_info$shardkey,
    shardchunks = meta_info$shardchunks
  )
}
context("cumulative distribution function")

# Fixture models: bounded [0, 1] and semi-bounded [0, Inf) J-QPD
# distributions, each at two alpha (quantile-tail) levels.
test_models <- list(
  jqpd(c(0.32, 0.4, 0.6), lower = 0, upper = 1, alpha = .1),
  jqpd(c(0.32, 0.4, 0.6), lower = 0, upper = 1, alpha = .3),
  jqpd(c(2, 4, 6), lower = 0, upper = Inf, alpha = .1),
  jqpd(c(2, 4, 6), lower = 0, upper = Inf, alpha = .3)
)

test_that("lower bound returns probability zero", {
  for (model in test_models) {
    expect_equal(pjqpd(model$lower, model), 0)
  }
})

test_that("upper bound returns probability one", {
  for (model in test_models) {
    expect_equal(pjqpd(model$upper, model), 1)
  }
})

test_that("recovers inputs", {
  # The CDF evaluated at the three defining quantiles must return the
  # probabilities they were specified at: alpha, 0.5 and 1 - alpha.
  for (model in test_models) {
    probs <- pjqpd(model$x, model)
    expect_equal(probs[1], model$alpha)
    expect_equal(probs[2], 0.5)
    expect_equal(probs[3], 1 - model$alpha)
  }
})
/tests/testthat/test-pjqpd.R
permissive
bobbyingram/rjqpd
R
false
false
790
r
context("cumulative distribution function")

# Fixture models: bounded [0, 1] and semi-bounded [0, Inf) J-QPD
# distributions, each at two alpha (quantile-tail) levels.
test_models <- list(
  jqpd(c(0.32, 0.4, 0.6), lower = 0, upper = 1, alpha = .1),
  jqpd(c(0.32, 0.4, 0.6), lower = 0, upper = 1, alpha = .3),
  jqpd(c(2, 4, 6), lower = 0, upper = Inf, alpha = .1),
  jqpd(c(2, 4, 6), lower = 0, upper = Inf, alpha = .3)
)

test_that("lower bound returns probability zero", {
  for (model in test_models) {
    expect_equal(pjqpd(model$lower, model), 0)
  }
})

test_that("upper bound returns probability one", {
  for (model in test_models) {
    expect_equal(pjqpd(model$upper, model), 1)
  }
})

test_that("recovers inputs", {
  # The CDF evaluated at the three defining quantiles must return the
  # probabilities they were specified at: alpha, 0.5 and 1 - alpha.
  for (model in test_models) {
    probs <- pjqpd(model$x, model)
    expect_equal(probs[1], model$alpha)
    expect_equal(probs[2], 0.5)
    expect_equal(probs[3], 1 - model$alpha)
  }
})
library(billboarder)

### Name: bb_data
### Title: Add data to Billboard chart
### Aliases: bb_data

### ** Examples

# Same example as the piped original, written as explicit sequential calls:
# build the widget, add a bar chart, then relabel the series.
chart <- billboarder()
chart <- bb_barchart(chart, data = table(mtcars$cyl))
bb_data(chart, names = list(Freq = "Number of cylinders"), labels = TRUE)
/data/genthat_extracted_code/billboarder/examples/bb_data.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
253
r
library(billboarder)

### Name: bb_data
### Title: Add data to Billboard chart
### Aliases: bb_data

### ** Examples

# Same example as the piped original, written as explicit sequential calls:
# build the widget, add a bar chart, then relabel the series.
chart <- billboarder()
chart <- bb_barchart(chart, data = table(mtcars$cyl))
bb_data(chart, names = list(Freq = "Number of cylinders"), labels = TRUE)
# Auto-generated AFL fuzzing regression case replaying a recorded input
# against the unexported C++ entry point dcurver:::ddc.
# phi: coefficient vector (all zeros for this case); x: evaluation points
# deliberately including denormals and extreme magnitudes to stress the
# numerical code paths. Do not edit the values by hand.
testlist <- list(phi = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), x = c(1.36656528938164e-311, -1.65791256519293e+82, 1.29418168595419e-228, -1.85353502606261e+293, 8.08855267383463e-84, -4.03929894096111e-178, 6.04817943207006e-103, -1.66738461804717e-220, -8.8217241872956e-21, -7.84828807007467e-146, -7.48864562038427e+21, 9.73143140745845e-188, 5.22970923741951e-218, 2.77992264324548e-197, -5.29147138128251e+140, -1.71332436886848e-93, -1.52261021137076e-52, 2.0627472502345e-21, 1.07149136185465e+184, 4.41748962512848e+47, -4.05885894997926e-142))
# Call the routine with the recorded argument list.
result <- do.call(dcurver:::ddc,testlist)
# Print the structure so the harness log captures the result's shape.
str(result)
/dcurver/inst/testfiles/ddc/AFL_ddc/ddc_valgrind_files/1609868309-test.R
no_license
akhikolla/updated-only-Issues
R
false
false
833
r
# Auto-generated AFL fuzzing regression case replaying a recorded input
# against the unexported C++ entry point dcurver:::ddc.
# phi: coefficient vector (all zeros for this case); x: evaluation points
# deliberately including denormals and extreme magnitudes to stress the
# numerical code paths. Do not edit the values by hand.
testlist <- list(phi = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), x = c(1.36656528938164e-311, -1.65791256519293e+82, 1.29418168595419e-228, -1.85353502606261e+293, 8.08855267383463e-84, -4.03929894096111e-178, 6.04817943207006e-103, -1.66738461804717e-220, -8.8217241872956e-21, -7.84828807007467e-146, -7.48864562038427e+21, 9.73143140745845e-188, 5.22970923741951e-218, 2.77992264324548e-197, -5.29147138128251e+140, -1.71332436886848e-93, -1.52261021137076e-52, 2.0627472502345e-21, 1.07149136185465e+184, 4.41748962512848e+47, -4.05885894997926e-142))
# Call the routine with the recorded argument list.
result <- do.call(dcurver:::ddc,testlist)
# Print the structure so the harness log captures the result's shape.
str(result)
# part_Onescripts.R -- exploratory plotting script (formatting reconstructed:
# the stored copy had its newlines collapsed, so line breaks below restore
# the only layout under which the inline '#' comments do not swallow code).
#
# Produces "Figure 1" (a family of Weibull test-sensitivity curves plus
# their population average) and a Figure-2-style plot of discordant-test
# likelihoods for a blood-donation scenario. All family_* /
# generate_* / likelihood_by_DDI helpers come from part_Onefunctions.R;
# Arrows() is presumably shape::Arrows (loaded by that file -- confirm).
# NOTE(review): setwd() calls keyed to specific machines make the script
# non-portable; consider relative paths or here::here().
if(Sys.info()['login']=='jeremyb'){
  setwd("C:\\Users\\jeremyb\\Documents\\A_Master\\Code")
}else if(Sys.info()['login']=='jeremy') {
  setwd("C:/Users/JumpCo Vostro3700/infection-dating-tool/manuscripts/figures")
}else{
  setwd(".") #what does this do?
}
source("part_Onefunctions.R")
# Record the session info for reproducibility.
file.create("C:\\Users\\jeremyb\\Documents\\A_Master\\Code\\runlog_main.txt")
runlog <- file("runlog.txt")
writeLines(capture.output(sessionInfo()),runlog)
close(runlog)
# NOTE(review): interactive help calls left in the script; they open help
# pages when sourced interactively and do nothing useful under Rscript.
?writeLines
?date
## SCRIPT
# Now we are going towards risk estimation
# So we define the quick transition from non-infectious to infectious, and the probability of detection (the 'sensitivity' of the test), both with Weibul distributions
n = 5                        # number of simulated individuals
detail <- 10                 # grid resolution: 1/step size
timeaxis <- seq(0,90,1/detail)
# Weibull parameters for the non-infectious -> infectious transition.
shape_infect <- 1
scale_infect <- 5
mean_delay_infect <- 7
population_sd_infect <- 10
# Weibull parameters for detectability (test sensitivity).
shape_detect <- 5
scale_detect <- 5
mean_delay_detect <- mean_delay_infect + 14
population_sd_detect <- population_sd_infect
donation_time <- 77
# now we generate and plot the curves
infectious_delays <- generate_positions_cumulative_normal(n=n,mean_center_position = mean_delay_infect, sd_size = population_sd_infect)
# detectability lags infectiousness by a fixed two weeks per individual
detectable_delays <- infectious_delays + 14
infectious_curves <- family_positive_likelihood_weibul_pos(times = timeaxis, shape = shape_infect, scale = scale_infect, positions = infectious_delays, n = n, test_time = donation_time)
undetected_curves <- family_negative_likelihood_weibul_pos(times = timeaxis, shape = shape_detect, scale = scale_detect, positions = detectable_delays, n = n, test_time = donation_time)
# I want a function to take a set of infectious curves and undetected curves and do the three-layered analysis we're proposing
# or it could take the parameters required for all three calculations, and do them.
# then we can take sets of parameters gleaned from literature and stick them together...
#but now how do I calculate the background ones....... if I decide to shift the likelihood by some amount? I'll probably have to define a function or a block of
# code which shifts the infectious delays to get corresponding detectable delays, then just run that block for a much bigger infectiousness set.. no problem
# need to think about whether there is any difference between defining that everyone waits two weeks from infectious to detectable, but the total delay is distributed, and defining
# infectiousness and detectability as distributed, using the same distribution and different means (delta is two weeks). No difference in the non-mixed case, but say we want to
# allow the time between infectiousness and detectability to vary. In that case it is more transparent to define a bunch of infectiousness times and delay-between-infectiousness-and-detectability-times,
# then calculate detectability times accordingly. That way we can change the map from 1 - 1 to 1 - many.... Defining them conditionally seems to make more sense but I'm not sure.
# anyway that shouldn't take me too long, so I can start with that tonight.
#DONEZOES. Now I've specified the detectability delays ACCORDING to the
# next on the list is to check the old figures with some mixing included. (imperfect correlation). ALSO, check that zero correlation is what they say it is....
# Figure 1
################
# : produce a family of sensitivity curves and their average
#goto_1
#Parameters
n=10
# test_details
mean_delay_t1=55
mean_delay_t2=45 #we have a second test listed so that we can easily swap between them when generating the sensitivity curves
sd_size_t1 = 5
sd_size_t2 = 1
detail = 10 # 1/Step size
timeaxis = seq(0,100,1/detail)
scale_t1= 3
shape_t1 = 2
scale_t2= 3
shape_t2 = 2
#visuals
# col_negative <- rgb(27/255,158/255,119/255)
# col_positive <- rgb(217/255,95/255,2/255)
# col_mean <- rgb(231/255,41/255,138/255)
# col_truth <- rgb(117/255,112/255,179/255)
# col_likelihood <- col_truth
# col_dotted <- rgb(3/7,3/7,3/7) #color for dotted lines
#col_negative <- "green" #rgb(27/255,158/255,119/255)
#col_positive <- "red" #rgb(217/255,95/255,2/255)
col_negative <- rgb(27/255,158/255,119/255)
col_positive <- rgb(217/255,95/255,2/255)
col_mean <- rgb(231/255,41/255,138/255)
#col_truth <- "purple" #rgb(117/255,112/255,179/255)
col_truth <- rgb(117/255,112/255,179/255)
col_likelihood <- col_truth
col_dotted <- rgb(3/7,3/7,3/7) #color for dotted lines
#y-axis limits
ylim_chosen <- c(-.002,1.007)
#Generate Data
sensitivity_family_1 <- family_sensitivity_weibul(n=n, scale=scale_t1,shape=shape_t1,mean_delay = mean_delay_t1 , sd_size=sd_size_t1,times=timeaxis)
sensitivity_family_background <- family_sensitivity_weibul(n=n+150,scale=scale_t1,shape=shape_t1,mean_delay=mean_delay_t1,sd_size=sd_size_t1,times=timeaxis)
sensitivity_average <- generate_mean_of_family(sensitivity_family_background)
# Plot
# pdf(file='figure_1.pdf',width=7.3,height=4.7)
# Base plot of the first family member just to establish the canvas/axes;
# the family and the average are drawn over it with lines() below.
plot(timeaxis,sensitivity_family_1[,1],type='l',xaxt='n',xaxs='i',yaxs='i',xlim=c(mean_delay_t1-4*sd_size_t1,mean_delay_t1+shift_to_half_likelihood_weibul(shape=shape_t1,scale=scale_t1)+2.35*sd_size_t1),ylim=c(-.005,1.005),xlab="",ylab="",col='green',yaxt='n',bty='L' )
#
# plot(timeaxis,sensitivity_average,col=col_truth,lwd=5,type='l',xaxt='n',xaxs='i',yaxs='i',xlim=c(mean_delay_t1-4*sd_size_t1,mean_delay_t1+shift_to_half_likelihood_weibul(shape=shape_t1,scale=scale_t1)+2.35*sd_size_t1),ylim=c(-.005,1.005),xlab="",ylab="",yaxt='n',bty='L')
# todo: title
# label sizes
# colors
# line widths
# scale
# you're right (alex) about the size of the lines rendering correctly in the pdf
title(xlab="Time since infection", line=1.5, cex.lab=1.2)
title(ylab='Probability of Infection', line=2, cex.lab=1.2)
## goto_fix
# axis ticks, remove box, bring lower axis up to zero or thereabout
yaxis_pos <- c(0,1)
yaxis_names <- c('0','1')
zero_pos <- c(0)
zero_name <- c(expression('0'['']))
axis(side=2, at=yaxis_pos, labels= yaxis_names,tck=-0.037, padj=.237)
#axis(side=1, at=xaxis_pos, labels= xaxis_names,padj=-.35,hadj=-.137)
axis(side=1, at=zero_pos, labels=zero_name,padj=-0.45,hadj=0.37)
# Draw the individual sensitivity curves, then the population average.
for (i in seq(1:n)){
  lines(timeaxis,sensitivity_family_1[,i],col=col_negative,lwd=1.5)
}
lines(timeaxis,sensitivity_average,col=col_truth,lwd=5)
# Average delay arrows
Arrows(x0 = mean_delay_t1-4*sd_size_t1, y0 = 0.501 ,x1= timeaxis[which.min(abs(sensitivity_average-0.5))], y1 = 0.5,code=3 ,arr.adj=1)
text(x=(mean_delay_t1-4*sd_size_t1+timeaxis[which.min(abs(sensitivity_average-0.5))])/2, y=c(0.53), pos=4, labels=expression(italic("d")))
#Standard deviation arrows
Arrows(x1=timeaxis[which.min(abs(sensitivity_average-0.5))],y0=0.5, x0= timeaxis[which.min(abs(sensitivity_average-0.5))]+sd_size_t1,y1=0.5,code=3,arr.adj=1)
text(x= timeaxis[which.min(abs(sensitivity_average-0.5))]+sd_size_t1/2-.5,y=0.53,pos=4,labels=expression(sigma))
# Arrows(c(0,1.7),c(1.3,-1.8),c(0.8,1.1),c(1.2,-1), lwd=2
# dev.off()
#######
#Figure 2 adapted to represent blood testing scenario
# Figure 2: Likelihood of observed discordant test results, t1 negative t2 positive - different times
#goto_2
# Parameters
n=10
detail=10
timeaxis=seq(0,100,1/detail)
# TEST 1 (negative)
# Describe individual (person) test sensitivity form with population mean-delay and standard deviation of delay
# delay is the variable we distribute across the population - it could in principle be anything else of course
#so each individual has the same SHAPE of sensitivity, but different delays
## Visuals
lwd_means <- 4
lwd_ind <- 1.37
lwd_likelihood <- lwd_means - 3
#High scale causes slower swap
#high shape causes quicker and steeper swap AND more symetrical swap
scale_t1= 2
shape_t1 = 1.73
mean_delay_t1 = 12
sd_size_t1 = 10
# Time of negative test (relative to arbitrary t=0)
test_time_1 = 48
## TEST 2 (positive)
scale_t2 = scale_t1*4.7
shape_t2 = shape_t1
mean_delay_t2 = mean_delay_t1*2+10
sd_size_t2 = 1.3*sd_size_t1
# Time of positive test
test_time_2 = timeaxis[length(timeaxis)]-5
## Generate Data
# Data = the individual likelihood curves for the first (negative) and second (positive) test
#Test 1
plotdata_negative = family_negative_likelihood_weibul(n=n, scale=scale_t1, shape=shape_t1, mean_delay=mean_delay_t1, sd_size= sd_size_t1, times = timeaxis, test_time = test_time_1)
#for generating mean curve
plotdata_negative_background <- family_negative_likelihood_weibul(n=n+150, scale=scale_t1, shape=shape_t1, mean_delay=mean_delay_t1, sd_size= sd_size_t1, times = timeaxis, test_time = test_time_1)
#Test 2
plotdata_positive = family_positive_likelihood_weibul(n=n, scale=scale_t2, shape=shape_t2, mean_delay=mean_delay_t2, sd_size= sd_size_t2, times = timeaxis, test_time = test_time_2)
#for generating mean curve
plotdata_positive_background <- family_positive_likelihood_weibul(n=n+150, scale=scale_t2, shape=shape_t2, mean_delay=mean_delay_t2, sd_size= sd_size_t2, times = timeaxis, test_time = test_time_2)
## COMMUNICATE
# pdf(file = "simple_case.pdf", width = 7.3, height = 4.7)
plot(timeaxis,plotdata_negative[,1],type='l',xlim=c(timeaxis[1],timeaxis[length(timeaxis)]),ylim=c(-.002,1.002),xaxt='n',yaxt='n',xaxs='i',yaxs='i',bty='l',xlab='',ylab='',col='green') #clarify label in comment
title(xlab="Hypothetical time of infection", line=1.5, cex.lab=1.4)
title(ylab=expression('Likelihood'), line=2, cex.lab=1.4)
yaxis_pos <- c(0,0.5,1)
yaxis_names <- c('0',"",'1')
xaxis_pos <- c(test_time_2)
xaxis_names <- c(expression('T'['Donation']))
zero_pos <- c(0)
zero_name <- c(expression('0'['']))
#shift axes
axis(side=2, at=yaxis_pos, labels= yaxis_names,tck=-0.037, padj=.437)
axis(side=1, at=xaxis_pos, labels= xaxis_names,padj=-.35,hadj=0)#-.137)
#axis(side=1, at=zero_pos, labels=zero_name,padj=-0.45,hadj=0.37)
# goto_do
#points(plotdata[,1],plotdata[,3])
# Individual negative-test likelihood curves...
for (i in seq(1:n)){
  lines(timeaxis,plotdata_negative[,i],col=col_negative,lwd=lwd_ind)
}
#points(plotdata[,1],plotdata[,3])
# ...and individual positive-test likelihood curves.
for (i in seq(1:n)){
  lines(timeaxis,plotdata_positive[,i],col=col_positive,lwd=lwd_ind)
}
# Population-average curves from the larger background families.
positive_mean_background <- rowMeans(plotdata_positive_background)
negative_mean_background <- rowMeans(plotdata_negative_background)
lines(timeaxis,negative_mean_background, lwd=lwd_means, col=col_negative)
lines(timeaxis,positive_mean_background, lwd=lwd_means, col=col_positive)
# Naive likelihood: product of the two mean curves (ignores within-person
# correlation); the "true" curve averages per-person products instead.
naive_likelihood <- positive_mean_background*negative_mean_background
lines(timeaxis,naive_likelihood,col='grey42',lwd=4,lty=5)
true_likelihood <- likelihood_by_DDI(plotdata_positive_background,plotdata_negative_background,timeaxis)
lines(timeaxis,true_likelihood,col=col_truth,lwd=4)
# segments(x0=test_time_1,y0=0,x1=test_time_1,y1=1,lty=4)
segments(x0=test_time_2,y0=0,x1=test_time_2,y1=1,lty=4)
# mean delay arrows (shape package)
#
# Arrows(x0 = timeaxis[which.min(abs(negative_mean_background-0.5))],y0=0.5,x1=test_time_1,y1=0.5,code=3 ,arr.adj=1,arr.width = .1/2)
# text(x=(timeaxis[which.min(abs(negative_mean_background-0.5))]+test_time_1)/2-2.4,y=0.53,pos=4,labels=expression(italic('d')['1']))
#
# Arrows(x0 = timeaxis[which.min(abs(positive_mean_background-0.5))],y0=0.5,x1=test_time_2,y1=0.5,code=3 ,arr.adj=1,arr.width = .1/2)
# text(x=(timeaxis[which.min(abs(positive_mean_background-0.5))]+test_time_2)/2-2.5,y=0.53,pos=4,labels=expression(italic('d')['2']))
# Standard deviation arrows
Arrows(x0 = timeaxis[which.min(abs(negative_mean_background-0.5))]-sd_size_t1,y0=0.5,x1= timeaxis[which.min(abs(negative_mean_background-0.5))], y1=0.5, code=3,arr.adj=1,arr.length = .1,arr.width = .1/2)
text(x=timeaxis[which.min(abs(negative_mean_background-0.5))]-sd_size_t1/2-2.4,y=0.53,pos=4,labels=expression(sigma['1']))
Arrows(x0 = timeaxis[which.min(abs(positive_mean_background-0.5))]-sd_size_t2,y0=0.5,x1= timeaxis[which.min(abs(positive_mean_background-0.5))], y1=0.5, code=3,arr.adj=1,arr.length = .1,arr.width = .1/2)
text(x=timeaxis[which.min(abs(positive_mean_background-0.5))]-sd_size_t2/2-2.5,y=0.53,pos=4,labels=expression(sigma['2']))
# Delta arrow
# Arrows(x0 = test_time_1,y0=0.75,x1=test_time_2,y1=0.75, code=3 ,arr.adj=1,arr.width = .1/2)
# text(x = test_time_1 + (test_time_2 - test_time_1)/2 - 5, y=0.8,pos=4,labels=expression(delta))
#product of means
#true likelihood
# dev.off()
/part_Onescripts.R
no_license
JemLukeBingham/poster_code
R
false
false
12,316
r
if(Sys.info()['login']=='jeremyb'){ setwd("C:\\Users\\jeremyb\\Documents\\A_Master\\Code") }else if(Sys.info()['login']=='jeremy') { setwd("C:/Users/JumpCo Vostro3700/infection-dating-tool/manuscripts/figures") }else{ setwd(".") #what does this do? } source("part_Onefunctions.R") file.create("C:\\Users\\jeremyb\\Documents\\A_Master\\Code\\runlog_main.txt") runlog <- file("runlog.txt") writeLines(capture.output(sessionInfo()),runlog) close(runlog) ?writeLines ?date ## SCRIPT # Now we are going towards risk estimation # So we define the quick transition from non-infectious to infectious, and the probability of detection (the 'sensitivity' of the test), both with Weibul distributions n = 5 detail <- 10 timeaxis <- seq(0,90,1/detail) shape_infect <- 1 scale_infect <- 5 mean_delay_infect <- 7 population_sd_infect <- 10 shape_detect <- 5 scale_detect <- 5 mean_delay_detect <- mean_delay_infect + 14 population_sd_detect <- population_sd_infect donation_time <- 77 # now we generate and plot the curves infectious_delays <- generate_positions_cumulative_normal(n=n,mean_center_position = mean_delay_infect, sd_size = population_sd_infect) detectable_delays <- infectious_delays + 14 infectious_curves <- family_positive_likelihood_weibul_pos(times = timeaxis, shape = shape_infect, scale = scale_infect, positions = infectious_delays, n = n, test_time = donation_time) undetected_curves <- family_negative_likelihood_weibul_pos(times = timeaxis, shape = shape_detect, scale = scale_detect, positions = detectable_delays, n = n, test_time = donation_time) # I want a function to take a set of infectious curves and undetected curves and do the three-layered analysis we're proposing # or it could take the parameters required for all three calculations, and do them. # then we can take sets of parameters gleaned from literature and stick them together... #but now how do I calculate the background ones....... if I decide to shift the likelihood by some amount? 
I'll probably have to define a function or a block of # code which shifts the infectious delays to get corresponding detectable delays, then just run that block for a much bigger infectiousness set.. no problem # need to think about whether there is any difference between defining that everyone waits two weeks from infectious to detectable,but the total delay is distributed, and defining that the infectiousness # infectiousness aand detectability as distributed, using the same distribution and different means (delta is two weeks). No difference in the non-mixed case, but say we want to # allow the time between infectiousness and detectability to vary. In that case it is more transparent to define a bunch of infectiousness times and delay-between-infectiousness-and-detectability-times, # then calculate detectability times accordingly. That way we can change the map from 1 - 1 to 1 - many.... Defining them conditionally seeeems to make more sense but I'm not sure. # anyway that shouldn't take me too long, so I can start with that tonight. #DONEZOES. Now I've specified the detectability delays ACCORDING to the # next on the list is to check the old figures with some mixing included. (imperfect correlation). ALSO, check that zero correlation is what they say it is.... 
# Figure 1 ################ # : produce a family of sensitivity curves and their average #goto_1 #Parameters n=10 # test_details mean_delay_t1=55 mean_delay_t2=45 #we have a second test listed so that we can easily swap between them when generating the sensitivity curves sd_size_t1 = 5 sd_size_t2 = 1 detail = 10 # 1/Step size timeaxis = seq(0,100,1/detail) scale_t1= 3 shape_t1 = 2 scale_t2= 3 shape_t2 = 2 #visuals # col_negative <- rgb(27/255,158/255,119/255) # col_positive <- rgb(217/255,95/255,2/255) # col_mean <- rgb(231/255,41/255,138/255) # col_truth <- rgb(117/255,112/255,179/255) # col_likelihood <- col_truth # col_dotted <- rgb(3/7,3/7,3/7) #color for dotted lines #col_negative <- "green" #rgb(27/255,158/255,119/255) #col_positive <- "red" #rgb(217/255,95/255,2/255) col_negative <- rgb(27/255,158/255,119/255) col_positive <- rgb(217/255,95/255,2/255) col_mean <- rgb(231/255,41/255,138/255) #col_truth <- "purple" #rgb(117/255,112/255,179/255) col_truth <- rgb(117/255,112/255,179/255) col_likelihood <- col_truth col_dotted <- rgb(3/7,3/7,3/7) #color for dotted lines #y-axis limits ylim_chosen <- c(-.002,1.007) #Generate Data sensitivity_family_1 <- family_sensitivity_weibul(n=n, scale=scale_t1,shape=shape_t1,mean_delay = mean_delay_t1 , sd_size=sd_size_t1,times=timeaxis) sensitivity_family_background <- family_sensitivity_weibul(n=n+150,scale=scale_t1,shape=shape_t1,mean_delay=mean_delay_t1,sd_size=sd_size_t1,times=timeaxis) sensitivity_average <- generate_mean_of_family(sensitivity_family_background) # Plot # pdf(file='figure_1.pdf',width=7.3,height=4.7) plot(timeaxis,sensitivity_family_1[,1],type='l',xaxt='n',xaxs='i',yaxs='i',xlim=c(mean_delay_t1-4*sd_size_t1,mean_delay_t1+shift_to_half_likelihood_weibul(shape=shape_t1,scale=scale_t1)+2.35*sd_size_t1),ylim=c(-.005,1.005),xlab="",ylab="",col='green',yaxt='n',bty='L' ) # 
plot(timeaxis,sensitivity_average,col=col_truth,lwd=5,type='l',xaxt='n',xaxs='i',yaxs='i',xlim=c(mean_delay_t1-4*sd_size_t1,mean_delay_t1+shift_to_half_likelihood_weibul(shape=shape_t1,scale=scale_t1)+2.35*sd_size_t1),ylim=c(-.005,1.005),xlab="",ylab="",yaxt='n',bty='L') # todo: title # label sizes # colors # line widths # scale # you're right (alex) about the size of the lines rendering correctly in the pdf title(xlab="Time since infection", line=1.5, cex.lab=1.2) title(ylab='Probability of Infection', line=2, cex.lab=1.2) ## goto_fix # axis ticks, remove box, bring lower axis up to zero or thereabout yaxis_pos <- c(0,1) yaxis_names <- c('0','1') zero_pos <- c(0) zero_name <- c(expression('0'[''])) axis(side=2, at=yaxis_pos, labels= yaxis_names,tck=-0.037, padj=.237) #axis(side=1, at=xaxis_pos, labels= xaxis_names,padj=-.35,hadj=-.137) axis(side=1, at=zero_pos, labels=zero_name,padj=-0.45,hadj=0.37) for (i in seq(1:n)){ lines(timeaxis,sensitivity_family_1[,i],col=col_negative,lwd=1.5) } lines(timeaxis,sensitivity_average,col=col_truth,lwd=5) # Average delay arrows Arrows(x0 = mean_delay_t1-4*sd_size_t1, y0 = 0.501 ,x1= timeaxis[which.min(abs(sensitivity_average-0.5))], y1 = 0.5,code=3 ,arr.adj=1) text(x=(mean_delay_t1-4*sd_size_t1+timeaxis[which.min(abs(sensitivity_average-0.5))])/2, y=c(0.53), pos=4, labels=expression(italic("d"))) #Standard deviation arrows Arrows(x1=timeaxis[which.min(abs(sensitivity_average-0.5))],y0=0.5, x0= timeaxis[which.min(abs(sensitivity_average-0.5))]+sd_size_t1,y1=0.5,code=3,arr.adj=1) text(x= timeaxis[which.min(abs(sensitivity_average-0.5))]+sd_size_t1/2-.5,y=0.53,pos=4,labels=expression(sigma)) # Arrows(c(0,1.7),c(1.3,-1.8),c(0.8,1.1),c(1.2,-1), lwd=2 # dev.off() ####### #Figure 2 adapted to represent blood testing scenario # Figure 2: Likelihood of observed discordant test results, t1 negative t2 positive - different times #goto_2 # Parameters n=10 detail=10 timeaxis=seq(0,100,1/detail) # TEST 1 (negative) # Describe individual 
(person) test sensitivity form with population mean-delay and standard deviation of delay # delay is the variable we distribute across the population - it could in principle be anything else of course #so each individual has the same SHAPE of sensitivity, but different delays ## Visuals lwd_means <- 4 lwd_ind <- 1.37 lwd_likelihood <- lwd_means - 3 #High scale causes slower swap #high shape causes quicker and steeper swap AND more symetrical swap scale_t1= 2 shape_t1 = 1.73 mean_delay_t1 = 12 sd_size_t1 = 10 # Time of negative test (relative to arbitrary t=0) test_time_1 = 48 ## TEST 2 (positive) scale_t2 = scale_t1*4.7 shape_t2 = shape_t1 mean_delay_t2 = mean_delay_t1*2+10 sd_size_t2 = 1.3*sd_size_t1 # Time of positive test test_time_2 = timeaxis[length(timeaxis)]-5 ## Generate Data # Data = the individual likelihood curves for the first (negative) and second (positive) test #Test 1 plotdata_negative = family_negative_likelihood_weibul(n=n, scale=scale_t1, shape=shape_t1, mean_delay=mean_delay_t1, sd_size= sd_size_t1, times = timeaxis, test_time = test_time_1) #for generating mean curve plotdata_negative_background <- family_negative_likelihood_weibul(n=n+150, scale=scale_t1, shape=shape_t1, mean_delay=mean_delay_t1, sd_size= sd_size_t1, times = timeaxis, test_time = test_time_1) #Test 2 plotdata_positive = family_positive_likelihood_weibul(n=n, scale=scale_t2, shape=shape_t2, mean_delay=mean_delay_t2, sd_size= sd_size_t2, times = timeaxis, test_time = test_time_2) #for generating mean curve plotdata_positive_background <- family_positive_likelihood_weibul(n=n+150, scale=scale_t2, shape=shape_t2, mean_delay=mean_delay_t2, sd_size= sd_size_t2, times = timeaxis, test_time = test_time_2) ## COMMUNICATE # pdf(file = "simple_case.pdf", width = 7.3, height = 4.7) plot(timeaxis,plotdata_negative[,1],type='l',xlim=c(timeaxis[1],timeaxis[length(timeaxis)]),ylim=c(-.002,1.002),xaxt='n',yaxt='n',xaxs='i',yaxs='i',bty='l',xlab='',ylab='',col='green') #clarify label in comment 
title(xlab="Hypothetical time of infection", line=1.5, cex.lab=1.4) title(ylab=expression('Likelihood'), line=2, cex.lab=1.4) yaxis_pos <- c(0,0.5,1) yaxis_names <- c('0',"",'1') xaxis_pos <- c(test_time_2) xaxis_names <- c(expression('T'['Donation'])) zero_pos <- c(0) zero_name <- c(expression('0'[''])) #shift axes axis(side=2, at=yaxis_pos, labels= yaxis_names,tck=-0.037, padj=.437) axis(side=1, at=xaxis_pos, labels= xaxis_names,padj=-.35,hadj=0)#-.137) #axis(side=1, at=zero_pos, labels=zero_name,padj=-0.45,hadj=0.37) # goto_do #points(plotdata[,1],plotdata[,3]) for (i in seq(1:n)){ lines(timeaxis,plotdata_negative[,i],col=col_negative,lwd=lwd_ind) } #points(plotdata[,1],plotdata[,3]) for (i in seq(1:n)){ lines(timeaxis,plotdata_positive[,i],col=col_positive,lwd=lwd_ind) } positive_mean_background <- rowMeans(plotdata_positive_background) negative_mean_background <- rowMeans(plotdata_negative_background) lines(timeaxis,negative_mean_background, lwd=lwd_means, col=col_negative) lines(timeaxis,positive_mean_background, lwd=lwd_means, col=col_positive) naive_likelihood <- positive_mean_background*negative_mean_background lines(timeaxis,naive_likelihood,col='grey42',lwd=4,lty=5) true_likelihood <- likelihood_by_DDI(plotdata_positive_background,plotdata_negative_background,timeaxis) lines(timeaxis,true_likelihood,col=col_truth,lwd=4) # segments(x0=test_time_1,y0=0,x1=test_time_1,y1=1,lty=4) segments(x0=test_time_2,y0=0,x1=test_time_2,y1=1,lty=4) # mean delay arrows (shape package) # # Arrows(x0 = timeaxis[which.min(abs(negative_mean_background-0.5))],y0=0.5,x1=test_time_1,y1=0.5,code=3 ,arr.adj=1,arr.width = .1/2) # text(x=(timeaxis[which.min(abs(negative_mean_background-0.5))]+test_time_1)/2-2.4,y=0.53,pos=4,labels=expression(italic('d')['1'])) # # Arrows(x0 = timeaxis[which.min(abs(positive_mean_background-0.5))],y0=0.5,x1=test_time_2,y1=0.5,code=3 ,arr.adj=1,arr.width = .1/2) # 
text(x=(timeaxis[which.min(abs(positive_mean_background-0.5))]+test_time_2)/2-2.5,y=0.53,pos=4,labels=expression(italic('d')['2'])) # Standard deviation arrows Arrows(x0 = timeaxis[which.min(abs(negative_mean_background-0.5))]-sd_size_t1,y0=0.5,x1= timeaxis[which.min(abs(negative_mean_background-0.5))], y1=0.5, code=3,arr.adj=1,arr.length = .1,arr.width = .1/2) text(x=timeaxis[which.min(abs(negative_mean_background-0.5))]-sd_size_t1/2-2.4,y=0.53,pos=4,labels=expression(sigma['1'])) Arrows(x0 = timeaxis[which.min(abs(positive_mean_background-0.5))]-sd_size_t2,y0=0.5,x1= timeaxis[which.min(abs(positive_mean_background-0.5))], y1=0.5, code=3,arr.adj=1,arr.length = .1,arr.width = .1/2) text(x=timeaxis[which.min(abs(positive_mean_background-0.5))]-sd_size_t2/2-2.5,y=0.53,pos=4,labels=expression(sigma['2'])) # Delta arrow # Arrows(x0 = test_time_1,y0=0.75,x1=test_time_2,y1=0.75, code=3 ,arr.adj=1,arr.width = .1/2) # text(x = test_time_1 + (test_time_2 - test_time_1)/2 - 5, y=0.8,pos=4,labels=expression(delta)) #product of means #true likelihood # dev.off()
\name{ex07.26} \alias{ex07.26} \docType{data} \title{data from exercise 7.26} \description{ The \code{ex07.26} data frame has 11 rows and 2 columns. } \format{ This data frame contains the following columns: \describe{ \item{Number.of.absences}{ a numeric vector } \item{Frequency}{ a numeric vector } } } \source{ Devore, J. L. (2003) \emph{Probability and Statistics for Engineering and the Sciences (6th ed)}, Duxbury } \examples{ str(ex07.26) } \keyword{datasets}
/man/ex07.26.Rd
no_license
dmbates/Devore6
R
false
false
506
rd
\name{ex07.26} \alias{ex07.26} \docType{data} \title{data from exercise 7.26} \description{ The \code{ex07.26} data frame has 11 rows and 2 columns. } \format{ This data frame contains the following columns: \describe{ \item{Number.of.absences}{ a numeric vector } \item{Frequency}{ a numeric vector } } } \source{ Devore, J. L. (2003) \emph{Probability and Statistics for Engineering and the Sciences (6th ed)}, Duxbury } \examples{ str(ex07.26) } \keyword{datasets}
nonlinear.student.data <- read.csv("~/Desktop/回归分析/第4讲 非线性回归/student_data.csv", row.names=1) nonlinear.student.data<- na.omit(nonlinear.student.data) head(nonlinear.student.data) #散点图可视化身高体重线性关系 attach(nonlinear.student.data) plot(height,weight ,main="scatter plot") #1)多项式回归模型建模 plot(height,weight ,main="scatter plot") poly.model = lm(weight~height+I(height^2)+I(height^3)) #第二种方法:lm(weight~poly(height,100,raw=TRUE)) poly.model #可视化多项式回归 x <- seq(150,200,by=0.1) lines(x,predict(poly.model,newdata = data.frame(height = x)),col = "red") #置信区间 x <- seq(150,200,by=0.1) conf_interval = predict(poly.model,data.frame(height = x),interval = "confidence") lines(x, conf_interval[,2], col="blue", lty=2) lines(x, conf_interval[,3], col="blue", lty=2) #预测区间 predict_interval = predict(poly.model,data.frame(height = x),interval = "predict") lines(x, predict_interval[,2], col="blue", lty=2) lines(x, predict_interval[,3], col="blue", lty=2) #如何判断polynomial degree anova(poly.model) #2)把身高看成分类型变量回归 cut(height,3) step.regression = lm(weight~cut(height,3)) step.regression #可视化分类型变量回归 plot(height,weight ,main="scatter plot") lines(sort(height),sort(step.regression$fitted.values),col = "red",lty=2,lwd=2) #3)多分类变量回归 plot(height,weight ,main="scatter plot") multi.factor.lm = lm(weight~cut(height,3)+gender) multi.factor.lm #可视化多分类变量回归 #4)分类型数值型混合回归 mix.model = lm(weight~height+gender) mix.model #可视化混合回归 plot(height,weight ,main="scatter plot") x <- seq(150,200,by=0.1) lines(x,predict(mix.model,newdata = data.frame(height = x,gender="Male")),col = "red",lty=2,lwd=2) lines(x,predict(mix.model,newdata = data.frame(height = x,gender="Female")),col = "Blue",lty=2,lwd=2) legend(180,140,legend = c("Male","Female"), col=c("red", "blue"),lty=c(2,2)) #5)分类型数值型混合回归(含交互作用) mix.model = lm(weight~height+gender+height:gender) mix.model #可视化混合回归 plot(height,weight ,main="scatter plot") x <- seq(150,200,by=0.1) lines(x,predict(mix.model,newdata = data.frame(height = x,gender="Male")),col = "red",lty=2,lwd=2) 
lines(x,predict(mix.model,newdata = data.frame(height = x,gender="Female")),col = "Blue",lty=2,lwd=2) legend(180,140,legend = c("Male","Female"), col=c("red", "blue"),lty=c(2,2))
/第4讲 非线性回归.R
no_license
Chuyue0724/Regression-model
R
false
false
2,492
r
nonlinear.student.data <- read.csv("~/Desktop/回归分析/第4讲 非线性回归/student_data.csv", row.names=1) nonlinear.student.data<- na.omit(nonlinear.student.data) head(nonlinear.student.data) #散点图可视化身高体重线性关系 attach(nonlinear.student.data) plot(height,weight ,main="scatter plot") #1)多项式回归模型建模 plot(height,weight ,main="scatter plot") poly.model = lm(weight~height+I(height^2)+I(height^3)) #第二种方法:lm(weight~poly(height,100,raw=TRUE)) poly.model #可视化多项式回归 x <- seq(150,200,by=0.1) lines(x,predict(poly.model,newdata = data.frame(height = x)),col = "red") #置信区间 x <- seq(150,200,by=0.1) conf_interval = predict(poly.model,data.frame(height = x),interval = "confidence") lines(x, conf_interval[,2], col="blue", lty=2) lines(x, conf_interval[,3], col="blue", lty=2) #预测区间 predict_interval = predict(poly.model,data.frame(height = x),interval = "predict") lines(x, predict_interval[,2], col="blue", lty=2) lines(x, predict_interval[,3], col="blue", lty=2) #如何判断polynomial degree anova(poly.model) #2)把身高看成分类型变量回归 cut(height,3) step.regression = lm(weight~cut(height,3)) step.regression #可视化分类型变量回归 plot(height,weight ,main="scatter plot") lines(sort(height),sort(step.regression$fitted.values),col = "red",lty=2,lwd=2) #3)多分类变量回归 plot(height,weight ,main="scatter plot") multi.factor.lm = lm(weight~cut(height,3)+gender) multi.factor.lm #可视化多分类变量回归 #4)分类型数值型混合回归 mix.model = lm(weight~height+gender) mix.model #可视化混合回归 plot(height,weight ,main="scatter plot") x <- seq(150,200,by=0.1) lines(x,predict(mix.model,newdata = data.frame(height = x,gender="Male")),col = "red",lty=2,lwd=2) lines(x,predict(mix.model,newdata = data.frame(height = x,gender="Female")),col = "Blue",lty=2,lwd=2) legend(180,140,legend = c("Male","Female"), col=c("red", "blue"),lty=c(2,2)) #5)分类型数值型混合回归(含交互作用) mix.model = lm(weight~height+gender+height:gender) mix.model #可视化混合回归 plot(height,weight ,main="scatter plot") x <- seq(150,200,by=0.1) lines(x,predict(mix.model,newdata = data.frame(height = x,gender="Male")),col = "red",lty=2,lwd=2) 
lines(x,predict(mix.model,newdata = data.frame(height = x,gender="Female")),col = "Blue",lty=2,lwd=2) legend(180,140,legend = c("Male","Female"), col=c("red", "blue"),lty=c(2,2))
# From edefs[["wxStretch"]] where edefs are the enumerations for the TU # for wxWidgets. library(RAutoGenRunTime) setClass("wxStretch", contains = "BitwiseValue") wxStretchValues = BitwiseValue(structure(c(0L, 4096L, 8192L, 8192L, 16384L, 32768L, 49152L, 1048576L), .Names = c("wxSTRETCH_NOT", "wxSHRINK", "wxGROW", "wxEXPAND", "wxSHAPED", "wxFIXED_MINSIZE", "wxTILE", "wxADJUST_MINSIZE" )), class = "wxStretch") setAs("character", "wxStretch", function(from) { asBitwiseValue(from, wxStretchValues, "wxStretch") }) setAs("numeric", "wxStretch", function(from) { asBitwiseValue(from, wxStretchValues, "wxStretch") }) setAs("integer", "wxStretch", function(from) { asBitwiseValue(from, wxStretchValues, "wxStretch") }) as("wxGROW", "wxStretch") as(c("wxGROW", "wxTILE"), "wxStretch") as(8192, "wxStretch") as(c(8192, 4096), "wxStretch") as(as.integer(c(8192, 4096)), "wxStretch") # make top-level variables makeSymbolicVariables(wxStretchValues) # now check things work. a = wxGROW | wxEXPAND | wxTILE class(a) == "wxStretch" length(a) == 1 names(a) == "wxGROW | wxEXPAND | wxTILE" tt = c(wxGROW, wxEXPAND, wxTILE) class(tt) == "wxStretch" length(tt) == 3 as(tt, "numeric") == a names(as(tt, "numeric")) # straight to a number, but with the checks. asBitwiseValue(tt, wxStretchValues, NA) (wxGROW | wxSHRINK) & wxSHRINK c(wxGROW, wxEXPAND, wxTILE) & wxTILE c(wxGROW, wxSHRINK, wxGROW, wxTILE) & wxTILE #################################################################################################### # Enums. 
# See C++Cast also v = structure(c(0L, 1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L, 12L, 65535L ), .Names = c("wxZIP_METHOD_STORE", "wxZIP_METHOD_SHRINK", "wxZIP_METHOD_REDUCE1", "wxZIP_METHOD_REDUCE2", "wxZIP_METHOD_REDUCE3", "wxZIP_METHOD_REDUCE4", "wxZIP_METHOD_IMPLODE", "wxZIP_METHOD_TOKENIZE", "wxZIP_METHOD_DEFLATE", "wxZIP_METHOD_DEFLATE64", "wxZIP_METHOD_BZIP2", "wxZIP_METHOD_DEFAULT" )) setClass("wxZipMethod", contains = "EnumValue") wxZipMethodValues = EnumDef("wxZipMethod", v) makeSymbolicVariables(wxZipMethodValues) wxZipMethodValues[1] wxZipMethodValues[1:3] setAs('character', 'wxZipMethod', function(from) BitwiseValue(from, from, class = "wxZipMethod")) as("wxZIP_METHOD_STORE", 'wxZipMethod') as(c("wxZIP_METHOD_STORE", "wxZIP_METHOD_SHRINK"), 'wxZipMethod') as(1, 'wxZipMethod') as(c(1, 2), 'wxZipMethod') # Error tryCatch(as("wxZIP_METHOD_SHRINL", "wxZipMethod"), error = function(e, ...) cat("Intentional error\n")) tryCatch(as("wxZIP_METHOD_SHRINL", "wxZipMethod"), EnumCoercionError = function(e, ...) cat("I bet you meant", paste(e$possibleValues, collapse = ", "), "\n")) tryCatch(as(65534, "wxZipMethod"))
/tests/bits.R
no_license
omegahat/RAutoGenRunTime
R
false
false
2,781
r
# From edefs[["wxStretch"]] where edefs are the enumerations for the TU # for wxWidgets. library(RAutoGenRunTime) setClass("wxStretch", contains = "BitwiseValue") wxStretchValues = BitwiseValue(structure(c(0L, 4096L, 8192L, 8192L, 16384L, 32768L, 49152L, 1048576L), .Names = c("wxSTRETCH_NOT", "wxSHRINK", "wxGROW", "wxEXPAND", "wxSHAPED", "wxFIXED_MINSIZE", "wxTILE", "wxADJUST_MINSIZE" )), class = "wxStretch") setAs("character", "wxStretch", function(from) { asBitwiseValue(from, wxStretchValues, "wxStretch") }) setAs("numeric", "wxStretch", function(from) { asBitwiseValue(from, wxStretchValues, "wxStretch") }) setAs("integer", "wxStretch", function(from) { asBitwiseValue(from, wxStretchValues, "wxStretch") }) as("wxGROW", "wxStretch") as(c("wxGROW", "wxTILE"), "wxStretch") as(8192, "wxStretch") as(c(8192, 4096), "wxStretch") as(as.integer(c(8192, 4096)), "wxStretch") # make top-level variables makeSymbolicVariables(wxStretchValues) # now check things work. a = wxGROW | wxEXPAND | wxTILE class(a) == "wxStretch" length(a) == 1 names(a) == "wxGROW | wxEXPAND | wxTILE" tt = c(wxGROW, wxEXPAND, wxTILE) class(tt) == "wxStretch" length(tt) == 3 as(tt, "numeric") == a names(as(tt, "numeric")) # straight to a number, but with the checks. asBitwiseValue(tt, wxStretchValues, NA) (wxGROW | wxSHRINK) & wxSHRINK c(wxGROW, wxEXPAND, wxTILE) & wxTILE c(wxGROW, wxSHRINK, wxGROW, wxTILE) & wxTILE #################################################################################################### # Enums. 
# See C++Cast also v = structure(c(0L, 1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L, 12L, 65535L ), .Names = c("wxZIP_METHOD_STORE", "wxZIP_METHOD_SHRINK", "wxZIP_METHOD_REDUCE1", "wxZIP_METHOD_REDUCE2", "wxZIP_METHOD_REDUCE3", "wxZIP_METHOD_REDUCE4", "wxZIP_METHOD_IMPLODE", "wxZIP_METHOD_TOKENIZE", "wxZIP_METHOD_DEFLATE", "wxZIP_METHOD_DEFLATE64", "wxZIP_METHOD_BZIP2", "wxZIP_METHOD_DEFAULT" )) setClass("wxZipMethod", contains = "EnumValue") wxZipMethodValues = EnumDef("wxZipMethod", v) makeSymbolicVariables(wxZipMethodValues) wxZipMethodValues[1] wxZipMethodValues[1:3] setAs('character', 'wxZipMethod', function(from) BitwiseValue(from, from, class = "wxZipMethod")) as("wxZIP_METHOD_STORE", 'wxZipMethod') as(c("wxZIP_METHOD_STORE", "wxZIP_METHOD_SHRINK"), 'wxZipMethod') as(1, 'wxZipMethod') as(c(1, 2), 'wxZipMethod') # Error tryCatch(as("wxZIP_METHOD_SHRINL", "wxZipMethod"), error = function(e, ...) cat("Intentional error\n")) tryCatch(as("wxZIP_METHOD_SHRINL", "wxZipMethod"), EnumCoercionError = function(e, ...) cat("I bet you meant", paste(e$possibleValues, collapse = ", "), "\n")) tryCatch(as(65534, "wxZipMethod"))