content
large_stringlengths
0
6.46M
path
large_stringlengths
3
331
license_type
large_stringclasses
2 values
repo_name
large_stringlengths
5
125
language
large_stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
4
6.46M
extension
large_stringclasses
75 values
text
stringlengths
0
6.46M
# Readability analysis built on the qdap package's Flesch-Kincaid measure.
# library() (not require()) so a missing qdap fails loudly at load time.
library(qdap)

analyseReadability <- function(x) {
  # Computes the readability score of some textual input, using the
  # flesch_kincaid measure.
  #
  # Args:
  #   x: the textual input. Can be a single character vector or a list of
  #      character vectors.
  #
  # Returns:
  #   A data frame containing the corresponding FK readability scores for
  #   the given text inputs (one row per input element).
  fk_output <- lapply(x, cleanTextAndGetFKScore)
  scores.df <- data.frame(readability = unlist(fk_output))
  return(scores.df)
}

cleanTextAndGetFKScore <- function(x) {
  # Takes some character vector input, splits it into sentences and then
  # calculates the Flesch-Kincaid readability measure.
  #
  # Args:
  #   x: A character vector.
  #
  # Returns:
  #   The Flesch-Kincaid reading-ease score for the input, as reported by
  #   qdap::flesch_kincaid() in its Readability$FK_read.ease field.
  # Split on literal periods ("\\." is a regex-escaped dot) to approximate
  # sentence boundaries.
  text <- unlist(strsplit(x, "\\."))
  fk <- flesch_kincaid(text)
  return(fk$Readability$FK_read.ease)
}
/readabilityAnalysis.R
no_license
skymonkey/text-tools
R
false
false
916
r
# Readability analysis built on the qdap package's Flesch-Kincaid measure.
# library() (not require()) so a missing qdap fails loudly at load time.
library(qdap)

analyseReadability <- function(x) {
  # Computes the readability score of some textual input, using the
  # flesch_kincaid measure.
  #
  # Args:
  #   x: the textual input. Can be a single character vector or a list of
  #      character vectors.
  #
  # Returns:
  #   A data frame containing the corresponding FK readability scores for
  #   the given text inputs (one row per input element).
  fk_output <- lapply(x, cleanTextAndGetFKScore)
  scores.df <- data.frame(readability = unlist(fk_output))
  return(scores.df)
}

cleanTextAndGetFKScore <- function(x) {
  # Takes some character vector input, splits it into sentences and then
  # calculates the Flesch-Kincaid readability measure.
  #
  # Args:
  #   x: A character vector.
  #
  # Returns:
  #   The Flesch-Kincaid reading-ease score for the input, as reported by
  #   qdap::flesch_kincaid() in its Readability$FK_read.ease field.
  # Split on literal periods ("\\." is a regex-escaped dot) to approximate
  # sentence boundaries.
  text <- unlist(strsplit(x, "\\."))
  fk <- flesch_kincaid(text)
  return(fk$Readability$FK_read.ease)
}
# ChIP-seq peak annotation for mouse (mm10) using ChIPseeker.
# Annotates a BED file of peaks relative to the TSS, then writes a pie chart
# of annotation categories and a coverage plot as PDFs into OUT_DIR.
setwd('C:/Users/User/Desktop/Bioinformatics/H3K4me3_ES_E14/')
source('lib.R')

### Package installation: bootstrap BiocManager, then the Bioconductor deps.
if (!requireNamespace("BiocManager", quietly = TRUE))
  install.packages("BiocManager")
BiocManager::install("TxDb.Mmusculus.UCSC.mm10.knownGene")
BiocManager::install("ChIPseeker")
BiocManager::install("clusterProfiler")
BiocManager::install("ChIPpeakAnno")
BiocManager::install("org.Mm.eg.db")

library(ChIPseeker)
library(TxDb.Mmusculus.UCSC.mm10.knownGene)
library(clusterProfiler)
library(org.Mm.eg.db)

### Input selection: uncomment exactly one NAME to pick the dataset.
NAME <- 'mouseZ-DNA1'
#NAME <- 'H3K4me3_ES_E14.ENCFF993IIG.mm10.filtered'
#NAME <- 'H3K4me3_ES_E14.ENCFF899LDH.mm10.filtered'

# DATA_DIR / OUT_DIR are expected to come from lib.R, sourced above.
BED_FN <- paste0(DATA_DIR, NAME, '.bed')

### Annotation: peaks within +/-3 kb of a TSS are labelled as promoter.
txdb <- TxDb.Mmusculus.UCSC.mm10.knownGene
peakAnno <- annotatePeak(BED_FN, tssRegion=c(-3000, 3000),
                         TxDb=txdb, annoDb="org.Mm.eg.db")

# Pie chart of genomic-feature annotation categories.
pdf(paste0(OUT_DIR, 'chip_seeker.', NAME, '.annopie.pdf'))
plotAnnoPie(peakAnno)
dev.off()

# Chromosome-wide coverage plot, weighted by the BED score column (V5).
peak <- readPeakFile(BED_FN)
pdf(paste0(OUT_DIR, 'chip_seeker.', NAME, '.covplot.pdf'))
covplot(peak, weightCol="V5")
dev.off()
/src/chip_seeker.R
no_license
namikhnenko/hse21_H3K4me3_ZDNA_mouse
R
false
false
1,070
r
# ChIP-seq peak annotation for mouse (mm10) using ChIPseeker.
# Annotates a BED file of peaks relative to the TSS, then writes a pie chart
# of annotation categories and a coverage plot as PDFs into OUT_DIR.
setwd('C:/Users/User/Desktop/Bioinformatics/H3K4me3_ES_E14/')
source('lib.R')

### Package installation: bootstrap BiocManager, then the Bioconductor deps.
if (!requireNamespace("BiocManager", quietly = TRUE))
  install.packages("BiocManager")
BiocManager::install("TxDb.Mmusculus.UCSC.mm10.knownGene")
BiocManager::install("ChIPseeker")
BiocManager::install("clusterProfiler")
BiocManager::install("ChIPpeakAnno")
BiocManager::install("org.Mm.eg.db")

library(ChIPseeker)
library(TxDb.Mmusculus.UCSC.mm10.knownGene)
library(clusterProfiler)
library(org.Mm.eg.db)

### Input selection: uncomment exactly one NAME to pick the dataset.
NAME <- 'mouseZ-DNA1'
#NAME <- 'H3K4me3_ES_E14.ENCFF993IIG.mm10.filtered'
#NAME <- 'H3K4me3_ES_E14.ENCFF899LDH.mm10.filtered'

# DATA_DIR / OUT_DIR are expected to come from lib.R, sourced above.
BED_FN <- paste0(DATA_DIR, NAME, '.bed')

### Annotation: peaks within +/-3 kb of a TSS are labelled as promoter.
txdb <- TxDb.Mmusculus.UCSC.mm10.knownGene
peakAnno <- annotatePeak(BED_FN, tssRegion=c(-3000, 3000),
                         TxDb=txdb, annoDb="org.Mm.eg.db")

# Pie chart of genomic-feature annotation categories.
pdf(paste0(OUT_DIR, 'chip_seeker.', NAME, '.annopie.pdf'))
plotAnnoPie(peakAnno)
dev.off()

# Chromosome-wide coverage plot, weighted by the BED score column (V5).
peak <- readPeakFile(BED_FN)
pdf(paste0(OUT_DIR, 'chip_seeker.', NAME, '.covplot.pdf'))
covplot(peak, weightCol="V5")
dev.off()
################################################################################* # DATA FORMATTING TEMPLATE ################################################################################* # # Dataset name: Wintering Bird Populations at William Trelease Woods (AVG number individuals) # Dataset source (link): https://www.ideals.illinois.edu/handle/2142/25182 # Formatted by: Sara Snell #NOTE: dataset 298 is one of 4 datasets from Bird populations in East central #IL: Fluctuations, variations, and development over a half-century. Data from #Appendix 1 of document # Start by opening the data formatting table (data_formatting_table.csv). # Datasets to be worked on will have a 'format_flag' of 0. # Flag codes are as follows: # 0 = not currently worked on # 1 = formatting complete # 2 = formatting in process # 3 = formatting halted, issue # 4 = data unavailable # 5 = data insufficient for generating occupancy data # NOTE: All changes to the data formatting table will be done in R! # Do not make changes directly to this table, this will create conflicting versions. # YOU WILL NEED TO ENTER DATASET-SPECIFIC INFO IN EVERY LINE OF CODE PRECEDED # BY "#--! PROVIDE INFO !--#". # YOU SHOULD RUN, BUT NOT OTHERWISE MODIFY, ALL OTHER LINES OF CODE. #-------------------------------------------------------------------------------* # ---- SET-UP ---- #===============================================================================* # This script is best viewed in RStudio. I like to reduced the size of my window # to roughly the width of the section lines (as above). 
Additionally, ensure # that your global options are set to soft-wrap by selecting: # Tools/Global Options .../Code Editing/Soft-wrap R source files # Load libraries: library(stringr) library(plyr) library(ggplot2) library(grid) library(gridExtra) library(MASS) # Source the functions file: getwd() # Set your working directory to be in the home of the core-transient repository # e.g., setwd('C:/git/core-transient') source('scripts/R-scripts/core-transient_functions.R') # Get data. First specify the dataset number ('datasetID') you are working with. #--! PROVIDE INFO !--# datasetID = 299 list.files('data/raw_datasets') dataset = read.csv(paste('data/raw_datasets/dataset_', datasetID, '.csv', sep = '')) dataFormattingTable = read.csv('data_formatting_table.csv') # Make sure the original name of the raw data file is saved in the data formatting table. # NOT, for example, 'rawdataset_255.csv', but the filename as originally downloaded. # Check the data source link (available in the table, and hopefully posted above) if # the data is available online. If the data come from a published paper and there is # no file that was downloaded, enter "NA". dataFormattingTable[,'Raw_datafile_name'] = dataFormattingTableFieldUpdate(datasetID, 'Raw_datafile_name', #--! PROVIDE INFO !--# 'extracted from Kendeigh 1982 Appendix 1') ######################################################## # ANALYSIS CRITERIA # ######################################################## # Min number of time samples required minNTime = 6 # Min number of species required minSpRich = 10 # Ultimately, the largest number of spatial and # temporal subsamples will be chosen to characterize # an assemblage such that at least this fraction # of site-years will be represented. 
topFractionSites = 0.5 ####################################################### #-------------------------------------------------------------------------------* # ---- EXPLORE THE DATASET ---- #===============================================================================* # Here, you are predominantly interested in getting to know the dataset, and # determine what the fields represent and which fields are relavent. # View field names: names(dataset) # View how many records and fields: dim(dataset) # View the structure of the dataset: # View first 6 rows of the dataset: head(dataset) # Here, we can see that there are some fields that we won't use. These might be # fields describing weather, observer ID's, or duplicate information like year # or month when there is already a complete date column. # If all fields will be used, then set unusedFieldNames = "" names(dataset) #--! PROVIDE INFO !--# unusedFieldNames = c() dataset1 = dataset[, !names(dataset) %in% unusedFieldNames] # Note that I've given a new name here "dataset1", this is to ensure that # we don't have to go back to square 1 if we've miscoded anything. # Explore, if everything looks okay, you're ready to move forward. If not, # retrace your steps to look for and fix errors. head(dataset1, 10) # I've found it helpful to explore more than just the first 6 data points given # with just a head(), so I used head(dataset#, 10) or even 20 to 50 to get a # better snapshot of what the data looks like. Do this periodically throughout # the formatting process # !GIT-ADD-COMMIT-PUSH AND DESCRIBE HOW THE DATA WERE MODIFIED! #!DATA FORMATTING TABLE UPDATE! # Are the ONLY site identifiers the latitude and longitude of the observation or # sample? (I.e., there are no site names or site IDs or other designations) Y/N dataFormattingTable[,'LatLong_sites'] = dataFormattingTableFieldUpdate(datasetID, 'LatLong_sites', #--! 
PROVIDE INFO !--# 'N') #-------------------------------------------------------------------------------* # ---- FORMAT TIME DATA ---- #===============================================================================* # Here, we need to extract the sampling dates. # What is the name of the field that has information on sampling date? # If date info is in separate columns (e.g., 'day', 'month', and 'year' cols), # then write these field names as a vector from largest to smallest temporal grain. # E.g., c('year', 'month', 'day') #--! PROVIDE INFO !--# dateFieldName = c('year') # If necessary, paste together date info from multiple columns into single field if (length(dateFieldName) > 1) { newDateField = dataset1[, dateFieldName[1]] for (i in dateFieldName[2:length(dateFieldName)]) { newDateField = paste(newDateField, dataset[,i], sep = "-") } dataset1$date = newDateField datefield = 'date' } else { datefield = dateFieldName } # What is the format in which date data is recorded? For example, if it is # recorded as 5/30/94, then this would be '%m/%d/%y', while 1994-5-30 would # be '%Y-%m-%d'. Type "?strptime" for other examples of date formatting. #--! PROVIDE INFO !--# dateformat = '%Y' # If the date is just a year, then make sure it is of class numeric # and not a factor. Otherwise change to a true date object. if (dateformat == '%Y' | dateformat == '%y') { date = as.numeric(as.character(dataset1[, datefield])) } else { date = as.POSIXct(strptime(dataset1[, datefield], dateformat)) } # A check on the structure lets you know that date field is now a date object: class(date) # Give a double-check, if everything looks okay replace the column: head(dataset1[, datefield]) head(date) dataset2 = dataset1 # Delete the old date field dataset2 = dataset2[, -which(names(dataset2) %in% dateFieldName)] # Assign the new date values in a field called 'date' dataset2$date = date # Check the results: head(dataset2) # !GIT-ADD-COMMIT-PUSH AND DESCRIBE HOW THE DATE DATA WERE MODIFIED! 
#!DATA FORMATTING TABLE UPDATE! # Notes_timeFormat. Provide a thorough description of any modifications that # were made to the time field. dataFormattingTable[,'Notes_timeFormat'] = dataFormattingTableFieldUpdate(datasetID, 'Notes_timeFormat', #--! PROVIDE INFO !--# 'The only modification to this field involved converting to a date object. Additionally only the first year of 2 year range was used to make formatting easier.') # subannualTgrain. After exploring the time data, was this dataset sampled at a # sub-annual temporal grain? Y/N dataFormattingTable[,'subannualTgrain'] = dataFormattingTableFieldUpdate(datasetID, 'subannualTgrain', #--! PROVIDE INFO !--# 'N') #-------------------------------------------------------------------------------* # ---- EXPLORE AND FORMAT SITE DATA ---- #===============================================================================* # From the previous head commmand, we can see that sites are broken up into # (potentially) 2 fields. Find the metadata link in the data formatting table use # that link to determine how sites are characterized. # -- If sampling is nested (e.g., quadrats within sites as in this study), use # each of the identifying fields and separate each field with an underscore. # For nested samples be sure the order of concatenated columns goes from # coarser to finer scales (e.g. "km_m_cm") # -- If sites are listed as lats and longs, use the finest available grain and # separate lat and long fields with an underscore. # -- If the site definition is clear, make a new site column as necessary. # -- If the dataset is for just a single site, and there is no site column, then add one. # Here, we will concatenate all of the potential fields that describe the site # in hierarchical order from largest to smallest grain. Based on the dataset, # fill in the fields that specify nested spatial grains below. #--! 
PROVIDE INFO !--# dataset2$site = 1 site_grain_names = c("site") # We will now create the site field with these codes concatenated if there # are multiple grain fields. Otherwise, site will just be the single grain field. num_grains = length(site_grain_names) site = dataset2[, site_grain_names[1]] if (num_grains > 1) { for (i in 2:num_grains) { site = paste(site, dataset2[, site_grain_names[i]], sep = "_") } } # What is the spatial grain of the finest sampling scale? For example, this might be # a 0.25 m2 quadrat, or a 5 m transect, or a 50 ml water sample. dataFormattingTable[,'Raw_spatial_grain'] = dataFormattingTableFieldUpdate(datasetID, 'Raw_spatial_grain', #--! PROVIDE INFO !--# 24) dataFormattingTable[,'Raw_spatial_grain_unit'] = dataFormattingTableFieldUpdate(datasetID, 'Raw_spatial_grain_unit', #--! PROVIDE INFO !--# 'ha') # BEFORE YOU CONTINUE. We need to make sure that there are at least minNTime for # sites at the coarsest possible spatial grain. siteCoarse = dataset2[, site_grain_names[1]] if (dateformat == '%Y' | dateformat == '%y') { dateYear = dataset2$date } else { dateYear = format(dataset2$date, '%Y') } datasetYearTest = data.frame(siteCoarse, dateYear) ddply(datasetYearTest, .(siteCoarse), summarise, lengthYears = length(unique(dateYear))) # If the dataset has less than minNTime years per site, do not continue processing. # Do some quality control by comparing the site fields in the dataset with the new vector of sites: head(site) # Check how evenly represented all of the sites are in the dataset. If this is the # type of dataset where every site was sampled on a regular schedule, then you # expect to see similar values here across sites. Sites that only show up a small # percent of the time may reflect typos. 
data.frame(table(site)) # All looks correct, so replace the site column in the dataset (as a factor) and remove the unnecessary fields, start by renaming the dataset to dataset2: dataset3 = dataset2 # Remove any hierarchical site related fields that are no longer needed, IF NECESSARY. #--! PROVIDE INFO !--# dataset3 = dataset3[, !names(dataset3) %in% site_grain_names] dataset3$site = factor(site) # Check the new dataset (are the columns as they should be?): head(dataset3) # !GIT-ADD-COMMIT-PUSH AND DESCRIBE HOW THE SITE DATA WERE MODIFIED! # !DATA FORMATTING TABLE UPDATE! # Raw_siteUnit. How a site is coded (i.e. if the field was concatenated such as this # one, it was coded as "site_quadrat"). Alternatively, if the site were concatenated # from latitude and longitude fields, the encoding would be "lat_long". if (dataFormattingTable[dataFormattingTable$dataset_ID == datasetID,'LatLong_sites'] == "N") { dataFormattingTable[,'Raw_siteUnit'] = dataFormattingTableFieldUpdate(datasetID, 'Raw_siteUnit', paste(site_grain_names, collapse="_")) } else if (dataFormattingTable[dataFormattingTable$dataset_ID == datasetID,'LatLong_sites'] == "Y") { dataFormattingTable[,'Raw_siteUnit'] = dataFormattingTableFieldUpdate(datasetID, 'Raw_siteUnit', "lat_long") } # spatial_scale_variable. Is a site potentially nested (e.g., plot within a quad or # decimal lat longs that could be scaled up)? Y/N dataFormattingTable[,'spatial_scale_variable'] = dataFormattingTableFieldUpdate(datasetID, 'spatial_scale_variable', #--! PROVIDE INFO !--# 'N') # Notes_siteFormat. Use this field to THOROUGHLY describe any changes made to the # site field during formatting. dataFormattingTable[,'Notes_siteFormat'] = dataFormattingTableFieldUpdate(datasetID, 'Notes_siteFormat', #--! 
PROVIDE INFO !--# 'The dataset includes only a single site, William Trelease Woods in a 24 ha plot.') #-------------------------------------------------------------------------------* # ---- EXPLORE AND FORMAT COUNT DATA ---- #===============================================================================* # Next, we need to explore the count records. For filling out the data formatting # table, we need to change the name of the field which represents counts, # densities, percent cover, etc to "count". Then we will clean up unnecessary values. names(dataset3) summary(dataset3) # Fill in the original field name for the count or abundance data here. # If there is no countfield, set this equal to "". #--! PROVIDE INFO !--# countfield = "count" # Renaming it if (countfield == "") { dataset3$count = 1 } else { names(dataset3)[which(names(dataset3) == countfield)] = 'count' } # Check that the count field is numeric or integer, and convert if necessary class(dataset3$count) # For example, dataset3$count = as.numeric(as.character(dataset3$count)) # Now we will remove zero counts and NA's: summary(dataset3) # Can usually tell if there are any zeros or NAs from that summary(). If there # aren't any showing, still run these functions or continue with the update of # dataset# so that you are consistent with this template. # Subset to records > 0 (if applicable): dataset4 = subset(dataset3, count > 0) summary(dataset4) # Check to make sure that by removing 0's that you haven't completely removed # any sampling events in which nothing was observed. Compare the number of # unique site-dates in dataset3 and dataset4. # If there are no sampling events lost, then we can go ahead and use the # smaller dataset4 which could save some time in subsequent analyses. # If there are sampling events lost, then we'll keep the 0's (use dataset3). 
numEventsd3 = nrow(unique(dataset3[, c('site', 'date')])) numEventsd4 = nrow(unique(dataset4[, c('site', 'date')])) if(numEventsd3 > numEventsd4) { dataset4 = dataset3 } else { dataset4 = dataset4 } # Remove NA's: dataset5 = na.omit(dataset4) # How does it look? head(dataset5) # !GIT-ADD-COMMIT-PUSH AND DESCRIBE HOW THE COUNT DATA WERE MODIFIED! #!DATA FORMATTING TABLE UPDATE! # Possible values for countFormat field are density, cover, presence and count. dataFormattingTable[,'countFormat'] = dataFormattingTableFieldUpdate(datasetID, 'countFormat', #--! PROVIDE INFO !--# 'count') dataFormattingTable[,'Notes_countFormat'] = dataFormattingTableFieldUpdate(datasetID, 'Notes_countFormat', #--! PROVIDE INFO !--# 'Average number of individuals in each year provided.') #-------------------------------------------------------------------------------* # ---- EXPLORE AND FORMAT SPECIES DATA ---- #===============================================================================* # Here, your primary goal is to ensure that all of your species are valid. To do so, # you need to look at the list of unique species very carefully. Avoid being too # liberal in interpretation, if you notice an entry that MIGHT be a problem, but # you can't say with certainty, create an issue on GitHub. # First, what is the field name in which species or taxonomic data are stored? # It will get converted to 'species' #--! PROVIDE INFO !--# speciesField = 'spp' names(dataset5)[names(dataset5) == speciesField] = 'species' # Look at the individual species present and how frequently they occur: This way # you can more easily scan the species names (listed alphabetically) and identify # potential misspellings, extra characters or blank space, or other issues. data.frame(table(dataset5$species)) # If there are entries that only specify the genus while there are others that # specify the species in addition to that same genus, they need to be regrouped # in order to avoid ambiguity. 
For example, if there are entries of 'Cygnus', # 'Cygnus_columbianus', and 'Cygnus_cygnus', 'Cygnus' could refer to either # species, but the observer could not identify it. This causes ambiguity in the # data, and must be fixed by either 1. deleting the genus-only entry altogether, # or 2. renaming the genus-species entries to just the genus-only entry. # This decision can be fairly subjective, but generally if less than 50% of the # entries are genus-only, then they can be deleted (using bad_sp). If more than # 50% of the entries for that genus are only specified to the genus, then the # genus-species entries should be renamed to be genus-only (using typo_name). # If species names are coded (not scientific names) go back to study's metadata # to learn what species should and shouldn't be in the data. # In this example, a quick look at the metadata is not informative, unfortunately. # Because of this, you should really stop here and post an issue on GitHub. #--! PROVIDE INFO !--# bad_sp = c('') dataset6 = dataset5[!dataset5$species %in% bad_sp,] # It may be useful to count the number of times each name occurs, as misspellings # or typos will likely only show up one time. table(dataset6$species) # If you find any potential typos, try to confirm that the "mispelling" isn't # actually a valid name. If not, then list the typos in typo_name, and the # correct spellings in good_name, and then replace them using the for loop below: #--! PROVIDE INFO !--# typo_name = c('') #--! 
PROVIDE INFO !--# good_name = c('') if (length(typo_name) > 0 & typo_name[1] != "") { for (n in 1:length(typo_name)) { dataset6$species[dataset6$species == typo_name[n]] = good_name[n] } } # Reset the factor levels: dataset6$species = factor(dataset6$species) # Let's look at how the removal of bad species and altered the length of the dataset: nrow(dataset5) nrow(dataset6) # Look at the head of the dataset to ensure everything is correct: head(dataset6) # !GIT-ADD-COMMIT-PUSH AND DESCRIBE HOW THE SPECIES DATA WERE MODIFIED! #!DATA FORMATTING TABLE UPDATE! # Column M. Notes_spFormat. Provide a THOROUGH description of any changes made # to the species field, including why any species were removed. dataFormattingTable[,'Notes_spFormat'] = dataFormattingTableFieldUpdate(datasetID, 'Notes_spFormat', #--! PROVIDE INFO !--# 'Data was entered by hand, common species name was provided. No typos or bad spp found.') #-------------------------------------------------------------------------------* # ---- MAKE DATA FRAME OF COUNT BY SITES, SPECIES, AND YEAR ---- #===============================================================================* # Now we will make the final formatted dataset, add a datasetID field, check for # errors, and remove records that cant be used for our purposes. # First, lets add the datasetID: dataset6$datasetID = datasetID # Now make the compiled dataframe: dataset7 = ddply(dataset6,.(datasetID, site, date, species), summarize, count = sum(count)) # Explore the data frame: dim(dataset7) head(dataset7, 15) summary(dataset7) # !GIT-ADD-COMMIT-PUSH AND DESCRIBE HOW THE DATA WERE MODIFIED! #-------------------------------------------------------------------------------* # ---- UPDATE THE DATA FORMATTING TABLE AND WRITE OUTPUT DATA FRAMES ---- #===============================================================================* # Update the data formatting table (this may take a moment to process). 
Note that # the inputs for this are 'datasetID', the datasetID and the dataset form that you # consider to be fully formatted. dataFormattingTable = dataFormattingTableUpdate(datasetID, dataset7) # Take a final look at the dataset: head(dataset7) summary (dataset7) # If everything is looks okay we're ready to write formatted data frame: write.csv(dataset7, paste("data/formatted_datasets/dataset_", datasetID, ".csv", sep = ""), row.names = F) # !GIT-ADD-COMMIT-PUSH THE FORMATTED DATASET IN THE DATA FILE, # THEN GIT-ADD-COMMIT-PUSH THE UPDATED DATA FOLDER! # As we've now successfully created the formatted dataset, we will now update # the format flag field. dataFormattingTable[,'format_flag'] = dataFormattingTableFieldUpdate(datasetID, 'format_flag', #--! PROVIDE INFO !--# 1) # Flag codes are as follows: # 0 = not currently worked on # 1 = formatting complete # 2 = formatting in process # 3 = formatting halted, issue # 4 = data unavailable # 5 = data insufficient for generating occupancy data # !GIT-ADD-COMMIT-PUSH THE DATA FORMATTING TABLE! ###################################################################################* # ---- END DATA FORMATTING. START PROPOCC AND DATA SUMMARY ---- ###################################################################################* # We have now formatted the dataset to the finest possible spatial and temporal # grain, removed bad species, and added the dataset ID. It's now to make some # scale decisions and determine the proportional occupancies. # Load additional required libraries and dataset: library(dplyr) library(tidyr) # Read in formatted dataset if skipping above formatting code (lines 1-660). 
#dataset7 = read.csv(paste("data/formatted_datasets/dataset_", # datasetID, ".csv", sep ='')) # Have a look at the dimensions of the dataset and number of sites: dim(dataset7) length(unique(dataset7$site)) length(unique(dataset7$date)) head(dataset7) # Get the data formatting table for that dataset: dataDescription = dataFormattingTable[dataFormattingTable$dataset_ID == datasetID,] # or read it in from the saved data_formatting_table.csv if skipping lines 1-660. #dataDescription = subset(read.csv("data_formatting_table.csv"), # dataset_ID == datasetID) # Check relevant table values: dataDescription$LatLong_sites dataDescription$spatial_scale_variable dataDescription$Raw_siteUnit dataDescription$subannualTgrain # Before proceeding, we need to make decisions about the spatial and temporal grains at # which we will conduct our analyses. Except in unusual circumstances, the temporal # grain will almost always be 'year', but the spatial grain that best represents the # scale of a "community" will vary based on the sampling design and the taxonomic # group. Justify your spatial scale below with a comment. #--! PROVIDE INFO !--# tGrain = 'year' # Refresh your memory about the spatial grain names if this is NOT a lat-long-only # based dataset. Set sGrain = to the hierarchical scale for analysis, including # the higher levels separated by underscore. E.g., for a dataset with quads within # plots within the site, sGrain = 'site_plot_quad' or sGrain = 'site_plot' or # sGrain = 'site'. # HOWEVER, if the sites are purely defined by lat-longs, then sGrain should equal # a numerical value specifying the block size in degrees latitude for analysis. site_grain_names #--! PROVIDE INFO !--# sGrain = 'site' # This is a reasonable choice of spatial grain because ... #--! PROVIDE INFO !--# # only study site of 24 ha was used # The function "richnessYearSubsetFun" below will subset the data to sites with an # adequate number of years of sampling and species richness. 
If there are no # adequate years, the function will return a custom error message and you can # try resetting sGrain above to something coarser. Keep trying until this # runs without an error. If a particular sGrain value led to an error in this # function, you can make a note of that in the spatial grain justification comment # above. If this function fails for ALL spatial grains, then this dataset will # not be suitable for analysis and you can STOP HERE. richnessYearsTest = richnessYearSubsetFun(dataset7, spatialGrain = sGrain, temporalGrain = tGrain, minNTime = minNTime, minSpRich = minSpRich, dataDescription) head(richnessYearsTest) dim(richnessYearsTest) ; dim(dataset7) #Number of unique sites meeting criteria goodSites = unique(richnessYearsTest$analysisSite) length(goodSites) # Now subset dataset7 to just those goodSites as defined. This is tricky though # because assuming Sgrain is not the finest resolution, we will need to use # grep to match site names that begin with the string in goodSites. # The reason to do this is that sites which don't meet the criteria (e.g. not # enough years of data) may also have low sampling intensity that constrains # the subsampling level of the well sampled sites. uniqueSites = unique(dataset7$site) fullGoodSites = c() for (s in goodSites) { tmp = as.character(uniqueSites[grepl(paste(s, "_", sep = ""), paste(uniqueSites, "_", sep = ""))]) fullGoodSites = c(fullGoodSites, tmp) } dataset8 = subset(dataset7, site %in% fullGoodSites) # Once we've settled on spatial and temporal grains that pass our test above, # we then need to 1) figure out what levels of spatial and temporal subsampling # we should use to characterize that analysis grain, and 2) subset the # formatted dataset down to that standardized level of subsampling. # For example, if some sites had 20 spatial subsamples (e.g. 
quads) per year while # others had only 16, or 10, we would identify the level of subsampling that # at least 'topFractionSites' of sites met (with a default of 50%). We would # discard "poorly subsampled" sites (based on this criterion) from further analysis. # For the "well-sampled" sites, the function below randomly samples the # appropriate number of subsamples for each year or site, # and bases the characterization of the community in that site-year based on # the aggregate of those standardized subsamples. dataSubset = subsetDataFun(dataset8, datasetID, spatialGrain = sGrain, temporalGrain = tGrain, minNTime = minNTime, minSpRich = minSpRich, proportionalThreshold = topFractionSites, dataDescription) subsettedData = dataSubset$data write.csv(subsettedData, paste("data/standardized_datasets/dataset_", datasetID, ".csv", sep = ""), row.names = F) # Take a look at the propOcc: head(propOccFun(subsettedData)) hist(propOccFun(subsettedData)$propOcc) mean(propOccFun(subsettedData)$propOcc) # Take a look at the site summary frame: siteSummaryFun(subsettedData) # If everything looks good, write the files: writePropOccSiteSummary(subsettedData) # Save the spatial and temporal subsampling values to the data formatting table: dataFormattingTable[,'Spatial_subsamples'] = dataFormattingTableFieldUpdate(datasetID, 'Spatial_subsamples', dataSubset$w) dataFormattingTable[,'Temporal_subsamples'] = dataFormattingTableFieldUpdate(datasetID, 'Temporal_subsamples', dataSubset$z) # Update Data Formatting Table with summary stats of the formatted, # properly subsetted dataset dataFormattingTable = dataFormattingTableUpdateFinished(datasetID, subsettedData) dataFormattingTable[,'General_notes'] = dataFormattingTableFieldUpdate(datasetID, 'General_notes', #--! 
PROVIDE INFO !--# 'This dataset is different than app 1 bc it is the average number of wintering bird populations.') # And write the final data formatting table: write.csv(dataFormattingTable, 'data_formatting_table.csv', row.names = F) # Remove all objects except for functions from the environment: rm(list = setdiff(ls(), lsf.str()))
/scripts/R-scripts/data_cleaning_scripts/dwork_299_sjs.R
no_license
hurlbertlab/core-transient
R
false
false
29,160
r
################################################################################* # DATA FORMATTING TEMPLATE ################################################################################* # # Dataset name: Wintering Bird Populations at William Trelease Woods (AVG number individuals) # Dataset source (link): https://www.ideals.illinois.edu/handle/2142/25182 # Formatted by: Sara Snell #NOTE: dataset 298 is one of 4 datasets from Bird populations in East central #IL: Fluctuations, variations, and development over a half-century. Data from #Appendix 1 of document # Start by opening the data formatting table (data_formatting_table.csv). # Datasets to be worked on will have a 'format_flag' of 0. # Flag codes are as follows: # 0 = not currently worked on # 1 = formatting complete # 2 = formatting in process # 3 = formatting halted, issue # 4 = data unavailable # 5 = data insufficient for generating occupancy data # NOTE: All changes to the data formatting table will be done in R! # Do not make changes directly to this table, this will create conflicting versions. # YOU WILL NEED TO ENTER DATASET-SPECIFIC INFO IN EVERY LINE OF CODE PRECEDED # BY "#--! PROVIDE INFO !--#". # YOU SHOULD RUN, BUT NOT OTHERWISE MODIFY, ALL OTHER LINES OF CODE. #-------------------------------------------------------------------------------* # ---- SET-UP ---- #===============================================================================* # This script is best viewed in RStudio. I like to reduced the size of my window # to roughly the width of the section lines (as above). 
Additionally, ensure # that your global options are set to soft-wrap by selecting: # Tools/Global Options .../Code Editing/Soft-wrap R source files # Load libraries: library(stringr) library(plyr) library(ggplot2) library(grid) library(gridExtra) library(MASS) # Source the functions file: getwd() # Set your working directory to be in the home of the core-transient repository # e.g., setwd('C:/git/core-transient') source('scripts/R-scripts/core-transient_functions.R') # Get data. First specify the dataset number ('datasetID') you are working with. #--! PROVIDE INFO !--# datasetID = 299 list.files('data/raw_datasets') dataset = read.csv(paste('data/raw_datasets/dataset_', datasetID, '.csv', sep = '')) dataFormattingTable = read.csv('data_formatting_table.csv') # Make sure the original name of the raw data file is saved in the data formatting table. # NOT, for example, 'rawdataset_255.csv', but the filename as originally downloaded. # Check the data source link (available in the table, and hopefully posted above) if # the data is available online. If the data come from a published paper and there is # no file that was downloaded, enter "NA". dataFormattingTable[,'Raw_datafile_name'] = dataFormattingTableFieldUpdate(datasetID, 'Raw_datafile_name', #--! PROVIDE INFO !--# 'extracted from Kendeigh 1982 Appendix 1') ######################################################## # ANALYSIS CRITERIA # ######################################################## # Min number of time samples required minNTime = 6 # Min number of species required minSpRich = 10 # Ultimately, the largest number of spatial and # temporal subsamples will be chosen to characterize # an assemblage such that at least this fraction # of site-years will be represented. 
topFractionSites = 0.5 ####################################################### #-------------------------------------------------------------------------------* # ---- EXPLORE THE DATASET ---- #===============================================================================* # Here, you are predominantly interested in getting to know the dataset, and # determine what the fields represent and which fields are relavent. # View field names: names(dataset) # View how many records and fields: dim(dataset) # View the structure of the dataset: # View first 6 rows of the dataset: head(dataset) # Here, we can see that there are some fields that we won't use. These might be # fields describing weather, observer ID's, or duplicate information like year # or month when there is already a complete date column. # If all fields will be used, then set unusedFieldNames = "" names(dataset) #--! PROVIDE INFO !--# unusedFieldNames = c() dataset1 = dataset[, !names(dataset) %in% unusedFieldNames] # Note that I've given a new name here "dataset1", this is to ensure that # we don't have to go back to square 1 if we've miscoded anything. # Explore, if everything looks okay, you're ready to move forward. If not, # retrace your steps to look for and fix errors. head(dataset1, 10) # I've found it helpful to explore more than just the first 6 data points given # with just a head(), so I used head(dataset#, 10) or even 20 to 50 to get a # better snapshot of what the data looks like. Do this periodically throughout # the formatting process # !GIT-ADD-COMMIT-PUSH AND DESCRIBE HOW THE DATA WERE MODIFIED! #!DATA FORMATTING TABLE UPDATE! # Are the ONLY site identifiers the latitude and longitude of the observation or # sample? (I.e., there are no site names or site IDs or other designations) Y/N dataFormattingTable[,'LatLong_sites'] = dataFormattingTableFieldUpdate(datasetID, 'LatLong_sites', #--! 
PROVIDE INFO !--# 'N') #-------------------------------------------------------------------------------* # ---- FORMAT TIME DATA ---- #===============================================================================* # Here, we need to extract the sampling dates. # What is the name of the field that has information on sampling date? # If date info is in separate columns (e.g., 'day', 'month', and 'year' cols), # then write these field names as a vector from largest to smallest temporal grain. # E.g., c('year', 'month', 'day') #--! PROVIDE INFO !--# dateFieldName = c('year') # If necessary, paste together date info from multiple columns into single field if (length(dateFieldName) > 1) { newDateField = dataset1[, dateFieldName[1]] for (i in dateFieldName[2:length(dateFieldName)]) { newDateField = paste(newDateField, dataset[,i], sep = "-") } dataset1$date = newDateField datefield = 'date' } else { datefield = dateFieldName } # What is the format in which date data is recorded? For example, if it is # recorded as 5/30/94, then this would be '%m/%d/%y', while 1994-5-30 would # be '%Y-%m-%d'. Type "?strptime" for other examples of date formatting. #--! PROVIDE INFO !--# dateformat = '%Y' # If the date is just a year, then make sure it is of class numeric # and not a factor. Otherwise change to a true date object. if (dateformat == '%Y' | dateformat == '%y') { date = as.numeric(as.character(dataset1[, datefield])) } else { date = as.POSIXct(strptime(dataset1[, datefield], dateformat)) } # A check on the structure lets you know that date field is now a date object: class(date) # Give a double-check, if everything looks okay replace the column: head(dataset1[, datefield]) head(date) dataset2 = dataset1 # Delete the old date field dataset2 = dataset2[, -which(names(dataset2) %in% dateFieldName)] # Assign the new date values in a field called 'date' dataset2$date = date # Check the results: head(dataset2) # !GIT-ADD-COMMIT-PUSH AND DESCRIBE HOW THE DATE DATA WERE MODIFIED! 
#!DATA FORMATTING TABLE UPDATE! # Notes_timeFormat. Provide a thorough description of any modifications that # were made to the time field. dataFormattingTable[,'Notes_timeFormat'] = dataFormattingTableFieldUpdate(datasetID, 'Notes_timeFormat', #--! PROVIDE INFO !--# 'The only modification to this field involved converting to a date object. Additionally only the first year of 2 year range was used to make formatting easier.') # subannualTgrain. After exploring the time data, was this dataset sampled at a # sub-annual temporal grain? Y/N dataFormattingTable[,'subannualTgrain'] = dataFormattingTableFieldUpdate(datasetID, 'subannualTgrain', #--! PROVIDE INFO !--# 'N') #-------------------------------------------------------------------------------* # ---- EXPLORE AND FORMAT SITE DATA ---- #===============================================================================* # From the previous head commmand, we can see that sites are broken up into # (potentially) 2 fields. Find the metadata link in the data formatting table use # that link to determine how sites are characterized. # -- If sampling is nested (e.g., quadrats within sites as in this study), use # each of the identifying fields and separate each field with an underscore. # For nested samples be sure the order of concatenated columns goes from # coarser to finer scales (e.g. "km_m_cm") # -- If sites are listed as lats and longs, use the finest available grain and # separate lat and long fields with an underscore. # -- If the site definition is clear, make a new site column as necessary. # -- If the dataset is for just a single site, and there is no site column, then add one. # Here, we will concatenate all of the potential fields that describe the site # in hierarchical order from largest to smallest grain. Based on the dataset, # fill in the fields that specify nested spatial grains below. #--! 
PROVIDE INFO !--# dataset2$site = 1 site_grain_names = c("site") # We will now create the site field with these codes concatenated if there # are multiple grain fields. Otherwise, site will just be the single grain field. num_grains = length(site_grain_names) site = dataset2[, site_grain_names[1]] if (num_grains > 1) { for (i in 2:num_grains) { site = paste(site, dataset2[, site_grain_names[i]], sep = "_") } } # What is the spatial grain of the finest sampling scale? For example, this might be # a 0.25 m2 quadrat, or a 5 m transect, or a 50 ml water sample. dataFormattingTable[,'Raw_spatial_grain'] = dataFormattingTableFieldUpdate(datasetID, 'Raw_spatial_grain', #--! PROVIDE INFO !--# 24) dataFormattingTable[,'Raw_spatial_grain_unit'] = dataFormattingTableFieldUpdate(datasetID, 'Raw_spatial_grain_unit', #--! PROVIDE INFO !--# 'ha') # BEFORE YOU CONTINUE. We need to make sure that there are at least minNTime for # sites at the coarsest possible spatial grain. siteCoarse = dataset2[, site_grain_names[1]] if (dateformat == '%Y' | dateformat == '%y') { dateYear = dataset2$date } else { dateYear = format(dataset2$date, '%Y') } datasetYearTest = data.frame(siteCoarse, dateYear) ddply(datasetYearTest, .(siteCoarse), summarise, lengthYears = length(unique(dateYear))) # If the dataset has less than minNTime years per site, do not continue processing. # Do some quality control by comparing the site fields in the dataset with the new vector of sites: head(site) # Check how evenly represented all of the sites are in the dataset. If this is the # type of dataset where every site was sampled on a regular schedule, then you # expect to see similar values here across sites. Sites that only show up a small # percent of the time may reflect typos. 
data.frame(table(site)) # All looks correct, so replace the site column in the dataset (as a factor) and remove the unnecessary fields, start by renaming the dataset to dataset2: dataset3 = dataset2 # Remove any hierarchical site related fields that are no longer needed, IF NECESSARY. #--! PROVIDE INFO !--# dataset3 = dataset3[, !names(dataset3) %in% site_grain_names] dataset3$site = factor(site) # Check the new dataset (are the columns as they should be?): head(dataset3) # !GIT-ADD-COMMIT-PUSH AND DESCRIBE HOW THE SITE DATA WERE MODIFIED! # !DATA FORMATTING TABLE UPDATE! # Raw_siteUnit. How a site is coded (i.e. if the field was concatenated such as this # one, it was coded as "site_quadrat"). Alternatively, if the site were concatenated # from latitude and longitude fields, the encoding would be "lat_long". if (dataFormattingTable[dataFormattingTable$dataset_ID == datasetID,'LatLong_sites'] == "N") { dataFormattingTable[,'Raw_siteUnit'] = dataFormattingTableFieldUpdate(datasetID, 'Raw_siteUnit', paste(site_grain_names, collapse="_")) } else if (dataFormattingTable[dataFormattingTable$dataset_ID == datasetID,'LatLong_sites'] == "Y") { dataFormattingTable[,'Raw_siteUnit'] = dataFormattingTableFieldUpdate(datasetID, 'Raw_siteUnit', "lat_long") } # spatial_scale_variable. Is a site potentially nested (e.g., plot within a quad or # decimal lat longs that could be scaled up)? Y/N dataFormattingTable[,'spatial_scale_variable'] = dataFormattingTableFieldUpdate(datasetID, 'spatial_scale_variable', #--! PROVIDE INFO !--# 'N') # Notes_siteFormat. Use this field to THOROUGHLY describe any changes made to the # site field during formatting. dataFormattingTable[,'Notes_siteFormat'] = dataFormattingTableFieldUpdate(datasetID, 'Notes_siteFormat', #--! 
PROVIDE INFO !--# 'The dataset includes only a single site, William Trelease Woods in a 24 ha plot.') #-------------------------------------------------------------------------------* # ---- EXPLORE AND FORMAT COUNT DATA ---- #===============================================================================* # Next, we need to explore the count records. For filling out the data formatting # table, we need to change the name of the field which represents counts, # densities, percent cover, etc to "count". Then we will clean up unnecessary values. names(dataset3) summary(dataset3) # Fill in the original field name for the count or abundance data here. # If there is no countfield, set this equal to "". #--! PROVIDE INFO !--# countfield = "count" # Renaming it if (countfield == "") { dataset3$count = 1 } else { names(dataset3)[which(names(dataset3) == countfield)] = 'count' } # Check that the count field is numeric or integer, and convert if necessary class(dataset3$count) # For example, dataset3$count = as.numeric(as.character(dataset3$count)) # Now we will remove zero counts and NA's: summary(dataset3) # Can usually tell if there are any zeros or NAs from that summary(). If there # aren't any showing, still run these functions or continue with the update of # dataset# so that you are consistent with this template. # Subset to records > 0 (if applicable): dataset4 = subset(dataset3, count > 0) summary(dataset4) # Check to make sure that by removing 0's that you haven't completely removed # any sampling events in which nothing was observed. Compare the number of # unique site-dates in dataset3 and dataset4. # If there are no sampling events lost, then we can go ahead and use the # smaller dataset4 which could save some time in subsequent analyses. # If there are sampling events lost, then we'll keep the 0's (use dataset3). 
numEventsd3 = nrow(unique(dataset3[, c('site', 'date')])) numEventsd4 = nrow(unique(dataset4[, c('site', 'date')])) if(numEventsd3 > numEventsd4) { dataset4 = dataset3 } else { dataset4 = dataset4 } # Remove NA's: dataset5 = na.omit(dataset4) # How does it look? head(dataset5) # !GIT-ADD-COMMIT-PUSH AND DESCRIBE HOW THE COUNT DATA WERE MODIFIED! #!DATA FORMATTING TABLE UPDATE! # Possible values for countFormat field are density, cover, presence and count. dataFormattingTable[,'countFormat'] = dataFormattingTableFieldUpdate(datasetID, 'countFormat', #--! PROVIDE INFO !--# 'count') dataFormattingTable[,'Notes_countFormat'] = dataFormattingTableFieldUpdate(datasetID, 'Notes_countFormat', #--! PROVIDE INFO !--# 'Average number of individuals in each year provided.') #-------------------------------------------------------------------------------* # ---- EXPLORE AND FORMAT SPECIES DATA ---- #===============================================================================* # Here, your primary goal is to ensure that all of your species are valid. To do so, # you need to look at the list of unique species very carefully. Avoid being too # liberal in interpretation, if you notice an entry that MIGHT be a problem, but # you can't say with certainty, create an issue on GitHub. # First, what is the field name in which species or taxonomic data are stored? # It will get converted to 'species' #--! PROVIDE INFO !--# speciesField = 'spp' names(dataset5)[names(dataset5) == speciesField] = 'species' # Look at the individual species present and how frequently they occur: This way # you can more easily scan the species names (listed alphabetically) and identify # potential misspellings, extra characters or blank space, or other issues. data.frame(table(dataset5$species)) # If there are entries that only specify the genus while there are others that # specify the species in addition to that same genus, they need to be regrouped # in order to avoid ambiguity. 
For example, if there are entries of 'Cygnus', # 'Cygnus_columbianus', and 'Cygnus_cygnus', 'Cygnus' could refer to either # species, but the observer could not identify it. This causes ambiguity in the # data, and must be fixed by either 1. deleting the genus-only entry altogether, # or 2. renaming the genus-species entries to just the genus-only entry. # This decision can be fairly subjective, but generally if less than 50% of the # entries are genus-only, then they can be deleted (using bad_sp). If more than # 50% of the entries for that genus are only specified to the genus, then the # genus-species entries should be renamed to be genus-only (using typo_name). # If species names are coded (not scientific names) go back to study's metadata # to learn what species should and shouldn't be in the data. # In this example, a quick look at the metadata is not informative, unfortunately. # Because of this, you should really stop here and post an issue on GitHub. #--! PROVIDE INFO !--# bad_sp = c('') dataset6 = dataset5[!dataset5$species %in% bad_sp,] # It may be useful to count the number of times each name occurs, as misspellings # or typos will likely only show up one time. table(dataset6$species) # If you find any potential typos, try to confirm that the "mispelling" isn't # actually a valid name. If not, then list the typos in typo_name, and the # correct spellings in good_name, and then replace them using the for loop below: #--! PROVIDE INFO !--# typo_name = c('') #--! 
PROVIDE INFO !--# good_name = c('') if (length(typo_name) > 0 & typo_name[1] != "") { for (n in 1:length(typo_name)) { dataset6$species[dataset6$species == typo_name[n]] = good_name[n] } } # Reset the factor levels: dataset6$species = factor(dataset6$species) # Let's look at how the removal of bad species and altered the length of the dataset: nrow(dataset5) nrow(dataset6) # Look at the head of the dataset to ensure everything is correct: head(dataset6) # !GIT-ADD-COMMIT-PUSH AND DESCRIBE HOW THE SPECIES DATA WERE MODIFIED! #!DATA FORMATTING TABLE UPDATE! # Column M. Notes_spFormat. Provide a THOROUGH description of any changes made # to the species field, including why any species were removed. dataFormattingTable[,'Notes_spFormat'] = dataFormattingTableFieldUpdate(datasetID, 'Notes_spFormat', #--! PROVIDE INFO !--# 'Data was entered by hand, common species name was provided. No typos or bad spp found.') #-------------------------------------------------------------------------------* # ---- MAKE DATA FRAME OF COUNT BY SITES, SPECIES, AND YEAR ---- #===============================================================================* # Now we will make the final formatted dataset, add a datasetID field, check for # errors, and remove records that cant be used for our purposes. # First, lets add the datasetID: dataset6$datasetID = datasetID # Now make the compiled dataframe: dataset7 = ddply(dataset6,.(datasetID, site, date, species), summarize, count = sum(count)) # Explore the data frame: dim(dataset7) head(dataset7, 15) summary(dataset7) # !GIT-ADD-COMMIT-PUSH AND DESCRIBE HOW THE DATA WERE MODIFIED! #-------------------------------------------------------------------------------* # ---- UPDATE THE DATA FORMATTING TABLE AND WRITE OUTPUT DATA FRAMES ---- #===============================================================================* # Update the data formatting table (this may take a moment to process). 
Note that # the inputs for this are 'datasetID', the datasetID and the dataset form that you # consider to be fully formatted. dataFormattingTable = dataFormattingTableUpdate(datasetID, dataset7) # Take a final look at the dataset: head(dataset7) summary (dataset7) # If everything is looks okay we're ready to write formatted data frame: write.csv(dataset7, paste("data/formatted_datasets/dataset_", datasetID, ".csv", sep = ""), row.names = F) # !GIT-ADD-COMMIT-PUSH THE FORMATTED DATASET IN THE DATA FILE, # THEN GIT-ADD-COMMIT-PUSH THE UPDATED DATA FOLDER! # As we've now successfully created the formatted dataset, we will now update # the format flag field. dataFormattingTable[,'format_flag'] = dataFormattingTableFieldUpdate(datasetID, 'format_flag', #--! PROVIDE INFO !--# 1) # Flag codes are as follows: # 0 = not currently worked on # 1 = formatting complete # 2 = formatting in process # 3 = formatting halted, issue # 4 = data unavailable # 5 = data insufficient for generating occupancy data # !GIT-ADD-COMMIT-PUSH THE DATA FORMATTING TABLE! ###################################################################################* # ---- END DATA FORMATTING. START PROPOCC AND DATA SUMMARY ---- ###################################################################################* # We have now formatted the dataset to the finest possible spatial and temporal # grain, removed bad species, and added the dataset ID. It's now to make some # scale decisions and determine the proportional occupancies. # Load additional required libraries and dataset: library(dplyr) library(tidyr) # Read in formatted dataset if skipping above formatting code (lines 1-660). 
#dataset7 = read.csv(paste("data/formatted_datasets/dataset_", # datasetID, ".csv", sep ='')) # Have a look at the dimensions of the dataset and number of sites: dim(dataset7) length(unique(dataset7$site)) length(unique(dataset7$date)) head(dataset7) # Get the data formatting table for that dataset: dataDescription = dataFormattingTable[dataFormattingTable$dataset_ID == datasetID,] # or read it in from the saved data_formatting_table.csv if skipping lines 1-660. #dataDescription = subset(read.csv("data_formatting_table.csv"), # dataset_ID == datasetID) # Check relevant table values: dataDescription$LatLong_sites dataDescription$spatial_scale_variable dataDescription$Raw_siteUnit dataDescription$subannualTgrain # Before proceeding, we need to make decisions about the spatial and temporal grains at # which we will conduct our analyses. Except in unusual circumstances, the temporal # grain will almost always be 'year', but the spatial grain that best represents the # scale of a "community" will vary based on the sampling design and the taxonomic # group. Justify your spatial scale below with a comment. #--! PROVIDE INFO !--# tGrain = 'year' # Refresh your memory about the spatial grain names if this is NOT a lat-long-only # based dataset. Set sGrain = to the hierarchical scale for analysis, including # the higher levels separated by underscore. E.g., for a dataset with quads within # plots within the site, sGrain = 'site_plot_quad' or sGrain = 'site_plot' or # sGrain = 'site'. # HOWEVER, if the sites are purely defined by lat-longs, then sGrain should equal # a numerical value specifying the block size in degrees latitude for analysis. site_grain_names #--! PROVIDE INFO !--# sGrain = 'site' # This is a reasonable choice of spatial grain because ... #--! PROVIDE INFO !--# # only study site of 24 ha was used # The function "richnessYearSubsetFun" below will subset the data to sites with an # adequate number of years of sampling and species richness. 
If there are no # adequate years, the function will return a custom error message and you can # try resetting sGrain above to something coarser. Keep trying until this # runs without an error. If a particular sGrain value led to an error in this # function, you can make a note of that in the spatial grain justification comment # above. If this function fails for ALL spatial grains, then this dataset will # not be suitable for analysis and you can STOP HERE. richnessYearsTest = richnessYearSubsetFun(dataset7, spatialGrain = sGrain, temporalGrain = tGrain, minNTime = minNTime, minSpRich = minSpRich, dataDescription) head(richnessYearsTest) dim(richnessYearsTest) ; dim(dataset7) #Number of unique sites meeting criteria goodSites = unique(richnessYearsTest$analysisSite) length(goodSites) # Now subset dataset7 to just those goodSites as defined. This is tricky though # because assuming Sgrain is not the finest resolution, we will need to use # grep to match site names that begin with the string in goodSites. # The reason to do this is that sites which don't meet the criteria (e.g. not # enough years of data) may also have low sampling intensity that constrains # the subsampling level of the well sampled sites. uniqueSites = unique(dataset7$site) fullGoodSites = c() for (s in goodSites) { tmp = as.character(uniqueSites[grepl(paste(s, "_", sep = ""), paste(uniqueSites, "_", sep = ""))]) fullGoodSites = c(fullGoodSites, tmp) } dataset8 = subset(dataset7, site %in% fullGoodSites) # Once we've settled on spatial and temporal grains that pass our test above, # we then need to 1) figure out what levels of spatial and temporal subsampling # we should use to characterize that analysis grain, and 2) subset the # formatted dataset down to that standardized level of subsampling. # For example, if some sites had 20 spatial subsamples (e.g. 
quads) per year while # others had only 16, or 10, we would identify the level of subsampling that # at least 'topFractionSites' of sites met (with a default of 50%). We would # discard "poorly subsampled" sites (based on this criterion) from further analysis. # For the "well-sampled" sites, the function below randomly samples the # appropriate number of subsamples for each year or site, # and bases the characterization of the community in that site-year based on # the aggregate of those standardized subsamples. dataSubset = subsetDataFun(dataset8, datasetID, spatialGrain = sGrain, temporalGrain = tGrain, minNTime = minNTime, minSpRich = minSpRich, proportionalThreshold = topFractionSites, dataDescription) subsettedData = dataSubset$data write.csv(subsettedData, paste("data/standardized_datasets/dataset_", datasetID, ".csv", sep = ""), row.names = F) # Take a look at the propOcc: head(propOccFun(subsettedData)) hist(propOccFun(subsettedData)$propOcc) mean(propOccFun(subsettedData)$propOcc) # Take a look at the site summary frame: siteSummaryFun(subsettedData) # If everything looks good, write the files: writePropOccSiteSummary(subsettedData) # Save the spatial and temporal subsampling values to the data formatting table: dataFormattingTable[,'Spatial_subsamples'] = dataFormattingTableFieldUpdate(datasetID, 'Spatial_subsamples', dataSubset$w) dataFormattingTable[,'Temporal_subsamples'] = dataFormattingTableFieldUpdate(datasetID, 'Temporal_subsamples', dataSubset$z) # Update Data Formatting Table with summary stats of the formatted, # properly subsetted dataset dataFormattingTable = dataFormattingTableUpdateFinished(datasetID, subsettedData) dataFormattingTable[,'General_notes'] = dataFormattingTableFieldUpdate(datasetID, 'General_notes', #--! 
PROVIDE INFO !--# 'This dataset is different than app 1 bc it is the average number of wintering bird populations.') # And write the final data formatting table: write.csv(dataFormattingTable, 'data_formatting_table.csv', row.names = F) # Remove all objects except for functions from the environment: rm(list = setdiff(ls(), lsf.str()))
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bio_funs.R \name{applyIQRrule} \alias{applyIQRrule} \title{Apply the 1.5 x IQR Rule to a vector} \usage{ applyIQRrule(x) } \arguments{ \item{x}{a numeric vector} } \value{ a numeric vector with outliers removed } \description{ Apply the 1.5 x IQR Rule to a vector }
/man/applyIQRrule.Rd
no_license
CelMcC/UKBTools
R
false
true
344
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bio_funs.R \name{applyIQRrule} \alias{applyIQRrule} \title{Apply the 1.5 x IQR Rule to a vector} \usage{ applyIQRrule(x) } \arguments{ \item{x}{a numeric vector} } \value{ a numeric vector with outliers removed } \description{ Apply the 1.5 x IQR Rule to a vector }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/gate_data.R \name{gate_it} \alias{gate_it} \title{Generates the kerneld to gate the data} \usage{ gate_it(x, y, idx, thres_1, thres_2, n = 34, DEBUG = FALSE) } \arguments{ \item{x}{A vector of FSC data} \item{y}{A vector of SSC data} \item{thres_1}{Pre-gating fraction of cells that will be remove on x and y axis. This allows to limit computations and filtering to the area of interest. Yeast cells being small, default value is 0.05.} \item{thres_2}{A fraction of cells to keep after gating (approximately). This number defines the size of the gate (few cells: narrow gate).} } \value{ A kerneld } \description{ This function works of FL1H, FSC and SSC data acquired on 96-well plates }
/man/gate_it.Rd
no_license
magrichard/facsticor
R
false
true
770
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/gate_data.R \name{gate_it} \alias{gate_it} \title{Generates the kerneld to gate the data} \usage{ gate_it(x, y, idx, thres_1, thres_2, n = 34, DEBUG = FALSE) } \arguments{ \item{x}{A vector of FSC data} \item{y}{A vector of SSC data} \item{thres_1}{Pre-gating fraction of cells that will be remove on x and y axis. This allows to limit computations and filtering to the area of interest. Yeast cells being small, default value is 0.05.} \item{thres_2}{A fraction of cells to keep after gating (approximately). This number defines the size of the gate (few cells: narrow gate).} } \value{ A kerneld } \description{ This function works of FL1H, FSC and SSC data acquired on 96-well plates }
## If required, set your working directory ## Read and subset data EPC <- read.table("household_power_consumption.txt", sep = ";", header = TRUE, stringsAsFactors = FALSE) EPCFeb2007 <- EPC[EPC$Date == "1/2/2007" | EPC$Date == "2/2/2007",] ## four plots, 2 x 2 par(mfrow = c(2,2), mar = c(4,4,2,1)) ############################################################################ ## First plot ############################################################################ EPCFeb2007$Global_active_power <- as.numeric(EPCFeb2007$Global_active_power) plot(EPCFeb2007$Global_active_power, type = "l", ann = FALSE, axes = FALSE) box() title(ylab = "Global Active Power", cex = 0.8) axis(2, at=c(0,2,4,6)) axis(1, at=c(1,1440,2880), labels = c("Thu","Fri","Sat")) ############################################################################ ## Second plot ############################################################################ EPCFeb2007$Voltage <- as.numeric(EPCFeb2007$Voltage) plot(EPCFeb2007$Voltage, type = "l", ann = FALSE, axes = FALSE) box() title(ylab = "Voltage", xlab = "datetime") axis(2, at=seq(234,246,2)) axis(1, at=c(1,1440,2880), labels = c("Thu","Fri","Sat")) ############################################################################ ## Third plot ############################################################################ EPCFeb2007$Sub_metering_1 <- as.numeric(EPCFeb2007$Sub_metering_1) EPCFeb2007$Sub_metering_2 <- as.numeric(EPCFeb2007$Sub_metering_2) EPCFeb2007$Sub_metering_3 <- as.numeric(EPCFeb2007$Sub_metering_3) ## Make the plot, turn of axes and annotations plot(EPCFeb2007$Sub_metering_1, type = "l", ann = FALSE, axes = FALSE) ## Add Sub_metering_2 to the plot lines(EPCFeb2007$Sub_metering_2, type = "l", col = "red") ## Add Sub_metering_3 to the plot lines(EPCFeb2007$Sub_metering_3, type = "l", col = "blue") box() title(ylab = "Energy sub metering") axis(2, at=c(0,10,20,30)) axis(1, at=c(1,1440,2880), labels = c("Thu","Fri","Sat")) legend("topright", 
lty=c(1,1,1), col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), bty = "n", cex = 0.8) ############################################################################ ## Fourth plot ############################################################################ EPCFeb2007$Global_reactive_power <- as.numeric(EPCFeb2007$Global_reactive_power) plot(EPCFeb2007$Global_reactive_power, type = "l", ann = FALSE, axes = FALSE) box() title(ylab = "Global_reactive_power", xlab = "datetime") axis(2, at=seq(0.0,0.5,0.1)) axis(1, at=c(1,1440,2880), labels = c("Thu","Fri","Sat")) ############################################################################ ## Save as PNG ############################################################################ dev.copy(png,file = "plot4.png") dev.off()
/plot4.R
no_license
JannetvanZante/ExData_Plotting1
R
false
false
2,951
r
## If required, set your working directory ## Read and subset data EPC <- read.table("household_power_consumption.txt", sep = ";", header = TRUE, stringsAsFactors = FALSE) EPCFeb2007 <- EPC[EPC$Date == "1/2/2007" | EPC$Date == "2/2/2007",] ## four plots, 2 x 2 par(mfrow = c(2,2), mar = c(4,4,2,1)) ############################################################################ ## First plot ############################################################################ EPCFeb2007$Global_active_power <- as.numeric(EPCFeb2007$Global_active_power) plot(EPCFeb2007$Global_active_power, type = "l", ann = FALSE, axes = FALSE) box() title(ylab = "Global Active Power", cex = 0.8) axis(2, at=c(0,2,4,6)) axis(1, at=c(1,1440,2880), labels = c("Thu","Fri","Sat")) ############################################################################ ## Second plot ############################################################################ EPCFeb2007$Voltage <- as.numeric(EPCFeb2007$Voltage) plot(EPCFeb2007$Voltage, type = "l", ann = FALSE, axes = FALSE) box() title(ylab = "Voltage", xlab = "datetime") axis(2, at=seq(234,246,2)) axis(1, at=c(1,1440,2880), labels = c("Thu","Fri","Sat")) ############################################################################ ## Third plot ############################################################################ EPCFeb2007$Sub_metering_1 <- as.numeric(EPCFeb2007$Sub_metering_1) EPCFeb2007$Sub_metering_2 <- as.numeric(EPCFeb2007$Sub_metering_2) EPCFeb2007$Sub_metering_3 <- as.numeric(EPCFeb2007$Sub_metering_3) ## Make the plot, turn of axes and annotations plot(EPCFeb2007$Sub_metering_1, type = "l", ann = FALSE, axes = FALSE) ## Add Sub_metering_2 to the plot lines(EPCFeb2007$Sub_metering_2, type = "l", col = "red") ## Add Sub_metering_3 to the plot lines(EPCFeb2007$Sub_metering_3, type = "l", col = "blue") box() title(ylab = "Energy sub metering") axis(2, at=c(0,10,20,30)) axis(1, at=c(1,1440,2880), labels = c("Thu","Fri","Sat")) legend("topright", 
lty=c(1,1,1), col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), bty = "n", cex = 0.8) ############################################################################ ## Fourth plot ############################################################################ EPCFeb2007$Global_reactive_power <- as.numeric(EPCFeb2007$Global_reactive_power) plot(EPCFeb2007$Global_reactive_power, type = "l", ann = FALSE, axes = FALSE) box() title(ylab = "Global_reactive_power", xlab = "datetime") axis(2, at=seq(0.0,0.5,0.1)) axis(1, at=c(1,1440,2880), labels = c("Thu","Fri","Sat")) ############################################################################ ## Save as PNG ############################################################################ dev.copy(png,file = "plot4.png") dev.off()
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ncGTWalign.R \name{ncGTWalign} \alias{ncGTWalign} \title{Run ncGTW alignment} \usage{ ncGTWalign(ncGTWinput, xcmsLargeWin, parSamp = 10, k1Num = 3, k2Num = 1, bpParam = BiocParallel::SnowParam(workers = 1), ncGTWparam = NULL) } \arguments{ \item{ncGTWinput}{A \code{\link{ncGTWinput}} object.} \item{xcmsLargeWin}{A \code{\link[xcms]{xcmsSet-class}} object.} \item{parSamp}{Decide how many samples are in each group when considering parallel computing, and the default is 10.} \item{k1Num}{Decide how many different k1 will be tested in stage 1. The default is 3.} \item{k2Num}{Decide how many different k2 will be tested in stage 2. The default is 1.} \item{bpParam}{A object of \pkg{BiocParallel} to control parallel processing, and can be created by \code{\link[BiocParallel:SerialParam-class]{SerialParam}}, \code{\link[BiocParallel:MulticoreParam-class]{MulticoreParam}}, or \code{\link[BiocParallel:SnowParam-class]{SnowParam}}.} \item{ncGTWparam}{A \code{\link{ncGTWparam}} object.} } \value{ A \code{\link{ncGTWoutput}} object. } \description{ This function applies ncGTW alignment to the input feature. } \details{ This function realign the input feature with ncGTW alignment function with given m/z and RT range. 
} \examples{ # obtain data data('xcmsExamples') xcmsLargeWin <- xcmsExamples$xcmsLargeWin xcmsSmallWin <- xcmsExamples$xcmsSmallWin ppm <- xcmsExamples$ppm # detect misaligned features excluGroups <- misalignDetect(xcmsLargeWin, xcmsSmallWin, ppm) # obtain the paths of the sample files filepath <- system.file("extdata", package = "ncGTW") file <- list.files(filepath, pattern="mzxml", full.names=TRUE) tempInd <- matrix(0, length(file), 1) for (n in seq_along(file)){ tempCha <- file[n] tempLen <- nchar(tempCha) tempInd[n] <- as.numeric(substr(tempCha, regexpr("example", tempCha) + 7, tempLen - 6)) } # sort the paths by data acquisition order file <- file[sort.int(tempInd, index.return = TRUE)$ix] \dontrun{ # load the sample profiles ncGTWinputs <- loadProfile(file, excluGroups) # initialize the parameters of ncGTW alignment with default ncGTWparam <- new("ncGTWparam") # run ncGTW alignment ncGTWoutputs <- vector('list', length(ncGTWinputs)) for (n in seq_along(ncGTWinputs)) ncGTWoutputs[[n]] <- ncGTWalign(ncGTWinputs[[n]], xcmsLargeWin, 5, ncGTWparam = ncGTWparam) } }
/man/ncGTWalign.Rd
no_license
ChiungTingWu/ncGTW
R
false
true
2,431
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ncGTWalign.R \name{ncGTWalign} \alias{ncGTWalign} \title{Run ncGTW alignment} \usage{ ncGTWalign(ncGTWinput, xcmsLargeWin, parSamp = 10, k1Num = 3, k2Num = 1, bpParam = BiocParallel::SnowParam(workers = 1), ncGTWparam = NULL) } \arguments{ \item{ncGTWinput}{A \code{\link{ncGTWinput}} object.} \item{xcmsLargeWin}{A \code{\link[xcms]{xcmsSet-class}} object.} \item{parSamp}{Decide how many samples are in each group when considering parallel computing, and the default is 10.} \item{k1Num}{Decide how many different k1 will be tested in stage 1. The default is 3.} \item{k2Num}{Decide how many different k2 will be tested in stage 2. The default is 1.} \item{bpParam}{A object of \pkg{BiocParallel} to control parallel processing, and can be created by \code{\link[BiocParallel:SerialParam-class]{SerialParam}}, \code{\link[BiocParallel:MulticoreParam-class]{MulticoreParam}}, or \code{\link[BiocParallel:SnowParam-class]{SnowParam}}.} \item{ncGTWparam}{A \code{\link{ncGTWparam}} object.} } \value{ A \code{\link{ncGTWoutput}} object. } \description{ This function applies ncGTW alignment to the input feature. } \details{ This function realign the input feature with ncGTW alignment function with given m/z and RT range. 
} \examples{ # obtain data data('xcmsExamples') xcmsLargeWin <- xcmsExamples$xcmsLargeWin xcmsSmallWin <- xcmsExamples$xcmsSmallWin ppm <- xcmsExamples$ppm # detect misaligned features excluGroups <- misalignDetect(xcmsLargeWin, xcmsSmallWin, ppm) # obtain the paths of the sample files filepath <- system.file("extdata", package = "ncGTW") file <- list.files(filepath, pattern="mzxml", full.names=TRUE) tempInd <- matrix(0, length(file), 1) for (n in seq_along(file)){ tempCha <- file[n] tempLen <- nchar(tempCha) tempInd[n] <- as.numeric(substr(tempCha, regexpr("example", tempCha) + 7, tempLen - 6)) } # sort the paths by data acquisition order file <- file[sort.int(tempInd, index.return = TRUE)$ix] \dontrun{ # load the sample profiles ncGTWinputs <- loadProfile(file, excluGroups) # initialize the parameters of ncGTW alignment with default ncGTWparam <- new("ncGTWparam") # run ncGTW alignment ncGTWoutputs <- vector('list', length(ncGTWinputs)) for (n in seq_along(ncGTWinputs)) ncGTWoutputs[[n]] <- ncGTWalign(ncGTWinputs[[n]], xcmsLargeWin, 5, ncGTWparam = ncGTWparam) } }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utils.R \name{\%all_in\%} \alias{\%all_in\%} \title{All value matching} \usage{ x \%all_in\% X } \arguments{ \item{x}{vector or NULL: the values to be matched.} \item{X}{vector or NULL: the values to be matched against.} } \value{ TRUE or FALSE } \description{ All value matching } \examples{ letters[1:3] \%all_in\% letters[1:5] letters[1:3] \%all_in\% letters[2:5] }
/man/grapes-all_in-grapes.Rd
no_license
JK-junkin/lgbktestr
R
false
true
449
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utils.R \name{\%all_in\%} \alias{\%all_in\%} \title{All value matching} \usage{ x \%all_in\% X } \arguments{ \item{x}{vector or NULL: the values to be matched.} \item{X}{vector or NULL: the values to be matched against.} } \value{ TRUE or FALSE } \description{ All value matching } \examples{ letters[1:3] \%all_in\% letters[1:5] letters[1:3] \%all_in\% letters[2:5] }
## Functions cache potentially time consuming calculations for inverse of special matrix. ## The values of matrix and it inverse are cached so if matrix is not changed it inverse ## can be looked up rather then recalculated. ## Caching is based on the scoping rules of R language for preservig state inside of R object. ## makeCacheMatrix creates a special list containing a function to ## set the value of the matrix ## get the value of the matrix ## set the value of the inverse of the matrix ## get the value of the inverse of the matrix makeCacheMatrix <- function(x = matrix()) { s <- NULL set <- function(y) { x <<- y s <<- NULL } get <- function() x setsolve <- function(solve) s <<- solve getsolve <- function() s list(set = set, get = get, setsolve = setsolve , getsolve = getsolve ) } ## The following function calculates the inverse of the special "matrix" created with ## the function "makeCacheMatrix". ## It first checks to see if the inverse has already been calculated. If so, it gets ## the inverse from the cache and skips the computation. ## Otherwise, it calculates the inverse of the data and sets the value of the inverse ## in the cache via the setsolve function. cacheSolve <- function(x, ...) { s <- x$getsolve() if(!is.null(s)) { ## Return a matrix that is the inverse of 'x' return(s) } data <- x$get() s <- solve(data, ...) x$setsolve(s) s }
/cachematrix.R
no_license
Yelena-Davy/ProgrammingAssignment2
R
false
false
1,443
r
## Functions cache potentially time consuming calculations for inverse of special matrix. ## The values of matrix and it inverse are cached so if matrix is not changed it inverse ## can be looked up rather then recalculated. ## Caching is based on the scoping rules of R language for preservig state inside of R object. ## makeCacheMatrix creates a special list containing a function to ## set the value of the matrix ## get the value of the matrix ## set the value of the inverse of the matrix ## get the value of the inverse of the matrix makeCacheMatrix <- function(x = matrix()) { s <- NULL set <- function(y) { x <<- y s <<- NULL } get <- function() x setsolve <- function(solve) s <<- solve getsolve <- function() s list(set = set, get = get, setsolve = setsolve , getsolve = getsolve ) } ## The following function calculates the inverse of the special "matrix" created with ## the function "makeCacheMatrix". ## It first checks to see if the inverse has already been calculated. If so, it gets ## the inverse from the cache and skips the computation. ## Otherwise, it calculates the inverse of the data and sets the value of the inverse ## in the cache via the setsolve function. cacheSolve <- function(x, ...) { s <- x$getsolve() if(!is.null(s)) { ## Return a matrix that is the inverse of 'x' return(s) } data <- x$get() s <- solve(data, ...) x$setsolve(s) s }
\name{Angular central Gaussian random values simulation} \alias{racg} \title{ Angular central Gaussian random values simulation } \description{ Angular central Gaussian random values simulation. } \usage{ racg(n, sigma, seed = NULL) } \arguments{ \item{n}{ The sample size, a numerical value. } \item{sigma}{ The covariance matrix in \eqn{R^d}. } \item{seed}{ If you want the same to be generated again use a seed for the generator, an integer number. } } \details{ The algorithm uses univariate normal random values and transforms them to multivariate via a spectral decomposition. The vectors are then scaled to have unit length. } \value{ A matrix with the simulated data. } \references{ Tyler D. E. (1987). Statistical analysis for the angular central Gaussian distribution on the sphere. Biometrika 74(3): 579-589. } \author{ Michail Tsagris R implementation and documentation: Michail Tsagris <mtsagris@uoc.gr> } %\note{ %% ~~further notes~~ %} \seealso{ \code{\link{acg.mle}, \link{rmvnorm}, \link{rmvlaplace}, \link{rmvt} } } \examples{ s <- cov( iris[, 1:4] ) x <- racg(100, s) res<-acg.mle(x) res<-vmf.mle(x) ## the concentration parameter, kappa, is very low, close to zero, as expected. } \keyword{ Angular central Gaussian distribution } \keyword{ random values simulation } \keyword{ directional data }
/man/racg.Rd
no_license
cran/Rfast
R
false
false
1,400
rd
\name{Angular central Gaussian random values simulation} \alias{racg} \title{ Angular central Gaussian random values simulation } \description{ Angular central Gaussian random values simulation. } \usage{ racg(n, sigma, seed = NULL) } \arguments{ \item{n}{ The sample size, a numerical value. } \item{sigma}{ The covariance matrix in \eqn{R^d}. } \item{seed}{ If you want the same to be generated again use a seed for the generator, an integer number. } } \details{ The algorithm uses univariate normal random values and transforms them to multivariate via a spectral decomposition. The vectors are then scaled to have unit length. } \value{ A matrix with the simulated data. } \references{ Tyler D. E. (1987). Statistical analysis for the angular central Gaussian distribution on the sphere. Biometrika 74(3): 579-589. } \author{ Michail Tsagris R implementation and documentation: Michail Tsagris <mtsagris@uoc.gr> } %\note{ %% ~~further notes~~ %} \seealso{ \code{\link{acg.mle}, \link{rmvnorm}, \link{rmvlaplace}, \link{rmvt} } } \examples{ s <- cov( iris[, 1:4] ) x <- racg(100, s) res<-acg.mle(x) res<-vmf.mle(x) ## the concentration parameter, kappa, is very low, close to zero, as expected. } \keyword{ Angular central Gaussian distribution } \keyword{ random values simulation } \keyword{ directional data }
library(shiny) library(promises) library(future) library(DBI) plan(multiprocess) shinyApp( ui = fluidPage( textOutput("time"), actionButton("btn", "Long query!"), textOutput("query_status"), sliderInput("cyls", "Cylinders", min = 1, max = 8, value = 4, step = 1), plotOutput("plt") ), server = function(input, output, session) { output$time <- renderText({ invalidateLater(1000) paste0("Time: ", format(Sys.time())) }) query_result <- eventReactive(input$btn, { future({ x <- system.time({ # The DB connection needs to be created and destroyed within the future con <- DBI::dbConnect(RSQLite::SQLite(), "tmp.sqlite") DBI::dbGetQuery(con, "WITH RECURSIVE r(i) AS ( VALUES(0) UNION ALL SELECT i FROM r LIMIT 10000000 ) SELECT i FROM r WHERE i = 1;") DBI::dbDisconnect(con) }) x[["elapsed"]] }) }) output$query_status <- renderText({ query_result() %...>% (function(result) { paste0("[", Sys.time(), "] Query completed in ", result, " seconds") }) }) output$plt <- renderPlot({ plot(mpg ~ disp, data = mtcars[mtcars$cyl >= input$cyls,,drop = FALSE], pch = 16, cex = 2, col = cyl) }) } )
/R/async/app.R
no_license
blairj09/shiny-query-async
R
false
false
1,495
r
library(shiny) library(promises) library(future) library(DBI) plan(multiprocess) shinyApp( ui = fluidPage( textOutput("time"), actionButton("btn", "Long query!"), textOutput("query_status"), sliderInput("cyls", "Cylinders", min = 1, max = 8, value = 4, step = 1), plotOutput("plt") ), server = function(input, output, session) { output$time <- renderText({ invalidateLater(1000) paste0("Time: ", format(Sys.time())) }) query_result <- eventReactive(input$btn, { future({ x <- system.time({ # The DB connection needs to be created and destroyed within the future con <- DBI::dbConnect(RSQLite::SQLite(), "tmp.sqlite") DBI::dbGetQuery(con, "WITH RECURSIVE r(i) AS ( VALUES(0) UNION ALL SELECT i FROM r LIMIT 10000000 ) SELECT i FROM r WHERE i = 1;") DBI::dbDisconnect(con) }) x[["elapsed"]] }) }) output$query_status <- renderText({ query_result() %...>% (function(result) { paste0("[", Sys.time(), "] Query completed in ", result, " seconds") }) }) output$plt <- renderPlot({ plot(mpg ~ disp, data = mtcars[mtcars$cyl >= input$cyls,,drop = FALSE], pch = 16, cex = 2, col = cyl) }) } )
# ------------------------------------------------------------------------------ # # Functions for elastic net and LR # # ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------ # Plot regularisation path for glmnet # ------------------------------------------------------------------------------ plot_reg_path_glmnet <- function(results, n_feat="all"){ # Plots the regularisation paths for each model in the outer folds. library(plotmo) # Setup the plot outer_fold_n <- length(results$models) num_rows <- ceiling((outer_fold_n)/2) par(mfrow=c(num_rows, 2)) models <- get_models(results) for (i in 1:outer_fold_n){ # Get best lambda and model best_lambda <- results$models[[i]]$learner.model$opt.result$x$s model <- models[[i]] title <- paste("Outer fold", as.character(i)) # Plot regularisation path with the best lambda=s chosen by CV if (n_feat == "all"){ plotmo::plot_glmnet(model, label=T, s=best_lambda, main=title) }else{ plotmo::plot_glmnet(model, label=n_feat, s=best_lambda, main=title) # grid.col="lightgrey" adds grid to the plits } } par(mfrow=c(1,1)) } # ------------------------------------------------------------------------------ # Calculate odds ratios from the logistic regression coefficients # ------------------------------------------------------------------------------ get_odds_ratios <- function(model){ # For details see here: http://www.ats.ucla.edu/stat/r/dae/logit.htm exp(cbind(OR = coef(model), confint(model))) }
/palab_model/palab_model_lr.R
no_license
jzhao0802/PA_UK
R
false
false
1,661
r
# ------------------------------------------------------------------------------ # # Functions for elastic net and LR # # ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------ # Plot regularisation path for glmnet # ------------------------------------------------------------------------------ plot_reg_path_glmnet <- function(results, n_feat="all"){ # Plots the regularisation paths for each model in the outer folds. library(plotmo) # Setup the plot outer_fold_n <- length(results$models) num_rows <- ceiling((outer_fold_n)/2) par(mfrow=c(num_rows, 2)) models <- get_models(results) for (i in 1:outer_fold_n){ # Get best lambda and model best_lambda <- results$models[[i]]$learner.model$opt.result$x$s model <- models[[i]] title <- paste("Outer fold", as.character(i)) # Plot regularisation path with the best lambda=s chosen by CV if (n_feat == "all"){ plotmo::plot_glmnet(model, label=T, s=best_lambda, main=title) }else{ plotmo::plot_glmnet(model, label=n_feat, s=best_lambda, main=title) # grid.col="lightgrey" adds grid to the plits } } par(mfrow=c(1,1)) } # ------------------------------------------------------------------------------ # Calculate odds ratios from the logistic regression coefficients # ------------------------------------------------------------------------------ get_odds_ratios <- function(model){ # For details see here: http://www.ats.ucla.edu/stat/r/dae/logit.htm exp(cbind(OR = coef(model), confint(model))) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/specifications.R \docType{class} \name{spec_logreg-class} \alias{spec_logreg-class} \alias{.make_spec_logreg} \title{S4 class spec_logreg for a logistic regression specification.} \description{ See help for \code{\linkS4class{spec}}. }
/man/spec_logreg-class.Rd
no_license
JacobBergstedt/mmi
R
false
true
314
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/specifications.R \docType{class} \name{spec_logreg-class} \alias{spec_logreg-class} \alias{.make_spec_logreg} \title{S4 class spec_logreg for a logistic regression specification.} \description{ See help for \code{\linkS4class{spec}}. }
\name{diff.ff} \alias{diff.ff} \title{Lagged Differences} \usage{ \method{diff}{ff} (x, lag = 1L, differences = 1L, ...) } \arguments{ \item{x}{a \code{ff} vector containing values to be differenced} \item{lag}{a n integer indicating which lag to use} \item{differences}{an integer indicating the order of the difference} \item{...}{other parameters will be passed on to diff} } \description{ Returned suitably lagged and iterated differences }
/pkg/man/diff.ff.Rd
no_license
nalimilan/ffbase
R
false
false
465
rd
\name{diff.ff} \alias{diff.ff} \title{Lagged Differences} \usage{ \method{diff}{ff} (x, lag = 1L, differences = 1L, ...) } \arguments{ \item{x}{a \code{ff} vector containing values to be differenced} \item{lag}{a n integer indicating which lag to use} \item{differences}{an integer indicating the order of the difference} \item{...}{other parameters will be passed on to diff} } \description{ Returned suitably lagged and iterated differences }
install.packages("randomForest") library(datasets) library(randomForest) iris <- read.csv(file = "C:\\Users\\prana\\Desktop\\loadsmart\\flowers.csv", header = TRUE) head(iris) set.seed(2) iris.split <- sample(2,nrow(iris),replace = TRUE,prob = c(2/3,1/3)) iris.train <- iris[iris.split == 1 ,] iris.test <- iris[iris.split == 2 ,] set.seed(1) iris.rf <- randomForest(species ~ data$petal.length+data$petal.width , data = iris.train, ntree = 0, proximity = TRUE) print(iris.rf) plot(iris.rf) importance(iris.rf) varImpPlot(iris.rf) #use the model training data iris.pred <- predict(iris.rf , newdata = iris.train) table(iris.pred, iris.train$species) (sum(iris.pred==iris.train$species)/length(iris.pred))*100 #use the model on testing data iris.pred <- predict(iris.rf , newdata = iris.test) table(iris.pred, iris.test$species) (sum(iris.pred==iris.test$species)/length(iris.pred))*100 #use the model on entire data set iris.pred <- predict(iris.rf, newdata = iris) table(iris.pred, iris$species) (sum(iris.pred==iris$species)/length(iris.pred))*100
/Machine Learning/Random Forest/rf.R
no_license
pkreddy/SPU-Academic-Projects
R
false
false
1,131
r
install.packages("randomForest") library(datasets) library(randomForest) iris <- read.csv(file = "C:\\Users\\prana\\Desktop\\loadsmart\\flowers.csv", header = TRUE) head(iris) set.seed(2) iris.split <- sample(2,nrow(iris),replace = TRUE,prob = c(2/3,1/3)) iris.train <- iris[iris.split == 1 ,] iris.test <- iris[iris.split == 2 ,] set.seed(1) iris.rf <- randomForest(species ~ data$petal.length+data$petal.width , data = iris.train, ntree = 0, proximity = TRUE) print(iris.rf) plot(iris.rf) importance(iris.rf) varImpPlot(iris.rf) #use the model training data iris.pred <- predict(iris.rf , newdata = iris.train) table(iris.pred, iris.train$species) (sum(iris.pred==iris.train$species)/length(iris.pred))*100 #use the model on testing data iris.pred <- predict(iris.rf , newdata = iris.test) table(iris.pred, iris.test$species) (sum(iris.pred==iris.test$species)/length(iris.pred))*100 #use the model on entire data set iris.pred <- predict(iris.rf, newdata = iris) table(iris.pred, iris$species) (sum(iris.pred==iris$species)/length(iris.pred))*100
\name{PairPiece} \alias{PairPiece} \title{The Piecewise Constant Pairwise Interaction Point Process Model} \description{ Creates an instance of a pairwise interaction point process model with piecewise constant potential function. The model can then be fitted to point pattern data. } \usage{ PairPiece(r) } \arguments{ \item{r}{vector of jump points for the potential function} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of a point process. The process is a pairwise interaction process, whose interaction potential is piecewise constant, with jumps at the distances given in the vector \eqn{r}. } \details{ A pairwise interaction point process in a bounded region is a stochastic point process with probability density of the form \deqn{ f(x_1,\ldots,x_n) = \alpha \prod_i b(x_i) \prod_{i < j} h(x_i, x_j) }{ f(x_1,\ldots,x_n) = alpha . product { b(x[i]) } product { h(x_i, x_j) } } where \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]} represent the points of the pattern. The first product on the right hand side is over all points of the pattern; the second product is over all unordered pairs of points of the pattern. Thus each point \eqn{x_i}{x[i]} of the pattern contributes a factor \eqn{b(x_i)}{b(x[i])} to the probability density, and each pair of points \eqn{x_i, x_j}{x[i], x[j]} contributes a factor \eqn{h(x_i,x_j)}{h(x[i], x[j])} to the density. The pairwise interaction term \eqn{h(u, v)} is called \emph{piecewise constant} if it depends only on the distance between \eqn{u} and \eqn{v}, say \eqn{h(u,v) = H(||u-v||)}, and \eqn{H} is a piecewise constant function (a function which is constant except for jumps at a finite number of places). The use of piecewise constant interaction terms was first suggested by Takacs (1986). 
The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the piecewise constant pairwise interaction is yielded by the function \code{PairPiece()}. See the examples below. The entries of \code{r} must be strictly increasing, positive numbers. They are interpreted as the points of discontinuity of \eqn{H}. It is assumed that \eqn{H(s) =1} for all \eqn{s > r_{max}}{s > rmax} where \eqn{r_{max}}{rmax} is the maximum value in \code{r}. Thus the model has as many regular parameters (see \code{\link{ppm}}) as there are entries in \code{r}. The \eqn{i}-th regular parameter \eqn{\theta_i}{theta[i]} is the logarithm of the value of the interaction function \eqn{H} on the interval \eqn{[r_{i-1},r_i)}{[r[i-1],r[i])}. If \code{r} is a single number, this model is similar to the Strauss process, see \code{\link{Strauss}}. The difference is that in \code{PairPiece} the interaction function is continuous on the right, while in \code{\link{Strauss}} it is continuous on the left. The analogue of this model for multitype point processes has not yet been implemented. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}}, \code{\link{Strauss}} \code{\link{rmh.ppm}} } \examples{ PairPiece(c(0.1,0.2)) # prints a sensible description of itself data(cells) ppm(cells ~1, PairPiece(r = c(0.05, 0.1, 0.2))) # fit a stationary piecewise constant pairwise interaction process # ppm(cells ~polynom(x,y,3), PairPiece(c(0.05, 0.1))) # nonstationary process with log-cubic polynomial trend } \references{ Takacs, R. (1986) Estimator for the pair potential of a Gibbsian point process. \emph{Statistics} \bold{17}, 429--433. } \author{\adrian and \rolf } \keyword{spatial} \keyword{models}
/man/PairPiece.Rd
no_license
spatstat/spatstat.core
R
false
false
3,872
rd
\name{PairPiece} \alias{PairPiece} \title{The Piecewise Constant Pairwise Interaction Point Process Model} \description{ Creates an instance of a pairwise interaction point process model with piecewise constant potential function. The model can then be fitted to point pattern data. } \usage{ PairPiece(r) } \arguments{ \item{r}{vector of jump points for the potential function} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of a point process. The process is a pairwise interaction process, whose interaction potential is piecewise constant, with jumps at the distances given in the vector \eqn{r}. } \details{ A pairwise interaction point process in a bounded region is a stochastic point process with probability density of the form \deqn{ f(x_1,\ldots,x_n) = \alpha \prod_i b(x_i) \prod_{i < j} h(x_i, x_j) }{ f(x_1,\ldots,x_n) = alpha . product { b(x[i]) } product { h(x_i, x_j) } } where \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]} represent the points of the pattern. The first product on the right hand side is over all points of the pattern; the second product is over all unordered pairs of points of the pattern. Thus each point \eqn{x_i}{x[i]} of the pattern contributes a factor \eqn{b(x_i)}{b(x[i])} to the probability density, and each pair of points \eqn{x_i, x_j}{x[i], x[j]} contributes a factor \eqn{h(x_i,x_j)}{h(x[i], x[j])} to the density. The pairwise interaction term \eqn{h(u, v)} is called \emph{piecewise constant} if it depends only on the distance between \eqn{u} and \eqn{v}, say \eqn{h(u,v) = H(||u-v||)}, and \eqn{H} is a piecewise constant function (a function which is constant except for jumps at a finite number of places). The use of piecewise constant interaction terms was first suggested by Takacs (1986). 
The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the piecewise constant pairwise interaction is yielded by the function \code{PairPiece()}. See the examples below. The entries of \code{r} must be strictly increasing, positive numbers. They are interpreted as the points of discontinuity of \eqn{H}. It is assumed that \eqn{H(s) =1} for all \eqn{s > r_{max}}{s > rmax} where \eqn{r_{max}}{rmax} is the maximum value in \code{r}. Thus the model has as many regular parameters (see \code{\link{ppm}}) as there are entries in \code{r}. The \eqn{i}-th regular parameter \eqn{\theta_i}{theta[i]} is the logarithm of the value of the interaction function \eqn{H} on the interval \eqn{[r_{i-1},r_i)}{[r[i-1],r[i])}. If \code{r} is a single number, this model is similar to the Strauss process, see \code{\link{Strauss}}. The difference is that in \code{PairPiece} the interaction function is continuous on the right, while in \code{\link{Strauss}} it is continuous on the left. The analogue of this model for multitype point processes has not yet been implemented. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}}, \code{\link{Strauss}} \code{\link{rmh.ppm}} } \examples{ PairPiece(c(0.1,0.2)) # prints a sensible description of itself data(cells) ppm(cells ~1, PairPiece(r = c(0.05, 0.1, 0.2))) # fit a stationary piecewise constant pairwise interaction process # ppm(cells ~polynom(x,y,3), PairPiece(c(0.05, 0.1))) # nonstationary process with log-cubic polynomial trend } \references{ Takacs, R. (1986) Estimator for the pair potential of a Gibbsian point process. \emph{Statistics} \bold{17}, 429--433. } \author{\adrian and \rolf } \keyword{spatial} \keyword{models}
# Uber and taxi trip analysis by geography
# Andrew Flowers
#
# Flat analysis script: loads aggregate (Apr-Sept 2014) and monthly (Apr/Sept)
# pickup counts per NYC census tract, joins them, and compares Uber vs. taxi
# activity by borough, airport status and Central Business District (CBD).
# NOTE(review): relies on setwd() and local CSVs; the paths below must exist
# for the script to run.

setwd("~/data-analysis/uber-nyc/")

require(readr)
require(stringr)
require(lubridate)
require(dplyr)
require(ggplot2)
require(reshape2)

# Boro codes: county FIPS codes within New York State mapped to borough names
boroCodes <- data.frame(codes=c(5, 47, 61, 81, 85), name=c("Bronx", "Brooklyn", "Manhattan", "Queens", "Staten Island"))

## Aggregate trip data (Apr-Sept)
agg_trips <- read_csv("for_analysisv2.csv")
carl_coding <- read_csv("carl_manhattan_coding.csv")  # manual CBD coding of tracts

# Clean names
names(agg_trips) <- tolower(names(agg_trips))
# NOTE(review): assumes a fixed column order in the CSV for columns 8-10 -- confirm
names(agg_trips)[8:10] <- c("uber_total", "green_total", "yellow_total")
agg_trips$cab_total <- agg_trips$green_total + agg_trips$yellow_total
# Lookup joins via match(): borough from county FIPS, CBD flag from tract GEOID
agg_trips$boro <- boroCodes[match(agg_trips$countyfp, boroCodes$codes),]$name
agg_trips$cbd <- carl_coding[match(agg_trips$geoid, carl_coding$GEOID),]$CBD

length(unique(agg_trips$geoid)) # 2165 unique geoids

# Flag the two Queens tracts treated as airports (tracts 71600 and 33100)
# NOTE(review): presumably JFK and LaGuardia -- confirm the tract codes
agg_trips <- agg_trips %>% mutate(airport=ifelse(tractce %in% c(71600, 33100) & boro=="Queens", TRUE, FALSE))

table(agg_trips$boro)

# Trip data for Apr and Sept 2014 separately
apr_sept_trips <- data.frame()

# Six csv files stored in sub-directory `/geo-trip data` include:
# "green_data_april2014.csv" "green_data_sept2014.csv" "taxi_data_april2014.csv"
# "taxi_data_sept2014.csv" "uber_data_april2014.csv" "uber_data_sept2014.csv"
list.files("./geo-trip-data/")

# Aggregate those six files into a master file of the Apr and Sept trips by geoid
for (f in list.files("./geo-trip-data/")){
  temp <- read_csv(paste0("./geo-trip-data/",f))
  temp$old_name <- f  # remember source file so rows can be labelled below
  apr_sept_trips <- rbind(apr_sept_trips, temp)
}

length(unique((apr_sept_trips$geoid))) # 2923 unique geoids

# Map raw file names to short service/month labels.
# NOTE(review): new_name order assumes list.files() yields the six files in the
# alphabetical order listed above -- confirm before trusting the labels
fileNames <- data.frame(old_name=unique(apr_sept_trips$old_name), new_name=c("green_apr", "green_sept", "yellow_apr", "yellow_sept", "uber_apr", "uber_sept"))
apr_sept_trips$file <- fileNames[match(apr_sept_trips$old_name, fileNames$old_name),]$new_name
table(apr_sept_trips$file, useNA = "ifany")

length(common_tracts <- intersect(agg_trips$geoid, apr_sept_trips$geoid)) # 2160 common geoids between two data sets

# Attach borough labels to the monthly data (NA = geoid not present in agg_trips)
apr_sept_trips$boro <- agg_trips[match(apr_sept_trips$geoid, agg_trips$geoid),]$boro
table(apr_sept_trips$boro, useNA = "ifany")

length(common_tracts <- intersect(agg_trips$geoid, apr_sept_trips[!is.na(apr_sept_trips$boro),]$geoid))

# Investigate apr_sept_trips where no boro is found
# Presumably geoid is outside of NYC
# View(apr_sept_trips %>% filter(is.na(boro)))
# Might want to investigate where these geoids are exactly -- outside NYC, right?

apr_sept_trips_nyc <- apr_sept_trips %>% filter(!is.na(boro))

# Wide layout (reshape2): one row per geoid, one column per service/month label
apr_sept_trips_cast <- dcast(data=apr_sept_trips_nyc, formula=geoid~file, value.var="total")

# Create `all_trips` data frame includes data by 2160 geoids (census tracts) in NYC
all_trips <- agg_trips %>% right_join(apr_sept_trips_cast, by="geoid")
# NOTE(review): zero-fills every NA cell in the frame, not just the trip counts
all_trips[is.na(all_trips)] <- 0

agg_byBoro <- all_trips %>% group_by(boro) %>% summarize(uber_total=sum(uber_total, na.rm=T), green_total=sum(green_total, na.rm=T), yellow_total=sum(yellow_total, na.rm=T), taxi_total=green_total+yellow_total, total_trips=uber_total+taxi_total)

# 22% of Uber pickups are outside of Manhattan
sum(agg_byBoro[agg_byBoro$boro!="Manhattan",]$uber_total)/sum(agg_byBoro$uber_total)

# 14% of taxi pickups are outside of Manhattan
sum(agg_byBoro[agg_byBoro$boro!="Manhattan",]$taxi_total, na.rm=T)/sum(agg_byBoro$taxi_total, na.rm=T)

# 4.5% of Uber pickups are from airports
sum(all_trips[all_trips$airport==TRUE,]$uber_total, na.rm=T)/sum(all_trips$uber_total, na.rm=T)

# 3.8% of taxi pickups are from airports
sum(all_trips[all_trips$airport==TRUE,]$cab_total, na.rm=T)/sum(all_trips$cab_total, na.rm=T)

# 63% of Uber trips are in the Central Business District (CBD)
sum(all_trips[all_trips$cbd==1,]$uber_total, na.rm=T)/sum(all_trips$uber_total, na.rm=T)

# 62% of taxi trips are in the Central Business District (CBD)
sum(all_trips[all_trips$cbd==1,]$cab_total, na.rm=T)/sum(all_trips$cab_total, na.rm=T)

# Borough-level Apr vs Sept totals and absolute changes per service
apr_sept_byBoro <- all_trips %>% group_by(boro) %>% summarize(uber_apr=sum(uber_apr, na.rm=T), uber_sept=sum(uber_sept, na.rm=T), green_apr=sum(green_apr, na.rm=T), green_sept=sum(green_sept, na.rm=T), yellow_apr=sum(yellow_apr, na.rm=T), yellow_sept=sum(yellow_sept, na.rm=T), uber_chng=uber_sept-uber_apr, green_chng=green_sept-green_apr, yellow_chng=yellow_sept-yellow_apr)

# Decline in Manhattan Yellow/Green cab trips was 3.79x the increase in Uber trips between Apr and Sept 2014
# NOTE(review): hard-coded constants presumably copied from apr_sept_byBoro -- confirm
(2272+1159075)/306657

# Analyze Manhattan census tracts where Uber strongest, and see if Yellow cab declined
# 288 Manhattan census tracts --> filter to 211 that had at least 1000 total Uber trips
# NOTE(review): the comment (and the plot titles below) say "at least 1,000" but
# the code filters on uber_total >= 100 -- confirm which threshold is intended
manhattan <- all_trips %>% filter(boro=="Manhattan", uber_total>=100) %>% mutate(uber_chng_pct=((uber_sept)/(uber_apr)-1)*100, taxi_chng_pct=((yellow_sept+green_sept)/(yellow_apr+green_apr)-1)*100)

g1 <- ggplot(data=manhattan, aes(x=uber_chng_pct, y=taxi_chng_pct))+
  geom_point()+geom_smooth()+
  xlab("% Change in Uber Trips")+ylab("% Change in Yellow/Green Taxi Trips")+
  ggtitle("Uber vs. Taxis\nManhattan census tracts with minimum of 1,000 Uber trips\nApr-Sept 2014")
g1
ggsave("uber_vs_taxis_manhattan.png")

# Same scatter, with point size encoding total Uber volume per tract
g2 <- ggplot(data=manhattan, aes(x=uber_chng_pct, y=taxi_chng_pct, size=uber_total))+
  geom_point()+geom_smooth()+
  xlab("% Change in Uber Trips")+ylab("% Change in Yellow/Green Taxi Trips")+
  ggtitle("Uber vs. Taxis\nManhattan census tracts with minimum of 1,000 Uber trips\nApr-Sept 2014")
g2
ggsave("uber_vs_taxis_manhattan_sized.png")

## Analyze by race, age, language, etc.
censusData <- read_csv("census_data_by_nyc_tract.csv")
test <- cbind(all_trips, censusData[match(all_trips$geoid, censusData$geoid),])
/uber-nyc/geo_trip_analysis.R
no_license
andrewflowers/data-analysis
R
false
false
6,068
r
# Uber and taxi trip analysis by geography # Andrew Flowers setwd("~/data-analysis/uber-nyc/") require(readr) require(stringr) require(lubridate) require(dplyr) require(ggplot2) require(reshape2) # Boro codes boroCodes <- data.frame(codes=c(5, 47, 61, 81, 85), name=c("Bronx", "Brooklyn", "Manhattan", "Queens", "Staten Island")) ## Aggregate trip data (Apr-Sept) agg_trips <- read_csv("for_analysisv2.csv") carl_coding <- read_csv("carl_manhattan_coding.csv") # Clean names names(agg_trips) <- tolower(names(agg_trips)) names(agg_trips)[8:10] <- c("uber_total", "green_total", "yellow_total") agg_trips$cab_total <- agg_trips$green_total + agg_trips$yellow_total agg_trips$boro <- boroCodes[match(agg_trips$countyfp, boroCodes$codes),]$name agg_trips$cbd <- carl_coding[match(agg_trips$geoid, carl_coding$GEOID),]$CBD length(unique(agg_trips$geoid)) # 2165 unique geoids agg_trips <- agg_trips %>% mutate(airport=ifelse(tractce %in% c(71600, 33100) & boro=="Queens", TRUE, FALSE)) table(agg_trips$boro) # Trip data for Apr and Sept 2014 separately apr_sept_trips <- data.frame() # Six csv files storied in sub-directory `/geo-trip data` include: # "green_data_april2014.csv" "green_data_sept2014.csv" "taxi_data_april2014.csv" # "taxi_data_sept2014.csv" "uber_data_april2014.csv" "uber_data_sept2014.csv" list.files("./geo-trip-data/") # Aggregate those six files into a master file of the Apr and Sept trips by geoid for (f in list.files("./geo-trip-data/")){ temp <- read_csv(paste0("./geo-trip-data/",f)) temp$old_name <- f apr_sept_trips <- rbind(apr_sept_trips, temp) } length(unique((apr_sept_trips$geoid))) # 2923 unique geoids fileNames <- data.frame(old_name=unique(apr_sept_trips$old_name), new_name=c("green_apr", "green_sept", "yellow_apr", "yellow_sept", "uber_apr", "uber_sept")) apr_sept_trips$file <- fileNames[match(apr_sept_trips$old_name, fileNames$old_name),]$new_name table(apr_sept_trips$file, useNA = "ifany") length(common_tracts <- intersect(agg_trips$geoid, 
apr_sept_trips$geoid)) # 2160 common geoids between two data sets apr_sept_trips$boro <- agg_trips[match(apr_sept_trips$geoid, agg_trips$geoid),]$boro table(apr_sept_trips$boro, useNA = "ifany") length(common_tracts <- intersect(agg_trips$geoid, apr_sept_trips[!is.na(apr_sept_trips$boro),]$geoid)) # Investigate apr_sept_trips where no boro is found # Presumably geoid is outside of NYC # View(apr_sept_trips %>% filter(is.na(boro))) # Might want to investigate where these geoids are exactly -- outside NYC, right? apr_sept_trips_nyc <- apr_sept_trips %>% filter(!is.na(boro)) apr_sept_trips_cast <- dcast(data=apr_sept_trips_nyc, formula=geoid~file, value.var="total") # Create `all_trips` data frame includes data by 2160 geoids (census tracts) in NYC all_trips <- agg_trips %>% right_join(apr_sept_trips_cast, by="geoid") all_trips[is.na(all_trips)] <- 0 agg_byBoro <- all_trips %>% group_by(boro) %>% summarize(uber_total=sum(uber_total, na.rm=T), green_total=sum(green_total, na.rm=T), yellow_total=sum(yellow_total, na.rm=T), taxi_total=green_total+yellow_total, total_trips=uber_total+taxi_total) # 22% of Uber pickups are outside of Manhattan sum(agg_byBoro[agg_byBoro$boro!="Manhattan",]$uber_total)/sum(agg_byBoro$uber_total) # 14% of taxi pickups are outside of Manhattan sum(agg_byBoro[agg_byBoro$boro!="Manhattan",]$taxi_total, na.rm=T)/sum(agg_byBoro$taxi_total, na.rm=T) # 4.5% of Uber pickups are from airports sum(all_trips[all_trips$airport==TRUE,]$uber_total, na.rm=T)/sum(all_trips$uber_total, na.rm=T) # 3.8% of taxi pickups are from airports sum(all_trips[all_trips$airport==TRUE,]$cab_total, na.rm=T)/sum(all_trips$cab_total, na.rm=T) # 63% of Uber trips are in the Central Business District (CBD) sum(all_trips[all_trips$cbd==1,]$uber_total, na.rm=T)/sum(all_trips$uber_total, na.rm=T) # 62% of taxi trips are in the Central Business District (CBD) sum(all_trips[all_trips$cbd==1,]$cab_total, na.rm=T)/sum(all_trips$cab_total, na.rm=T) apr_sept_byBoro <- all_trips %>% 
group_by(boro) %>% summarize(uber_apr=sum(uber_apr, na.rm=T), uber_sept=sum(uber_sept, na.rm=T), green_apr=sum(green_apr, na.rm=T), green_sept=sum(green_sept, na.rm=T), yellow_apr=sum(yellow_apr, na.rm=T), yellow_sept=sum(yellow_sept, na.rm=T), uber_chng=uber_sept-uber_apr, green_chng=green_sept-green_apr, yellow_chng=yellow_sept-yellow_apr) # Decline in Manhattan Yellow/Green cab trips was 3.79x the increase in Uber trips between Apr and Sept 2014 (2272+1159075)/306657 # Analyze Manhattan census tracts where Uber strongest, and see if Yellow cab declined # 288 Manhattan census tracts --> filter to 211 that had at least 1000 total Uber trips manhattan <- all_trips %>% filter(boro=="Manhattan", uber_total>=100) %>% mutate(uber_chng_pct=((uber_sept)/(uber_apr)-1)*100, taxi_chng_pct=((yellow_sept+green_sept)/(yellow_apr+green_apr)-1)*100) g1 <- ggplot(data=manhattan, aes(x=uber_chng_pct, y=taxi_chng_pct))+ geom_point()+geom_smooth()+ xlab("% Change in Uber Trips")+ylab("% Change in Yellow/Green Taxi Trips")+ ggtitle("Uber vs. Taxis\nManhattan census tracts with minimum of 1,000 Uber trips\nApr-Sept 2014") g1 ggsave("uber_vs_taxis_manhattan.png") g2 <- ggplot(data=manhattan, aes(x=uber_chng_pct, y=taxi_chng_pct, size=uber_total))+ geom_point()+geom_smooth()+ xlab("% Change in Uber Trips")+ylab("% Change in Yellow/Green Taxi Trips")+ ggtitle("Uber vs. Taxis\nManhattan census tracts with minimum of 1,000 Uber trips\nApr-Sept 2014") g2 ggsave("uber_vs_taxis_manhattan_sized.png") ## Analyze by race, age, language, etc. censusData <- read_csv("census_data_by_nyc_tract.csv") test <- cbind(all_trips, censusData[match(all_trips$geoid, censusData$geoid),])
\name{imgAverageShrink}
\alias{imgAverageShrink}
\title{Shrink an image}
\description{
	This function shrinks an image using the average and returns a new image.
}
\usage{imgAverageShrink(imgdata, x_scale, y_scale)}
\arguments{
	\item{imgdata}{The image}
	\item{x_scale}{The horizontal scale factor}
	\item{y_scale}{The vertical scale factor}
}
\value{
	Returns an imagedata object.
}
\examples{
	\dontrun{
		x <- readJpeg(system.file("samples", "violet.jpg", package="biOps"))
		y <- imgAverageShrink(x, 0.5, 0.5)
	}
}
\note{
	The scale factors are expected to be less than 1.
}
\seealso{
	\code{\link{imgMedianShrink}}
	\code{\link{imgNearestNeighborScale}}
	\code{\link{imgBilinearScale}}
	\code{\link{imgCubicScale}}
}
\keyword{math}
/08_grad_project/Deliverables/biOps/man/imgAverageShrink.Rd
permissive
blairg23/pattern-recognition
R
false
false
736
rd
\name{imgAverageShrink} \alias{imgAverageShrink} \title{Shrink an image} \description{ This function shrinks an image using the average and returns a new image. } \usage{imgAverageShrink(imgdata, x_scale, y_scale)} \arguments{ \item{imgdata}{The image} \item{x_scale}{The horizontal scale factor} \item{y_scale}{The vertical scale factor} } \value{ return an imagedata object } \examples{ \dontrun{ x <- readJpeg(system.file("samples", "violet.jpg", package="biOps")) y <- imgAverageShrink(x, 0.5, 0.5) } } \note{ The scale factors are expected to be less than 1. } \seealso{ \code{\link{imgMedianShrink}} \code{\link{imgNearestNeighborScale}} \code{\link{imgBilinearScale}} \code{\link{imgCubicScale}} } \keyword{math}
######################################################
# Purpose: Automatically change variable names and places where they are referred to in data tables, metadata tables and scripts
# Inputs: - all data tables, all metadata tables and all .R or .m files in all directories of scripts
#         - old names and new names as specified in "metadata/archive/variable_names_polishing.tx (to enter by hand, one by one, be careful at comments written in file)
# Outputs: polished files
# NOTES: Same changes need to be made in diagram of entity relationship and paper
# Developed by: Valentine Herrmann ( HerrmannV@si.edu) in January 2018
# R version 3.4.2
######################################################

rm(list = ls())

# What column names are we changing, and to what?
# old.variable.name is a regular expression (\b = word boundaries), so only
# whole-word occurrences are matched.
old.variable.name <- "\\bANPP_litterfall_3\\b"
new.variable.name <- "ANPP_litterfall_0"

# Get tables path and name
tables_filenames <- list.files("data", pattern = "\\.csv$", full.names = T)

# Get metadata path and name
metadata_filenames <- list.files("metadata", pattern = "\\.csv$", full.names = T)

# Get all scripts path and names (.R and MATLAB .m files)
all.scripts <- c(list.files("scripts/Figures/", pattern = "\\.R$|\\.m$", full.names = T),
                 list.files("scripts/Generate PLOTS from HISTORY/", pattern = "\\.R$|\\.m$", full.names = T),
                 list.files("scripts/Group_sites_into_areas/", pattern = "\\.R$|\\.m$", full.names = T),
                 list.files("scripts/QA_QC/", pattern = "\\.R$|\\.m$", full.names = T),
                 list.files("scripts/z_archive/", pattern = "\\.R$|\\.m$", full.names = T))

# MAKE CHANGES AND SAVE
# NOTE(review): as currently written this loop only *detects* the old name --
# it stop()s (with no message) on the first file that still contains it, and
# the two lines that would perform the replacement and write the file back are
# commented out. Presumably they are re-enabled by hand when a rename is
# actually applied -- confirm before relying on this script to make changes.
for( f in c(tables_filenames, metadata_filenames,all.scripts) ){
  print(f)
  x <- readLines(f)
  if(any(grepl(old.variable.name,x))) stop()
  # y <- gsub(old.variable.name, new.variable.name, x)
  # cat(y, file=f, sep="\n")
}

print(old.variable.name)
/scripts/z_archive/Changing_variable_names_in_data_metadata_and_scripts.R
permissive
forc-db/ForC
R
false
false
1,838
r
###################################################### # Purpose: Automatically change variable names and places where they are refered to in data tables, metadata tables and scripts # Inputs: - all data tables, all metadata tables and all .R or .m files in all directories of scripts # - old names and new names as specified in "metadata/archive/variable_names_polishing.tx (to enter by hand, one by one, be careful at comments written in file) # outputs: polished files # NOTES: Same changes need to be made in diagram of entity relationship and paper # Developped by: Valentine Herrmann ( HerrmannV@si.edu) in Januaray 2018 # R version 3.4.2 ###################################################### rm(list = ls()) # what column names are we chaning and to what ? old.variable.name <- "\\bANPP_litterfall_3\\b" new.variable.name <- "ANPP_litterfall_0" # Get tables path and name tables_filenames <- list.files("data", pattern = "\\.csv$", full.names = T) # Get metadata path and name metadata_filenames <- list.files("metadata", pattern = "\\.csv$", full.names = T) # Get all scripts path and names all.scripts <- c(list.files("scripts/Figures/", pattern = "\\.R$|\\.m$", full.names = T), list.files("scripts/Generate PLOTS from HISTORY/", pattern = "\\.R$|\\.m$", full.names = T), list.files("scripts/Group_sites_into_areas/", pattern = "\\.R$|\\.m$", full.names = T), list.files("scripts/QA_QC/", pattern = "\\.R$|\\.m$", full.names = T), list.files("scripts/z_archive/", pattern = "\\.R$|\\.m$", full.names = T)) # MAKE CHANGES AND SAVE for( f in c(tables_filenames, metadata_filenames,all.scripts) ){ print(f) x <- readLines(f) if(any(grepl(old.variable.name,x))) stop() # y <- gsub(old.variable.name, new.variable.name, x) # cat(y, file=f, sep="\n") } print(old.variable.name)
#----------------------------
# Bivariate Bernstein copula
#----------------------------
# Bernstein-polynomial smoothing of the empirical copula of a bivariate
# sample, plus tools for the marginals, conditional simulation, quantile
# regression curves and dependence measures.
#
# Conventions used throughout (see a_leer_primero()):
#   muestra      -- n x 2 matrix of observations (x_i, y_i), read as a GLOBAL
#                   variable by the marginal/simulation functions.
#   matriz.copem -- (n+1) x (n+1) empirical-copula matrix, built once with
#                   matriz.copem <- genmat.copem(muestra); read as a GLOBAL
#                   by the copula functions.
#   tolerancia   -- numeric tolerance for the uniroot() inversions (defined at
#                   the bottom of this file).

# Print usage instructions (in Spanish, by design) for preparing the data
# before calling the other functions.
a_leer_primero <- function() {
  cat(noquote("Los datos bivariados (x1,y1),...,(xn,yn) deben estar capturados en una matriz"), "\n")
  cat(noquote("de n x 2 con el nombre <muestra>"), "\n")
  cat(noquote("Genere la matriz de la c'opula emp'irica mediante la siguiente instrucci'on:"), "\n")
  cat(noquote("matriz.copem <- genmat.copem(muestra)"), "\n")
  cat(noquote("Hecho lo anterior puede proceder a los c'alculos."), "\n")
  cat(noquote("Fin instructivo."), "\n")
}

# Bernstein polynomial of order `orden` for `funcion`, evaluated at scalar x.
Bpol.fun <- function(x, funcion, orden) sum(funcion((0:orden)/orden) * dbinom(0:orden, orden, x))

# Same, but the function is given by its values `valores.emp` at the
# equispaced grid 0, 1/m, ..., 1 (m = length(valores.emp) - 1).
Bpol.fun.emp <- function(x, valores.emp) sum(valores.emp * dbinom(0:(length(valores.emp) - 1), length(valores.emp) - 1, x))

# Vectorized wrapper of Bpol.fun over the points in `valores`.
Bpol.valores <- function(valores, funcion, orden) {
  vec.valores <- rep(0, length(valores))
  for (j in 1:(length(valores))) {
    vec.valores[j] <- Bpol.fun(valores[j], funcion, orden)
  }
  return(vec.valores)
}

# Vectorized wrapper of Bpol.fun.emp over the points in `valores`.
Bpol.valores.emp <- function(valores, valores.emp) {
  vec.valores <- rep(0, length(valores))
  for (j in 1:(length(valores))) {
    vec.valores[j] <- Bpol.fun.emp(valores[j], valores.emp)
  }
  return(vec.valores)
}

# Bernstein copula C(u, v) built from the GLOBAL matrix `matriz.copem`.
copula.Bernshtein.emp <- function(u, v) sum(matriz.copem * (dbinom(0:(dim(matriz.copem)[1] - 1), dim(matriz.copem)[1] - 1, u) %*% t(dbinom(0:(dim(matriz.copem)[1] - 1), dim(matriz.copem)[1] - 1, v))))

# Partial derivative dC(u,v)/du of the Bernstein copula, i.e. the conditional
# distribution of V given U = u.
cv.du <- function(u, v) (dim(matriz.copem)[1] - 1) * sum(matriz.copem * ((dbinom(-1:(dim(matriz.copem)[1] - 2), dim(matriz.copem)[1] - 2, u) - dbinom(0:(dim(matriz.copem)[1] - 1), dim(matriz.copem)[1] - 2, u) * c(-1, rep(1, dim(matriz.copem)[1] - 1))) %*% t(dbinom(0:(dim(matriz.copem)[1] - 1), dim(matriz.copem)[1] - 1, v))))

# Root-finding helper: cv.du(u, v) - a, with ua.vec = c(u, a).
cv.du.aux <- function(v, ua.vec) cv.du(ua.vec[1], v) - ua.vec[2]

# Inverse of v -> dC(u,v)/du at level a (quantile of V given U = u).
cv.du.inv <- function(u, a) uniroot(cv.du.aux, interval = c(0, 1), ua.vec = c(u, a), tol = tolerancia)$root

# Bernstein copula density c(u, v) = d^2 C / (du dv).
dcopula.Bernshtein.emp <- function(u, v) ((dim(matriz.copem)[1] - 1)^2) * sum(matriz.copem * ((dbinom(-1:(dim(matriz.copem)[1] - 2), dim(matriz.copem)[1] - 2, u) - dbinom(0:(dim(matriz.copem)[1] - 1), dim(matriz.copem)[1] - 2, u) * c(-1, rep(1, dim(matriz.copem)[1] - 1))) %*% t(dbinom(-1:(dim(matriz.copem)[1] - 2), dim(matriz.copem)[1] - 2, v) - dbinom(0:(dim(matriz.copem)[1] - 1), dim(matriz.copem)[1] - 2, v) * c(-1, rep(1, dim(matriz.copem)[1] - 1)))))

# Joint density estimate f(x,y) = c(F(x), G(y)) * f(x) * g(y), using the
# Bernstein estimates of the marginals.
densidad.Bernshtein.emp <- function(x, y) dcopula.Bernshtein.emp(Fn.Bernshtein(x), Gn.Bernshtein(y))*fn.Bernshtein(x)*gn.Bernshtein(y)

# Conditional density estimate of Y given X = x.
densidad.Bernshtein.emp.ydadox <- function(y, x) dcopula.Bernshtein.emp(Fn.Bernshtein(x), Gn.Bernshtein(y))*gn.Bernshtein(y)

# Schweizer-Wolff sigma dependence measure: 12 times the (discretized) L1
# distance between the empirical copula and the independence copula uv.
dep.schweizer <- function (mat.xy){
  copem <- genmat.copem(mat.xy)
  n <- dim(mat.xy)[1]
  mat1 <- matrix(seq(from = 0, to = 1, length = (n + 1)), nrow = (n + 1), ncol = 1)
  mat2 <- matrix(seq(from = 0, to = 1, length = (n + 1)), ncol = (n + 1), nrow = 1)
  copula.pi <- mat1 %*% mat2   # independence copula on the grid
  sigma.schweizer <- (12/(n^2)) * sum(abs(copem - copula.pi))
  return(sigma.schweizer)
}

# Collapse ties: for each repeated value in column 1 keep a single row whose
# second coordinate is the median of the tied block; then repeat with the
# columns swapped. Returns the cleaned matrix ordered by its first column.
depurar.xy <- function(mat.xy){
  n <- dim(mat.xy)[1]
  matriz.ord <- mat.xy
  orden <- order(mat.xy[, 1])
  for (i in 1:n) {
    matriz.ord[i, ] <- mat.xy[orden[i], ]
  }
  i <- 1
  rango.bloque <- which(matriz.ord[, 1] == matriz.ord[i, 1])
  matriz.dep <- matrix(c(matriz.ord[i, 1], median(matriz.ord[rango.bloque, 2])), ncol = 2, nrow = 1)
  i <- max(rango.bloque) + 1
  while (i <= n) {
    rango.bloque <- which(matriz.ord[, 1] == matriz.ord[i, 1])
    matriz.dep <- rbind(matriz.dep, c(matriz.ord[i, 1], median(matriz.ord[rango.bloque, 2])))
    i <- max(rango.bloque) + 1
  }
  # Swap the columns and collapse ties in the (former) y variable.
  mat.xy <- matriz.dep[, 2:1]
  n <- dim(mat.xy)[1]
  matriz.ord <- mat.xy
  orden <- order(mat.xy[, 1])
  for (i in 1:n) {
    matriz.ord[i, ] <- mat.xy[orden[i], ]
  }
  i <- 1
  rango.bloque <- which(matriz.ord[, 1] == matriz.ord[i, 1])
  matriz.dep <- matrix(c(matriz.ord[i, 1], median(matriz.ord[rango.bloque, 2])), ncol = 2, nrow = 1)
  i <- max(rango.bloque) + 1
  while (i <= n) {
    rango.bloque <- which(matriz.ord[, 1] == matriz.ord[i, 1])
    matriz.dep <- rbind(matriz.dep, c(matriz.ord[i, 1], median(matriz.ord[rango.bloque, 2])))
    i <- max(rango.bloque) + 1
  }
  # Swap back and return sorted by the first column.
  mat.xy <- matriz.dep[, 2:1]
  n <- dim(mat.xy)[1]
  matriz.ord <- mat.xy
  orden <- order(mat.xy[, 1])
  for (i in 1:n) {
    matriz.ord[i, ] <- mat.xy[orden[i], ]
  }
  return(matriz.ord)
}

# Derivative (with respect to u) of the Bernstein quantile-function estimate
# built from the empirical values `valores.emp`.
dFn.inv.Bernshtein <- function(u, valores.emp){
  x <- sort(valores.emp)
  n <- length(x)
  xm <- rep(0, n + 1)
  for (j in 2:n) {
    xm[j] <- (x[j - 1] + x[j])/2   # midpoints of consecutive order statistics
  }
  xm[1] <- x[1]
  xm[n + 1] <- x[n]
  resultado <- x[n] * (u^(n - 1)) - x[1] * ((1 - u)^(n - 1))
  resultado <- resultado + sum(xm[2:n] * (dbinom(0:(n - 2), n - 1, u) - dbinom(1:(n - 1), n - 1, u)))
  resultado <- n * resultado
  return(resultado)
}

# Diagonal section of an empirical-copula matrix: pairs (i/m, C(i/m, i/m)).
diag.copem <- function(matcopem){
  m <- ncol(matcopem) - 1
  return(cbind((0:m)/m, diag(matcopem)))
}

# Rank-standardize the sample to (0, 1] (pseudo-observations).
estandarizar <- function(muestra) apply(muestra, 2, rank)/nrow(muestra) # compute the standardized sample

# Bernstein density estimate of the first margin: f(x) = 1 / Q'(F(x)).
fn.Bernshtein <- function(x) 1/dFn.inv.Bernshtein(Fn.Bernshtein(x), muestra[, 1])

# Bernstein CDF estimate of the first margin, obtained by numerically
# inverting the Bernstein quantile function (reads the GLOBAL `muestra`).
Fn.Bernshtein <- function(x) uniroot(Fn.Bernshtein.aux, interval = c(0, 1), xdada = x, tol = tolerancia, extendInt="yes", trace=2)$root

# Root-finding helper: Q(u) - x for the first margin.
Fn.Bernshtein.aux <- function(u, xdada) Fn.inv.Bernshtein(u, muestra[, 1]) - xdada

# Plain empirical CDF of `datos` evaluated at x.
Fn.emp <- function(x, datos) mean(datos <= x)

# Bernstein estimate of the quantile function built from `valores.emp`.
Fn.inv.Bernshtein <- function(u, valores.emp){
  x <- sort(valores.emp)
  n <- length(x)
  xm <- rep(0, n + 1)
  for (j in 2:n) {
    xm[j] <- (x[j - 1] + x[j])/2
  }
  xm[1] <- x[1]
  xm[n + 1] <- x[n]
  return(sum(xm * dbinom(0:n, n, u)))
}

# Empirical-copula matrix ((n+1) x (n+1)) of a bivariate sample, evaluated on
# the grid {0, 1/n, ..., 1}^2.
genmat.copem <- function(mat.xy){
  n <- dim(mat.xy)[1]
  mat.copem <- matrix(0, ncol = (n + 1), nrow = (n + 1))
  mat.xyord <- mat.xy
  orden <- order(mat.xy[, 1])
  for (i in 1:n) {
    mat.xyord[i, ] <- mat.xy[orden[i], ]
  }
  mat.copem[n + 1, ] <- (0:n)/n
  y.ord <- sort(mat.xyord[, 2])
  for (i in 1:(n - 1)) {
    columna <- (((mat.xyord[, 2][i] <= y.ord)) * 1)/n
    mat.copem[i + 1, ] <- mat.copem[i, ] + c(0, columna)
  }
  return(mat.copem)
}

# Evaluate the Bernstein copula on the grid u.vec x v.vec.
genmat.copem.Bernshtein <- function(u.vec, v.vec){
  copula.B <- matrix(0, nrow = length(u.vec), ncol = length(v.vec))
  for (i in 1:(length(u.vec))) {
    for (j in 1:(length(v.vec))) {
      copula.B[i, j] <- copula.Bernshtein.emp(u.vec[i], v.vec[j])
    }
  }
  return(list(u = u.vec, v = v.vec, copemB = copula.B))
}

# Evaluate the Bernstein copula density on the grid u.vec x v.vec.
genmat.dcopem.Bernshtein <- function(u.vec, v.vec){
  dcopula.B <- matrix(0, nrow = length(u.vec), ncol = length(v.vec))
  for (i in 1:(length(u.vec))) {
    for (j in 1:(length(v.vec))) {
      dcopula.B[i, j] <- dcopula.Bernshtein.emp(u.vec[i], v.vec[j])
    }
  }
  return(list(u = u.vec, v = v.vec, dcopemB = dcopula.B))
}

# Evaluate the estimated joint density on the grid x.vec x y.vec.
genmat.densidad.Bernshtein <- function(x.vec, y.vec){
  densidad <- matrix(0, nrow = length(x.vec), ncol = length(y.vec))
  for (i in 1:(length(x.vec))) {
    for (j in 1:(length(y.vec))) {
      densidad[i, j] <- densidad.Bernshtein.emp(x.vec[i], y.vec[j])
    }
  }
  return(list(x = x.vec, y = y.vec, densidad = densidad))
}

genmat.diagem <- function(matriz){
  #
  # Input: (n x 2) matrix whose rows are observations of the random vector (X,Y).
  # Output: ((n+1) x 2) matrix with the values (u, dn(u)) of the empirical diagonal.
  #
  n <- dim(matriz)[1]   # sample size
  tau <- rep(0, n-1)    # declare subvector of the trajectory tau(1)..tau(n-1)
  x <- matriz[, 1]
  y <- matriz[, 2]
  x.orden <- rank(x)    # ranks of the x values
  y.ordx <- y           # declare y reordered according to x
  for(j in 1:n){        # reorder y according to x
    y.ordx[x.orden[j]] <- y[j]
  }
  y <- sort(y)          # sort y in increasing order
  for(j in 1:(n-1)){    # compute the vector tau[1:(n-1)]
    acumulador <- 0
    for(k in 1:j){
      acumulador <- acumulador + 1*(y.ordx[k] <= y[j])
    }
    tau[j] <- acumulador
  }
  tau <- c(0, tau, n)/n # full vector tau[0:n]
  mat.diag <- matrix(0, ncol = 2, nrow = (n+1))
  mat.diag[ , 1] <- (0:n)/n
  mat.diag[ , 2] <- tau
  return(mat.diag)
}

# Bernstein density / CDF estimates of the second margin (analogous to
# fn.Bernshtein / Fn.Bernshtein; read the GLOBAL `muestra`).
gn.Bernshtein <- function(x) 1/dFn.inv.Bernshtein(Gn.Bernshtein(x), muestra[, 2])
Gn.Bernshtein <- function(x) uniroot(Gn.Bernshtein.aux, interval = c(0, 1), xdada = x, tol = tolerancia, extendInt="yes", trace=2)$root

# Root-finding helper: Q(u) - x for the second margin.
Gn.Bernshtein.aux <- function(u, xdada) Fn.inv.Bernshtein(u, muestra[, 2]) - xdada

grafica.cotas.diagonal <- function(titulo){
  #
  # Draws a template for plotting u versus the diagonal section d(u),
  # adding the Frechet-Hoeffding bounds and the diagonal of the
  # independence copula Pi.
  #
  # Input: plot title (quoted string)
  #
  plot(c(0, 1),c(0, 1),type = "n", main = titulo, xlab = "u", ylab = "diag(u)")
  lines(c(0, 1),c(0, 1), col = "green")
  lines(c(0, 0.5), c(0, 0), col="green")
  lines(c(0.5, 1), c(0, 1), col = "green")
  u <- seq(from = 0, to = 1, length = 1000)
  lines(u, u^2, col = "orange")   # diagonal of Pi: d(u) = u^2
}

# Quantile-regression curve of Y on X at level `cuantil`, on the data scale.
regresion <- function(x, cuantil) Fn.inv.Bernshtein(regresion.copulaB(Fn.Bernshtein(x), cuantil), muestra[, 2])

# Copula-scale quantile regression: the v solving dC(u,v)/du = cuantil.
regresion.copulaB <- function(u, cuantil) uniroot(cv.du.aux, interval = c(0, 1), ua.vec = c(u, cuantil), tol = tolerancia)$root

# Simulate a sample of (X, Y) from the fitted Bernstein model; returns both
# the data-scale simulations and the underlying copula-scale pairs.
simula.Bernshtein <- function(tam.muestra){
  sim.copula <- simula.copula.Bernshtein(tam.muestra)
  x <- sapply(sim.copula[, 1], Fn.inv.Bernshtein, valores.emp = muestra[, 1])
  y <- sapply(sim.copula[, 2], Fn.inv.Bernshtein, valores.emp = muestra[, 2])
  simulaciones <- cbind(x, y)
  return(list(sim.xy = simulaciones, sim.copula = sim.copula))
}

# Simulate Y conditional on X = x by the conditional-inversion method.
simula.Bernshtein.condicional <- function(x, tam.muestra){
  uu <- runif(tam.muestra)
  Fx <- Fn.Bernshtein(x)
  v <- sapply(uu, cv.du.inv, u = Fx)
  y <- sapply(v, Fn.inv.Bernshtein, valores.emp = muestra[, 2])
  return(y)
}

# Simulate (U, V) pairs from the Bernstein copula via conditional inversion.
simula.copula.Bernshtein <- function(tam.muestra){
  uu <- runif(tam.muestra)
  tt <- runif(tam.muestra)
  vv <- mapply(cv.du.inv, u = uu, a = tt)
  return(cbind(uu, vv))
}

# Simulate from a piecewise-uniform test copula defined on a 3x3 grid, with
# mixing parameter theta.
simula.copula.unif3 <- function(tam.muestra, theta){
  n <- tam.muestra
  m <- matrix(0, ncol = 2, nrow = n)
  m[, 1] <- runif(n)
  a <- rbinom(n, 1, theta)
  b <- 2 * rbinom(n, 1, 1/2) - 1
  m[, 2] <- (m[, 1] <= (1/3)) * (runif(n, 0, 2/3) * a + runif(n, 2/3, 1) * (1 - a)) + (((1/3) < m[, 1]) & (m[, 1] <= (2/3))) * (runif(n, 1/3, 1) * a + runif(n, 0, 1/3) * (1 - a)) + ((2/3) < m[, 1]) * (runif(n, 1/3, 2/3) + a * b/3)
  return(m)
}

# Map the GLOBAL sample to copula scale via the Bernstein marginal CDFs.
transf.muestra.copula <- function (){
  u <- sapply(muestra[, 1], Fn.Bernshtein)
  v <- sapply(muestra[, 2], Gn.Bernshtein)
  return(cbind(u, v))
}

# Numerical tolerance used by every uniroot() inversion above.
tolerancia <- 0.00001
/Bernstein.R
no_license
vargaslab/temporal-univariate-Latin-Hypercube
R
false
false
11,026
r
#---------------------------- # Cópula Bernstein bivariada #---------------------------- a_leer_primero <- function() { cat(noquote("Los datos bivariados (x1,y1),...,(xn,yn) deben estar capturados en una matriz"), "\n") cat(noquote("de n x 2 con el nombre <muestra>"), "\n") cat(noquote("Genere la matriz de la c'opula emp'irica mediante la siguiente instrucci'on:"), "\n") cat(noquote("matriz.copem <- genmat.copem(muestra)"), "\n") cat(noquote("Hecho lo anterior puede proceder a los c'alculos."), "\n") cat(noquote("Fin instructivo."), "\n") } Bpol.fun <- function(x, funcion, orden) sum(funcion((0:orden)/orden) * dbinom(0:orden, orden, x)) Bpol.fun.emp <- function(x, valores.emp) sum(valores.emp * dbinom(0:(length(valores.emp) - 1), length(valores.emp) - 1, x)) Bpol.valores <- function(valores, funcion, orden) { vec.valores <- rep(0, length(valores)) for (j in 1:(length(valores))) { vec.valores[j] <- Bpol.fun(valores[j], funcion, orden) } return(vec.valores) } Bpol.valores.emp <- function(valores, valores.emp) { vec.valores <- rep(0, length(valores)) for (j in 1:(length(valores))) { vec.valores[j] <- Bpol.fun.emp(valores[j], valores.emp) } return(vec.valores) } copula.Bernshtein.emp <- function(u, v) sum(matriz.copem * (dbinom(0:(dim(matriz.copem)[1] - 1), dim(matriz.copem)[1] - 1, u) %*% t(dbinom(0:(dim(matriz.copem)[1] - 1), dim(matriz.copem)[1] - 1, v)))) cv.du <- function(u, v) (dim(matriz.copem)[1] - 1) * sum(matriz.copem * ((dbinom(-1:(dim(matriz.copem)[1] - 2), dim(matriz.copem)[1] - 2, u) - dbinom(0:(dim(matriz.copem)[1] - 1), dim(matriz.copem)[1] - 2, u) * c(-1, rep(1, dim(matriz.copem)[1] - 1))) %*% t(dbinom(0:(dim(matriz.copem)[1] - 1), dim(matriz.copem)[1] - 1, v)))) cv.du.aux <- function(v, ua.vec) cv.du(ua.vec[1], v) - ua.vec[2] cv.du.inv <- function(u, a) uniroot(cv.du.aux, interval = c(0, 1), ua.vec = c(u, a), tol = tolerancia)$root dcopula.Bernshtein.emp <- function(u, v) ((dim(matriz.copem)[1] - 1)^2) * sum(matriz.copem * 
((dbinom(-1:(dim(matriz.copem)[1] - 2), dim(matriz.copem)[1] - 2, u) - dbinom(0:(dim(matriz.copem)[1] - 1), dim(matriz.copem)[1] - 2, u) * c(-1, rep(1, dim(matriz.copem)[1] - 1))) %*% t(dbinom(-1:(dim(matriz.copem)[1] - 2), dim(matriz.copem)[1] - 2, v) - dbinom(0:(dim(matriz.copem)[1] - 1), dim(matriz.copem)[1] - 2, v) * c(-1, rep(1, dim(matriz.copem)[1] - 1))))) densidad.Bernshtein.emp <- function(x, y) dcopula.Bernshtein.emp(Fn.Bernshtein(x), Gn.Bernshtein(y))*fn.Bernshtein(x)*gn.Bernshtein(y) densidad.Bernshtein.emp.ydadox <- function(y, x) dcopula.Bernshtein.emp(Fn.Bernshtein(x), Gn.Bernshtein(y))*gn.Bernshtein(y) dep.schweizer <- function (mat.xy){ copem <- genmat.copem(mat.xy) n <- dim(mat.xy)[1] mat1 <- matrix(seq(from = 0, to = 1, length = (n + 1)), nrow = (n + 1), ncol = 1) mat2 <- matrix(seq(from = 0, to = 1, length = (n + 1)), ncol = (n + 1), nrow = 1) copula.pi <- mat1 %*% mat2 sigma.schweizer <- (12/(n^2)) * sum(abs(copem - copula.pi)) return(sigma.schweizer) } depurar.xy <- function(mat.xy){ n <- dim(mat.xy)[1] matriz.ord <- mat.xy orden <- order(mat.xy[, 1]) for (i in 1:n) { matriz.ord[i, ] <- mat.xy[orden[i], ] } i <- 1 rango.bloque <- which(matriz.ord[, 1] == matriz.ord[i, 1]) matriz.dep <- matrix(c(matriz.ord[i, 1], median(matriz.ord[rango.bloque, 2])), ncol = 2, nrow = 1) i <- max(rango.bloque) + 1 while (i <= n) { rango.bloque <- which(matriz.ord[, 1] == matriz.ord[i, 1]) matriz.dep <- rbind(matriz.dep, c(matriz.ord[i, 1], median(matriz.ord[rango.bloque, 2]))) i <- max(rango.bloque) + 1 } mat.xy <- matriz.dep[, 2:1] n <- dim(mat.xy)[1] matriz.ord <- mat.xy orden <- order(mat.xy[, 1]) for (i in 1:n) { matriz.ord[i, ] <- mat.xy[orden[i], ] } i <- 1 rango.bloque <- which(matriz.ord[, 1] == matriz.ord[i, 1]) matriz.dep <- matrix(c(matriz.ord[i, 1], median(matriz.ord[rango.bloque, 2])), ncol = 2, nrow = 1) i <- max(rango.bloque) + 1 while (i <= n) { rango.bloque <- which(matriz.ord[, 1] == matriz.ord[i, 1]) matriz.dep <- rbind(matriz.dep, 
c(matriz.ord[i, 1], median(matriz.ord[rango.bloque, 2]))) i <- max(rango.bloque) + 1 } mat.xy <- matriz.dep[, 2:1] n <- dim(mat.xy)[1] matriz.ord <- mat.xy orden <- order(mat.xy[, 1]) for (i in 1:n) { matriz.ord[i, ] <- mat.xy[orden[i], ] } return(matriz.ord) } dFn.inv.Bernshtein <- function(u, valores.emp){ x <- sort(valores.emp) n <- length(x) xm <- rep(0, n + 1) for (j in 2:n) { xm[j] <- (x[j - 1] + x[j])/2 } xm[1] <- x[1] xm[n + 1] <- x[n] resultado <- x[n] * (u^(n - 1)) - x[1] * ((1 - u)^(n - 1)) resultado <- resultado + sum(xm[2:n] * (dbinom(0:(n - 2), n - 1, u) - dbinom(1:(n - 1), n - 1, u))) resultado <- n * resultado return(resultado) } diag.copem <- function(matcopem){ m <- ncol(matcopem) - 1 return(cbind((0:m)/m, diag(matcopem))) } estandarizar <- function(muestra) apply(muestra, 2, rank)/nrow(muestra) # calcular muestra estandarizada fn.Bernshtein <- function(x) 1/dFn.inv.Bernshtein(Fn.Bernshtein(x), muestra[, 1]) Fn.Bernshtein <- function(x) uniroot(Fn.Bernshtein.aux, interval = c(0, 1), xdada = x, tol = tolerancia, extendInt="yes", trace=2)$root Fn.Bernshtein.aux <- function(u, xdada) Fn.inv.Bernshtein(u, muestra[, 1]) - xdada Fn.emp <- function(x, datos) mean(datos <= x) Fn.inv.Bernshtein <- function(u, valores.emp){ x <- sort(valores.emp) n <- length(x) xm <- rep(0, n + 1) for (j in 2:n) { xm[j] <- (x[j - 1] + x[j])/2 } xm[1] <- x[1] xm[n + 1] <- x[n] return(sum(xm * dbinom(0:n, n, u))) } genmat.copem <- function(mat.xy){ n <- dim(mat.xy)[1] mat.copem <- matrix(0, ncol = (n + 1), nrow = (n + 1)) mat.xyord <- mat.xy orden <- order(mat.xy[, 1]) for (i in 1:n) { mat.xyord[i, ] <- mat.xy[orden[i], ] } mat.copem[n + 1, ] <- (0:n)/n y.ord <- sort(mat.xyord[, 2]) for (i in 1:(n - 1)) { columna <- (((mat.xyord[, 2][i] <= y.ord)) * 1)/n mat.copem[i + 1, ] <- mat.copem[i, ] + c(0, columna) } return(mat.copem) } genmat.copem.Bernshtein <- function(u.vec, v.vec){ copula.B <- matrix(0, nrow = length(u.vec), ncol = length(v.vec)) for (i in 1:(length(u.vec))) { 
for (j in 1:(length(v.vec))) { copula.B[i, j] <- copula.Bernshtein.emp(u.vec[i], v.vec[j]) } } return(list(u = u.vec, v = v.vec, copemB = copula.B)) } genmat.dcopem.Bernshtein <- function(u.vec, v.vec){ dcopula.B <- matrix(0, nrow = length(u.vec), ncol = length(v.vec)) for (i in 1:(length(u.vec))) { for (j in 1:(length(v.vec))) { dcopula.B[i, j] <- dcopula.Bernshtein.emp(u.vec[i], v.vec[j]) } } return(list(u = u.vec, v = v.vec, dcopemB = dcopula.B)) } genmat.densidad.Bernshtein <- function(x.vec, y.vec){ densidad <- matrix(0, nrow = length(x.vec), ncol = length(y.vec)) for (i in 1:(length(x.vec))) { for (j in 1:(length(y.vec))) { densidad[i, j] <- densidad.Bernshtein.emp(x.vec[i], y.vec[j]) } } return(list(x = x.vec, y = y.vec, densidad = densidad)) } genmat.diagem <- function(matriz){ # # Input: matriz de (n x 2) cuyos renglones son observaciones del vector aleatorio (X,Y). # Output: matriz de (n+1 x 2) con los valores (u,dn(u)) de la diagonal empirica # n <- dim(matriz)[1] # tamanio de muestra tau <- rep(0, n-1) # declarar subvector de trayectoria de tau(1) a tau(n-1) x <- matriz[, 1] y <- matriz[, 2] x.orden <- rank(x) # vector de orden en los valores de x y.ordx <- y # declarando vector de y ordenado en x for(j in 1:n){ # ordenando y de acuerdo a x y.ordx[x.orden[j]] <- y[j] } y <- sort(y) # ordenando y de menor a mayor for(j in 1:(n-1)){ # calculando vector tau[1:n-1] acumulador <- 0 for(k in 1:j){ acumulador <- acumulador + 1*(y.ordx[k] <= y[j]) } tau[j] <- acumulador } tau <- c(0, tau, n)/n # vector completo tau[0:n] mat.diag <- matrix(0, ncol = 2, nrow = (n+1)) mat.diag[ , 1] <- (0:n)/n mat.diag[ , 2] <- tau return(mat.diag) } gn.Bernshtein <- function(x) 1/dFn.inv.Bernshtein(Gn.Bernshtein(x), muestra[, 2]) Gn.Bernshtein <- function(x) uniroot(Gn.Bernshtein.aux, interval = c(0, 1), xdada = x, tol = tolerancia, extendInt="yes", trace=2)$root Gn.Bernshtein.aux <- function(u, xdada) Fn.inv.Bernshtein(u, muestra[, 2]) - xdada grafica.cotas.diagonal <- 
function(titulo){ # # Genera una plantilla para graficar u versus la # secci?n diagonal d(u), agregando cotas de FH y # la diagonal de Pi # # Input: t?tulo del gr?fico (entrecomillado) # plot(c(0, 1),c(0, 1),type = "n", main = titulo, xlab = "u", ylab = "diag(u)") lines(c(0, 1),c(0, 1), col = "green") lines(c(0, 0.5), c(0, 0), col="green") lines(c(0.5, 1), c(0, 1), col = "green") u <- seq(from = 0, to = 1, length = 1000) lines(u, u^2, col = "orange") } regresion <- function(x, cuantil) Fn.inv.Bernshtein(regresion.copulaB(Fn.Bernshtein(x), cuantil), muestra[, 2]) regresion.copulaB <- function(u, cuantil) uniroot(cv.du.aux, interval = c(0, 1), ua.vec = c(u, cuantil), tol = tolerancia)$root simula.Bernshtein <- function(tam.muestra){ sim.copula <- simula.copula.Bernshtein(tam.muestra) x <- sapply(sim.copula[, 1], Fn.inv.Bernshtein, valores.emp = muestra[, 1]) y <- sapply(sim.copula[, 2], Fn.inv.Bernshtein, valores.emp = muestra[, 2]) simulaciones <- cbind(x, y) return(list(sim.xy = simulaciones, sim.copula = sim.copula)) } simula.Bernshtein.condicional <- function(x, tam.muestra){ uu <- runif(tam.muestra) Fx <- Fn.Bernshtein(x) v <- sapply(uu, cv.du.inv, u = Fx) y <- sapply(v, Fn.inv.Bernshtein, valores.emp = muestra[, 2]) return(y) } simula.copula.Bernshtein <- function(tam.muestra){ uu <- runif(tam.muestra) tt <- runif(tam.muestra) vv <- mapply(cv.du.inv, u = uu, a = tt) return(cbind(uu, vv)) } simula.copula.unif3 <- function(tam.muestra, theta){ n <- tam.muestra m <- matrix(0, ncol = 2, nrow = n) m[, 1] <- runif(n) a <- rbinom(n, 1, theta) b <- 2 * rbinom(n, 1, 1/2) - 1 m[, 2] <- (m[, 1] <= (1/3)) * (runif(n, 0, 2/3) * a + runif(n, 2/3, 1) * (1 - a)) + (((1/3) < m[, 1]) & (m[, 1] <= (2/3))) * (runif(n, 1/3, 1) * a + runif(n, 0, 1/3) * (1 - a)) + ((2/3) < m[, 1]) * (runif(n, 1/3, 2/3) + a * b/3) return(m) } transf.muestra.copula <- function (){ u <- sapply(muestra[, 1], Fn.Bernshtein) v <- sapply(muestra[, 2], Gn.Bernshtein) return(cbind(u, v)) } tolerancia <- 
0.00001
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/NAreport.R \name{NAreport} \alias{NAreport} \title{Report NA variables per case} \usage{ NAreport(x, idvar = NULL) } \arguments{ \item{x}{data.frame to be analyzed} \item{idvar}{variable to be used as id} } \description{ Report NA variables per case } \examples{ test <- data.frame( name = c("john","mary","gregor"), x = c(1,NA,NA), y = c(NA,1,1), z = c(1,1,NA)) test2 <- test test2$surname <- c("doe", "foo", "bar") test2 <- test2[c("name","surname","x","y","z")] test NAreport(test, "name") test2 NAreport(test2, c("name", "surname")) }
/man/NAreport.Rd
no_license
strategist922/yapomif
R
false
true
626
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/NAreport.R \name{NAreport} \alias{NAreport} \title{Report NA variables per case} \usage{ NAreport(x, idvar = NULL) } \arguments{ \item{x}{data.frame to be analyzed} \item{idvar}{variable to be used as id} } \description{ Report NA variables per case } \examples{ test <- data.frame( name = c("john","mary","gregor"), x = c(1,NA,NA), y = c(NA,1,1), z = c(1,1,NA)) test2 <- test test2$surname <- c("doe", "foo", "bar") test2 <- test2[c("name","surname","x","y","z")] test NAreport(test, "name") test2 NAreport(test2, c("name", "surname")) }
#' Generate HTML tags used in examples #' #' @param class Class of the main div. #' #' @importFrom htmltools HTML tags #' #' @return HTML tags. #' @export #' #' @examples #' tag_example() tag_example <- function(class = NULL) { style_grid <- paste( "display: grid;", "grid-template-columns: repeat(2, 1fr);", "grid-template-rows: 1fr;", "grid-column-gap: 0px;", "grid-row-gap: 0px;" ) tags$div( class = class, tags$blockquote( tags$p(HTML("&Eacute;coutez !")), tags$p(HTML("Puisqu&rsquo;on allume les &eacute;toiles,")), tags$p(HTML("C&rsquo;est qu&rsquo;elles sont &agrave; quelqu&rsquo;un n&eacute;cessaires ?")), tags$p(HTML("C&rsquo;est que quelqu&rsquo;un d&eacute;sire qu&rsquo;elles soient ?")), tags$br(), tags$p(HTML("Vladimir Ma&iuml;akovski &ndash; &Eacute;coutez !")) ), tags$div( # style = style_grid, tags$div( tags$p( paste(letters, collapse = "") ), tags$p( style = "font-weight: bold;", paste(letters, collapse = "") ), tags$p( style = "font-style: italic;", paste(letters, collapse = "") ) ), tags$div( tags$p( paste(LETTERS, collapse = "") ), tags$p( style = "font-weight: bold;", paste(LETTERS, collapse = "") ), tags$p( style = "font-style: italic;", paste(LETTERS, collapse = "") ) ) ) ) }
/R/tag_example.R
no_license
dreamRs/gfonts
R
false
false
1,529
r
#' Generate HTML tags used in examples #' #' @param class Class of the main div. #' #' @importFrom htmltools HTML tags #' #' @return HTML tags. #' @export #' #' @examples #' tag_example() tag_example <- function(class = NULL) { style_grid <- paste( "display: grid;", "grid-template-columns: repeat(2, 1fr);", "grid-template-rows: 1fr;", "grid-column-gap: 0px;", "grid-row-gap: 0px;" ) tags$div( class = class, tags$blockquote( tags$p(HTML("&Eacute;coutez !")), tags$p(HTML("Puisqu&rsquo;on allume les &eacute;toiles,")), tags$p(HTML("C&rsquo;est qu&rsquo;elles sont &agrave; quelqu&rsquo;un n&eacute;cessaires ?")), tags$p(HTML("C&rsquo;est que quelqu&rsquo;un d&eacute;sire qu&rsquo;elles soient ?")), tags$br(), tags$p(HTML("Vladimir Ma&iuml;akovski &ndash; &Eacute;coutez !")) ), tags$div( # style = style_grid, tags$div( tags$p( paste(letters, collapse = "") ), tags$p( style = "font-weight: bold;", paste(letters, collapse = "") ), tags$p( style = "font-style: italic;", paste(letters, collapse = "") ) ), tags$div( tags$p( paste(LETTERS, collapse = "") ), tags$p( style = "font-weight: bold;", paste(LETTERS, collapse = "") ), tags$p( style = "font-style: italic;", paste(LETTERS, collapse = "") ) ) ) ) }
#' List the datasets in a package. #' @export list_datasets <- function(package) { data(package = package)$results[, "Item"] }
/R/docs.R
no_license
pedmiston/rdata
R
false
false
129
r
#' List the datasets in a package. #' @export list_datasets <- function(package) { data(package = package)$results[, "Item"] }
#### Assignment JH Course 4 - week 1 setwd(dir = "d:/machine-learning/Johns_Hopkins/course4/week1/") if (! file.exists("./data")) { dir.create("./data") } file.exists("./data") setwd(dir = "./data/") urlFile <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip" destFile <- "./household_power_consumption.zip" download.file(urlFile, destfile = destFile) unzip(destFile) list.files() hpc_data <- read.table(file = "./household_power_consumption.txt", header = TRUE, na.strings = "?", sep = ";") object.size(hpc_data) # strptime(head(hpc_data$Time),format = "%H:%M:%S") # strptime(head(hpc_data$Date),format = "%d/%m/%Y") hpc_data$date_time <- (paste(sep = " ", hpc_data$Date, hpc_data$Time)) hpc_data$date_time <- strptime(hpc_data$date_time, format = "%d/%m/%Y %H:%M:%S") hpc <- hpc_data[hpc_data$Date == "1/2/2007" | hpc_data$Date == "2/2/2007",] nrow(hpc) ### plot 4 dev.copy(png, file = "plot4.png") par(mfrow = c(2, 2)) with(hpc, { plot(date_time, Global_active_power, type= "l",xlab = "", ylab = "Global Active Power") plot(date_time, Voltage, type= "l", xlab = "datetime") plot(date_time, Sub_metering_1, type = "l", xlab = "", ylab = "Energy submetering") lines(date_time, Sub_metering_2, col = "red") lines(date_time, Sub_metering_3, col = "blue") legend("topright", lty = 1, seg.len = 1, col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")) plot(date_time, Global_reactive_power, type= "l",xlab = "datetime") } ) dev.off()
/plot4.R
no_license
wouwou4444/ExData_Plotting1
R
false
false
1,611
r
#### Assignment JH Course 4 - week 1 setwd(dir = "d:/machine-learning/Johns_Hopkins/course4/week1/") if (! file.exists("./data")) { dir.create("./data") } file.exists("./data") setwd(dir = "./data/") urlFile <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip" destFile <- "./household_power_consumption.zip" download.file(urlFile, destfile = destFile) unzip(destFile) list.files() hpc_data <- read.table(file = "./household_power_consumption.txt", header = TRUE, na.strings = "?", sep = ";") object.size(hpc_data) # strptime(head(hpc_data$Time),format = "%H:%M:%S") # strptime(head(hpc_data$Date),format = "%d/%m/%Y") hpc_data$date_time <- (paste(sep = " ", hpc_data$Date, hpc_data$Time)) hpc_data$date_time <- strptime(hpc_data$date_time, format = "%d/%m/%Y %H:%M:%S") hpc <- hpc_data[hpc_data$Date == "1/2/2007" | hpc_data$Date == "2/2/2007",] nrow(hpc) ### plot 4 dev.copy(png, file = "plot4.png") par(mfrow = c(2, 2)) with(hpc, { plot(date_time, Global_active_power, type= "l",xlab = "", ylab = "Global Active Power") plot(date_time, Voltage, type= "l", xlab = "datetime") plot(date_time, Sub_metering_1, type = "l", xlab = "", ylab = "Energy submetering") lines(date_time, Sub_metering_2, col = "red") lines(date_time, Sub_metering_3, col = "blue") legend("topright", lty = 1, seg.len = 1, col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")) plot(date_time, Global_reactive_power, type= "l",xlab = "datetime") } ) dev.off()
plegend <- function (p)(paste0("=",formatC(p, format="e", digits=1))) x <- read.table("indelsnv_seq_hg19_allcoding_perpat.txt", sep="\t", quote="\"", head=T, check.names=F) p <- read.table("pvalues.txt", sep="\t", quote="\"", head=T, check.names=F, stringsAsFactors=F) number <- c(x$mutations_per_mb_Coding, x$mutations_per_mb_UCR) group <- factor(rep(x$type, 2)) class <- rep(c("Coding", "UCR"), each=nrow(x)) stat <- NULL for (i in unique(class)) { m<-mean(number[class==i]) s<-sd(number[class==i]) max<-max(number[class==i]) stat <- rbind(stat, data.frame(class=i, m=m, ms=m+s, m2s=m+2*s, max=max)) } col<-rainbow(length(levels(group))) # by median median <- tapply(number[class=="UCR"], group[class=="UCR"], median) o <- order(-median, levels(group), decreasing=T) group <- factor(group, levels=levels(group)[o]) col <- col[o] #labels <- levels(group) scale <- function (v) log(1+v) col2 <- rep(col, each=2) tiff("boxplotarrangeadtw13hght10nodigitperMB_2cases_3.tiff", width=20*300, height=24*300, res=300, compression="lzw") #pdf("drawing.pdf", width=8, height=10) at <- c(0, 10, 100, 600, 1000) par(mar=c(5,30,1,1)+.1, font=2) boxplot(scale(number)~class+group, horizontal=T, axes=F, outpch=16, xlab="", ylab="", #boxcol=col2, boxfill=col2, #medcol=col2, outcol=col2, ylim=scale(range(at)) ) labels <- c(t(matrix(c(paste0(levels(group), " (Coding)"), paste0(levels(group)," (UCE, n=", table(group[class=="UCR"]), ")", "(P",plegend(sapply(levels(group), function (g) p$pvalue[p$type==g])),")")), ncol=2))) axis(1, at=scale(at), labels=at, font.axis=2, lwd=4, cex.axis=2) axis(2, at=seq(labels), labels=rep("", length(labels)), las=1, font.axis=2, lwd=4, cex.axis=1.75) mtext(labels, side=2, line=1, at=seq(labels), las=1, cex=2) mtext("# UCE mutations per MB per tumor", side=1, line=3, las=1, cex=2.5) lcol <- c("darkblue", "black") for (i in 1:nrow(stat)) { abline(v=scale(stat[i,2:4]), lty=c("dotted", "dotdash","longdash"), lwd=2, col=lcol[i]) } stat$class[2] for (i in 1:nrow(stat)) { 
legend(5, 32-8*i, lty=c("dotted", "dotdash","longdash", "blank"), lwd=3, legend=paste0(c("mean=", "mean+SD=", "mean+2SD=", "max="), formatC(as.numeric(stat[i,2:5]), format="f", digits=0)), cex=2, bty="n", col=lcol[i], title.col=lcol[i], text.col=lcol[i], title=ifelse(i==1, as.character(stat$class[i]), "UCE"), xpd=NA) } dev.off()
/SupFig2/SupFig2.R
no_license
CristinaMonaIvan/UCE_Project
R
false
false
2,542
r
plegend <- function (p)(paste0("=",formatC(p, format="e", digits=1))) x <- read.table("indelsnv_seq_hg19_allcoding_perpat.txt", sep="\t", quote="\"", head=T, check.names=F) p <- read.table("pvalues.txt", sep="\t", quote="\"", head=T, check.names=F, stringsAsFactors=F) number <- c(x$mutations_per_mb_Coding, x$mutations_per_mb_UCR) group <- factor(rep(x$type, 2)) class <- rep(c("Coding", "UCR"), each=nrow(x)) stat <- NULL for (i in unique(class)) { m<-mean(number[class==i]) s<-sd(number[class==i]) max<-max(number[class==i]) stat <- rbind(stat, data.frame(class=i, m=m, ms=m+s, m2s=m+2*s, max=max)) } col<-rainbow(length(levels(group))) # by median median <- tapply(number[class=="UCR"], group[class=="UCR"], median) o <- order(-median, levels(group), decreasing=T) group <- factor(group, levels=levels(group)[o]) col <- col[o] #labels <- levels(group) scale <- function (v) log(1+v) col2 <- rep(col, each=2) tiff("boxplotarrangeadtw13hght10nodigitperMB_2cases_3.tiff", width=20*300, height=24*300, res=300, compression="lzw") #pdf("drawing.pdf", width=8, height=10) at <- c(0, 10, 100, 600, 1000) par(mar=c(5,30,1,1)+.1, font=2) boxplot(scale(number)~class+group, horizontal=T, axes=F, outpch=16, xlab="", ylab="", #boxcol=col2, boxfill=col2, #medcol=col2, outcol=col2, ylim=scale(range(at)) ) labels <- c(t(matrix(c(paste0(levels(group), " (Coding)"), paste0(levels(group)," (UCE, n=", table(group[class=="UCR"]), ")", "(P",plegend(sapply(levels(group), function (g) p$pvalue[p$type==g])),")")), ncol=2))) axis(1, at=scale(at), labels=at, font.axis=2, lwd=4, cex.axis=2) axis(2, at=seq(labels), labels=rep("", length(labels)), las=1, font.axis=2, lwd=4, cex.axis=1.75) mtext(labels, side=2, line=1, at=seq(labels), las=1, cex=2) mtext("# UCE mutations per MB per tumor", side=1, line=3, las=1, cex=2.5) lcol <- c("darkblue", "black") for (i in 1:nrow(stat)) { abline(v=scale(stat[i,2:4]), lty=c("dotted", "dotdash","longdash"), lwd=2, col=lcol[i]) } stat$class[2] for (i in 1:nrow(stat)) { 
legend(5, 32-8*i, lty=c("dotted", "dotdash","longdash", "blank"), lwd=3, legend=paste0(c("mean=", "mean+SD=", "mean+2SD=", "max="), formatC(as.numeric(stat[i,2:5]), format="f", digits=0)), cex=2, bty="n", col=lcol[i], title.col=lcol[i], text.col=lcol[i], title=ifelse(i==1, as.character(stat$class[i]), "UCE"), xpd=NA) } dev.off()
library(nycflights13) data<-flights clean_data<-na.omit(data) head(clean_data) flights_model_0 <- lm(clean_data$arr_delay~clean_data$distance+clean_data$air_time) summary(flights_model_0) linearMod <- lm(clean_data$arr_delay ~ clean_data$dep_delay) summary(linearMod) f1 <- summary(flights_model_0)$r.squared f1 f2 <- summary(linearMod)$r.squared f2
/Ejercicio 2/ejercicio2R.R
no_license
JaviGR66/RmdTestExamples
R
false
false
380
r
library(nycflights13) data<-flights clean_data<-na.omit(data) head(clean_data) flights_model_0 <- lm(clean_data$arr_delay~clean_data$distance+clean_data$air_time) summary(flights_model_0) linearMod <- lm(clean_data$arr_delay ~ clean_data$dep_delay) summary(linearMod) f1 <- summary(flights_model_0)$r.squared f1 f2 <- summary(linearMod)$r.squared f2
########################################################################/** # @RdocFunction cdfAddBaseMmCounts # # @title "Adds the number of allele A and allele B mismatching nucleotides of the probes in a CDF structure" # # \description{ # @get "title". # # This @function is design to be used with @see "applyCdfGroups" # on an Affymetrix Mapping (SNP) CDF @list structure. # # Identifies the number of nucleotides (bases) in probe sequences that # mismatch the the target sequence for allele A and the allele B, # as used by [1]. # } # # @synopsis # # \arguments{ # \item{groups}{A @list structure with groups. # Each group must contain the fields \code{tbase}, \code{pbase}, and # \code{offset} (from @see "cdfAddProbeOffsets"). # } # \item{...}{Not used.} # } # # \value{ # Returns a @list structure with the same number of groups as the # \code{groups} argument. To each group, two fields is added: # \item{mmACount}{The number of nucleotides in the probe sequence # that mismatches the target sequence of allele A.} # \item{mmBCount}{The number of nucleotides in the probe sequence # that mismatches the target sequence of allele B.} # } # # \details{ # Note that the above counts can be inferred from the CDF structure alone, # i.e. no sequence information is required. # Consider a probe group interrogating allele A. First, all PM probes # matches the allele A target sequence perfectly regardless of shift. # Moreover, all these PM probes mismatch the allele B target sequence # at exactly one position. Second, all MM probes mismatches the # allele A sequence at exactly one position. This is also true for # the allele B sequence, \emph{except} for an MM probe with zero offset, # which only mismatch at one (the middle) position. # For a probe group interrogating allele B, the same rules applies with # labels A and B swapped. # In summary, the mismatch counts for PM probes can take values 0 and 1, # and for MM probes they can take values 0, 1, and 2. 
# } # # \seealso{ # To add required probe offsets, @see "cdfAddProbeOffsets". # @see "applyCdfGroups". # } # # @author "HB" # # \references{ # [1] LaFramboise T, Weir BA, Zhao X, Beroukhim R, Li C, Harrington D, # Sellers WR, and Meyerson M. \emph{Allele-specific amplification in # cancer revealed by SNP array analysis}, PLoS Computational Biology, # Nov 2005, Volume 1, Issue 6, e65.\cr # [2] Affymetrix, \emph{Understanding Genotyping Probe Set Structure}, 2005. # \url{http://www.affymetrix.com/support/developer/whitepapers/genotyping_probe_set_structure.affx}\cr # } # # @keyword programming # @keyword internal #**/####################################################################### cdfAddBaseMmCounts <- function(groups, ...) { for (gg in seq(along=groups)) { group <- groups[[gg]]; # Find PM probes tbase <- group$tbase; pbase <- group$pbase; isPm <- ((tbase == "a" | tbase == "A") & (pbase == "t" | pbase == "T")) | ((tbase == "t" | tbase == "T") & (pbase == "a" | pbase == "A")) | ((tbase == "c" | tbase == "C") & (pbase == "g" | pbase == "G")) | ((tbase == "g" | tbase == "G") & (pbase == "c" | pbase == "C")); # Find the center probes isCentered <- (group$offset == 0); dim <- dim(isCentered); if (is.null(dim)) { mmACount <- mmBCount <- rep(as.integer(1), length(isCentered)); } else { mmACount <- mmBCount <- array(as.integer(1), dim=dim, dimnames=dimnames(isCentered)); } # Is this probe group interrogating allele A? isA <- (gg %% 2 == 1); if (isA) { mmACount[isPm] <- as.integer(0); mmBCount[!isPm & !isCentered] <- as.integer(2); } else { mmBCount[isPm] <- as.integer(0); mmACount[!isPm & !isCentered] <- as.integer(2); } # Add the new fields group$mmACount <- mmACount; group$mmBCount <- mmBCount; groups[[gg]] <- group; } groups; } ############################################################################ # HISTORY: # 2006-06-19 # o Added more Rdoc help. # 2006-03-07 # o Created. ############################################################################
/R/cdfAddBaseMmCounts.R
no_license
N0s3n/affxparser
R
false
false
4,312
r
########################################################################/** # @RdocFunction cdfAddBaseMmCounts # # @title "Adds the number of allele A and allele B mismatching nucleotides of the probes in a CDF structure" # # \description{ # @get "title". # # This @function is design to be used with @see "applyCdfGroups" # on an Affymetrix Mapping (SNP) CDF @list structure. # # Identifies the number of nucleotides (bases) in probe sequences that # mismatch the the target sequence for allele A and the allele B, # as used by [1]. # } # # @synopsis # # \arguments{ # \item{groups}{A @list structure with groups. # Each group must contain the fields \code{tbase}, \code{pbase}, and # \code{offset} (from @see "cdfAddProbeOffsets"). # } # \item{...}{Not used.} # } # # \value{ # Returns a @list structure with the same number of groups as the # \code{groups} argument. To each group, two fields is added: # \item{mmACount}{The number of nucleotides in the probe sequence # that mismatches the target sequence of allele A.} # \item{mmBCount}{The number of nucleotides in the probe sequence # that mismatches the target sequence of allele B.} # } # # \details{ # Note that the above counts can be inferred from the CDF structure alone, # i.e. no sequence information is required. # Consider a probe group interrogating allele A. First, all PM probes # matches the allele A target sequence perfectly regardless of shift. # Moreover, all these PM probes mismatch the allele B target sequence # at exactly one position. Second, all MM probes mismatches the # allele A sequence at exactly one position. This is also true for # the allele B sequence, \emph{except} for an MM probe with zero offset, # which only mismatch at one (the middle) position. # For a probe group interrogating allele B, the same rules applies with # labels A and B swapped. # In summary, the mismatch counts for PM probes can take values 0 and 1, # and for MM probes they can take values 0, 1, and 2. 
# } # # \seealso{ # To add required probe offsets, @see "cdfAddProbeOffsets". # @see "applyCdfGroups". # } # # @author "HB" # # \references{ # [1] LaFramboise T, Weir BA, Zhao X, Beroukhim R, Li C, Harrington D, # Sellers WR, and Meyerson M. \emph{Allele-specific amplification in # cancer revealed by SNP array analysis}, PLoS Computational Biology, # Nov 2005, Volume 1, Issue 6, e65.\cr # [2] Affymetrix, \emph{Understanding Genotyping Probe Set Structure}, 2005. # \url{http://www.affymetrix.com/support/developer/whitepapers/genotyping_probe_set_structure.affx}\cr # } # # @keyword programming # @keyword internal #**/####################################################################### cdfAddBaseMmCounts <- function(groups, ...) { for (gg in seq(along=groups)) { group <- groups[[gg]]; # Find PM probes tbase <- group$tbase; pbase <- group$pbase; isPm <- ((tbase == "a" | tbase == "A") & (pbase == "t" | pbase == "T")) | ((tbase == "t" | tbase == "T") & (pbase == "a" | pbase == "A")) | ((tbase == "c" | tbase == "C") & (pbase == "g" | pbase == "G")) | ((tbase == "g" | tbase == "G") & (pbase == "c" | pbase == "C")); # Find the center probes isCentered <- (group$offset == 0); dim <- dim(isCentered); if (is.null(dim)) { mmACount <- mmBCount <- rep(as.integer(1), length(isCentered)); } else { mmACount <- mmBCount <- array(as.integer(1), dim=dim, dimnames=dimnames(isCentered)); } # Is this probe group interrogating allele A? isA <- (gg %% 2 == 1); if (isA) { mmACount[isPm] <- as.integer(0); mmBCount[!isPm & !isCentered] <- as.integer(2); } else { mmBCount[isPm] <- as.integer(0); mmACount[!isPm & !isCentered] <- as.integer(2); } # Add the new fields group$mmACount <- mmACount; group$mmBCount <- mmBCount; groups[[gg]] <- group; } groups; } ############################################################################ # HISTORY: # 2006-06-19 # o Added more Rdoc help. # 2006-03-07 # o Created. ############################################################################
#4.1 plot(cars$speed, cars$dist, xlab = "Speed (mph)", ylab = "Stopping Distance (ft)", main="Stopping Distance vs. Speed", col="red", pch=17) #4.4 par(mfrow=c(2, 2)) attach(mtcars) plot(disp, mpg, xlab = "Displacement") plot(wt, mpg, xlab = "Weight") plot(drat, mpg, xlab = "Rear Axle Ratio" ) plot(hp, mpg, xlab = "Horsepower") #4.5 house=function(x, y, ...){ lines(c(x - 1, x + 1, x + 1, x - 1, x - 1), c(y - 1, y - 1, y + 1, y + 1, y - 1), ...) lines(c(x - 1, x, x + 1), c(y + 1, y + 2, y + 1), ...) lines(c(x - 0.3, x + 0.3, x + 0.3, x - 0.3, x - 0.3), c(y - 1, y - 1, y + 0.4, y + 0.4, y - 1), ...) } plot.new() plot.window(xlim = c(0, 10), ylim = c(0, 10)) lines(house(1, 1), house(4, 2), house(7,6)) house(0, 7, col="purple", lty = 17) box() #4.6 curve(dbeta(x, 2, 6), from=0, to=1) curve(dbeta(x, 4, 4), from=0, to=1, add = T) curve(dbeta(x, 6, 2), from=0, to=1, add = T) title(expression(f(y)==frac(1,B(a,b))*y^{a-1}*(1-y)^{b-1})) text(.05, 2.7, labels = "Beta(2, 6)") text(.4, 1.5, labels = "This is how to make words") #4.7 faithful$leng = ifelse(faithful$eruptions < 3.2, "short", "long") faithful library(lattice) bwplot(waiting ~ leng, data = faithful, main = "Waiting Time by Length") densityplot(~waiting, group = leng, data = faithful, auto.key=list(space="top"), xlab = "Waiting Time", main = "Faithful Waiting Time Density Plot") #5.1 #PART A col=subset(college, complete.cases(college)) stripchart(Pct.20 ~ Tier, method = "stack", main="Percentage of Small Classes in National Universities", xlab="Small Class Percentage", data = col) #PART B identify(col$Pct.20, col$Tier, n=1, labels=col$School) #cant get identify to work #PARTC median(col$Pct.20) abline(v=45, col="hotpink") #5.2 #PART A big=college$Pct.50[!is.na(college$Pct.50)] plot(college$Pct.20, college$Pct.50, main = "Percentage of Class Sizes", xlab = "Small Class Percentage", ylab = "Large Class Percentage") #PART B fit=line(col$Pct.20, col$Pct.50) fit abline(coef(fit)) #PART C 23.1-.2667*60 #PART D 
plot(col$Pct.20, fit$residuals, xlab = "Small Class Percentage", ylab = "Residuals", main = "Residuals vs. Pct.20") abline(h=0) abline(h=10, col="hotpink") abline(h=-10, col="hotpink") identify(col$Pct.20, fit$residuals, n=7, labels = col$School) plot(col$Pct.50, fit$residuals, xlab = "Large Class Percentage", ylab = "Residuals", main = "Residuals vs. Pct.50") abline(h=0) abline(h=10, col="hotpink") abline(h=-10, col="hotpink") identify(col$Pct.50, fit$residuals, n=7, labels = col$School) #5.5 #PARTA hist(col$Full.time, xlab = "Full Time Percentages") froot=sqrt(col$Full.time) - sqrt(100 - col$Full.time) flog = log(col$Full.time + 0.5) - log(100 - col$Full.time + 0.5) hist(froot) hist(flog) truehist(flog) curve(dnorm(x, m, stan), add = T) m=mean(flog) stan=sd(flog) m+stan m-stan #5.7 stripchart(Alumni.giving ~ Tier, method = "stack", main="Alumni Giving Rates by Tier", xlab="Alumni Giving Rate", data = col, ylab="Tier") identify(col$Alumni.giving, col$Tier, n=3, labels = col$School) #PART D stripchart(sqrt(Alumni.giving) ~ Tier, method = "stack", main="Alumni Giving Rates by Tier (square root)", xlab="Alumni Giving Rate", data = col, ylab="Tier") boxplot(log(Alumni.giving) ~ Tier, method = "stack", main="Alumni Giving Rates by Tier (log)", xlab="log(Alumni Giving Rate)", data = col, ylab="Tier", horizontal =T) #Extra questions GSS=subset(`GSS2015_DST_09.(1)`, complete.cases(`GSS2015_DST_09.(1)`)) attach(GSS) #Gets rid of old data GSS$X2014olda=NULL sci=subset(GSS, Field=="Science", select = c(X2010, X2011, X2012, X2013, X2014newa, X2015)) eng=subset(GSS, Field=="Engineering", select = c(X2010, X2011, X2012, X2013, X2014newa, X2015)) heal=subset(GSS, Field=="Health", select = c(X2010, X2011, X2012, X2013, X2014newa, X2015)) stat=subset(GSS, Field=="Statistics", select = c(X2010, X2011, X2012, X2013, X2014newa, X2015)) s=t(sci) e=t(eng) h=t(heal) st=t(stat) new=cbind(s, e, h) barplot(new, beside = T, xlab = "Field", ylab = "Number of Graduate Students", names.arg = 
c("Science", "Engineering", "Health"), ylim = c(0,450000), col = cm.colors(6), main = "Number of Graduate Students Over the Years") options(scipen = 7) names=c("2010", "2011", "2012", "2013", "2014", "2015") legend("topright", legend = names, pch = 15, col = cm.colors(6), bty = "n") barplot(st, beside = T, col = cm.colors((6)), ylim = c(0,8000), main = "Graduate Students in Statistics", names.arg = "Statistics", ylab = "Number of Graduate Students") legend("topleft", legend = names, pch = 15, col = cm.colors(6), bty = "n", horiz = T, cex=.8) svst=cbind(s, st) chisq.test(svst) evst=cbind(e, st) chisq.test(evst)
/AMS 204/204 HW #2.R
no_license
sarahgjarvis/UCSC-School-Work
R
false
false
4,649
r
#4.1 plot(cars$speed, cars$dist, xlab = "Speed (mph)", ylab = "Stopping Distance (ft)", main="Stopping Distance vs. Speed", col="red", pch=17) #4.4 par(mfrow=c(2, 2)) attach(mtcars) plot(disp, mpg, xlab = "Displacement") plot(wt, mpg, xlab = "Weight") plot(drat, mpg, xlab = "Rear Axle Ratio" ) plot(hp, mpg, xlab = "Horsepower") #4.5 house=function(x, y, ...){ lines(c(x - 1, x + 1, x + 1, x - 1, x - 1), c(y - 1, y - 1, y + 1, y + 1, y - 1), ...) lines(c(x - 1, x, x + 1), c(y + 1, y + 2, y + 1), ...) lines(c(x - 0.3, x + 0.3, x + 0.3, x - 0.3, x - 0.3), c(y - 1, y - 1, y + 0.4, y + 0.4, y - 1), ...) } plot.new() plot.window(xlim = c(0, 10), ylim = c(0, 10)) lines(house(1, 1), house(4, 2), house(7,6)) house(0, 7, col="purple", lty = 17) box() #4.6 curve(dbeta(x, 2, 6), from=0, to=1) curve(dbeta(x, 4, 4), from=0, to=1, add = T) curve(dbeta(x, 6, 2), from=0, to=1, add = T) title(expression(f(y)==frac(1,B(a,b))*y^{a-1}*(1-y)^{b-1})) text(.05, 2.7, labels = "Beta(2, 6)") text(.4, 1.5, labels = "This is how to make words") #4.7 faithful$leng = ifelse(faithful$eruptions < 3.2, "short", "long") faithful library(lattice) bwplot(waiting ~ leng, data = faithful, main = "Waiting Time by Length") densityplot(~waiting, group = leng, data = faithful, auto.key=list(space="top"), xlab = "Waiting Time", main = "Faithful Waiting Time Density Plot") #5.1 #PART A col=subset(college, complete.cases(college)) stripchart(Pct.20 ~ Tier, method = "stack", main="Percentage of Small Classes in National Universities", xlab="Small Class Percentage", data = col) #PART B identify(col$Pct.20, col$Tier, n=1, labels=col$School) #cant get identify to work #PARTC median(col$Pct.20) abline(v=45, col="hotpink") #5.2 #PART A big=college$Pct.50[!is.na(college$Pct.50)] plot(college$Pct.20, college$Pct.50, main = "Percentage of Class Sizes", xlab = "Small Class Percentage", ylab = "Large Class Percentage") #PART B fit=line(col$Pct.20, col$Pct.50) fit abline(coef(fit)) #PART C 23.1-.2667*60 #PART D 
plot(col$Pct.20, fit$residuals, xlab = "Small Class Percentage", ylab = "Residuals", main = "Residuals vs. Pct.20") abline(h=0) abline(h=10, col="hotpink") abline(h=-10, col="hotpink") identify(col$Pct.20, fit$residuals, n=7, labels = col$School) plot(col$Pct.50, fit$residuals, xlab = "Large Class Percentage", ylab = "Residuals", main = "Residuals vs. Pct.50") abline(h=0) abline(h=10, col="hotpink") abline(h=-10, col="hotpink") identify(col$Pct.50, fit$residuals, n=7, labels = col$School) #5.5 #PARTA hist(col$Full.time, xlab = "Full Time Percentages") froot=sqrt(col$Full.time) - sqrt(100 - col$Full.time) flog = log(col$Full.time + 0.5) - log(100 - col$Full.time + 0.5) hist(froot) hist(flog) truehist(flog) curve(dnorm(x, m, stan), add = T) m=mean(flog) stan=sd(flog) m+stan m-stan #5.7 stripchart(Alumni.giving ~ Tier, method = "stack", main="Alumni Giving Rates by Tier", xlab="Alumni Giving Rate", data = col, ylab="Tier") identify(col$Alumni.giving, col$Tier, n=3, labels = col$School) #PART D stripchart(sqrt(Alumni.giving) ~ Tier, method = "stack", main="Alumni Giving Rates by Tier (square root)", xlab="Alumni Giving Rate", data = col, ylab="Tier") boxplot(log(Alumni.giving) ~ Tier, method = "stack", main="Alumni Giving Rates by Tier (log)", xlab="log(Alumni Giving Rate)", data = col, ylab="Tier", horizontal =T) #Extra questions GSS=subset(`GSS2015_DST_09.(1)`, complete.cases(`GSS2015_DST_09.(1)`)) attach(GSS) #Gets rid of old data GSS$X2014olda=NULL sci=subset(GSS, Field=="Science", select = c(X2010, X2011, X2012, X2013, X2014newa, X2015)) eng=subset(GSS, Field=="Engineering", select = c(X2010, X2011, X2012, X2013, X2014newa, X2015)) heal=subset(GSS, Field=="Health", select = c(X2010, X2011, X2012, X2013, X2014newa, X2015)) stat=subset(GSS, Field=="Statistics", select = c(X2010, X2011, X2012, X2013, X2014newa, X2015)) s=t(sci) e=t(eng) h=t(heal) st=t(stat) new=cbind(s, e, h) barplot(new, beside = T, xlab = "Field", ylab = "Number of Graduate Students", names.arg = 
c("Science", "Engineering", "Health"), ylim = c(0,450000), col = cm.colors(6), main = "Number of Graduate Students Over the Years") options(scipen = 7) names=c("2010", "2011", "2012", "2013", "2014", "2015") legend("topright", legend = names, pch = 15, col = cm.colors(6), bty = "n") barplot(st, beside = T, col = cm.colors((6)), ylim = c(0,8000), main = "Graduate Students in Statistics", names.arg = "Statistics", ylab = "Number of Graduate Students") legend("topleft", legend = names, pch = 15, col = cm.colors(6), bty = "n", horiz = T, cex=.8) svst=cbind(s, st) chisq.test(svst) evst=cbind(e, st) chisq.test(evst)
## ---------------------------------------------------------------------------------- ## A trivial class for dealing with deterministic outcome modeling ## ---------------------------------------------------------------------------------- DeterministicBinaryOutcomeModel <- R6Class(classname = "DeterministicBinaryOutcomeModel", inherit = BinaryOutcomeModel, cloneable = TRUE, portable = TRUE, class = TRUE, public = list( gstar.Name = character(), is.fitted = TRUE, initialize = function(reg, ...) { self$model_contrl <- reg$model_contrl self$gstar.Name <- reg$model_contrl[["gstar.Name"]] assert_that(!is.null(self$gstar.Name)) assert_that(is.string(reg$outvar)) self$outvar <- reg$outvar self$predvars <- reg$predvars self$subset_vars <- reg$subset_vars self$subset_exprs <- reg$subset_exprs assert_that(length(self$subset_exprs) <= 1) self$ReplMisVal0 <- reg$ReplMisVal0 invisible(self) }, # if (predict) then use the same data to make predictions for all obs in self$subset_idx; # store these predictions in private$probA1 and private$probAeqa fit = function(overwrite = FALSE, data, ...) { # Move overwrite to a field? ... self$overwrite self$n <- data$nobs self$define.subset.idx(data) private$probA1 <- data$get.outvar(TRUE, self$gstar.Name) # private$.isNA.probA1 <- is.na(private$probA1) # self$subset_idx <- rep.int(TRUE, self$n) self$subset_idx <- seq_len(self$n) private$.outvar <- data$get.outvar(TRUE, self$getoutvarnm) # Always a vector of 0/1 # private$.isNA.outvar <- is.na(private$.outvar) self$is.fitted <- TRUE # ********************************************************************** # to save RAM space when doing many stacked regressions wipe out all internal data: # self$wipe.alldat # ********************************************************************** invisible(self) }, # get the fixed (known) the gstar P(A^*(t) = 1|W, bar{L(t)}); # should be already saved earlier in private$probA1, so there is nothing to do here predict = function(newdata, ...) 
{ assert_that(self$is.fitted) return(invisible(self)) }, predictAeqa = function(newdata, ...) { # P(A^s[i]=a^s|W^s=w^s) - calculating the likelihood for indA[i] (n vector of a`s) assert_that(self$is.fitted) if (missing(newdata)) { indA <- self$getoutvarval } else { indA <- newdata$get.outvar(self$getsubset, self$getoutvarnm) # Always a vector of 0/1 } assert_that(is.integerish(indA)) # check that observed exposure is always a vector of integers probAeqa <- rep.int(1L, self$n) # for missing values, the likelihood is always set to P(A = a) = 1. # probA1 <- private$probA1[self$getsubset] probA1 <- private$probA1 probAeqa[self$getsubset] <- probA1^(indA) * (1 - probA1)^(1L - indA) self$wipe.alldat # to save RAM space when doing many stacked regressions wipe out all internal data: return(probAeqa) }, # Output info on the general type of regression being fitted: show = function(print_format = TRUE) { if (print_format) { return("P(" %+% self$outvar %+% "|" %+% paste(self$predvars, collapse=", ") %+% ")" %+% ";\\ Stratify: " %+% self$subset_exprs) } else { return(list(outvar = self$outvar, predvars = self$predvars, stratify = self$subset_exprs)) } } ), active = list( wipe.alldat = function() { private$probA1 <- NULL private$probAeqa <- NULL private$.outvar <- NULL self$subset_idx <- NULL return(self) }, getfit = function() { private$model.fit }, getprobA1 = function() { private$probA1 }, getsubset = function() { self$subset_idx }, getoutvarnm = function() { self$outvar }, getoutvarval = function() { private$.outvar } ), private = list( model.fit = list(), # the model fit (either coefficients or the model fit object) .outvar = NULL, # .isNA.outvar = NULL, probA1 = NULL, # Predicted probA^s=1 conditional on Xmat # .isNA.probA1 = NULL, probAeqa = NULL # Likelihood of observing a particular value A^s=a^s conditional on Xmat ) )
/R/DeterministicBinaryOutcomeModel.R
permissive
jlstiles/stremr
R
false
false
4,294
r
## ---------------------------------------------------------------------------------- ## A trivial class for dealing with deterministic outcome modeling ## ---------------------------------------------------------------------------------- DeterministicBinaryOutcomeModel <- R6Class(classname = "DeterministicBinaryOutcomeModel", inherit = BinaryOutcomeModel, cloneable = TRUE, portable = TRUE, class = TRUE, public = list( gstar.Name = character(), is.fitted = TRUE, initialize = function(reg, ...) { self$model_contrl <- reg$model_contrl self$gstar.Name <- reg$model_contrl[["gstar.Name"]] assert_that(!is.null(self$gstar.Name)) assert_that(is.string(reg$outvar)) self$outvar <- reg$outvar self$predvars <- reg$predvars self$subset_vars <- reg$subset_vars self$subset_exprs <- reg$subset_exprs assert_that(length(self$subset_exprs) <= 1) self$ReplMisVal0 <- reg$ReplMisVal0 invisible(self) }, # if (predict) then use the same data to make predictions for all obs in self$subset_idx; # store these predictions in private$probA1 and private$probAeqa fit = function(overwrite = FALSE, data, ...) { # Move overwrite to a field? ... self$overwrite self$n <- data$nobs self$define.subset.idx(data) private$probA1 <- data$get.outvar(TRUE, self$gstar.Name) # private$.isNA.probA1 <- is.na(private$probA1) # self$subset_idx <- rep.int(TRUE, self$n) self$subset_idx <- seq_len(self$n) private$.outvar <- data$get.outvar(TRUE, self$getoutvarnm) # Always a vector of 0/1 # private$.isNA.outvar <- is.na(private$.outvar) self$is.fitted <- TRUE # ********************************************************************** # to save RAM space when doing many stacked regressions wipe out all internal data: # self$wipe.alldat # ********************************************************************** invisible(self) }, # get the fixed (known) the gstar P(A^*(t) = 1|W, bar{L(t)}); # should be already saved earlier in private$probA1, so there is nothing to do here predict = function(newdata, ...) 
{ assert_that(self$is.fitted) return(invisible(self)) }, predictAeqa = function(newdata, ...) { # P(A^s[i]=a^s|W^s=w^s) - calculating the likelihood for indA[i] (n vector of a`s) assert_that(self$is.fitted) if (missing(newdata)) { indA <- self$getoutvarval } else { indA <- newdata$get.outvar(self$getsubset, self$getoutvarnm) # Always a vector of 0/1 } assert_that(is.integerish(indA)) # check that observed exposure is always a vector of integers probAeqa <- rep.int(1L, self$n) # for missing values, the likelihood is always set to P(A = a) = 1. # probA1 <- private$probA1[self$getsubset] probA1 <- private$probA1 probAeqa[self$getsubset] <- probA1^(indA) * (1 - probA1)^(1L - indA) self$wipe.alldat # to save RAM space when doing many stacked regressions wipe out all internal data: return(probAeqa) }, # Output info on the general type of regression being fitted: show = function(print_format = TRUE) { if (print_format) { return("P(" %+% self$outvar %+% "|" %+% paste(self$predvars, collapse=", ") %+% ")" %+% ";\\ Stratify: " %+% self$subset_exprs) } else { return(list(outvar = self$outvar, predvars = self$predvars, stratify = self$subset_exprs)) } } ), active = list( wipe.alldat = function() { private$probA1 <- NULL private$probAeqa <- NULL private$.outvar <- NULL self$subset_idx <- NULL return(self) }, getfit = function() { private$model.fit }, getprobA1 = function() { private$probA1 }, getsubset = function() { self$subset_idx }, getoutvarnm = function() { self$outvar }, getoutvarval = function() { private$.outvar } ), private = list( model.fit = list(), # the model fit (either coefficients or the model fit object) .outvar = NULL, # .isNA.outvar = NULL, probA1 = NULL, # Predicted probA^s=1 conditional on Xmat # .isNA.probA1 = NULL, probAeqa = NULL # Likelihood of observing a particular value A^s=a^s conditional on Xmat ) )
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/cross_validation_functions.R \name{permute.vector} \alias{permute.vector} \title{Randomly permute the entries of a vector.} \usage{ permute.vector(x) } \arguments{ \item{x}{the vector for which its entries have to be permuted} } \value{ the permuted vector } \description{ Randomly permute the entries of a vector. }
/man/permute.vector.Rd
no_license
xhuang4/optaucx
R
false
true
396
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/cross_validation_functions.R \name{permute.vector} \alias{permute.vector} \title{Randomly permute the entries of a vector.} \usage{ permute.vector(x) } \arguments{ \item{x}{the vector for which its entries have to be permuted} } \value{ the permuted vector } \description{ Randomly permute the entries of a vector. }
library(plsRglm) ### Name: tilt.bootplsglm ### Title: Tilted bootstrap for PLS models ### Aliases: tilt.bootplsglm ### Keywords: models ### ** Examples ## No test: data(aze_compl) Xaze_compl<-aze_compl[,2:34] yaze_compl<-aze_compl$y dataset <- cbind(y=yaze_compl,Xaze_compl) # Lazraq-Cleroux PLS bootstrap Classic aze_compl.tilt.boot <- tilt.bootplsglm(plsRglm(yaze_compl,Xaze_compl,3, modele="pls-glm-logistic", family=NULL), statistic=coefs.plsRglm, R=c(499, 100, 100), alpha=c(0.025, 0.975), sim="ordinary", stype="i", index=1) aze_compl.tilt.boot <- tilt.bootplsglm(plsRglm(yaze_compl,Xaze_compl,3, modele="pls-glm-logistic"), statistic=coefs.plsRglm, R=c(499, 100, 100), alpha=c(0.025, 0.975), sim="ordinary", stype="i", index=1) aze_compl.tilt.boot <- tilt.bootplsglm(plsRglm(yaze_compl,Xaze_compl,3, modele="pls-glm-family", family=binomial), statistic=coefs.plsRglm, R=c(499, 100, 100), alpha=c(0.025, 0.975), sim="ordinary", stype="i", index=1) boxplots.bootpls(aze_compl.tilt.boot,1:2) # PLS bootstrap balanced aze_compl.tilt.boot <- tilt.bootplsglm(plsRglm(yaze_compl,Xaze_compl,3, modele="pls-glm-logistic"), statistic=coefs.plsRglm, R=c(499, 100, 100), alpha=c(0.025, 0.975), sim="balanced", stype="i", index=1) ## End(No test)
/data/genthat_extracted_code/plsRglm/examples/tilt.bootplsglm.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
1,264
r
library(plsRglm) ### Name: tilt.bootplsglm ### Title: Tilted bootstrap for PLS models ### Aliases: tilt.bootplsglm ### Keywords: models ### ** Examples ## No test: data(aze_compl) Xaze_compl<-aze_compl[,2:34] yaze_compl<-aze_compl$y dataset <- cbind(y=yaze_compl,Xaze_compl) # Lazraq-Cleroux PLS bootstrap Classic aze_compl.tilt.boot <- tilt.bootplsglm(plsRglm(yaze_compl,Xaze_compl,3, modele="pls-glm-logistic", family=NULL), statistic=coefs.plsRglm, R=c(499, 100, 100), alpha=c(0.025, 0.975), sim="ordinary", stype="i", index=1) aze_compl.tilt.boot <- tilt.bootplsglm(plsRglm(yaze_compl,Xaze_compl,3, modele="pls-glm-logistic"), statistic=coefs.plsRglm, R=c(499, 100, 100), alpha=c(0.025, 0.975), sim="ordinary", stype="i", index=1) aze_compl.tilt.boot <- tilt.bootplsglm(plsRglm(yaze_compl,Xaze_compl,3, modele="pls-glm-family", family=binomial), statistic=coefs.plsRglm, R=c(499, 100, 100), alpha=c(0.025, 0.975), sim="ordinary", stype="i", index=1) boxplots.bootpls(aze_compl.tilt.boot,1:2) # PLS bootstrap balanced aze_compl.tilt.boot <- tilt.bootplsglm(plsRglm(yaze_compl,Xaze_compl,3, modele="pls-glm-logistic"), statistic=coefs.plsRglm, R=c(499, 100, 100), alpha=c(0.025, 0.975), sim="balanced", stype="i", index=1) ## End(No test)
####Programming Final Project ##Predicting Best picture nomination for the year 2017 setwd("C:\\Users\\Rithvik\\Desktop\\MSBA\\Programming for Analytics\\Final Project") #Reading the training, validation & testing dataset movie_train<-read.csv("movie_train.csv", header=TRUE) movie_val<-read.csv("movie_val.csv",header=TRUE) movie_test<-read.csv("movie_test.csv", header=TRUE) #The movies being nominated should have a minimum duration of 40 minutes #so checking if there are any movies which doesnt satisfy our condition length(movie_train[movie_train$duration<=40,]) length(movie_val[movie_val$duration<=40,]) length(movie_test[movie_test$duration<=40,]) #we see that none of the movies have duration less than 40 #we see that budget value is null for many of the variables and #after analyzing the train data set we come to a conclusion that budget is around 50% of the gross value length(movie_train$budget[is.na(movie_train$budget)]) #there are no observations without the budget value #if we have missing data for budget, we replace the missing budget values with 50% of the gross value movie_train$budget[is.na(movie_train$budget)] <- movie_train$gross[is.na(movie_train$budget)]*0.50 #now checking if there are any missing budget values in the validation data length(movie_val$budget[is.na(movie_val$budget)]) #Similarly applying the strategy for the test dataset length(movie_test$budget[is.na(movie_test$budget)]) #there are 6 observations without the budget value #now replacing the missing budget values with 60% of the gross value in the test set movie_test$budget[is.na(movie_test$budget)]<- movie_test$gross[is.na(movie_test$budget)]*0.6 # now looking at the data movie_train str(movie_train) movie_val str(movie_val) movie_test str(movie_test) #we need to convert the nominated column to categorical movie_train$nominated<- as.factor(movie_train$nominated) str(movie_train) ## performing stepwise regression library(MASS) 
model<-glm(nominated~duration+gross+budget+facebook_likes+average_rating+sentiment_score,family=binomial(link='logit'),data=movie_train) stepwise_reg<-stepAIC(model,direction="both") stepwise_reg$anova #now we need to build the logistic regression model<- glm(nominated~duration+gross+budget+facebook_likes+average_rating,family=binomial(link='logit'),data=movie_train) summary(model) ### performing SLR model<- glm(nominated~duration,family=binomial(link='logit'),data=movie_train) summary(model) model<- glm(nominated~gross,family=binomial(link='logit'),data=movie_train) summary(model) model<- glm(nominated~budget,family=binomial(link='logit'),data=movie_train) summary(model) model<- glm(nominated~facebook_likes,family=binomial(link='logit'),data=movie_train) summary(model) model<- glm(nominated~average_rating,family=binomial(link='logit'),data=movie_train) summary(model) model<- glm(nominated~sentiment_score,family=binomial(link='logit'),data=movie_train) summary(model) #anova(model,test = "Chisq") #checking the R2 value for the Logistic Regression library(pscl) pR2(model) #now predicting the nominations for validation data and finding the missclassification rate library(ROCR) new_nominations<-predict(model,newdata = movie_val,type = "response") #fixed_nominations<-ifelse(new_nominations>=0.5,1,0) #table(fixed_nominations) #if there are less than 8 movies with probability greater than 0.5 then we choose the top 8 movies with highest probability ordered_nomination<- order(-new_nominations) fixed_nominations=ifelse(new_nominations>0,0,1) for (i in ordered_nomination[1:8]){ fixed_nominations[i]=1 } table(fixed_nominations) movie_val$Predictions<-fixed_nominations confusion_matrix<-table(movie_val$Predictions,movie_val$nominated) confusion_matrix misclass_rate<-mean(movie_val$Predictions!=movie_val$nominated) misclass_rate #now plotting the roc curve and finding the area under the curve (AUC) library(ROCR) pred<-prediction(fixed_nominations,movie_val$nominated) 
roc<-performance(pred,measure="tpr",x.measure="fpr") plot(roc,main="ROC Curve") #Now finding the AUC value auc<- performance(pred,measure='auc') auc@y.values[[1]] #now plotting the lift curve lift_curve<-performance(pred,"lift","rpp") plot(lift_curve,main="Lift Curve",colorize=TRUE) #the output of lift curve means that the chance of the #movie being nominated is 2 times higher if we selected a movie from top 20% of the predicted probabilities #finding the movies which are nominated for the best picture for the year 2017 new_nominations_test<-predict(model,newdata = movie_test,type = "response") #ordering the nomination predictions in descending order ordered_nomination_test<-order(-new_nominations_test) movie_test$nomination_prob<- new_nominations_test #Assigning 0(Not Nominated) to all the movies at first fixed_nominations_test<-ifelse(new_nominations_test>=0,0,1) #Now assigning 1(Nominated) to the Top 8 movies with highest probability of being nominated for (i in ordered_nomination_test[1:8]){ fixed_nominations_test[i]=1 } table(fixed_nominations_test) #Storing the predicted nominations in our actual test dataset movie_test$Predictions<-fixed_nominations_test str(movie_test) #now listing the movies which our model thinks are going to be nominated nomination_predicted<-movie_test[movie_test$Predictions==1,] #Printing the list of movies which are predicted to be nominated for the best picture award nomination_predicted$movie_name[order(-nomination_predicted$nomination_prob)]
/3 Logistic Regression code/Logistic.R
no_license
Qinhui-Xu/OscarNominationPrediction
R
false
false
5,643
r
####Programming Final Project ##Predicting Best picture nomination for the year 2017 setwd("C:\\Users\\Rithvik\\Desktop\\MSBA\\Programming for Analytics\\Final Project") #Reading the training, validation & testing dataset movie_train<-read.csv("movie_train.csv", header=TRUE) movie_val<-read.csv("movie_val.csv",header=TRUE) movie_test<-read.csv("movie_test.csv", header=TRUE) #The movies being nominated should have a minimum duration of 40 minutes #so checking if there are any movies which doesnt satisfy our condition length(movie_train[movie_train$duration<=40,]) length(movie_val[movie_val$duration<=40,]) length(movie_test[movie_test$duration<=40,]) #we see that none of the movies have duration less than 40 #we see that budget value is null for many of the variables and #after analyzing the train data set we come to a conclusion that budget is around 50% of the gross value length(movie_train$budget[is.na(movie_train$budget)]) #there are no observations without the budget value #if we have missing data for budget, we replace the missing budget values with 50% of the gross value movie_train$budget[is.na(movie_train$budget)] <- movie_train$gross[is.na(movie_train$budget)]*0.50 #now checking if there are any missing budget values in the validation data length(movie_val$budget[is.na(movie_val$budget)]) #Similarly applying the strategy for the test dataset length(movie_test$budget[is.na(movie_test$budget)]) #there are 6 observations without the budget value #now replacing the missing budget values with 60% of the gross value in the test set movie_test$budget[is.na(movie_test$budget)]<- movie_test$gross[is.na(movie_test$budget)]*0.6 # now looking at the data movie_train str(movie_train) movie_val str(movie_val) movie_test str(movie_test) #we need to convert the nominated column to categorical movie_train$nominated<- as.factor(movie_train$nominated) str(movie_train) ## performing stepwise regression library(MASS) 
model<-glm(nominated~duration+gross+budget+facebook_likes+average_rating+sentiment_score,family=binomial(link='logit'),data=movie_train) stepwise_reg<-stepAIC(model,direction="both") stepwise_reg$anova #now we need to build the logistic regression model<- glm(nominated~duration+gross+budget+facebook_likes+average_rating,family=binomial(link='logit'),data=movie_train) summary(model) ### performing SLR model<- glm(nominated~duration,family=binomial(link='logit'),data=movie_train) summary(model) model<- glm(nominated~gross,family=binomial(link='logit'),data=movie_train) summary(model) model<- glm(nominated~budget,family=binomial(link='logit'),data=movie_train) summary(model) model<- glm(nominated~facebook_likes,family=binomial(link='logit'),data=movie_train) summary(model) model<- glm(nominated~average_rating,family=binomial(link='logit'),data=movie_train) summary(model) model<- glm(nominated~sentiment_score,family=binomial(link='logit'),data=movie_train) summary(model) #anova(model,test = "Chisq") #checking the R2 value for the Logistic Regression library(pscl) pR2(model) #now predicting the nominations for validation data and finding the missclassification rate library(ROCR) new_nominations<-predict(model,newdata = movie_val,type = "response") #fixed_nominations<-ifelse(new_nominations>=0.5,1,0) #table(fixed_nominations) #if there are less than 8 movies with probability greater than 0.5 then we choose the top 8 movies with highest probability ordered_nomination<- order(-new_nominations) fixed_nominations=ifelse(new_nominations>0,0,1) for (i in ordered_nomination[1:8]){ fixed_nominations[i]=1 } table(fixed_nominations) movie_val$Predictions<-fixed_nominations confusion_matrix<-table(movie_val$Predictions,movie_val$nominated) confusion_matrix misclass_rate<-mean(movie_val$Predictions!=movie_val$nominated) misclass_rate #now plotting the roc curve and finding the area under the curve (AUC) library(ROCR) pred<-prediction(fixed_nominations,movie_val$nominated) 
roc<-performance(pred,measure="tpr",x.measure="fpr") plot(roc,main="ROC Curve") #Now finding the AUC value auc<- performance(pred,measure='auc') auc@y.values[[1]] #now plotting the lift curve lift_curve<-performance(pred,"lift","rpp") plot(lift_curve,main="Lift Curve",colorize=TRUE) #the output of lift curve means that the chance of the #movie being nominated is 2 times higher if we selected a movie from top 20% of the predicted probabilities #finding the movies which are nominated for the best picture for the year 2017 new_nominations_test<-predict(model,newdata = movie_test,type = "response") #ordering the nomination predictions in descending order ordered_nomination_test<-order(-new_nominations_test) movie_test$nomination_prob<- new_nominations_test #Assigning 0(Not Nominated) to all the movies at first fixed_nominations_test<-ifelse(new_nominations_test>=0,0,1) #Now assigning 1(Nominated) to the Top 8 movies with highest probability of being nominated for (i in ordered_nomination_test[1:8]){ fixed_nominations_test[i]=1 } table(fixed_nominations_test) #Storing the predicted nominations in our actual test dataset movie_test$Predictions<-fixed_nominations_test str(movie_test) #now listing the movies which our model thinks are going to be nominated nomination_predicted<-movie_test[movie_test$Predictions==1,] #Printing the list of movies which are predicted to be nominated for the best picture award nomination_predicted$movie_name[order(-nomination_predicted$nomination_prob)]
#' Conditional posterior distribution of the distinct Ystar in the case of #' censoring #' #' This function evaluates the ratio of conditional posterior distributions of #' the distinct latents Ystar. #' #' For internal use #' #' @keywords internal #' @examples #' #' ## The function is currently defined as #' function(v, v2, xleft, xright, censor_code, distr.k, sigma.k, #' distr.p0, mu.p0, sigma.p0) { #' alpha <- p0(v, distr = distr.p0, mu = mu.p0, sigma = sigma.p0) / p0(v2, #' distr = distr.p0, mu = mu.p0, sigma = sigma.p0 #' ) #' Prod <- 1 #' for (i in seq_along(xleft)) { #' fac <- dkcens2_1val( #' xleft = xleft[i], xright = xright[i], #' c_code = censor_code[i], distr = distr.k, mu = v, #' sigma = sigma.k #' ) / dkcens2_1val( #' xleft = xleft[i], xright = xright[i], #' c_code = censor_code[i], distr = distr.k, mu = v2, #' sigma = sigma.k #' ) #' Prod <- Prod * fac #' } #' f <- alpha * Prod #' return(f) #' } rfystarcens2 <- function(v, v2, xleft, xright, censor_code, distr.k, sigma.k, distr.p0, mu.p0, sigma.p0) { alpha <- p0(v, distr = distr.p0, mu = mu.p0, sigma = sigma.p0) / p0(v2, distr = distr.p0, mu = mu.p0, sigma = sigma.p0 ) Prod <- 1 for (i in seq_along(xleft)) { fac <- dkcens2_1val( xleft = xleft[i], xright = xright[i], c_code = censor_code[i], distr = distr.k, mu = v, sigma = sigma.k ) / dkcens2_1val( xleft = xleft[i], xright = xright[i], c_code = censor_code[i], distr = distr.k, mu = v2, sigma = sigma.k ) Prod <- Prod * fac } f <- alpha * Prod return(f) }
/R/rfystarcens2.R
no_license
cran/BNPdensity
R
false
false
1,693
r
#' Conditional posterior distribution of the distinct Ystar in the case of #' censoring #' #' This function evaluates the ratio of conditional posterior distributions of #' the distinct latents Ystar. #' #' For internal use #' #' @keywords internal #' @examples #' #' ## The function is currently defined as #' function(v, v2, xleft, xright, censor_code, distr.k, sigma.k, #' distr.p0, mu.p0, sigma.p0) { #' alpha <- p0(v, distr = distr.p0, mu = mu.p0, sigma = sigma.p0) / p0(v2, #' distr = distr.p0, mu = mu.p0, sigma = sigma.p0 #' ) #' Prod <- 1 #' for (i in seq_along(xleft)) { #' fac <- dkcens2_1val( #' xleft = xleft[i], xright = xright[i], #' c_code = censor_code[i], distr = distr.k, mu = v, #' sigma = sigma.k #' ) / dkcens2_1val( #' xleft = xleft[i], xright = xright[i], #' c_code = censor_code[i], distr = distr.k, mu = v2, #' sigma = sigma.k #' ) #' Prod <- Prod * fac #' } #' f <- alpha * Prod #' return(f) #' } rfystarcens2 <- function(v, v2, xleft, xright, censor_code, distr.k, sigma.k, distr.p0, mu.p0, sigma.p0) { alpha <- p0(v, distr = distr.p0, mu = mu.p0, sigma = sigma.p0) / p0(v2, distr = distr.p0, mu = mu.p0, sigma = sigma.p0 ) Prod <- 1 for (i in seq_along(xleft)) { fac <- dkcens2_1val( xleft = xleft[i], xright = xright[i], c_code = censor_code[i], distr = distr.k, mu = v, sigma = sigma.k ) / dkcens2_1val( xleft = xleft[i], xright = xright[i], c_code = censor_code[i], distr = distr.k, mu = v2, sigma = sigma.k ) Prod <- Prod * fac } f <- alpha * Prod return(f) }
library(CAMAN) ### Name: mixcov ### Title: Fitting mixture models with covariates ### Aliases: mixcov ### Keywords: meta-analysis, covariates, mixture model ### ** Examples ### Toy data: simulate subjects with a different relationship between age and salariy grps = sample(1:3,70, replace=TRUE) #assign each person to one group salary=NULL age = round(runif(70) * 47 + 18) #random effects: age has a different influence (slope) on the salary salary[grps == 1] = 2000 + 12 * age[grps==1] salary[grps == 2] = 4000 + 4 * age[grps==2] salary[grps == 3] = 3200 + (-15) * age[grps==3] salary = salary + rnorm(70)*30 #some noise sex =sample(c("m","w"), 70, replace=TRUE) salary[sex=="m"] = salary[sex=="m"] * 1.2 #men earn 20 percent more than women salaryData = data.frame(salary=salary, age=age, sex=sex) tstSalary <- mixcov(dep="salary", fixed="sex", random="age" ,data=salaryData, k=3,family="gaussian", acc=10^-3) ### POISSON data: data(NoP) ames3 <- mixcov(dep="count",fixed=c("dose", "logd"),random="",data=NoP, k=3,family="poisson") ### Gaussian data data(betaplasma) beta4 <- mixcov(dep="betacaro", fixed=c("chol","sex","bmi"), random="betadiet", data=betaplasma, k=4, family="gaussian")
/data/genthat_extracted_code/CAMAN/examples/mixcov.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
1,281
r
library(CAMAN) ### Name: mixcov ### Title: Fitting mixture models with covariates ### Aliases: mixcov ### Keywords: meta-analysis, covariates, mixture model ### ** Examples ### Toy data: simulate subjects with a different relationship between age and salariy grps = sample(1:3,70, replace=TRUE) #assign each person to one group salary=NULL age = round(runif(70) * 47 + 18) #random effects: age has a different influence (slope) on the salary salary[grps == 1] = 2000 + 12 * age[grps==1] salary[grps == 2] = 4000 + 4 * age[grps==2] salary[grps == 3] = 3200 + (-15) * age[grps==3] salary = salary + rnorm(70)*30 #some noise sex =sample(c("m","w"), 70, replace=TRUE) salary[sex=="m"] = salary[sex=="m"] * 1.2 #men earn 20 percent more than women salaryData = data.frame(salary=salary, age=age, sex=sex) tstSalary <- mixcov(dep="salary", fixed="sex", random="age" ,data=salaryData, k=3,family="gaussian", acc=10^-3) ### POISSON data: data(NoP) ames3 <- mixcov(dep="count",fixed=c("dose", "logd"),random="",data=NoP, k=3,family="poisson") ### Gaussian data data(betaplasma) beta4 <- mixcov(dep="betacaro", fixed=c("chol","sex","bmi"), random="betadiet", data=betaplasma, k=4, family="gaussian")
## ----echo=F,knitr-options,message=FALSE, warning=FALSE------------------------ library(knitr) opts_chunk$set(fig.align = 'center', fig.width = 6, fig.height = 5, dev = 'png') options(warn=-1) ## ----eval=FALSE--------------------------------------------------------------- # suppressMessages(library(epiConv)) # suppressMessages(library(SingleCellExperiment)) # # mat<-readMM(file="bmmc_matrix.mtx") # cell_info<-read.table(file="bmmc_ident.tsv") # colnames(cell_info)<-c("barcode","ident") # peak<-read.table(file="peaks.bed") # colnames(peak)<-c("seqnames","start","end") # rownames(mat)<-paste(peak$seqnames,":", # peak$start,"-", # peak$end,sep="") # colnames(mat)<-cell_info[,1] # sce<-SingleCellExperiment(assays=SimpleList(counts=as(mat,"dgCMatrix")), # rowRanges=GRanges(peak),colData=cell_info) # colData(sce)$lib_size<-lib.estimate(assays(sce)$counts) # sce<-subset(sce,select=colData(sce)$lib_size>500) ## ----eval=FALSE--------------------------------------------------------------- # # row_sample<-1:ncol(sce) # Smat<-run.epiConv(mat=assays(sce)$counts, # row_sample=row_sample, # lib_size=sce$lib_size, # nbootstrap=3, # nsample=floor(nrow(sce)*0.5), # bin=1000, # inf=(-10), # backingfile="bmmc.backup", # descriptorfile="bmmc.backup.descriptor") ## ----eval=FALSE--------------------------------------------------------------- # snn_mat<-Smat2snn(Smat,knn=20) # dis<-snn_mat # dis@x<-max(dis@x)-dis@x+1e-9 # umap_res<-uwot::umap(X=dis,n_neighbors=20) # ## ----eval=FALSE--------------------------------------------------------------- # batch<-factor(sce$ident) # Smat_corrected<-deepcopy(Smat, # backingfile="bmmc2.backup", # descriptorfile="bmmc2.backup.descriptor") # res_anchor<-epiConv.anchor(Smat=Smat_corrected, # row_sample=row_sample, # batch=batch, # reference="Resting", # neigs=30, # features=NULL, # knn_target=50, # knn_reference=20, # threshold=2) ## ----eval=FALSE--------------------------------------------------------------- # 
res_eigs<-epiConv.correct(Smat=Smat_corrected, # row_sample=row_sample, # batch=batch, # reference="Resting", # neigs=30, # knn_update=res_anchor$knn_update) # ## ----eval=FALSE--------------------------------------------------------------- # snn_mat_corrected<-Smat2snn(Smat_corrected,knn=20) # dis<-snn_mat_corrected # dis@x<-max(dis@x)-dis@x+1e-9 # umap_res_corrected<-uwot::umap(X=dis,n_neighbors=20) # # plot(umap_res,pch="+",cex=0.5,col=factor(sce$ident)) # plot(umap_res_corrected,pch="+",cex=0.5,col=factor(sce$ident)) ## ----eval=FALSE--------------------------------------------------------------- # clust<-epiConv.louvain(snn=snn_mat_corrected,resolution=c(0.8,0.6,0.4,0.2)) # head(clust)
/vignettes/my-vignette.R
permissive
LiLin-biosoft/epiConv
R
false
false
3,345
r
## ----echo=F,knitr-options,message=FALSE, warning=FALSE------------------------ library(knitr) opts_chunk$set(fig.align = 'center', fig.width = 6, fig.height = 5, dev = 'png') options(warn=-1) ## ----eval=FALSE--------------------------------------------------------------- # suppressMessages(library(epiConv)) # suppressMessages(library(SingleCellExperiment)) # # mat<-readMM(file="bmmc_matrix.mtx") # cell_info<-read.table(file="bmmc_ident.tsv") # colnames(cell_info)<-c("barcode","ident") # peak<-read.table(file="peaks.bed") # colnames(peak)<-c("seqnames","start","end") # rownames(mat)<-paste(peak$seqnames,":", # peak$start,"-", # peak$end,sep="") # colnames(mat)<-cell_info[,1] # sce<-SingleCellExperiment(assays=SimpleList(counts=as(mat,"dgCMatrix")), # rowRanges=GRanges(peak),colData=cell_info) # colData(sce)$lib_size<-lib.estimate(assays(sce)$counts) # sce<-subset(sce,select=colData(sce)$lib_size>500) ## ----eval=FALSE--------------------------------------------------------------- # # row_sample<-1:ncol(sce) # Smat<-run.epiConv(mat=assays(sce)$counts, # row_sample=row_sample, # lib_size=sce$lib_size, # nbootstrap=3, # nsample=floor(nrow(sce)*0.5), # bin=1000, # inf=(-10), # backingfile="bmmc.backup", # descriptorfile="bmmc.backup.descriptor") ## ----eval=FALSE--------------------------------------------------------------- # snn_mat<-Smat2snn(Smat,knn=20) # dis<-snn_mat # dis@x<-max(dis@x)-dis@x+1e-9 # umap_res<-uwot::umap(X=dis,n_neighbors=20) # ## ----eval=FALSE--------------------------------------------------------------- # batch<-factor(sce$ident) # Smat_corrected<-deepcopy(Smat, # backingfile="bmmc2.backup", # descriptorfile="bmmc2.backup.descriptor") # res_anchor<-epiConv.anchor(Smat=Smat_corrected, # row_sample=row_sample, # batch=batch, # reference="Resting", # neigs=30, # features=NULL, # knn_target=50, # knn_reference=20, # threshold=2) ## ----eval=FALSE--------------------------------------------------------------- # 
res_eigs<-epiConv.correct(Smat=Smat_corrected, # row_sample=row_sample, # batch=batch, # reference="Resting", # neigs=30, # knn_update=res_anchor$knn_update) # ## ----eval=FALSE--------------------------------------------------------------- # snn_mat_corrected<-Smat2snn(Smat_corrected,knn=20) # dis<-snn_mat_corrected # dis@x<-max(dis@x)-dis@x+1e-9 # umap_res_corrected<-uwot::umap(X=dis,n_neighbors=20) # # plot(umap_res,pch="+",cex=0.5,col=factor(sce$ident)) # plot(umap_res_corrected,pch="+",cex=0.5,col=factor(sce$ident)) ## ----eval=FALSE--------------------------------------------------------------- # clust<-epiConv.louvain(snn=snn_mat_corrected,resolution=c(0.8,0.6,0.4,0.2)) # head(clust)
library("shiny") body <- dashboardBody( fluidRow( # Column 1 column(width = 6, infoBox( width = NULL, title = "Regular Box, Column 1", subtitle = "Gimme those Star Wars" ) ), # Column 2 column(width = 6, infoBox( width = NULL, title = "Regular Box, Column 2", subtitle = "Don't let them end" ) ) ) ) ui <- dashboardPage(header = dashboardHeader(), sidebar = dashboardSidebar(), body = body ) server <- function(input, output) {} shinyApp(ui, server)
/01_R/Reporting/Building Dashboards with shinydashboard/09_fluidRow_plus_colum.R
no_license
IVI-M/datacamp
R
false
false
645
r
library("shiny") body <- dashboardBody( fluidRow( # Column 1 column(width = 6, infoBox( width = NULL, title = "Regular Box, Column 1", subtitle = "Gimme those Star Wars" ) ), # Column 2 column(width = 6, infoBox( width = NULL, title = "Regular Box, Column 2", subtitle = "Don't let them end" ) ) ) ) ui <- dashboardPage(header = dashboardHeader(), sidebar = dashboardSidebar(), body = body ) server <- function(input, output) {} shinyApp(ui, server)
library(testthat) library(PROJ) test_check("PROJ")
/tests/testthat.R
no_license
paleolimbot/PROJ
R
false
false
52
r
library(testthat) library(PROJ) test_check("PROJ")
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/simulation.R \name{vbeta} \alias{vbeta} \title{Variance of the beta distribution} \usage{ vbeta(shape1, shape2) } \arguments{ \item{shape1, shape2}{Shape parameters of the beta density.} } \value{ The variance of the beta distribution. } \description{ Variance of the beta distribution }
/man/vbeta.Rd
permissive
JonasMoss/standardized
R
false
true
366
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/simulation.R \name{vbeta} \alias{vbeta} \title{Variance of the beta distribution} \usage{ vbeta(shape1, shape2) } \arguments{ \item{shape1, shape2}{Shape parameters of the beta density.} } \value{ The variance of the beta distribution. } \description{ Variance of the beta distribution }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/MarkovChain.r \docType{methods} \name{plot,MarkovChain-method} \alias{plot,MarkovChain-method} \title{Plots a \code{MarkovChain} object} \usage{ \S4method{plot}{MarkovChain}(x, order = 1, digits = 2, minProbability = 0, ...) } \arguments{ \item{x}{An instance of the \code{MarkovChain}-class} \item{order}{The order of the transition matrix that should be plotted} \item{digits}{The number of digits of the transition probabilities} \item{minProbability}{Only transitions with a probability >= the specified minProbability will be shown} \item{...}{Further parameters for the \code{plot}-function in package \code{igraph}} } \description{ Plots a \code{MarkovChain} object } \section{Methods}{ \describe{ \item{list("signature(x = \"MarkovChain\", order = \"numeric\", digits = \"numeric\")")}{ Plots the transition matrix with order \code{order} of a \code{MarkovChain} object as graph. } } } \author{ Michael Scholz \email{michael.scholz@uni-passau.de} } \keyword{methods}
/man/plot-method.Rd
no_license
cran/clickstream
R
false
true
1,059
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/MarkovChain.r \docType{methods} \name{plot,MarkovChain-method} \alias{plot,MarkovChain-method} \title{Plots a \code{MarkovChain} object} \usage{ \S4method{plot}{MarkovChain}(x, order = 1, digits = 2, minProbability = 0, ...) } \arguments{ \item{x}{An instance of the \code{MarkovChain}-class} \item{order}{The order of the transition matrix that should be plotted} \item{digits}{The number of digits of the transition probabilities} \item{minProbability}{Only transitions with a probability >= the specified minProbability will be shown} \item{...}{Further parameters for the \code{plot}-function in package \code{igraph}} } \description{ Plots a \code{MarkovChain} object } \section{Methods}{ \describe{ \item{list("signature(x = \"MarkovChain\", order = \"numeric\", digits = \"numeric\")")}{ Plots the transition matrix with order \code{order} of a \code{MarkovChain} object as graph. } } } \author{ Michael Scholz \email{michael.scholz@uni-passau.de} } \keyword{methods}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/signatures.R \name{sign_bdc} \alias{sign_bdc} \title{Signature in hrefs provided by the STAC from the Brazil Data Cube project.} \usage{ sign_bdc(access_token = NULL, ...) } \arguments{ \item{access_token}{a \code{character} with the access token parameter to access Brazil Data Cube assets.} \item{...}{additional parameters can be supplied to the \code{GET} function of the \code{httr} package.} } \value{ a \code{function} that signs each item assets. } \description{ To sign the hrefs with your token you need to store it in an environment variable in \code{BDC_ACCESS_KEY}or use \code{acess_token} parameter. } \examples{ \dontrun{ # STACItemCollection object stac_obj <- stac("https://brazildatacube.dpi.inpe.br/stac/") \%>\% stac_search(collections = "CB4_64_16D_STK-1", datetime = "2019-06-01/2019-08-01") \%>\% stac_search() \%>\% get_request() # signing each item href stac_obj \%>\% items_sign(sign_fn = sign_bdc(access_token = "123")) } }
/man/sign_bdc.Rd
permissive
ottoneves/rstac
R
false
true
1,053
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/signatures.R \name{sign_bdc} \alias{sign_bdc} \title{Signature in hrefs provided by the STAC from the Brazil Data Cube project.} \usage{ sign_bdc(access_token = NULL, ...) } \arguments{ \item{access_token}{a \code{character} with the access token parameter to access Brazil Data Cube assets.} \item{...}{additional parameters can be supplied to the \code{GET} function of the \code{httr} package.} } \value{ a \code{function} that signs each item assets. } \description{ To sign the hrefs with your token you need to store it in an environment variable in \code{BDC_ACCESS_KEY}or use \code{acess_token} parameter. } \examples{ \dontrun{ # STACItemCollection object stac_obj <- stac("https://brazildatacube.dpi.inpe.br/stac/") \%>\% stac_search(collections = "CB4_64_16D_STK-1", datetime = "2019-06-01/2019-08-01") \%>\% stac_search() \%>\% get_request() # signing each item href stac_obj \%>\% items_sign(sign_fn = sign_bdc(access_token = "123")) } }
assignmentAsFirstArgFuns <- c('nimArr_rmnorm_chol', 'nimArr_rmvt_chol', 'nimArr_rwish_chol', 'nimArr_rinvwish_chol', 'nimArr_rcar_normal', 'nimArr_rmulti', 'nimArr_rdirch', 'getValues', 'getValuesIndexRange', 'initialize', 'setWhich', 'setRepVectorTimes', 'assignVectorToNimArr', 'dimNimArr', 'assignNimArrToNimArr') setSizeNotNeededOperators <- c('setWhich', 'setRepVectorTimes', 'SEXP_2_NimArr', 'nimVerbatim') operatorsAllowedBeforeIndexBracketsWithoutLifting <- c('map', 'dim', 'mvAccessRow', 'nfVar') sizeCalls <- c(makeCallList(binaryOperators, 'sizeBinaryCwise'), makeCallList(binaryMidLogicalOperators, 'sizeBinaryCwiseLogical'), makeCallList(binaryOrUnaryOperators, 'sizeBinaryUnaryCwise'), makeCallList(unaryOperators, 'sizeUnaryCwise'), makeCallList(unaryOrNonaryOperators, 'sizeUnaryNonaryCwise'), makeCallList(assignmentOperators, 'sizeAssign'), makeCallList(reductionUnaryOperators, 'sizeUnaryReduction'), makeCallList(matrixSquareReductionOperators, 'sizeMatrixSquareReduction'), makeCallList(reductionBinaryOperators, 'sizeBinaryReduction'), makeCallList(matrixMultOperators, 'sizeMatrixMult'), makeCallList(matrixFlipOperators, 'sizeTranspose'), makeCallList(matrixSolveOperators, 'sizeSolveOp'), makeCallList(matrixSquareOperators, 'sizeUnaryCwiseSquare'), makeCallList(nimbleListReturningOperators, 'sizeNimbleListReturningFunction'), nimOptim = 'sizeOptim', nimOptimDefaultControl = 'sizeOptimDefaultControl', list('debugSizeProcessing' = 'sizeProxyForDebugging', diag = 'sizeDiagonal', dim = 'sizeDim', RRtest_add = 'sizeRecyclingRule', which = 'sizeWhich', nimC = 'sizeConcatenate', nimRep = 'sizeRep', nimSeqBy = 'sizeSeq', nimSeqLen = 'sizeSeq', nimSeqByLen = 'sizeSeq', 'return' = 'sizeReturn', 'asRow' = 'sizeAsRowOrCol', 'asCol' = 'sizeAsRowOrCol', makeNewNimbleListObject = 'sizeNewNimbleList', getParam = 'sizeGetParam', getBound = 'sizeGetBound', nimSwitch = 'sizeSwitch', ## asDoublePtr = 'sizeasDoublePtr', '[' = 'sizeIndexingBracket', '[[' = 'sizeDoubleBracket', ## 
for nimbleFunctionList, this will always go through chainedCall(nfList[[i]], 'foo')(arg1, arg2) chainedCall = 'sizeChainedCall', nfVar = 'sizeNFvar', map = 'sizemap', ':' = 'sizeColonOperator', ##dim = 'sizeDimOperator', 'if' = 'recurseSetSizes', ##OK 'while' = 'recurseSetSizes', ## callC = 'sizecallC', 'for' = 'sizeFor', cppPointerDereference = 'sizeCppPointerDereference', values = 'sizeValues', '(' = 'sizeUnaryCwise', setSize = 'sizeSetSize', ## OK but not done for numericLists resizeNoPtr = 'sizeResizeNoPtr', ## may not be used any more nimArr_rcat = 'sizeScalarRecurse', nimArr_rinterval = 'sizeScalarRecurse', nimPrint = 'sizeforceEigenize', nimDerivs = 'sizeNimDerivs', ##nimCat = 'sizeforceEigenize', as.integer = 'sizeUnaryCwise', ## Note as.integer and as.numeric will not work on a non-scalar yet as.numeric = 'sizeUnaryCwise', nimArrayGeneral = 'sizeNimArrayGeneral', setAll = 'sizeOneEigenCommand', voidPtr = 'sizeVoidPtr', run.time = 'sizeRunTime', PROTECT = 'sizePROTECT', NimArr_2_SEXP = 'sizePROTECT', ## same need Reval = 'sizeReval', nimbleConvert = 'sizeNimbleConvert', nimbleUnconvert = 'sizeNimbleUnconvert', asReturnSymbol = 'sizeAsReturnSymbol'), makeCallList(scalar_distribution_dFuns, 'sizeRecyclingRule'), makeCallList(scalar_distribution_pFuns, 'sizeRecyclingRule'), makeCallList(scalar_distribution_qFuns, 'sizeRecyclingRule'), makeCallList(scalar_distribution_rFuns, 'sizeRecyclingRuleRfunction'), makeCallList(distributionFuns[!(distributionFuns %in% c(scalar_distribution_dFuns, scalar_distribution_pFuns, scalar_distribution_qFuns, scalar_distribution_rFuns))], 'sizeScalarRecurse'), # R dist functions that are not used by NIMBLE but we allow in DSL makeCallList(paste0(c('d','q','p'), 't'), 'sizeRecyclingRule'), rt = 'sizeRecyclingRuleRfunction', makeCallList(paste0(c('d','q','p'), 'exp'), 'sizeRecyclingRule'), rexp = 'sizeRecyclingRuleRfunction', makeCallList(c('isnan','ISNAN','ISNA'), 'sizeScalarRecurse'), makeCallList(c('nimArr_dmnorm_chol', 
'nimArr_dmvt_chol', 'nimArr_dwish_chol', 'nimArr_dinvwish_chol', 'nimArr_dcar_normal', 'nimArr_dmulti', 'nimArr_dcat', 'nimArr_dinterval', 'nimArr_ddirch'), 'sizeScalarRecurse'), makeCallList(c('nimArr_rmnorm_chol', 'nimArr_rmvt_chol', 'nimArr_rwish_chol', 'nimArr_rinvwish_chol', 'nimArr_rcar_normal', 'nimArr_rmulti', 'nimArr_rdirch'), 'sizeRmultivarFirstArg'), makeCallList(c('decide', 'size', 'getsize','getNodeFunctionIndexedInfo', 'endNimbleTimer'), 'sizeScalar'), makeCallList(c('calculate','calculateDiff', 'getLogProb'), 'sizeScalarModelOp'), simulate = 'sizeSimulate', makeCallList(c('blank', 'nfMethod', 'getPtr', 'startNimbleTimer'), 'sizeUndefined') ##'nimFunListAccess' ) scalarOutputTypes <- list(decide = 'logical', size = 'integer', isnan = 'logical', ISNA = 'logical', '!' = 'logical', getNodeFunctionIndexedInfo = 'double', endNimbleTimer = 'double') ## exprClasses_setSizes fills in the type information of exprClass code ## code is an exprClas object ## typeEnv is an environment returned by exprClasses_initSizes ## allowUnknown says whether it is ok to have unknown type. This will be true for LHS of assignments ## ## This returns a set of type assertions collected for each line of code ## This function operates recursively, so the type assertions returned from recursive calls are ## put into the exprClass object for which they were recursed. ## ## For example, if we have A2 <- mean(B + C) ## then typeEnv must have size expressions for B and C to get started. 
## If these are matrices, the generic size expressions (for B) will be dim(B)[1] and dim(B)[2] ## Then the exprClass object for `+`(B, C) will generate assertions that dim(B)[1] == dim(C)[1] and dim(B[2]) == dim(C)[2] ## and it will copy the size expressions for B as its own size expressions ## Then the exprClass object for mean(`+`(B, C)) will create a size expression of 1 (with the same dimensions as B+C) ## Then the exprClass object for `<-`(A, mean(`+`(B, C))) will generate assertions that the size of A must be 1 ## and it will set the size expressions for A and for itself to 1. expressionSymbolTypeReplacements <- c('symbolNimbleListGenerator', 'symbolNimbleList', 'symbolNimbleFunction') exprClasses_setSizes <- function(code, symTab, typeEnv) { ## input code is exprClass ## name: if(code$isName) { ## If it doesn't exist and must exist, stop if(code$name != "") { ## e.g. In A[i,], second index gives name=="" if(!exists(code$name, envir = typeEnv, inherits = FALSE)) { if(symTab$symbolExists(code$name, TRUE)) { thisSymbolObject <- symTab$getSymbolObject(code$name, TRUE) code$type <- class(thisSymbolObject)[1] if(code$type %in% expressionSymbolTypeReplacements){ code$type <- thisSymbolObject$type code$sizeExprs <- thisSymbolObject } } else { code$type <- 'unknown' if(!typeEnv$.AllowUnknowns) if(identical(code$name, 'pi')) { ## unique because it may be encountered anew on on RHS and be valid assign('pi', exprTypeInfoClass$new(nDim = 0, type = 'double', sizeExprs = list()), envir = typeEnv) symTab$addSymbol(symbolBasic(name = 'pi', type = 'double', nDim = 0)) code$nDim <- 0 code$type <- 'double' code$sizeExprs <- list() code$toEigenize <- 'maybe' } else { warning(paste0("variable '",code$name,"' has not been created yet."), call.=FALSE) } } } else { ## otherwise fill in type fields from typeEnv object info <- get(code$name, envir = typeEnv) if(inherits(info, 'exprTypeInfoClass')) { code$type <- info$type code$sizeExprs <- info$sizeExprs code$nDim <- info$nDim 
code$toEigenize <- 'maybe' } } ## Add RCfunctions to neededRCfuns. if(exists(code$name) && is.rcf(get(code$name))) { nfmObj <- environment(get(code$name))$nfMethodRCobject uniqueName <- nfmObj$uniqueName if (is.null(typeEnv$neededRCfuns[[uniqueName]])) { typeEnv$neededRCfuns[[uniqueName]] <- nfmObj } } ## Note that generation of a symbol for LHS of an assignment is done in the sizeAssign function, which is the handler for assignments return(NULL) } } if(code$isCall) { if(code$name == '{') { ## recurse over lines for(i in seq_along(code$args)) { if(inherits(code$args[[i]], 'exprClass')) { newAsserts <- exprClasses_setSizes(code$args[[i]], symTab, typeEnv) code$args[[i]]$assertions <- if(is.null(newAsserts)) list() else newAsserts } } return(invisible(NULL)) } sizeCall <- sizeCalls[[code$name]] if(!is.null(sizeCall)) { if(.nimbleOptions$debugSizeProcessing) { browser() eval(substitute(debugonce(XYZ), list(XYZ = as.name(sizeCall)))) } test0 <- eval(call(sizeCall, code, symTab, typeEnv)) return(test0) } if(symTab$symbolExists(code$name, TRUE)) { ## could be a nimbleFunction object return(sizeNimbleFunction(code, symTab, typeEnv) ) } ## Finally, it could be an RCfunction (a nimbleFunction with no setup == a simple function) { if(exists(code$name)) { obj <- get(code$name) if(is.rcf(obj)) { ## it is an RC function nfmObj <- environment(obj)$nfMethodRCobject uniqueName <- nfmObj$uniqueName if(length(uniqueName)==0) stop(exprClassProcessingErrorMsg(code, 'In size processing: A no-setup nimbleFunction with no internal name is being called.'), call. 
= FALSE) if(is.null(typeEnv$neededRCfuns[[uniqueName]])) { typeEnv$neededRCfuns[[uniqueName]] <- nfmObj } ## new with nimbleLists: we need to initiate compilation here so we can get full returnType information, including of nimbleLists RCfunProc <- typeEnv$.nimbleProject$compileRCfun(obj, initialTypeInference = TRUE) return(sizeRCfunction(code, symTab, typeEnv, nfmObj, RCfunProc)) } } } invisible(NULL) } sizeProxyForDebugging <- function(code, symTab, typeEnv) { browser() origValue <- .nimbleOptions$debugSizeProcessing message('Entering into size processing debugging. You may need to do nimbleOptions(debugSizeProcessing = FALSE) if this exits in any non-standard way.') setNimbleOption('debugSizeProcessing', TRUE) ans <- recurseSetSizes(code, symTab, typeEnv) removeExprClassLayer(code$caller, 1) setNimbleOption('debugSizeProcessing', origValue) return(ans) } ## This is used by nimbleExternalCall. ## When the external call is provided as foo, returning e.g. double(0), ## we end up needing a line of code RETURNVALUE <- foo(args). 
## To get the type of RETURNVALUE, we wrap that as RETURNVALUE <- asReturnSymbol(foo(args), type, nDim) sizeAsReturnSymbol <- function(code, symTab, typeEnv) { returnType <- code$args[[2]] returnNDim <- code$args[[3]] code$args <- list(code$args[[1]]) code$args[[1]]$type <- returnType code$args[[1]]$nDim <- returnNDim code$args[[1]]$toEigenize <- 'no' code$args[[1]]$sizeExprs <- NULL removeExprClassLayer(code, 1) list() } productSizeExprs <- function(sizeExprs) { if(length(sizeExprs)==0) return(1) if(length(sizeExprs)==1) return(sizeExprs[[1]]) ans <- substitute( (A), list(A = sizeExprs[[1]])) for(i in 2:length(sizeExprs)) { ans <- substitute(A * (B), list(A = ans, B = sizeExprs[[i]])) } ans } multiMaxSizeExprs <- function(code, useArgs = rep(TRUE, length(code$args))) { if(length(code$args)==0) return(list()) ## probably something wrong codeArgsUsed <- code$args[useArgs] totalLengthExprs <- lapply(codeArgsUsed, function(x) if(inherits(x, 'exprClass')) productSizeExprs(x$sizeExprs) else 1) if(length(codeArgsUsed)==1) return(totalLengthExprs) ## a list of length 1 numericTotalLengths <- unlist(lapply(totalLengthExprs, is.numeric)) if(sum(numericTotalLengths) > 0) { maxKnownSize <- max(unlist(totalLengthExprs[numericTotalLengths])) if(sum(numericTotalLengths)==length(totalLengthExprs)) return(list(maxKnownSize)) totalLengthExprs <- c(list(maxKnownSize), totalLengthExprs[-which(numericTotalLengths)]) } numArgs <- length(totalLengthExprs) ## must be > 1 or it would have returned two lines above if(numArgs == 1) return(totalLengthExprs[[1]]) ## but check anyway lastMax <- substitute(max(A, B), list(A = totalLengthExprs[[numArgs]], B = totalLengthExprs[[numArgs-1]])) if(numArgs > 2) { for(i in (numArgs-2):1) { lastMax <- substitute(max(A, B), list(A = totalLengthExprs[[i]], B = lastMax)) } } return(list(lastMax)) } addDIB <- function(name, type) { paste0(name, switch(type, double = 'D', integer = 'I', logical = 'B')) } sizeDim <- function(code, symTab, typeEnv) { asserts 
<- recurseSetSizes(code, symTab, typeEnv) if(!inherits(code$args[[1]], 'exprClass')) { stop(exprClassProcessingErrorMsg(code, paste0('Argument of dim is not valid')), call. = FALSE) } if(code$args[[1]]$nDim == 0) { stop(exprClassProcessingErrorMsg(code, paste0('dim() cannot take a scalar as its argument.')), call. = FALSE) } if(code$caller$name != '[') code$name <- 'dimNimArr' code$nDim <- 1 code$type <- 'integer' code$toEigenize <- 'no' code$sizeExprs <- list( code$args[[1]]$nDim ) return(if(is.null(asserts)) list() else asserts) } sizeDiagonal <- function(code, symTab, typeEnv) { ## experimentalNewSizeProcessing: code$name change step stays here ## experimentalNewSizeProcessing: because the 3 cases are not implementation-specific asserts <- recurseSetSizes(code, symTab, typeEnv) argIsExprClass <- inherits(code$args[[1]], 'exprClass') nDimArg <- if(argIsExprClass) code$args[[1]]$nDim else 0 if(nDimArg == 0) { code$nDim <- 2 code$type <- 'double' newSizeExpr <- parse(text = nimDeparse(code$args[[1]]), keep.source = FALSE)[[1]] code$sizeExprs <- list(newSizeExpr, newSizeExpr) code$toEigenize <- 'yes' code$name <- addDIB('nimDiagonal', code$type) ## These all go to double anyway return( if(length(asserts) == 0) NULL else asserts ) } if(nDimArg == 1) { code$nDim <- 2 code$type <- 'double' newSizeExpr <- code$args[[1]]$sizeExprs[[1]] code$sizeExprs <- list(newSizeExpr, newSizeExpr) code$toEigenize <- 'yes' code$name <- addDIB('nimDiagonal', code$args[[1]]$type) ## double anyway return( if(length(asserts) == 0) NULL else asserts ) } if(nDimArg == 2) { code$nDim <- 1 code$type <- code$args[[1]]$type code$sizeExprs <- list(substitute(min(X, Y), list(X = code$args[[1]]$sizeExprs[[1]], Y = code$args[[1]]$sizeExprs[[2]]))) code$toEigenize <- 'yes' code$name <- 'diagonal' return( if(length(asserts) == 0) NULL else asserts ) } stop(exprClassProcessingErrorMsg(code, paste0('Something is wrong with this usage of diag()')), call. 
= FALSE) } sizeWhich <- function(code, symTab, typeEnv) { ## which is a somewhat unique construction. ## It should only appear as ## answer <- which(boolExpr) ## and should be lifted to an intermediate if necessary ## The sizeExprs on "which" in the syntax tree will be NULL ## which will trigger sizeAssignAfterRecursing to make default size expressions on "answer" ## and then it will transform to ## setWhich(answer, boolExpr) for C++ output asserts <- recurseSetSizes(code, symTab, typeEnv) code$type = 'integer' code$sizeExprs <- list(NULL) code$nDim <- 1 code$toEigenize <- 'yes' code$name <- 'setWhich' if(!nimbleOptions('experimentalSelfLiftStage')) { if(!(code$caller$name %in% assignmentOperators)) { asserts <- c(asserts, sizeInsertIntermediate(code$caller, code$callerArgID, symTab, typeEnv)) } } if(length(asserts) == 0) NULL else asserts } sizeRecyclingRule <- function(code, symTab, typeEnv) { ## also need an entry in eigenization. asserts <- recurseSetSizes(code, symTab, typeEnv) ## for now this is entirely for d, p and q distribution functions, so we'll look up number of arguments for recycling numArgs <- length(code$args) if(numArgs == 0) return(asserts) recycleArgs <- rep(TRUE, numArgs) dFunName <- code$name substr(dFunName, 1, 1) <- 'd' thisDist <- distributions$distObjects[[dFunName]] if(!is.null(thisDist)) { numReqdArgs <- length(thisDist$reqdArgs) recycleArgs[-(1:(numReqdArgs+1))] <- FALSE } newSizeExprs <- multiMaxSizeExprs(code, recycleArgs) if(length(newSizeExprs)==1) if(is.numeric(newSizeExprs[[1]])) if(newSizeExprs[[1]] == 1) return(c(asserts, sizeScalarRecurse(code, symTab, typeEnv, recurse = FALSE))) ## ALSO NEED ALL ARGS TO HAVE nDim 0 code$sizeExprs <- newSizeExprs code$type <- 'double' ## will need to look up from a list code$nDim <- 1 code$toEigenize <- 'yes' ## toEigen: N.B. 
This had TRUE return(asserts) } sizeRecyclingRuleRfunction <- function(code, symTab, typeEnv) { asserts <- recurseSetSizes(code, symTab, typeEnv) ## for now this is entirely for r distribution functions, so we'll look up number of arguments for recycling numArgs <- length(code$args) if(numArgs == 0) return(asserts) ## Size determined by first arg ## If scalar, that gives size ## If vector, size is length of first argument. ## Problem is vector of length 1, where size should be value of first element, not length of 1. ## toEigen: keep this lift here for now, since it sets up sizes. if(inherits(code$args[[1]], 'exprClass')) { if(!code$args[[1]]$isName) { asserts <- c(asserts, sizeInsertIntermediate(code, 1, symTab, typeEnv)) } newSizeExprs <- list(substitute(rFunLength(X), list(X = as.name(code$args[[1]]$name)))) } else { newSizeExprs <- list(code$args[[1]]) } if(length(newSizeExprs)==1) if(is.numeric(newSizeExprs[[1]])) if(newSizeExprs[[1]] == 1) { code$args[[1]] <- NULL ## strip the first argument, which should be a 1 if we are here for(i in seq_along(code$args)) { if(inherits(code$args[[i]], 'exprClass')) { code$args[[i]]$callerArgID <- code$args[[i]]$callerArgID - 1 } } return(c(asserts, sizeScalarRecurse(code, symTab, typeEnv, recurse = FALSE))) } code$sizeExprs <- newSizeExprs code$type <- 'double' ## will need to look up from a list code$nDim <- 1 code$toEigenize <- 'yes' return(asserts) } concatenateIntermLabelMaker <- labelFunctionCreator("ConcatenateInterm") sizeConcatenate <- function(code, symTab, typeEnv) { ## This is two argument version asserts <- recurseSetSizes(code, symTab, typeEnv) ## overall strategy is to separate runs of scaalrs and non-scalars ## also in C++ we don't take arbitrary arguments. Instead we chain together calls in groups of 4 ## e.g. 
c(a1, a2, a3, a4, a5) will become c( c(a1, a2, a3, a4), a5) ## first puzzle is with nimC(scalar1, scalar2, vector1, scalar3) ## we need to extract the runs of scalars like (scalar1, scalar2), so they can be packed up in an object together. isScalar <- unlist(lapply(code$args, function(x) if(inherits(x, 'exprClass')) x$nDim == 0 else TRUE)) ## run length encoding: This native R function returns information about repeats, so we can figure out how long each run of scalars is argRLE <- rle(isScalar) ## How many arguments will we have after packing scalars together into single objects: newNumArgs <- sum(argRLE$values) + sum(argRLE$lengths[!argRLE$values]) ## number of scalar runs + sum of non-scalar runs * run-lengths newArgs <- vector(length(newNumArgs), mode = 'list') iInput <- 1 iOutput <- 1 for(i in seq_along(argRLE$values)) { thisLength <- argRLE$lengths[i] if(!(argRLE$values[i])) { ## it is a run of non-scalars, so pack them into the new argument list, newArgs newArgs[(iOutput-1) + (1:thisLength)] <- code$args[(iInput-1) + (1:thisLength)] iInput <- iInput + thisLength iOutput <- iOutput + thisLength } else { ## it is a run of scalars, so construct an object for them newTempFixedName <- concatenateIntermLabelMaker() newTempVecName <- concatenateIntermLabelMaker() ## Construct: ## concatenateTemp(ConcatenateInterm_1), ## concatenateTemp is not output to C++. It is a placeholder newExpr <- exprClass(isName = FALSE, isCall = TRUE, isAssign = FALSE, name = "concatenateTemp", nDim = 1, sizeExprs = list(thisLength), type = 'double') setArg(newExpr, 1, exprClass(isName = TRUE, isCall = FALSE, isAssign = FALSE, name = newTempVecName, nDim = 1, sizeExprs = list(thisLength), type = 'double')) ## hardCodedVectorInitializer is a wrapper for the "contents1, contents2, ..." 
below valuesExpr <- quote(hardCodedVectorInitializer()) thisType <- 'logical' for(j in 1:thisLength) { thisArgIndex <- iInput - 1 + j if(inherits(code$args[[thisArgIndex]], 'exprClass')) { if(!code$args[[thisArgIndex]]$isName) ## a little heavy-handed: lift any expression of any kind ## to avoid dealing with eigen or other handling inside initialization values ## This is necessary for cases like nimC(model[[node]][2], 1.2) ## because model[[node]] is a map asserts <- c(asserts, sizeInsertIntermediate(code, thisArgIndex, symTab, typeEnv)) thisType <- arithmeticOutputType(thisType, code$args[[thisArgIndex]]$type) } else { thisType <- storage.mode(code$args[[thisArgIndex]]) ##'double' } ## Putting a map, or a values access, through parse(nimDeparse) won't work ## So we lift any expression element above. ## This could be done more cleanly with more coding work. valuesExpr[[j+1]] <- parse(text = nimDeparse(code$args[[thisArgIndex]]), keep.source = FALSE)[[1]] } newExpr$type <- thisType newExpr$args[[1]]$type <- thisType iInput <- iInput + thisLength if(thisType == 'integer') thisType <- 'int' if(thisType == 'logical') thisType <- 'bool' ## MAKE_FIXED_VECTOR("ConcatenateInterm_2", "ConcatenateInterm_1", numArgs, values, type) goes through a customized output generator ## to create something like ## double ConcatenateIterm_1[] = {contents1, contents2} ## std::vector<double> ConcatenateInterm_2(ConcatenateInterm_1, ConcatenateInterm_1 + length) ## so there is one intermediate whose only purpose is to achieve initialization by value and a second intermediate copied from the first. 
## The second intermediate can later be used in the templated nimCd/nimCi/nimCb ## newAssert <- substitute(MAKE_FIXED_VECTOR(newTempVecName, newTempFixedName, thisLength, valuesExpr, thisType), list(newTempVecName = newTempVecName, newTempFixedName = newTempFixedName, thisLength = as.numeric(thisLength), valuesExpr = valuesExpr, thisType = thisType)) newAssert <- as.call(newAssert) asserts <- c(asserts, list(newAssert)) newArgs[[iOutput]] <- newExpr iOutput <- iOutput + 1 } } ## Next step: chain together multiple calls: maxArgsOneCall <- 4 numArgGroups <- ceiling(newNumArgs / (maxArgsOneCall-1)) splitArgIDs <- split(1:newNumArgs, rep(1:numArgGroups, each = maxArgsOneCall-1, length.out = newNumArgs)) ## if last is a singleton it can be put with previous group if(length(splitArgIDs[[numArgGroups]]) == 1) { if(numArgGroups > 1) { splitArgIDs[[numArgGroups-1]] <- c(splitArgIDs[[numArgGroups-1]], splitArgIDs[[numArgGroups]]) splitArgIDs[[numArgGroups]] <- NULL numArgGroups <- numArgGroups-1 } } newExprList <- vector(numArgGroups, mode = 'list') for(i in seq_along(splitArgIDs)) { newExprList[[i]] <- exprClass(isName = FALSE, isCall = TRUE, isAssign = FALSE, name = 'nimC', nDim = 1, toEigenize = 'yes', type = 'double') for(j in seq_along(splitArgIDs[[i]])) setArg(newExprList[[i]], j, newArgs[[splitArgIDs[[i]][j]]]) } ## Last step is to set up nesting and make sizeExprs for each constructed argument for(i in seq_along(splitArgIDs)) { if(i != length(splitArgIDs)) { setArg(newExprList[[i]], maxArgsOneCall, newExprList[[i+1]]) } } for(i in rev(seq_along(splitArgIDs))) { if(inherits(newExprList[[i]]$args[[1]], 'exprClass')) { thisSizeExpr <-productSizeExprs(newExprList[[i]]$args[[1]]$sizeExprs) thisType <- newExprList[[i]]$args[[1]]$type } else { thisSizeExpr <- 1 thisType <- 'double' } for(j in seq_along(newExprList[[i]]$args)) { if(j == 1) next if(inherits(newExprList[[i]]$args[[j]], 'exprClass')) { thisSizeExpr <- substitute( (A) + (B), list(A = thisSizeExpr, B = 
productSizeExprs(newExprList[[i]]$args[[j]]$sizeExprs) )) thisType <- arithmeticOutputType(thisType, newExprList[[i]]$args[[j]]$type) } else { thisSizeExpr <- substitute( (A) + 1, list(A = thisSizeExpr )) thisType <- 'double' } } if(thisType == 'double') newExprList[[i]]$name <- 'nimCd' ## this change could get moved to genCpp_generateCpp if(thisType == 'integer') newExprList[[i]]$name <- 'nimCi' if(thisType == 'logical') newExprList[[i]]$name <- 'nimCb' newExprList[[i]]$type <- thisType newExprList[[i]]$sizeExprs <- list(thisSizeExpr) } setArg(code$caller, code$callerArgID, newExprList[[1]]) return(asserts) } sizeRep <- function(code, symTab, typeEnv) { ## if times is a vector: If length.out is provided, times is always ignored ## otherwise lift and use assignEigenToNIMBLE asserts <- recurseSetSizes(code, symTab, typeEnv) xIsExpr <- inherits(code$args[[1]], 'exprClass') code$type <- if(xIsExpr) code$args[[1]]$type else 'double' includesLengthOut <- length(code$args) > 3 if(inherits(code$args[[2]], 'exprClass')) if(code$args[[2]]$nDim != 0 && !includesLengthOut) { ## times is a vector and length.out not provided if(!(code$caller$name %in% assignmentOperators)) { asserts <- c(asserts, sizeInsertIntermediate(code$caller, code$callerArgID, symTab, typeEnv)) } if(length(code$args) > 2) code$args[[3]] <- NULL code$name <- 'setRepVectorTimes' code$sizeExprs <- list(NULL) code$nDim <- 1 code$toEigenize <- 'yes' return(asserts) } if(code$type == 'double') code$name <- 'nimRepd' ## this change could get moved to genCpp_generateCpp if(code$type == 'integer') code$name <- 'nimRepi' if(code$type == 'logical') code$name <- 'nimRepb' ## requiring for now that times and each arguments are given as integers, not expressions ## Since these will go into sizeExprs, which are then processed as R expressions, then as exprClasses but not fully size processed, ## any expression should be lifted if(includesLengthOut) { ## there is a "length.out" argument ## need to lift length.out if it 
is more than a name or constant if(inherits(code$args[[2]], 'exprClass')) if(code$args[[2]]$nDim > 0) stop(exprClassProcessingErrorMsg(code, paste0('times argument to rep() must be scalar is length.out is also provided.')), call. = FALSE) if(inherits(code$args[[3]], 'exprClass')) { ## if length.out is present, it is argument 3 if(!is.name(code$args[[3]])) asserts <- c(asserts, sizeInsertIntermediate(code, 3, symTab, typeEnv)) if(code$args[[3]]$nDim > 0) code$sizeExprs <- list( parse(text = paste0(nimDeparse(code$args[[3]]),'[1]'), keep.source = FALSE)[[1]]) else code$sizeExprs <- list( parse(text = nimDeparse(code$args[[3]]), keep.source = FALSE)[[1]]) } else { code$sizeExprs <- list(code$args[[3]]) } } else { ## length.out absent, so times is second and each is third for(i in 2:3) { if(inherits(code$args[[i]], 'exprClass')) if(!is.name(code$args[[i]])) asserts <- c(asserts, sizeInsertIntermediate(code, i, symTab, typeEnv)) } part2 <- nimDeparse(code$args[[2]]) if(inherits(code$args[[2]], 'exprClass')) if(code$args[[2]]$nDim > 0) part2 <- paste0(part2, '[1]') part3 <- nimDeparse(code$args[[3]]) if(inherits(code$args[[3]], 'exprClass')) if(code$args[[3]]$nDim > 0) part3 <- paste0(part3, '[1]') thisSizeExpr <- substitute( (AAA_) * (BBB_) * (CCC_), list(AAA_ = if(xIsExpr) productSizeExprs(code$args[[1]]$sizeExprs) else 1, BBB_ = parse(text = part2, keep.source = FALSE)[[1]], CCC_ = parse(text = part3, keep.source = FALSE)[[1]] )) code$sizeExprs <- list(thisSizeExpr) } code$nDim <- 1 code$toEigenize <- 'yes' return(asserts) } sizeNewNimbleList <- function(code, symTab, typeEnv){ ## The code looks like: nimListDef$new(a = 10, b = 12). ## We want to change code$caller to : ## { nimList <- nimListDef$new() ## nimList$a <- 10 ## nimList$b <- 12 }. ## We accomplish this by copying code, getting arguments (e.g. 
a = 10, b = 12) from copied code and turning them into assignment ## exprs in code$caller, and setting first argument of code$caller to be nimList <- nimListDef$new(). listDefName <- code$args[[1]]$name if(symTab$symbolExists(listDefName, inherits = TRUE)){ listST <- symTab$getSymbolObject(listDefName, inherits = TRUE) } else { ## We need to establish the symbol and needed type. nlDef <- get(listDefName) ## Need the nimbleProject! nlp <- typeEnv$.nimbleProject$compileNimbleList(nlDef, initialTypeInference = TRUE) className <- nl.getListDef(nlDef)$className if(is.null(typeEnv$neededRCfuns[[className]])) { newSym <- symbolNimbleList(name = listDefName, nlProc = nlp) typeEnv$neededRCfuns[[className]] <- newSym } newDefSym <- symbolNimbleListGenerator(name = listDefName, nlProc = nlp) symTab$addSymbol(newDefSym) listST <- newDefSym } code$type <- "nimbleList" code$sizeExprs <- listST code$toEigenize <- "maybe" code$nDim <- 0 asserts <- list() asserts <- c(asserts, recurseSetSizes(code, symTab, typeEnv, useArgs = c(TRUE, rep(FALSE, length(code$args)-1)))) if(!(code$caller$name %in% assignmentOperators)){ intermediateAsserts <- sizeInsertIntermediate(code$caller, code$callerArgID, symTab, typeEnv) ## intermediateAsserts can potentially have size setting stuff from sizeAssignAfterRecursing. ## Not sure if that would ever happen in this context, but to be safe we'll use last element as the actual intermediate assignment. ## Embed the intermediate assignment in a '{' (so insertAssertions will recurse on it) and recurse on it. numIntermAsserts <- length(intermediateAsserts) bracketedIntermAssert <- newBracketExpr(intermediateAsserts[numIntermAsserts]) exprClasses_setSizes(bracketedIntermAssert, symTab, typeEnv) intermediateAsserts[[numIntermAsserts]] <- bracketedIntermAssert asserts <- c(asserts, intermediateAsserts) return(asserts) } if(length(code$args) <= 1) return(asserts) ## There are no args to process. 
    ## Build one assignment expression per nimbleList element supplied as an
    ## argument to $new(), e.g. nfVar(nimList, 'a') <- 10.
    RnewExprs <- list()
    newExprs <- list()
    RnfVarExprs <- list()
    nfVarExprs <- list()
    exprCounter <- 1
    originalCode <- code
    listElements <- listST$nlProc$symTab$getSymbolNames()
    ## R expression for the assignment target (the list object being created)
    RlistNameExpr <- nimbleGeneralParseDeparse(originalCode$caller$args[[1]])
    for(i in seq_along(listElements)) {
        thisVarName <- listElements[i]
        if(!is.null(originalCode$args[[thisVarName]])) {
            ## Skip first arg, which will be name of nlDef, then check if value is "".
            if(!inherits(originalCode$args[[thisVarName]], 'exprClass') || (originalCode$args[[thisVarName]]$name != "")) {
                ## nfVar(A, 'x') for whichever element name it's on ('x')
                RnfVarExprs[[exprCounter]] <- substitute(nfVar(A, X), list(A = RlistNameExpr, X = thisVarName))
                ## nfVar(A, 'x') <- y or whatever code was provided (already recursed for size processing)
                RnewExprs[[exprCounter]] <- substitute(A <- B, list(A = RnfVarExprs[[exprCounter]],
                                                                    B = nimbleGeneralParseDeparse(originalCode$args[[thisVarName]])))
                exprCounter <- exprCounter + 1
            }
        }
    }
    if(length(RnewExprs) == 0) return(asserts) ## All args have already been specified.
    ## Embed RnewExprs in a '{' expression.
    RbracketNewExprs <- quote(after({}))
    RbracketNewExprs[[2]][2:(length(RnewExprs) + 1)] <- RnewExprs
    bracketNewExprs <- RparseTree2ExprClasses(RbracketNewExprs)
    ## Need to install assignment target in symTab if necessary so that it
    ## will be there for recursion in the following step.
    assignmentTarget <- code$caller$args[[1]]
    if(assignmentTarget$isName) {
        if(!symTab$symbolExists(assignmentTarget$name, TRUE)) {
            symTab$addSymbol(symbolNimbleList(name = assignmentTarget$name,
                                              type = code$type,
                                              nlProc = code$sizeExprs$nlProc))
        }
    }
    ## Recurse into element assignments.
    exprClasses_setSizes(bracketNewExprs$args[[1]], symTab, typeEnv)
    asserts <- c(asserts, list(bracketNewExprs))
    if(length(code$args) > 1) ## TODO Remove this conditional, since this should always be true if we make it this far.
        code$args <- code$args[1]  ## keep only the nlDef-name arg; element values now live in the generated assignments
    return(asserts)
}

## Size handler for a map (array view) generated from setup code.
## nDim is passed as args[[2]] and the size expressions as args[[4]].
sizemap <- function(code, symTab, typeEnv) {
    ## This will only be called on a map generated from setup
    ## Maps created from indexing in nimble code don't go through this function
    sym <- symTab$getSymbolObject(code$args[[1]], TRUE)
    code$type <- sym$type
    code$nDim <- code$args[[2]]
    code$sizeExprs <- code$args[[4]]
    code$toEigenize <- 'maybe'
    invisible(NULL)
}

## size handler for nimArrayGeneral()
## nimArrayGeneral(type(character), nDim, dim (c(sizeExpr1, ...)), value, init (logical), fillZeros, recycle, unpackNDim(optional))
## nimArrayGeneral(      arg1,      arg2,          arg3,           arg4,     arg5,          arg6,      arg7,      arg8     )
sizeNimArrayGeneral <- function(code, symTab, typeEnv) {
    useArgs <- c(FALSE, FALSE, FALSE, TRUE, TRUE, TRUE, TRUE)
    if(!is.null(code$args[['unpackNDim']])) useArgs <- c(useArgs, TRUE)
    asserts <- recurseSetSizes(code, symTab, typeEnv, useArgs = useArgs) ## recurse on initialValue and initialLogical only
    ## some checking
    if(inherits(code$args[['init']], 'exprClass'))
        if(!(code$args[['init']]$nDim == 0))
            stop(exprClassProcessingErrorMsg(code, paste0('init argument to numeric, logical, integer, matrix or array must be scalar')), call. = FALSE)
    type <- code$args[['type']]
    nDim <- code$args[['nDim']]  ## -1 means nDim was not provided in the original call
    unpackNDim <- if(!is.null(code$args[['unpackNDim']])) code$args[['unpackNDim']] else FALSE ##if(length(code$args) > 5) code$args[[6]] else FALSE
    cSizeExprs <- code$args[['dim']] ## these are the size expressions encompassed by collectSizes(), needed for purposes of the C++ line to be generated
    if(!inherits(cSizeExprs, 'exprClass'))
        stop(exprClassProcessingErrorMsg(code, paste0('Something wrong (i) with sizes or dim to numeric, logical, integer, matrix or array')), call. = FALSE)
    if(cSizeExprs$name != 'collectSizes')
        stop(exprClassProcessingErrorMsg(code, paste0('Something wrong (ii) with sizes or dim to numeric, logical, integer, matrix or array')), call. = FALSE)
    if(unpackNDim) {
        ## This means length of dim unknown at compile time but nDim explicitly provided, so we construct c(dim[1], dim[2]), etc.
        asserts <- c(asserts, recurseSetSizes(cSizeExprs, symTab, typeEnv))
        if(!cSizeExprs$args[[1]]$isName)
            asserts <- c(asserts, sizeInsertIntermediate(cSizeExprs, 1, symTab, typeEnv)) ## this intermediate goes a layer down the AST, but works
        if(length(cSizeExprs$args[[1]]$sizeExprs) == 0) { ## The argument expression evaluates to scalar
            if(nDim == -1) {
                nDim <- 1
                code$args[['nDim']] <- 1
            }
            if(nDim == 1) unpackNDim <- FALSE ## and that's ok because nDim given as 1
        }
        if(unpackNDim) {
            if(length(cSizeExprs$args[[1]]$sizeExprs) != 1)
                stop(exprClassProcessingErrorMsg(code, paste0('Something wrong (ii) with sizes or dim to numeric, logical, integer, matrix or array')), call. = FALSE)
            if(nDim == -1) {## code for nDim not given but dim given as expression
                if(!is.numeric(cSizeExprs$args[[1]]$sizeExprs[[1]])) stop()
                nDim <- cSizeExprs$args[[1]]$sizeExprs[1]
                if(nDim < 1 | nDim > 4)
                    stop(exprClassProcessingErrorMsg(code, paste0('Something wrong (iii) with sizes or dim to numeric, logical, integer, matrix or array')), call. = FALSE)
                code$args[['nDim']] <- nDim
            }
            ## replace the single dim-vector argument with dim[1], dim[2], ..., dim[nDim]
            varName <- as.name(cSizeExprs$args[[1]]$name)
            for(i in 1:nDim) {
                newIndexedSizeExpr <- RparseTree2ExprClasses( substitute(X[I], list(X = varName, I = i) ) )
                setArg(cSizeExprs, i, newIndexedSizeExpr)
            }
        }
    } else {
        asserts <- c(asserts, recurseSetSizes(cSizeExprs, symTab, typeEnv))
        nonScalarWhereNeeded <- FALSE
        if(inherits(cSizeExprs$args[[1]], 'exprClass'))
            if(cSizeExprs$args[[1]]$nDim != 0)
                nonScalarWhereNeeded <- TRUE
        if(nDim == -1) { ## nDim wasn't provided (to nimArray) and dim was an expression, so it ought to be a scalar
            if(nonScalarWhereNeeded)
                stop(exprClassProcessingErrorMsg(code, paste0('Something wrong (iv) with sizes or dim to numeric, logical, integer, matrix or array. It looks like dim argument was non-scalar but nDim was not provided. If the dim argument to array (or nimArray) is a vector, you must also provide nDim argument to say how many dimensions will be used.')), call. = FALSE)
            nDim <- code$args[['nDim']] <- 1
        } else { ## call was from numeric, integer or logical
            if(nonScalarWhereNeeded)
                stop(exprClassProcessingErrorMsg(code, paste0('Something wrong (v) with sizes or dim to numeric, logical, integer, matrix or array. It looks like length argument was non-scalar.')), call. = FALSE)
        }
    }
    ## if it is a call to matrix() and the value argument is non-scalar,
    ## we will generate it in C++ as nimNewMatrix
    useNewMatrix <- FALSE
    if(nDim == 2) {
        if(inherits(code$args[['value']], 'exprClass'))
            if(code$args[['value']]$nDim > 0)
                useNewMatrix <- TRUE ## use eigen-compatible C++
    }
    if(code$args[['nDim']] != length(cSizeExprs$args))
        stop(exprClassProcessingErrorMsg(code, paste0('Something wrong (iii) with sizes or dim to numeric, logical, integer, matrix or array')), call. = FALSE)
    annotationSizeExprs <- lapply(cSizeExprs$args, nimbleGeneralParseDeparse) ## and this is for purposes of the sizeExprs in the AST exprClass object
    missingSizes <- unlist(lapply(cSizeExprs$args, function(x) identical(x, as.numeric(NA)) | identical(x, NA))) ## note is.na doesn't work b/c the argument can be an expression and is.na warns on that ##old: identical, as.numeric(NA)))
    ## only case where we do something useful with missingSizes is matrix(value = non-scalar, ...)
    ## Fill in dimensions the user omitted (e.g. matrix(x, ncol = 3) with nrow missing).
    if(any(missingSizes)) {
        ## modify sizes in generated C++ line
        if(useNewMatrix) cSizeExprs$args[missingSizes] <- -1
        else cSizeExprs$args[missingSizes] <- 1
        ## modify annotation sizeExprs
        totalInputLengthExpr <- if(inherits(code$args[['value']], 'exprClass')) productSizeExprs(code$args[['value']]$sizeExprs) else 1 ## should always be exprClass in here anyway
        ## see newMatrixClass in nimbleEigen.h
        if(missingSizes[1]) { ## missing nrow
            if(missingSizes[2]) { ## missing both
                annotationSizeExprs[[1]] <- totalInputLengthExpr
                annotationSizeExprs[[2]] <- 1
            } else { ## ncol provided
                annotationSizeExprs[[1]] <- substitute(calcMissingMatrixSize(A, B),
                                                       list(A = totalInputLengthExpr, B = annotationSizeExprs[[2]]))
            }
        } else { ## nrow provided, ncol missing (is both provided, we wouldn't be in this code
            annotationSizeExprs[[2]] <- substitute(calcMissingMatrixSize(A, B),
                                                   list(A = totalInputLengthExpr, B = annotationSizeExprs[[1]]))
        }
    }
    asserts <- c(asserts, recurseSetSizes(cSizeExprs, symTab, typeEnv))
    if(!(type %in% c('double', 'integer', 'logical'))) stop('unknown type in nimArrayGeneral')
    ## Three possible calls can be emitted by choice of code$name:
    ## initialize (this becomes a NimArr member function call. It is used if initialization is scalar, to be repeated);
    ## assignNimArrToNimArr (this becomes a call to assignNimArrToNimArr. It is used if initialization is non-scalar and the object being created is not a matrix;
    ## nimNewMatrix[D|I|B] (this has the same name in C++. It is used if initialization is non-scalar and the object being created is a matrix. It creates an eigen-compatible object within an expression).
code$name <- 'initialize' ## may be replaced below if useNewMatrix if(inherits(code$args[['value']], 'exprClass')) if(code$args[['value']]$nDim > 0) code$name <- 'assignNimArrToNimArr' ## could be replaced by nimNewMatrix[D|B|I] below ## rearrange arguments if(code$name == 'assignNimArrToNimArr') if(!useNewMatrix) code$args <- c(code$args[4:7], cSizeExprs$args) ## args: initialize(value, init, fillZeros, recycle, sizeExpr1, sizeExpr2, etc...) else code$args <- c(code$args[c(4,5,7)], cSizeExprs$args) ## fillZeros has no role in this case. nimNewMatrix creates an eigen object that has to return something for each element, so it will use a zero anyway. else code$args <- c(code$args[4:7], cSizeExprs$args) ## actually this turned out the same as for assignNimArrToNimArr. ## fix code/caller relationships in AST for(i in seq_along(code$args)) { if(inherits(code$args[[i]], 'exprClass')) { code$args[[i]]$callerArgID <- i code$args[[i]]$caller <- code } } code$type <- type code$nDim <- nDim code$toEigenize <- 'no' ## insert intermediate unless it will be newNimMatrix code$sizeExprs <- annotationSizeExprs ## check for nimNewMatrix case if(useNewMatrix) { suffix <- 'D' if(code$type == 'integer') suffix <- 'I' if(code$type == 'logical') suffix <- 'B' code$name <- paste0("nimNewMatrix", suffix) code$toEigenize <- "yes" } else { ## otherwise, lift values arg if necessary if(inherits(code$args[['value']], 'exprClass')) ## was re-ordered here if(!(code$args[['value']]$isName)) asserts <- c(asserts, sizeInsertIntermediate(code, 1, symTab, typeEnv)) } if(!useNewMatrix) if(inherits(code$caller, 'exprClass')) if(!(code$caller$name %in% assignmentOperators)) { if(!is.null(code$caller$name)) { asserts <- c(asserts, sizeInsertIntermediate(code$caller, code$callerArgID, symTab, typeEnv)) } } else typeEnv$.ensureNimbleBlocks <- TRUE return(asserts) } sizeRunTime <- function(code, symTab, typeEnv) { if(length(code$args) != 1) stop(exprClassProcessingErrorMsg(code, paste0('run.time must take 
exactly 1 argument')), call. = FALSE) origCaller <- code$caller origCallerArgID <- code$callerArgID if(!code$caller$isAssign) { ## e.g. a + run.time({foo(y)}), should have already been lifted by buildIntermediates message('Problem in sizeRunTime: run.time is not in a simple assignment at this stage of processing.') } ## this is the case ans <- run.time({foo(y)}) lhsName <- code$caller$args[[1]]$name timerName <- IntermLabelMaker() newSym <- symbolNimbleTimer(name = timerName, type = 'symbolNimbleTimer') symTab$addSymbol(newSym) startTimerAssert <- RparseTree2ExprClasses(substitute(startNimbleTimer(TIMERNAME), list(TIMERNAME = as.name(timerName)))) recurseAsserts <- recurseSetSizes(code, symTab, typeEnv) ## arg to run.time should be in {} so any nested asserts should be done by the time this finishes and this should return NULL if(!is.null(recurseAsserts)) { message('issue in sizeRunTime: recurseAsserts is not NULL') } asserts <- list(startTimerAssert, code$args[[1]]) newCode <- RparseTree2ExprClasses(substitute(endNimbleTimer(TIMERNAME), list(TIMERNAME = as.name(timerName)))) newCode$type <- 'double' newCode$nDim <- 0 newCode$sizeExprs <- list() newCode$toEigenize <- 'no' setArg(origCaller, origCallerArgID, newCode) return(asserts) } sizeGetParam <- function(code, symTab, typeEnv) { if(length(code$args) > 3) { asserts <- recurseSetSizes(code, symTab, typeEnv, useArgs = c(FALSE, FALSE, FALSE, rep(TRUE, length(code$args)-3))) for(i in 4:length(code$args)) { if(inherits(code$args[[i]], 'exprClass')) { if(code$args[[i]]$toEigenize=='yes') stop(exprClassProcessingErrorMsg(code, 'In sizeGetParam: There is an expression beyond the third argument that cannot be handled. If it involve vectorized math, you need to do it separately, not in this expression.'), call. 
= FALSE) } } } else { asserts <- list() } paramInfoSym <- symTab$getSymbolObject(code$args[[3]]$name, inherits = TRUE) code$type <- paramInfoSym$paramInfo$type code$nDim <- paramInfoSym$paramInfo$nDim code$sizeExprs <- vector(mode = 'list', length = code$nDim) code$toEigenize <- 'no' if(!(code$caller$name %in% assignmentOperators)) { if(!is.null(code$caller$name)) if(!(code$caller$name == '{')) ## could be on its own line -- useless but possible asserts <- c(asserts, sizeInsertIntermediate(code$caller, code$callerArgID, symTab, typeEnv)) code$toEigenize <- 'maybe' } else typeEnv$.ensureNimbleBlocks <- TRUE return(asserts) } sizeGetBound <- function(code, symTab, typeEnv) { if(length(code$args) > 3) { asserts <- recurseSetSizes(code, symTab, typeEnv, useArgs = c(FALSE, FALSE, FALSE, rep(TRUE, length(code$args)-3))) for(i in 4:length(code$args)) { if(inherits(code$args[[i]], 'exprClass')) { if(code$args[[i]]$toEigenize=='yes') stop(exprClassProcessingErrorMsg(code, 'In sizeGetParam: There is an expression beyond the third argument that cannot be handled. If it involve vectorized math, you need to do it separately, not in this expression.'), call. = FALSE) } } } else { asserts <- list() } boundInfoSym <- symTab$getSymbolObject(code$args[[3]]$name, inherits = TRUE) code$type <- boundInfoSym$boundInfo$type code$nDim <- boundInfoSym$boundInfo$nDim code$sizeExprs <- vector(mode = 'list', length = code$nDim) code$toEigenize <- 'no' if(!(code$caller$name %in% assignmentOperators)) { if(!is.null(code$caller$name)) if(!(code$caller$name == '{')) ## could be on its own line -- useless but possible asserts <- c(asserts, sizeInsertIntermediate(code$caller, code$callerArgID, symTab, typeEnv)) code$toEigenize <- 'maybe' } return(asserts) } sizeSwitch <- function(code, symTab, typeEnv) { if(length(code$args) <= 2) return(invisible(NULL)) for(i in 3:length(code$args)) { ## just like the '{' clause of exprClasses_setSizes. 
This treats each of the outcomes as if it was a new line or block of code if(inherits(code$args[[i]], 'exprClass')) { newAsserts <- exprClasses_setSizes(code$args[[i]], symTab, typeEnv) code$args[[i]]$assertions <- if(is.null(newAsserts)) list() else newAsserts } } return(invisible(NULL)) } sizeAsRowOrCol <- function(code, symTab, typeEnv) { asserts <- recurseSetSizes(code, symTab, typeEnv) a1 <- code$args[[1]] if(!inherits(a1, 'exprClass')) stop(exprClassProcessingErrorMsg(code, 'In sizeAsRowOrCol: Argument must be an expression.'), call. = FALSE) if(a1$nDim == 0) stop(exprClassProcessingErrorMsg(code, 'In sizeAsRowOrCol: Argument cannot be scalar (could be fixed).'), call. = FALSE) code$type <- a1$type code$toEigenize <- 'yes' if(!code$name %in% c('asRow', 'asCol')) stop(exprClassProcessingErrorMsg(code, 'Somehow the system got to sizeAsRowOrCol without a call to asRow or asCol.'), call. = FALSE) if(a1$nDim == 1) { if(code$name == 'asRow') { code$nDim <- 2 code$sizeExprs <- c(list(1), a1$sizeExprs[[1]]) } else { code$nDim <- 2 code$sizeExprs <- c(a1$sizeExprs[[1]], list(1)) } return(asserts) } warning(paste0(' asRow or asCol used on something with more than 1 dimension in ', nimDeparse(code)), call. = FALSE) } ## a$b becomes nfVar(a, 'b') sizeNFvar <- function(code, symTab, typeEnv) { ## toEigen: Is it correct that this does not mark toEigen? asserts <- list() if(!inherits(code$args[[1]], 'exprClass')) stop(exprClassProcessingErrorMsg(code, 'Problem using $: no name on the right?'), call. = FALSE) if(length(code$args) != 2) stop(exprClassProcessingErrorMsg(code, 'Problem using $: wrong number of arguments?'), call. 
= FALSE) asserts <- recurseSetSizes(code, symTab, typeEnv) if(code$args[[1]]$isName) { objectName <- code$args[[1]]$name symbolObject <- symTab$getSymbolObject(objectName, inherits = TRUE) objectType <- symbolObject$type } else { ## if there is nesting, A$B$C, figure out what to do objectType <- code$args[[1]]$type symbolObject <- code$args[[1]]$sizeExprs ## repurposed for this role } isSymFunc <- objectType == 'nimbleFunction' ## minor inconsistency in naming style here isSymList <- objectType == 'nimbleList' ## Cases to handle (nl for nimbleList, nf for nimbleFunction): ## nl$a <- x ## NimArr assignment (not setSize needed) ## nl$a <- x + 1 ## eigen assignment (setSize needed) ## nl1$nl2$ <- x or x + 1 ## x <- foo(nl$a) ## x <- foo(nl1$nl2$b) ## same with nf instead of any nl, in any order ## nl$new()$a , which becomes makeNewNimbleListObject(nl1)$a ## nl in nlEigenReferenceList if(!(isSymFunc || isSymList)) stop(exprClassProcessingErrorMsg(code, 'In sizeNFvar: First argument is not a nimbleFunction or a nimbleList'), call. = FALSE) nfProc <- if(isSymFunc) symbolObject$nfProc else symbolObject$nlProc if(is.null(nfProc)) { browser() stop(exprClassProcessingErrorMsg(code, 'In handling X$Y: Symbols for X have not been set up.'), call. = FALSE) } memberName <- code$args[[2]] if(!is.character(memberName)) stop(exprClassProcessingErrorMsg(code, 'In handling X$Y: Something is wrong with Y.'), call. 
= FALSE) memberSymbolObject <- nfProc$getSymbolTable()$getSymbolObject(memberName) if(!is.null(memberSymbolObject)) code$type <- memberSymbolObject$type if(isSymList | isSymFunc) { ## nimbleList ## We need (*nl) in C++, represented by cppPointerDereference(nl) if(code$args[[1]]$name != 'cppPointerDereference') { a1 <- insertExprClassLayer(code, 1, 'cppPointerDereference', type = code$args[[1]]$type, nDim = code$args[[1]]$nDim, sizeExprs = code$args[[1]]$sizeExprs) } } ## following checks are on type of A$B (isSymList and isSymFunc refer to type of A) if(code$type == 'nimbleList') { ## for a nimbleList, sizeExprs slot is used for symbol object ## of nlGenerator of member object code$sizeExprs <- memberSymbolObject } else if(code$type == 'nimbleFunction') { ## nimbleFunction code$sizeExprs <- memberSymbolObject } else if(code$type == 'nimbleFunctionList') { code$sizeExprs <- memberSymbolObject } else { ## a numeric etc. type code$nDim <- memberSymbolObject$nDim code$sizeExprs <- if(code$nDim > 0) makeSizeExpressions(memberSymbolObject$size, parse(text = nimDeparse(code))[[1]]) else list() } return(asserts) } sizeNimDerivs <- function(code, symTab, typeEnv){ if(code$args[[1]]$name == 'calculate'){ calcDerivFlag <- T code$args[[1]]$name <- paste0(code$args[[1]]$name, 'WithArgs_deriv') } else{ calcDerivFlag <- F code$args[[1]]$name <- paste0(code$args[[1]]$name, '_deriv') } setArg(code$caller, code$callerArgID, code$args[[1]]) setArg(code$args[[1]], length(code$args[[1]]$args) + 1, code$args[[2]]) # Set order argument. 
    code$args[[2]] <- NULL
    asserts <- recurseSetSizes(code$args[[1]], symTab, typeEnv)
    ## the rewritten inner call returns an ADNimbleList (NIMBLE_ADCLASS)
    code$args[[1]]$type <- 'nimbleList'
    code$args[[1]]$sizeExprs <- symTab$getSymbolObject('NIMBLE_ADCLASS', TRUE)
    code$args[[1]]$toEigenize <- "yes"
    code$args[[1]]$nDim <- 0
    if(calcDerivFlag) asserts <- c(asserts, sizeScalarModelOp(code$args[[1]], symTab, typeEnv))
    else asserts <- c(asserts, sizeNimbleFunction(code$args[[1]], symTab, typeEnv))
    #setArg(code$args[[1]], length(code$args[[1]]$args) + 1, code$args[[3]]) # Sets variables argument, not yet implemented.
    if(length(asserts) == 0) NULL else asserts
}

## Size handler for built-in functions that return a nimbleList (e.g. nimEigen, nimSvd).
## Looks up (or creates) the symbol for the returned list's generator class.
sizeNimbleListReturningFunction <- function(code, symTab, typeEnv) {
    asserts <- recurseSetSizes(code, symTab, typeEnv)
    code$type <- 'nimbleList'
    nlGen <- nimbleListReturningFunctionList[[code$name]]$nlGen
    nlDef <- nl.getListDef(nlGen)
    className <- nlDef$className
    symbolObject <- symTab$getSymbolObject(className, inherits = TRUE)
    if(is.null(symbolObject)) {
        ## first use of this list class in this project: compile it and register the symbol
        nlp <- typeEnv$.nimbleProject$compileNimbleList(nlGen, initialTypeInference = TRUE)
        symbolObject <- symbolNimbleListGenerator(name = className, nlProc = nlp)
        symTab$addSymbol(symbolObject)
    }
    code$sizeExprs <- symbolObject
    code$toEigenize <- "yes" # This is specialized for nimSvd and nimEigen.
    if(code$name == 'getDerivs') code$toEigenize <- 'no' ## Temp. solution to ensure that derivsOrders argument is a nimArray and not an eigen type.
    code$nDim <- 0
    if(!nimbleOptions('experimentalSelfLiftStage')) {
        if(!(code$caller$name %in% assignmentOperators))
            asserts <- c(asserts, sizeInsertIntermediate(code$caller, code$callerArgID, symTab, typeEnv))
    }
    if(length(asserts) == 0) NULL else asserts
}

## Size handler for optim(): annotates the result as an optim-result nimbleList and
## resolves the fn/gr arguments to compilable RCfunctions or nfMethods.
sizeOptim <- function(code, symTab, typeEnv) {
    asserts <- recurseSetSizes(code, symTab, typeEnv)
    code$type <- 'nimbleList'
    nlGen <- nimbleListReturningFunctionList[[code$name]]$nlGen
    nlDef <- nl.getListDef(nlGen)
    className <- nlDef$className
    symbolObject <- symTab$getSymbolObject(className, inherits = TRUE)
    if(is.null(symbolObject)) {
        nlp <- typeEnv$.nimbleProject$compileNimbleList(nlGen, initialTypeInference = TRUE)
        symbolObject <- symbolNimbleListGenerator(name = className, nlProc = nlp)
        symTab$addSymbol(symbolObject)
    }
    code$sizeExprs <- symbolObject
    code$toEigenize <- "no"
    code$nDim <- 0
    fnCode <- code$args$fn
    if (fnCode$name == 'nfMethod') {
        # This is handled in cppOutputNFmethod.
    } else if(exists(fnCode$name) && is.rcf(get(fnCode$name))) {
        # Handle fn arguments that are RCfunctions.
        fnCode$name <- environment(get(fnCode$name))$nfMethodRCobject$uniqueName
    } else {
        stop(paste0('unsupported fn argument in optim(par, fn = ', fnCode$name, '); try an RCfunction or nfMethod instead'))
    }
    grCode <- code$args$gr
    if (identical(grCode, "NULL")) {
        # We simply emit "NULL".
    } else if (grCode$name == 'nfMethod') {
        # This is handled in cppOutputNFmethod.
    } else if(exists(grCode$name) && is.rcf(get(grCode$name))) {
        # Handle gr arguments that are RCfunctions.
        grCode$name <- environment(get(grCode$name))$nfMethodRCobject$uniqueName
    } else {
        stop(paste0('unsupported gr argument in optim(par, gr = ', grCode$name, '); try an RCfunction or nfMethod instead'))
    }
    ## lift lower/upper bounds if they would otherwise be eigenized inside the optim call
    for(arg in c(code$args$lower, code$args$upper)) {
        if(inherits(arg, 'exprClass') && arg$toEigenize=='yes') {
            asserts <- c(asserts, sizeInsertIntermediate(code, arg$callerArgID, symTab, typeEnv))
        }
    }
    if(length(asserts) == 0) NULL else asserts
}

## Size handler for optimDefaultControl(): annotates result as the control nimbleList,
## registering its generator symbol on first use.
sizeOptimDefaultControl <- function(code, symTab, typeEnv) {
    asserts <- recurseSetSizes(code, symTab, typeEnv)
    code$type <- 'nimbleList'
    nlGen <- nimbleListReturningFunctionList[[code$name]]$nlGen
    nlDef <- nl.getListDef(nlGen)
    className <- nlDef$className
    symbolObject <- symTab$getSymbolObject(className, inherits = TRUE)
    if(is.null(symbolObject)) {
        nlp <- typeEnv$.nimbleProject$compileNimbleList(nlGen, initialTypeInference = TRUE)
        symbolObject <- symbolNimbleListGenerator(name = className, nlProc = nlp)
        symTab$addSymbol(symbolObject)
    }
    code$sizeExprs <- symbolObject
    code$toEigenize <- "no"
    code$nDim <- 0
    if(length(asserts) == 0) NULL else asserts
}

## Size handler for cppPointerDereference(x): a transparent wrapper, so all size
## annotations are copied straight from the wrapped expression.
sizeCppPointerDereference <- function(code, symTab, typeEnv) {
    asserts <- recurseSetSizes(code, symTab, typeEnv)
    code$type <- code$args[[1]]$type
    code$sizeExprs <- code$args[[1]]$sizeExprs
    code$toEigenize <- code$args[[1]]$toEigenize
    code$nDim <- code$args[[1]]$nDim
    if(length(asserts) == 0) NULL else asserts
}

## Size handler for nfList[[i]]: only valid on a nimbleFunctionList.
sizeDoubleBracket <- function(code, symTab, typeEnv) {
    asserts <- recurseSetSizes(code, symTab, typeEnv)
    if(code$args[[1]]$isName) {
        objectName <- code$args[[1]]$name
        symbolObject <- symTab$getSymbolObject(objectName, inherits = TRUE)
        objectType <- symbolObject$type
    } else { ## if there is nesting, A$B$C, figure out what to do
        objectType <- code$args[[1]]$type
        symbolObject <- code$args[[1]]$sizeExprs ## repurposed for this role
    }
    isSymFuncList <- objectType == 'nimbleFunctionList'
    if(!isSymFuncList) stop('nfList[[i]] must use a nimbleFunctionList')
    code$sizeExprs <- symbolObject
    code$type <- objectType
    return(if(is.null(asserts)) list() else asserts)
}

## Size handler for a chained call, i.e. calling a method extracted by nfMethod().
sizeChainedCall <- function(code, symTab, typeEnv) {
    ## options include nfMethod(nf, 'foo')(a), or nfMethod(nf[[i]], 'foo')(a) [which arises from nf[[i]]$foo(a), where nf is a local nflist, where nf could need recursion, in which case it will be wrapped in nfVar
    ## In other places we generate chainedCalls for static_cast<int>(a), but those shouldn't be seen here
    a1 <- code$args[[1]]
    if(!inherits(a1, 'exprClass')) stop(exprClassProcessingErrorMsg(code, 'In sizeChainedCall. First arg is not an expression.'), call. = FALSE)
    nfMethodRCobj <- NULL
    if(a1$name != 'nfMethod') stop(exprClassProcessingErrorMsg(code, 'Some problem processing a chained call.'), call. = FALSE)
    ## Recurse only on the object part of nfMethod(obj, 'name'); the method name is a literal.
    asserts <- recurseSetSizes(a1, symTab, typeEnv, useArgs = c(TRUE, rep(FALSE, length(a1$args)-1)))
    a11 <- a1$args[[1]]
    methodName <- a1$args[[2]]
    if(a1$args[[1]]$isName) {
        objectName <- a1$args[[1]]$name
        symbolObject <- symTab$getSymbolObject(objectName, inherits = TRUE)
        objectType <- symbolObject$type
    } else {
        ## if there is nesting, A$B$C, figure out what to do
        objectType <- a1$args[[1]]$type
        symbolObject <- a1$args[[1]]$sizeExprs ## repurposed for this role
    }
    isSymFun <- objectType == 'nimbleFunction'
    isSymFunList <- objectType == 'nimbleFunctionList'
    if(! (isSymFun | isSymFunList)) stop('Problem processing what looks like a member function call.')
    if(!is.character(methodName)) stop(exprClassProcessingErrorMsg(code, 'In handling X$Y: Something is wrong with Y.'), call. = FALSE)
    nfProc <- symbolObject$nfProc
    if(is.null(nfProc)) {
        stop(exprClassProcessingErrorMsg(code, 'In handling X$Y(): Symbols for X have not been set up.'), call. = FALSE)
    }
    if(isSymFun) {
        ## A single nimbleFunction object is held by pointer in C++, so dereference it.
        if(a1$args[[1]]$name != 'cppPointerDereference') {
            insertExprClassLayer(a1, 1, 'cppPointerDereference') ## not annotated, but not needed
        }
    }
    if(isSymFun) {
        returnSymbol <- nfProc$compileInfos[[methodName]]$returnSymbol
        argSymTab <- nfProc$compileInfos[[methodName]]$origLocalSymTab
    }
    if(isSymFunList) {
        ## Same lookup path as the single-function case; kept separate in the original.
        returnSymbol <- nfProc$compileInfos[[methodName]]$returnSymbol
        argSymTab <- nfProc$compileInfos[[methodName]]$origLocalSymTab
    }
    if(!is.null(returnSymbol)) {
        asserts <- generalFunSizeHandlerFromSymbols(code, symTab, typeEnv, returnSymbol, argSymTab, chainedCall = TRUE)
        return(asserts)
    }
    invisible(NULL)
    ## NOTE(review): reached only when returnSymbol is NULL; looks like leftover diagnostic
    ## output -- confirm whether this branch is expected to be reachable.
    writeLines('Warning')
}

## Size handler for values(accessor) / values(accessor, index): annotates the result
## as a 1-D double vector whose length comes from the model-values accessor.
## (function body continues on the next chunk line)
sizeValues <- function(code, symTab, typeEnv) {
    code$nDim <- 1
    code$type <- 'double'
    code$toEigenize <- 'no'
    sym <- symTab$getSymbolObject(code$args[[1]]$name, TRUE)
    indexRangeCase <- FALSE
    if(length(code$args) == 1) { # full vector of nodes
        code$sizeExprs <- list(substitute(cppMemberFunction(getTotalLength(ACCESSNAME)), list(ACCESSNAME = as.name(code$args[[1]]$name))))
        asserts <- list()
    } else { # there must be index on the node
        asserts <- recurseSetSizes(code, symTab, typeEnv, useArgs = c(FALSE, rep(TRUE, length(code$args)-1)))
        if(is.numeric(code$args[[2]])) {
            code$sizeExprs <- list(substitute(cppMemberFunction(getNodeLength(ACCESSNAME, ACCESSINDEX)), list(ACCESSNAME = as.name(code$args[[1]]$name), ACCESSINDEX = code$args[[2]])))
        } else {
            ## Non-name index expressions are lifted into intermediates first.
            if(!(code$args[[2]]$isName)) asserts <- c(asserts, sizeInsertIntermediate(code, 2, symTab, typeEnv))
            if(code$args[[2]]$nDim > 0) {
                ## Vector of node indices: length is computed over the index set.
                code$sizeExprs <- list(substitute(getNodesLength_Indices(ACCESSNAME, ACCESSINDEX), list(ACCESSNAME = as.name(code$args[[1]]$name), ACCESSINDEX = as.name(code$args[[2]]$name))))
                indexRangeCase <- TRUE
            } else {
                code$sizeExprs <- list(substitute(cppMemberFunction(getNodeLength(ACCESSNAME, ACCESSINDEX)), list(ACCESSNAME = as.name(code$args[[1]]$name), ACCESSINDEX = as.name(code$args[[2]]$name))))
            }
        }
    }
    if(code$caller$name == "[" & code$caller$callerArgID == 1)
# values(...)[.] <- if(typeEnv$.AllowUnknowns) ## a surrogate for being on LHS of an assignment. values(...)[] should work on RHS stop(exprClassProcessingErrorMsg(code, 'In sizeValues: indexing of values() on left-hand size of an assignment is not allowed.'), call. = FALSE) if(code$caller$name %in% assignmentOperators) { if(code$callerArgID == 2) { ## ans <- values(...) code$name <- if(!indexRangeCase) 'getValues' else 'getValuesIndexRange' LHS <- code$caller$args[[1]] if(LHS$isName) { ## It is a little awkward to insert setSize here, but this is different from other cases in sizeAssignAfterRecursing assertSS <- list(substitute(setSize(LHS), list(LHS = as.name(LHS$name)))) if(length(code$args) == 1) { # full vector of nodes assertSS[[1]][[3]] <- substitute(cppMemberFunction(getTotalLength(ACCESSNAME)), list(ACCESSNAME = as.name(code$args[[1]]$name))) } else { # there must be index on the node if(is.numeric(code$args[[2]])) { assertSS[[1]][[3]] <- substitute(cppMemberFunction(getNodeLength(ACCESSNAME, ACCESSINDEX)), list(ACCESSNAME = as.name(code$args[[1]]$name), ACCESSINDEX = code$args[[2]])) } else { if(code$args[[2]]$nDim > 0) { assertSS[[1]][[3]] <- substitute(getNodesLength_Indices(ACCESSNAME, ACCESSINDEX), list(ACCESSNAME = as.name(code$args[[1]]$name), ACCESSINDEX = as.name(code$args[[2]]$name))) ## intermediate has already been inserted above, if needed } else { assertSS[[1]][[3]] <- substitute(cppMemberFunction(getNodeLength(ACCESSNAME, ACCESSINDEX)), list(ACCESSNAME = as.name(code$args[[1]]$name), ACCESSINDEX = as.name(code$args[[2]]$name))) } } } asserts <- c(asserts, assertSS) } else typeEnv$.ensureNimbleBlocks <- TRUE } else { # values(...) <- P, don't change it if(indexRangeCase) code$name <- 'valuesIndexRange' } } else { ## values(...) 
embedded in a RHS expression code$name <- if(!indexRangeCase) 'getValues' else 'getValuesIndexRange' code$toEigenize <- 'yes' ## This tricks sizeAssignAfterRecursing to generate the setSize in asserts, in getValues case (getValuesIndexRange is in set of names to skip for that) asserts <- c(asserts, sizeInsertIntermediate(code$caller, code$callerArgID, symTab, typeEnv)) code$toEigenize <- 'no' } if(length(asserts)==0) NULL else asserts } sizeRCfunction <- function(code, symTab, typeEnv, nfmObj, RCfunProc) { returnType <- nfmObj$returnType argInfo <- nfmObj$argInfo code$name <- nfmObj$uniqueName returnSymbol <- RCfunProc$compileInfo$returnSymbol argSymTab <- RCfunProc$compileInfo$origLocalSymTab asserts <- generalFunSizeHandlerFromSymbols(code, symTab, typeEnv, returnSymbol, argSymTab) return(asserts) } sizeNimbleFunction <- function(code, symTab, typeEnv) { ## This will handle other nimbleFunction run calls or other methods of this nimbleFunction sym <- symTab$getSymbolObject(code$name, TRUE) ok <- FALSE if(inherits(sym, 'symbolNimbleFunction')) { stop(exprClassProcessingErrorMsg(code, 'In sizeNimbleFunction: A nimbleFunction method should not be processed here.'), call. = FALSE) ## HANDLING OF myNF$run() HERE IS DEFUNCT. ALL SHOULD GO THROUGH sizeChainedCall now (chainedCall(nfMethod(myNF,'run'), arg1, arg2). } if(inherits(sym, 'symbolMemberFunction')) { memberRCfunProc <- sym$RCfunProc returnSymbol <- memberRCfunProc$compileInfo$returnSymbol argSymTab <- memberRCfunProc$compileInfo$origLocalSymTab ok <- TRUE } if(ok) { asserts <- generalFunSizeHandlerFromSymbols(code, symTab, typeEnv, returnSymbol, argSymTab) return(asserts) } stop(exprClassProcessingErrorMsg(code, 'In sizeNimbleFunction: The function name is not known and is not a nimbleFunction or a member function.'), call. = FALSE) } recurseSetSizes <- function(code, symTab, typeEnv, useArgs = rep(TRUE, length(code$args))) { ## won't be here unless code is a call. 
It will not be a { asserts <- list() for(i in seq_along(code$args)) { if(useArgs[i]) { if(inherits(code$args[[i]], 'exprClass')) { asserts <- c(asserts, exprClasses_setSizes(code$args[[i]], symTab, typeEnv)) } } } if(length(asserts)==0) NULL else asserts } ## promote numeric output to most information-rich type, double > integer > logical ## Note this will not be correct for logical operators, where output type should be logical arithmeticOutputType <- function(t1, t2) { if(t1 == 'double') return('double') if(t2 == 'double') return('double') if(t1 == 'integer') return('integer') if(t2 == 'integer') return('integer') return('logical') } ## Generate R code for an equality assertion identityAssert <- function(lhs, rhs, msg = "") { if(identical(lhs, rhs)) return(NULL) msg <- gsub("\"", "\\\\\"", msg) substitute(if(lhs != rhs) nimPrint(msg), list(lhs = lhs, rhs = rhs, msg = msg)) } ## Determine if LHS is less information-rich that RHS and issue a warning. ## e.g. if LHS is int but RHS is double. assignmentTypeWarn <- function(LHS, RHS) { if(LHS == 'int' & RHS == 'double') return(TRUE) if(LHS == 'logical' & RHS != 'logical') return(TRUE) return(FALSE) } ## used for setAll ## toEigen: N.B. This may be deprecated. sizeOneEigenCommand <- function(code, symTab, typeEnv) { if(!code$args[[1]]$isName) stop(exprClassProcessingErrorMsg(code, 'In sizeOneEigenCommand: First arg should be a name.'), call. = FALSE) recurseSetSizes(code, symTab, typeEnv) if(code$args[[1]]$nDim != 2) stop(exprClassProcessingErrorMsg(code, 'In sizeOneEigenCommand: At the moment only works for 2D objects.'), call. 
= FALSE)
    code$toEigenize <- 'yes'
    invisible(NULL)
}

## This is used for nimPrint
## If anything has toEigenize == "maybe", the whole expression gets "yes"
## That way cout<<X; will use an eigen map for X
sizeforceEigenize <- function(code, symTab, typeEnv) {
    asserts <- recurseSetSizes(code, symTab, typeEnv)
    ## Collect eigenization status of each argument ('unknown' for non-expressions).
    toEigs <- lapply(code$args, function(x) {
        if(inherits(x, 'exprClass')) x$toEigenize else 'unknown'
    })
    ## Non-name logical expressions are lifted into intermediates.
    toLift <- lapply(code$args, function(x) {
        if(inherits(x, 'exprClass')) (identical(x$type, 'logical') & !x$isName) else FALSE
    })
    for(i in seq_along(toLift)) {
        if(toLift[[i]]) asserts <- c(asserts, sizeInsertIntermediate(code, i, symTab, typeEnv))
    }
    code$toEigenize <- if(any( unlist(toEigs) %in% c('maybe', 'yes'))) 'yes' else 'no'
    code$type <- 'unknown'
    if(length(asserts) == 0) NULL else asserts
}

## This is for when the programmer has directly written "resize(Z, 3, dim(A)[1])".
## When the resize is automatically generated, it skips size inference
nimbleGeneralParseDeparse <- function(code) {
    ## Round-trip an exprClass through deparse/parse to get an R parse tree; pass
    ## through anything that is not an exprClass.
    if(inherits(code,'exprClass')) parse(text = nimDeparse(code), keep.source = FALSE)[[1]] else code
}

## Size handler for setSize()/resize(): validates dimension counts and normalizes the
## argument layout.  (function body continues on the next chunk line)
sizeSetSize <- function(code, symTab, typeEnv) {
    #go inside nfVar call if resizing nimbleList element
    if(code$args[[1]]$name == 'nfVar'){
        useArg1 <- TRUE
        sym <- symTab$getSymbolObject(code$args[[1]]$args[[1]]$name)
        if(sym$type == 'nimbleList'){
            sym <- sym$nlProc$symTab$getSymbolObject(code$args[[1]]$args[[2]])
        }
    } else {
        sym <- symTab$getSymbolObject(code$args[[1]]$name, inherits = TRUE)
        useArg1 <- FALSE
    }
    asserts <- list()
    if(!inherits(sym, 'symbolNumericList')) {
        if(sym$nDim == 0) stop(exprClassProcessingErrorMsg(code, 'In sizeSetSize: Resizing a scalar does not make sense.'), call. = FALSE)
        firstSizeExpr <- code$args[[2]]
        ## first two arguments are variable to be resized and new sizes
        ## extra arguments would be fillZeros and recycle
        ## need to determine if any extra arguments were provided in order to repack arguments correctly below
        if(length(code$args) > 2) nExtraArgs <- length(code$args)-2 else nExtraArgs <- 0
        if(nExtraArgs > 0) asserts <- c(asserts, recurseSetSizes(code, symTab, typeEnv, c(rep(FALSE, 2), rep(TRUE, nExtraArgs))))
        if(inherits(firstSizeExpr, 'exprClass')) {
            if(firstSizeExpr$name == 'nimC') { ## handle syntax of resize(Z, c(3, dim(A)[1]))
                if(length(firstSizeExpr$args) != sym$nDim) stop(exprClassProcessingErrorMsg(code, 'In sizeSetSize: Problem with number of dimensions provided in resize.'), call. = FALSE)
                asserts <- c(asserts, recurseSetSizes(firstSizeExpr, symTab, typeEnv)) ## may set intermediates if needed
                if(nExtraArgs > 0) {
                    origExtraArgs <- code$args[3:length(code$args)] ## preserve extra arguments
                    code$args <- code$args[1:2]
                }
                ## Splice the elements of c(...) in as separate size arguments,
                ## re-pointing each lifted expression at this call.
                for(i in 1:length(firstSizeExpr$args)) {
                    code$args[[i+1]] <- firstSizeExpr$args[[i]]
                    if(inherits(firstSizeExpr$args[[i]], 'exprClass')) {
                        firstSizeExpr$args[[i]]$caller <- code
                        firstSizeExpr$args[[i]]$callerArgID <- i+1
                    }
                }
                if(nExtraArgs > 0) { ## reinsert extra arguments on end.
for(i in 1:nExtraArgs) {
                        setArg(code, length(code$args) + 1, origExtraArgs[[i]])
                    }
                }
                return(if(length(asserts)==0) NULL else asserts)
            }
        }
        useArgs <- c(useArg1, TRUE )
        if(nExtraArgs > 0) useArgs <- c(useArgs, rep(FALSE, nExtraArgs))
        asserts <- c(asserts, recurseSetSizes(code, symTab, typeEnv, useArgs) )
        if(inherits(code$args[[2]], 'exprClass')) {
            if(code$args[[2]]$nDim > 0) {
                ## A vector of sizes: lift non-name expressions and switch to the
                ## NimArr-to-NimArr resize operation.
                if(!(code$args[[2]]$isName)) asserts <- c(asserts, sizeInsertIntermediate(code, 2, symTab, typeEnv))
                code$name <- 'setSizeNimArrToNimArr'
            }
        }
        ## We used to update typeEnv here with the new sizes, but it is not safe to do so because the setSize might appear inside a conditional (if-then)
        ## and hence one can't know until run-time if the size will actually be changed as given. Thus typeEnv sizeExprs are set when a variable first appears
        ## and should be either constants (and not ever setSized again, which we should check for but don't) or remain generic (dim(x)[1], etc)
        ## assign(code$args[[1]]$name, exprTypeInfoClass$new(nDim = sym$nDim, sizeExprs = lapply(code$args[-1], nimbleGeneralParseDeparse), type = sym$type), envir = typeEnv)
        return(if(length(asserts)==0) NULL else asserts)
    }
    if(inherits(sym, 'symbolNumericList') ) { ## these are deprecated
        if(length(code$args) != 2 + sym$nDim) stop(exprClassProcessingErrorMsg(code, 'In sizeSetSize: Problem with number of dimensions provided in resize.'), call. = FALSE)
        invisible(NULL)
    }
}

## This was redundant and we should eventually be able to remove it
## toEigen: N.B. omitting this
sizeResizeNoPtr <- function(code, symTab, typeEnv){
    sym <- symTab$getSymbolObject(code$args[[1]]$name, inherits = TRUE)
    if(length(code$args[[2]]) != 1) stop(exprClassProcessingErrorMsg(code, 'In sizeResizeNoPtr: Problem with number of dimensions provided in resize.'), call. = FALSE)
    ## no longer modify typeEnv
    ## assign(code$name, exprTypeInfoClass$new(nDim = 1, sizeExprs = lapply(code$args[-1], nimDeparse), type = sym$type), envir = typeEnv)
    invisible(NULL)
}

## Handler for for-loops: a fairly special case
## e.g. for(i in 1:10) {do(i)}
sizeFor <- function(code, symTab, typeEnv) {
    if(length(code$args) != 3) stop('Error in sizeFor: expected 3 arguments to a for-loop', call. = FALSE)
    ## first handle type of the indexing variable
    if(!inherits(code$args[[2]], 'exprClass')) stop(exprClassProcessingErrorMsg(code, 'In sizeFor: expected the index range to be an expression (exprClass).'), call. = FALSE)
    asserts <- exprClasses_setSizes(code$args[[2]], symTab, typeEnv)
    ## The loop index is a scalar of the same type as the range expression.
    code$args[[1]]$nDim <- 0
    code$args[[1]]$sizeExprs <- list()
    code$args[[1]]$type <- code$args[[2]]$type
    code$args[[1]]$toEigenize <- 'no'
    ## if index is unknown, create it in typeEnv and in the symTab
    if(!exists(code$args[[1]]$name, envir = typeEnv, inherits = FALSE)) {
        assign(code$args[[1]]$name, exprTypeInfoClass$new(nDim = 0, type = code$args[[1]]$type), envir = typeEnv)
        symTab$addSymbol(symbolBasic(name = code$args[[1]]$name, nDim = 0, type = code$args[[1]]$type))
    }
    typeEnv[[code$args[[1]]$name]]$sizeExprs <- list()
    ## Now the 3rd arg, the body of the loop, can be processed
    asserts <- c(asserts, exprClasses_setSizes(code$args[[3]], symTab, typeEnv))
    ## I think there shouldn't be any asserts returned since the body should be a bracket expression.
    return(if(length(asserts) == 0) invisible(NULL) else asserts)
}

## Lift argument argID of a call into a freshly-named intermediate variable, returning
## the generated setup lines.  (function body continues on the next chunk line)
sizeInsertIntermediate <- function(code, argID, symTab, typeEnv, forceAssign = FALSE) {
    newName <- IntermLabelMaker()
    ## I think it is valid and general to catch maps here.
    ## For most variables, creating an intermediate involves interN <- expression being lifted
    ## But for map, which will be using a NimArr if it is lifted here, what we need to generate is setMap call
    mapcase <- if(is.numeric(code$args[[argID]])) FALSE else (code$args[[argID]]$name == 'map' & !forceAssign)
    if(mapcase) {
        ans <- nimArrMapExpr(code$args[[argID]], symTab, typeEnv, newName) ## That should create the symTab entry
        ans <- RparseTree2ExprClasses(ans)
        ## Build the replacement name expression carrying the lifted map's annotations.
        newArgExpr <- RparseTree2ExprClasses(as.name(newName))
        newArgExpr$type <- code$args[[argID]]$type
        newArgExpr$sizeExprs <- code$args[[argID]]$sizeExprs
        if(!nimbleOptions('experimentalNewSizeProcessing')) {
            newArgExpr$toEigenize <- 'maybe'
        }
        newArgExpr$nDim <- code$args[[argID]]$nDim
    } else {
        ## One may wonder where the new variable is added to the
        ## symbolTable.  That happens when we do
        ## sizeAssignAfterRecursing, which identifies unknown LHS and
        ## creates the symTab entry.
        newExpr <- newAssignmentExpression()
        setArg(newExpr, 1, RparseTree2ExprClasses(as.name(newName)))
        setArg(newExpr, 2, code$args[[argID]]) ## The setArg function should set code$caller (to newExpr) and code$callerArgID (to 3)
        ans <- c(sizeAssignAfterRecursing(newExpr, symTab, typeEnv, NoEigenizeMap = TRUE), list(newExpr))
        newArgExpr <- RparseTree2ExprClasses(as.name(newName))
        newArgExpr$type <- newExpr$args[[1]]$type
        newArgExpr$sizeExprs <- newExpr$args[[1]]$sizeExprs
        if(!nimbleOptions('experimentalNewSizeProcessing')) {
            newArgExpr$toEigenize <- 'maybe'
        }
        newArgExpr$nDim <- newExpr$args[[1]]$nDim
    }
    setArg(code, argID, newArgExpr)
    return(ans) ## This is to be inserted in a list of asserts, even though it is really core code, not just an a test or assertion
}

## Entry handler for assignment: recurses RHS first (with unknowns disallowed), then
## LHS, then finishes with sizeAssignAfterRecursing.
## (function body continues on the next chunk line)
sizeAssign <- function(code, symTab, typeEnv) {
    typeEnv$.AllowUnknowns <- FALSE
    asserts <- recurseSetSizes(code, symTab, typeEnv, useArgs = c(FALSE, TRUE))
    typeEnv$.AllowUnknowns <- TRUE
    if(length(code$args) > 2){
        asserts <- c(asserts, exprClasses_setSizes(code, symTab, typeEnv))
    } else{
asserts <- c(asserts, recurseSetSizes(code, symTab, typeEnv, useArgs = c(TRUE, FALSE)))
        typeEnv[['.ensureNimbleBlocks']] <- FALSE ## may have been true from RHS of rmnorm etc.
        asserts <- c(asserts, sizeAssignAfterRecursing(code, symTab, typeEnv))
    }
    if(length(asserts) == 0) NULL else asserts
}

## Handler for assignment
## Annotates an assignment after both sides have been size-processed, creating the LHS
## symbol when it is new.  (function body continues on the next chunk lines)
sizeAssignAfterRecursing <- function(code, symTab, typeEnv, NoEigenizeMap = FALSE) {
    LHS <- code$args[[1]]
    RHS <- code$args[[2]]
    ## Capture the RHS annotations, whether it is an expression or a literal.
    if(inherits(RHS, 'exprClass')) {
        RHSname <- RHS$name
        RHSnDim <- RHS$nDim
        RHStype <- RHS$type
        RHSsizeExprs <- RHS$sizeExprs
    } else {
        if(is.numeric(RHS) | is.logical(RHS)) {
            RHSname = ''
            RHSnDim <- 0
            RHStype <- storage.mode(RHS)
            RHSsizeExprs <- list()
        } else if(is.character(RHS)){
            RHSname = ''
            RHSnDim <- 0
            RHStype <- 'character'
            RHSsizeExprs <- list()
        } else {
            stop(exprClassProcessingErrorMsg(code, "In sizeAssignAfterRecursing: don't know what to do with a provided expression."), call. = FALSE)
        }
    }
    test <- try(if(inherits(RHStype, 'uninitializedField') | length(RHStype)==0) {
        stop(exprClassProcessingErrorMsg(code, paste0("In sizeAssignAfterRecursing: '",RHSname, "' is not available or its output type is unknown.")), call.
= FALSE)
    })
    ## NOTE(review): browser() calls below are debugging aids retained from the original.
    if(inherits(test, 'try-error')) browser()
    if(LHS$isName) {
        if(!exists(LHS$name, envir = typeEnv, inherits = FALSE)) { ## not in typeEnv
            ## If LHS unknown, create it in typeEnv
            if(!symTab$symbolExists(LHS$name, TRUE)) { ## not in symTab
                if(RHStype %in% c('double','integer', 'logical')) { ## valid type to create here
                    ## We used to delay creating sizeExprs until below, but now it always generic
                    assign(LHS$name, exprTypeInfoClass$new(nDim = RHSnDim, type = RHStype, sizeExprs = makeSizeExpressions(rep(NA, RHSnDim), LHS$name)), envir = typeEnv)
                    symTab$addSymbol(symbolBasic(name = LHS$name, nDim = RHSnDim, type = RHStype))
                } else { ## not valid type to create here
                    if(RHStype == 'voidPtr') {
                        ## This should be ok without sizeExprs content
                        assign(LHS$name, exprTypeInfoClass$new(nDim = RHSnDim, type = RHStype), envir = typeEnv)
                        symTab$addSymbol(symbolVoidPtr(name = LHS$name, type = RHStype))
                    }
                    ## a path for arbitrary symbols
                    else if(RHStype == "custom") {
                        ConlySym <- RHS$sizeExprs$copy() ## trick to put a symbol object here. use a copy in case this expr is from simple assignment, not creation
                        ConlySym$name <- LHS$name
                        symTab$addSymbol(ConlySym)
                        code$type <- "custom"
                        code$sizeExprs <- ConlySym ## in case there is chained assignment
                        return(invisible(NULL))
                    } else if(RHStype == "nimbleList") {
                        ## I think we have the nlProc in the RHS sizeExprs in some cases?
                        LHSnlProc <- symTab$getSymbolObject(RHS$name)$nlProc
                        if(is.null(LHSnlProc)) LHSnlProc <- RHS$sizeExprs$nlProc
                        if(is.null(LHSnlProc)) LHSnlProc <- symTab$getSymbolObject(RHS$name, inherits = TRUE)$nlProc
                        symTab$addSymbol(symbolNimbleList(name = LHS$name, type = RHStype, nlProc = LHSnlProc))
                    } else if(symTab$symbolExists(RHStype, TRUE)){
                        ## this is TRUE if a nested nimbleFunction returns a nimbleList - the type of
                        ## the returned nimbleList will be a symbolNimbleListGenerator that exists
                        ## in the parent ST.
                        LHSnlProc <- symTab$getSymbolObject(RHStype, TRUE)$nlProc
                        symTab$addSymbol(symbolNimbleList(name = LHS$name, nlProc = LHSnlProc))
                    } else stop(exprClassProcessingErrorMsg(code, paste0('In sizeAssignAfterRecursing: LHS is not in typeEnv or symTab and cannot be added now.')), call. = FALSE)
                }
            } else { ## yes in symTab
                ## this is another path for arbitrary symbols, but not sure it's used.
                ## This case is ok.  It is in the symbol table but not the typeEnv.  So it is something like ptr <- getPtr(A)
                if(!nimbleOptions('experimentalNewSizeProcessing')) {
                    code$toEigenize <- 'no'
                } ##experimentalNewSizeProcessing
                code$nDim <- 0
                code$type <- 'unknown'
                code$sizeExprs <- list()
                return(invisible(NULL))
            }
        } else { ## yes in typeEnv.  must be symTab too.
            ## If LHS known, check if nDim matches RHS
            if(length(LHS$nDim) == 0) stop(exprClassProcessingErrorMsg(code, paste0('In sizeAssignAfterRecursing: nDim for LHS not set.')), call. = FALSE)
            if(length(RHSnDim) == 0) stop(exprClassProcessingErrorMsg(code, paste0('In sizeAssignAfterRecursing: nDim for RHS not set.')), call. = FALSE)
            if(LHS$nDim != RHSnDim) {
                ## FIX(review): message() has no 'call.' argument; the original passed
                ## 'call. = FALSE', which message() pastes into the emitted text as "FALSE".
                message(paste0('Warning, mismatched dimensions in assignment: ', nimDeparse(code), '. Going to browser(). Press Q to exit'))
                browser()
            }
            ## and warn if type issue e.g. int <- double
            if(assignmentTypeWarn(LHS$type, RHStype)) {
                message(paste0('Warning, RHS numeric type is losing information in assignment to LHS.', nimDeparse(code)))
            }
        }
    }
    ## update size info in typeEnv
    assert <- NULL
    if((LHS$name == 'values' | LHS$name == 'valuesIndexRange') && length(LHS$args) %in% c(1,2)) { ## It is values(model_values_accessor[, index]) <- STUFF
        # triggered when we have simple assignment into values() without indexing of values()
        if(is.numeric(RHS)) stop(exprClassProcessingErrorMsg(code, paste0('In sizeAssignAfterRecursing: Cannot assign into values() from numeric.')), call. = FALSE)
        ## Rewrite the assignment as a setValues(...) call with RHS first.
        code$name <- if(LHS$name == 'values') 'setValues' else 'setValuesIndexRange'
        code$args <- list(1 + length(LHS$args))
        setArg(code, 1, RHS)
        setArg(code, 2, LHS$args[[1]])
        if(length(LHS$args) == 2) setArg(code, 3, LHS$args[[2]]) # for indexed of form values(model, nodes[i])
        if(!(RHS$isName)) assert <- c(assert, sizeInsertIntermediate(code, 1, symTab, typeEnv) )
        return( if(length(assert) == 0) NULL else assert )
    }
    ## Note this can use LHS$name for RHSsizeExprs when returning from a nimbleFunction on RHS.  But this is probably not needed any more.
    if(any(unlist(lapply(RHSsizeExprs, is.null)))) RHSsizeExprs <- makeSizeExpressions(rep(NA, RHSnDim), LHS$name)
    ## reset sizeExprs for the LHS var.  re-using RHSsizeExprs for LHS.  This would only be valid if it is a nimbleFunction returning something on the RHS.  For assignment to be executed in Eigen, the RHS sizes MUST be known
    if(!nimbleOptions('experimentalNewSizeProcessing')) {
        if(LHS$toEigenize == 'yes') {
            code$toEigenize <- 'yes'
            ## message('Warning from sizeAssign: not expecting LHS to have toEigenize == yes')
        } else {
            code$toEigenize <-if(inherits(RHS, 'exprClass')) {
                if(RHS$toEigenize == 'no') 'no'
                else {
                    if(RHS$toEigenize == 'unknown') 'no'
                    else {
                        if(RHS$toEigenize != 'yes' & (!(LHS$name %in% c('eigenBlock', 'diagonal', 'coeffSetter'))) & (RHS$nDim == 0 | RHS$isName | (RHS$name == 'map' & NoEigenizeMap))) 'no' ## if it is scalar or is just a name or a map, we will do it via NimArr operator= .  Used to have "| RHS$name == 'map'", but this allowed X[1:3] <- X[2:4], which requires eigen, with eval triggered, to get right
                        else 'yes' ## if it is 'maybe' and non-scalar and not just a name, default to 'yes'
                    }
                }
            } else {
                if(is.numeric(LHS$nDim))
                    if(LHS$nDim > 0) 'yes' ## This is for cases like out[1:4] <- scalar
                    else 'no'
                else 'no'
            }
        }
        if(code$toEigenize == 'yes') { ## this would make more sense in eigenize_assign
            ## generate setSize(LHS, ...) where ... are dimension expressions
            if(length(RHSnDim) == 0) {
                message("confused about trying to eigenize something with nDim = 0")
                browser()
            }
            if(RHSnDim > 0) {
                if(!(RHS$name %in% setSizeNotNeededOperators)) {
                    if(LHS$isName | LHS$name == "nfVar") {
                        assert <- substitute(setSize(LHS), list(LHS = nimbleGeneralParseDeparse(LHS)))
                        for(i in seq_along(RHSsizeExprs)) {
                            test <- try(assert[[i + 2]] <- RHS$sizeExprs[[i]])
                            if(inherits(test, 'try-error')) browser()
                        }
                        assert[[ length(assert) + 1]] <- 0 ## copyValues = false
                        assert[[ length(assert) + 1]] <- 0 ## fillZeros = false
                        assert <- list(assert)
                    } else {
                        ## We have an indexed LHS of an eigenizable expression
                        ## need special handling if it is a row assignment like x[i,] <- ...
                        ## also need to generate size assertions
                        if(LHS$nDim == 1) {
                            if(RHS$nDim == 2) {
                                if(is.numeric(RHS$sizeExprs[[1]])) {
                                    if(RHS$sizeExprs[[1]] == 1) {
                                        newExpr <- insertExprClassLayer(code, 1, 'asRow', type = LHS$type)
                                        newExpr$sizeExprs <- RHS$sizeExprs
                                        newExpr$type <- LHS$type
                                        newExpr$nDim <- RHS$nDim
                                        if(!is.numeric(LHS$sizeExprs[[1]]) | !is.numeric(RHS$sizeExprs[[2]])) {
                                            assertMessage <- paste0("Run-time size error: expected ", deparse(LHS$sizeExprs[[1]]), " == ", deparse(RHS$sizeExprs[[2]]))
                                            thisAssert <- identityAssert(LHS$sizeExprs[[1]], RHS$sizeExprs[[2]], assertMessage)
                                            if(!is.null(thisAssert)) assert[[length(assert) + 1]] <- thisAssert
                                        } else {
                                            if(LHS$sizeExprs[[1]] != RHS$sizeExprs[[2]]) stop(exprClassProcessingErrorMsg(code, paste0('In sizeAssignAfterRecursing: Fixed size mismatch.')), call. = FALSE)
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        } else {
            if(inherits(RHS, 'exprClass')) {
                ## If we have A <- map(B, ...), we need to generate a setMap for the RHS, which will be done by sizeInsertIntermediate
                if(RHS$name == 'map') assert <- c(assert, sizeInsertIntermediate(code, 2, symTab, typeEnv) )
            }
            if(inherits(LHS, 'exprClass')) { # ditto
                if(LHS$name == 'map') assert <- c(assert, sizeInsertIntermediate(code, 1, symTab, typeEnv) )
            }
        }
    } ##experimentalNewSizeProcessing
    if(!(LHS$name %in% c('eigenBlock', 'diagonal', 'coeffSetter', 'nimNonseqIndexedd', 'nimNonseqIndexedi','nimNonseqIndexedb'))) {
        ## should already be annotated if it is an indexed assignment.
        ## It should be harmless to re-annotated EXCEPT in case like out[1:5] <- scalar
        code$nDim <- code$args[[1]]$nDim <- RHSnDim
        code$type <- code$args[[1]]$type <- RHStype
        code$sizeExprs <- code$args[[1]]$sizeExprs <- RHSsizeExprs
    }
    if(RHSname %in% assignmentAsFirstArgFuns) {
        ## Rewrite LHS <- f(args) as f(LHS, args) for functions that take the
        ## assignment target as their first argument.
        code$name <- RHS$name
        oldArgs <- RHS$args
        LHS <- code$args[[1]] ## could have been reset by LHS$name == 'map' situation above
        code$args <- list(length(oldArgs) + 1)
        for(i in seq_along(oldArgs)) {
            setArg(code, i+1, oldArgs[[i]])
        }
        setArg(code, 1, LHS)
    }
    return(assert)
}

## Size handler for PROTECT(): marks the expression as an opaque SEXP.
sizePROTECT <- function(code, symTab, typeEnv) {
    ## Do not recurse.
    code$type <- "custom"
    code$sizeExprs <- symbolSEXP(type = 'custom') ## trick to put a symbol object into sizeExprs for later use
    return(invisible(NULL))
}

## Size handler for Reval(): renamed to Rf_eval and treated like PROTECT().
sizeReval <- function(code, symTab, typeEnv) {
    code$name <- 'Rf_eval'
    return(sizePROTECT(code, symTab, typeEnv))
}

## Size handler for nimbleConvert(): generates a contiguous-copy pointer wrapper.
## (function body continues on the next chunk line)
sizeNimbleConvert <- function(code, symTab, typeEnv) {
    asserts <- recurseSetSizes(code, symTab, typeEnv) ## should not normally have an expression other than variable name as the argument, but do this for safety
    nDim <- code$args[[1]]$nDim
    type <- code$args[[1]]$type
    if(!code$caller$name %in% assignmentOperators) stop(exprClassProcessingErrorMsg(code, 'nimbleConvert can only be used in simple assignment.'), call.
= FALSE)
    ## Build a nimArrPtr_copyIfNeeded(var, copy) call so a contiguous copy is made only
    ## when required, registering the copy variable in the symbol table and typeEnv.
    targetString <- nimDeparse(code$args[[1]])
    targetName <- Rname2CppName(targetString)
    targetExpr <- parse(text = targetString, keep.source = FALSE)[[1]]
    copyName <- paste0(targetName, '_nimbleContigCopy')
    subList <- list(var = targetExpr, copy = as.name(copyName))
    newCode <- substitute( nimArrPtr_copyIfNeeded(var, copy), subList )
    ## only necessary if the result is needed
    if(!symTab$symbolExists( copyName )) {
        symTab$addSymbol( symbolBasic(name = copyName, type = type, nDim = nDim) )
        assign(copyName, exprTypeInfoClass$new(nDim = nDim, type = type), envir = typeEnv)
    }
    newCode <- RparseTree2ExprClasses(newCode)
    newCode$type <- "custom"
    newCode$sizeExprs <- symbolPtr(type = type) ## trick to put a symbol object into sizeExprs for later use
    setArg(code$caller, code$callerArgID, newCode)
    asserts
}

## Size handler for nimbleUnconvert(): rewrites the call to copy data back from the
## pointer wrapper into the original variable when a copy had been made.
sizeNimbleUnconvert <- function(code, symTab, typeEnv) {
    ptrString <- nimDeparse(code$args[[1]])
    ptrName <- Rname2CppName(ptrString)
    ptrExpr <- parse(text = ptrString, keep.source = FALSE)[[1]]
    targetString <- nimDeparse(code$args[[2]])
    targetName <- Rname2CppName(targetString)
    targetExpr <- parse(text = targetString, keep.source = FALSE)[[1]]
    copyName <- paste0(targetName, '_nimbleContigCopy')
    subList <- list(ptr = ptrExpr, var = targetExpr, copy = as.name(copyName))
    newCode <- substitute( nimArrPtr_copyBackIfNeeded(ptr, var, copy), subList )
    newCode <- RparseTree2ExprClasses(newCode)
    setArg(code$caller, code$callerArgID, newCode)
    NULL
}

## Size handler for asDoublePtr(): generates before/after code to pass a raw double
## pointer, copying map-backed variables to a contiguous buffer around the call.
sizeasDoublePtr <- function(code, symTab, typeEnv) { ## This could also handle copies from ints to doubles, which would ALWAYS require a copy
    asserts <- recurseSetSizes(code, symTab, typeEnv)
    nDim <- code$args[[1]]$nDim
    targetString <- nimDeparse(code$args[[1]])
    targetName <- Rname2CppName(targetString)
    targetExpr <- parse(text = targetString, keep.source = FALSE)[[1]]
    ptrName <- paste0(targetName, '_DoublePtr')
    copyName <- paste0(targetName, '_contigCopy')
    subList <- list(var = targetExpr, copy = as.name(copyName), ptr = as.name(ptrName))
    codeBefore <- substitute( if(isMap(var) ) { copy <- var; ptr <- getPtr(copy)} else {ptr <- getPtr(var)}, subList )
    codeAfter <- substitute( after( if(isMap(var)) { mapCopy(var, copy) } ), ## after() tags the assertion to go after the code line
                            subList )
    if(!symTab$symbolExists( ptrName )) symTab$addSymbol( symbolPtr(name = ptrName, type = 'double') )
    if(!symTab$symbolExists( copyName )) {
        symTab$addSymbol( symbolBasic(name = copyName, type = 'double', nDim = nDim) )
    }
    codeBefore <- RparseTree2ExprClasses(codeBefore)
    exprClasses_initSizes(codeBefore, symTab, NULL, typeEnv)
    asserts <- c(asserts, exprClasses_setSizes(codeBefore, symTab, typeEnv))
    codeAfter <- RparseTree2ExprClasses(codeAfter)
    asserts <- c(asserts, exprClasses_setSizes(codeAfter, symTab, typeEnv))
    newArgExpr <- RparseTree2ExprClasses( substitute( ptr, subList) )
    setArg(code$caller, code$callerArgID, newArgExpr)
    c(asserts, list(codeBefore, codeAfter))
}

## Size handler for scalar-returning functions.
## (function body continues on the next chunk line)
sizeScalar <- function(code, symTab, typeEnv) { ## use something different for distributionFuns
    ## length(model[[node]]) wasn't working because we were not doing recurseSetSize here
    ## However I am not sure if that is because there are cases where size expects a special argument we don't want to process (a modelValues?)
    ## So I'm going to wrap it in a try() and suppress messages
    asserts <- try(recurseSetSizes(code, symTab, typeEnv), silent = TRUE)
    if(inherits(asserts, 'try-error')) asserts <- list()
    if(code$args[[1]]$toEigenize == 'yes') {
        asserts <- c(asserts, sizeInsertIntermediate(code, 1, symTab, typeEnv))
    }
    ## else {
    ##     asserts <- NULL
    ## }
    ## Result is a scalar; type comes from scalarOutputTypes, defaulting to double.
    code$nDim <- 0
    outputType <- scalarOutputTypes[[code$name]]
    if(is.null(outputType)) code$type <- 'double' else code$type <- outputType
    code$sizeExprs <- list()
    code$toEigenize <- 'maybe' ## a scalar can be eigenized or not
    ##invisible(NULL)
    asserts
}

## Size handler for scalar model operations such as calculate()/getLogProb():
## optional index argument may be a numeric or logical vector; a trailing flag
## argument records which kind it is.
sizeScalarModelOp <- function(code, symTab, typeEnv) {
    if(length(code$args) > 1) {
        asserts <- recurseSetSizes(code, symTab, typeEnv, useArgs = c(FALSE, rep(TRUE, length(code$args)-1)))
        for(i in 2:length(code$args)) {
            if(inherits(code$args[[i]], 'exprClass')) {
                if(code$args[[i]]$toEigenize=='yes') asserts <- c(asserts, sizeInsertIntermediate(code, i, symTab, typeEnv))
            }
        }
        if(inherits(code$args[[2]], 'exprClass')) { ## There is an index expression that may be non-scalar
            if(code$args[[2]]$nDim > 0) { ## It is non-scalar so we need to set a logical argument about whether is it a logical or numeric vector
                code$args[[ length(code$args)+1 ]] <- as.integer(code$args[[2]]$type == 'logical')
            }
        }
    } else {
        asserts <- list()
    }
    if(code$args[[1]]$toEigenize == 'yes') { ## not sure when this would be TRUE
        asserts <- c(asserts, sizeInsertIntermediate(code, 1, symTab, typeEnv))
    }
    code$nDim <- 0
    outputType <- scalarOutputTypes[[code$name]]
    if(is.null(outputType)) code$type <- 'double' else code$type <- outputType
    code$sizeExprs <- list()
    code$toEigenize <- 'maybe'
    asserts
}

## Size handler for simulate(): like sizeScalarModelOp but the result has no value
## (type NA), since simulate() is used for its side effect on the model.
sizeSimulate <- function(code, symTab, typeEnv) {
    if(length(code$args) > 1) {
        asserts <- recurseSetSizes(code, symTab, typeEnv, useArgs = c(FALSE, rep(TRUE, length(code$args)-1)))
        for(i in 2:length(code$args)) {
            if(inherits(code$args[[i]], 'exprClass')) {
                if(code$args[[i]]$toEigenize=='yes') asserts <- c(asserts, sizeInsertIntermediate(code, i, symTab, typeEnv))##toEigenize <- 'yes'
            }
        }
        if(inherits(code$args[[2]], 'exprClass')) { ## There is an index expression that may be non-scalar
            if(code$args[[2]]$nDim > 0) { ## It is non-scalar so we need to set a logical argument about whether is it a logical or numeric vector
                code$args[[ length(code$args)+1 ]] <- as.integer(code$args[[2]]$type == 'logical')
            }
        }
    } else {
        asserts <- list()
    }
    code$nDim <- 0
    code$type <- as.character(NA)
    code$sizeExprs <- list()
    code$toEigenize <- 'maybe'
    return(asserts)
}

## Size handler for scalar-returning functions whose non-name arguments must all be
## lifted into intermediates.
sizeScalarRecurse <- function(code, symTab, typeEnv, recurse = TRUE) { ## use something different for distributionFuns
    asserts <- if(recurse) recurseSetSizes(code, symTab, typeEnv) else list()
    ## This just forces any argument expression to be lifted. Can we lift only things to be eigenized?
    for(i in seq_along(code$args)) {
        if(inherits(code$args[[i]], 'exprClass')) {
            if(!code$args[[i]]$isName) {
                asserts <- c(asserts, sizeInsertIntermediate(code, i, symTab, typeEnv) )
            }
        }
    }
    code$nDim <- 0
    outputType <- scalarOutputTypes[[code$name]]
    if(is.null(outputType)) code$type <- 'double' else code$type <- outputType
    code$sizeExprs <- list()
    code$toEigenize <- 'maybe' ## a scalar can be eigenized or not
    if(length(asserts)==0) NULL else asserts
}

## Size handler fallback: annotates as a scalar of unknown (NA) type.
sizeUndefined <- function(code, symTab, typeEnv) {
    code$nDim <- 0
    code$type <- as.character(NA)
    code$sizeExprs <- list()
    code$toEigenize <- 'maybe'
    invisible(NULL)
}

## Dispatcher for operators usable with one or two arguments (e.g. unary/binary minus).
sizeBinaryUnaryCwise <- function(code, symTab, typeEnv) {
    if(length(code$args) == 1) return(sizeUnaryCwise(code, symTab, typeEnv))
    if(length(code$args) == 2) return(sizeBinaryCwise(code, symTab, typeEnv))
    stop(exprClassProcessingErrorMsg(code, paste0('In sizeBinaryUnarycWise: Length of arguments is not 1 or 2.')), call. = FALSE)
}

## Size handler for indexing into a modelValues object: mv[i] becomes mvAccessRow.
sizemvAccessBracket <- function(code, symTab, typeEnv) {
    ## this gets called from sizeIndexingBracket, so recurse has already been done
    asserts <- recurseSetSizes(code, symTab, typeEnv, useArgs = c(FALSE, TRUE))
    if(length(code$args) != 2) {
        stop(exprClassProcessingErrorMsg(code, paste0('In sizemvAccessBracket: Wrong number of indices provided.')), call. = FALSE)
    }
    if(inherits(code$args[[2]], 'exprClass')) {
        if(code$args[[2]]$nDim != 0) stop(exprClassProcessingErrorMsg(code, paste0('In sizemvAccessBracket: Index is not a scalar.')), call. = FALSE)
    }
    sym <- symTab$getSymbolObject(code$args[[1]]$name, TRUE) ## This is the symbolVecNimArrPtr
    code$type = sym$type
    code$nDim = sym$nDim
    code$sizeExprs <- as.list(sym$size)
    code$toEigenize <- 'maybe'
    code$name <- 'mvAccessRow'
    if(length(asserts)==0) NULL else asserts
}

## Size handler for general indexing X[i, j, ...].
## (function body continues on the next chunk line and beyond this chunk)
sizeIndexingBracket <- function(code, symTab, typeEnv) {
    ## This is for X[i, j], viewed as `[`(X, i, j), where there may be different numbers of indices, and they may be scalars, sequences defined by `:`, or arbitrary (nonSequence) vectors of integers or logicals.
    ## X itself could be Y[k, l] (or the result of processing it) or map(Y, k, l), which is created if Y is a model variable and we know we need a map into but at the point it is created there is no processing of how it should be represented, so it is just represented as an abstract map.
    ## recurse into arguments
    asserts <- recurseSetSizes(code, symTab, typeEnv)
    ## Check two special cases
    ## This is from modelValues:
    if(code$args[[1]]$type == 'symbolVecNimArrPtr') return(c(asserts, sizemvAccessBracket(code, symTab, typeEnv)))
    ## This is deprecated:
    if(code$args[[1]]$type == 'symbolNumericList') return(c(asserts, sizemvAccessBracket(code, symTab, typeEnv)))
    ## Iterate over arguments, lifting any logical indices into which()
    ## e.g. X[i, bool] becomes X[i, Interm1], with Interm1 <- which(bool) as an assert.
for(i in seq_along(code$args)) {
        ## Interior of sizeIndexingBracket: wrap each logical index in which() so
        ## downstream processing only sees integer index vectors.
        if(i == 1) next
        if(inherits(code$args[[i]], 'exprClass')) {
            if(code$args[[i]]$name != "")
                if(code$args[[i]]$type == 'logical') {
                    ## first insert which, then lift to intermediate
                    newExpr <- insertExprClassLayer(code, i, 'which')
                    useBool <- rep(FALSE, length(code$args))
                    useBool[i] <- TRUE
                    asserts <- c(asserts, recurseSetSizes(code, symTab, typeEnv, useBool))
                }
        }
    }
    ## Collect information about the number of dimensions and value of a drop argument if provided
    ## nDimVar is nDim of X
    nDimVar <- code$args[[1]]$nDim
    dropBool <- TRUE
    dropArgProvided <- FALSE
    if(!is.null(names(code$args)))
        if('drop' %in% names(code$args)) {
            dropArgProvided <- TRUE
            iDropArg <- which(names(code$args) == 'drop')
        }
    if(nDimVar != length(code$args) - 1 - dropArgProvided) { ## check if number of indices is correct
        ## only valid case with fewer index arguments than source dimensions is matrix[indices],
        ## where matrix can be treated as a vector
        if(!( (nDimVar == 2) & (length(code$args) - dropArgProvided) == 1)) {
            msg <- paste0('Error, wrong number of indices provided for ', nimDeparse(code),'.')
            stop(exprClassProcessingErrorMsg(code, msg), call. = FALSE)
        }
    }
    ## pick out the drop argument and check if it is logical
    if(dropArgProvided) {
        dropBool <- code$args[[iDropArg]]
        if(!is.logical(dropBool)) {
            ## BUG FIX: previously `msg <- paste0(msg, "...")` referenced `msg`, which is
            ## undefined on the normal path (the only prior assignment to `msg` is followed
            ## by a stop()), so users saw "object 'msg' not found" instead of this message.
            msg <- "(A drop argument must be hard-coded as TRUE or FALSE, not given as a variable.)"
            stop(exprClassProcessingErrorMsg(code, msg), call. = FALSE)
        }
    }
    ## These initial annotations may change later
    code$nDim <- nDimVar
    code$type <- code$args[[1]]$type
    ## Initialize sizeExprs
    code$sizeExprs <- vector('list', length = nDimVar)
    ## (We could generate asserts here to ensure sub-indexing is within bounds)
    ## needMap will become TRUE below unless all indices are scalars
    needMap <- FALSE
    ## Track whether all index ranges are defined by `:` or by scalar
    ## simpleBlockOK will be TRUE if all index vectors are sequential, defined by `:`
    simpleBlockOK <- TRUE
    iSizes <- 1
    ## Iterate over dimensions of X and see which dimensions will be dropped from X[i,j,k]
    ## due to scalar indices, if drop = TRUE
    for(i in 1:nDimVar) {
        dropThisDim <- FALSE
        ## If the index is numeric, drop this dimension
        if(is.numeric(code$args[[i+1]])) dropThisDim <- TRUE
        ## If the index is not numeric but it is not a blank and its sizeExprs reveal it is a
        ## scalar-equivalent, drop this dimension
        else if((code$args[[i+1]]$name != "") & (length(dropSingleSizes(code$args[[i+1]]$sizeExprs)$sizeExprs) == 0)) dropThisDim <- TRUE
        ## Is this index an expression?
        isExprClass <- inherits(code$args[[i+1]], 'exprClass')
        ##
        if(dropThisDim) { ## The index is a scalar
            if(nimbleOptions()$indexDrop & dropBool) { ## And flags allow dropping
                code$sizeExprs[[iSizes]] <- NULL ## Remove that sizeExpr element
                code$nDim <- code$nDim - 1 ## reduce dimensions of result by 1
            } else {
                code$sizeExprs[[iSizes]] <- 1; iSizes <- iSizes + 1 ## If we are not dropping dimensions, set sizeExpr to 1
            }
            next
        } else { ## not dropping a dimension, so the index is non-scalar
            if(isExprClass)
                ## If it is an expression that is not `:` or blank, then a simple block is not allowed
                if((code$args[[i+1]]$name != ':') && (code$args[[i+1]]$name != "")) simpleBlockOK <- FALSE
        }
        needMap <- TRUE ## If the "next" in if(dropThisDim) {} is always hit, then needMap will never be set to TRUE
        ## Update sizeExprs
        if(isExprClass) {
            if(code$args[[i+1]]$name != "") { ## An entry that is a variable possibly with a length
                code$sizeExprs[[iSizes]] <- code$args[[i+1]]$sizeExprs[[1]]
            } else { ## blank entry (e.g. A[,i]) is an exprClass with isName = TRUE and name = ""
                code$sizeExprs[[iSizes]] <- code$args[[1]]$sizeExprs[[i]]
                ## also at this point we will fill in a `:` expression for the indices,
                ## so now we have e.g. A[ 1:dim(A)[1], i ]
                newIndexExpr <- RparseTree2ExprClasses(substitute(1:N, list(N = code$args[[1]]$sizeExprs[[i]])))
                setArg(code, i+1, newIndexExpr)
                useArgs <- rep(FALSE, length(code$args))
                useArgs[i+1] <- TRUE
                asserts <- c(asserts, recurseSetSizes(code, symTab, typeEnv, useArgs))
            }
            iSizes <- iSizes + 1
            next
        }
    }
    ## did all dims get dropped?
    if(length(code$sizeExprs)==0) {
        code$sizeExprs <- list() ## it was a named, list. this creates consistency. maybe unnecessary
        ##needMap will be FALSE if we are in this clause
        ## We need to check whether X is an expression that needs to be lifted, say (A + B)[2, 3]
        ## We could do better for these cases
        if(!code$args[[1]]$isName) ## It's not a name
            if(!(code$args[[1]]$name %in% operatorsAllowedBeforeIndexBracketsWithoutLifting)) {## e.g. 'mvAccessRow'
                ## At this point we have decided to lift, and the next two if()s determine if that
                ## is weird due to being on LHS of assignment
                if(code$caller$name %in% assignmentOperators)
                    if(code$callerArgID == 1)
                        stop(exprClassProcessingErrorMsg(code, 'There is a problem on the left-hand side of an assignment'), call. = FALSE)
                asserts <- c(asserts, sizeInsertIntermediate(code, 1, symTab, typeEnv))
            }
    }
    code$toEigenize <- 'maybe'
    if(needMap) {
        ## If this is a map on an *expression* that is not a map, we used to always lift it
        ## e.g. (A + B)[1:4] must become (Interm <- A + B; Interm[1:4])
        ## Now we only need to lift it if the map will not be implemented via eigenBlock
        ## for nested blocking, we have (nonseq | eigenBlock | map) x (nonseq | eigenBlock | map)
        ## [ coeffSetter is a version of nonseq ]
        ## where nonseq means non-sequential indices, eigenBlock means sequential indices, and map
        ## means a model or modelValues variable marked abstractly for a map
        ## (map) x (eigenBlock | map) is already handled
        ## (map) x (nonseq) is already handled
        ## (eigenBlock) x (eigenBlock) is already handled
        ##
        ## check whether to nest the indexing directly
        ## nestIndexing TRUE means we will convert X[i, j][k, l] into X[ i[k], j[l] ] (while we are working on `[`(X[i, j], k, l)
        ## We do this for nested indexing except (eigenBlock) x (eigenBlock), which means all indices are sequential
        ## Then we just generate .block(..).block(..)
        nestIndexing <- FALSE
        ## code$args[[1]] is the X[i, j]
        if(!code$args[[1]]$isName) { ## In X[i], X is an expression
            ## X is an indexing expression of some kind (other than a map, which is already a new object)
            ## It can't be coeffSetter at this point in processing flow, because the nestedness implies its caller was not <-
            if(code$args[[1]]$name %in% c('eigenBlock', 'nimNonseqIndexedd' ,'nimNonseqIndexedi' ,'nimNonseqIndexedb' )) {
                ## if it is not (eigenBlock) x (eigenBlock)
                if(!( (code$args[[1]]$name == 'eigenBlock') & (simpleBlockOK)))
                    nestIndexing <- TRUE
            }
        }
        ## implement nestIndexing
        if(nestIndexing) {
            ## We have something like `[`( eigenBlock(X, i, j), k, l) or `[`( nimNonseqIndexedd(X, i, j), k, l)
            ## We will gradually take over the first argument to construct something that will end up like
            ## nimNonseqIndexedd(X, nimNonseqIndexedi(i, k), nimNonseqIndexedi(j, l))
            ## The first one was an eigenBlock (all sequential integer indices defined by `:` or blank imputed with `:`)
            if(code$args[[1]]$name == 'eigenBlock') {
                ## We have `[`( eigenBlock(X, i, j), k, l)
                ##
                ## reach down to X and rename it
                ## put `:`(start, finish) back together.
                ##
                ## If we are in `[`( eigenBlock(X, i, j), k, l) <- Z,
                ## convert to `[`( coeffSetter(X, i, j), k, l) <- Z,
                if(code$caller$name %in% assignmentOperators & code$callerArgID == 1) {
                    code$args[[1]]$name <- 'coeffSetter'
                } else {
                    ## otherwise, convert `[`( eigenBlock(X, i, j), k, l) to `[`( nimNonseqIndexedd(X, i, j), k, l), e.g.
                    if(code$type == 'double') code$args[[1]]$name <- 'nimNonseqIndexedd'
                    if(code$type == 'integer') code$args[[1]]$name <- 'nimNonseqIndexedi'
                    if(code$type == 'logical') code$args[[1]]$name <- 'nimNonseqIndexedb'
                }
            } else { ## The first one was a nonSeq
                ## it was already nonseq, but it might need to become coeffSetter
                ## If we are in `[`( nimNonseqIndexedd(X, i, j), k, l) <- Z,
                ## convert to `[`( coeffSetter(X, i, j), k, l) <- Z,
                if(code$caller$name %in% assignmentOperators & code$callerArgID == 1) {
                    code$args[[1]]$name <- 'coeffSetter'
                }
            }
            ## Now construct the nesting i[k], j[l], etc.
            nestedNinds <- length(code$args[[1]]$args)-1
            nestedNdim <- code$args[[1]]$nDim
            nestedDropBool <- TRUE
            nestedDropArgProvided <- FALSE
            if(!is.null(names(code$args[[1]]$args))) ## does nimNonseqIndexedd(X, i, j) or coeffSetter(X, i, j) have named arguments?
                if("drop" %in% names(code$args[[1]]$args)) { ## is drop among the names?
                    nestedDropArgProvided <- TRUE
                    nestedDropBool <- code$args[[1]]$args[[ which(names(code$args[[1]]$args) == 'drop') ]]
                    nestedNinds <- nestedNinds - 1
                }
            nestedBlockBool <- rep(TRUE, nestedNinds) ## is it preserved as a block (can still be scalar if nestedDropBool is FALSE)
            nestedScalarIndex <- rep(FALSE, nestedNinds)
            ## Of the indices of nimNonseqIndexedd(X, i, j) or coeffSetter(X, i, j)
            ## which are scalars, and which are blocks
            ## If we have nimNonseqIndexedd(X, i, j, drop = FALSE) or coeffSetter(X, i, j, drop = FALSE),
            ## then we treat all dimensions as blocks, even if scalar indices
            for(iInd in 1:nestedNinds) {
                if(is(code$args[[1]]$args[[iInd+1]], 'exprClass')) {
                    if(code$args[[1]]$args[[iInd+1]]$nDim == 0) {
                        nestedScalarIndex[iInd] <- TRUE
                        if(nestedDropBool) nestedBlockBool[iInd] <- FALSE
                    }
                } else {
                    nestedScalarIndex[iInd] <- TRUE
                    if(nestedDropBool) nestedBlockBool[iInd] <- FALSE
                }
            }
            ## Re-annotate first arg
            code$args[[1]]$sizeExprs <- code$sizeExprs
            code$args[[1]]$nDim <- code$nDim
            code$args[[1]]$type <- code$type
            numIndices <- length(code$args) - 1 - dropArgProvided
            ## Do we need to set drop carefully?
            ## NEED TO SKIP SCALARS IF dropBool = TRUE for nested case.
            nestedInds <- which(nestedBlockBool)
            if(length(nestedInds) != numIndices)
                stop(exprClassProcessingErrorMsg(code, 'Wrong number of nested indices.'), call.=FALSE)
            ## iterate over indices, constructing i[j] if necessary
            for(iInd in 1:numIndices) {
                nestedIind <- nestedInds[iInd]
                nestedIndexIsScalar <- if(inherits(code$args[[1]]$args[[nestedIind + 1]], 'exprClass')) code$args[[1]]$args[[nestedIind + 1]]$nDim == 0 else TRUE
                if(nestedIndexIsScalar) {
                    ## check:
                    ## In X[i, j][k, l], if i is scalar, k should also be scalar (can't check its value now, but should be 1 at run-time)
                    indexIsScalar <- if(inherits(code$args[[iInd+1]], 'exprClass')) code$args[[iInd+1]]$nDim == 0 else TRUE
                    if(!indexIsScalar) warning("There is nested indexing with drop=FALSE where an index must be scalar but isn't")
                } else {
                    ## construct i[k], which is really nimNonseqIndexedi(i, k)
                    newExpr <- exprClass(name = 'nimNonseqIndexedi', isName = FALSE, isCall = TRUE, isAssign = FALSE)
                    newExpr$type <- 'integer'
                    indexIsScalar <- if(inherits(code$args[[iInd+1]], 'exprClass')) code$args[[iInd+1]]$nDim == 0 else TRUE
                    newExpr$sizeExprs <- if(!indexIsScalar) c(code$args[[iInd + 1]]$sizeExprs) else list(1)
                    newExpr$nDim <- 1
                    newExpr$toEigenize <- 'yes'
                    setArg(newExpr, 1, code$args[[1]]$args[[nestedIind + 1]])
                    setArg(newExpr, 2, code$args[[iInd + 1]])
                    setArg(newExpr, 3, 1)
                    setArg(code$args[[1]], nestedIind + 1, newExpr)
                }
            }
            ## The only remaining use of a drop argument is during eigenization to determine if 1xn needs a transpose to become nx1
            ## For that purpose, the drop arg of X[ i[k], j[l] ] should be from the outer part of
            ## `[`(X[i, j, drop = TRUE|FALSE], k, l, drop = [TRUE|FALSE]), not from the X[i,j]
            code$args[[1]]$args[['drop']] <- if(dropArgProvided) dropBool else TRUE
            ## clear remaining indices
            ## i.e. turn `[`( nimNonseqIndexedd(X, nimNonseqIndexedi(i, k), nimNonseqIndexedi(j, l)), k, l)
            ## into `[`( nimNonseqIndexedd(X, nimNonseqIndexedi(i, k), nimNonseqIndexedi(j, l)))
            code$args[1+(1:numIndices)] <- NULL
            codeCaller <- code$caller
            codeCallerArgID <- code$callerArgID
            ## remove the `[` layer of the current processing
            ## i.e. turn `[`( nimNonseqIndexedd(X, nimNonseqIndexedi(i, k), nimNonseqIndexedi(j, l))) into
            ## nimNonseqIndexedd(X, nimNonseqIndexedi(i, k), nimNonseqIndexedi(j, l))
            removeExprClassLayer(code)
            code <- codeCaller$args[[codeCallerArgID]]
            return(if(length(asserts)==0) NULL else asserts)
        }
        ## Now we are in the case where there is no nested indexing, or if there is X[i, j][k, l], it can be chained eigen blocks
        ## Replace with a map expression if needed
        if(!simpleBlockOK) {
            if(typeEnv$.ensureNimbleBlocks) {
                stop(exprClassProcessingErrorMsg(code, "LHS indexing for a multivariate random draw can only use sequential blocks (via ':')."), call. = FALSE)
            }
            ## If this is part of X[i, j] <- Z, convert to coeffSetter(X, i, j) <- Z
            if(code$caller$name %in% assignmentOperators & code$callerArgID == 1) {
                code$name <- 'coeffSetter'
            } else {
                ## otherwise convert `[`(X, i, j) to e.g. nimNonseqIndexedd(X, i, j)
                if(code$type == 'double') code$name <- 'nimNonseqIndexedd' ## this change could get moved to genCpp_generateCpp
                if(code$type == 'integer') code$name <- 'nimNonseqIndexedi'
                if(code$type == 'logical') code$name <- 'nimNonseqIndexedb'
            }
            ## If we have nimNonseqIndexedd(X, i), make it nimNonseqIndexedd(X, i, 1) for Eigen
            if(length(code$args) - 1 - dropArgProvided == 1) ## only 1 index
                code$args[[3]] <- 1 ## fill in extra 1 for a second dimension.
            ## should the index depend on dropArgProvided?
        } else { ## a simpleBlock is ok
            if(code$args[[1]]$nDim > 2 | typeEnv$.ensureNimbleBlocks) {
                ## old-style blocking from >2D down to 2D or 1D, or this is LHS for something like rmnorm,
                ## requiring a non-eigen map on LHS.
                ## We have X[i, j, k] where X has dimension > 2
                if(dropArgProvided) code$args[[iDropArg]] <- NULL
                newExpr <- makeMapExprFromBrackets(code, dropBool)
                newExpr$sizeExprs <- code$sizeExprs
                newExpr$type <- code$type
                newExpr$nDim <- code$nDim
                newExpr$toEigenize <- code$toEigenize
                setArg(code$caller, code$callerArgID, newExpr)
            } else { ## blocking via Eigen
                ##
                ## note that any expressions like sum(A) in 1:sum(A) should have already been lifted
                code$name <- 'eigenBlock'
                code$toEigenize <- 'yes'
            }
        }
    }
    if(length(asserts)==0) NULL else asserts
}

## Returns TRUE if `code` is integer-valued: an exprClass of type 'integer',
## an R value of integer storage mode, or a double equal to its floor.
## Logicals and non-integer exprClasses return FALSE.
isIntegerEquivalent <- function(code) {
    if(inherits(code, 'exprClass')) {
        if(code$type == 'integer') return(TRUE)
        return(FALSE)
    }
    if(is.logical(code)) return(FALSE)
    if(storage.mode(code) == 'integer') return(TRUE)
    code == floor(code) ## storage.mode must be 'double' so check if it's equivalent to an integer
}

## Size handler for nimSeq / nimSeqBy / nimSeqLen / nimSeqByLen.  Integer from/to
## with unit step reduces to the `:` operator; otherwise the call is renamed to a
## double-valued variant (nimSeqByD / nimSeqByLenD / nimSeqLenD) with a symbolic
## length expression.
sizeSeq <- function(code, symTab, typeEnv, recurse = TRUE) {
    asserts <- if(recurse) recurseSetSizes(code, symTab, typeEnv) else list()
    byProvided <- code$name == 'nimSeqBy' | code$name == 'nimSeqByLen'
    lengthProvided <- code$name == 'nimSeqLen' | code$name == 'nimSeqByLen'
    integerFrom <- isIntegerEquivalent(code$args[[1]])
    integerTo <- isIntegerEquivalent(code$args[[2]])
    liftExprRanges <- TRUE ## NOTE(review): not used anywhere in this function
    if(integerFrom && integerTo) {
        if((!byProvided && !lengthProvided) || (byProvided && !lengthProvided && is.numeric(code$args[[3]]) && code$args[[3]] == 1)) {
            ## Integer endpoints with implicit or unit step: use the `:` operator instead.
            code$name = ':'
            asserts <- c(asserts, sizeColonOperator(code, symTab, typeEnv, recurse = FALSE))
            return(if(length(asserts)==0) NULL else asserts)
        }
    } else {
        if(!byProvided && !lengthProvided) {
            code$args[[3]] <- 1
            byProvided <- TRUE
        }
        if(byProvided) {
            code$name <- 'nimSeqByD'
            ## lift any expression arguments
            for(i in 1:2) {
                if(inherits(code$args[[i]], 'exprClass')) {
                    if(!code$args[[i]]$isName) {
                        asserts <- c(asserts, sizeInsertIntermediate(code, i, symTab, typeEnv) )
                    }
                }
            }
            if(lengthProvided) {
                code$name <- 'nimSeqByLenD'
                thisSizeExpr <- parse(text = nimDeparse(code$args[[4]]),
keep.source = FALSE)[[1]]
            } else {
                ## Length of a by-sequence is computed at run time by calcSeqLength.
                thisSizeExpr <- substitute(calcSeqLength(FROM_, TO_, BY_),##1 + floor((TO_ - FROM_) / BY_),
                                           list(FROM_ = parse(text = nimDeparse(code$args[[1]]), keep.source = FALSE)[[1]],
                                                TO_ = parse(text = nimDeparse(code$args[[2]]), keep.source = FALSE)[[1]],
                                                BY_ = parse(text = nimDeparse(code$args[[3]]), keep.source = FALSE)[[1]]))
            }
        } else { ## must be lengthProvided
            code$name <- 'nimSeqLenD'
            thisSizeExpr <- parse(text = nimDeparse(code$args[[4]]), keep.source = FALSE)[[1]]
        }
    }
    ## Annotate as a 1-dimensional double vector with symbolic length.
    code$type <- 'double' ## only remaining case to catch here is -1 integer sequences, which we don't move to `:`
    code$sizeExprs <- list(thisSizeExpr)
    code$toEigenize <- 'yes'
    code$nDim <- 1
    return(if(length(asserts)==0) NULL else asserts)
}

## Size handler for the `:` operator.  Annotates a 1-D double vector whose
## length sizeExpr is either computed numerically (both endpoints numeric)
## or built symbolically as `to - from + 1`.
sizeColonOperator <- function(code, symTab, typeEnv, recurse = TRUE) {
    asserts <- if(recurse) recurseSetSizes(code, symTab, typeEnv) else list()
    if(length(code$args) != 2)
        stop(exprClassProcessingErrorMsg(code, 'In sizeColonOperator: Problem determining size for : without two arguments.'), call. = FALSE)
    ## Lift non-name endpoint expressions into intermediates, except the special
    ## pattern dim(nfVar(...))[...] which is left in place.
    for(i in 1:2) {
        if(inherits(code$args[[i]], 'exprClass')) {
            if(!code$args[[i]]$isName) {
                if(! (code$args[[i]]$name == '[' && (code$args[[i]]$args[[1]]$name == 'dim' && code$args[[i]]$args[[1]]$args[[1]]$name == 'nfVar'))){
                    asserts <- c(asserts, sizeInsertIntermediate(code, i, symTab, typeEnv) )
                }
            }
        }
    }
    code$type <- 'double'
    code$nDim <- 1
    code$toEigenize <- 'maybe'
    ## could generate an assertion that second arg is >= first arg
    if(is.numeric(code$args[[1]]) & is.numeric(code$args[[2]])) {
        code$sizeExprs <- list(code$args[[2]] - code$args[[1]] + 1)
    } else { ## at least one part is an expression
        ## This is an awkward case:
        ## sizeExprs are R parse trees, not exprClasses
        ## But in this case, we want the expression from an exprClass.
        ## so we need to nimDeparse and then parse them
        code$sizeExprs <- list(substitute( A - B + 1,
                                          list(A = parse(text = nimDeparse(code$args[[2]]), keep.source = FALSE)[[1]],
                                               B = parse(text = nimDeparse(code$args[[1]]), keep.source = FALSE)[[1]] ) ) )
    }
    invisible(asserts)
}

## Size handler for transpose, t(X).  Swaps the two sizeExprs of a matrix; a
## vector argument is converted to an asRow (1 x n); transpose of a numeric
## scalar is removed with a warning.
sizeTranspose <- function(code, symTab, typeEnv) {
    if(length(code$args) != 1)
        warning(paste0('More than one argument to transpose in ', nimDeparse(code), '.'), call. = FALSE)
    ans <- sizeUnaryCwise(code, symTab, typeEnv)
    if(is.numeric(code$args[[1]])) {
        warning(paste0('Confused by transpose of a numeric scalar in ', nimDeparse(code), '. Will remove transpose.'), call. = FALSE)
        removeExprClassLayer(code$caller, 1)
        return(ans)
    }
    code$toEigenize <- 'yes'
    code$type <- code$args[[1]]$type
    if(length(code$sizeExprs) == 2) {
        if(code$nDim != 2) warning(paste0('In sizeTranspose, there are 2 sizeExprs but nDim != 2'), call. = FALSE)
        code$sizeExprs <- c(code$sizeExprs[2], code$sizeExprs[1])
    } else if(length(code$sizeExprs) == 1) {
        if(code$nDim != 1) warning(paste0('In sizeTranspose, there is 1 sizeExpr but nDim != 1'), call. = FALSE)
        code$name <- 'asRow'
        code$sizeExprs <- c(list(1), code$sizeExprs[[1]])
        code$nDim <- 2
    }
    return(ans)
}

## Returns the type string of an argument: the $type of an exprClass, or the
## storage.mode of a raw R value.
getArgumentType <- function(expr) {
    if(inherits(expr, 'exprClass')) {
        expr$type
    } else storage.mode(expr)
}

## Determines the result type for operator `keyword` given its argument type,
## via the returnTypeHandling lookup table: 1 = always double, 2 = always
## integer, 3 = always logical, 4 = same as argument, 5 = same as argument but
## logical promotes to integer.  Unknown keywords default to 'double'.
setReturnType <- function(keyword, argType) {
    handling <- returnTypeHandling[[keyword]]
    if(is.null(handling)) return('double')
    switch(handling,
           'double', ##1
           'integer', ##2
           'logical', ##3
           argType, ##4
           if(argType == 'logical') 'integer' else argType ##5
           )
}

## Handler for unary functions that operate component-wise
## Result keeps the argument's nDim and sizeExprs; type comes from setReturnType.
sizeUnaryCwise <- function(code, symTab, typeEnv) {
    if(length(code$args) != 1){
        stop(exprClassProcessingErrorMsg(code, 'sizeUnaryCwise called with argument length != 1.'), call.
= FALSE)
    }
    asserts <- recurseSetSizes(code, symTab, typeEnv)
    ## lift intermediates
    a1 <- code$args[[1]]
    if(inherits(a1, 'exprClass')) {
        if(!nimbleOptions('experimentalNewSizeProcessing') ) {
            if(a1$toEigenize == 'no') {
                asserts <- c(asserts, sizeInsertIntermediate(code, 1, symTab, typeEnv))
                a1 <- code$args[[1]]
            }
        }
        ## Component-wise: result shape matches the argument's shape.
        code$nDim <- a1$nDim
        code$sizeExprs <- a1$sizeExprs
    } else {
        code$nDim <- 0
        code$sizeExprs <- list()
    }
    code$type <- setReturnType(code$name, getArgumentType(a1))
    if(length(code$nDim) != 1)
        stop(exprClassProcessingErrorMsg(code, 'In sizeUnaryCwise: nDim is not set.'), call. = FALSE)
    if(!nimbleOptions('experimentalNewSizeProcessing') )
        code$toEigenize <- if(code$nDim > 0) 'yes' else 'maybe'
    return(asserts)
}

## currently only inprod(v1, v2)
## Binary reduction: both arguments must be expressions; result is a double scalar.
sizeBinaryReduction <- function(code, symTab, typeEnv) {
    if(length(code$args) != 2)
        stop(exprClassProcessingErrorMsg(code, 'In sizeBinaryReduction: argument length != 2'), call. = FALSE)
    asserts <- recurseSetSizes(code, symTab, typeEnv)
    a1 <- code$args[[1]]
    a2 <- code$args[[2]]
    ok <- TRUE
    if(inherits(a1, 'exprClass')) {
        if(a1$toEigenize == 'no') {
            asserts <- c(asserts, sizeInsertIntermediate(code, 1, symTab, typeEnv))
            a1 <- code$args[[1]]
        }
    } else {
        ok <- FALSE
    }
    if(inherits(a2, 'exprClass')) {
        if(a2$toEigenize == 'no') {
            asserts <- c(asserts, sizeInsertIntermediate(code, 2, symTab, typeEnv))
            a2 <- code$args[[2]]
        }
    } else {
        ok <- FALSE
    }
    if(!ok) stop(exprClassProcessingErrorMsg(code, 'Cannot call inprod or other binary reduction operator with constant argument.'), call. = FALSE)
    code$nDim <- 0
    code$sizeExprs <- list()
    code$type <- 'double'
    code$toEigenize <- 'yes'
    if(length(asserts) == 0) NULL else asserts
}

## things like trace, det, logdet
## Takes one matrix argument; result is scalar (type follows the argument only
## for trace, otherwise double).  Lifted into an intermediate unless it appears
## directly inside `{` or an assignment.
sizeMatrixSquareReduction <- function(code, symTab, typeEnv) {
    if(length(code$args) != 1){
        stop(exprClassProcessingErrorMsg(code, 'sizeMatrixSquareReduction called with argument length != 1.'), call.
= FALSE)
            if(code$args[[1]]$type != 'nimbleList')
                stop(exprClassProcessingErrorMsg(code, paste0('returnType statement gives a nimbleList type but return() argument is not the right type')), call. = FALSE)
            ## equivalent to symTab$getSymbolObject(code$args[[1]]$name)$nlProc, if it is a name
            if(!identical(code$args[[1]]$sizeExprs$nlProc, typeEnv$return$sizeExprs$nlProc))
                stop(exprClassProcessingErrorMsg(code, paste0('nimbleList given in return() argument does not match nimbleList type declared in returnType()')), call. = FALSE)
        } else {
            ## check numeric types and nDim
            fail <- FALSE
            if(!identical(code$args[[1]]$type, typeEnv$return$type)) {
                if(typeEnv$return$nDim > 0) { ## allow scalar casting of returns without error
                    failMsg <- paste0('Type ', code$args[[1]]$type, ' of the return() argument does not match type ', typeEnv$return$type, ' given in the returnType() statement (void is default).')
                    fail <- TRUE
                }
            }
            if(!isTRUE(all.equal(code$args[[1]]$nDim, typeEnv$return$nDim))) {
                failMsg <- paste0( if(exists("failMsg", inherits = FALSE)) paste0(failMsg,' ') else character(),
                                  paste0('Number of dimensions ', code$args[[1]]$nDim, ' of the return() argument does not match number ', typeEnv$return$nDim, ' given in the returnType() statement.'))
                fail <- TRUE
            }
            if(fail) stop(exprClassProcessingErrorMsg(code, failMsg), call. = FALSE)
        }
        ## Lift a non-name return expression into an intermediate when it must be
        ## eigenized or contains any non-scalar piece.
        if(!code$args[[1]]$isName) {
            liftArg <- FALSE
            if(code$args[[1]]$toEigenize == 'yes') liftArg <- TRUE
            else if(anyNonScalar(code$args[[1]])) liftArg <- TRUE
            if(liftArg)
                asserts <- c(asserts, sizeInsertIntermediate(code, 1, symTab, typeEnv, forceAssign = TRUE))
        }
    }
    invisible(asserts)
}

## Size handler for %*%.  Both arguments must be expressions of nDim >= 1;
## vectors are promoted to row/column matrices via asRow/asCol, and a run-time
## assertion checks inner-dimension agreement.
sizeMatrixMult <- function(code, symTab, typeEnv) {
    if(length(code$args) != 2)
        stop(exprClassProcessingErrorMsg(code, 'sizeMatrixMult called with argument length != 2.'), call. = FALSE)
    a1 <- code$args[[1]]
    a2 <- code$args[[2]]
    if(!(inherits(a1, 'exprClass') & inherits(a2, 'exprClass')))
        stop(exprClassProcessingErrorMsg(code, 'In sizeMatrixMult: expecting both arguments to be expressions.'), call. = FALSE)
    ## need promotion from vectors to matrices with asRow or asCol
    asserts <- recurseSetSizes(code, symTab, typeEnv)
    a1 <- code$args[[1]]
    a2 <- code$args[[2]]
    if(a1$nDim == 0 | a2$nDim == 0)
        stop(exprClassProcessingErrorMsg(code, 'In sizeMatrixMult: Cannot do matrix multiplication with a scalar.'), call. = FALSE)
    if(!nimbleOptions('experimentalNewSizeProcessing') ) {
        if(a1$toEigenize == 'no') {
            asserts <- c(asserts, sizeInsertIntermediate(code, 1, symTab, typeEnv))
            a1 <- code$args[[1]]
        }
        if(a2$toEigenize == 'no') {
            asserts <- c(asserts, sizeInsertIntermediate(code, 2, symTab, typeEnv))
            a2 <- code$args[[2]]
        }
    }
    ## Note that we could insert RUN-TIME adaptation of mat %*% vec and vec %*% mat
    ## but to do so we would need to generate trickier sizeExprs
    ## For now, a vector on the right will be turned into a column
    ## and a vector on the left will be turned into a row
    ## The programmer can always use asRow or asCol to control it explicitly
    if(a1$nDim == 1 & a2$nDim == 1) {
        origSizeExprs <- a1$sizeExprs[[1]]
        a1 <- insertExprClassLayer(code, 1, 'asRow', type = a1$type, nDim = 2)
        a1$sizeExprs <- c(list(1), origSizeExprs)
        origSizeExprs <- a2$sizeExprs[[1]]
        a2 <- insertExprClassLayer(code, 2, 'asCol', type = a2$type, nDim = 2)
        a2$sizeExprs <- c(origSizeExprs, list(1))
    } else {
        if(a1$nDim == 1) {
            if(a2$nDim != 2)
                stop(exprClassProcessingErrorMsg(code, paste0('In sizeMatrixMult: First arg has nDim = 1 and 2nd arg has nDim = ', a2$nDim, '.')), call. = FALSE)
            origSizeExprs <- a1$sizeExprs[[1]]
            ## For first argument, default to asRow unless second argument has only one row, in which case make first asCol
            if(identical(a2$sizeExprs[[1]], 1)) {
                a1 <- insertExprClassLayer(code, 1, 'asCol', type = a1$type, nDim = 2)
                a1$sizeExprs <- c(origSizeExprs, list(1))
            } else {
                a1 <- insertExprClassLayer(code, 1, 'asRow', type = a1$type, nDim = 2)
                a1$sizeExprs <- c(list(1), origSizeExprs)
            }
        } else if(a2$nDim == 1) {
            origSizeExprs <- a2$sizeExprs[[1]]
            if(a1$nDim != 2)
                stop(exprClassProcessingErrorMsg(code, paste0('In sizeMatrixMult: Second arg has nDim = 1 and 1st arg has nDim = ', a1$nDim, '.')), call. = FALSE)
            if(identical(a1$sizeExprs[[2]], 1)) {
                a2 <- insertExprClassLayer(code, 2, 'asRow', type = a2$type, nDim = 2)
                a2$sizeExprs <- c(list(1), origSizeExprs)
            } else {
                a2 <- insertExprClassLayer(code, 2, 'asCol', type = a2$type, nDim = 2)
                a2$sizeExprs <- c(origSizeExprs, list(1))
            }
        }
    }
    ## Result is (rows of a1) x (cols of a2).
    code$nDim <- 2
    code$sizeExprs <- list(a1$sizeExprs[[1]], a2$sizeExprs[[2]])
    code$type <- setReturnType(code$name, arithmeticOutputType(a1$type, a2$type))
    if(!nimbleOptions('experimentalNewSizeProcessing') )
        code$toEigenize <- 'yes'
    assertMessage <- paste0("Run-time size error: expected ", deparse(a1$sizeExprs[[2]]), " == ", deparse(a2$sizeExprs[[1]]))
    newAssert <- identityAssert(a1$sizeExprs[[2]], a2$sizeExprs[[1]], assertMessage)
    if(is.null(newAssert)) return(asserts) else return(c(asserts, list(newAssert)))
}

## Size handler for solve(A, b) / forwardsolve(A, b).  A must be a matrix, b a
## vector or matrix; the result has b's nDim, with sizes from A's rows and b's
## columns.  Adds assertions that A is square and conformable with b.
sizeSolveOp <- function(code, symTab, typeEnv) {
    ## this is for solve(A, b) or forwardsolve(A, b). For inverse, use inverse(A), not solve(A)
    if(length(code$args) != 2)
        stop(exprClassProcessingErrorMsg(code, 'sizeSolveOp called with argument length != 2.'), call. = FALSE)
    asserts <- recurseSetSizes(code, symTab, typeEnv)
    a1 <- code$args[[1]]
    a2 <- code$args[[2]]
    if(!(inherits(a1, 'exprClass') & inherits(a2, 'exprClass')))
        stop(exprClassProcessingErrorMsg(code, 'In sizeSolveOp: expecting both arguments to be exprClasses.'), call. = FALSE)
    if(a1$nDim != 2)
        stop(exprClassProcessingErrorMsg(code, 'In sizeSolveOp: first argument to a matrix solver must be a matrix.'), call. = FALSE)
    if(!any(a2$nDim == 1:2))
        stop(exprClassProcessingErrorMsg(code, 'In sizeSolveOp: second argument to a matrix solver must be a vector or matrix.'), call. = FALSE)
    code$type <- setReturnType(code$name, 'double')
    code$nDim <- a2$nDim ## keep the same dimension as the 2nd argument
    if(code$nDim == 1) {
        code$sizeExprs <- c(a1$sizeExprs[[1]])
    } else {
        code$sizeExprs <- c(a1$sizeExprs[[1]], a2$sizeExprs[[2]])
    }
    code$toEigenize <- 'yes'
    assertMessage <- paste0("Run-time size error: expected ", deparse(a1$sizeExprs[[1]]), " == ", deparse(a1$sizeExprs[[2]]))
    assert1 <- identityAssert(a1$sizeExprs[[1]], a1$sizeExprs[[2]], assertMessage)
    assertMessage <- paste0("Run-time size error: expected ", deparse(a1$sizeExprs[[1]]), " == ", deparse(a2$sizeExprs[[1]]))
    assert2 <- identityAssert(a1$sizeExprs[[1]], a2$sizeExprs[[1]], assertMessage)
    asserts <- c(asserts, assert1, assert2)
    return(asserts)
}

## deprecated and will be removed
## Recursively forces argument argID of `code` into row (asRow) or column
## (asCol) orientation, inserting transpose layers for 2-D arguments where needed.
setAsRowOrCol <- function(code, argID, rowOrCol, type ) {
    recurse <- TRUE
    if(is.numeric(code$args[[argID]])) return(NULL)
    if(code$args[[argID]]$isName) {
        recurse <- FALSE
    } else {
        if(code$args[[argID]]$name == 'map') recurse <- FALSE
    }
    if(!recurse) {
        if(code$args[[argID]]$nDim == 2) {
            if(rowOrCol == 'asRow') {
                if(is.numeric(code$args[[argID]]$sizeExprs[[1]])) {
                    if(code$args[[argID]]$sizeExprs[[1]] == 1) {
                        return(invisible(NULL)) ## it is already a row
                    }
                }
                rowOK <- if(is.numeric(code$args[[argID]]$sizeExprs[[2]])) { ## only ok if a1 2nd size is 1
                    if(code$sizeExprs[[2]] == 1) TRUE else FALSE
                } else FALSE
                if(!rowOK) stop(exprClassProcessingErrorMsg(code, 'In setAsRowOrCol: Cannot convert to row.'), call. = FALSE)
                lengthExpr <- code$args[[argID]]$sizeExprs[[1]]
                insertExprClassLayer(code$caller, code$callerArgID, 'transpose', type = type)
                code$nDim <- 2
                code$sizeExprs <- c(list(1), lengthExpr)
                return(code$args[[argID]])
            } else {
                if(is.numeric(code$args[[argID]]$sizeExprs[[1]])) {
                    if(code$args[[argID]]$sizeExprs[[2]] == 1) {
                        return(invisible(NULL)) ## it is already a col
                    }
                }
                colOK <- if(is.numeric(code$args[[argID]]$sizeExprs[[1]])) { ## only ok if a1 1st size is 1
                    if(code$sizeExprs[[1]] == 1) TRUE else FALSE
                } else FALSE
                if(!colOK) stop(exprClassProcessingErrorMsg(code, 'In setAsRowOrCol: Cannot convert to col.'), call. = FALSE)
                lengthExpr <- code$args[[argID]]$sizeExprs[[1]]
                insertExprClassLayer(code$caller, code$callerArgID, 'transpose', type = type)
                code$nDim <- 2
                code$sizeExprs <- c(lengthExpr, list(1))
                return(code$args[[argID]])
            }
        } else if(code$args[[argID]]$nDim == 1) {
            ## A vector: wrap it directly in asRow/asCol and update shape to 2-D.
            oldSizeExprs <- code$args[[argID]]$sizeExprs
            insertExprClassLayer(code, argID, rowOrCol, type = type)
            if(rowOrCol == 'asRow') {
                code$sizeExprs <- c(list(1), oldSizeExprs)
            } else {
                code$sizeExprs <- c(oldSizeExprs, list(1))
            }
            code$nDim <- 2
            code$type <- type
            ans <- code$args[[argID]]
        }
    } else {
        ## Recurse into sub-arguments of a compound expression.
        for(i in seq_along(code$args[[argID]]$args)) {
            setAsRowOrCol(code$args[[argID]], i, rowOrCol, type)
        }
        ans <- code$args[[argID]]
    }
    ans
}

## Binary component-wise comparison/logical operators: size like sizeBinaryCwise
## but the result type is always logical.
sizeBinaryCwiseLogical <- function(code, symTab, typeEnv) {
    ans <- sizeBinaryCwise(code, symTab, typeEnv)
    code$type <- 'logical'
    return(ans)
}

## Handler for binary component-wise operators
sizeBinaryCwise <- function(code, symTab, typeEnv) {
    if(length(code$args) != 2)
        stop(exprClassProcessingErrorMsg(code, 'sizeBinaryCwise called with argument length != 2.'), call.
= FALSE) asserts <- recurseSetSizes(code, symTab, typeEnv) ## sizes of arguments must have already been set ## pull out the two arguments a1 <- code$args[[1]] a2 <- code$args[[2]] ## pull out aXDropNdim, aXnDim, aXsizeExprs, and aXtype (X = 1 or 2) if(inherits(a1, 'exprClass')) { if(!nimbleOptions('experimentalNewSizeProcessing') ) { if(a1$toEigenize == 'no') { asserts <- c(asserts, sizeInsertIntermediate(code, 1, symTab, typeEnv)) a1 <- code$args[[1]] } } a1Drop <- dropSingleSizes(a1$sizeExprs) a1DropNdim <- length(a1Drop$sizeExprs) a1nDim <- a1$nDim a1sizeExprs <- a1$sizeExprs a1type <- a1$type if(!nimbleOptions('experimentalNewSizeProcessing') ) a1toEigenize <- a1$toEigenize } else { a1DropNdim <- 0 a1nDim <- 0 a1sizeExprs <- list() a1type <- storage.mode(a1) if(!nimbleOptions('experimentalNewSizeProcessing') ) a1toEigenize <- 'maybe' } if(inherits(a2, 'exprClass')) { if(!nimbleOptions('experimentalNewSizeProcessing') ) { if(a2$toEigenize == 'no') { asserts <- c(asserts, sizeInsertIntermediate(code, 2, symTab, typeEnv)) a2 <- code$args[[2]] } } a2Drop <- dropSingleSizes(a2$sizeExprs) a2DropNdim <- length(a2Drop$sizeExprs) a2nDim <- a2$nDim a2sizeExprs <- a2$sizeExprs a2type <- a2$type if(!nimbleOptions('experimentalNewSizeProcessing') ) a2toEigenize <- a2$toEigenize } else { a2DropNdim <- 0 a2nDim <- 0 a2sizeExprs <- list() a2type <- storage.mode(a2) if(!nimbleOptions('experimentalNewSizeProcessing') ) a2toEigenize <- 'maybe' } ## Choose the output type by type promotion if(length(a1type) == 0) {warning('Problem with type of arg1 in sizeBinaryCwise', call. = FALSE); browser()} if(length(a2type) == 0) {warning('Problem with type of arg2 in sizeBinaryCwise', call. 
= FALSE); browser()} code$type <- setReturnType(code$name, arithmeticOutputType(a1type, a2type)) if(!nimbleOptions('experimentalNewSizeProcessing') ) { forceYesEigenize <- identical(a1toEigenize, 'yes') | identical(a2toEigenize, 'yes') code$toEigenize <- if(a1DropNdim == 0 & a2DropNdim == 0) if(forceYesEigenize) 'yes' else 'maybe' else 'yes' } ## Catch the case that there is at least one scalar-equivalent (all lengths == 1) ## experimentalNewSizeProcessing: The 3 'code$toEigenize <- ' should be redundant with above and could be removed during refactor if(a1DropNdim == 0 | a2DropNdim == 0) { ## Here we will process effective scalar additions ## and not do any other type of size promotion/dropping if(a1DropNdim == 0) { ## a1 is scalar-equiv if(a2DropNdim == 0) { ##both are scalar-equiv code$nDim <- max(a1nDim, a2nDim) ## use the larger nDims code$sizeExprs <- rep(list(1), code$nDim) ## set sizeExprs to all 1 if(!nimbleOptions('experimentalNewSizeProcessing') ) code$toEigenize <- if(forceYesEigenize) 'yes' else 'maybe' } else { ## a2 is not scalar equiv, so take nDim and sizeExprs from it code$nDim <- a2nDim code$sizeExprs <- a2sizeExprs if(!nimbleOptions('experimentalNewSizeProcessing') ) code$toEigenize <- 'yes' } } else { ## a2 is scalar-equiv, and a1 is not code$nDim <- a1nDim code$sizeExprs <- a1sizeExprs if(!nimbleOptions('experimentalNewSizeProcessing') ) code$toEigenize <- 'yes' } return(if(length(asserts) == 0) NULL else asserts) } if(is.null(asserts)) asserts <- list() ## Catch the case that the number of dimensions is not equal. ## This case doesn't arise as much as it used to because [ (sizeIndexingBracket) now drops single dimensions if(a1nDim != a2nDim) { ## Catch the case that one is 2D and the other is 1D-equivalent. ## This allows e.g. X[1,1:5] + Y[1,1,1:5]. First arg is 2D. 2nd arg is 1D-equivalent. 
An assertion will check that dim(X)[1] == 1 ## If so, wrap the 1D in asRow or asCol to orient it later for Eigen if(a1DropNdim == 1 & a2DropNdim == 1) { ## Hey, I think this is wrong: I think we should check the aXDropNdims if(a1nDim > 2 | a2nDim > 2) stop(exprClassProcessingErrorMsg(code, 'In sizeBinaryCwise: Dimensions do not match and/or NIMBLE will not handle Array + Vector for dim(Array) > 2.'), call. = FALSE) ## a1 is 2D and a2 is 1D if(a1nDim == 2 & a2nDim == 1) { a1IsCol <- identical(a1sizeExprs[[2]], 1) asFun <- if(a1IsCol) 'asCol' else 'asRow' a2 <- insertExprClassLayer(code, 2, asFun, type = a2type) a2$sizeExprs <- a1sizeExprs a2$nDim <- a1nDim a1ind <- if(a1IsCol) 1 else 2 if(!is.numeric(a1sizeExprs[[a1ind]]) | !is.numeric(a2sizeExprs[[1]])) { ## Really do want original a2sizeExprs assertMessage <- paste0("Run-time size error: expected ", deparse(a1sizeExprs[[a1ind]]), " == ", deparse(a2sizeExprs[[1]])) thisAssert <- identityAssert(a1sizeExprs[[a1ind]], a2sizeExprs[[1]], assertMessage) if(!is.null(thisAssert)) asserts[[length(asserts) + 1]] <- thisAssert } else { if(a1sizeExprs[[a1ind]] != a2sizeExprs[[1]]) stop(exprClassProcessingErrorMsg(code, 'In sizeBinaryCwise: Fixed size mismatch.'), call. = FALSE) } code$nDim <- a1nDim code$sizeExprs <- a1sizeExprs } else { a2IsCol <- identical(a2sizeExprs[[2]], 1) asFun <- if(a2IsCol) 'asCol' else 'asRow' a1 <- insertExprClassLayer(code, 1, asFun, type = a1type) a1$sizeExprs <- a2sizeExprs a1$type <- a1type a1$nDim <- a1nDim <- a2nDim a2ind <- if(a2IsCol) 1 else 2 if(!is.numeric(a1sizeExprs[[1]]) | !is.numeric(a2sizeExprs[[a2ind]])) { ## Really do want the original a1sizeExprs[[1]], not the modified one. 
assertMessage <- paste0("Run-time size error: expected ", deparse(a1sizeExprs[[1]]), " == ", deparse(a2sizeExprs[[a2ind]])) thisAssert <- identityAssert(a1sizeExprs[[1]], a2sizeExprs[[a2ind]], assertMessage) if(!is.null(thisAssert)) asserts[[length(asserts) + 1]] <- thisAssert } else { if(a1sizeExprs[[1]] != a2sizeExprs[[a2ind]]) stop(exprClassProcessingErrorMsg(code, 'In sizeBinaryCwise: Fixed size mismatch.'), call. = FALSE) } code$nDim <- a2nDim code$sizeExprs <- a2sizeExprs } } else { ## If at least one arg is a known scalar-equivalent, that case was handled above ## (But it's still not complete) ## Here is the case that nDims aren't equal and dropNdims aren't equal ## either. We used to rely on typeEnv to keep track of when a size resulting from an operation is known to be 1 but realized that isn't safe if that operation is only conditionally executed at run time. ## Hence what will do now is assume the user has written valid code ## but add run-time size checks of which dimension must match ## This is currently limited in what it will handle ## Specifically, it assumes things should be columns assertMessage <- paste0("Run-time size error: expected ", deparse(a1sizeExprs[[1]]), " == ", deparse(a2sizeExprs[[1]])) thisAssert <- identityAssert(a1sizeExprs[[1]], a2sizeExprs[[1]], assertMessage) if(!is.null(thisAssert)) asserts[[length(asserts) + 1]] <- thisAssert if(a1nDim == 1 & a2nDim == 2) { assertMessage <- paste0("Run-time size error: expected ", deparse(a2sizeExprs[[2]]), " == ", 1) thisAssert <- identityAssert(a2sizeExprs[[2]], 1, assertMessage) if(!is.null(thisAssert)) asserts[[length(asserts) + 1]] <- thisAssert code$sizeExprs <- a2sizeExprs } else { if(a1nDim == 2 & a2nDim == 1) { assertMessage <- paste0("Run-time size error: expected ", deparse(a1sizeExprs[[2]]), " == ", 1) thisAssert <- identityAssert(a1sizeExprs[[2]], 1, assertMessage) if(!is.null(thisAssert)) asserts[[length(asserts) + 1]] <- thisAssert code$sizeExprs <- a1sizeExprs } else { 
stop(exprClassProcessingErrorMsg(code, 'In sizeBinaryCwise: Dimensions do not matchin a way that can be handled.'), call. = FALSE) } } code$nDim <- 2 } } else { ## dimensions match at the outset nDim <- a1nDim if(nDim > 0) { for(i in 1:nDim) { if(!is.numeric(a1sizeExprs[[i]]) | !is.numeric(a2sizeExprs[[i]])) { assertMessage <- paste0("Run-time size error: expected ", deparse(a1sizeExprs[[i]]), " == ", deparse(a2sizeExprs[[i]])) thisAssert <- identityAssert(a1sizeExprs[[i]], a2sizeExprs[[i]], assertMessage) if(!is.null(thisAssert)) asserts[[length(asserts) + 1]] <- thisAssert } else { if(a1sizeExprs[[i]] != a2sizeExprs[[i]]) stop(exprClassProcessingErrorMsg(code, 'In sizeBinaryCwise: Fixed size mismatch.'), call. = FALSE) } } } code$nDim <- a1$nDim code$sizeExprs <- vector('list', code$nDim) for(i in seq_along(code$sizeExprs)) code$sizeExprs[[i]] <- if(is.numeric(a1sizeExprs[[i]])) a1sizeExprs[[i]] else a2sizeExprs[[i]] } if(length(asserts) == 0) NULL else asserts } mvFirstArgCheckLists <- list(nimArr_rmnorm_chol = list(c(1, 2, 0), ## dimensionality of ordered arguments AFTER the first, which is for the return value. e.g. mean (1D), chol(2D), prec_param(scalar) 1, 'double'), ## 1 = argument from which to take answer size, double = answer type nimArr_rmvt_chol = list(c(1, 2, 0, 0), ## dimensionality of ordered arguments AFTER the first, which is for the return value. e.g. 
mean (1D), chol(2D), df(scalar), prec_param(scalar) 1, 'double'), ## 1 = argument from which to take answer size, double = answer type nimArr_rwish_chol = list(c(2, 0, 0, 0), ## chol, df, prec_param, overwrite_inputs 1, 'double'), nimArr_rinvwish_chol = list(c(2, 0, 0), ## chol, df, prec_param 1, 'double'), nimArr_rcar_normal = list(c(1, 1, 1), 3, 'double'), ## adj, wgts, num nimArr_rmulti = list(c(0, 1), ## size, probs 2, 'double'), ## We treat integer rv's as doubles nimArr_rdirch = list(c(1), 1, 'double')) ## alpha sizeRmultivarFirstArg <- function(code, symTab, typeEnv) { asserts <- recurseSetSizes(code, symTab, typeEnv) notOK <- FALSE checkList <- mvFirstArgCheckLists[[code$name]] if(!is.null(checkList)) { if(length(code$args) < length(checkList[[1]])) stop(exprClassProcessingErrorMsg(code, 'Not enough arguments provided.'), call. = FALSE) for(i in seq_along(checkList[[1]])) { notOK <- if(inherits(code$args[[i]], 'exprClass')) code$args[[i]]$nDim != checkList[[1]][i] else notOK } returnSizeArgID <- checkList[[2]] returnType <- checkList[[3]] } else { returnSizeArgID <- 1 returnType <- 'double' } if(notOK) { stop(exprClassProcessingErrorMsg(code, 'Some argument(s) have the wrong dimension.'), call. = FALSE) } if(!inherits(code$args[[returnSizeArgID]], 'exprClass')) stop(exprClassProcessingErrorMsg(code, paste0('Expected ', nimDeparse(code$args[[returnSizeArgID]]) ,' to be an expression.')), call. 
= FALSE) code$type <- returnType code$nDim <- code$args[[returnSizeArgID]]$nDim code$toEigenize <- 'maybe' code$sizeExprs <- code$args[[returnSizeArgID]]$sizeExprs for(i in seq_along(code$args)) { if(inherits(code$args[[i]], 'exprClass')) { if(!code$args[[i]]$isName) { asserts <- c(asserts, sizeInsertIntermediate(code, i, symTab, typeEnv) ) } } } if(code$nDim > 0) { if(!(code$caller$name %in% c('{','<-','<<-','='))) { asserts <- c(asserts, sizeInsertIntermediate(code$caller, code$callerArgID, symTab, typeEnv)) } else typeEnv$.ensureNimbleBlocks <- TRUE ## for purposes of sizeAssign, which recurses on assignment target after RHS } return(asserts) } sizeVoidPtr <- function(code, symTab, typeEnv) { ## lift any argument that is an expression or scalar. ## We expect only one argument ## Lift it if it is an expression, a numeric, or a scalar asserts <- recurseSetSizes(code, symTab, typeEnv) lift <- TRUE if(inherits(code$args[[1]], 'exprClass')) { if(code$args[[1]]$type == 'nimbleFunction') lift <- FALSE else if(code$args[[1]]$isName & code$args[[1]]$nDim > 0) lift <- FALSE ## will already be a pointer } if(lift) { asserts <- c(asserts, sizeInsertIntermediate(code, 1, symTab, typeEnv) ) } code$type <- 'voidPtr' code$nDim <- 0 code$toEigenize <- 'no' return(asserts) } ### ## This function would be called with arguments from an RCfunction or nimbleFunction ## the functions dim and length would be taken over to work on the sizeExprs. ## but for now it can just return NAs for size expressions, and then the new returned value will have default size expressions (dim(name)[1], etc) ## generalFunSizeHandler <- function(code, symTab, typeEnv, returnType, args, chainedCall = FALSE) { useArgs <- unlist(lapply(args, function(x) as.character(x[[1]]) %in% c('double', 'integer', 'logical'))) if(chainedCall) useArgs <- c(FALSE, useArgs) if(length(code$args) != length(useArgs)) { stop(exprClassProcessingErrorMsg(code, 'In generalFunSizeHandler: Wrong number of arguments.'), call. 
= FALSE) } ## Note this is NOT checking the dimensions of each arg. useArgs just means it will recurse on that and lift or do as needed asserts <- recurseSetSizes(code, symTab, typeEnv, useArgs) ## lift any argument that is an expression for(i in seq_along(code$args)) { if(useArgs[i]) { if(inherits(code$args[[i]], 'exprClass')) { if(!code$args[[i]]$isName) { asserts <- c(asserts, sizeInsertIntermediate(code, i, symTab, typeEnv) ) } } } } if(inherits(returnType, 'symbolNimbleList')) { code$type <- 'nimbleList' code$sizeExprs <- returnType code$toEigenize <- 'maybe' code$nDim <- 0 liftIfAmidExpression <- TRUE } else { returnSymbolBasic <- inherits(returnType, 'symbolBasic') returnTypeLabel <- if(returnSymbolBasic) returnType$type else as.character(returnType[[1]]) if(returnTypeLabel == 'void') { code$type <- returnTypeLabel code$toEigenize <- 'unknown' return(asserts) } returnNDim <- if(returnSymbolBasic) returnType$nDim else if(length(returnType) > 1) as.numeric(returnType[[2]]) else 0 returnSizeExprs <- vector('list', returnNDim) ## This stays blank (NULLs), so if assigned as a RHS, the LHS will get default sizes code$type <- returnTypeLabel code$nDim <- returnNDim code$sizeExprs <- returnSizeExprs code$toEigenize <- if(code$nDim == 0) 'maybe' else 'no' liftIfAmidExpression <- code$nDim > 0 } if(liftIfAmidExpression) { if(!(code$caller$name %in% c('{','<-','<<-','='))) { asserts <- c(asserts, sizeInsertIntermediate(code$caller, code$callerArgID, symTab, typeEnv)) } else typeEnv$.ensureNimbleBlocks <- TRUE } return(asserts) } generalFunSizeHandlerFromSymbols <- function(code, symTab, typeEnv, returnSymbol, argSymTab, chainedCall = FALSE) { ## symbols should be in order useArgs <- unlist(lapply(argSymTab$symbols, function(x) { if(!is.null(x[['type']])) as.character(x$type) %in% c('double', 'integer', 'logical') else FALSE })) if(chainedCall) useArgs <- c(FALSE, useArgs) if(length(code$args) != length(useArgs)) { stop(exprClassProcessingErrorMsg(code, 'In 
generalFunSizeHandler: Wrong number of arguments.'), call. = FALSE) } ## Note this is NOT checking the dimensions of each arg. useArgs just means it will recurse on that and lift or do as needed asserts <- recurseSetSizes(code, symTab, typeEnv, useArgs) ## lift any argument that is an expression for(i in seq_along(code$args)) { if(useArgs[i]) { if(inherits(code$args[[i]], 'exprClass')) { if(!code$args[[i]]$isName) { asserts <- c(asserts, sizeInsertIntermediate(code, i, symTab, typeEnv) ) } } } } if(inherits(returnSymbol, 'symbolNimbleList')) { code$type <- 'nimbleList' code$sizeExprs <- returnSymbol code$toEigenize <- 'maybe' code$nDim <- 0 liftIfAmidExpression <- TRUE } else { returnSymbolBasic <- inherits(returnSymbol, 'symbolBasic') returnTypeLabel <- if(returnSymbolBasic) returnSymbol$type else { stop(exprClassProcessingErrorMsg(code, 'In generalFunSizeHandlerFromSymbols: Problem with return type.'), call. = FALSE) } if(returnTypeLabel == 'void') { code$type <- returnTypeLabel code$toEigenize <- 'unknown' return(asserts) } returnNDim <- if(returnSymbolBasic) returnSymbol$nDim else if(length(returnType) > 1) as.numeric(returnType[[2]]) else 0 returnSizeExprs <- vector('list', returnNDim) ## This stays blank (NULLs), so if assigned as a RHS, the LHS will get default sizes code$type <- returnTypeLabel code$nDim <- returnNDim code$sizeExprs <- returnSizeExprs code$toEigenize <- if(code$nDim == 0) 'maybe' else 'no' liftIfAmidExpression <- code$nDim > 0 } if(liftIfAmidExpression) { if(!(code$caller$name %in% c('{','<-','<<-','='))) { asserts <- c(asserts, sizeInsertIntermediate(code$caller, code$callerArgID, symTab, typeEnv)) } else typeEnv$.ensureNimbleBlocks <- TRUE } return(asserts) }
/packages/nimble/R/genCpp_sizeProcessing.R
no_license
nemochina2008/nimble
R
false
false
168,988
r
## Functions whose generated C++ form takes the assignment target as their
## first argument (e.g. ans <- rmnorm(...) becomes nimArr_rmnorm_chol(ans, ...)).
assignmentAsFirstArgFuns <- c('nimArr_rmnorm_chol', 'nimArr_rmvt_chol', 'nimArr_rwish_chol', 'nimArr_rinvwish_chol', 'nimArr_rcar_normal', 'nimArr_rmulti', 'nimArr_rdirch', 'getValues', 'getValuesIndexRange', 'initialize', 'setWhich', 'setRepVectorTimes', 'assignVectorToNimArr', 'dimNimArr', 'assignNimArrToNimArr')

## Operators for which no setSize step is needed on the assignment target.
setSizeNotNeededOperators <- c('setWhich', 'setRepVectorTimes', 'SEXP_2_NimArr', 'nimVerbatim')

## Calls that may appear directly inside an indexing bracket without being
## lifted to an intermediate first.
operatorsAllowedBeforeIndexBracketsWithoutLifting <- c('map', 'dim', 'mvAccessRow', 'nfVar')

## Dispatch table mapping DSL call names to the size-processing handler
## (a function name, looked up and eval'd by exprClasses_setSizes).
sizeCalls <- c(
    makeCallList(binaryOperators, 'sizeBinaryCwise'),
    makeCallList(binaryMidLogicalOperators, 'sizeBinaryCwiseLogical'),
    makeCallList(binaryOrUnaryOperators, 'sizeBinaryUnaryCwise'),
    makeCallList(unaryOperators, 'sizeUnaryCwise'),
    makeCallList(unaryOrNonaryOperators, 'sizeUnaryNonaryCwise'),
    makeCallList(assignmentOperators, 'sizeAssign'),
    makeCallList(reductionUnaryOperators, 'sizeUnaryReduction'),
    makeCallList(matrixSquareReductionOperators, 'sizeMatrixSquareReduction'),
    makeCallList(reductionBinaryOperators, 'sizeBinaryReduction'),
    makeCallList(matrixMultOperators, 'sizeMatrixMult'),
    makeCallList(matrixFlipOperators, 'sizeTranspose'),
    makeCallList(matrixSolveOperators, 'sizeSolveOp'),
    makeCallList(matrixSquareOperators, 'sizeUnaryCwiseSquare'),
    makeCallList(nimbleListReturningOperators, 'sizeNimbleListReturningFunction'),
    nimOptim = 'sizeOptim',
    nimOptimDefaultControl = 'sizeOptimDefaultControl',
    list('debugSizeProcessing' = 'sizeProxyForDebugging',
         diag = 'sizeDiagonal',
         dim = 'sizeDim',
         RRtest_add = 'sizeRecyclingRule',
         which = 'sizeWhich',
         nimC = 'sizeConcatenate',
         nimRep = 'sizeRep',
         nimSeqBy = 'sizeSeq',
         nimSeqLen = 'sizeSeq',
         nimSeqByLen = 'sizeSeq',
         'return' = 'sizeReturn',
         'asRow' = 'sizeAsRowOrCol',
         'asCol' = 'sizeAsRowOrCol',
         makeNewNimbleListObject = 'sizeNewNimbleList',
         getParam = 'sizeGetParam',
         getBound = 'sizeGetBound',
         nimSwitch = 'sizeSwitch',
         ## asDoublePtr = 'sizeasDoublePtr',
         '[' = 'sizeIndexingBracket',
         '[[' = 'sizeDoubleBracket', ## for nimbleFunctionList, this will always go through chainedCall(nfList[[i]], 'foo')(arg1, arg2)
         chainedCall = 'sizeChainedCall',
         nfVar = 'sizeNFvar',
         map = 'sizemap',
         ':' = 'sizeColonOperator',
         ##dim = 'sizeDimOperator',
         'if' = 'recurseSetSizes', ##OK
         'while' = 'recurseSetSizes',
         ## callC = 'sizecallC',
         'for' = 'sizeFor',
         cppPointerDereference = 'sizeCppPointerDereference',
         values = 'sizeValues',
         '(' = 'sizeUnaryCwise',
         setSize = 'sizeSetSize', ## OK but not done for numericLists
         resizeNoPtr = 'sizeResizeNoPtr', ## may not be used any more
         nimArr_rcat = 'sizeScalarRecurse',
         nimArr_rinterval = 'sizeScalarRecurse',
         nimPrint = 'sizeforceEigenize',
         nimDerivs = 'sizeNimDerivs',
         ##nimCat = 'sizeforceEigenize',
         as.integer = 'sizeUnaryCwise', ## Note as.integer and as.numeric will not work on a non-scalar yet
         as.numeric = 'sizeUnaryCwise',
         nimArrayGeneral = 'sizeNimArrayGeneral',
         setAll = 'sizeOneEigenCommand',
         voidPtr = 'sizeVoidPtr',
         run.time = 'sizeRunTime',
         PROTECT = 'sizePROTECT',
         NimArr_2_SEXP = 'sizePROTECT', ## same need
         Reval = 'sizeReval',
         nimbleConvert = 'sizeNimbleConvert',
         nimbleUnconvert = 'sizeNimbleUnconvert',
         asReturnSymbol = 'sizeAsReturnSymbol'),
    makeCallList(scalar_distribution_dFuns, 'sizeRecyclingRule'),
    makeCallList(scalar_distribution_pFuns, 'sizeRecyclingRule'),
    makeCallList(scalar_distribution_qFuns, 'sizeRecyclingRule'),
    makeCallList(scalar_distribution_rFuns, 'sizeRecyclingRuleRfunction'),
    makeCallList(distributionFuns[!(distributionFuns %in% c(scalar_distribution_dFuns, scalar_distribution_pFuns, scalar_distribution_qFuns, scalar_distribution_rFuns))], 'sizeScalarRecurse'),
    # R dist functions that are not used by NIMBLE but we allow in DSL
    makeCallList(paste0(c('d','q','p'), 't'), 'sizeRecyclingRule'),
    rt = 'sizeRecyclingRuleRfunction',
    makeCallList(paste0(c('d','q','p'), 'exp'), 'sizeRecyclingRule'),
    rexp = 'sizeRecyclingRuleRfunction',
    makeCallList(c('isnan','ISNAN','ISNA'), 'sizeScalarRecurse'),
    makeCallList(c('nimArr_dmnorm_chol', 'nimArr_dmvt_chol', 'nimArr_dwish_chol', 'nimArr_dinvwish_chol', 'nimArr_dcar_normal', 'nimArr_dmulti', 'nimArr_dcat', 'nimArr_dinterval', 'nimArr_ddirch'), 'sizeScalarRecurse'),
    makeCallList(c('nimArr_rmnorm_chol', 'nimArr_rmvt_chol', 'nimArr_rwish_chol', 'nimArr_rinvwish_chol', 'nimArr_rcar_normal', 'nimArr_rmulti', 'nimArr_rdirch'), 'sizeRmultivarFirstArg'),
    makeCallList(c('decide', 'size', 'getsize','getNodeFunctionIndexedInfo', 'endNimbleTimer'), 'sizeScalar'),
    makeCallList(c('calculate','calculateDiff', 'getLogProb'), 'sizeScalarModelOp'),
    simulate = 'sizeSimulate',
    makeCallList(c('blank', 'nfMethod', 'getPtr', 'startNimbleTimer'), 'sizeUndefined')
    ##'nimFunListAccess'
)

## Return types for calls handled by sizeScalar / scalar-returning handlers.
scalarOutputTypes <- list(decide = 'logical', size = 'integer', isnan = 'logical', ISNA = 'logical', '!' = 'logical', getNodeFunctionIndexedInfo = 'double', endNimbleTimer = 'double')

## exprClasses_setSizes fills in the type information of exprClass code
## code is an exprClas object
## typeEnv is an environment returned by exprClasses_initSizes
## allowUnknown says whether it is ok to have unknown type. This will be true for LHS of assignments
##
## This returns a set of type assertions collected for each line of code
## This function operates recursively, so the type assertions returned from recursive calls are
## put into the exprClass object for which they were recursed.
##
## For example, if we have A2 <- mean(B + C)
## then typeEnv must have size expressions for B and C to get started.
## If these are matrices, the generic size expressions (for B) will be dim(B)[1] and dim(B)[2]
## Then the exprClass object for `+`(B, C) will generate assertions that dim(B)[1] == dim(C)[1] and dim(B[2]) == dim(C)[2]
## and it will copy the size expressions for B as its own size expressions
## Then the exprClass object for mean(`+`(B, C)) will create a size expression of 1 (with the same dimensions as B+C)
## Then the exprClass object for `<-`(A, mean(`+`(B, C))) will generate assertions that the size of A must be 1
## and it will set the size expressions for A and for itself to 1.

## Symbol classes whose exprClass 'type' is replaced by the symbol's own type,
## with the symbol object stored in sizeExprs.
expressionSymbolTypeReplacements <- c('symbolNimbleListGenerator', 'symbolNimbleList', 'symbolNimbleFunction')

## Main recursive dispatcher for size/type processing (see comment block above).
exprClasses_setSizes <- function(code, symTab, typeEnv) {
    ## input code is exprClass
    ## name:
    if(code$isName) {
        ## If it doesn't exist and must exist, stop
        if(code$name != "") { ## e.g. In A[i,], second index gives name==""
            if(!exists(code$name, envir = typeEnv, inherits = FALSE)) {
                if(symTab$symbolExists(code$name, TRUE)) {
                    thisSymbolObject <- symTab$getSymbolObject(code$name, TRUE)
                    code$type <- class(thisSymbolObject)[1]
                    if(code$type %in% expressionSymbolTypeReplacements){
                        code$type <- thisSymbolObject$type
                        code$sizeExprs <- thisSymbolObject
                    }
                } else {
                    code$type <- 'unknown'
                    if(!typeEnv$.AllowUnknowns)
                        if(identical(code$name, 'pi')) { ## unique because it may be encountered anew on on RHS and be valid
                            assign('pi', exprTypeInfoClass$new(nDim = 0, type = 'double', sizeExprs = list()), envir = typeEnv)
                            symTab$addSymbol(symbolBasic(name = 'pi', type = 'double', nDim = 0))
                            code$nDim <- 0
                            code$type <- 'double'
                            code$sizeExprs <- list()
                            code$toEigenize <- 'maybe'
                        } else {
                            warning(paste0("variable '",code$name,"' has not been created yet."), call.=FALSE)
                        }
                }
            } else {
                ## otherwise fill in type fields from typeEnv object
                info <- get(code$name, envir = typeEnv)
                if(inherits(info, 'exprTypeInfoClass')) {
                    code$type <- info$type
                    code$sizeExprs <- info$sizeExprs
                    code$nDim <- info$nDim
                    code$toEigenize <- 'maybe'
                }
            }
            ## Add RCfunctions to neededRCfuns.
            if(exists(code$name) && is.rcf(get(code$name))) {
                nfmObj <- environment(get(code$name))$nfMethodRCobject
                uniqueName <- nfmObj$uniqueName
                if (is.null(typeEnv$neededRCfuns[[uniqueName]])) {
                    typeEnv$neededRCfuns[[uniqueName]] <- nfmObj
                }
            }
            ## Note that generation of a symbol for LHS of an assignment is done in the sizeAssign function, which is the handler for assignments
            return(NULL)
        }
    }
    if(code$isCall) {
        if(code$name == '{') {
            ## recurse over lines
            for(i in seq_along(code$args)) {
                if(inherits(code$args[[i]], 'exprClass')) {
                    newAsserts <- exprClasses_setSizes(code$args[[i]], symTab, typeEnv)
                    code$args[[i]]$assertions <- if(is.null(newAsserts)) list() else newAsserts
                }
            }
            return(invisible(NULL))
        }
        ## Dispatch through the sizeCalls handler table.
        sizeCall <- sizeCalls[[code$name]]
        if(!is.null(sizeCall)) {
            if(.nimbleOptions$debugSizeProcessing) {
                browser()
                eval(substitute(debugonce(XYZ), list(XYZ = as.name(sizeCall))))
            }
            test0 <- eval(call(sizeCall, code, symTab, typeEnv))
            return(test0)
        }
        if(symTab$symbolExists(code$name, TRUE)) { ## could be a nimbleFunction object
            return(sizeNimbleFunction(code, symTab, typeEnv) )
        }
        ## Finally, it could be an RCfunction (a nimbleFunction with no setup == a simple function)
        {
            if(exists(code$name)) {
                obj <- get(code$name)
                if(is.rcf(obj)) { ## it is an RC function
                    nfmObj <- environment(obj)$nfMethodRCobject
                    uniqueName <- nfmObj$uniqueName
                    if(length(uniqueName)==0) stop(exprClassProcessingErrorMsg(code, 'In size processing: A no-setup nimbleFunction with no internal name is being called.'), call. = FALSE)
                    if(is.null(typeEnv$neededRCfuns[[uniqueName]])) {
                        typeEnv$neededRCfuns[[uniqueName]] <- nfmObj
                    }
                    ## new with nimbleLists: we need to initiate compilation here so we can get full returnType information, including of nimbleLists
                    RCfunProc <- typeEnv$.nimbleProject$compileRCfun(obj, initialTypeInference = TRUE)
                    return(sizeRCfunction(code, symTab, typeEnv, nfmObj, RCfunProc))
                }
            }
        }
    }
    invisible(NULL)
}

## Handler for the debugSizeProcessing() wrapper: temporarily turns on the
## debugSizeProcessing option, recurses on the wrapped expression, strips the
## wrapper from the tree, and restores the option.
sizeProxyForDebugging <- function(code, symTab, typeEnv) {
    browser()
    origValue <- .nimbleOptions$debugSizeProcessing
    message('Entering into size processing debugging. You may need to do nimbleOptions(debugSizeProcessing = FALSE) if this exits in any non-standard way.')
    setNimbleOption('debugSizeProcessing', TRUE)
    ans <- recurseSetSizes(code, symTab, typeEnv)
    removeExprClassLayer(code$caller, 1)
    setNimbleOption('debugSizeProcessing', origValue)
    return(ans)
}

## This is used by nimbleExternalCall.
## When the external call is provided as foo, returning e.g. double(0),
## we end up needing a line of code RETURNVALUE <- foo(args).
## To get the type of RETURNVALUE, we wrap that as RETURNVALUE <- asReturnSymbol(foo(args), type, nDim)
sizeAsReturnSymbol <- function(code, symTab, typeEnv) {
    returnType <- code$args[[2]]
    returnNDim <- code$args[[3]]
    ## keep only the wrapped call, stamped with the declared type/nDim
    code$args <- list(code$args[[1]])
    code$args[[1]]$type <- returnType
    code$args[[1]]$nDim <- returnNDim
    code$args[[1]]$toEigenize <- 'no'
    code$args[[1]]$sizeExprs <- NULL
    removeExprClassLayer(code, 1)
    list()
}

## Builds a single expression for the product of a list of size expressions
## (the total length implied by a set of dimension sizes).
productSizeExprs <- function(sizeExprs) {
    if(length(sizeExprs)==0) return(1)
    if(length(sizeExprs)==1) return(sizeExprs[[1]])
    ans <- substitute( (A), list(A = sizeExprs[[1]]))
    for(i in 2:length(sizeExprs)) {
        ans <- substitute(A * (B), list(A = ans, B = sizeExprs[[i]]))
    }
    ans
}

## Builds a size expression for the maximum total length among the arguments
## of 'code' selected by useArgs (used for R-style recycling).  Known numeric
## lengths are folded into a single max; unknown ones become nested max() calls.
multiMaxSizeExprs <- function(code, useArgs = rep(TRUE, length(code$args))) {
    if(length(code$args)==0) return(list()) ## probably something wrong
    codeArgsUsed <- code$args[useArgs]
    totalLengthExprs <- lapply(codeArgsUsed, function(x) if(inherits(x, 'exprClass')) productSizeExprs(x$sizeExprs) else 1)
    if(length(codeArgsUsed)==1) return(totalLengthExprs) ## a list of length 1
    numericTotalLengths <- unlist(lapply(totalLengthExprs, is.numeric))
    if(sum(numericTotalLengths) > 0) {
        maxKnownSize <- max(unlist(totalLengthExprs[numericTotalLengths]))
        if(sum(numericTotalLengths)==length(totalLengthExprs)) return(list(maxKnownSize))
        totalLengthExprs <- c(list(maxKnownSize), totalLengthExprs[-which(numericTotalLengths)])
    }
    numArgs <- length(totalLengthExprs) ## must be > 1 or it would have returned two lines above
    if(numArgs == 1) return(totalLengthExprs[[1]]) ## but check anyway
    lastMax <- substitute(max(A, B), list(A = totalLengthExprs[[numArgs]], B = totalLengthExprs[[numArgs-1]]))
    if(numArgs > 2) {
        for(i in (numArgs-2):1) {
            lastMax <- substitute(max(A, B), list(A = totalLengthExprs[[i]], B = lastMax))
        }
    }
    return(list(lastMax))
}

## Appends a one-letter type tag (D/I/B for double/integer/logical) to a
## generated function name.
addDIB <- function(name, type) {
    paste0(name, switch(type, double = 'D', integer = 'I', logical = 'B'))
}

## Size handler for dim(): result is a 1D integer vector whose length is the
## argument's nDim.  Outside of an indexing bracket the call is renamed to
## dimNimArr for C++ generation.
sizeDim <- function(code, symTab, typeEnv) {
    asserts <- recurseSetSizes(code, symTab, typeEnv)
    if(!inherits(code$args[[1]], 'exprClass')) {
        stop(exprClassProcessingErrorMsg(code, paste0('Argument of dim is not valid')), call. = FALSE)
    }
    if(code$args[[1]]$nDim == 0) {
        stop(exprClassProcessingErrorMsg(code, paste0('dim() cannot take a scalar as its argument.')), call. = FALSE)
    }
    if(code$caller$name != '[') code$name <- 'dimNimArr'
    code$nDim <- 1
    code$type <- 'integer'
    code$toEigenize <- 'no'
    code$sizeExprs <- list( code$args[[1]]$nDim )
    return(if(is.null(asserts)) list() else asserts)
}

## Size handler for diag(): like R's diag, behavior depends on the argument —
## scalar n -> n x n matrix; vector -> square matrix with that diagonal;
## matrix -> vector extracting the diagonal (length min of the two dims).
sizeDiagonal <- function(code, symTab, typeEnv) {
    ## experimentalNewSizeProcessing: code$name change step stays here
    ## experimentalNewSizeProcessing: because the 3 cases are not implementation-specific
    asserts <- recurseSetSizes(code, symTab, typeEnv)
    argIsExprClass <- inherits(code$args[[1]], 'exprClass')
    nDimArg <- if(argIsExprClass) code$args[[1]]$nDim else 0
    if(nDimArg == 0) {
        code$nDim <- 2
        code$type <- 'double'
        newSizeExpr <- parse(text = nimDeparse(code$args[[1]]), keep.source = FALSE)[[1]]
        code$sizeExprs <- list(newSizeExpr, newSizeExpr)
        code$toEigenize <- 'yes'
        code$name <- addDIB('nimDiagonal', code$type) ## These all go to double anyway
        return( if(length(asserts) == 0) NULL else asserts )
    }
    if(nDimArg == 1) {
        code$nDim <- 2
        code$type <- 'double'
        newSizeExpr <- code$args[[1]]$sizeExprs[[1]]
        code$sizeExprs <- list(newSizeExpr, newSizeExpr)
        code$toEigenize <- 'yes'
        code$name <- addDIB('nimDiagonal', code$args[[1]]$type) ## double anyway
        return( if(length(asserts) == 0) NULL else asserts )
    }
    if(nDimArg == 2) {
        code$nDim <- 1
        code$type <- code$args[[1]]$type
        code$sizeExprs <- list(substitute(min(X, Y), list(X = code$args[[1]]$sizeExprs[[1]], Y = code$args[[1]]$sizeExprs[[2]])))
        code$toEigenize <- 'yes'
        code$name <- 'diagonal'
        return( if(length(asserts) == 0) NULL else asserts )
    }
    stop(exprClassProcessingErrorMsg(code, paste0('Something is wrong with this usage of diag()')), call. = FALSE)
}

sizeWhich <- function(code, symTab, typeEnv) {
    ## which is a somewhat unique construction.
    ## It should only appear as
    ## answer <- which(boolExpr)
    ## and should be lifted to an intermediate if necessary
    ## The sizeExprs on "which" in the syntax tree will be NULL
    ## which will trigger sizeAssignAfterRecursing to make default size expressions on "answer"
    ## and then it will transform to
    ## setWhich(answer, boolExpr) for C++ output
    asserts <- recurseSetSizes(code, symTab, typeEnv)
    code$type = 'integer'
    code$sizeExprs <- list(NULL)
    code$nDim <- 1
    code$toEigenize <- 'yes'
    code$name <- 'setWhich'
    if(!nimbleOptions('experimentalSelfLiftStage')) {
        if(!(code$caller$name %in% assignmentOperators)) {
            asserts <- c(asserts, sizeInsertIntermediate(code$caller, code$callerArgID, symTab, typeEnv))
        }
    }
    if(length(asserts) == 0) NULL else asserts
}

## Size handler implementing R-style recycling for vectorized d/p/q
## distribution functions: the result is a 1D vector whose length is the max
## total length among the recycled (required) arguments; if that max is the
## constant 1, falls back to scalar handling.
sizeRecyclingRule <- function(code, symTab, typeEnv) {
    ## also need an entry in eigenization.
    asserts <- recurseSetSizes(code, symTab, typeEnv)
    ## for now this is entirely for d, p and q distribution functions, so we'll look up number of arguments for recycling
    numArgs <- length(code$args)
    if(numArgs == 0) return(asserts)
    recycleArgs <- rep(TRUE, numArgs)
    dFunName <- code$name
    substr(dFunName, 1, 1) <- 'd' ## distObjects are keyed by the 'd' form of the name
    thisDist <- distributions$distObjects[[dFunName]]
    if(!is.null(thisDist)) {
        numReqdArgs <- length(thisDist$reqdArgs)
        recycleArgs[-(1:(numReqdArgs+1))] <- FALSE ## only x and required args recycle
    }
    newSizeExprs <- multiMaxSizeExprs(code, recycleArgs)
    if(length(newSizeExprs)==1)
        if(is.numeric(newSizeExprs[[1]]))
            if(newSizeExprs[[1]] == 1)
                return(c(asserts, sizeScalarRecurse(code, symTab, typeEnv, recurse = FALSE))) ## ALSO NEED ALL ARGS TO HAVE nDim 0
    code$sizeExprs <- newSizeExprs
    code$type <- 'double' ## will need to look up from a list
    code$nDim <- 1
    code$toEigenize <- 'yes' ## toEigen: N.B. This had TRUE
    return(asserts)
}

## Size handler for recycling-rule r-functions (random generation): the
## output length comes from the FIRST argument (its value if scalar, its
## length if a vector), so that argument is lifted to a name if needed.
sizeRecyclingRuleRfunction <- function(code, symTab, typeEnv) {
    asserts <- recurseSetSizes(code, symTab, typeEnv)
    ## for now this is entirely for r distribution functions, so we'll look up number of arguments for recycling
    numArgs <- length(code$args)
    if(numArgs == 0) return(asserts)
    ## Size determined by first arg
    ## If scalar, that gives size
    ## If vector, size is length of first argument.
    ## Problem is vector of length 1, where size should be value of first element, not length of 1.
    ## toEigen: keep this lift here for now, since it sets up sizes.
    if(inherits(code$args[[1]], 'exprClass')) {
        if(!code$args[[1]]$isName) {
            asserts <- c(asserts, sizeInsertIntermediate(code, 1, symTab, typeEnv))
        }
        newSizeExprs <- list(substitute(rFunLength(X), list(X = as.name(code$args[[1]]$name))))
    } else {
        newSizeExprs <- list(code$args[[1]])
    }
    if(length(newSizeExprs)==1)
        if(is.numeric(newSizeExprs[[1]]))
            if(newSizeExprs[[1]] == 1) {
                code$args[[1]] <- NULL ## strip the first argument, which should be a 1 if we are here
                for(i in seq_along(code$args)) {
                    if(inherits(code$args[[i]], 'exprClass')) {
                        code$args[[i]]$callerArgID <- code$args[[i]]$callerArgID - 1
                    }
                }
                return(c(asserts, sizeScalarRecurse(code, symTab, typeEnv, recurse = FALSE)))
            }
    code$sizeExprs <- newSizeExprs
    code$type <- 'double' ## will need to look up from a list
    code$nDim <- 1
    code$toEigenize <- 'yes'
    return(asserts)
}

## Label generator for intermediates created while processing nimC().
concatenateIntermLabelMaker <- labelFunctionCreator("ConcatenateInterm")

sizeConcatenate <- function(code, symTab, typeEnv) {
    ## This is two argument version
    asserts <- recurseSetSizes(code, symTab, typeEnv)
    ## overall strategy is to separate runs of scaalrs and non-scalars
    ## also in C++ we don't take arbitrary arguments. Instead we chain together calls in groups of 4
    ## e.g.
c(a1, a2, a3, a4, a5) will become c( c(a1, a2, a3, a4), a5) ## first puzzle is with nimC(scalar1, scalar2, vector1, scalar3) ## we need to extract the runs of scalars like (scalar1, scalar2), so they can be packed up in an object together. isScalar <- unlist(lapply(code$args, function(x) if(inherits(x, 'exprClass')) x$nDim == 0 else TRUE)) ## run length encoding: This native R function returns information about repeats, so we can figure out how long each run of scalars is argRLE <- rle(isScalar) ## How many arguments will we have after packing scalars together into single objects: newNumArgs <- sum(argRLE$values) + sum(argRLE$lengths[!argRLE$values]) ## number of scalar runs + sum of non-scalar runs * run-lengths newArgs <- vector(length(newNumArgs), mode = 'list') iInput <- 1 iOutput <- 1 for(i in seq_along(argRLE$values)) { thisLength <- argRLE$lengths[i] if(!(argRLE$values[i])) { ## it is a run of non-scalars, so pack them into the new argument list, newArgs newArgs[(iOutput-1) + (1:thisLength)] <- code$args[(iInput-1) + (1:thisLength)] iInput <- iInput + thisLength iOutput <- iOutput + thisLength } else { ## it is a run of scalars, so construct an object for them newTempFixedName <- concatenateIntermLabelMaker() newTempVecName <- concatenateIntermLabelMaker() ## Construct: ## concatenateTemp(ConcatenateInterm_1), ## concatenateTemp is not output to C++. It is a placeholder newExpr <- exprClass(isName = FALSE, isCall = TRUE, isAssign = FALSE, name = "concatenateTemp", nDim = 1, sizeExprs = list(thisLength), type = 'double') setArg(newExpr, 1, exprClass(isName = TRUE, isCall = FALSE, isAssign = FALSE, name = newTempVecName, nDim = 1, sizeExprs = list(thisLength), type = 'double')) ## hardCodedVectorInitializer is a wrapper for the "contents1, contents2, ..." 
below valuesExpr <- quote(hardCodedVectorInitializer()) thisType <- 'logical' for(j in 1:thisLength) { thisArgIndex <- iInput - 1 + j if(inherits(code$args[[thisArgIndex]], 'exprClass')) { if(!code$args[[thisArgIndex]]$isName) ## a little heavy-handed: lift any expression of any kind ## to avoid dealing with eigen or other handling inside initialization values ## This is necessary for cases like nimC(model[[node]][2], 1.2) ## because model[[node]] is a map asserts <- c(asserts, sizeInsertIntermediate(code, thisArgIndex, symTab, typeEnv)) thisType <- arithmeticOutputType(thisType, code$args[[thisArgIndex]]$type) } else { thisType <- storage.mode(code$args[[thisArgIndex]]) ##'double' } ## Putting a map, or a values access, through parse(nimDeparse) won't work ## So we lift any expression element above. ## This could be done more cleanly with more coding work. valuesExpr[[j+1]] <- parse(text = nimDeparse(code$args[[thisArgIndex]]), keep.source = FALSE)[[1]] } newExpr$type <- thisType newExpr$args[[1]]$type <- thisType iInput <- iInput + thisLength if(thisType == 'integer') thisType <- 'int' if(thisType == 'logical') thisType <- 'bool' ## MAKE_FIXED_VECTOR("ConcatenateInterm_2", "ConcatenateInterm_1", numArgs, values, type) goes through a customized output generator ## to create something like ## double ConcatenateIterm_1[] = {contents1, contents2} ## std::vector<double> ConcatenateInterm_2(ConcatenateInterm_1, ConcatenateInterm_1 + length) ## so there is one intermediate whose only purpose is to achieve initialization by value and a second intermediate copied from the first. 
## The second intermediate can later be used in the templated nimCd/nimCi/nimCb ## newAssert <- substitute(MAKE_FIXED_VECTOR(newTempVecName, newTempFixedName, thisLength, valuesExpr, thisType), list(newTempVecName = newTempVecName, newTempFixedName = newTempFixedName, thisLength = as.numeric(thisLength), valuesExpr = valuesExpr, thisType = thisType)) newAssert <- as.call(newAssert) asserts <- c(asserts, list(newAssert)) newArgs[[iOutput]] <- newExpr iOutput <- iOutput + 1 } } ## Next step: chain together multiple calls: maxArgsOneCall <- 4 numArgGroups <- ceiling(newNumArgs / (maxArgsOneCall-1)) splitArgIDs <- split(1:newNumArgs, rep(1:numArgGroups, each = maxArgsOneCall-1, length.out = newNumArgs)) ## if last is a singleton it can be put with previous group if(length(splitArgIDs[[numArgGroups]]) == 1) { if(numArgGroups > 1) { splitArgIDs[[numArgGroups-1]] <- c(splitArgIDs[[numArgGroups-1]], splitArgIDs[[numArgGroups]]) splitArgIDs[[numArgGroups]] <- NULL numArgGroups <- numArgGroups-1 } } newExprList <- vector(numArgGroups, mode = 'list') for(i in seq_along(splitArgIDs)) { newExprList[[i]] <- exprClass(isName = FALSE, isCall = TRUE, isAssign = FALSE, name = 'nimC', nDim = 1, toEigenize = 'yes', type = 'double') for(j in seq_along(splitArgIDs[[i]])) setArg(newExprList[[i]], j, newArgs[[splitArgIDs[[i]][j]]]) } ## Last step is to set up nesting and make sizeExprs for each constructed argument for(i in seq_along(splitArgIDs)) { if(i != length(splitArgIDs)) { setArg(newExprList[[i]], maxArgsOneCall, newExprList[[i+1]]) } } for(i in rev(seq_along(splitArgIDs))) { if(inherits(newExprList[[i]]$args[[1]], 'exprClass')) { thisSizeExpr <-productSizeExprs(newExprList[[i]]$args[[1]]$sizeExprs) thisType <- newExprList[[i]]$args[[1]]$type } else { thisSizeExpr <- 1 thisType <- 'double' } for(j in seq_along(newExprList[[i]]$args)) { if(j == 1) next if(inherits(newExprList[[i]]$args[[j]], 'exprClass')) { thisSizeExpr <- substitute( (A) + (B), list(A = thisSizeExpr, B = 
productSizeExprs(newExprList[[i]]$args[[j]]$sizeExprs) )) thisType <- arithmeticOutputType(thisType, newExprList[[i]]$args[[j]]$type) } else { thisSizeExpr <- substitute( (A) + 1, list(A = thisSizeExpr )) thisType <- 'double' } } if(thisType == 'double') newExprList[[i]]$name <- 'nimCd' ## this change could get moved to genCpp_generateCpp if(thisType == 'integer') newExprList[[i]]$name <- 'nimCi' if(thisType == 'logical') newExprList[[i]]$name <- 'nimCb' newExprList[[i]]$type <- thisType newExprList[[i]]$sizeExprs <- list(thisSizeExpr) } setArg(code$caller, code$callerArgID, newExprList[[1]]) return(asserts) } sizeRep <- function(code, symTab, typeEnv) { ## if times is a vector: If length.out is provided, times is always ignored ## otherwise lift and use assignEigenToNIMBLE asserts <- recurseSetSizes(code, symTab, typeEnv) xIsExpr <- inherits(code$args[[1]], 'exprClass') code$type <- if(xIsExpr) code$args[[1]]$type else 'double' includesLengthOut <- length(code$args) > 3 if(inherits(code$args[[2]], 'exprClass')) if(code$args[[2]]$nDim != 0 && !includesLengthOut) { ## times is a vector and length.out not provided if(!(code$caller$name %in% assignmentOperators)) { asserts <- c(asserts, sizeInsertIntermediate(code$caller, code$callerArgID, symTab, typeEnv)) } if(length(code$args) > 2) code$args[[3]] <- NULL code$name <- 'setRepVectorTimes' code$sizeExprs <- list(NULL) code$nDim <- 1 code$toEigenize <- 'yes' return(asserts) } if(code$type == 'double') code$name <- 'nimRepd' ## this change could get moved to genCpp_generateCpp if(code$type == 'integer') code$name <- 'nimRepi' if(code$type == 'logical') code$name <- 'nimRepb' ## requiring for now that times and each arguments are given as integers, not expressions ## Since these will go into sizeExprs, which are then processed as R expressions, then as exprClasses but not fully size processed, ## any expression should be lifted if(includesLengthOut) { ## there is a "length.out" argument ## need to lift length.out if it 
is more than a name or constant if(inherits(code$args[[2]], 'exprClass')) if(code$args[[2]]$nDim > 0) stop(exprClassProcessingErrorMsg(code, paste0('times argument to rep() must be scalar is length.out is also provided.')), call. = FALSE) if(inherits(code$args[[3]], 'exprClass')) { ## if length.out is present, it is argument 3 if(!is.name(code$args[[3]])) asserts <- c(asserts, sizeInsertIntermediate(code, 3, symTab, typeEnv)) if(code$args[[3]]$nDim > 0) code$sizeExprs <- list( parse(text = paste0(nimDeparse(code$args[[3]]),'[1]'), keep.source = FALSE)[[1]]) else code$sizeExprs <- list( parse(text = nimDeparse(code$args[[3]]), keep.source = FALSE)[[1]]) } else { code$sizeExprs <- list(code$args[[3]]) } } else { ## length.out absent, so times is second and each is third for(i in 2:3) { if(inherits(code$args[[i]], 'exprClass')) if(!is.name(code$args[[i]])) asserts <- c(asserts, sizeInsertIntermediate(code, i, symTab, typeEnv)) } part2 <- nimDeparse(code$args[[2]]) if(inherits(code$args[[2]], 'exprClass')) if(code$args[[2]]$nDim > 0) part2 <- paste0(part2, '[1]') part3 <- nimDeparse(code$args[[3]]) if(inherits(code$args[[3]], 'exprClass')) if(code$args[[3]]$nDim > 0) part3 <- paste0(part3, '[1]') thisSizeExpr <- substitute( (AAA_) * (BBB_) * (CCC_), list(AAA_ = if(xIsExpr) productSizeExprs(code$args[[1]]$sizeExprs) else 1, BBB_ = parse(text = part2, keep.source = FALSE)[[1]], CCC_ = parse(text = part3, keep.source = FALSE)[[1]] )) code$sizeExprs <- list(thisSizeExpr) } code$nDim <- 1 code$toEigenize <- 'yes' return(asserts) } sizeNewNimbleList <- function(code, symTab, typeEnv){ ## The code looks like: nimListDef$new(a = 10, b = 12). ## We want to change code$caller to : ## { nimList <- nimListDef$new() ## nimList$a <- 10 ## nimList$b <- 12 }. ## We accomplish this by copying code, getting arguments (e.g. 
a = 10, b = 12) from copied code and turning them into assignment ## exprs in code$caller, and setting first argument of code$caller to be nimList <- nimListDef$new(). listDefName <- code$args[[1]]$name if(symTab$symbolExists(listDefName, inherits = TRUE)){ listST <- symTab$getSymbolObject(listDefName, inherits = TRUE) } else { ## We need to establish the symbol and needed type. nlDef <- get(listDefName) ## Need the nimbleProject! nlp <- typeEnv$.nimbleProject$compileNimbleList(nlDef, initialTypeInference = TRUE) className <- nl.getListDef(nlDef)$className if(is.null(typeEnv$neededRCfuns[[className]])) { newSym <- symbolNimbleList(name = listDefName, nlProc = nlp) typeEnv$neededRCfuns[[className]] <- newSym } newDefSym <- symbolNimbleListGenerator(name = listDefName, nlProc = nlp) symTab$addSymbol(newDefSym) listST <- newDefSym } code$type <- "nimbleList" code$sizeExprs <- listST code$toEigenize <- "maybe" code$nDim <- 0 asserts <- list() asserts <- c(asserts, recurseSetSizes(code, symTab, typeEnv, useArgs = c(TRUE, rep(FALSE, length(code$args)-1)))) if(!(code$caller$name %in% assignmentOperators)){ intermediateAsserts <- sizeInsertIntermediate(code$caller, code$callerArgID, symTab, typeEnv) ## intermediateAsserts can potentially have size setting stuff from sizeAssignAfterRecursing. ## Not sure if that would ever happen in this context, but to be safe we'll use last element as the actual intermediate assignment. ## Embed the intermediate assignment in a '{' (so insertAssertions will recurse on it) and recurse on it. numIntermAsserts <- length(intermediateAsserts) bracketedIntermAssert <- newBracketExpr(intermediateAsserts[numIntermAsserts]) exprClasses_setSizes(bracketedIntermAssert, symTab, typeEnv) intermediateAsserts[[numIntermAsserts]] <- bracketedIntermAssert asserts <- c(asserts, intermediateAsserts) return(asserts) } if(length(code$args) <= 1) return(asserts) ## There are no args to process. 
RnewExprs <- list() newExprs <- list() RnfVarExprs <- list() nfVarExprs <- list() exprCounter <- 1 originalCode <- code listElements <- listST$nlProc$symTab$getSymbolNames() RlistNameExpr <- nimbleGeneralParseDeparse(originalCode$caller$args[[1]]) for(i in seq_along(listElements)) { thisVarName <- listElements[i] if(!is.null(originalCode$args[[thisVarName]])) { ## Skip first arg, which will be name of nlDef, then check if value is "". if(!inherits(originalCode$args[[thisVarName]], 'exprClass') || (originalCode$args[[thisVarName]]$name != "")) { ## nfVar(A, 'x') for whichever element name it's on ('x') RnfVarExprs[[exprCounter]] <- substitute(nfVar(A, X), list(A = RlistNameExpr, X = thisVarName)) ## nfVar(A, 'x') <- y or whatever code was provided (already recursed for size processing) RnewExprs[[exprCounter]] <- substitute(A <- B, list(A = RnfVarExprs[[exprCounter]], B = nimbleGeneralParseDeparse(originalCode$args[[thisVarName]]))) exprCounter <- exprCounter + 1 } } } if(length(RnewExprs) == 0) return(asserts) ## All args have already been specified. ## Embed RnewExprs in a '{' expression. RbracketNewExprs <- quote(after({})) RbracketNewExprs[[2]][2:(length(RnewExprs) + 1)] <- RnewExprs bracketNewExprs <- RparseTree2ExprClasses(RbracketNewExprs) ## Need to install assignment target in symTab if necessary so that it ## will be there for recursion in the following step. assignmentTarget <- code$caller$args[[1]] if(assignmentTarget$isName) { if(!symTab$symbolExists(assignmentTarget$name, TRUE)) { symTab$addSymbol(symbolNimbleList(name = assignmentTarget$name, type = code$type, nlProc = code$sizeExprs$nlProc)) } } ## Recurse into element assignments. exprClasses_setSizes(bracketNewExprs$args[[1]], symTab, typeEnv) asserts <- c(asserts, list(bracketNewExprs)) if(length(code$args) > 1) ## TODO Remove this conditional, since this should always be true if we make it this far. 
code$args <- code$args[1] return(asserts) } sizemap <- function(code, symTab, typeEnv) { ## This will only be called on a map generated from setup ## Maps created from indexing in nimble code don't go through this function sym <- symTab$getSymbolObject(code$args[[1]], TRUE) code$type <- sym$type code$nDim <- code$args[[2]] code$sizeExprs <- code$args[[4]] code$toEigenize <- 'maybe' invisible(NULL) } ## size handler for nimArrayGeneral() ## nimArrayGeneral(type(character), nDim, dim (c(sizeExpr1, ...)), value, init (logical), fillZeros, recycle, unpackNDim(optional)) ## nimArrayGeneral( arg1, arg2, arg3, arg4, arg5 , arg6 , arg7 , arg8 ) sizeNimArrayGeneral <- function(code, symTab, typeEnv) { useArgs <- c(FALSE, FALSE, FALSE, TRUE, TRUE, TRUE, TRUE) if(!is.null(code$args[['unpackNDim']])) useArgs <- c(useArgs, TRUE) asserts <- recurseSetSizes(code, symTab, typeEnv, useArgs = useArgs) ## recurse on initialValue and initialLogical only ## some checking if(inherits(code$args[['init']], 'exprClass')) if(!(code$args[['init']]$nDim == 0)) stop(exprClassProcessingErrorMsg(code, paste0('init argument to numeric, logical, integer, matrix or array must be scalar')), call. = FALSE) type <- code$args[['type']] nDim <- code$args[['nDim']] unpackNDim <- if(!is.null(code$args[['unpackNDim']])) code$args[['unpackNDim']] else FALSE ##if(length(code$args) > 5) code$args[[6]] else FALSE cSizeExprs <- code$args[['dim']] ## these are the size expressions encompassed by collectSizes(), needed for purposes of the C++ line to be generated if(!inherits(cSizeExprs, 'exprClass')) stop(exprClassProcessingErrorMsg(code, paste0('Something wrong (i) with sizes or dim to numeric, logical, integer, matrix or array')), call. = FALSE) if(cSizeExprs$name != 'collectSizes') stop(exprClassProcessingErrorMsg(code, paste0('Something wrong (ii) with sizes or dim to numeric, logical, integer, matrix or array')), call. 
= FALSE) if(unpackNDim) { ## This means length of dim unknown at compile time but nDim explicitly provided, so we construct c(dim[1], dim[2]), etc. asserts <- c(asserts, recurseSetSizes(cSizeExprs, symTab, typeEnv)) if(!cSizeExprs$args[[1]]$isName) asserts <- c(asserts, sizeInsertIntermediate(cSizeExprs, 1, symTab, typeEnv)) ## this intermediate goes a layer down the AST, but works if(length(cSizeExprs$args[[1]]$sizeExprs) == 0) { ## The argument expression evaluates to scalar if(nDim == -1) { nDim <- 1 code$args[['nDim']] <- 1 } if(nDim == 1) unpackNDim <- FALSE ## and that's ok because nDim given as 1 } if(unpackNDim) { if(length(cSizeExprs$args[[1]]$sizeExprs) != 1) stop(exprClassProcessingErrorMsg(code, paste0('Something wrong (ii) with sizes or dim to numeric, logical, integer, matrix or array')), call. = FALSE) if(nDim == -1) {## code for nDim not given but dim given as expression if(!is.numeric(cSizeExprs$args[[1]]$sizeExprs[[1]])) stop() nDim <- cSizeExprs$args[[1]]$sizeExprs[1] if(nDim < 1 | nDim > 4) stop(exprClassProcessingErrorMsg(code, paste0('Something wrong (iii) with sizes or dim to numeric, logical, integer, matrix or array')), call. = FALSE) code$args[['nDim']] <- nDim } varName <- as.name(cSizeExprs$args[[1]]$name) for(i in 1:nDim) { newIndexedSizeExpr <- RparseTree2ExprClasses( substitute(X[I], list(X = varName, I = i) ) ) setArg(cSizeExprs, i, newIndexedSizeExpr) } } } else { asserts <- c(asserts, recurseSetSizes(cSizeExprs, symTab, typeEnv)) nonScalarWhereNeeded <- FALSE if(inherits(cSizeExprs$args[[1]], 'exprClass')) if(cSizeExprs$args[[1]]$nDim != 0) nonScalarWhereNeeded <- TRUE if(nDim == -1) { ## nDim wasn't provided (to nimArray) and dim was an expression, so it ought to be a scalar if(nonScalarWhereNeeded) stop(exprClassProcessingErrorMsg(code, paste0('Something wrong (iv) with sizes or dim to numeric, logical, integer, matrix or array. It looks like dim argument was non-scalar but nDim was not provided. 
If the dim argument to array (or nimArray) is a vector, you must also provide nDim argument to say how many dimensions will be used.')), call. = FALSE) nDim <- code$args[['nDim']] <- 1 } else { ## call was from numeric, integer or logical if(nonScalarWhereNeeded) stop(exprClassProcessingErrorMsg(code, paste0('Something wrong (v) with sizes or dim to numeric, logical, integer, matrix or array. It looks like length argument was non-scalar.')), call. = FALSE) } } ## if it is a call to matrix() and the value argument is non-scalar, ## we will generate it in C++ as nimNewMatrix useNewMatrix <- FALSE if(nDim == 2) { if(inherits(code$args[['value']], 'exprClass')) if(code$args[['value']]$nDim > 0) useNewMatrix <- TRUE ## use eigen-compatible C++ } if(code$args[['nDim']] != length(cSizeExprs$args)) stop(exprClassProcessingErrorMsg(code, paste0('Something wrong (iii) with sizes or dim to numeric, logical, integer, matrix or array')), call. = FALSE) annotationSizeExprs <- lapply(cSizeExprs$args, nimbleGeneralParseDeparse) ## and this is for purposes of the sizeExprs in the AST exprClass object missingSizes <- unlist(lapply(cSizeExprs$args, function(x) identical(x, as.numeric(NA)) | identical(x, NA))) ## note is.na doesn't work b/c the argument can be an expression and is.na warns on that ##old: identical, as.numeric(NA))) ## only case where we do something useful with missingSizes is matrix(value = non-scalar, ...) 
if(any(missingSizes)) { ## modify sizes in generated C++ line if(useNewMatrix) cSizeExprs$args[missingSizes] <- -1 else cSizeExprs$args[missingSizes] <- 1 ## modify annotation sizeExprs totalInputLengthExpr <- if(inherits(code$args[['value']], 'exprClass')) productSizeExprs(code$args[['value']]$sizeExprs) else 1 ## should always be exprClass in here anyway ## see newMatrixClass in nimbleEigen.h if(missingSizes[1]) { ## missing nrow if(missingSizes[2]) { ## missing both annotationSizeExprs[[1]] <- totalInputLengthExpr annotationSizeExprs[[2]] <- 1 } else { ## ncol provided annotationSizeExprs[[1]] <- substitute(calcMissingMatrixSize(A, B), list(A = totalInputLengthExpr, B = annotationSizeExprs[[2]])) } } else { ## nrow provided, ncol missing (is both provided, we wouldn't be in this code annotationSizeExprs[[2]] <- substitute(calcMissingMatrixSize(A, B), list(A = totalInputLengthExpr, B = annotationSizeExprs[[1]])) } } asserts <- c(asserts, recurseSetSizes(cSizeExprs, symTab, typeEnv)) if(!(type %in% c('double', 'integer', 'logical'))) stop('unknown type in nimArrayGeneral') ## Three possible calls can be emitted by choice of code$name: initialize (this becomes a NimArr member function call. It is used if initialization is scalar, to be repeated); assignNimArrToNimArr (this becomes a call to assignNimArrToNimArr. It is used if initialization is non-scalar and the object being created is not a matrix; nimNewMatrix[D|I|B] (this has the same name in C++. It is used if initialization is non-scalar and the object being created is a matrix. It creates an eigen-compatible object within an expression). 
code$name <- 'initialize' ## may be replaced below if useNewMatrix if(inherits(code$args[['value']], 'exprClass')) if(code$args[['value']]$nDim > 0) code$name <- 'assignNimArrToNimArr' ## could be replaced by nimNewMatrix[D|B|I] below ## rearrange arguments if(code$name == 'assignNimArrToNimArr') if(!useNewMatrix) code$args <- c(code$args[4:7], cSizeExprs$args) ## args: initialize(value, init, fillZeros, recycle, sizeExpr1, sizeExpr2, etc...) else code$args <- c(code$args[c(4,5,7)], cSizeExprs$args) ## fillZeros has no role in this case. nimNewMatrix creates an eigen object that has to return something for each element, so it will use a zero anyway. else code$args <- c(code$args[4:7], cSizeExprs$args) ## actually this turned out the same as for assignNimArrToNimArr. ## fix code/caller relationships in AST for(i in seq_along(code$args)) { if(inherits(code$args[[i]], 'exprClass')) { code$args[[i]]$callerArgID <- i code$args[[i]]$caller <- code } } code$type <- type code$nDim <- nDim code$toEigenize <- 'no' ## insert intermediate unless it will be newNimMatrix code$sizeExprs <- annotationSizeExprs ## check for nimNewMatrix case if(useNewMatrix) { suffix <- 'D' if(code$type == 'integer') suffix <- 'I' if(code$type == 'logical') suffix <- 'B' code$name <- paste0("nimNewMatrix", suffix) code$toEigenize <- "yes" } else { ## otherwise, lift values arg if necessary if(inherits(code$args[['value']], 'exprClass')) ## was re-ordered here if(!(code$args[['value']]$isName)) asserts <- c(asserts, sizeInsertIntermediate(code, 1, symTab, typeEnv)) } if(!useNewMatrix) if(inherits(code$caller, 'exprClass')) if(!(code$caller$name %in% assignmentOperators)) { if(!is.null(code$caller$name)) { asserts <- c(asserts, sizeInsertIntermediate(code$caller, code$callerArgID, symTab, typeEnv)) } } else typeEnv$.ensureNimbleBlocks <- TRUE return(asserts) } sizeRunTime <- function(code, symTab, typeEnv) { if(length(code$args) != 1) stop(exprClassProcessingErrorMsg(code, paste0('run.time must take 
exactly 1 argument')), call. = FALSE) origCaller <- code$caller origCallerArgID <- code$callerArgID if(!code$caller$isAssign) { ## e.g. a + run.time({foo(y)}), should have already been lifted by buildIntermediates message('Problem in sizeRunTime: run.time is not in a simple assignment at this stage of processing.') } ## this is the case ans <- run.time({foo(y)}) lhsName <- code$caller$args[[1]]$name timerName <- IntermLabelMaker() newSym <- symbolNimbleTimer(name = timerName, type = 'symbolNimbleTimer') symTab$addSymbol(newSym) startTimerAssert <- RparseTree2ExprClasses(substitute(startNimbleTimer(TIMERNAME), list(TIMERNAME = as.name(timerName)))) recurseAsserts <- recurseSetSizes(code, symTab, typeEnv) ## arg to run.time should be in {} so any nested asserts should be done by the time this finishes and this should return NULL if(!is.null(recurseAsserts)) { message('issue in sizeRunTime: recurseAsserts is not NULL') } asserts <- list(startTimerAssert, code$args[[1]]) newCode <- RparseTree2ExprClasses(substitute(endNimbleTimer(TIMERNAME), list(TIMERNAME = as.name(timerName)))) newCode$type <- 'double' newCode$nDim <- 0 newCode$sizeExprs <- list() newCode$toEigenize <- 'no' setArg(origCaller, origCallerArgID, newCode) return(asserts) } sizeGetParam <- function(code, symTab, typeEnv) { if(length(code$args) > 3) { asserts <- recurseSetSizes(code, symTab, typeEnv, useArgs = c(FALSE, FALSE, FALSE, rep(TRUE, length(code$args)-3))) for(i in 4:length(code$args)) { if(inherits(code$args[[i]], 'exprClass')) { if(code$args[[i]]$toEigenize=='yes') stop(exprClassProcessingErrorMsg(code, 'In sizeGetParam: There is an expression beyond the third argument that cannot be handled. If it involve vectorized math, you need to do it separately, not in this expression.'), call. 
= FALSE) } } } else { asserts <- list() } paramInfoSym <- symTab$getSymbolObject(code$args[[3]]$name, inherits = TRUE) code$type <- paramInfoSym$paramInfo$type code$nDim <- paramInfoSym$paramInfo$nDim code$sizeExprs <- vector(mode = 'list', length = code$nDim) code$toEigenize <- 'no' if(!(code$caller$name %in% assignmentOperators)) { if(!is.null(code$caller$name)) if(!(code$caller$name == '{')) ## could be on its own line -- useless but possible asserts <- c(asserts, sizeInsertIntermediate(code$caller, code$callerArgID, symTab, typeEnv)) code$toEigenize <- 'maybe' } else typeEnv$.ensureNimbleBlocks <- TRUE return(asserts) } sizeGetBound <- function(code, symTab, typeEnv) { if(length(code$args) > 3) { asserts <- recurseSetSizes(code, symTab, typeEnv, useArgs = c(FALSE, FALSE, FALSE, rep(TRUE, length(code$args)-3))) for(i in 4:length(code$args)) { if(inherits(code$args[[i]], 'exprClass')) { if(code$args[[i]]$toEigenize=='yes') stop(exprClassProcessingErrorMsg(code, 'In sizeGetParam: There is an expression beyond the third argument that cannot be handled. If it involve vectorized math, you need to do it separately, not in this expression.'), call. = FALSE) } } } else { asserts <- list() } boundInfoSym <- symTab$getSymbolObject(code$args[[3]]$name, inherits = TRUE) code$type <- boundInfoSym$boundInfo$type code$nDim <- boundInfoSym$boundInfo$nDim code$sizeExprs <- vector(mode = 'list', length = code$nDim) code$toEigenize <- 'no' if(!(code$caller$name %in% assignmentOperators)) { if(!is.null(code$caller$name)) if(!(code$caller$name == '{')) ## could be on its own line -- useless but possible asserts <- c(asserts, sizeInsertIntermediate(code$caller, code$callerArgID, symTab, typeEnv)) code$toEigenize <- 'maybe' } return(asserts) } sizeSwitch <- function(code, symTab, typeEnv) { if(length(code$args) <= 2) return(invisible(NULL)) for(i in 3:length(code$args)) { ## just like the '{' clause of exprClasses_setSizes. 
This treats each of the outcomes as if it was a new line or block of code if(inherits(code$args[[i]], 'exprClass')) { newAsserts <- exprClasses_setSizes(code$args[[i]], symTab, typeEnv) code$args[[i]]$assertions <- if(is.null(newAsserts)) list() else newAsserts } } return(invisible(NULL)) } sizeAsRowOrCol <- function(code, symTab, typeEnv) { asserts <- recurseSetSizes(code, symTab, typeEnv) a1 <- code$args[[1]] if(!inherits(a1, 'exprClass')) stop(exprClassProcessingErrorMsg(code, 'In sizeAsRowOrCol: Argument must be an expression.'), call. = FALSE) if(a1$nDim == 0) stop(exprClassProcessingErrorMsg(code, 'In sizeAsRowOrCol: Argument cannot be scalar (could be fixed).'), call. = FALSE) code$type <- a1$type code$toEigenize <- 'yes' if(!code$name %in% c('asRow', 'asCol')) stop(exprClassProcessingErrorMsg(code, 'Somehow the system got to sizeAsRowOrCol without a call to asRow or asCol.'), call. = FALSE) if(a1$nDim == 1) { if(code$name == 'asRow') { code$nDim <- 2 code$sizeExprs <- c(list(1), a1$sizeExprs[[1]]) } else { code$nDim <- 2 code$sizeExprs <- c(a1$sizeExprs[[1]], list(1)) } return(asserts) } warning(paste0(' asRow or asCol used on something with more than 1 dimension in ', nimDeparse(code)), call. = FALSE) } ## a$b becomes nfVar(a, 'b') sizeNFvar <- function(code, symTab, typeEnv) { ## toEigen: Is it correct that this does not mark toEigen? asserts <- list() if(!inherits(code$args[[1]], 'exprClass')) stop(exprClassProcessingErrorMsg(code, 'Problem using $: no name on the right?'), call. = FALSE) if(length(code$args) != 2) stop(exprClassProcessingErrorMsg(code, 'Problem using $: wrong number of arguments?'), call. 
= FALSE) asserts <- recurseSetSizes(code, symTab, typeEnv) if(code$args[[1]]$isName) { objectName <- code$args[[1]]$name symbolObject <- symTab$getSymbolObject(objectName, inherits = TRUE) objectType <- symbolObject$type } else { ## if there is nesting, A$B$C, figure out what to do objectType <- code$args[[1]]$type symbolObject <- code$args[[1]]$sizeExprs ## repurposed for this role } isSymFunc <- objectType == 'nimbleFunction' ## minor inconsistency in naming style here isSymList <- objectType == 'nimbleList' ## Cases to handle (nl for nimbleList, nf for nimbleFunction): ## nl$a <- x ## NimArr assignment (not setSize needed) ## nl$a <- x + 1 ## eigen assignment (setSize needed) ## nl1$nl2$ <- x or x + 1 ## x <- foo(nl$a) ## x <- foo(nl1$nl2$b) ## same with nf instead of any nl, in any order ## nl$new()$a , which becomes makeNewNimbleListObject(nl1)$a ## nl in nlEigenReferenceList if(!(isSymFunc || isSymList)) stop(exprClassProcessingErrorMsg(code, 'In sizeNFvar: First argument is not a nimbleFunction or a nimbleList'), call. = FALSE) nfProc <- if(isSymFunc) symbolObject$nfProc else symbolObject$nlProc if(is.null(nfProc)) { browser() stop(exprClassProcessingErrorMsg(code, 'In handling X$Y: Symbols for X have not been set up.'), call. = FALSE) } memberName <- code$args[[2]] if(!is.character(memberName)) stop(exprClassProcessingErrorMsg(code, 'In handling X$Y: Something is wrong with Y.'), call. 
= FALSE)
    memberSymbolObject <- nfProc$getSymbolTable()$getSymbolObject(memberName)
    if(!is.null(memberSymbolObject)) code$type <- memberSymbolObject$type
    if(isSymList | isSymFunc) { ## nimbleList
        ## We need (*nl) in C++, represented by cppPointerDereference(nl)
        if(code$args[[1]]$name != 'cppPointerDereference') {
            a1 <- insertExprClassLayer(code, 1, 'cppPointerDereference', type = code$args[[1]]$type, nDim = code$args[[1]]$nDim, sizeExprs = code$args[[1]]$sizeExprs)
        }
    }
    ## following checks are on type of A$B (isSymList and isSymFunc refer to type of A)
    if(code$type == 'nimbleList') {
        ## for a nimbleList, sizeExprs slot is used for symbol object
        ## of nlGenerator of member object
        code$sizeExprs <- memberSymbolObject
    } else if(code$type == 'nimbleFunction') { ## nimbleFunction
        code$sizeExprs <- memberSymbolObject
    } else if(code$type == 'nimbleFunctionList') {
        code$sizeExprs <- memberSymbolObject
    } else { ## a numeric etc. type
        code$nDim <- memberSymbolObject$nDim
        code$sizeExprs <- if(code$nDim > 0) makeSizeExpressions(memberSymbolObject$size, parse(text = nimDeparse(code))[[1]]) else list()
    }
    return(asserts)
}

## Size handler for nimDerivs(): rewrites the wrapped call into its
## derivative-enabled counterpart ('calculateWithArgs_deriv' for calculate(),
## otherwise '<name>_deriv'), moves the order argument onto the inner call,
## and annotates the result as the NIMBLE_ADCLASS nimbleList.
sizeNimDerivs <- function(code, symTab, typeEnv){
    if(code$args[[1]]$name == 'calculate'){
        ## Fixed: use TRUE/FALSE rather than the reassignable shorthands T/F.
        calcDerivFlag <- TRUE
        code$args[[1]]$name <- paste0(code$args[[1]]$name, 'WithArgs_deriv')
    } else{
        calcDerivFlag <- FALSE
        code$args[[1]]$name <- paste0(code$args[[1]]$name, '_deriv')
    }
    ## Splice the inner call into the caller's slot and append the order
    ## argument to the inner call's argument list.
    setArg(code$caller, code$callerArgID, code$args[[1]])
    setArg(code$args[[1]], length(code$args[[1]]$args) + 1, code$args[[2]]) # Set order argument.
    code$args[[2]] <- NULL
    asserts <- recurseSetSizes(code$args[[1]], symTab, typeEnv)
    ## The rewritten call returns a scalar nimbleList of class NIMBLE_ADCLASS.
    code$args[[1]]$type <- 'nimbleList'
    code$args[[1]]$sizeExprs <- symTab$getSymbolObject('NIMBLE_ADCLASS', TRUE)
    code$args[[1]]$toEigenize <- "yes"
    code$args[[1]]$nDim <- 0
    if(calcDerivFlag) asserts <- c(asserts, sizeScalarModelOp(code$args[[1]], symTab, typeEnv))
    else asserts <- c(asserts, sizeNimbleFunction(code$args[[1]], symTab, typeEnv))
    #setArg(code$args[[1]], length(code$args[[1]]$args) + 1, code$args[[3]]) # Sets variables argument, not yet implemented.
    if(length(asserts) == 0) NULL else asserts
}

## Size handler for functions that return a nimbleList (e.g. nimEigen, nimSvd):
## looks up — or lazily compiles — the generator symbol for the returned
## list class and annotates `code` with it.
sizeNimbleListReturningFunction <- function(code, symTab, typeEnv) {
    asserts <- recurseSetSizes(code, symTab, typeEnv)
    code$type <- 'nimbleList'
    nlGen <- nimbleListReturningFunctionList[[code$name]]$nlGen
    nlDef <- nl.getListDef(nlGen)
    className <- nlDef$className
    symbolObject <- symTab$getSymbolObject(className, inherits = TRUE)
    if(is.null(symbolObject)) {
        ## First use of this list class: compile it and register its symbol.
        nlp <- typeEnv$.nimbleProject$compileNimbleList(nlGen, initialTypeInference = TRUE)
        symbolObject <- symbolNimbleListGenerator(name = className, nlProc = nlp)
        symTab$addSymbol(symbolObject)
    }
    code$sizeExprs <- symbolObject
    code$toEigenize <- "yes" # This is specialized for nimSvd and nimEigen.
    if(code$name == 'getDerivs') code$toEigenize <- 'no' ## Temp. solution to ensure that derivsOrders argument is a nimArray and not an eigen type.
code$nDim <- 0
    if(!nimbleOptions('experimentalSelfLiftStage')) {
        ## Lift the nimbleList-returning call into an intermediate unless it is
        ## already the RHS of a simple assignment.
        if(!(code$caller$name %in% assignmentOperators)) asserts <- c(asserts, sizeInsertIntermediate(code$caller, code$callerArgID, symTab, typeEnv))
    }
    if(length(asserts) == 0) NULL else asserts
}

## Size handler for optim(): annotates the returned optim-result nimbleList,
## validates/rewrites the fn and gr arguments (must be RC functions or
## nfMethod() calls), and lifts eigenizable lower/upper bounds into
## intermediates.
sizeOptim <- function(code, symTab, typeEnv) {
    asserts <- recurseSetSizes(code, symTab, typeEnv)
    code$type <- 'nimbleList'
    nlGen <- nimbleListReturningFunctionList[[code$name]]$nlGen
    nlDef <- nl.getListDef(nlGen)
    className <- nlDef$className
    symbolObject <- symTab$getSymbolObject(className, inherits = TRUE)
    if(is.null(symbolObject)) {
        ## First use of this list class: compile it and register its symbol.
        nlp <- typeEnv$.nimbleProject$compileNimbleList(nlGen, initialTypeInference = TRUE)
        symbolObject <- symbolNimbleListGenerator(name = className, nlProc = nlp)
        symTab$addSymbol(symbolObject)
    }
    code$sizeExprs <- symbolObject
    code$toEigenize <- "no"
    code$nDim <- 0
    fnCode <- code$args$fn
    if (fnCode$name == 'nfMethod') {
        # This is handled in cppOutputNFmethod.
    } else if(exists(fnCode$name) && is.rcf(get(fnCode$name))) {
        # Handle fn arguments that are RCfunctions.
        fnCode$name <- environment(get(fnCode$name))$nfMethodRCobject$uniqueName
    } else {
        stop(paste0('unsupported fn argument in optim(par, fn = ', fnCode$name, '); try an RCfunction or nfMethod instead'))
    }
    grCode <- code$args$gr
    if (identical(grCode, "NULL")) {
        # We simply emit "NULL".
    } else if (grCode$name == 'nfMethod') {
        # This is handled in cppOutputNFmethod.
    } else if(exists(grCode$name) && is.rcf(get(grCode$name))) {
        # Handle gr arguments that are RCfunctions.
        grCode$name <- environment(get(grCode$name))$nfMethodRCobject$uniqueName
    } else {
        stop(paste0('unsupported gr argument in optim(par, gr = ', grCode$name, '); try an RCfunction or nfMethod instead'))
    }
    ## lower/upper bounds that would be eigenized must be lifted into
    ## plain intermediates first.
    for(arg in c(code$args$lower, code$args$upper)) {
        if(inherits(arg, 'exprClass') && arg$toEigenize=='yes') {
            asserts <- c(asserts, sizeInsertIntermediate(code, arg$callerArgID, symTab, typeEnv))
        }
    }
    if(length(asserts) == 0) NULL else asserts
}

## Size handler for optimDefaultControl(): annotates the returned control
## nimbleList, compiling and registering its generator symbol on first use.
sizeOptimDefaultControl <- function(code, symTab, typeEnv) {
    asserts <- recurseSetSizes(code, symTab, typeEnv)
    code$type <- 'nimbleList'
    nlGen <- nimbleListReturningFunctionList[[code$name]]$nlGen
    nlDef <- nl.getListDef(nlGen)
    className <- nlDef$className
    symbolObject <- symTab$getSymbolObject(className, inherits = TRUE)
    if(is.null(symbolObject)) {
        nlp <- typeEnv$.nimbleProject$compileNimbleList(nlGen, initialTypeInference = TRUE)
        symbolObject <- symbolNimbleListGenerator(name = className, nlProc = nlp)
        symTab$addSymbol(symbolObject)
    }
    code$sizeExprs <- symbolObject
    code$toEigenize <- "no"
    code$nDim <- 0
    if(length(asserts) == 0) NULL else asserts
}

## Size handler for cppPointerDereference(): transparent — copies all
## annotations straight through from its single argument.
sizeCppPointerDereference <- function(code, symTab, typeEnv) {
    asserts <- recurseSetSizes(code, symTab, typeEnv)
    code$type <- code$args[[1]]$type
    code$sizeExprs <- code$args[[1]]$sizeExprs
    code$toEigenize <- code$args[[1]]$toEigenize
    code$nDim <- code$args[[1]]$nDim
    if(length(asserts) == 0) NULL else asserts
}

## Size handler for nfList[[i]]: the object must be a nimbleFunctionList;
## its symbol object is carried through in sizeExprs.
sizeDoubleBracket <- function(code, symTab, typeEnv) {
    asserts <- recurseSetSizes(code, symTab, typeEnv)
    if(code$args[[1]]$isName) {
        objectName <- code$args[[1]]$name
        symbolObject <- symTab$getSymbolObject(objectName, inherits = TRUE)
        objectType <- symbolObject$type
    } else {
        ## if there is nesting, A$B$C, figure out what to do
        objectType <- code$args[[1]]$type
        symbolObject <- code$args[[1]]$sizeExprs ## repurposed for this role
    }
    isSymFuncList <- objectType == 'nimbleFunctionList'
    if(!isSymFuncList) stop('nfList[[i]] must use a nimbleFunctionList')
    code$sizeExprs <- symbolObject
code$type <- objectType
    return(if(is.null(asserts)) list() else asserts)
}

## Size handler for chained calls, i.e. calling the result of nfMethod():
## nfMethod(nf, 'foo')(a) or nfMethod(nf[[i]], 'foo')(a).
## Resolves the method's return symbol and argument symbol table and
## delegates annotation to generalFunSizeHandlerFromSymbols().
sizeChainedCall <- function(code, symTab, typeEnv) {
    ## options include nfMethod(nf, 'foo')(a), or nfMethod(nf[[i]], 'foo')(a) [which arises from nf[[i]]$foo(a), where nf is a local nflist, where nf could need recursion, in which case it will be wrapped in nfVar
    ## In other places we generate chainedCalls for static_cast<int>(a), but those shouldn't be seen here
    a1 <- code$args[[1]]
    if(!inherits(a1, 'exprClass')) stop(exprClassProcessingErrorMsg(code, 'In sizeChainedCall. First arg is not an expression.'), call. = FALSE)
    ## Fixed: removed unused locals `nfMethodRCobj` and `a11`, which were
    ## assigned but never read.
    if(a1$name != 'nfMethod') stop(exprClassProcessingErrorMsg(code, 'Some problem processing a chained call.'), call. = FALSE)
    asserts <- recurseSetSizes(a1, symTab, typeEnv, useArgs = c(TRUE, rep(FALSE, length(a1$args)-1)))
    methodName <- a1$args[[2]]
    ## Resolve the type of the object whose method is being called.
    if(a1$args[[1]]$isName) {
        objectName <- a1$args[[1]]$name
        symbolObject <- symTab$getSymbolObject(objectName, inherits = TRUE)
        objectType <- symbolObject$type
    } else {
        ## if there is nesting, A$B$C, figure out what to do
        objectType <- a1$args[[1]]$type
        symbolObject <- a1$args[[1]]$sizeExprs ## repurposed for this role
    }
    isSymFun <- objectType == 'nimbleFunction'
    isSymFunList <- objectType == 'nimbleFunctionList'
    if(! (isSymFun | isSymFunList)) stop('Problem processing what looks like a member function call.')
    if(!is.character(methodName)) stop(exprClassProcessingErrorMsg(code, 'In handling X$Y: Something is wrong with Y.'), call. = FALSE)
    nfProc <- symbolObject$nfProc
    if(is.null(nfProc)) {
        stop(exprClassProcessingErrorMsg(code, 'In handling X$Y(): Symbols for X have not been set up.'), call. = FALSE)
    }
    if(isSymFun) {
        ## A nimbleFunction object needs (*nf) in C++.
        if(a1$args[[1]]$name != 'cppPointerDereference') {
            insertExprClassLayer(a1, 1, 'cppPointerDereference') ## not annotated, but not needed
        }
    }
    if(isSymFun) {
        returnSymbol <- nfProc$compileInfos[[methodName]]$returnSymbol
        argSymTab <- nfProc$compileInfos[[methodName]]$origLocalSymTab
    }
    if(isSymFunList) {
        returnSymbol <- nfProc$compileInfos[[methodName]]$returnSymbol
        argSymTab <- nfProc$compileInfos[[methodName]]$origLocalSymTab
    }
    if(!is.null(returnSymbol)) {
        asserts <- generalFunSizeHandlerFromSymbols(code, symTab, typeEnv, returnSymbol, argSymTab, chainedCall = TRUE)
        return(asserts)
    }
    ## Fixed: previously `invisible(NULL)` was evaluated as a dead no-op BEFORE
    ## writeLines(), so the function returned writeLines()'s value visibly.
    ## Warn first, then return NULL invisibly.
    writeLines('Warning')
    invisible(NULL)
}

## Size handler for values(model_values_accessor[, index]): annotates as a
## 1D double vector whose length is a C++ accessor-length expression.
sizeValues <- function(code, symTab, typeEnv) {
    code$nDim <- 1
    code$type <- 'double'
    code$toEigenize <- 'no'
    sym <- symTab$getSymbolObject(code$args[[1]]$name, TRUE)
    indexRangeCase <- FALSE
    if(length(code$args) == 1) { # full vector of nodes
        code$sizeExprs <- list(substitute(cppMemberFunction(getTotalLength(ACCESSNAME)), list(ACCESSNAME = as.name(code$args[[1]]$name))))
        asserts <- list()
    } else { # there must be index on the node
        asserts <- recurseSetSizes(code, symTab, typeEnv, useArgs = c(FALSE, rep(TRUE, length(code$args)-1)))
        if(is.numeric(code$args[[2]])) {
            code$sizeExprs <- list(substitute(cppMemberFunction(getNodeLength(ACCESSNAME, ACCESSINDEX)), list(ACCESSNAME = as.name(code$args[[1]]$name), ACCESSINDEX = code$args[[2]])))
        } else {
            ## Non-constant index: lift into an intermediate if it is not a name.
            if(!(code$args[[2]]$isName)) asserts <- c(asserts, sizeInsertIntermediate(code, 2, symTab, typeEnv))
            if(code$args[[2]]$nDim > 0) {
                code$sizeExprs <- list(substitute(getNodesLength_Indices(ACCESSNAME, ACCESSINDEX), list(ACCESSNAME = as.name(code$args[[1]]$name), ACCESSINDEX = as.name(code$args[[2]]$name))))
                indexRangeCase <- TRUE
            } else {
                code$sizeExprs <- list(substitute(cppMemberFunction(getNodeLength(ACCESSNAME, ACCESSINDEX)), list(ACCESSNAME = as.name(code$args[[1]]$name), ACCESSINDEX = as.name(code$args[[2]]$name))))
            }
        }
    }
    ## Fixed: `&&` for the scalar `if` condition (was elementwise `&`).
    if(code$caller$name == "[" && code$caller$callerArgID == 1)
# values(...)[.] <-
        if(typeEnv$.AllowUnknowns) ## a surrogate for being on LHS of an assignment. values(...)[] should work on RHS
            stop(exprClassProcessingErrorMsg(code, 'In sizeValues: indexing of values() on left-hand size of an assignment is not allowed.'), call. = FALSE)
    if(code$caller$name %in% assignmentOperators) {
        if(code$callerArgID == 2) { ## ans <- values(...)
            code$name <- if(!indexRangeCase) 'getValues' else 'getValuesIndexRange'
            LHS <- code$caller$args[[1]]
            if(LHS$isName) {
                ## It is a little awkward to insert setSize here, but this is different from other cases in sizeAssignAfterRecursing
                assertSS <- list(substitute(setSize(LHS), list(LHS = as.name(LHS$name))))
                ## Fill in the length expression as setSize's second argument.
                if(length(code$args) == 1) { # full vector of nodes
                    assertSS[[1]][[3]] <- substitute(cppMemberFunction(getTotalLength(ACCESSNAME)), list(ACCESSNAME = as.name(code$args[[1]]$name)))
                } else { # there must be index on the node
                    if(is.numeric(code$args[[2]])) {
                        assertSS[[1]][[3]] <- substitute(cppMemberFunction(getNodeLength(ACCESSNAME, ACCESSINDEX)), list(ACCESSNAME = as.name(code$args[[1]]$name), ACCESSINDEX = code$args[[2]]))
                    } else {
                        if(code$args[[2]]$nDim > 0) {
                            assertSS[[1]][[3]] <- substitute(getNodesLength_Indices(ACCESSNAME, ACCESSINDEX), list(ACCESSNAME = as.name(code$args[[1]]$name), ACCESSINDEX = as.name(code$args[[2]]$name))) ## intermediate has already been inserted above, if needed
                        } else {
                            assertSS[[1]][[3]] <- substitute(cppMemberFunction(getNodeLength(ACCESSNAME, ACCESSINDEX)), list(ACCESSNAME = as.name(code$args[[1]]$name), ACCESSINDEX = as.name(code$args[[2]]$name)))
                        }
                    }
                }
                asserts <- c(asserts, assertSS)
            } else typeEnv$.ensureNimbleBlocks <- TRUE
        } else { # values(...) <- P, don't change it
            if(indexRangeCase) code$name <- 'valuesIndexRange'
        }
    } else { ## values(...) embedded in a RHS expression
        code$name <- if(!indexRangeCase) 'getValues' else 'getValuesIndexRange'
        code$toEigenize <- 'yes' ## This tricks sizeAssignAfterRecursing to generate the setSize in asserts, in getValues case (getValuesIndexRange is in set of names to skip for that)
        asserts <- c(asserts, sizeInsertIntermediate(code$caller, code$callerArgID, symTab, typeEnv))
        code$toEigenize <- 'no'
    }
    if(length(asserts)==0) NULL else asserts
}

## Size handler for a call to an RC (run-code) function: renames the call to
## the function's unique compiled name and annotates from its return symbol
## and original argument symbol table.
sizeRCfunction <- function(code, symTab, typeEnv, nfmObj, RCfunProc) {
    returnType <- nfmObj$returnType
    argInfo <- nfmObj$argInfo
    code$name <- nfmObj$uniqueName
    returnSymbol <- RCfunProc$compileInfo$returnSymbol
    argSymTab <- RCfunProc$compileInfo$origLocalSymTab
    asserts <- generalFunSizeHandlerFromSymbols(code, symTab, typeEnv, returnSymbol, argSymTab)
    return(asserts)
}

## Size handler for a call to a member function of this nimbleFunction.
## Direct myNF$run() calls are NOT handled here (see comment below).
sizeNimbleFunction <- function(code, symTab, typeEnv) {
    ## This will handle other nimbleFunction run calls or other methods of this nimbleFunction
    sym <- symTab$getSymbolObject(code$name, TRUE)
    ok <- FALSE
    if(inherits(sym, 'symbolNimbleFunction')) {
        stop(exprClassProcessingErrorMsg(code, 'In sizeNimbleFunction: A nimbleFunction method should not be processed here.'), call. = FALSE)
        ## HANDLING OF myNF$run() HERE IS DEFUNCT. ALL SHOULD GO THROUGH sizeChainedCall now (chainedCall(nfMethod(myNF,'run'), arg1, arg2).
    }
    if(inherits(sym, 'symbolMemberFunction')) {
        memberRCfunProc <- sym$RCfunProc
        returnSymbol <- memberRCfunProc$compileInfo$returnSymbol
        argSymTab <- memberRCfunProc$compileInfo$origLocalSymTab
        ok <- TRUE
    }
    if(ok) {
        asserts <- generalFunSizeHandlerFromSymbols(code, symTab, typeEnv, returnSymbol, argSymTab)
        return(asserts)
    }
    stop(exprClassProcessingErrorMsg(code, 'In sizeNimbleFunction: The function name is not known and is not a nimbleFunction or a member function.'), call. = FALSE)
}

## Recurse into the arguments of a call, running exprClasses_setSizes on each
## exprClass argument selected by `useArgs`, and collect returned assertions.
recurseSetSizes <- function(code, symTab, typeEnv, useArgs = rep(TRUE, length(code$args))) {
    ## won't be here unless code is a call.
## It will not be a {
    asserts <- list()
    for(i in seq_along(code$args)) {
        if(useArgs[i]) {
            if(inherits(code$args[[i]], 'exprClass')) {
                asserts <- c(asserts, exprClasses_setSizes(code$args[[i]], symTab, typeEnv))
            }
        }
    }
    if(length(asserts)==0) NULL else asserts
}

## promote numeric output to most information-rich type, double > integer > logical
## Note this will not be correct for logical operators, where output type should be logical
arithmeticOutputType <- function(t1, t2) {
    if(t1 == 'double') return('double')
    if(t2 == 'double') return('double')
    if(t1 == 'integer') return('integer')
    if(t2 == 'integer') return('integer')
    return('logical')
}

## Generate R code for an equality assertion
## Returns NULL when lhs and rhs are identical (no assertion needed);
## otherwise an unevaluated `if(lhs != rhs) nimPrint(msg)` expression.
identityAssert <- function(lhs, rhs, msg = "") {
    if(identical(lhs, rhs)) return(NULL)
    ## Escape double quotes so msg survives later deparsing into C++ output.
    msg <- gsub("\"", "\\\\\"", msg)
    substitute(if(lhs != rhs) nimPrint(msg), list(lhs = lhs, rhs = rhs, msg = msg))
}

## Determine if LHS is less information-rich that RHS and issue a warning.
## e.g. if LHS is int but RHS is double.
assignmentTypeWarn <- function(LHS, RHS) {
    if(LHS == 'int' & RHS == 'double') return(TRUE)
    if(LHS == 'logical' & RHS != 'logical') return(TRUE)
    return(FALSE)
}

## used for setAll
## toEigen: N.B. This may be deprecated.
sizeOneEigenCommand <- function(code, symTab, typeEnv) {
    if(!code$args[[1]]$isName) stop(exprClassProcessingErrorMsg(code, 'In sizeOneEigenCommand: First arg should be a name.'), call. = FALSE)
    recurseSetSizes(code, symTab, typeEnv)
    if(code$args[[1]]$nDim != 2) stop(exprClassProcessingErrorMsg(code, 'In sizeOneEigenCommand: At the moment only works for 2D objects.'), call. = FALSE)
    code$toEigenize <- 'yes'
    invisible(NULL)
}

## This is used for nimPrint
## If anything has toEigenize == "maybe", the whole expression gets "yes"
## That way cout<<X; will use an eigen map for X
sizeforceEigenize <- function(code, symTab, typeEnv) {
    asserts <- recurseSetSizes(code, symTab, typeEnv)
    toEigs <- lapply(code$args, function(x) { if(inherits(x, 'exprClass')) x$toEigenize else 'unknown' })
    ## Non-name logical sub-expressions are lifted into intermediates first.
    toLift <- lapply(code$args, function(x) { if(inherits(x, 'exprClass')) (identical(x$type, 'logical') & !x$isName) else FALSE })
    for(i in seq_along(toLift)) {
        if(toLift[[i]]) asserts <- c(asserts, sizeInsertIntermediate(code, i, symTab, typeEnv))
    }
    code$toEigenize <- if(any( unlist(toEigs) %in% c('maybe', 'yes'))) 'yes' else 'no'
    code$type <- 'unknown'
    if(length(asserts) == 0) NULL else asserts
}

## This is for when the programmer has directly written "resize(Z, 3, dim(A)[1])".
## When the resize is automatically generated, it skips size inference
## Deparse an exprClass back to an R parse tree; pass non-exprClass through.
nimbleGeneralParseDeparse <- function(code) {
    if(inherits(code,'exprClass')) parse(text = nimDeparse(code), keep.source = FALSE)[[1]] else code
}

## Size handler for setSize()/resize(): validates dimension arguments,
## repacks a c(...) of sizes into separate arguments, and lifts non-name
## vector size arguments into intermediates.
sizeSetSize <- function(code, symTab, typeEnv) {
    #go inside nfVar call if resizing nimbleList element
    if(code$args[[1]]$name == 'nfVar'){
        useArg1 <- TRUE
        sym <- symTab$getSymbolObject(code$args[[1]]$args[[1]]$name)
        if(sym$type == 'nimbleList'){
            sym <- sym$nlProc$symTab$getSymbolObject(code$args[[1]]$args[[2]])
        }
    } else {
        sym <- symTab$getSymbolObject(code$args[[1]]$name, inherits = TRUE)
        useArg1 <- FALSE
    }
    asserts <- list()
    if(!inherits(sym, 'symbolNumericList')) {
        if(sym$nDim == 0) stop(exprClassProcessingErrorMsg(code, 'In sizeSetSize: Resizing a scalar does not make sense.'), call.
= FALSE)
        firstSizeExpr <- code$args[[2]]
        ## first two arguments are variable to be resized and new sizes
        ## extra arguments would be fillZeros and recycle
        ## need to determine if any extra arguments were provided in order to repack arguments correctly below
        if(length(code$args) > 2) nExtraArgs <- length(code$args)-2 else nExtraArgs <- 0
        if(nExtraArgs > 0) asserts <- c(asserts, recurseSetSizes(code, symTab, typeEnv, c(rep(FALSE, 2), rep(TRUE, nExtraArgs))))
        if(inherits(firstSizeExpr, 'exprClass')) {
            if(firstSizeExpr$name == 'nimC') { ## handle syntax of resize(Z, c(3, dim(A)[1]))
                if(length(firstSizeExpr$args) != sym$nDim) stop(exprClassProcessingErrorMsg(code, 'In sizeSetSize: Problem with number of dimensions provided in resize.'), call. = FALSE)
                asserts <- c(asserts, recurseSetSizes(firstSizeExpr, symTab, typeEnv)) ## may set intermediates if needed
                if(nExtraArgs > 0) {
                    origExtraArgs <- code$args[3:length(code$args)] ## preserve extra arguments
                    code$args <- code$args[1:2]
                }
                ## Unpack each element of c(...) into its own argument slot.
                ## NOTE(review): 1:length(...) would misbehave for a zero-length
                ## c() — presumably nimC always has >= 1 args here; confirm.
                for(i in 1:length(firstSizeExpr$args)) {
                    code$args[[i+1]] <- firstSizeExpr$args[[i]]
                    if(inherits(firstSizeExpr$args[[i]], 'exprClass')) {
                        firstSizeExpr$args[[i]]$caller <- code
                        firstSizeExpr$args[[i]]$callerArgID <- i+1
                    }
                }
                if(nExtraArgs > 0) { ## reinsert extra arguments on end.
                    for(i in 1:nExtraArgs) {
                        setArg(code, length(code$args) + 1, origExtraArgs[[i]])
                    }
                }
                return(if(length(asserts)==0) NULL else asserts)
            }
        }
        useArgs <- c(useArg1, TRUE )
        if(nExtraArgs > 0) useArgs <- c(useArgs, rep(FALSE, nExtraArgs))
        asserts <- c(asserts, recurseSetSizes(code, symTab, typeEnv, useArgs) )
        if(inherits(code$args[[2]], 'exprClass')) {
            if(code$args[[2]]$nDim > 0) {
                ## A vector of sizes: lift if not a name, and switch to the
                ## NimArr-to-NimArr resize form.
                if(!(code$args[[2]]$isName)) asserts <- c(asserts, sizeInsertIntermediate(code, 2, symTab, typeEnv))
                code$name <- 'setSizeNimArrToNimArr'
            }
        }
        ## We used to update typeEnv here with the new sizes, but it is not safe to do so because the setSize might appear inside a conditional (if-then)
        ## and hence one can't know until run-time if the size will actually be changed as given. Thus typeEnv sizeExprs are set when a variable first appears
        ## and should be either constants (and not ever setSized again, which we should check for but don't) or remain generic (dim(x)[1], etc)
        ## assign(code$args[[1]]$name, exprTypeInfoClass$new(nDim = sym$nDim, sizeExprs = lapply(code$args[-1], nimbleGeneralParseDeparse), type = sym$type), envir = typeEnv)
        return(if(length(asserts)==0) NULL else asserts)
    }
    if(inherits(sym, 'symbolNumericList') ) { ## these are deprecated
        if(length(code$args) != 2 + sym$nDim) stop(exprClassProcessingErrorMsg(code, 'In sizeSetSize: Problem with number of dimensions provided in resize.'), call. = FALSE)
        invisible(NULL)
    }
}

## This was redundant and we should eventually be able to remove it
## toEigen: N.B. omitting this
sizeResizeNoPtr <- function(code, symTab, typeEnv){
    sym <- symTab$getSymbolObject(code$args[[1]]$name, inherits = TRUE)
    if(length(code$args[[2]]) != 1) stop(exprClassProcessingErrorMsg(code, 'In sizeResizeNoPtr: Problem with number of dimensions provided in resize.'), call. = FALSE)
    ## no longer modify typeEnv
    ## assign(code$name, exprTypeInfoClass$new(nDim = 1, sizeExprs = lapply(code$args[-1], nimDeparse), type = sym$type), envir = typeEnv)
    invisible(NULL)
}

## Handler for for-loops: a fairly special case
## e.g. for(i in 1:10) {do(i)}
## Annotates the index variable as a scalar of the range's type, registers it
## in typeEnv/symTab if new, then processes the loop body.
sizeFor <- function(code, symTab, typeEnv) {
    if(length(code$args) != 3) stop('Error in sizeFor: expected 3 arguments to a for-loop', call. = FALSE)
    ## first handle type of the indexing variable
    if(!inherits(code$args[[2]], 'exprClass')) stop(exprClassProcessingErrorMsg(code, 'In sizeFor: expected the index range to be an expression (exprClass).'), call. = FALSE)
    asserts <- exprClasses_setSizes(code$args[[2]], symTab, typeEnv)
    code$args[[1]]$nDim <- 0
    code$args[[1]]$sizeExprs <- list()
    code$args[[1]]$type <- code$args[[2]]$type
    code$args[[1]]$toEigenize <- 'no'
    ## if index is unknown, create it in typeEnv and in the symTab
    if(!exists(code$args[[1]]$name, envir = typeEnv, inherits = FALSE)) {
        assign(code$args[[1]]$name, exprTypeInfoClass$new(nDim = 0, type = code$args[[1]]$type), envir = typeEnv)
        symTab$addSymbol(symbolBasic(name = code$args[[1]]$name, nDim = 0, type = code$args[[1]]$type))
    }
    typeEnv[[code$args[[1]]$name]]$sizeExprs <- list()
    ## Now the 3rd arg, the body of the loop, can be processed
    asserts <- c(asserts, exprClasses_setSizes(code$args[[3]], symTab, typeEnv))
    ## I think there shouldn't be any asserts returned since the body should be a bracket expression.
    return(if(length(asserts) == 0) invisible(NULL) else asserts)
}

## Lift argument `argID` of `code` into a freshly named intermediate variable,
## returning the generated assignment/setMap code to be spliced into the
## surrounding assertion list.
sizeInsertIntermediate <- function(code, argID, symTab, typeEnv, forceAssign = FALSE) {
    newName <- IntermLabelMaker()
    ## I think it is valid and general to catch maps here.
## For most variables, creating an intermediate involves interN <- expression being lifted
    ## But for map, which will be using a NimArr if it is lifted here, what we need to generate is setMap call
    mapcase <- if(is.numeric(code$args[[argID]])) FALSE else (code$args[[argID]]$name == 'map' & !forceAssign)
    if(mapcase) {
        ans <- nimArrMapExpr(code$args[[argID]], symTab, typeEnv, newName) ## That should create the symTab entry
        ans <- RparseTree2ExprClasses(ans)
        ## Build the replacement argument: a bare name carrying the lifted
        ## expression's annotations.
        newArgExpr <- RparseTree2ExprClasses(as.name(newName))
        newArgExpr$type <- code$args[[argID]]$type
        newArgExpr$sizeExprs <- code$args[[argID]]$sizeExprs
        if(!nimbleOptions('experimentalNewSizeProcessing')) {
            newArgExpr$toEigenize <- 'maybe'
        }
        newArgExpr$nDim <- code$args[[argID]]$nDim
    } else {
        ## One may wonder where the new variable is added to the
        ## symbolTable. That happens when we do
        ## sizeAssignAfterRecursing, which identifies unknown LHS and
        ## creates the symTab entry.
        newExpr <- newAssignmentExpression()
        setArg(newExpr, 1, RparseTree2ExprClasses(as.name(newName)))
        setArg(newExpr, 2, code$args[[argID]]) ## The setArg function should set code$caller (to newExpr) and code$callerArgID (to 3)
        ans <- c(sizeAssignAfterRecursing(newExpr, symTab, typeEnv, NoEigenizeMap = TRUE), list(newExpr))
        newArgExpr <- RparseTree2ExprClasses(as.name(newName))
        newArgExpr$type <- newExpr$args[[1]]$type
        newArgExpr$sizeExprs <- newExpr$args[[1]]$sizeExprs
        if(!nimbleOptions('experimentalNewSizeProcessing')) {
            newArgExpr$toEigenize <- 'maybe'
        }
        newArgExpr$nDim <- newExpr$args[[1]]$nDim
    }
    ## Replace the original argument with the intermediate's name.
    setArg(code, argID, newArgExpr)
    return(ans) ## This is to be inserted in a list of asserts, even though it is really core code, not just an a test or assertion
}

## Size handler for assignment: recurses RHS first (with unknowns disallowed
## on the LHS pass), then the LHS, then finishes via sizeAssignAfterRecursing.
sizeAssign <- function(code, symTab, typeEnv) {
    typeEnv$.AllowUnknowns <- FALSE
    asserts <- recurseSetSizes(code, symTab, typeEnv, useArgs = c(FALSE, TRUE))
    typeEnv$.AllowUnknowns <- TRUE
    if(length(code$args) > 2){
        asserts <- c(asserts, exprClasses_setSizes(code, symTab, typeEnv))
    } else{
asserts <- c(asserts, recurseSetSizes(code, symTab, typeEnv, useArgs = c(TRUE, FALSE)))
        typeEnv[['.ensureNimbleBlocks']] <- FALSE ## may have been true from RHS of rmnorm etc.
        asserts <- c(asserts, sizeAssignAfterRecursing(code, symTab, typeEnv))
    }
    if(length(asserts) == 0) NULL else asserts
}

## Handler for assignment
## Called after both sides have been size-processed: reconciles LHS and RHS
## annotations, creates symbols for previously-unknown LHS names, decides
## whether the assignment is eigenized, and generates setSize assertions.
sizeAssignAfterRecursing <- function(code, symTab, typeEnv, NoEigenizeMap = FALSE) {
    LHS <- code$args[[1]]
    RHS <- code$args[[2]]
    ## Extract name/nDim/type/sizeExprs from the RHS, whether it is an
    ## exprClass or a literal.
    if(inherits(RHS, 'exprClass')) {
        RHSname <- RHS$name
        RHSnDim <- RHS$nDim
        RHStype <- RHS$type
        RHSsizeExprs <- RHS$sizeExprs
    } else {
        if(is.numeric(RHS) | is.logical(RHS)) {
            RHSname = ''
            RHSnDim <- 0
            RHStype <- storage.mode(RHS)
            RHSsizeExprs <- list()
        } else if(is.character(RHS)){
            RHSname = ''
            RHSnDim <- 0
            RHStype <- 'character'
            RHSsizeExprs <- list()
        } else {
            stop(exprClassProcessingErrorMsg(code, "In sizeAssignAfterRecursing: don't know what to do with a provided expression."), call. = FALSE)
        }
    }
    ## Wrapped in try() so a failure here can drop into browser() below for
    ## interactive diagnosis.
    test <- try(if(inherits(RHStype, 'uninitializedField') | length(RHStype)==0) {
        stop(exprClassProcessingErrorMsg(code, paste0("In sizeAssignAfterRecursing: '",RHSname, "' is not available or its output type is unknown.")), call. = FALSE)
    })
    if(inherits(test, 'try-error')) browser()
    if(LHS$isName) {
        if(!exists(LHS$name, envir = typeEnv, inherits = FALSE)) { ## not in typeEnv
            ## If LHS unknown, create it in typeEnv
            if(!symTab$symbolExists(LHS$name, TRUE)) { ## not in symTab
                if(RHStype %in% c('double','integer', 'logical')) { ## valid type to create here
                    ## We used to delay creating sizeExprs until below, but now it always generic
                    assign(LHS$name, exprTypeInfoClass$new(nDim = RHSnDim, type = RHStype, sizeExprs = makeSizeExpressions(rep(NA, RHSnDim), LHS$name)), envir = typeEnv)
                    symTab$addSymbol(symbolBasic(name = LHS$name, nDim = RHSnDim, type = RHStype))
                } else { ## not valid type to create here
                    if(RHStype == 'voidPtr') { ## This should be ok without sizeExprs content
                        assign(LHS$name, exprTypeInfoClass$new(nDim = RHSnDim, type = RHStype), envir = typeEnv)
                        symTab$addSymbol(symbolVoidPtr(name = LHS$name, type = RHStype))
                    }
                    ## a path for arbitrary symbols
                    else if(RHStype == "custom") {
                        ConlySym <- RHS$sizeExprs$copy() ## trick to put a symbol object here. use a copy in case this expr is from simple assignment, not creation
                        ConlySym$name <- LHS$name
                        symTab$addSymbol(ConlySym)
                        code$type <- "custom"
                        code$sizeExprs <- ConlySym ## in case there is chained assignment
                        return(invisible(NULL))
                    }
                    else if(RHStype == "nimbleList") {
                        ## I think we have the nlProc in the RHS sizeExprs in some cases?
                        LHSnlProc <- symTab$getSymbolObject(RHS$name)$nlProc
                        if(is.null(LHSnlProc)) LHSnlProc <- RHS$sizeExprs$nlProc
                        if(is.null(LHSnlProc)) LHSnlProc <- symTab$getSymbolObject(RHS$name, inherits = TRUE)$nlProc
                        symTab$addSymbol(symbolNimbleList(name = LHS$name, type = RHStype, nlProc = LHSnlProc))
                    } else if(symTab$symbolExists(RHStype, TRUE)){
                        ## this is TRUE if a nested nimbleFunction returns a nimbleList - the type of
                        ## the returned nimbleList will be a symbolNimbleListGenerator that exists
                        ## in the parent ST.
LHSnlProc <- symTab$getSymbolObject(RHStype, TRUE)$nlProc
                        symTab$addSymbol(symbolNimbleList(name = LHS$name, nlProc = LHSnlProc))
                    } else
                        stop(exprClassProcessingErrorMsg(code, paste0('In sizeAssignAfterRecursing: LHS is not in typeEnv or symTab and cannot be added now.')), call. = FALSE)
                }
            } else { ## yes in symTab
                ## this is another path for arbitrary symbols, but not sure it's used.
                ## This case is ok. It is in the symbol table but not the typeEnv. So it is something like ptr <- getPtr(A)
                if(!nimbleOptions('experimentalNewSizeProcessing')) {
                    code$toEigenize <- 'no'
                } ##experimentalNewSizeProcessing
                code$nDim <- 0
                code$type <- 'unknown'
                code$sizeExprs <- list()
                return(invisible(NULL))
            }
        } else { ## yes in typeEnv. must be symTab too.
            ## If LHS known, check if nDim matches RHS
            if(length(LHS$nDim) == 0) stop(exprClassProcessingErrorMsg(code, paste0('In sizeAssignAfterRecursing: nDim for LHS not set.')), call. = FALSE)
            if(length(RHSnDim) == 0) stop(exprClassProcessingErrorMsg(code, paste0('In sizeAssignAfterRecursing: nDim for RHS not set.')), call. = FALSE)
            if(LHS$nDim != RHSnDim) {
                ## Fixed: message() has no `call.` argument; the previous
                ## `call. = FALSE` was swallowed by `...` and pasted onto the
                ## printed text (".. Press Q to exitFALSE").
                message(paste0('Warning, mismatched dimensions in assignment: ', nimDeparse(code), '. Going to browser(). Press Q to exit'))
                browser()
            }
            ## and warn if type issue e.g. int <- double
            if(assignmentTypeWarn(LHS$type, RHStype)) {
                message(paste0('Warning, RHS numeric type is losing information in assignment to LHS.', nimDeparse(code)))
            }
        }
    }
    ## update size info in typeEnv
    assert <- NULL
    if((LHS$name == 'values' | LHS$name == 'valuesIndexRange') && length(LHS$args) %in% c(1,2)) {
        ## It is values(model_values_accessor[, index]) <- STUFF
        # triggered when we have simple assignment into values() without indexing of values()
        if(is.numeric(RHS)) stop(exprClassProcessingErrorMsg(code, paste0('In sizeAssignAfterRecursing: Cannot assign into values() from numeric.')), call.
= FALSE)
        ## Rewrite values(...) <- RHS into a setValues()/setValuesIndexRange()
        ## call with RHS as the first argument.
        code$name <- if(LHS$name == 'values') 'setValues' else 'setValuesIndexRange'
        code$args <- list(1 + length(LHS$args))
        setArg(code, 1, RHS)
        setArg(code, 2, LHS$args[[1]])
        if(length(LHS$args) == 2) setArg(code, 3, LHS$args[[2]]) # for indexed of form values(model, nodes[i])
        if(!(RHS$isName)) assert <- c(assert, sizeInsertIntermediate(code, 1, symTab, typeEnv) )
        return( if(length(assert) == 0) NULL else assert )
    }
    ## Note this can use LHS$name for RHSsizeExprs when returning from a nimbleFunction on RHS. But this is probably not needed any more.
    if(any(unlist(lapply(RHSsizeExprs, is.null)))) RHSsizeExprs <- makeSizeExpressions(rep(NA, RHSnDim), LHS$name)
    ## reset sizeExprs for the LHS var. re-using RHSsizeExprs for LHS. This would only be valid if it is a nimbleFunction returning something on the RHS. For assignment to be executed in Eigen, the RHS sizes MUST be known
    if(!nimbleOptions('experimentalNewSizeProcessing')) {
        if(LHS$toEigenize == 'yes') {
            code$toEigenize <- 'yes'
            ## message('Warning from sizeAssign: not expecting LHS to have toEigenize == yes')
        } else {
            ## Decide whether this assignment goes through Eigen or NimArr operator=.
            code$toEigenize <- if(inherits(RHS, 'exprClass')) {
                if(RHS$toEigenize == 'no') 'no'
                else {
                    if(RHS$toEigenize == 'unknown') 'no'
                    else {
                        if(RHS$toEigenize != 'yes' & (!(LHS$name %in% c('eigenBlock', 'diagonal', 'coeffSetter'))) & (RHS$nDim == 0 | RHS$isName | (RHS$name == 'map' & NoEigenizeMap))) 'no' ## if it is scalar or is just a name or a map, we will do it via NimArr operator= . Used to have "| RHS$name == 'map'", but this allowed X[1:3] <- X[2:4], which requires eigen, with eval triggered, to get right
                        else 'yes' ## if it is 'maybe' and non-scalar and not just a name, default to 'yes'
                    }
                }
            } else {
                if(is.numeric(LHS$nDim))
                    if(LHS$nDim > 0) 'yes' ## This is for cases like out[1:4] <- scalar
                    else 'no'
                else 'no'
            }
        }
        if(code$toEigenize == 'yes') { ## this would make more sense in eigenize_assign
            ## generate setSize(LHS, ...) where ... are dimension expressions
            if(length(RHSnDim) == 0) {
                message("confused about trying to eigenize something with nDim = 0")
                browser()
            }
            if(RHSnDim > 0) {
                if(!(RHS$name %in% setSizeNotNeededOperators)) {
                    if(LHS$isName | LHS$name == "nfVar") {
                        ## Simple LHS: emit setSize(LHS, <RHS sizes>, 0, 0).
                        assert <- substitute(setSize(LHS), list(LHS = nimbleGeneralParseDeparse(LHS)))
                        for(i in seq_along(RHSsizeExprs)) {
                            test <- try(assert[[i + 2]] <- RHS$sizeExprs[[i]])
                            if(inherits(test, 'try-error')) browser()
                        }
                        assert[[ length(assert) + 1]] <- 0 ## copyValues = false
                        assert[[ length(assert) + 1]] <- 0 ## fillZeros = false
                        assert <- list(assert)
                    } else {
                        ## We have an indexed LHS of an eigenizable expression
                        ## need special handling if it is a row assignment like x[i,] <- ...
                        ## also need to generate size assertions
                        if(LHS$nDim == 1) {
                            if(RHS$nDim == 2) {
                                if(is.numeric(RHS$sizeExprs[[1]])) {
                                    if(RHS$sizeExprs[[1]] == 1) {
                                        ## 1 x n RHS into a 1D LHS: wrap the LHS in asRow.
                                        newExpr <- insertExprClassLayer(code, 1, 'asRow', type = LHS$type)
                                        newExpr$sizeExprs <- RHS$sizeExprs
                                        newExpr$type <- LHS$type
                                        newExpr$nDim <- RHS$nDim
                                        if(!is.numeric(LHS$sizeExprs[[1]]) | !is.numeric(RHS$sizeExprs[[2]])) {
                                            ## Sizes not both constant: emit a run-time check.
                                            assertMessage <- paste0("Run-time size error: expected ", deparse(LHS$sizeExprs[[1]]), " == ", deparse(RHS$sizeExprs[[2]]))
                                            thisAssert <- identityAssert(LHS$sizeExprs[[1]], RHS$sizeExprs[[2]], assertMessage)
                                            if(!is.null(thisAssert)) assert[[length(assert) + 1]] <- thisAssert
                                        } else {
                                            if(LHS$sizeExprs[[1]] != RHS$sizeExprs[[2]]) stop(exprClassProcessingErrorMsg(code, paste0('In sizeAssignAfterRecursing: Fixed size mismatch.')), call. = FALSE)
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        } else {
            if(inherits(RHS, 'exprClass')) {
                ## If we have A <- map(B, ...), we need to generate a setMap for the RHS, which will be done by sizeInsertIntermediate
                if(RHS$name == 'map') assert <- c(assert, sizeInsertIntermediate(code, 2, symTab, typeEnv) )
            }
            if(inherits(LHS, 'exprClass')) { # ditto
                if(LHS$name == 'map') assert <- c(assert, sizeInsertIntermediate(code, 1, symTab, typeEnv) )
            }
        }
    } ##experimentalNewSizeProcessing
    if(!(LHS$name %in% c('eigenBlock', 'diagonal', 'coeffSetter', 'nimNonseqIndexedd', 'nimNonseqIndexedi','nimNonseqIndexedb'))) {
        ## should already be annotated if it is an indexed assignment.
        ## It should be harmless to re-annotated EXCEPT in case like out[1:5] <- scalar
        code$nDim <- code$args[[1]]$nDim <- RHSnDim
        code$type <- code$args[[1]]$type <- RHStype
        code$sizeExprs <- code$args[[1]]$sizeExprs <- RHSsizeExprs
    }
    if(RHSname %in% assignmentAsFirstArgFuns) {
        ## Rewrite LHS <- f(args...) as f(LHS, args...) for functions that take
        ## their assignment target as the first argument.
        code$name <- RHS$name
        oldArgs <- RHS$args
        LHS <- code$args[[1]] ## could have been reset by LHS$name == 'map' situation above
        code$args <- list(length(oldArgs) + 1)
        for(i in seq_along(oldArgs)) {
            setArg(code, i+1, oldArgs[[i]])
        }
        setArg(code, 1, LHS)
    }
    return(assert)
}

## Size handler for PROTECT(): marks the result as a "custom" C-only symbol.
sizePROTECT <- function(code, symTab, typeEnv) {
    ## Do not recurse.
    code$type <- "custom"
    code$sizeExprs <- symbolSEXP(type = 'custom') ## trick to put a symbol object into sizeExprs for later use
    return(invisible(NULL))
}

## Size handler for Reval(): renamed to the C-level Rf_eval and otherwise
## annotated exactly like PROTECT().
sizeReval <- function(code, symTab, typeEnv) {
    code$name <- 'Rf_eval'
    return(sizePROTECT(code, symTab, typeEnv))
}

## Size handler for nimbleConvert(): rewrites the call into
## nimArrPtr_copyIfNeeded(var, copy), creating the contiguous-copy symbol
## on first use. Only valid as the RHS of a simple assignment.
sizeNimbleConvert <- function(code, symTab, typeEnv) {
    asserts <- recurseSetSizes(code, symTab, typeEnv) ## should not normally have an expression other than variable name as the argument, but do this for safety
    nDim <- code$args[[1]]$nDim
    type <- code$args[[1]]$type
    if(!code$caller$name %in% assignmentOperators) stop(exprClassProcessingErrorMsg(code, 'nimbleConvert can only be used in simple assignment.'), call.
= FALSE) targetString <- nimDeparse(code$args[[1]]) targetName <- Rname2CppName(targetString) targetExpr <- parse(text = targetString, keep.source = FALSE)[[1]] copyName <- paste0(targetName, '_nimbleContigCopy') subList <- list(var = targetExpr, copy = as.name(copyName)) newCode <- substitute( nimArrPtr_copyIfNeeded(var, copy), subList ) ## only necessary if the result is needed if(!symTab$symbolExists( copyName )) { symTab$addSymbol( symbolBasic(name = copyName, type = type, nDim = nDim) ) assign(copyName, exprTypeInfoClass$new(nDim = nDim, type = type), envir = typeEnv) } newCode <- RparseTree2ExprClasses(newCode) newCode$type <- "custom" newCode$sizeExprs <- symbolPtr(type = type) ## trick to put a symbol object into sizeExprs for later use setArg(code$caller, code$callerArgID, newCode) asserts } sizeNimbleUnconvert <- function(code, symTab, typeEnv) { ptrString <- nimDeparse(code$args[[1]]) ptrName <- Rname2CppName(ptrString) ptrExpr <- parse(text = ptrString, keep.source = FALSE)[[1]] targetString <- nimDeparse(code$args[[2]]) targetName <- Rname2CppName(targetString) targetExpr <- parse(text = targetString, keep.source = FALSE)[[1]] copyName <- paste0(targetName, '_nimbleContigCopy') subList <- list(ptr = ptrExpr, var = targetExpr, copy = as.name(copyName)) newCode <- substitute( nimArrPtr_copyBackIfNeeded(ptr, var, copy), subList ) newCode <- RparseTree2ExprClasses(newCode) setArg(code$caller, code$callerArgID, newCode) NULL } sizeasDoublePtr <- function(code, symTab, typeEnv) { ## This could also handle copies from ints to doubles, which would ALWAYS require a copy asserts <- recurseSetSizes(code, symTab, typeEnv) nDim <- code$args[[1]]$nDim targetString <- nimDeparse(code$args[[1]]) targetName <- Rname2CppName(targetString) targetExpr <- parse(text = targetString, keep.source = FALSE)[[1]] ptrName <- paste0(targetName, '_DoublePtr') copyName <- paste0(targetName, '_contigCopy') subList <- list(var = targetExpr, copy = as.name(copyName), ptr = 
as.name(ptrName)) codeBefore <- substitute( if(isMap(var) ) { copy <- var; ptr <- getPtr(copy)} else {ptr <- getPtr(var)}, subList ) codeAfter <- substitute( after( if(isMap(var)) { mapCopy(var, copy) } ), ## after() tags the assertion to go after the code line subList ) if(!symTab$symbolExists( ptrName )) symTab$addSymbol( symbolPtr(name = ptrName, type = 'double') ) if(!symTab$symbolExists( copyName )) { symTab$addSymbol( symbolBasic(name = copyName, type = 'double', nDim = nDim) ) } codeBefore <- RparseTree2ExprClasses(codeBefore) exprClasses_initSizes(codeBefore, symTab, NULL, typeEnv) asserts <- c(asserts, exprClasses_setSizes(codeBefore, symTab, typeEnv)) codeAfter <- RparseTree2ExprClasses(codeAfter) asserts <- c(asserts, exprClasses_setSizes(codeAfter, symTab, typeEnv)) newArgExpr <- RparseTree2ExprClasses( substitute( ptr, subList) ) setArg(code$caller, code$callerArgID, newArgExpr) c(asserts, list(codeBefore, codeAfter)) } sizeScalar <- function(code, symTab, typeEnv) { ## use something different for distributionFuns ## length(model[[node]]) wasn't working because we were not doing recurseSetSize here ## However I am not sure if that is because there are cases where size expects a special argument we don't want to process (a modelValues?) 
## So I'm going to wrap it in a try() and suppress messages
    asserts <- try(recurseSetSizes(code, symTab, typeEnv), silent = TRUE)
    if(inherits(asserts, 'try-error')) asserts <- list()
    if(code$args[[1]]$toEigenize == 'yes') {
        ## a first argument marked for eigenization is lifted to an intermediate variable
        asserts <- c(asserts, sizeInsertIntermediate(code, 1, symTab, typeEnv))
    }
    ## else {
    ##     asserts <- NULL
    ## }
    ## annotate this call as a scalar result
    code$nDim <- 0
    outputType <- scalarOutputTypes[[code$name]]
    if(is.null(outputType)) code$type <- 'double'
    else code$type <- outputType
    code$sizeExprs <- list()
    code$toEigenize <- 'maybe' ## a scalar can be eigenized or not
    ##invisible(NULL)
    asserts
}

## Size handler for scalar-returning model operations whose first argument is a
## model/values accessor that is skipped during size processing.  Annotates `code`
## (an exprClass node) as a scalar result and returns any size-assertion lines
## generated while processing the remaining arguments.
sizeScalarModelOp <- function(code, symTab, typeEnv) {
    if(length(code$args) > 1) {
        ## recurse into all arguments except the first
        asserts <- recurseSetSizes(code, symTab, typeEnv, useArgs = c(FALSE, rep(TRUE, length(code$args)-1)))
        for(i in 2:length(code$args)) {
            if(inherits(code$args[[i]], 'exprClass')) {
                ## lift any argument expression marked for eigenization into an intermediate
                if(code$args[[i]]$toEigenize=='yes') asserts <- c(asserts, sizeInsertIntermediate(code, i, symTab, typeEnv))
            }
        }
        if(inherits(code$args[[2]], 'exprClass')) { ## There is an index expression that may be non-scalar
            if(code$args[[2]]$nDim > 0) {
                ## It is non-scalar so we need to set a logical argument about whether is it a logical or numeric vector
                code$args[[ length(code$args)+1 ]] <- as.integer(code$args[[2]]$type == 'logical')
            }
        }
    } else {
        asserts <- list()
    }
    if(code$args[[1]]$toEigenize == 'yes') { ## not sure when this would be TRUE
        asserts <- c(asserts, sizeInsertIntermediate(code, 1, symTab, typeEnv))
    }
    ## annotate as a scalar; output type looked up in scalarOutputTypes, defaulting to double
    code$nDim <- 0
    outputType <- scalarOutputTypes[[code$name]]
    if(is.null(outputType)) code$type <- 'double'
    else code$type <- outputType
    code$sizeExprs <- list()
    code$toEigenize <- 'maybe'
    asserts
}

## Size handler for simulate(): processes arguments like sizeScalarModelOp, but the
## call produces no usable value so the type is set to NA below.  Returns the
## accumulated size assertions.
sizeSimulate <- function(code, symTab, typeEnv) {
    if(length(code$args) > 1) {
        asserts <- recurseSetSizes(code, symTab, typeEnv, useArgs = c(FALSE, rep(TRUE, length(code$args)-1)))
        for(i in 2:length(code$args)) {
            if(inherits(code$args[[i]], 'exprClass')) {
                if(code$args[[i]]$toEigenize=='yes') asserts <- c(asserts, sizeInsertIntermediate(code,
i, symTab, typeEnv))##toEigenize <- 'yes'
            }
        }
        if(inherits(code$args[[2]], 'exprClass')) { ## There is an index expression that may be non-scalar
            if(code$args[[2]]$nDim > 0) {
                ## It is non-scalar so we need to set a logical argument about whether is it a logical or numeric vector
                code$args[[ length(code$args)+1 ]] <- as.integer(code$args[[2]]$type == 'logical')
            }
        }
    } else {
        asserts <- list()
    }
    ## simulate() yields no usable value: annotate as a typeless scalar
    code$nDim <- 0
    code$type <- as.character(NA)
    code$sizeExprs <- list()
    code$toEigenize <- 'maybe'
    return(asserts)
}

## Size handler for scalar-returning calls that also lifts every non-name argument
## expression into an intermediate variable.  Annotates `code` as a scalar and
## returns the accumulated size assertions (or NULL if there are none).
sizeScalarRecurse <- function(code, symTab, typeEnv, recurse = TRUE) { ## use something different for distributionFuns
    asserts <- if(recurse) recurseSetSizes(code, symTab, typeEnv) else list()
    ## This just forces any argument expression to be lifted. Can we lift only things to be eigenized?
    for(i in seq_along(code$args)) {
        if(inherits(code$args[[i]], 'exprClass')) {
            if(!code$args[[i]]$isName) {
                asserts <- c(asserts, sizeInsertIntermediate(code, i, symTab, typeEnv) )
            }
        }
    }
    ## scalar annotation; output type looked up in scalarOutputTypes, defaulting to double
    code$nDim <- 0
    outputType <- scalarOutputTypes[[code$name]]
    if(is.null(outputType)) code$type <- 'double'
    else code$type <- outputType
    code$sizeExprs <- list()
    code$toEigenize <- 'maybe' ## a scalar can be eigenized or not
    if(length(asserts)==0) NULL else asserts
}

## Size handler for expressions with no meaningful size information: annotate as a
## typeless scalar and generate no assertions.
sizeUndefined <- function(code, symTab, typeEnv) {
    code$nDim <- 0
    code$type <- as.character(NA)
    code$sizeExprs <- list()
    code$toEigenize <- 'maybe'
    invisible(NULL)
}

## Dispatcher for operators that may be unary or binary component-wise: routes to
## sizeUnaryCwise or sizeBinaryCwise based on the number of arguments.
sizeBinaryUnaryCwise <- function(code, symTab, typeEnv) {
    if(length(code$args) == 1) return(sizeUnaryCwise(code, symTab, typeEnv))
    if(length(code$args) == 2) return(sizeBinaryCwise(code, symTab, typeEnv))
    stop(exprClassProcessingErrorMsg(code, paste0('In sizeBinaryUnarycWise: Length of arguments is not 1 or 2.')), call.
= FALSE) } sizemvAccessBracket <- function(code, symTab, typeEnv) { ## this gets called from sizeIndexingBracket, so recurse has already been done asserts <- recurseSetSizes(code, symTab, typeEnv, useArgs = c(FALSE, TRUE)) if(length(code$args) != 2) { stop(exprClassProcessingErrorMsg(code, paste0('In sizemvAccessBracket: Wrong number of indices provided.')), call. = FALSE) } if(inherits(code$args[[2]], 'exprClass')) { if(code$args[[2]]$nDim != 0) stop(exprClassProcessingErrorMsg(code, paste0('In sizemvAccessBracket: Index is not a scalar.')), call. = FALSE) } sym <- symTab$getSymbolObject(code$args[[1]]$name, TRUE) ## This is the symbolVecNimArrPtr code$type = sym$type code$nDim = sym$nDim code$sizeExprs <- as.list(sym$size) code$toEigenize <- 'maybe' code$name <- 'mvAccessRow' if(length(asserts)==0) NULL else asserts } sizeIndexingBracket <- function(code, symTab, typeEnv) { ## This is for X[i, j], viewed as `[`(X, i, j), where there may be different numbers of indices, and they may be scalars, sequences defined by `:`, or arbitrary (nonSequence) vectors of integers or logicals. ## X itself could be Y[k, l] (or the result of processing it) or map(Y, k, l), which is created if Y is a model variable and we know we need a map into but at the point it is created there is no processing of how it should be represented, so it is just represented as an abstract map. ## recurse into arguments asserts <- recurseSetSizes(code, symTab, typeEnv) ## Check two special cases ## This is from modelValues: if(code$args[[1]]$type == 'symbolVecNimArrPtr') return(c(asserts, sizemvAccessBracket(code, symTab, typeEnv))) ## This is deprecated: if(code$args[[1]]$type == 'symbolNumericList') return(c(asserts, sizemvAccessBracket(code, symTab, typeEnv))) ## Iterate over arguments, lifting any logical indices into which() ## e.g. X[i, bool] becomes X[i, Interm1], with Interm1 <- which(bool) as an assert. 
for(i in seq_along(code$args)) { if(i == 1) next if(inherits(code$args[[i]], 'exprClass')) { if(code$args[[i]]$name != "") if(code$args[[i]]$type == 'logical') { ## first insert which, then lift to intermediate newExpr <- insertExprClassLayer(code, i, 'which') useBool <- rep(FALSE, length(code$args)) useBool[i] <- TRUE asserts <- c(asserts, recurseSetSizes(code, symTab, typeEnv, useBool)) } } } ## Collect information about the number of dimensions and value of a drop argument if provided ## nDimVar is nDim of X nDimVar <- code$args[[1]]$nDim dropBool <- TRUE dropArgProvided <- FALSE if(!is.null(names(code$args))) if('drop' %in% names(code$args)) { dropArgProvided <- TRUE iDropArg <- which(names(code$args) == 'drop') } if(nDimVar != length(code$args) - 1 - dropArgProvided) { ## check if number of indices is correct ## only valid case with fewer index arguments than source dimensions is matrix[indices], where matrix can be treated as a vector if(!( (nDimVar == 2) & (length(code$args) - dropArgProvided) == 1)) { msg <- paste0('Error, wrong number of indices provided for ', nimDeparse(code),'.') stop(exprClassProcessingErrorMsg(code, msg), call. = FALSE) } } ## pick out the drop argument and check if it is logical if(dropArgProvided) { dropBool <- code$args[[iDropArg]] if(!is.logical(dropBool)) { msg <- paste0(msg, "(A drop argument must be hard-coded as TRUE or FALSE, not given as a variable.)") stop(exprClassProcessingErrorMsg(code, msg), call. 
= FALSE) } } ## These initial annotations may change later code$nDim <- nDimVar code$type <- code$args[[1]]$type ## Initialize sizeExprs code$sizeExprs <- vector('list', length = nDimVar) ## (We could generate asserts here to ensure sub-indexing is within bounds) ## needMap will become TRUE below unless all indices are scalars needMap <- FALSE ## Track whether if all index ranges are defined by `:` or by scalar ## simpleBlockOK will be TRUE if all index vectors and sequential, defined by `:` simpleBlockOK <- TRUE iSizes <- 1 ## Iternate over dimensions of X and see which dimensions will be dropped from X[i,j,k] due to scalar indices, if drop = TRUE for(i in 1:nDimVar) { dropThisDim <- FALSE ## If the index is numeric, drop this dimension if(is.numeric(code$args[[i+1]])) dropThisDim <- TRUE ## If the index is not numeric but it is not a blank and its sizeExprs reveal it is a scalar-equivalent, drop this dimension else if((code$args[[i+1]]$name != "") & (length(dropSingleSizes(code$args[[i+1]]$sizeExprs)$sizeExprs) == 0)) dropThisDim <- TRUE ## Is this indices an expression? 
isExprClass <- inherits(code$args[[i+1]], 'exprClass') ## if(dropThisDim) { ## The index is a scalar if(nimbleOptions()$indexDrop & dropBool) { ## And flags allow dropping code$sizeExprs[[iSizes]] <- NULL ## Remove that sizeExpr element code$nDim <- code$nDim - 1 ## reduce dimensions of result by 1 } else { code$sizeExprs[[iSizes]] <- 1; iSizes <- iSizes + 1 ## If we are not droping dimensions, set sizeExpr to 1 } next } else { ## not dropping a dimension, so the index is non-scalar if(isExprClass) ## If it is an expression that is not `:` or blank, then a simple block is not allowed if((code$args[[i+1]]$name != ':') && (code$args[[i+1]]$name != "")) simpleBlockOK <- FALSE } needMap <- TRUE ## If the "next" in if(dropThisDim) {} is always hit, then needMap will never be set to TRUE ## Update sizeExprs if(isExprClass) { if(code$args[[i+1]]$name != "") { ## An entry that is a variable possibly with a length code$sizeExprs[[iSizes]] <- code$args[[i+1]]$sizeExprs[[1]] } else { ## blank entry (e.g. A[,i]) is an exprClass with isName = TRUE and name = "" code$sizeExprs[[iSizes]] <- code$args[[1]]$sizeExprs[[i]] ## also at this point we will fill in a `:` expression for the indices, ## so now we have e.g. A[ 1:dim(A)[1], i ] newIndexExpr <- RparseTree2ExprClasses(substitute(1:N, list(N = code$args[[1]]$sizeExprs[[i]]))) setArg(code, i+1, newIndexExpr) useArgs <- rep(FALSE, length(code$args)) useArgs[i+1] <- TRUE asserts <- c(asserts, recurseSetSizes(code, symTab, typeEnv, useArgs)) } iSizes <- iSizes + 1 next } } ## did all dims get dropped? if(length(code$sizeExprs)==0) { code$sizeExprs <- list() ## it was a named, list. this creates consistency. 
maybe unnecessary ##needMap will be FALSE if we are in this clause ## We need to check whether X is an expression that needs to be lifted, say (A + B)[2, 3] ## We could do better for these cases if(!code$args[[1]]$isName) ## It's not a name if(!(code$args[[1]]$name %in% operatorsAllowedBeforeIndexBracketsWithoutLifting)) {## e.g. 'mvAccessRow' ## At this point we have decided to lift, and the next two if()s determine if that is weird due to being on LHS of assignment if(code$caller$name %in% assignmentOperators) if(code$callerArgID == 1) stop(exprClassProcessingErrorMsg(code, 'There is a problem on the left-hand side of an assignment'), call. = FALSE) asserts <- c(asserts, sizeInsertIntermediate(code, 1, symTab, typeEnv)) } } code$toEigenize <- 'maybe' if(needMap) { ## If this is a map on an *expression* that is not a map, we used to always lift it ## e.g. (A + B)[1:4] must become (Interm <- A + B; Interm[1:4]) ## Now we only need to lift it if the map will not be impemented via eigenBlock ## for nested blocking, we have (nonseq | eigenBlock | map) x (nonseq | eigenBlock | map) ## [ coeffSetter is a version of nonseq ] ## where nonseq means non-sequential indices, eigenBlock means sequential indices, and map means a model or modelValues variable marked abstractly for a map ## (map) x (eigenBlock | map) is already handled ## (map) x (nonseq) is already handled ## (eigenBlock) x (eigenBlock) is already handled ## ## check whether to nest the indexing directly ## nestIndexing TRUE means we will convert X[i, j][k, l] into X[ i[k], j[l] ] (while we are working on `[`(X[i, j], k, l) ## We do this for nested indexing except (eigenBlock) x (eigenBlock), which means all indices are sequential ## Then we just generate .block(..).block(..) 
nestIndexing <- FALSE ## code$args[[1]] is the X[i, j] if(!code$args[[1]]$isName) { ## In X[i], X is an expression ## X is an indexing expression of some kind (other than a map, which is already a new object) ## It can't be coeffSetter at this point in processing flow, because the nestedness implies its caller was not <- if(code$args[[1]]$name %in% c('eigenBlock', 'nimNonseqIndexedd' ,'nimNonseqIndexedi' ,'nimNonseqIndexedb' )) { ## if it is not (eigenBlock) x (eigenBlock) if(!( (code$args[[1]]$name == 'eigenBlock') & (simpleBlockOK))) nestIndexing <- TRUE } } ## implement nestIndexing if(nestIndexing) { ## We have something like `[`( eigenBlock(X, i, j), k, l) or `[`( nimNonseqIndexedd(X, i, j), k, l) ## We will gradually take over the first argument to construct something that will end up like nimNonseqIndexedd(X, nimNonseqIndexedi(i, k), nimNonseqIndexedi(j, l)) ## The first one was an eigenBlock (all sequential integer indices defined by `:` or blank imputed with `:`) if(code$args[[1]]$name == 'eigenBlock') { ## We have `[`( eigenBlock(X, i, j), k, l) ## ## reach down to X and rename it ## put `:`(start, finish) back together. ## ## If we are in `[`( eigenBlock(X, i, j), k, l) <- Z, ## convert to `[`( coeffSetter(X, i, j), k, l) <- Z, if(code$caller$name %in% assignmentOperators & code$callerArgID == 1) { code$args[[1]]$name <- 'coeffSetter' } else { ## otherwise, convert `[`( eigenBlock(X, i, j), k, l) to `[`( nimNonseqIndexedd(X, i, j), k, l), e.g. 
if(code$type == 'double') code$args[[1]]$name <- 'nimNonseqIndexedd' if(code$type == 'integer') code$args[[1]]$name <- 'nimNonseqIndexedi' if(code$type == 'logical') code$args[[1]]$name <- 'nimNonseqIndexedb' } } else { ## The first one was a nonSeq ## it was already nonseq, but it might need to become coeffSetter ## If we are in `[`( nimNonseqIndexedd(X, i, j), k, l) <- Z, ## convert to `[`( coeffSetter(X, i, j), k, l) <- Z, if(code$caller$name %in% assignmentOperators & code$callerArgID == 1) { code$args[[1]]$name <- 'coeffSetter' } } ## Now construct the nesting i[k], j[l], etc. nestedNinds <- length(code$args[[1]]$args)-1 nestedNdim <- code$args[[1]]$nDim nestedDropBool <- TRUE nestedDropArgProvided <- FALSE if(!is.null(names(code$args[[1]]$args))) ## does nimNonseqIndexedd(X, i, j) or coeffSetter(X, i, j) have named arguments? if("drop" %in% names(code$args[[1]]$args)) { ## is drop among the names? nestedDropArgProvided <- TRUE nestedDropBool <- code$args[[1]]$args[[ which(names(code$args[[1]]$args) == 'drop') ]] nestedNinds <- nestedNinds - 1 } nestedBlockBool <- rep(TRUE, nestedNinds) ## is it preserved as a block (can still be scalar if nestedDropBool is FALSE) nestedScalarIndex <- rep(FALSE, nestedNinds) ## Of the indices of nimNonseqIndexedd(X, i, j) or coeffSetter(X, i, j) ## which are scalars, and which are blocks ## If we have nimNonseqIndexedd(X, i, j, drop = FALSE) or coeffSetter(X, i, j, drop = FALSE), ## then we treat all dimensions as blocks, even if scalar indices for(iInd in 1:nestedNinds) { if(is(code$args[[1]]$args[[iInd+1]], 'exprClass')) { if(code$args[[1]]$args[[iInd+1]]$nDim == 0) { nestedScalarIndex[iInd] <- TRUE if(nestedDropBool) nestedBlockBool[iInd] <- FALSE } } else { nestedScalarIndex[iInd] <- TRUE if(nestedDropBool) nestedBlockBool[iInd] <- FALSE } } ## Re-annotate first arg code$args[[1]]$sizeExprs <- code$sizeExprs code$args[[1]]$nDim <- code$nDim code$args[[1]]$type <- code$type numIndices <- length(code$args) - 1 - 
dropArgProvided ## Do we need to set drop carefully? ## NEED TO SKIP SCALARS IF dropBool = TRUE for nested case. nestedInds <- which(nestedBlockBool) if(length(nestedInds) != numIndices) stop(exprClassProcessingErrorMsg(code, 'Wrong number of nested indices.'), call.=FALSE) ## iterate over indices, constructing i[j] if necessary for(iInd in 1:numIndices) { nestedIind <- nestedInds[iInd] nestedIndexIsScalar <- if(inherits(code$args[[1]]$args[[nestedIind + 1]], 'exprClass')) code$args[[1]]$args[[nestedIind + 1]]$nDim == 0 else TRUE if(nestedIndexIsScalar) { ## check: ## In X[i, j][k, l], if i is scalar, k should also be scalar (can't check its value now, but should be 1 at run-time) indexIsScalar <- if(inherits(code$args[[iInd+1]], 'exprClass')) code$args[[iInd+1]]$nDim == 0 else TRUE if(!indexIsScalar) warning("There is nested indexing with drop=FALSE where an index must be scalar but isn't") } else { ## construct i[k], which is really nimNonseqIndexedi(i, k) newExpr <- exprClass(name = 'nimNonseqIndexedi', isName = FALSE, isCall = TRUE, isAssign = FALSE) newExpr$type <- 'integer' indexIsScalar <- if(inherits(code$args[[iInd+1]], 'exprClass')) code$args[[iInd+1]]$nDim == 0 else TRUE newExpr$sizeExprs <- if(!indexIsScalar) c(code$args[[iInd + 1]]$sizeExprs) else list(1) newExpr$nDim <- 1 newExpr$toEigenize <- 'yes' setArg(newExpr, 1, code$args[[1]]$args[[nestedIind + 1]]) setArg(newExpr, 2, code$args[[iInd + 1]]) setArg(newExpr, 3, 1) setArg(code$args[[1]], nestedIind + 1, newExpr) } } ## The only remaining use of a drop argument is during eigenization to determine if 1xn needs a transpose to become nx1 ## For that purpose, the drop arg of X[ i[k], j[l] ] should be from the outer part of `[`(X[i, j, drop = TRUE|FALSE], k, l, drop = [TRUE|FALSE]), not from the X[i,j] code$args[[1]]$args[['drop']] <- if(dropArgProvided) dropBool else TRUE ## clear remaining indices ## i.e. 
turn `[`( nimNonseqIndexedd(X, nimNonseqIndexedi(i, k), nimNonseqIndexedi(j, l)), k, l) ## into `[`( nimNonseqIndexedd(X, nimNonseqIndexedi(i, k), nimNonseqIndexedi(j, l))) code$args[1+(1:numIndices)] <- NULL codeCaller <- code$caller codeCallerArgID <- code$callerArgID ## remove the `[` layer of the current processing ## i.e. turn `[`( nimNonseqIndexedd(X, nimNonseqIndexedi(i, k), nimNonseqIndexedi(j, l))) into ## imNonseqIndexedd(X, nimNonseqIndexedi(i, k), nimNonseqIndexedi(j, l)) removeExprClassLayer(code) code <- codeCaller$args[[codeCallerArgID]] return(if(length(asserts)==0) NULL else asserts) } ## Now we are in the case where there is no nested indexing, or if there is X[i, j][k, l], it can be chained eigen blocks ## Replace with a map expression if needed if(!simpleBlockOK) { if(typeEnv$.ensureNimbleBlocks) { stop(exprClassProcessingErrorMsg(code, "LHS indexing for a multivariate random draw can only use sequential blocks (via ':')."), call. = FALSE) } ## If this is part of X[i, j] <- Z, convert to coeffSetter(X, i, j) <- Z if(code$caller$name %in% assignmentOperators & code$callerArgID == 1) { code$name <- 'coeffSetter' } else { ## otherwise convert `[`(X, i, j) to e.g. nimNonseqIndexedd(X, i, j) if(code$type == 'double') code$name <- 'nimNonseqIndexedd' ## this change could get moved to genCpp_generateCpp if(code$type == 'integer') code$name <- 'nimNonseqIndexedi' if(code$type == 'logical') code$name <- 'nimNonseqIndexedb' } ## If we have nimNonseqIndexedd(X, i), make it nimNonseqIndexedd(X, i, 1) for Eigen if(length(code$args) - 1 - dropArgProvided == 1) ## only 1 index code$args[[3]] <- 1 ## fill in extra 1 for a second dimension. ## should the index depend on dropArgProvided? } else { ## a simpleBlock is ok if(code$args[[1]]$nDim > 2 | typeEnv$.ensureNimbleBlocks) { ## old-style blocking from >2D down to 2D or 1D, or this is LHS for something like rmnorm, requiring a non-eigen map on LHS. 
## We have X[i, j, k] where X has dimension > 2
            if(dropArgProvided) code$args[[iDropArg]] <- NULL
            newExpr <- makeMapExprFromBrackets(code, dropBool)
            ## copy the size annotations from the bracket expression onto the new map expression
            newExpr$sizeExprs <- code$sizeExprs
            newExpr$type <- code$type
            newExpr$nDim <- code$nDim
            newExpr$toEigenize <- code$toEigenize
            setArg(code$caller, code$callerArgID, newExpr)
        } else {
            ## blocking via Eigen
            ##
            ## note that any expressions like sum(A) in 1:sum(A) should have already been lifted
            code$name <- 'eigenBlock'
            code$toEigenize <- 'yes'
        }
    }
}
    if(length(asserts)==0) NULL else asserts
}

## Return TRUE if `code` is integer-valued: an exprClass node annotated with type
## 'integer', or an R constant that is a non-logical whole number.  Used by sizeSeq
## to decide whether a sequence call can be simplified to the `:` operator.
isIntegerEquivalent <- function(code) {
    if(inherits(code, 'exprClass')) {
        if(code$type == 'integer') return(TRUE)
        return(FALSE)
    }
    if(is.logical(code)) return(FALSE)
    if(storage.mode(code) == 'integer') return(TRUE)
    code == floor(code) ## storage.mode must be 'double' so check if it's equivalent to an integer
}

## Size handler for seq()-style calls (nimSeq / nimSeqBy / nimSeqLen / nimSeqByLen).
## Integer from/to with a default or unit step and no length.out collapses to the `:`
## operator; otherwise the call is renamed to the appropriate nimSeq*D form and
## annotated (below) as a 1-dimensional double vector whose length expression is
## computed here.  Returns the accumulated size assertions, or NULL if none.
sizeSeq <- function(code, symTab, typeEnv, recurse = TRUE) {
    asserts <- if(recurse) recurseSetSizes(code, symTab, typeEnv) else list()
    ## which optional arguments were provided is encoded in the call name
    byProvided <- code$name == 'nimSeqBy' | code$name == 'nimSeqByLen'
    lengthProvided <- code$name == 'nimSeqLen' | code$name == 'nimSeqByLen'
    integerFrom <- isIntegerEquivalent(code$args[[1]])
    integerTo <- isIntegerEquivalent(code$args[[2]])
    liftExprRanges <- TRUE
    if(integerFrom && integerTo) {
        ## integer endpoints with default or unit `by` and no length.out: use `:` directly
        if((!byProvided && !lengthProvided) || (byProvided && !lengthProvided && is.numeric(code$args[[3]]) && code$args[[3]] == 1)) {
            code$name = ':'
            asserts <- c(asserts, sizeColonOperator(code, symTab, typeEnv, recurse = FALSE))
            return(if(length(asserts)==0) NULL else asserts)
        }
    } else {
        if(!byProvided && !lengthProvided) {
            ## impute the default step of 1
            code$args[[3]] <- 1
            byProvided <- TRUE
        }
        if(byProvided) {
            code$name <- 'nimSeqByD'
            ## lift any expression arguments
            for(i in 1:2) {
                if(inherits(code$args[[i]], 'exprClass')) {
                    if(!code$args[[i]]$isName) {
                        asserts <- c(asserts, sizeInsertIntermediate(code, i, symTab, typeEnv) )
                    }
                }
            }
            if(lengthProvided) {
                code$name <- 'nimSeqByLenD'
                thisSizeExpr <- parse(text = nimDeparse(code$args[[4]]),
keep.source = FALSE)[[1]] } else { thisSizeExpr <- substitute(calcSeqLength(FROM_, TO_, BY_),##1 + floor((TO_ - FROM_) / BY_), list(FROM_ = parse(text = nimDeparse(code$args[[1]]), keep.source = FALSE)[[1]], TO_ = parse(text = nimDeparse(code$args[[2]]), keep.source = FALSE)[[1]], BY_ = parse(text = nimDeparse(code$args[[3]]), keep.source = FALSE)[[1]])) } } else { ## must be lengthProvided code$name <- 'nimSeqLenD' thisSizeExpr <- parse(text = nimDeparse(code$args[[4]]), keep.source = FALSE)[[1]] } } code$type <- 'double' ## only remaining case to catch here is -1 integer sequences, which we don't move to `:` code$sizeExprs <- list(thisSizeExpr) code$toEigenize <- 'yes' code$nDim <- 1 return(if(length(asserts)==0) NULL else asserts) } sizeColonOperator <- function(code, symTab, typeEnv, recurse = TRUE) { asserts <- if(recurse) recurseSetSizes(code, symTab, typeEnv) else list() if(length(code$args) != 2) stop(exprClassProcessingErrorMsg(code, 'In sizeColonOperator: Problem determining size for : without two arguments.'), call. = FALSE) for(i in 1:2) { if(inherits(code$args[[i]], 'exprClass')) { if(!code$args[[i]]$isName) { if(! (code$args[[i]]$name == '[' && (code$args[[i]]$args[[1]]$name == 'dim' && code$args[[i]]$args[[1]]$args[[1]]$name == 'nfVar'))){ asserts <- c(asserts, sizeInsertIntermediate(code, i, symTab, typeEnv) ) } } } } code$type <- 'double' code$nDim <- 1 code$toEigenize <- 'maybe' ## could generate an assertiong that second arg is >= first arg if(is.numeric(code$args[[1]]) & is.numeric(code$args[[2]])) { code$sizeExprs <- list(code$args[[2]] - code$args[[1]] + 1) } else { ## at least one part is an expression ## This is an awkward case: ## sizeExprs are R parse trees, not exprClasses ## But in this case, we want the expression from an exprClass. 
## so we need to nimDeparse and then parse them
        code$sizeExprs <- list(substitute( A - B + 1,
                                          list(A = parse(text = nimDeparse(code$args[[2]]), keep.source = FALSE)[[1]],
                                               B = parse(text = nimDeparse(code$args[[1]]), keep.source = FALSE)[[1]] ) ) )
    }
    invisible(asserts)
}

## Size handler for t() (transpose).  A matrix has its two size expressions swapped;
## a vector becomes a 1 x n row via the asRow annotation; transpose of a numeric
## scalar constant is stripped entirely.  Returns the assertions from sizeUnaryCwise.
sizeTranspose <- function(code, symTab, typeEnv) {
    if(length(code$args) != 1) warning(paste0('More than one argument to transpose in ', nimDeparse(code), '.'), call. = FALSE)
    ans <- sizeUnaryCwise(code, symTab, typeEnv)
    if(is.numeric(code$args[[1]])) {
        ## transposing a scalar constant is a no-op: remove the transpose layer
        warning(paste0('Confused by transpose of a numeric scalar in ', nimDeparse(code), '. Will remove transpose.'), call. = FALSE)
        removeExprClassLayer(code$caller, 1)
        return(ans)
    }
    code$toEigenize <- 'yes'
    code$type <- code$args[[1]]$type
    if(length(code$sizeExprs) == 2) {
        ## matrix case: swap row and column size expressions
        if(code$nDim != 2) warning(paste0('In sizeTranspose, there are 2 sizeExprs but nDim != 2'), call. = FALSE)
        code$sizeExprs <- c(code$sizeExprs[2], code$sizeExprs[1])
    } else if(length(code$sizeExprs) == 1) {
        ## vector case: t(v) becomes a 1 x n row
        if(code$nDim != 1) warning(paste0('In sizeTranspose, there is 1 sizeExpr but nDim != 1'), call. = FALSE)
        code$name <- 'asRow'
        code$sizeExprs <- c(list(1), code$sizeExprs[[1]])
        code$nDim <- 2
    }
    return(ans)
}

## Return the type string of an argument: the annotated type for an exprClass node,
## otherwise the storage mode of the R constant.
getArgumentType <- function(expr) {
    if(inherits(expr, 'exprClass')) {
        expr$type
    } else storage.mode(expr)
}

## Determine the return type of operator `keyword` applied to an argument of type
## `argType`, via the returnTypeHandling table: 1 = double, 2 = integer,
## 3 = logical, 4 = same as argument, 5 = argument type with logical promoted to
## integer.  Operators absent from the table default to double.
setReturnType <- function(keyword, argType) {
    handling <- returnTypeHandling[[keyword]]
    if(is.null(handling)) return('double')
    switch(handling,
           'double', ##1
           'integer', ##2
           'logical', ##3
           argType, ##4
           if(argType == 'logical') 'integer' else argType ##5
           )
}

## Handler for unary functions that operate component-wise
## The result inherits the dimensions and size expressions of its single argument.
sizeUnaryCwise <- function(code, symTab, typeEnv) {
    if(length(code$args) != 1){
        stop(exprClassProcessingErrorMsg(code, 'sizeUnaryCwise called with argument length != 1.'), call.
= FALSE)
    }
    asserts <- recurseSetSizes(code, symTab, typeEnv)
    ## lift intermediates
    a1 <- code$args[[1]]
    if(inherits(a1, 'exprClass')) {
        if(!nimbleOptions('experimentalNewSizeProcessing') ) {
            if(a1$toEigenize == 'no') {
                ## a non-eigenizable argument expression must be lifted first
                asserts <- c(asserts, sizeInsertIntermediate(code, 1, symTab, typeEnv))
                a1 <- code$args[[1]]
            }
        }
        ## component-wise: result has the same dimensions and sizes as the argument
        code$nDim <- a1$nDim
        code$sizeExprs <- a1$sizeExprs
    } else {
        code$nDim <- 0
        code$sizeExprs <- list()
    }
    code$type <- setReturnType(code$name, getArgumentType(a1))
    if(length(code$nDim) != 1) stop(exprClassProcessingErrorMsg(code, 'In sizeUnaryCwise: nDim is not set.'), call. = FALSE)
    if(!nimbleOptions('experimentalNewSizeProcessing') ) code$toEigenize <- if(code$nDim > 0) 'yes' else 'maybe'
    return(asserts)
}

## currently only inprod(v1, v2)
## Size handler for binary reductions: both arguments must be expressions (not
## constants); the result is annotated as an eigenizable double scalar.  Returns
## the accumulated size assertions, or NULL if none.
sizeBinaryReduction <- function(code, symTab, typeEnv) {
    if(length(code$args) != 2) stop(exprClassProcessingErrorMsg(code, 'In sizeBinaryReduction: argument length != 2'), call. = FALSE)
    asserts <- recurseSetSizes(code, symTab, typeEnv)
    a1 <- code$args[[1]]
    a2 <- code$args[[2]]
    ok <- TRUE
    if(inherits(a1, 'exprClass')) {
        if(a1$toEigenize == 'no') {
            ## lift a non-eigenizable first argument to an intermediate
            asserts <- c(asserts, sizeInsertIntermediate(code, 1, symTab, typeEnv))
            a1 <- code$args[[1]]
        }
    } else {
        ok <- FALSE
    }
    if(inherits(a2, 'exprClass')) {
        if(a2$toEigenize == 'no') {
            ## lift a non-eigenizable second argument to an intermediate
            asserts <- c(asserts, sizeInsertIntermediate(code, 2, symTab, typeEnv))
            a2 <- code$args[[2]]
        }
    } else {
        ok <- FALSE
    }
    if(!ok) stop(exprClassProcessingErrorMsg(code, 'Cannot call inprod or other binary reduction operator with constant argument.'), call. = FALSE)
    ## result is a double scalar computed via Eigen
    code$nDim <- 0
    code$sizeExprs <- list()
    code$type <- 'double'
    code$toEigenize <- 'yes'
    if(length(asserts) == 0) NULL else asserts
}

## things like trace, det, logdet
## Size handler for reductions of a square matrix to a scalar.
sizeMatrixSquareReduction <- function(code, symTab, typeEnv) {
    if(length(code$args) != 1){
        stop(exprClassProcessingErrorMsg(code, 'sizeMatrixSquareReduction called with argument length != 1.'), call.
= FALSE) } asserts <- recurseSetSizes(code, symTab, typeEnv) a1 <- code$args[[1]] if(!inherits(a1, 'exprClass')) stop(exprClassProcessingErrorMsg(code, 'sizeMatrixSquareReduction called with argument that is not an expression.'), call. = FALSE) if(a1$toEigenize == 'no') { asserts <- c(asserts, sizeInsertIntermediate(code, 1, symTab, typeEnv)) a1 <- code$args[[1]] } if(a1$nDim != 2) stop(exprClassProcessingErrorMsg(code, 'sizeMatrixSquareReduction called with argument that is not a matrix.'), call. = FALSE) code$nDim <- 0 code$sizeExprs <- list() code$type <- if(code$name == 'trace') code$args[[1]]$type else 'double' code$toEigenize <- 'yes' if(!(code$caller$name %in% c('{','<-','<<-','='))) { asserts <- c(asserts, sizeInsertIntermediate(code$caller, code$callerArgID, symTab, typeEnv)) } if(length(asserts) == 0) NULL else asserts } sizeUnaryCwiseSquare <- function(code, symTab, typeEnv) { if(length(code$args) != 1){ stop(exprClassProcessingErrorMsg(code, 'sizeUnaryCwiseSquare called with argument length != 1.'), call. = FALSE) } asserts <- recurseSetSizes(code, symTab, typeEnv) a1 <- code$args[[1]] if(!inherits(a1, 'exprClass')) stop(exprClassProcessingErrorMsg(code, 'sizeUnaryCwiseSquare called with argument that is not an expression.'), call. = FALSE) if(a1$toEigenize == 'no') { asserts <- c(asserts, sizeInsertIntermediate(code, 1, symTab, typeEnv)) a1 <- code$args[[1]] } if(a1$nDim != 2) stop(exprClassProcessingErrorMsg(code, 'sizeUnaryCwiseSquare called with argument that is not a matrix.'), call. 
= FALSE) if(!identical(a1$sizeExprs[[1]], a1$sizeExprs[[2]])) { asserts <- c(asserts, identityAssert(a1$sizeExprs[[1]], a1$sizeExprs[[2]], paste0("Run-time size error: expected ", nimDeparse(a1), " to be square.") )) if(is.integer(a1$sizeExprs[[1]])) { newSize <- a1$sizeExprs[[1]] } else { if(is.integer(a1$sizeExprs[[2]])) { newSize <- a1$sizeExprs[[2]] } else { newSize <- a1$sizeExprs[[1]] } } } else { newSize <- a1$sizeExprs[[1]] } code$nDim <- 2 code$sizeExprs <- list(newSize, newSize) code$type <- setReturnType(code$name, a1$type) code$toEigenize <- if(code$nDim > 0) 'yes' else 'maybe' invisible(asserts) } sizeUnaryNonaryCwise <- function(code, symTab, typeEnv) { if(length(code$args) > 1) stop(exprClassProcessingErrorMsg(code, 'sizeUnaryNonaryCwise called with argument length > 1'), call. = FALSE) if(length(code$args) == 1) return(sizeUnaryCwise(code, symTab, typeEnv)) ## default behavior for a nonary (no-argument) function: code$type <- 'double' code$nDim <- 0 code$sizeExprs <- list() code$toEigenize <- 'maybe' invisible(NULL) } ## things like min, max, mean, sum sizeUnaryReduction <- function(code, symTab, typeEnv) { if(length(code$args) != 1) stop(exprClassProcessingErrorMsg(code, 'sizeUnaryReduction called with argument length != 1.'), call. = FALSE) asserts <- recurseSetSizes(code, symTab, typeEnv) if(inherits(code$args[[1]], 'exprClass')) { ## Kludgy catch of var case here. Can't do var(matrix) because in R that is interpreted as cov(data.frame) if(code$args[[1]]$nDim >= 2) { if(code$name == 'var') { stop(exprClassProcessingErrorMsg(code, 'NIMBLE compiler does not support var with a matrix (or higher dimensional) argument.'), call. 
= FALSE) } } if(!nimbleOptions('experimentalNewSizeProcessing') ) { if(!code$args[[1]]$isName) { if(code$args[[1]]$toEigenize == 'no') { asserts <- c(asserts, sizeInsertIntermediate(code, 1, symTab, typeEnv)) } } } } code$nDim <- 0 code$sizeExprs <- list() code$type <- setReturnType(code$name, code$args[[1]]$type) if(!nimbleOptions('experimentalNewSizeProcessing') ) code$toEigenize <- 'yes' if(!nimbleOptions('experimentalNewSizeProcessing') ) { if(!(code$caller$name %in% c('{','<-','<<-','='))) { asserts <- c(asserts, sizeInsertIntermediate(code$caller, code$callerArgID, symTab, typeEnv)) } } if(length(asserts) == 0) NULL else asserts } ## There's no real point in annotating return. Just need to recurse and lift sizeReturn <- function(code, symTab, typeEnv) { if(length(code$args) > 1) stop(exprClassProcessingErrorMsg(code, 'return has argument length > 1.'), call. = FALSE) code$toEigenize <- 'no' if(!exists('return', envir = typeEnv)) stop(exprClassProcessingErrorMsg(code, 'There was no returnType declaration and the default is missing.'), call. = FALSE) if(length(code$args) == 0) { if(!identical(typeEnv$return$type, 'void')) stop(exprClassProcessingErrorMsg(code, 'return() with no argument can only be used with returnType(void()), which is the default if there is no returnType() statement.'), call. = FALSE) return(invisible(NULL)) } if(identical(typeEnv$return$type, 'void')) stop(exprClassProcessingErrorMsg(code, 'returnType was declared void() (default) (or something invalid), which is not consistent with the object you are trying to return.'), call. = FALSE) asserts <- recurseSetSizes(code, symTab, typeEnv) if(inherits(code$args[[1]], 'exprClass')) { if(typeEnv$return$type == 'nimbleList' || code$args[[1]]$type == 'nimbleList') { if(typeEnv$return$type != 'nimbleList') stop(exprClassProcessingErrorMsg(code, paste0('return() argument is a nimbleList but returnType() statement gives a different type')), call. 
= FALSE) if(code$args[[1]]$type != 'nimbleList') stop(exprClassProcessingErrorMsg(code, paste0('returnType statement gives a nimbleList type but return() argument is not the right type')), call. = FALSE) ## equivalent to symTab$getSymbolObject(code$args[[1]]$name)$nlProc, if it is a name if(!identical(code$args[[1]]$sizeExprs$nlProc, typeEnv$return$sizeExprs$nlProc)) stop(exprClassProcessingErrorMsg(code, paste0('nimbleList given in return() argument does not match nimbleList type declared in returnType()')), call. = FALSE) } else { ## check numeric types and nDim fail <- FALSE if(!identical(code$args[[1]]$type, typeEnv$return$type)) { if(typeEnv$return$nDim > 0) { ## allow scalar casting of returns without error failMsg <- paste0('Type ', code$args[[1]]$type, ' of the return() argument does not match type ', typeEnv$return$type, ' given in the returnType() statement (void is default).') fail <- TRUE } } if(!isTRUE(all.equal(code$args[[1]]$nDim, typeEnv$return$nDim))) { failMsg <- paste0( if(exists("failMsg", inherits = FALSE)) paste0(failMsg,' ') else character(), paste0('Number of dimensions ', code$args[[1]]$nDim, ' of the return() argument does not match number ', typeEnv$return$nDim, ' given in the returnType() statement.')) fail <- TRUE } if(fail) stop(exprClassProcessingErrorMsg(code, failMsg), call. = FALSE) } if(!code$args[[1]]$isName) { liftArg <- FALSE if(code$args[[1]]$toEigenize == 'yes') liftArg <- TRUE else if(anyNonScalar(code$args[[1]])) liftArg <- TRUE if(liftArg) asserts <- c(asserts, sizeInsertIntermediate(code, 1, symTab, typeEnv, forceAssign = TRUE)) } } invisible(asserts) } sizeMatrixMult <- function(code, symTab, typeEnv) { if(length(code$args) != 2) stop(exprClassProcessingErrorMsg(code, 'sizeMatrixMult called with argument length != 2.'), call. 
= FALSE) a1 <- code$args[[1]] a2 <- code$args[[2]] if(!(inherits(a1, 'exprClass') & inherits(a2, 'exprClass'))) stop(exprClassProcessingErrorMsg(code, 'In sizeMatrixMult: expecting both arguments to be expressions.'), call. = FALSE) ## need promotion from vectors to matrices with asRow or asCol asserts <- recurseSetSizes(code, symTab, typeEnv) a1 <- code$args[[1]] a2 <- code$args[[2]] if(a1$nDim == 0 | a2$nDim == 0) stop(exprClassProcessingErrorMsg(code, 'In sizeMatrixMult: Cannot do matrix multiplication with a scalar.'), call. = FALSE) if(!nimbleOptions('experimentalNewSizeProcessing') ) { if(a1$toEigenize == 'no') { asserts <- c(asserts, sizeInsertIntermediate(code, 1, symTab, typeEnv)) a1 <- code$args[[1]] } if(a2$toEigenize == 'no') { asserts <- c(asserts, sizeInsertIntermediate(code, 2, symTab, typeEnv)) a2 <- code$args[[2]] } } ## Note that we could insert RUN-TIME adaptation of mat %*% vec and vec %*% mat ## but to do so we would need to generate trickier sizeExprs ## For now, a vector on the right will be turned into a column ## and a vector on the left will be turned into a row ## The programmer can always use asRow or asCol to control it explicitly if(a1$nDim == 1 & a2$nDim == 1) { origSizeExprs <- a1$sizeExprs[[1]] a1 <- insertExprClassLayer(code, 1, 'asRow', type = a1$type, nDim = 2) a1$sizeExprs <- c(list(1), origSizeExprs) origSizeExprs <- a2$sizeExprs[[1]] a2 <- insertExprClassLayer(code, 2, 'asCol', type = a2$type, nDim = 2) a2$sizeExprs <- c(origSizeExprs, list(1)) } else { if(a1$nDim == 1) { if(a2$nDim != 2) stop(exprClassProcessingErrorMsg(code, paste0('In sizeMatrixMult: First arg has nDim = 1 and 2nd arg has nDim = ', a2$nDim, '.')), call. 
= FALSE) origSizeExprs <- a1$sizeExprs[[1]] ## For first argument, default to asRow unless second argument has only one row, in which case make first asCol if(identical(a2$sizeExprs[[1]], 1)) { a1 <- insertExprClassLayer(code, 1, 'asCol', type = a1$type, nDim = 2) a1$sizeExprs <- c(origSizeExprs, list(1)) } else { a1 <- insertExprClassLayer(code, 1, 'asRow', type = a1$type, nDim = 2) a1$sizeExprs <- c(list(1), origSizeExprs) } } else if(a2$nDim == 1) { origSizeExprs <- a2$sizeExprs[[1]] if(a1$nDim != 2) stop(exprClassProcessingErrorMsg(code, paste0('In sizeMatrixMult: Second arg has nDim = 1 and 1st arg has nDim = ', a1$nDim, '.')), call. = FALSE) if(identical(a1$sizeExprs[[2]], 1)) { a2 <- insertExprClassLayer(code, 2, 'asRow', type = a2$type, nDim = 2) a2$sizeExprs <- c(list(1), origSizeExprs) } else { a2 <- insertExprClassLayer(code, 2, 'asCol', type = a2$type, nDim = 2) a2$sizeExprs <- c(origSizeExprs, list(1)) } } } code$nDim <- 2 code$sizeExprs <- list(a1$sizeExprs[[1]], a2$sizeExprs[[2]]) code$type <- setReturnType(code$name, arithmeticOutputType(a1$type, a2$type)) if(!nimbleOptions('experimentalNewSizeProcessing') ) code$toEigenize <- 'yes' assertMessage <- paste0("Run-time size error: expected ", deparse(a1$sizeExprs[[2]]), " == ", deparse(a2$sizeExprs[[1]])) newAssert <- identityAssert(a1$sizeExprs[[2]], a2$sizeExprs[[1]], assertMessage) if(is.null(newAssert)) return(asserts) else return(c(asserts, list(newAssert))) } sizeSolveOp <- function(code, symTab, typeEnv) { ## this is for solve(A, b) or forwardsolve(A, b). For inverse, use inverse(A), not solve(A) if(length(code$args) != 2) stop(exprClassProcessingErrorMsg(code, 'sizeSolveOp called with argument length != 2.'), call. = FALSE) asserts <- recurseSetSizes(code, symTab, typeEnv) a1 <- code$args[[1]] a2 <- code$args[[2]] if(!(inherits(a1, 'exprClass') & inherits(a2, 'exprClass'))) stop(exprClassProcessingErrorMsg(code, 'In sizeSolveOp: expecting both arguments to be exprClasses.'), call. 
= FALSE) if(a1$nDim != 2) stop(exprClassProcessingErrorMsg(code, 'In sizeSolveOp: first argument to a matrix solver must be a matrix.'), call. = FALSE) if(!any(a2$nDim == 1:2)) stop(exprClassProcessingErrorMsg(code, 'In sizeSolveOp: second argument to a matrix solver must be a vector or matrix.'), call. = FALSE) code$type <- setReturnType(code$name, 'double') code$nDim <- a2$nDim ## keep the same dimension as the 2nd argument if(code$nDim == 1) { code$sizeExprs <- c(a1$sizeExprs[[1]]) } else { code$sizeExprs <- c(a1$sizeExprs[[1]], a2$sizeExprs[[2]]) } code$toEigenize <- 'yes' assertMessage <- paste0("Run-time size error: expected ", deparse(a1$sizeExprs[[1]]), " == ", deparse(a1$sizeExprs[[2]])) assert1 <- identityAssert(a1$sizeExprs[[1]], a1$sizeExprs[[2]], assertMessage) assertMessage <- paste0("Run-time size error: expected ", deparse(a1$sizeExprs[[1]]), " == ", deparse(a2$sizeExprs[[1]])) assert2 <- identityAssert(a1$sizeExprs[[1]], a2$sizeExprs[[1]], assertMessage) asserts <- c(asserts, assert1, assert2) return(asserts) } ## deprecated and will be removed setAsRowOrCol <- function(code, argID, rowOrCol, type ) { recurse <- TRUE if(is.numeric(code$args[[argID]])) return(NULL) if(code$args[[argID]]$isName) { recurse <- FALSE } else { if(code$args[[argID]]$name == 'map') recurse <- FALSE } if(!recurse) { if(code$args[[argID]]$nDim == 2) { if(rowOrCol == 'asRow') { if(is.numeric(code$args[[argID]]$sizeExprs[[1]])) { if(code$args[[argID]]$sizeExprs[[1]] == 1) { return(invisible(NULL)) ## it is already a row } } rowOK <- if(is.numeric(code$args[[argID]]$sizeExprs[[2]])) { ## only ok if a1 2nd size is 1 if(code$sizeExprs[[2]] == 1) TRUE else FALSE } else FALSE if(!rowOK) stop(exprClassProcessingErrorMsg(code, 'In setAsRowOrCol: Cannot convert to row.'), call. 
= FALSE) lengthExpr <- code$args[[argID]]$sizeExprs[[1]] insertExprClassLayer(code$caller, code$callerArgID, 'transpose', type = type) code$nDim <- 2 code$sizeExprs <- c(list(1), lengthExpr) return(code$args[[argID]]) } else { if(is.numeric(code$args[[argID]]$sizeExprs[[1]])) { if(code$args[[argID]]$sizeExprs[[2]] == 1) { return(invisible(NULL)) ## it is already a col } } colOK <- if(is.numeric(code$args[[argID]]$sizeExprs[[1]])) { ## only ok if a1 1st size is 1 if(code$sizeExprs[[1]] == 1) TRUE else FALSE } else FALSE if(!colOK) stop(exprClassProcessingErrorMsg(code, 'In setAsRowOrCol: Cannot convert to col.'), call. = FALSE) lengthExpr <- code$args[[argID]]$sizeExprs[[1]] insertExprClassLayer(code$caller, code$callerArgID, 'transpose', type = type) code$nDim <- 2 code$sizeExprs <- c(lengthExpr, list(1)) return(code$args[[argID]]) } } else if(code$args[[argID]]$nDim == 1) { oldSizeExprs <- code$args[[argID]]$sizeExprs insertExprClassLayer(code, argID, rowOrCol, type = type) if(rowOrCol == 'asRow') { code$sizeExprs <- c(list(1), oldSizeExprs) } else { code$sizeExprs <- c(oldSizeExprs, list(1)) } code$nDim <- 2 code$type <- type ans <- code$args[[argID]] } } else { for(i in seq_along(code$args[[argID]]$args)) { setAsRowOrCol(code$args[[argID]], i, rowOrCol, type) } ans <- code$args[[argID]] } ans } sizeBinaryCwiseLogical <- function(code, symTab, typeEnv) { ans <- sizeBinaryCwise(code, symTab, typeEnv) code$type <- 'logical' return(ans) } ## Handler for binary component-wise operators sizeBinaryCwise <- function(code, symTab, typeEnv) { if(length(code$args) != 2) stop(exprClassProcessingErrorMsg(code, 'sizeBinaryCwise called with argument length != 2.'), call. 
= FALSE) asserts <- recurseSetSizes(code, symTab, typeEnv) ## sizes of arguments must have already been set ## pull out the two arguments a1 <- code$args[[1]] a2 <- code$args[[2]] ## pull out aXDropNdim, aXnDim, aXsizeExprs, and aXtype (X = 1 or 2) if(inherits(a1, 'exprClass')) { if(!nimbleOptions('experimentalNewSizeProcessing') ) { if(a1$toEigenize == 'no') { asserts <- c(asserts, sizeInsertIntermediate(code, 1, symTab, typeEnv)) a1 <- code$args[[1]] } } a1Drop <- dropSingleSizes(a1$sizeExprs) a1DropNdim <- length(a1Drop$sizeExprs) a1nDim <- a1$nDim a1sizeExprs <- a1$sizeExprs a1type <- a1$type if(!nimbleOptions('experimentalNewSizeProcessing') ) a1toEigenize <- a1$toEigenize } else { a1DropNdim <- 0 a1nDim <- 0 a1sizeExprs <- list() a1type <- storage.mode(a1) if(!nimbleOptions('experimentalNewSizeProcessing') ) a1toEigenize <- 'maybe' } if(inherits(a2, 'exprClass')) { if(!nimbleOptions('experimentalNewSizeProcessing') ) { if(a2$toEigenize == 'no') { asserts <- c(asserts, sizeInsertIntermediate(code, 2, symTab, typeEnv)) a2 <- code$args[[2]] } } a2Drop <- dropSingleSizes(a2$sizeExprs) a2DropNdim <- length(a2Drop$sizeExprs) a2nDim <- a2$nDim a2sizeExprs <- a2$sizeExprs a2type <- a2$type if(!nimbleOptions('experimentalNewSizeProcessing') ) a2toEigenize <- a2$toEigenize } else { a2DropNdim <- 0 a2nDim <- 0 a2sizeExprs <- list() a2type <- storage.mode(a2) if(!nimbleOptions('experimentalNewSizeProcessing') ) a2toEigenize <- 'maybe' } ## Choose the output type by type promotion if(length(a1type) == 0) {warning('Problem with type of arg1 in sizeBinaryCwise', call. = FALSE); browser()} if(length(a2type) == 0) {warning('Problem with type of arg2 in sizeBinaryCwise', call. 
= FALSE); browser()} code$type <- setReturnType(code$name, arithmeticOutputType(a1type, a2type)) if(!nimbleOptions('experimentalNewSizeProcessing') ) { forceYesEigenize <- identical(a1toEigenize, 'yes') | identical(a2toEigenize, 'yes') code$toEigenize <- if(a1DropNdim == 0 & a2DropNdim == 0) if(forceYesEigenize) 'yes' else 'maybe' else 'yes' } ## Catch the case that there is at least one scalar-equivalent (all lengths == 1) ## experimentalNewSizeProcessing: The 3 'code$toEigenize <- ' should be redundant with above and could be removed during refactor if(a1DropNdim == 0 | a2DropNdim == 0) { ## Here we will process effective scalar additions ## and not do any other type of size promotion/dropping if(a1DropNdim == 0) { ## a1 is scalar-equiv if(a2DropNdim == 0) { ##both are scalar-equiv code$nDim <- max(a1nDim, a2nDim) ## use the larger nDims code$sizeExprs <- rep(list(1), code$nDim) ## set sizeExprs to all 1 if(!nimbleOptions('experimentalNewSizeProcessing') ) code$toEigenize <- if(forceYesEigenize) 'yes' else 'maybe' } else { ## a2 is not scalar equiv, so take nDim and sizeExprs from it code$nDim <- a2nDim code$sizeExprs <- a2sizeExprs if(!nimbleOptions('experimentalNewSizeProcessing') ) code$toEigenize <- 'yes' } } else { ## a2 is scalar-equiv, and a1 is not code$nDim <- a1nDim code$sizeExprs <- a1sizeExprs if(!nimbleOptions('experimentalNewSizeProcessing') ) code$toEigenize <- 'yes' } return(if(length(asserts) == 0) NULL else asserts) } if(is.null(asserts)) asserts <- list() ## Catch the case that the number of dimensions is not equal. ## This case doesn't arise as much as it used to because [ (sizeIndexingBracket) now drops single dimensions if(a1nDim != a2nDim) { ## Catch the case that one is 2D and the other is 1D-equivalent. ## This allows e.g. X[1,1:5] + Y[1,1,1:5]. First arg is 2D. 2nd arg is 1D-equivalent. 
An assertion will check that dim(X)[1] == 1 ## If so, wrap the 1D in asRow or asCol to orient it later for Eigen if(a1DropNdim == 1 & a2DropNdim == 1) { ## Hey, I think this is wrong: I think we should check the aXDropNdims if(a1nDim > 2 | a2nDim > 2) stop(exprClassProcessingErrorMsg(code, 'In sizeBinaryCwise: Dimensions do not match and/or NIMBLE will not handle Array + Vector for dim(Array) > 2.'), call. = FALSE) ## a1 is 2D and a2 is 1D if(a1nDim == 2 & a2nDim == 1) { a1IsCol <- identical(a1sizeExprs[[2]], 1) asFun <- if(a1IsCol) 'asCol' else 'asRow' a2 <- insertExprClassLayer(code, 2, asFun, type = a2type) a2$sizeExprs <- a1sizeExprs a2$nDim <- a1nDim a1ind <- if(a1IsCol) 1 else 2 if(!is.numeric(a1sizeExprs[[a1ind]]) | !is.numeric(a2sizeExprs[[1]])) { ## Really do want original a2sizeExprs assertMessage <- paste0("Run-time size error: expected ", deparse(a1sizeExprs[[a1ind]]), " == ", deparse(a2sizeExprs[[1]])) thisAssert <- identityAssert(a1sizeExprs[[a1ind]], a2sizeExprs[[1]], assertMessage) if(!is.null(thisAssert)) asserts[[length(asserts) + 1]] <- thisAssert } else { if(a1sizeExprs[[a1ind]] != a2sizeExprs[[1]]) stop(exprClassProcessingErrorMsg(code, 'In sizeBinaryCwise: Fixed size mismatch.'), call. = FALSE) } code$nDim <- a1nDim code$sizeExprs <- a1sizeExprs } else { a2IsCol <- identical(a2sizeExprs[[2]], 1) asFun <- if(a2IsCol) 'asCol' else 'asRow' a1 <- insertExprClassLayer(code, 1, asFun, type = a1type) a1$sizeExprs <- a2sizeExprs a1$type <- a1type a1$nDim <- a1nDim <- a2nDim a2ind <- if(a2IsCol) 1 else 2 if(!is.numeric(a1sizeExprs[[1]]) | !is.numeric(a2sizeExprs[[a2ind]])) { ## Really do want the original a1sizeExprs[[1]], not the modified one. 
assertMessage <- paste0("Run-time size error: expected ", deparse(a1sizeExprs[[1]]), " == ", deparse(a2sizeExprs[[a2ind]])) thisAssert <- identityAssert(a1sizeExprs[[1]], a2sizeExprs[[a2ind]], assertMessage) if(!is.null(thisAssert)) asserts[[length(asserts) + 1]] <- thisAssert } else { if(a1sizeExprs[[1]] != a2sizeExprs[[a2ind]]) stop(exprClassProcessingErrorMsg(code, 'In sizeBinaryCwise: Fixed size mismatch.'), call. = FALSE) } code$nDim <- a2nDim code$sizeExprs <- a2sizeExprs } } else { ## If at least one arg is a known scalar-equivalent, that case was handled above ## (But it's still not complete) ## Here is the case that nDims aren't equal and dropNdims aren't equal ## either. We used to rely on typeEnv to keep track of when a size resulting from an operation is known to be 1 but realized that isn't safe if that operation is only conditionally executed at run time. ## Hence what will do now is assume the user has written valid code ## but add run-time size checks of which dimension must match ## This is currently limited in what it will handle ## Specifically, it assumes things should be columns assertMessage <- paste0("Run-time size error: expected ", deparse(a1sizeExprs[[1]]), " == ", deparse(a2sizeExprs[[1]])) thisAssert <- identityAssert(a1sizeExprs[[1]], a2sizeExprs[[1]], assertMessage) if(!is.null(thisAssert)) asserts[[length(asserts) + 1]] <- thisAssert if(a1nDim == 1 & a2nDim == 2) { assertMessage <- paste0("Run-time size error: expected ", deparse(a2sizeExprs[[2]]), " == ", 1) thisAssert <- identityAssert(a2sizeExprs[[2]], 1, assertMessage) if(!is.null(thisAssert)) asserts[[length(asserts) + 1]] <- thisAssert code$sizeExprs <- a2sizeExprs } else { if(a1nDim == 2 & a2nDim == 1) { assertMessage <- paste0("Run-time size error: expected ", deparse(a1sizeExprs[[2]]), " == ", 1) thisAssert <- identityAssert(a1sizeExprs[[2]], 1, assertMessage) if(!is.null(thisAssert)) asserts[[length(asserts) + 1]] <- thisAssert code$sizeExprs <- a1sizeExprs } else { 
stop(exprClassProcessingErrorMsg(code, 'In sizeBinaryCwise: Dimensions do not matchin a way that can be handled.'), call. = FALSE) } } code$nDim <- 2 } } else { ## dimensions match at the outset nDim <- a1nDim if(nDim > 0) { for(i in 1:nDim) { if(!is.numeric(a1sizeExprs[[i]]) | !is.numeric(a2sizeExprs[[i]])) { assertMessage <- paste0("Run-time size error: expected ", deparse(a1sizeExprs[[i]]), " == ", deparse(a2sizeExprs[[i]])) thisAssert <- identityAssert(a1sizeExprs[[i]], a2sizeExprs[[i]], assertMessage) if(!is.null(thisAssert)) asserts[[length(asserts) + 1]] <- thisAssert } else { if(a1sizeExprs[[i]] != a2sizeExprs[[i]]) stop(exprClassProcessingErrorMsg(code, 'In sizeBinaryCwise: Fixed size mismatch.'), call. = FALSE) } } } code$nDim <- a1$nDim code$sizeExprs <- vector('list', code$nDim) for(i in seq_along(code$sizeExprs)) code$sizeExprs[[i]] <- if(is.numeric(a1sizeExprs[[i]])) a1sizeExprs[[i]] else a2sizeExprs[[i]] } if(length(asserts) == 0) NULL else asserts } mvFirstArgCheckLists <- list(nimArr_rmnorm_chol = list(c(1, 2, 0), ## dimensionality of ordered arguments AFTER the first, which is for the return value. e.g. mean (1D), chol(2D), prec_param(scalar) 1, 'double'), ## 1 = argument from which to take answer size, double = answer type nimArr_rmvt_chol = list(c(1, 2, 0, 0), ## dimensionality of ordered arguments AFTER the first, which is for the return value. e.g. 
mean (1D), chol(2D), df(scalar), prec_param(scalar) 1, 'double'), ## 1 = argument from which to take answer size, double = answer type nimArr_rwish_chol = list(c(2, 0, 0, 0), ## chol, df, prec_param, overwrite_inputs 1, 'double'), nimArr_rinvwish_chol = list(c(2, 0, 0), ## chol, df, prec_param 1, 'double'), nimArr_rcar_normal = list(c(1, 1, 1), 3, 'double'), ## adj, wgts, num nimArr_rmulti = list(c(0, 1), ## size, probs 2, 'double'), ## We treat integer rv's as doubles nimArr_rdirch = list(c(1), 1, 'double')) ## alpha sizeRmultivarFirstArg <- function(code, symTab, typeEnv) { asserts <- recurseSetSizes(code, symTab, typeEnv) notOK <- FALSE checkList <- mvFirstArgCheckLists[[code$name]] if(!is.null(checkList)) { if(length(code$args) < length(checkList[[1]])) stop(exprClassProcessingErrorMsg(code, 'Not enough arguments provided.'), call. = FALSE) for(i in seq_along(checkList[[1]])) { notOK <- if(inherits(code$args[[i]], 'exprClass')) code$args[[i]]$nDim != checkList[[1]][i] else notOK } returnSizeArgID <- checkList[[2]] returnType <- checkList[[3]] } else { returnSizeArgID <- 1 returnType <- 'double' } if(notOK) { stop(exprClassProcessingErrorMsg(code, 'Some argument(s) have the wrong dimension.'), call. = FALSE) } if(!inherits(code$args[[returnSizeArgID]], 'exprClass')) stop(exprClassProcessingErrorMsg(code, paste0('Expected ', nimDeparse(code$args[[returnSizeArgID]]) ,' to be an expression.')), call. 
= FALSE) code$type <- returnType code$nDim <- code$args[[returnSizeArgID]]$nDim code$toEigenize <- 'maybe' code$sizeExprs <- code$args[[returnSizeArgID]]$sizeExprs for(i in seq_along(code$args)) { if(inherits(code$args[[i]], 'exprClass')) { if(!code$args[[i]]$isName) { asserts <- c(asserts, sizeInsertIntermediate(code, i, symTab, typeEnv) ) } } } if(code$nDim > 0) { if(!(code$caller$name %in% c('{','<-','<<-','='))) { asserts <- c(asserts, sizeInsertIntermediate(code$caller, code$callerArgID, symTab, typeEnv)) } else typeEnv$.ensureNimbleBlocks <- TRUE ## for purposes of sizeAssign, which recurses on assignment target after RHS } return(asserts) } sizeVoidPtr <- function(code, symTab, typeEnv) { ## lift any argument that is an expression or scalar. ## We expect only one argument ## Lift it if it is an expression, a numeric, or a scalar asserts <- recurseSetSizes(code, symTab, typeEnv) lift <- TRUE if(inherits(code$args[[1]], 'exprClass')) { if(code$args[[1]]$type == 'nimbleFunction') lift <- FALSE else if(code$args[[1]]$isName & code$args[[1]]$nDim > 0) lift <- FALSE ## will already be a pointer } if(lift) { asserts <- c(asserts, sizeInsertIntermediate(code, 1, symTab, typeEnv) ) } code$type <- 'voidPtr' code$nDim <- 0 code$toEigenize <- 'no' return(asserts) } ### ## This function would be called with arguments from an RCfunction or nimbleFunction ## the functions dim and length would be taken over to work on the sizeExprs. ## but for now it can just return NAs for size expressions, and then the new returned value will have default size expressions (dim(name)[1], etc) ## generalFunSizeHandler <- function(code, symTab, typeEnv, returnType, args, chainedCall = FALSE) { useArgs <- unlist(lapply(args, function(x) as.character(x[[1]]) %in% c('double', 'integer', 'logical'))) if(chainedCall) useArgs <- c(FALSE, useArgs) if(length(code$args) != length(useArgs)) { stop(exprClassProcessingErrorMsg(code, 'In generalFunSizeHandler: Wrong number of arguments.'), call. 
= FALSE) } ## Note this is NOT checking the dimensions of each arg. useArgs just means it will recurse on that and lift or do as needed asserts <- recurseSetSizes(code, symTab, typeEnv, useArgs) ## lift any argument that is an expression for(i in seq_along(code$args)) { if(useArgs[i]) { if(inherits(code$args[[i]], 'exprClass')) { if(!code$args[[i]]$isName) { asserts <- c(asserts, sizeInsertIntermediate(code, i, symTab, typeEnv) ) } } } } if(inherits(returnType, 'symbolNimbleList')) { code$type <- 'nimbleList' code$sizeExprs <- returnType code$toEigenize <- 'maybe' code$nDim <- 0 liftIfAmidExpression <- TRUE } else { returnSymbolBasic <- inherits(returnType, 'symbolBasic') returnTypeLabel <- if(returnSymbolBasic) returnType$type else as.character(returnType[[1]]) if(returnTypeLabel == 'void') { code$type <- returnTypeLabel code$toEigenize <- 'unknown' return(asserts) } returnNDim <- if(returnSymbolBasic) returnType$nDim else if(length(returnType) > 1) as.numeric(returnType[[2]]) else 0 returnSizeExprs <- vector('list', returnNDim) ## This stays blank (NULLs), so if assigned as a RHS, the LHS will get default sizes code$type <- returnTypeLabel code$nDim <- returnNDim code$sizeExprs <- returnSizeExprs code$toEigenize <- if(code$nDim == 0) 'maybe' else 'no' liftIfAmidExpression <- code$nDim > 0 } if(liftIfAmidExpression) { if(!(code$caller$name %in% c('{','<-','<<-','='))) { asserts <- c(asserts, sizeInsertIntermediate(code$caller, code$callerArgID, symTab, typeEnv)) } else typeEnv$.ensureNimbleBlocks <- TRUE } return(asserts) } generalFunSizeHandlerFromSymbols <- function(code, symTab, typeEnv, returnSymbol, argSymTab, chainedCall = FALSE) { ## symbols should be in order useArgs <- unlist(lapply(argSymTab$symbols, function(x) { if(!is.null(x[['type']])) as.character(x$type) %in% c('double', 'integer', 'logical') else FALSE })) if(chainedCall) useArgs <- c(FALSE, useArgs) if(length(code$args) != length(useArgs)) { stop(exprClassProcessingErrorMsg(code, 'In 
generalFunSizeHandler: Wrong number of arguments.'), call. = FALSE) } ## Note this is NOT checking the dimensions of each arg. useArgs just means it will recurse on that and lift or do as needed asserts <- recurseSetSizes(code, symTab, typeEnv, useArgs) ## lift any argument that is an expression for(i in seq_along(code$args)) { if(useArgs[i]) { if(inherits(code$args[[i]], 'exprClass')) { if(!code$args[[i]]$isName) { asserts <- c(asserts, sizeInsertIntermediate(code, i, symTab, typeEnv) ) } } } } if(inherits(returnSymbol, 'symbolNimbleList')) { code$type <- 'nimbleList' code$sizeExprs <- returnSymbol code$toEigenize <- 'maybe' code$nDim <- 0 liftIfAmidExpression <- TRUE } else { returnSymbolBasic <- inherits(returnSymbol, 'symbolBasic') returnTypeLabel <- if(returnSymbolBasic) returnSymbol$type else { stop(exprClassProcessingErrorMsg(code, 'In generalFunSizeHandlerFromSymbols: Problem with return type.'), call. = FALSE) } if(returnTypeLabel == 'void') { code$type <- returnTypeLabel code$toEigenize <- 'unknown' return(asserts) } returnNDim <- if(returnSymbolBasic) returnSymbol$nDim else if(length(returnType) > 1) as.numeric(returnType[[2]]) else 0 returnSizeExprs <- vector('list', returnNDim) ## This stays blank (NULLs), so if assigned as a RHS, the LHS will get default sizes code$type <- returnTypeLabel code$nDim <- returnNDim code$sizeExprs <- returnSizeExprs code$toEigenize <- if(code$nDim == 0) 'maybe' else 'no' liftIfAmidExpression <- code$nDim > 0 } if(liftIfAmidExpression) { if(!(code$caller$name %in% c('{','<-','<<-','='))) { asserts <- c(asserts, sizeInsertIntermediate(code$caller, code$callerArgID, symTab, typeEnv)) } else typeEnv$.ensureNimbleBlocks <- TRUE } return(asserts) }
#Write a "flopfile": markers as rows transposed to lines-as-rows is undone by
#t(); output is a space-separated table whose first column holds line names
#preceded by "Chromosome" and "Position" header labels, with NA genotypes
#written as "--".  `dataset` is expected to carry $calls (marker x line
#genotype matrix), $position, and $LG.
write.flopfile <- function(dataset, outfile){
    #transpose the matrix and replace NA's
    calls <- dataset$calls
    calls <- t(calls)
    missing <- is.na(calls)
    calls[which(missing==TRUE)] <- "--"
    #add the sequence position
    calls <- rbind(as.character(dataset$position), calls)
    #add chromosome
    calls <- rbind(as.character(dataset$LG), calls)
    #add line names
    first_column <- c("Chromosome", "Position", dimnames(dataset$calls)[[2]])
    calls <- cbind(first_column, calls)
    write.table(calls, file=outfile, quote=F, row.names=F, col.names=F, sep=" ")
}

#write a dataset to PLINK outfiles
#`delimiter` selects which dataset field supplies the family/population ID
#("group", "status", or "cycle"); NOTE(review): popname is undefined for any
#other delimiter value and apply() would then fail -- TODO confirm callers
#only pass these three.  `assn="TRUE"` (string compare, not logical) switches
#to association mode: phenotype from group, no "0 0 0" parent/sex tag.
write.plink <- function(dataset, pedfile, mapfile, delimiter, assn="FALSE"){
    pheno <- 1
    linetag <- "0 0 0"
    #write the pedfile
    calls <- dataset$calls
    calls[which(is.na(calls))] <- "00"
    if(delimiter=="group"){
        popname <- as.character(dataset$group)
    } else if(delimiter=="status"){
        popname <- as.character(dataset$status)
    } else if (delimiter=="cycle"){
        popname <- paste(as.character(dataset$cycle), as.character(dataset$status), sep="_")
    }
    if(assn=="TRUE"){
        pheno <- as.numeric(as.factor(dataset$group))
        linetag <- ""
    }
    #one ped line per individual: popname, line name, tag, then every
    #genotype string split into single characters (pheno, row 3 of the rbind,
    #is included in the character split along with the genotypes)
    lines <- apply(rbind(popname, dimnames(calls)[[2]], pheno, calls), 2, function(line) {
        var <- unlist(strsplit(line[-(1:2)], split=""))
        paste(c(line[1:2], linetag, var), collapse=" ")
    })
    cat(lines, file=pedfile, sep="\n")
    #write the mapfile
    mapdata <- cbind(as.character(dataset$LG), as.character(dataset$markernames), rep(0,length(dataset$LG)), as.character(dataset$position))
    write.table(mapdata, file=mapfile, col.names=F, row.names=F, quote=F, sep="\t")
}

#write out a fasta file
#`calls` is a marker x individual matrix of two-character diploid genotypes;
#each individual yields two FASTA records (one per haplotype), both under the
#same ">name" header.  NA genotypes become "NN".
write.fasta <- function(calls, outfile) {
    missing <- is.na(calls)
    calls[which(missing==TRUE)] <- "NN"
    lines <- apply(rbind(dimnames(calls)[[2]], calls), 2, function(line){
        #split "AB" genotypes into a 2-row matrix: row 1 = first allele of
        #each marker, row 2 = second allele
        allsequence <- unlist(strsplit(line[-1], split=""))
        allsequence <- matrix(allsequence, nrow=2, byrow=FALSE)
        newline1 <- c(paste(">",line[1], sep=""), paste(allsequence[1,], collapse=""))
        newline2 <- c(paste(">",line[1], sep=""), paste(allsequence[2,], collapse=""))
        bothlines <- cbind(newline1, newline2)
        return(bothlines)
    })
    cat(lines, file=outfile, sep="\n")
}

#Write a sliding-window series of FASTA files: one file per window center in
#`checkpts`, named "<prefix>_<center>", each containing the markers whose row
#index falls inside the window.  Returns the vector of filenames (via sapply).
write.window <- function(dataset, prefix, window=1000, step=1000){
    measures <- dataset$calls
    positions <- 1:dim(measures)[[1]]
    checkpts <- seq((window/2), length(positions)-window/2, by=step)
    sapply(checkpts, function(x){
        current <- measures[(positions >= ((x+1)-window/2)) & (positions <= (x+window/2)),]
        filename <- paste(prefix, x, sep="_")
        outfile <- file(filename, "w")
        write.fasta(current, outfile)
        close(outfile)
        return(filename)
    })
}

#Write PHASE-style input for one chromosome: unphased "Outbred" individuals
#(two sequence lines each) go to snpfile; inbred lines (hets removed via
#remove.hets, one sequence line each) go to hapfile as known haplotypes;
#population codes for both sets go to popfile.  "B" (from the "BB" NA fill)
#is rewritten to the "?" missing symbol in the sequences.
phaser <- function(dataset, chromosome, snpfile, hapfile, popfile) {
    calls <- dataset$calls[dataset$LG==chromosome,dataset$status=="Outbred"]
    calls[which(is.na(calls))] <- "BB"
    ind <- dim(calls)[[2]]
    loc <- dim(calls)[[1]]
    #header: number of individuals, then number of loci
    cat(ind, loc, sep="\n", file=snpfile)
    pops <- as.numeric(dataset$group[dataset$lines%in%dimnames(calls)[[2]]])
    lines <- apply(rbind(dimnames(calls)[[2]], calls), 2, function(line){
        allsequence <- unlist(strsplit(line[-1], split=""))
        allsequence <- matrix(allsequence, nrow=2, byrow=FALSE)
        name <- paste("#", line[1], sep=" ")
        allsequence[which(allsequence=="B")] <- "?"
        newline1 <- paste(as.character(allsequence[1,]), collapse="")
        newline2 <- paste(allsequence[2,], collapse="")
        alllines <- c(name, newline1, newline2)
        return(alllines)
    })
    cat(lines, file=snpfile, sep="\n", append=T)
    haps <- dataset$calls[dataset$LG==chromosome,dataset$status!="Outbred"]
    haps <- remove.hets(haps)
    hapind <- dim(haps)[[2]]
    haploc <- dim(haps)[[1]]   #NOTE(review): computed but never written out
    haps[which(is.na(haps))] <- "BB"
    happops <- as.numeric(dataset$group[dataset$lines%in%dimnames(haps)[[2]]])
    cat(hapind, sep="\n", file=hapfile)
    haplines <- apply(rbind(dimnames(haps)[[2]], haps), 2, function(line){
        allsequence <- unlist(strsplit(line[-1], split=""))
        allsequence <- matrix(allsequence, nrow=2, byrow=FALSE)
        name <- paste("#", line[1], sep=" ")
        allsequence[which(allsequence=="B")] <- "?"
        #inbred: only the first haplotype is emitted
        newline1 <- paste(as.character(allsequence[1,]), collapse="")
        alllines <- c(name, newline1)
        return(alllines)
    })
    cat(haplines, file=hapfile, sep="\n", append=T)
    pops <- c(pops, happops)
    cat(pops, file=popfile, sep=" ")
}

#PLINK-style output for fully inbred lines: map file gets
#chrom/marker/genetic-position (F2map, 7 decimals)/physical-position; ped file
#duplicates each single-allele call to fake a homozygous diploid genotype.
inbredplinker <- function(dataset, pedfile, mapfile) {
    calls <- dataset$calls
    # calls[is.na(calls)] <- 0
    markername <- dataset$markernames
    physmap <- dataset$position
    chrom <- dataset$LG
    genmap <- round(dataset$F2map, digits=7)
    #genmap[is.na(genmap)] <- 0
    mapdata <- sprintf("%d\t%s\t%.7f\t%d", chrom, markername, genmap, physmap)
    cat(mapdata, file = mapfile, sep = "\n")
    strains <- dimnames(calls)[[2]]
    lines <- apply(rbind(colnames(calls), calls), 2, function(line) {
        #rep(..., each = 2): each haploid call is written twice (homozygote)
        var <- paste(rep(line[-1], each = 2), collapse = " ")
        strain <- line[1]
        printline <- sprintf("%s 1 0 0 0 1 %s", strain, var )
        return(printline)
    }
    )
    cat(lines, file= pedfile, sep="\n")
}

#HAPPY-style allele-probability file for one chromosome: header with marker
#and strain counts, strain names, then per marker a "marker" line (F2 map
#position) followed by the 3-row probability table from create.problines.
write.hapalleles <- function(dataset, chromosome, file){
    strains <- dimnames(dataset$calls)[[2]]
    nstrains <- length(strains)
    strains <- paste(dimnames(dataset$calls)[[2]], collapse=" ")
    nmarkers <- length(dataset$markernames[dataset$LG==chromosome])
    out <- file(file, "w")
    on.exit(close(out))
    calls <- dataset$calls[dataset$LG==chromosome,]
    markernames <- dimnames(calls)[[1]]
    f2map <- dataset$F2map[dataset$LG==chromosome]
    cat(sprintf("markers %d strains %s\n", nmarkers, nstrains), file=out)
    cat(sprintf("strain_names %s\n", strains), file=out)
    for(i in 1:length(markernames)){
        cat(sprintf("marker %s, 3, %.2f\n", markernames[i], f2map[i]), file=out)
        tabled <- create.problines(calls[i,])
        write.table(tabled, file=out, append=TRUE, quote=FALSE, row.names=FALSE, col.names=FALSE)
    }
}

#Build the 3-line allele-probability block for one marker (vector of calls
#across strains): an "ND" line with uniform 1/n probabilities, then one line
#per observed allele with probability 1/(allele count) for carriers, 0.000
#otherwise.  Assumes exactly two distinct alleles; relies on R coercing the
#logical vectors to numeric when the probabilities are assigned in, after
#which the `== FALSE` comparison matches the zeros.
create.problines <- function(marker){
    alleles <- unique(marker)
    allele1number <- sum(marker==alleles[1])
    allele2number <- sum(marker==alleles[2])
    allele1prob <- 1/allele1number
    allele2prob <- 1/allele2number
    allele1vector <- marker==alleles[1]
    allele2vector <- marker==alleles[2]
    allele1vector[allele1vector==TRUE] <- allele1prob
    allele1vector[allele1vector==FALSE] <- 0.000
    allele1vector <- sprintf("%.3f", allele1vector)
    allele2vector[allele2vector==TRUE] <- allele2prob
    allele2vector[allele2vector==FALSE] <- 0.000
    allele2vector <- sprintf("%.3f", allele2vector)
    navector <- rep(1/length(marker), length(marker))
    navector <- sprintf("%.3f", navector)
    line1 <- c("allele", "ND", navector)
    line2 <- c("allele", alleles[1], allele1vector)
    line3 <- c("allele", alleles[2], allele2vector)
    alleleframe <- rbind(line1,line2,line3)
    return(alleleframe)
}

#HAPPY-style genotype file for one chromosome: one line per strain, each call
#duplicated (rep each = 2) to present diploid genotypes, preceded by the
#strain name and a constant "1" field.
write.happygenotypes <- function(dataset, chromosome, file){
    calls <- dataset$calls[dataset$LG==chromosome,]
    strains <- dimnames(calls)[[2]]
    lines <- apply(rbind(colnames(calls), calls), 2, function(line) {
        var <- paste(rep(line[-1], each = 2), collapse = " ")
        strain <- line[1]
        printline <- sprintf("%s 1 %s", strain, var )
        return(printline)
    }
    )
    cat(lines, file= file, sep="\n")
}
/rcode/writing_functions.R
no_license
jpgerke/RRS56k
R
false
false
7,156
r
# Writing/export helpers for SNP genotype datasets.
# A `dataset` is a list with (at least): calls (marker x line character matrix
# of two-letter genotypes, e.g. "AA"), position, LG (chromosome / linkage
# group), markernames, group, status, cycle, lines, F2map.

# Write a "flopfile": two header rows (chromosome, position) followed by one
# row per line, space-delimited, with missing calls coded "--".
write.flopfile <- function(dataset, outfile) {
  # transpose so lines are rows and markers are columns
  calls <- t(dataset$calls)
  calls[is.na(calls)] <- "--"
  # prepend the physical position and chromosome rows (LG ends up on top)
  calls <- rbind(as.character(dataset$position), calls)
  calls <- rbind(as.character(dataset$LG), calls)
  # first column labels the two header rows and names each line
  first_column <- c("Chromosome", "Position", colnames(dataset$calls))
  calls <- cbind(first_column, calls)
  write.table(calls, file = outfile, quote = FALSE,
              row.names = FALSE, col.names = FALSE, sep = " ")
}

# Write a dataset to PLINK .ped/.map files.
# `delimiter` selects how the family/population column is formed ("group",
# "status" or "cycle"); `assn` is a "TRUE"/"FALSE" *string* flag (kept for
# backward compatibility) that switches to association-style phenotypes.
write.plink <- function(dataset, pedfile, mapfile, delimiter, assn = "FALSE") {
  pheno <- 1
  linetag <- "0 0 0"  # father/mother/sex placeholder columns
  calls <- dataset$calls
  calls[is.na(calls)] <- "00"  # PLINK missing-genotype code
  if (delimiter == "group") {
    popname <- as.character(dataset$group)
  } else if (delimiter == "status") {
    popname <- as.character(dataset$status)
  } else if (delimiter == "cycle") {
    popname <- paste(as.character(dataset$cycle),
                     as.character(dataset$status), sep = "_")
  } else {
    stop("unknown delimiter: ", delimiter)
  }
  if (assn == "TRUE") {
    # use the group index as the phenotype and drop the pedigree placeholders
    pheno <- as.numeric(as.factor(dataset$group))
    linetag <- ""
  }
  lines <- apply(rbind(popname, colnames(calls), pheno, calls), 2,
                 function(line) {
    # split two-letter genotypes into space-separated alleles.
    # BUGFIX: the original split line[-(1:2)], which included the phenotype
    # value and would split a multi-digit phenotype into separate digits.
    var <- unlist(strsplit(line[-(1:3)], split = ""))
    paste(c(line[1:2], linetag, line[3], var), collapse = " ")
  })
  cat(lines, file = pedfile, sep = "\n")
  # map file: chromosome, marker name, genetic distance (0), physical position
  mapdata <- cbind(as.character(dataset$LG),
                   as.character(dataset$markernames),
                   rep(0, length(dataset$LG)),
                   as.character(dataset$position))
  write.table(mapdata, file = mapfile, col.names = FALSE,
              row.names = FALSE, quote = FALSE, sep = "\t")
}

# Write a two-letter genotype matrix as FASTA: each line (column) yields two
# sequences, one per allele, both written under the same ">name" header.
write.fasta <- function(calls, outfile) {
  calls[is.na(calls)] <- "NN"
  lines <- apply(rbind(colnames(calls), calls), 2, function(line) {
    # row 1 = first allele of every marker, row 2 = second allele
    alleles <- matrix(unlist(strsplit(line[-1], split = "")),
                      nrow = 2, byrow = FALSE)
    header <- paste(">", line[1], sep = "")
    seq1 <- c(header, paste(alleles[1, ], collapse = ""))
    seq2 <- c(header, paste(alleles[2, ], collapse = ""))
    cbind(seq1, seq2)
  })
  cat(lines, file = outfile, sep = "\n")
}

# Slide a window of `window` markers (step `step`) along the dataset and
# write each window to its own FASTA file "<prefix>_<center>".
# Returns the vector of file names written.
write.window <- function(dataset, prefix, window = 1000, step = 1000) {
  measures <- dataset$calls
  positions <- seq_len(nrow(measures))
  centers <- seq(window / 2, length(positions) - window / 2, by = step)
  vapply(centers, function(x) {
    keep <- positions >= ((x + 1) - window / 2) & positions <= (x + window / 2)
    current <- measures[keep, ]
    filename <- paste(prefix, x, sep = "_")
    outfile <- file(filename, "w")
    on.exit(close(outfile), add = TRUE)  # close even if write.fasta errors
    write.fasta(current, outfile)
    filename
  }, character(1))
}

# Write PHASE-style input files for one chromosome: outbred individuals
# (diploid, unknown phase) go to `snpfile`, inbred individuals (known
# haplotypes, heterozygous calls removed) to `hapfile`, and the population
# index of every individual to `popfile`.
phaser <- function(dataset, chromosome, snpfile, hapfile, popfile) {
  calls <- dataset$calls[dataset$LG == chromosome, dataset$status == "Outbred"]
  calls[is.na(calls)] <- "BB"  # "B" is recoded to "?" (missing) below
  cat(ncol(calls), nrow(calls), sep = "\n", file = snpfile)
  pops <- as.numeric(dataset$group[dataset$lines %in% colnames(calls)])
  lines <- apply(rbind(colnames(calls), calls), 2, function(line) {
    alleles <- matrix(unlist(strsplit(line[-1], split = "")),
                      nrow = 2, byrow = FALSE)
    alleles[alleles == "B"] <- "?"
    c(paste("#", line[1], sep = " "),
      paste(alleles[1, ], collapse = ""),
      paste(alleles[2, ], collapse = ""))
  })
  cat(lines, file = snpfile, sep = "\n", append = TRUE)
  haps <- dataset$calls[dataset$LG == chromosome, dataset$status != "Outbred"]
  haps <- remove.hets(haps)  # defined elsewhere in the project
  haps[is.na(haps)] <- "BB"
  happops <- as.numeric(dataset$group[dataset$lines %in% colnames(haps)])
  cat(ncol(haps), sep = "\n", file = hapfile)
  haplines <- apply(rbind(colnames(haps), haps), 2, function(line) {
    alleles <- matrix(unlist(strsplit(line[-1], split = "")),
                      nrow = 2, byrow = FALSE)
    alleles[alleles == "B"] <- "?"
    # inbreds are homozygous, so only one haplotype is written per individual
    c(paste("#", line[1], sep = " "),
      paste(alleles[1, ], collapse = ""))
  })
  cat(haplines, file = hapfile, sep = "\n", append = TRUE)
  cat(c(pops, happops), file = popfile, sep = " ")
}

# Write inbred lines to PLINK .ped/.map using the F2 genetic map; each
# (effectively haploid) call is duplicated to the diploid form PLINK expects.
inbredplinker <- function(dataset, pedfile, mapfile) {
  calls <- dataset$calls
  genmap <- round(dataset$F2map, digits = 7)
  # NOTE(review): %d requires integer LG/position -- verify upstream types
  mapdata <- sprintf("%d\t%s\t%.7f\t%d",
                     dataset$LG, dataset$markernames, genmap, dataset$position)
  cat(mapdata, file = mapfile, sep = "\n")
  lines <- apply(rbind(colnames(calls), calls), 2, function(line) {
    var <- paste(rep(line[-1], each = 2), collapse = " ")
    sprintf("%s 1 0 0 0 1 %s", line[1], var)
  })
  cat(lines, file = pedfile, sep = "\n")
}

# Write a HAPPY-style allele-probability file for one chromosome:
# a two-line header followed, per marker, by a "marker" line and the
# three-row probability block produced by create.problines().
write.hapalleles <- function(dataset, chromosome, file) {
  strain.names <- colnames(dataset$calls)
  nstrains <- length(strain.names)
  on.chrom <- dataset$LG == chromosome
  calls <- dataset$calls[on.chrom, ]
  markernames <- rownames(calls)
  f2map <- dataset$F2map[on.chrom]
  out <- file(file, "w")
  on.exit(close(out))
  cat(sprintf("markers %d strains %d\n", length(markernames), nstrains),
      file = out)
  cat(sprintf("strain_names %s\n", paste(strain.names, collapse = " ")),
      file = out)
  for (i in seq_along(markernames)) {
    cat(sprintf("marker %s, 3, %.2f\n", markernames[i], f2map[i]), file = out)
    write.table(create.problines(calls[i, ]), file = out, append = TRUE,
                quote = FALSE, row.names = FALSE, col.names = FALSE)
  }
}

# Build the 3-row allele-probability block for a single marker: one uniform
# "ND" (no data) row plus one row per allele, where each line carrying the
# allele gets probability 1/(allele count). Assumes a biallelic marker with
# no missing calls.
create.problines <- function(marker) {
  alleles <- unique(marker)
  is.a1 <- marker == alleles[1]
  is.a2 <- marker == alleles[2]
  prob1 <- 1 / sum(is.a1)
  prob2 <- 1 / sum(is.a2)
  navector <- sprintf("%.3f", rep(1 / length(marker), length(marker)))
  line1 <- c("allele", "ND", navector)
  line2 <- c("allele", alleles[1], sprintf("%.3f", ifelse(is.a1, prob1, 0)))
  line3 <- c("allele", alleles[2], sprintf("%.3f", ifelse(is.a2, prob2, 0)))
  rbind(line1, line2, line3)
}

# Write HAPPY-style genotypes for one chromosome: "<strain> 1 <alleles>",
# with each call duplicated to diploid form.
write.happygenotypes <- function(dataset, chromosome, file) {
  calls <- dataset$calls[dataset$LG == chromosome, ]
  lines <- apply(rbind(colnames(calls), calls), 2, function(line) {
    var <- paste(rep(line[-1], each = 2), collapse = " ")
    sprintf("%s 1 %s", line[1], var)
  })
  cat(lines, file = file, sep = "\n")
}
#--------------------------------------**--------------------------------------#
#  File Name: universal_clean_data.r
#  Purpose: load the universal clean data set and derive the modelling frames
#
#  Creation Date: 18-05-2015
#--------------------------------------**--------------------------------------#

# Shrinkage (beta-binomial-style) estimates of the three coupon-usage rates.
# `alpha.est` is the prior "successes" count; the prior "failures" count is
# fixed at 4 * alpha.est, i.e. a prior usage rate of 1/5.
getpests <- function(alpha.est) {
  beta.est <- 4 * alpha.est
  HTVset1 <- readRDS("~/dmc2015/data/featureMatrix/HTVset1.rds")
  denom <- nrow(HTVset1$H) + alpha.est + beta.est
  p1 <- (sum(HTVset1$H$coupon1Used) + alpha.est) / denom
  p2 <- (sum(HTVset1$H$coupon2Used) + alpha.est) / denom
  p3 <- (sum(HTVset1$H$coupon3Used) + alpha.est) / denom
  c(p1, p2, p3)
}

# Load the universal clean data set and return three frames:
#   d   - one row per order, coupons 1-3 in wide form, plus derived features
#   dm  - "melted": one row per (order, coupon), with a couponCol index
#   dmc - dm with the ":"-separated categoryIDs expanded and re-encoded
universal_clean_data <- function(path2ucd = "~/dmc2015/data/clean_data/universalCleanData.rds") {
  library(dplyr)  # hard dependency: use library(), not require()

  d <- readRDS(path2ucd)

  # NOTE(review): path case differs from the splitColumn.R source below
  # ("ian/R" vs "ian/r") -- verify on a case-sensitive filesystem.
  source("~/dmc2015/ian/R/clean_factor.r")
  d <- clean_factor(d, "couponID", "cpn")
  d <- clean_factor(d, "brand", "brand")
  d <- clean_factor(d, "productGroup", "prod")
  d <- clean_factor(d, "categoryIDs")

  # indicator and binned-time features
  d$ShopFast <- 1 * (d$TimeBtwnRecOrder < 28)
  d$EarlyRec <- 1 * (d$TimeBtwnSentRec < 8)
  d$Shop60 <- floor(d$orderTimeTime)
  d$Shop30 <- floor(d$orderTimeTime * 60 / 30) * 30 / 60
  d$Shop15 <- floor(d$orderTimeTime * 60 / 15) * 15 / 60
  d$RecExpire60 <- floor(d$TimeBtwnSentRec)
  d$RecOrder60 <- floor(d$TimeBtwnRecOrder)
  d$OrderExpire60 <- floor(d$TimeBtwnOrderExpire)

  # discount features, one per coupon
  d$basePrice_price_ratio1 <- d$basePrice1 / d$price1
  d$basePrice_price_ratio2 <- d$basePrice2 / d$price2
  d$basePrice_price_ratio3 <- d$basePrice3 / d$price3

  # reorder columns so the three coupon blocks are contiguous
  # (positions are hard-coded against the layout produced above)
  d <- d[, c(1, 3, 2, 43, 45, 4, 44, 46:49, 33:37, 38:42, 50:57,
             5:12, 58, 13:20, 59, 21:28, 60, 29:32)]

  # melt the three coupon blocks into one row per (order, coupon);
  # the coupon-number suffix is stripped so the stacked columns align
  d1 <- d[, c(1:29, 60, 30:38, 57)]
  names(d1)[31:40] <- gsub("1", "", names(d1)[31:40])
  d1$couponCol <- 1
  d2 <- d[, c(1:29, 60, 39:47, 58)]
  names(d2)[31:40] <- gsub("2", "", names(d2)[31:40])
  d2$couponCol <- 2
  d3 <- d[, c(1:29, 60, 48:56, 59)]
  names(d3)[31:40] <- gsub("3", "", names(d3)[31:40])
  d3$couponCol <- 3
  dm <- rbind(d1, d2) %>% rbind(d3)

  # expand the ":"-separated category list to one row per category
  source("~/dmc2015/ian/r/splitColumn.R")
  dmc <- splitColumn(dm, "categoryIDs", "orderID", splitby = ":")
  dmc <- dmc[, -which(names(dmc) == "categoryIDs")]
  dmc <- clean_factor(dmc, "categoryIDs", scrape_off = "cat")

  # basket-value grouping with breaks at 92, 137, 185
  d <- d %>% mutate(basketGroup = (1 * (basketValue > 185) +
                                   1 * (basketValue > 137) +
                                   1 * (basketValue > 92)))
  dm <- dm %>% mutate(basketGroup = (1 * (basketValue > 185) +
                                     1 * (basketValue > 137) +
                                     1 * (basketValue > 92)))

  # Removed dead code: the original computed p <- getpests(24) here but never
  # used the result (and it forced a file read); call getpests() directly
  # where the estimates are needed.
  list("d" = d, "dm" = dm, "dmc" = dmc)
}
/ian/universal_clean_data.r
no_license
imouzon/dmc2015
R
false
false
2,732
r
#--------------------------------------**--------------------------------------#
#  File Name: universal_clean_data.r
#  Purpose: load the universal clean data set and derive the modelling frames
#
#  Creation Date: 18-05-2015
#--------------------------------------**--------------------------------------#

# Shrinkage (beta-binomial-style) estimates of the three coupon-usage rates.
# `alpha.est` is the prior "successes" count; the prior "failures" count is
# fixed at 4 * alpha.est, i.e. a prior usage rate of 1/5.
getpests <- function(alpha.est) {
  beta.est <- 4 * alpha.est
  HTVset1 <- readRDS("~/dmc2015/data/featureMatrix/HTVset1.rds")
  denom <- nrow(HTVset1$H) + alpha.est + beta.est
  p1 <- (sum(HTVset1$H$coupon1Used) + alpha.est) / denom
  p2 <- (sum(HTVset1$H$coupon2Used) + alpha.est) / denom
  p3 <- (sum(HTVset1$H$coupon3Used) + alpha.est) / denom
  c(p1, p2, p3)
}

# Load the universal clean data set and return three frames:
#   d   - one row per order, coupons 1-3 in wide form, plus derived features
#   dm  - "melted": one row per (order, coupon), with a couponCol index
#   dmc - dm with the ":"-separated categoryIDs expanded and re-encoded
universal_clean_data <- function(path2ucd = "~/dmc2015/data/clean_data/universalCleanData.rds") {
  library(dplyr)  # hard dependency: use library(), not require()

  d <- readRDS(path2ucd)

  # NOTE(review): path case differs from the splitColumn.R source below
  # ("ian/R" vs "ian/r") -- verify on a case-sensitive filesystem.
  source("~/dmc2015/ian/R/clean_factor.r")
  d <- clean_factor(d, "couponID", "cpn")
  d <- clean_factor(d, "brand", "brand")
  d <- clean_factor(d, "productGroup", "prod")
  d <- clean_factor(d, "categoryIDs")

  # indicator and binned-time features
  d$ShopFast <- 1 * (d$TimeBtwnRecOrder < 28)
  d$EarlyRec <- 1 * (d$TimeBtwnSentRec < 8)
  d$Shop60 <- floor(d$orderTimeTime)
  d$Shop30 <- floor(d$orderTimeTime * 60 / 30) * 30 / 60
  d$Shop15 <- floor(d$orderTimeTime * 60 / 15) * 15 / 60
  d$RecExpire60 <- floor(d$TimeBtwnSentRec)
  d$RecOrder60 <- floor(d$TimeBtwnRecOrder)
  d$OrderExpire60 <- floor(d$TimeBtwnOrderExpire)

  # discount features, one per coupon
  d$basePrice_price_ratio1 <- d$basePrice1 / d$price1
  d$basePrice_price_ratio2 <- d$basePrice2 / d$price2
  d$basePrice_price_ratio3 <- d$basePrice3 / d$price3

  # reorder columns so the three coupon blocks are contiguous
  # (positions are hard-coded against the layout produced above)
  d <- d[, c(1, 3, 2, 43, 45, 4, 44, 46:49, 33:37, 38:42, 50:57,
             5:12, 58, 13:20, 59, 21:28, 60, 29:32)]

  # melt the three coupon blocks into one row per (order, coupon);
  # the coupon-number suffix is stripped so the stacked columns align
  d1 <- d[, c(1:29, 60, 30:38, 57)]
  names(d1)[31:40] <- gsub("1", "", names(d1)[31:40])
  d1$couponCol <- 1
  d2 <- d[, c(1:29, 60, 39:47, 58)]
  names(d2)[31:40] <- gsub("2", "", names(d2)[31:40])
  d2$couponCol <- 2
  d3 <- d[, c(1:29, 60, 48:56, 59)]
  names(d3)[31:40] <- gsub("3", "", names(d3)[31:40])
  d3$couponCol <- 3
  dm <- rbind(d1, d2) %>% rbind(d3)

  # expand the ":"-separated category list to one row per category
  source("~/dmc2015/ian/r/splitColumn.R")
  dmc <- splitColumn(dm, "categoryIDs", "orderID", splitby = ":")
  dmc <- dmc[, -which(names(dmc) == "categoryIDs")]
  dmc <- clean_factor(dmc, "categoryIDs", scrape_off = "cat")

  # basket-value grouping with breaks at 92, 137, 185
  d <- d %>% mutate(basketGroup = (1 * (basketValue > 185) +
                                   1 * (basketValue > 137) +
                                   1 * (basketValue > 92)))
  dm <- dm %>% mutate(basketGroup = (1 * (basketValue > 185) +
                                     1 * (basketValue > 137) +
                                     1 * (basketValue > 92)))

  # Removed dead code: the original computed p <- getpests(24) here but never
  # used the result (and it forced a file read); call getpests() directly
  # where the estimates are needed.
  list("d" = d, "dm" = dm, "dmc" = dmc)
}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/transfer_operations.R \name{transfer_start_server} \alias{transfer_start_server} \title{Changes the state of a Secure File Transfer Protocol (SFTP) server from OFFLINE to ONLINE} \usage{ transfer_start_server(ServerId) } \arguments{ \item{ServerId}{[required] A system-assigned unique identifier for an SFTP server that you start.} } \description{ Changes the state of a Secure File Transfer Protocol (SFTP) server from \code{OFFLINE} to \code{ONLINE}. It has no impact on an SFTP server that is already \code{ONLINE}. An \code{ONLINE} server can accept and process file transfer jobs. } \details{ The state of \code{STARTING} indicates that the server is in an intermediate state, either not fully able to respond, or not fully online. The values of \code{START_FAILED} can indicate an error condition. No response is returned from this call. } \section{Request syntax}{ \preformatted{svc$start_server( ServerId = "string" ) } } \keyword{internal}
/cran/paws.migration/man/transfer_start_server.Rd
permissive
johnnytommy/paws
R
false
true
1,031
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/transfer_operations.R \name{transfer_start_server} \alias{transfer_start_server} \title{Changes the state of a Secure File Transfer Protocol (SFTP) server from OFFLINE to ONLINE} \usage{ transfer_start_server(ServerId) } \arguments{ \item{ServerId}{[required] A system-assigned unique identifier for an SFTP server that you start.} } \description{ Changes the state of a Secure File Transfer Protocol (SFTP) server from \code{OFFLINE} to \code{ONLINE}. It has no impact on an SFTP server that is already \code{ONLINE}. An \code{ONLINE} server can accept and process file transfer jobs. } \details{ The state of \code{STARTING} indicates that the server is in an intermediate state, either not fully able to respond, or not fully online. The values of \code{START_FAILED} can indicate an error condition. No response is returned from this call. } \section{Request syntax}{ \preformatted{svc$start_server( ServerId = "string" ) } } \keyword{internal}
########################################################
##############         FUNCTIONS           #############
########################################################
# Load small helper functions for plotting with colorspace
library(colorspace)
library(plotrix)
library(pheatmap)

# Draw a vector of colours as a strip of rectangles (palette preview).
# Copied from "HCL-Based Color Palettes in R":
# http://cran.r-project.org/web/packages/colorspace/vignettes/hcl-colors.pdf
pal <- function(col, border = "light gray", ...) {
  n <- length(col)
  plot(0, 0, type = "n", xlim = c(0, 1), ylim = c(0, 1),
       axes = FALSE, xlab = "", ylab = "", ...)
  rect(0:(n - 1)/n, 0, 1:n/n, 1, col = col, border = border)
}

# Map numeric values to rainbow_hcl colours ("cut rainbow_hcl"):
# x is binned into n intervals and each bin indexes into the palette.
c.r.hcl <- function(x, n = 1000, ...) {
  xCut <- cut(x, breaks = n)
  colors <- rainbow_hcl(n = n, ...)
  out <- colors[xCut]
  return(out)
}

# Map numeric values to a (reversed) sequential_hcl palette
# ("cut sequential_hcl"). only.up.to < n chops the sequence short so that
# c.d.hcl can fade each half of a divergent palette proportionally.
c.s.hcl <- function(x, n = 1000, only.up.to = n, ...) {
  # guard against zero/one-length input when piped in from c.d.hcl
  xCut <- 1
  if (only.up.to > 1) xCut <- cut(x, breaks = only.up.to)
  colors <- sequential_hcl(n = n, ...)[n:(n - only.up.to)]  # reverse the order
  out <- colors[xCut]
  return(out)
}

# Map numeric values to a divergent palette ("cut divergent_hcl").
# `c` is the maximum chroma; `h` gives the hues for the negative (h[1]) and
# positive (h[2]) sides. Each side is faded relative to the larger side so
# colour saturation reflects magnitude consistently across zero.
c.d.hcl <- function(x, n = 1000, h = c(0, 260), c = 80, ...) {
  xNegInd <- which(x < 0)
  xPosInd <- which(x >= 0)
  nNeg <- length(xNegInd)
  nPos <- length(xPosInd)
  # size both palettes to the larger side so the smaller side only uses the
  # more saturated part of the spectrum
  biggerN <- max(nPos, nNeg)
  out <- rep(0, length(x))
  if (length(xNegInd) > 0) {
    out[xNegInd] <- c.s.hcl(abs(x[xNegInd]), n = biggerN, only.up.to = nNeg,
                            h = h[1], c = c(c, 0), ...)
  }
  if (length(xPosInd) > 0) {
    out[xPosInd] <- c.s.hcl(x[xPosInd], n = biggerN, only.up.to = nPos,
                            h = h[2], c = c(c, 0), ...)
  }
  return(out)
}

########################################################
##############         HEATMAPS            #############
########################################################
set.seed(148192)
setwd('~/Documents/JH/EDA Versions/EDA Git Repo/Coursera')

# preview the palettes
par(mfrow = c(3, 1))
testcol <- c.r.hcl(0:18 * 20, c = 60, l = 50)
pal(testcol)
mycol <- c.d.hcl(-80:80, h = c(295, 40), c = 120)
pal(mycol)
mycol <- c.d.hcl(-80:80, h = c(0, 260), c = 80, l = c(30, 90))
pal(mycol)

# Simulation design: 6 different group sizes (2:7) crossed with
# 3 difficulty levels (different max magnitudes of signal).
N <- 40
p <- 500
sd.x <- 1
groupsizes <- rep(2:7, times = 3)
magnitudes <- rep(c(3, 2, 1) * sd.x, each = 6)
Xes <- list()
maxIndex <- 6 * 3
pb <- txtProgressBar(min = 1, max = maxIndex, char = "=", style = 3)
for (i in seq_along(magnitudes)) {
  setTxtProgressBar(pb, i)
  mag.i <- magnitudes[i]       # magnitude of max possible signal
  ngroups.i <- groupsizes[i]
  X <- matrix(rnorm(N * p, mean = 0, sd = sd.x), nrow = p)

  # assign groups, re-drawing until every group has at least 4 members
  smallestGroupSize <- 0
  while (smallestGroupSize < 4) {
    groupId <- sample(ngroups.i, N, replace = TRUE)
    smallestGroupSize <- min(table(groupId))
  }

  # add a uniform signal to every group except the first (reference)
  for (j in 2:ngroups.i) {
    sig.j <- runif(p, -1 * mag.i, mag.i)
    X[, groupId == j] <- X[, groupId == j] + sig.j
  }

  QNum <- which(unique(magnitudes) == mag.i)
  VerNum <- which(unique(groupsizes) == ngroups.i)

  # use heatmap() only to obtain the row ordering, then redraw with pheatmap
  X.ind <- heatmap(X, col = mycol)
  dev.off()
  XSort <- X[X.ind$rowInd[p:1], ]
  filename.i <- paste0('Heatmap_Images/', 'Q-', QNum, '_Ver-', VerNum,
                       '_Mag-', mag.i, '_ngroups-', ngroups.i, '.png')
  png(filename = filename.i, height = 600, width = 600)
  pheatmap(XSort, cluster_rows = FALSE, col = mycol)
  dev.off()

  filename.i <- paste0('Dendrogram_Images/', 'Q-', QNum, '_Ver-', VerNum,
                       '_Mag-', mag.i, '_ngroups-', ngroups.i, '.png')
  png(filename = filename.i, height = 450, width = 450)
  dx <- dist(t(X))
  plot(hclust(dx), xlab = 'ID Number', sub = '', ylab = 'Distance')
  dev.off()

  Xes[[i]] <- X
}
save.image('Clustering_coursera.RData')

############################################################
############################################################
# Workspace (interactive scratch code; quartz() is macOS-only)
X.ind <- heatmap(X, col = mycol)
XSort <- X[X.ind$rowInd[p:1], X.ind$colInd]
image(X)
image(XSort)
dev.off()
quartz()
png(filename = 'normTest.png')
heatmap(X, col = mycol)
dev.off()
#quartz()
#pheatmap(X,col=mycol,cluster_rows=FALSE)
quartz()
png(filename = 'prettyTest.png')
pheatmap(XSort, col = mycol, cluster_rows = FALSE, cluster_cols = TRUE)
dev.off()
xind2 <- pheatmap(XSort, col = mycol, cluster_rows = FALSE, cluster_cols = TRUE)
quartz()
plot(xind2[[2]])
# hclust is doing well!!!!
X.ind <- heatmap(X, col = mycol)
XSort <- X[X.ind$rowInd[p:1], ]
pheathcl <- pheatmap(XSort, cluster_rows = FALSE, plot = FALSE)
par(mfrow = c(2, 1))
dx <- dist(t(X))
plot(hclust(dx))
plot(pheathcl[[2]])
/Coursera/heatmaps.R
no_license
Adamyazori/EDA-Project
R
false
false
4,576
r
########################################################
##############         FUNCTIONS           #############
########################################################
# Load small helper functions for plotting with colorspace
library(colorspace)
library(plotrix)
library(pheatmap)

# Draw a vector of colours as a strip of rectangles (palette preview).
# Copied from "HCL-Based Color Palettes in R":
# http://cran.r-project.org/web/packages/colorspace/vignettes/hcl-colors.pdf
pal <- function(col, border = "light gray", ...) {
  n <- length(col)
  plot(0, 0, type = "n", xlim = c(0, 1), ylim = c(0, 1),
       axes = FALSE, xlab = "", ylab = "", ...)
  rect(0:(n - 1)/n, 0, 1:n/n, 1, col = col, border = border)
}

# Map numeric values to rainbow_hcl colours ("cut rainbow_hcl"):
# x is binned into n intervals and each bin indexes into the palette.
c.r.hcl <- function(x, n = 1000, ...) {
  xCut <- cut(x, breaks = n)
  colors <- rainbow_hcl(n = n, ...)
  out <- colors[xCut]
  return(out)
}

# Map numeric values to a (reversed) sequential_hcl palette
# ("cut sequential_hcl"). only.up.to < n chops the sequence short so that
# c.d.hcl can fade each half of a divergent palette proportionally.
c.s.hcl <- function(x, n = 1000, only.up.to = n, ...) {
  # guard against zero/one-length input when piped in from c.d.hcl
  xCut <- 1
  if (only.up.to > 1) xCut <- cut(x, breaks = only.up.to)
  colors <- sequential_hcl(n = n, ...)[n:(n - only.up.to)]  # reverse the order
  out <- colors[xCut]
  return(out)
}

# Map numeric values to a divergent palette ("cut divergent_hcl").
# `c` is the maximum chroma; `h` gives the hues for the negative (h[1]) and
# positive (h[2]) sides. Each side is faded relative to the larger side so
# colour saturation reflects magnitude consistently across zero.
c.d.hcl <- function(x, n = 1000, h = c(0, 260), c = 80, ...) {
  xNegInd <- which(x < 0)
  xPosInd <- which(x >= 0)
  nNeg <- length(xNegInd)
  nPos <- length(xPosInd)
  # size both palettes to the larger side so the smaller side only uses the
  # more saturated part of the spectrum
  biggerN <- max(nPos, nNeg)
  out <- rep(0, length(x))
  if (length(xNegInd) > 0) {
    out[xNegInd] <- c.s.hcl(abs(x[xNegInd]), n = biggerN, only.up.to = nNeg,
                            h = h[1], c = c(c, 0), ...)
  }
  if (length(xPosInd) > 0) {
    out[xPosInd] <- c.s.hcl(x[xPosInd], n = biggerN, only.up.to = nPos,
                            h = h[2], c = c(c, 0), ...)
  }
  return(out)
}

########################################################
##############         HEATMAPS            #############
########################################################
set.seed(148192)
setwd('~/Documents/JH/EDA Versions/EDA Git Repo/Coursera')

# preview the palettes
par(mfrow = c(3, 1))
testcol <- c.r.hcl(0:18 * 20, c = 60, l = 50)
pal(testcol)
mycol <- c.d.hcl(-80:80, h = c(295, 40), c = 120)
pal(mycol)
mycol <- c.d.hcl(-80:80, h = c(0, 260), c = 80, l = c(30, 90))
pal(mycol)

# Simulation design: 6 different group sizes (2:7) crossed with
# 3 difficulty levels (different max magnitudes of signal).
N <- 40
p <- 500
sd.x <- 1
groupsizes <- rep(2:7, times = 3)
magnitudes <- rep(c(3, 2, 1) * sd.x, each = 6)
Xes <- list()
maxIndex <- 6 * 3
pb <- txtProgressBar(min = 1, max = maxIndex, char = "=", style = 3)
for (i in seq_along(magnitudes)) {
  setTxtProgressBar(pb, i)
  mag.i <- magnitudes[i]       # magnitude of max possible signal
  ngroups.i <- groupsizes[i]
  X <- matrix(rnorm(N * p, mean = 0, sd = sd.x), nrow = p)

  # assign groups, re-drawing until every group has at least 4 members
  smallestGroupSize <- 0
  while (smallestGroupSize < 4) {
    groupId <- sample(ngroups.i, N, replace = TRUE)
    smallestGroupSize <- min(table(groupId))
  }

  # add a uniform signal to every group except the first (reference)
  for (j in 2:ngroups.i) {
    sig.j <- runif(p, -1 * mag.i, mag.i)
    X[, groupId == j] <- X[, groupId == j] + sig.j
  }

  QNum <- which(unique(magnitudes) == mag.i)
  VerNum <- which(unique(groupsizes) == ngroups.i)

  # use heatmap() only to obtain the row ordering, then redraw with pheatmap
  X.ind <- heatmap(X, col = mycol)
  dev.off()
  XSort <- X[X.ind$rowInd[p:1], ]
  filename.i <- paste0('Heatmap_Images/', 'Q-', QNum, '_Ver-', VerNum,
                       '_Mag-', mag.i, '_ngroups-', ngroups.i, '.png')
  png(filename = filename.i, height = 600, width = 600)
  pheatmap(XSort, cluster_rows = FALSE, col = mycol)
  dev.off()

  filename.i <- paste0('Dendrogram_Images/', 'Q-', QNum, '_Ver-', VerNum,
                       '_Mag-', mag.i, '_ngroups-', ngroups.i, '.png')
  png(filename = filename.i, height = 450, width = 450)
  dx <- dist(t(X))
  plot(hclust(dx), xlab = 'ID Number', sub = '', ylab = 'Distance')
  dev.off()

  Xes[[i]] <- X
}
save.image('Clustering_coursera.RData')

############################################################
############################################################
# Workspace (interactive scratch code; quartz() is macOS-only)
X.ind <- heatmap(X, col = mycol)
XSort <- X[X.ind$rowInd[p:1], X.ind$colInd]
image(X)
image(XSort)
dev.off()
quartz()
png(filename = 'normTest.png')
heatmap(X, col = mycol)
dev.off()
#quartz()
#pheatmap(X,col=mycol,cluster_rows=FALSE)
quartz()
png(filename = 'prettyTest.png')
pheatmap(XSort, col = mycol, cluster_rows = FALSE, cluster_cols = TRUE)
dev.off()
xind2 <- pheatmap(XSort, col = mycol, cluster_rows = FALSE, cluster_cols = TRUE)
quartz()
plot(xind2[[2]])
# hclust is doing well!!!!
X.ind <- heatmap(X, col = mycol)
XSort <- X[X.ind$rowInd[p:1], ]
pheathcl <- pheatmap(XSort, cluster_rows = FALSE, plot = FALSE)
par(mfrow = c(2, 1))
dx <- dist(t(X))
plot(hclust(dx))
plot(pheathcl[[2]])
\name{parse_ns_file} \alias{parse_ns_file} \title{Parses the NAMESPACE file for a package} \usage{ parse_ns_file(pkg = ".") } \arguments{ \item{pkg}{package description, can be path or package name. See \code{\link{as.package}} for more information} } \description{ Parses the NAMESPACE file for a package } \examples{ if (has_tests()) { parse_ns_file(devtest("testLoadHooks")) } } \keyword{internal}
/devtoolsVersion/devtools 14/man/parse_ns_file.Rd
no_license
connectthefuture/devtools-R-Forge
R
false
false
411
rd
\name{parse_ns_file} \alias{parse_ns_file} \title{Parses the NAMESPACE file for a package} \usage{ parse_ns_file(pkg = ".") } \arguments{ \item{pkg}{package description, can be path or package name. See \code{\link{as.package}} for more information} } \description{ Parses the NAMESPACE file for a package } \examples{ if (has_tests()) { parse_ns_file(devtest("testLoadHooks")) } } \keyword{internal}
% Generated by roxygen2 (4.0.2.9000): do not edit by hand % Please edit documentation in R/import_stage.r \name{build_import_stagerunner} \alias{build_import_stagerunner} \title{Build a stagerunner for importing data with backup sources.} \usage{ build_import_stagerunner(import_options) } \arguments{ \item{import_options}{list. Nested list, one adapter per list entry. These adapter parametrizations will get converted to legitimate IO adapters. (See the "adapter" reference class.)} } \description{ Build a stagerunner for importing data with backup sources. }
/man/build_import_stagerunner.Rd
permissive
kirillseva/syberiaStages
R
false
false
565
rd
% Generated by roxygen2 (4.0.2.9000): do not edit by hand % Please edit documentation in R/import_stage.r \name{build_import_stagerunner} \alias{build_import_stagerunner} \title{Build a stagerunner for importing data with backup sources.} \usage{ build_import_stagerunner(import_options) } \arguments{ \item{import_options}{list. Nested list, one adapter per list entry. These adapter parametrizations will get converted to legitimate IO adapters. (See the "adapter" reference class.)} } \description{ Build a stagerunner for importing data with backup sources. }
library(MPV) ### Name: tarimage ### Title: target image ### Aliases: tarimage ### Keywords: datasets ### ** Examples with(tarimage, image(x, y, xy))
/data/genthat_extracted_code/MPV/examples/tarimage.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
156
r
library(MPV) ### Name: tarimage ### Title: target image ### Aliases: tarimage ### Keywords: datasets ### ** Examples with(tarimage, image(x, y, xy))
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/makeSeltxt.R \name{makeSeltxt} \alias{makeSeltxt} \title{makeSeltxt} \usage{ makeSeltxt( size_csv, size_col = "Measured.Length..cm.", fishery_col = "Fishery", time_col = "Year", sex = F, sex_col = "Sex", sex_vals = c("F", "M"), lBins = seq(50, 250, 5) ) } \arguments{ \item{size_csv}{csv with capture information; each row should be a fish, sex optional} \item{size_col}{character name of column with measurement information; will drop NA rows} \item{fishery_col}{character name of column with fishery names; used to form cuts} \item{time_col}{character name of column by which time should be grouped; currently only supports one value (e.g. 'year')} \item{sex}{logical. should we aggregate by sex? if so, the resultant text file will print females and males in that order} \item{sex_col}{character name of column with sexes} \item{sex_vals}{vector of sex designations, female then male} \item{lBins}{vector of bin breaks} } \description{ \code{makeSeltxt} reshapes a dataframe with size-at-capture data into a selectivity text file suitable for SS3 }
/man/makeSeltxt.Rd
no_license
mkapur/kaputils
R
false
true
1,153
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/makeSeltxt.R \name{makeSeltxt} \alias{makeSeltxt} \title{makeSeltxt} \usage{ makeSeltxt( size_csv, size_col = "Measured.Length..cm.", fishery_col = "Fishery", time_col = "Year", sex = F, sex_col = "Sex", sex_vals = c("F", "M"), lBins = seq(50, 250, 5) ) } \arguments{ \item{size_csv}{csv with capture information; each row should be a fish, sex optional} \item{size_col}{character name of column with measurement information; will drop NA rows} \item{fishery_col}{character name of column with fishery names; used to form cuts} \item{time_col}{character name of column by which time should be grouped; currently only supports one value (e.g. 'year')} \item{sex}{logical. should we aggregate by sex? if so, the resultant text file will print females and males in that order} \item{sex_col}{character name of column with sexes} \item{sex_vals}{vector of sex designations, female then male} \item{lBins}{vector of bin breaks} } \description{ \code{makeSeltxt} reshapes a dataframe with size-at-capture data into a selectivity text file suitable for SS3 }
# Evaluates either a hyperparameter setting or a feature set by resampling.
# `state` must already be in the correct format: a named list of parameter
# values (tuning) or a named 0/1 integer vector (feature selection).
# Logs the point and its results via control$log.fun.
# Returns a list with the y-value(s), execution time, tuned threshold,
# and a potential error message.
evalOptimizationState = function(learner, task, resampling, measures, par.set, bits.to.features,
  control, opt.path, show.info, dob, state, remove.nas) {

  setSlaveOptions()
  # Initialise every output to NA so a failed evaluation still yields a
  # well-formed result; names follow the aggregated measure names.
  y = setNames(rep(NA_real_, length(measures)), vcapply(measures, measureAggrName))
  errmsg = NA_character_
  exec.time = NA_real_
  set.pars.ok = TRUE
  learner2 = learner
  threshold = NULL
  log.fun = control$log.fun

  if (inherits(control, "TuneControl") || inherits(control, "TuneMultiCritControl")) {
    # set names before trafo
    state = setValueCNames(par.set, state)
    # transform parameters
    state = trafoValue(par.set, state)
    # remove NAs for dependencies
    state2 = if (remove.nas) removeMissingValues(state) else state
    learner2 = try(setHyperPars(learner, par.vals = state2), silent = TRUE)
    # if somebody above (eg tuner) produced bad settings, we catch this here and dont eval
    if (is.error(learner2)) {
      set.pars.ok = FALSE
      errmsg = as.character(learner2)
      if (show.info)
        messagef("[Tune-x] Setting hyperpars failed: %s", errmsg)
    }
  } else if (inherits(control, "FeatSelControl")) {
    # Feature selection: restrict the task to the features encoded in `state`.
    task = subsetTask(task, features = bits.to.features(state, task))
  }

  # if no problems: resample + measure time
  if (show.info)
    prev.stage = log.fun(learner, task, resampling, measures, par.set, control, opt.path,
      dob, state, NA_real_, remove.nas, stage = 1L)
  if (set.pars.ok) {
    exec.time = measureTime({
      r = resample(learner2, task, resampling, measures = measures, show.info = FALSE)
    })
    if (control$tune.threshold) {
      th.args = control$tune.threshold.args
      th.args$pred = r$pred
      th.args$measure = measures[[1L]]
      tune.th.res = do.call(tuneThreshold, th.args)
      threshold = tune.th.res$th
      # we need to eval 1 final time here, as tuneThreshold only works with 1 measure,
      # but we need yvec for all measures
      y = performance(setThreshold(r$pred, threshold = threshold), measures = measures)
      # names from resample are slightly different, set them correctly here
      names(y) = names(r$aggr)
    } else {
      y = r$aggr
    }
    # sort msgs by iters, so iter1, iter2, ...
    # (first column of err.msgs is the iteration index, hence dropped)
    errmsgs = as.character(t(r$err.msgs[, -1L]))
    notna = !is.na(errmsgs)
    if (any(notna))
      errmsg = errmsgs[notna][1L]
  } else {
    # we still need to define a non-NULL threshold, if tuning it was requested
    if (control$tune.threshold)
      threshold = NA_real_
  }
  # if eval was not ok, everything should have been initialized to NAs
  if (show.info)
    log.fun(learner, task, resampling, measures, par.set, control, opt.path,
      dob, state, y, remove.nas, stage = 2L, prev.stage = prev.stage)
  list(y = y, exec.time = exec.time, errmsg = errmsg, threshold = threshold)
}

# Evaluates a list of states by calling evalOptimizationState on each one.
# States must already be in correct format, either a named list of values or a
# named integer vector for features. Evaluation may run in parallel (parallelMap).
# Logs each point, appends the results to `opt.path`, and
# returns a list of lists: the single eval results.
evalOptimizationStates = function(learner, task, resampling, measures, par.set, bits.to.features,
  control, opt.path, show.info, states, dobs, eols, remove.nas, level) {

  n = length(states)
  # Recycle scalar dob / eol markers so there is one per state.
  if (length(dobs) == 1L)
    dobs = rep(dobs, n)
  if (length(eols) == 1L)
    eols = rep(eols, n)
  parallelLibrary("mlr", master = FALSE, level = level, show.info = FALSE)
  exportMlrOptions(level = level)
  res.list = parallelMap(evalOptimizationState, dobs, states, level = level,
    more.args = list(learner = learner, task = task, resampling = resampling,
      measures = measures, par.set = par.set, bits.to.features = bits.to.features,
      control = control, opt.path = opt.path, show.info = show.info,
      remove.nas = remove.nas))
  # add stuff to opt.path
  for (i in seq_len(n)) {
    res = res.list[[i]]
    if (control$tune.threshold) {
      # add class names to threshold, if longer than 1
      extra = as.list(res$threshold)
      names(extra) = stri_paste("threshold", ifelse(length(extra) > 1L, ".", ""),
        names(extra), ignore_null = TRUE)
    } else {
      extra = NULL
    }
    addOptPathEl(opt.path, x = as.list(states[[i]]), y = res$y,
      exec.time = res$exec.time, error.message = res$errmsg,
      dob = dobs[i], eol = eols[i], check.feasible = TRUE, extra = extra)
  }
  return(res.list)
}

# Tuning front end: fixed opt.path level "mlr.tuneParams", no bit mapping needed.
evalOptimizationStatesTune = function(learner, task, resampling, measures, par.set, control,
  opt.path, show.info, states, dobs, eols, remove.nas) {
  evalOptimizationStates(learner, task, resampling, measures, par.set, NULL, control,
    opt.path, show.info, states, dobs, eols, remove.nas, "mlr.tuneParams")
}

# Feature-selection front end: fixed level "mlr.selectFeatures",
# no par.set and never removes NAs.
evalOptimizationStatesFeatSel = function(learner, task, resampling, measures, bits.to.features,
  control, opt.path, show.info, states, dobs, eols) {
  evalOptimizationStates(learner, task, resampling, measures, NULL, bits.to.features, control,
    opt.path, show.info, states, dobs, eols, FALSE, "mlr.selectFeatures")
}
/R/evalOptimizationState.R
no_license
csjosiah/mlr
R
false
false
5,323
r
# evaluates either a hyperpar setting or a feature set by resampling # must be already in correct format, either a named list of values or a named integer vector for features # logs point and results # return y-value(s), exectime, and potential erorr msg evalOptimizationState = function(learner, task, resampling, measures, par.set, bits.to.features, control, opt.path, show.info, dob, state, remove.nas) { setSlaveOptions() y = setNames(rep(NA_real_, length(measures)), vcapply(measures, measureAggrName)) errmsg = NA_character_ exec.time = NA_real_ set.pars.ok = TRUE learner2 = learner threshold = NULL log.fun = control$log.fun if (inherits(control, "TuneControl") || inherits(control, "TuneMultiCritControl")) { # set names before trafo state = setValueCNames(par.set, state) # transform parameters state = trafoValue(par.set, state) # remove NAs for dependencies state2 = if (remove.nas) removeMissingValues(state) else state learner2 = try(setHyperPars(learner, par.vals = state2), silent = TRUE) # if somebody above (eg tuner) prodcued bad settings, we catch this here and dont eval if (is.error(learner2)) { set.pars.ok = FALSE errmsg = as.character(learner2) if (show.info) messagef("[Tune-x] Setting hyperpars failed: %s", errmsg) } } else if (inherits(control, "FeatSelControl")) { task = subsetTask(task, features = bits.to.features(state, task)) } # if no problems: resample + measure time if (show.info) prev.stage = log.fun(learner, task, resampling, measures, par.set, control, opt.path, dob, state, NA_real_, remove.nas, stage = 1L) if (set.pars.ok) { exec.time = measureTime({ r = resample(learner2, task, resampling, measures = measures, show.info = FALSE) }) if (control$tune.threshold) { th.args = control$tune.threshold.args th.args$pred = r$pred th.args$measure = measures[[1L]] tune.th.res = do.call(tuneThreshold, th.args) threshold = tune.th.res$th # we need to eval 1 final time here, as tuneThreshold only works with 1 measure, # but we need yvec for all measures y = 
performance(setThreshold(r$pred, threshold = threshold), measures = measures) # names from resample are slightly different, set them correctly here names(y) = names(r$aggr) } else { y = r$aggr } # sort msgs by iters, so iter1, iter2, ... errmsgs = as.character(t(r$err.msgs[, -1L])) notna = !is.na(errmsgs) if (any(notna)) errmsg = errmsgs[notna][1L] } else { # we still need to define a non-NULL threshold, if tuning it was requested if (control$tune.threshold) threshold = NA_real_ } # if eval was not ok, everything should have been initailized to NAs if (show.info) log.fun(learner, task, resampling, measures, par.set, control, opt.path, dob, state, y, remove.nas, stage = 2L, prev.stage = prev.stage) list(y = y, exec.time = exec.time, errmsg = errmsg, threshold = threshold) } # evaluates a list of states by calling evalOptimizationState # must be already in correct format, either a named list of values or a named integer vector for features # might be done in parallel # logs point and results # adds points to path # returns list of lists, the single eval results evalOptimizationStates = function(learner, task, resampling, measures, par.set, bits.to.features, control, opt.path, show.info, states, dobs, eols, remove.nas, level) { n = length(states) if (length(dobs) == 1L) dobs = rep(dobs, n) if (length(eols) == 1L) eols = rep(eols, n) parallelLibrary("mlr", master = FALSE, level = level, show.info = FALSE) exportMlrOptions(level = level) res.list = parallelMap(evalOptimizationState, dobs, states, level = level, more.args = list(learner = learner, task = task, resampling = resampling, measures = measures, par.set = par.set, bits.to.features = bits.to.features, control = control, opt.path = opt.path, show.info = show.info, remove.nas = remove.nas)) # add stuff to opt.path for (i in seq_len(n)) { res = res.list[[i]] if (control$tune.threshold) { # add class names to threshold, if longer than 1 extra = as.list(res$threshold) names(extra) = stri_paste("threshold", 
ifelse(length(extra) > 1L, ".", ""), names(extra), ignore_null = TRUE) } else { extra = NULL } addOptPathEl(opt.path, x = as.list(states[[i]]), y = res$y, exec.time = res$exec.time, error.message = res$errmsg, dob = dobs[i], eol = eols[i], check.feasible = TRUE, extra = extra) } return(res.list) } evalOptimizationStatesTune = function(learner, task, resampling, measures, par.set, control, opt.path, show.info, states, dobs, eols, remove.nas) { evalOptimizationStates(learner, task, resampling, measures, par.set, NULL, control, opt.path, show.info, states, dobs, eols, remove.nas, "mlr.tuneParams") } evalOptimizationStatesFeatSel = function(learner, task, resampling, measures, bits.to.features, control, opt.path, show.info, states, dobs, eols) { evalOptimizationStates(learner, task, resampling, measures, NULL, bits.to.features, control, opt.path, show.info, states, dobs, eols, FALSE, "mlr.selectFeatures") }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/DVARS.R \name{DVARS} \alias{DVARS} \title{DVARS} \usage{ DVARS( X, normalize = TRUE, cutoff_DPD = 5, cutoff_ZD = qnorm(1 - 0.05/nrow(as.matrix2(X))), verbose = FALSE ) } \arguments{ \item{X}{a \eqn{T} by \eqn{N} numeric matrix representing an fMRI run. There should not be any missing data (\code{NA} or \code{NaN}).} \item{normalize}{Normalize the data as proposed in the paper? Default: \code{TRUE}. Normalization removes constant-zero voxels, scales by 100 / the median of the mean image, and then centers each voxel on its mean. To replicate Afyouni and Nichols' procedure for the HCP MPP data, since the HCP scans are already normalized to 10,000, just divide the data by 100 and center the voxels on their means: \code{Y <- Y/100; DVARS(t(Y - apply(Y, 1, mean)))} where \code{Y} is the \eqn{V} by \eqn{T} data matrix. Note that while voxel centering doesn't affect DVARS, it does affect DPD and ZD.} \item{cutoff_DPD, cutoff_ZD}{Numeric outlier cutoffs. Timepoints exceeding these cutoffs will be flagged as outliers.} \item{verbose}{Should occasional updates be printed? Default is \code{FALSE}.} } \value{ A list with components \describe{ \item{measure}{A data.frame with \eqn{T} rows, each column being a different variant of DVARS.} \item{measure_info}{"DVARS"} \item{outlier_cutoff}{The outlier cutoff value(s).} \item{outlier_flag}{A logical data.frame with \eqn{T} rows, where \code{TRUE} indicates suspected outlier presence.} } } \description{ Computes the DSE decomposition and DVARS-related statistics. Based on code from github.com/asoroosh/DVARS . } \section{References}{ \itemize{ \item{Afyouni, S. & Nichols, T. E. Insight and inference for DVARS. NeuroImage 172, 291-312 (2018).} } }
/man/DVARS.Rd
no_license
neuroconductor/fMRIscrub
R
false
true
1,803
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/DVARS.R \name{DVARS} \alias{DVARS} \title{DVARS} \usage{ DVARS( X, normalize = TRUE, cutoff_DPD = 5, cutoff_ZD = qnorm(1 - 0.05/nrow(as.matrix2(X))), verbose = FALSE ) } \arguments{ \item{X}{a \eqn{T} by \eqn{N} numeric matrix representing an fMRI run. There should not be any missing data (\code{NA} or \code{NaN}).} \item{normalize}{Normalize the data as proposed in the paper? Default: \code{TRUE}. Normalization removes constant-zero voxels, scales by 100 / the median of the mean image, and then centers each voxel on its mean. To replicate Afyouni and Nichols' procedure for the HCP MPP data, since the HCP scans are already normalized to 10,000, just divide the data by 100 and center the voxels on their means: \code{Y <- Y/100; DVARS(t(Y - apply(Y, 1, mean)))} where \code{Y} is the \eqn{V} by \eqn{T} data matrix. Note that while voxel centering doesn't affect DVARS, it does affect DPD and ZD.} \item{cutoff_DPD, cutoff_ZD}{Numeric outlier cutoffs. Timepoints exceeding these cutoffs will be flagged as outliers.} \item{verbose}{Should occasional updates be printed? Default is \code{FALSE}.} } \value{ A list with components \describe{ \item{measure}{A data.frame with \eqn{T} rows, each column being a different variant of DVARS.} \item{measure_info}{"DVARS"} \item{outlier_cutoff}{The outlier cutoff value(s).} \item{outlier_flag}{A logical data.frame with \eqn{T} rows, where \code{TRUE} indicates suspected outlier presence.} } } \description{ Computes the DSE decomposition and DVARS-related statistics. Based on code from github.com/asoroosh/DVARS . } \section{References}{ \itemize{ \item{Afyouni, S. & Nichols, T. E. Insight and inference for DVARS. NeuroImage 172, 291-312 (2018).} } }
#' Display pokemon palettes.
#'
#' Display 10 pokemon palettes starting from a name or number.
#' When neither is supplied, 10 of the better palettes are shown.
#' Pokedex is a Trademark of Nintendo.
#'
#'@inheritParams pokepal
#'@param cb A number between 1 and 4 to select ten of 40 colourblind friendly
#' (Deuteranomaly) palettes. \code{pokemon} is ignored if used, but \code{spread}
#' works as normal.
#'
#'@name pokedex
#'@details When \code{spread} is an integer, each full palette is
#' clustered into that many groups (ward clustering in HSV space) and the
#' most common colour of each cluster is returned, balancing fidelity to
#' the pokemon's colouring against colour distinctness.
#'
#' Thanks to Luis Verde for the colourblind suitable selection.
#'@examples
#'pokedex()
#'pokedex('Metapod')
#'pokedex(5, spread = 2)
#'pokedex(cb = 3)
#'pokedex(cb = 2, spread = 6)
#'@export
pokedex <- function(pokemon = NULL, spread = NULL, cb = NULL){
  # Default showcase: a hand-picked set of ten palettes.
  if (is.null(pokemon)) {
    dex_ids <- c(6, 17, 114, 137, 156, 191, 193, 283, 311, 318)
  }

  colourblindFriendly <- c(1, 2, 3, 9, 10, 12, 18, 19, 29, 32, 39, 42, 43, 44, 61,
    63, 65, 66, 69, 72, 73, 101, 109, 107, 109, 110, 116, 126, 128, 130, 131, 134,
    135, 136, 140, 141, 149, 150, 156, 157)

  # cb selects one of four blocks of ten colourblind-friendly palettes and
  # overrides any pokemon argument.
  if (is.numeric(cb)) {
    if (cb > 4) stop('cb must be between 1 and 4')
    pokemon <- NULL
    offset <- (cb - 1) * 10
    dex_ids <- colourblindFriendly[(offset + 1):(offset + 10)]
  }

  # ensure lower case.
  if (is.character(pokemon)) {
    pokemon <- tolower(pokemon)
  }

  # A number starts the run at that pokedex number; a name is looked up first.
  if (is.numeric(pokemon)) {
    dex_ids <- pokemon:(pokemon + 9)
  } else if (is.character(pokemon)) {
    first <- which(names(pokeColours) == pokemon)
    dex_ids <- first:(first + 9)
  }

  # Palette sizes, optionally capped at `spread`.
  palette_sizes <- vapply(dex_ids, function(id) length(pokepal(id)), integer(1))
  if (!is.null(spread)) {
    palette_sizes[palette_sizes > spread] <- spread
  }

  x_max <- max(palette_sizes)
  oldpar <- graphics::par(mgp = c(2, 0.25, 0), mai = c(1.02, 0.82, 0.82, 0.42))
  on.exit(graphics::par(oldpar))
  graphics::par(mar = c(1, 7, 1, 1))
  graphics::plot(1, 1, xlim = c(0, x_max), ylim = c(0, 10), type = "n",
    axes = FALSE, bty = "n", xlab = "", ylab = "")

  # One row of colour swatches per palette, top to bottom.
  for (row in seq_len(10)) {
    n_here <- palette_sizes[row]
    # NULL asks pokepal for the full palette; otherwise request the capped size.
    n_requested <- if (is.null(spread)) NULL else n_here
    graphics::rect(xleft = 0:(n_here - 1), ybottom = 10 - row,
      xright = 1:n_here, ytop = 11 - row - 0.2,
      col = pokepal(dex_ids[row], n_requested), border = "light grey")
  }

  # Left-hand labels: "<number> : <name>".
  graphics::text(rep(-0.1, 10), (10:1) - 0.6,
    labels = paste(dex_ids, ':', names(pokeColours)[dex_ids]),
    xpd = TRUE, adj = 1)
}
/R/pokedex.R
no_license
RDAdams/palettetown
R
false
false
2,741
r
#' Display pokemon palettes. #' #' Display 10 pokemon palettes starting from a name or number. #' If no name or number is given, 10 of the better palettes are #' displayed. Pokedex is a Trademark of Nintendo. #' #'@inheritParams pokepal #'@param cb A number between 1 and 4 to select ten of 40 colourblind friendly #' (Deuteranomaly) palettes. \code{pokemon} is ignored if used, but \code{spread} #' works as normal. #' #'@name pokedex #'@details If \code{spread} is given an integer, the full palette is #' clustered into that many groups (ward clustering in HSV space). #' The most common colour in each cluster is then returned. It is #' hoped this will give a good balance between reflecting the pokemons #' colouring while giving relatively distinct colours. #' #' Thanks to Luis Verde for the colourblind suitable selection. #'@examples #'pokedex() #'pokedex('Metapod') #'pokedex(5, spread = 2) #'pokedex(cb = 3) #'pokedex(cb = 2, spread = 6) #'@export pokedex <- function(pokemon = NULL, spread = NULL, cb = NULL){ if(is.null(pokemon)){ pokeNs <- c(6, 17, 114, 137, 156, 191, 193, 283, 311, 318) } colourblindFriendly <- c(1,2,3,9,10,12,18,19,29,32,39,42,43,44,61, 63,65,66,69,72,73,101,109,107,109,110,116,126,128,130,131,134, 135,136,140,141,149,150,156,157) if(is.numeric(cb)){ if(cb > 4) stop('cb must be between 1 and 4') pokemon <- NULL pokeNs <- colourblindFriendly[((cb - 1) * 10 + 1):((cb - 1) * 10 + 10)] } # ensure lower case. 
if(is.character(pokemon)){ pokemon <- tolower(pokemon) } if(is.numeric(pokemon)){ pokeNs <- pokemon:(pokemon + 9) } else if(is.character(pokemon)){ start <- which(names(pokeColours) == pokemon) pokeNs <- start:(start + 9) } if(is.null(spread)){ nCols <- sapply(pokeNs, function(x) length(pokepal(x))) } else { nCols <- sapply(pokeNs, function(x) length(pokepal(x))) nCols[nCols > spread] <- spread } xlim <- max(nCols) oldpar <- graphics::par(mgp = c(2, 0.25, 0), mai=c(1.02,0.82,0.82,0.42)) on.exit(graphics::par(oldpar)) graphics::par(mar = c(1, 7, 1, 1)) graphics::plot(1,1,xlim=c(0,xlim), ylim=c(0, 10), type="n", axes=FALSE, bty="n", xlab="", ylab="") for(i in 1:10){ if(is.null(spread)){ nColours <- NULL } else { nColours <- nCols[i] } graphics::rect(xleft = 0:(nCols[i] - 1), ybottom = 10 - i, xright = 1:nCols[i], ytop = 11 - i - 0.2, col = pokepal(pokeNs[i], nColours), border="light grey") } graphics::text(rep(-0.1, 10), (10:1) - 0.6, labels = paste(pokeNs, ':', names(pokeColours)[pokeNs]), xpd = TRUE, adj = 1) }
24570a78e3ff78883dd1cae9a3455105 cycle_sched_4_9_1.sat.qdimacs 36093 108581
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Database/Tentrup/cycle-sched/cycle_sched_4_9_1.sat/cycle_sched_4_9_1.sat.R
no_license
arey0pushpa/dcnf-autarky
R
false
false
75
r
24570a78e3ff78883dd1cae9a3455105 cycle_sched_4_9_1.sat.qdimacs 36093 108581
# Problem Set 2 Solution Template # mysoln is a list to store your answers # the first element of the list is a vector of names for the students in your group # make sure these match the names shown on the pdf document provided by the MFE office # using group 1 as an example: mysoln = list(student = c("Molin Liang", "Meghana Rao", "Chengbo Du", "Shardul Kulkarni")) # 1 # your intermediary code to get your answers here # save down your final answers for part a, b, and c price_func = function(coupon_rate, year, ytm){ price = 0 for (i in 1:year){ price = price + 100 * coupon_rate / (1 + ytm) ^ i } price = price + 100 / (1 + ytm) ^ year return(price) } ytm = 0.06 coupon_rate = 0 year = 3 price = price_func(coupon_rate = 0, year = 3, ytm = 0.06) a = c(price,ytm) #ytm in decimal form ytm = 0.055 coupon_rate = 0.06 year = 2 price = price_func(coupon_rate = 0.06, year = 2, ytm = 0.055) b = c(price,ytm) #ytm in decimal form ytm = 0.063 coupon_rate = 0.08 year = 4 price = price_func(coupon_rate = 0.08, year = 4, ytm = 0.063) c = c(price,ytm) #ytm in decimal form # add answers to list for "Q1" mysoln[["Q1"]] = list(a=a, b=b, c=c) # 2 # your intermediary code to get your answers here # save down your final answers couponx = 100 * 0.04 * 0.5 pricex = 100.98 par = 100 r.6month = ((par + couponx) / pricex - 1) * 2 coupony = 100 * 0.06 * 0.5 pricey = 103.59 r.1yr = (par + coupony) / (pricey - coupony / (1 + r.6month)) - 1 a = c(r.6month, r.1yr) #in decimal form # add answers to list for "Q2" mysoln[["Q2"]] = list(a=a) # 3 # your intermediary code to get your answers here # save down your final answers # suppose we buy x Bond A, y Bond B, z Bond C # To eliminate any future payments, we have # 100x + 5y + 7z = 0 # 105y + 107z = 0 # x = (-2/105)z # y = (-105/107)z # suppose we short 1 Bond C priceA = 95.238 priceB = 98.438 priceC = 103.370 arbitrage = -2/105 * priceA - 105/107 * priceB + priceC arbitrage # Put the answer in your PDF writeup mysoln
/HW2/lecture2p_huanyu.R
no_license
JohnnyBarber/Investment
R
false
false
1,992
r
# Problem Set 2 Solution Template # mysoln is a list to store your answers # the first element of the list is a vector of names for the students in your group # make sure these match the names shown on the pdf document provided by the MFE office # using group 1 as an example: mysoln = list(student = c("Molin Liang", "Meghana Rao", "Chengbo Du", "Shardul Kulkarni")) # 1 # your intermediary code to get your answers here # save down your final answers for part a, b, and c price_func = function(coupon_rate, year, ytm){ price = 0 for (i in 1:year){ price = price + 100 * coupon_rate / (1 + ytm) ^ i } price = price + 100 / (1 + ytm) ^ year return(price) } ytm = 0.06 coupon_rate = 0 year = 3 price = price_func(coupon_rate = 0, year = 3, ytm = 0.06) a = c(price,ytm) #ytm in decimal form ytm = 0.055 coupon_rate = 0.06 year = 2 price = price_func(coupon_rate = 0.06, year = 2, ytm = 0.055) b = c(price,ytm) #ytm in decimal form ytm = 0.063 coupon_rate = 0.08 year = 4 price = price_func(coupon_rate = 0.08, year = 4, ytm = 0.063) c = c(price,ytm) #ytm in decimal form # add answers to list for "Q1" mysoln[["Q1"]] = list(a=a, b=b, c=c) # 2 # your intermediary code to get your answers here # save down your final answers couponx = 100 * 0.04 * 0.5 pricex = 100.98 par = 100 r.6month = ((par + couponx) / pricex - 1) * 2 coupony = 100 * 0.06 * 0.5 pricey = 103.59 r.1yr = (par + coupony) / (pricey - coupony / (1 + r.6month)) - 1 a = c(r.6month, r.1yr) #in decimal form # add answers to list for "Q2" mysoln[["Q2"]] = list(a=a) # 3 # your intermediary code to get your answers here # save down your final answers # suppose we buy x Bond A, y Bond B, z Bond C # To eliminate any future payments, we have # 100x + 5y + 7z = 0 # 105y + 107z = 0 # x = (-2/105)z # y = (-105/107)z # suppose we short 1 Bond C priceA = 95.238 priceB = 98.438 priceC = 103.370 arbitrage = -2/105 * priceA - 105/107 * priceB + priceC arbitrage # Put the answer in your PDF writeup mysoln
library(cpr) ### Name: matrix_rank ### Title: Rank of a Matrix ### Aliases: matrix_rank ### ** Examples # Check the rank of a matrix mat <- matrix(rnorm(25000 * 120), nrow = 25000) Matrix::rankMatrix(mat)[1] matrix_rank(mat) # A full rank B-spline basis bmat <- bsplines(seq(0, 1, length = 100), df = 15) matrix_rank(bmat) # A rank deficient B-spline basis bmat <- bsplines(seq(0, 1, length = 100), iknots = c(0.001, 0.002)) ncol(bmat) matrix_rank(bmat)
/data/genthat_extracted_code/cpr/examples/matrix_rank.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
464
r
library(cpr) ### Name: matrix_rank ### Title: Rank of a Matrix ### Aliases: matrix_rank ### ** Examples # Check the rank of a matrix mat <- matrix(rnorm(25000 * 120), nrow = 25000) Matrix::rankMatrix(mat)[1] matrix_rank(mat) # A full rank B-spline basis bmat <- bsplines(seq(0, 1, length = 100), df = 15) matrix_rank(bmat) # A rank deficient B-spline basis bmat <- bsplines(seq(0, 1, length = 100), iknots = c(0.001, 0.002)) ncol(bmat) matrix_rank(bmat)
## A pair of functions that work together to implement a caching mechanism for ## efficiently working out the inverse of an invertible matrix ## The cache is especially useful when the calculation needs to be ## peformed multiple times, e.g. in a loop makeCacheMatrix <- function(x = matrix()) { ## creates a special "vector", which is a list containing functions to ## set and get the values of the vector ## set and get the inverse of an invertible matrix i = NULL set = function(y) { x <<- y i <<- NULL } get <- function() x setinverse <- function(inv) i <<- inv getinverse <- function() i list(set=set, get=get, setinverse=setinverse, getinverse=getinverse) } cacheSolve <- function(x, ...) { ## Returns the inverse of a matrix (x), either from the cache if it exists, ## or calculated by solve, in which case save back to cache for next time inverse = x$getinverse() if (!is.null(inverse)){ message("getting cached data") ## inverse was calculated before and exists in the cache return(inverse) } # else calculate m = x$get() inverse = solve(m, ...) x$setinverse(inverse) # set the value in the cache for next time return(inverse) } ## To run and test the functions, define a square (invertible) matrix: ## m = matrix(c(2, 4, 3, 1, 5, 7,4,7,10), nrow=3, ncol=3) ## Then set up the cache: ## m <- makeCacheMatrix(M) ## Then calculate the inverse for the first time. This will return a value, but with no message "getting cached data" ## cacheSolve(m) ## Now re-run on the same matrix. This will return a value with the message "getting cached data" ## cacheSolve(m)
/cachematrix.R
no_license
archanalytics/ProgrammingAssignment2
R
false
false
1,751
r
## A pair of functions that work together to implement a caching mechanism for ## efficiently working out the inverse of an invertible matrix ## The cache is especially useful when the calculation needs to be ## peformed multiple times, e.g. in a loop makeCacheMatrix <- function(x = matrix()) { ## creates a special "vector", which is a list containing functions to ## set and get the values of the vector ## set and get the inverse of an invertible matrix i = NULL set = function(y) { x <<- y i <<- NULL } get <- function() x setinverse <- function(inv) i <<- inv getinverse <- function() i list(set=set, get=get, setinverse=setinverse, getinverse=getinverse) } cacheSolve <- function(x, ...) { ## Returns the inverse of a matrix (x), either from the cache if it exists, ## or calculated by solve, in which case save back to cache for next time inverse = x$getinverse() if (!is.null(inverse)){ message("getting cached data") ## inverse was calculated before and exists in the cache return(inverse) } # else calculate m = x$get() inverse = solve(m, ...) x$setinverse(inverse) # set the value in the cache for next time return(inverse) } ## To run and test the functions, define a square (invertible) matrix: ## m = matrix(c(2, 4, 3, 1, 5, 7,4,7,10), nrow=3, ncol=3) ## Then set up the cache: ## m <- makeCacheMatrix(M) ## Then calculate the inverse for the first time. This will return a value, but with no message "getting cached data" ## cacheSolve(m) ## Now re-run on the same matrix. This will return a value with the message "getting cached data" ## cacheSolve(m)
hpc <- read.csv2("household_power_consumption.txt") hpc$Date <- as.Date(hpc$Date, "%d/%m/%Y") hpc$Sub_metering_1 <- as.numeric(as.character(hpc$Sub_metering_1)) hpc$Sub_metering_2 <- as.numeric(as.character(hpc$Sub_metering_2)) hpc$Sub_metering_3 <- as.numeric(as.character(hpc$Sub_metering_3)) hpc$Voltage <- as.numeric(as.character(hpc$Voltage)) hpc$Global_reactive_power <- as.numeric(as.character(hpc$Global_reactive_power)) hpc$DateTime <- strptime(paste(as.character(hpc$Date), hpc$Time, sep=":"), format="%Y-%m-%d:%H:%M:%S") hpc <- hpc[hpc$Date == as.Date("2007-02-01") | hpc$Date == as.Date("2007-02-02"),] par(mfcol = c(2, 2)) plot(hpc$DateTime, hpc$Global_active_power, type = "l", ylab = "Global Active Power (kilowatts)", xlab = "") plot(hpc$DateTime, hpc$Sub_metering_1, type = "l", ylab = "Energy sub metering", xlab = "") lines(hpc$DateTime, hpc$Sub_metering_2, type="l",col="red") lines(hpc$DateTime, hpc$Sub_metering_3, type="l",col="blue") legend("topright", pch = "_", col = c("black", "blue", "red"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")) plot(hpc$DateTime, hpc$Voltage, type = "l", ylab = "Voltage", xlab = "datetime") plot(hpc$DateTime, hpc$Global_reactive_power, type = "l", ylab = "Global_reactive_power", xlab = "datetime") dev.copy(png, file = "plot4.png") dev.off()
/plot4.R
no_license
David2102/ExData_Plotting1
R
false
false
1,328
r
hpc <- read.csv2("household_power_consumption.txt") hpc$Date <- as.Date(hpc$Date, "%d/%m/%Y") hpc$Sub_metering_1 <- as.numeric(as.character(hpc$Sub_metering_1)) hpc$Sub_metering_2 <- as.numeric(as.character(hpc$Sub_metering_2)) hpc$Sub_metering_3 <- as.numeric(as.character(hpc$Sub_metering_3)) hpc$Voltage <- as.numeric(as.character(hpc$Voltage)) hpc$Global_reactive_power <- as.numeric(as.character(hpc$Global_reactive_power)) hpc$DateTime <- strptime(paste(as.character(hpc$Date), hpc$Time, sep=":"), format="%Y-%m-%d:%H:%M:%S") hpc <- hpc[hpc$Date == as.Date("2007-02-01") | hpc$Date == as.Date("2007-02-02"),] par(mfcol = c(2, 2)) plot(hpc$DateTime, hpc$Global_active_power, type = "l", ylab = "Global Active Power (kilowatts)", xlab = "") plot(hpc$DateTime, hpc$Sub_metering_1, type = "l", ylab = "Energy sub metering", xlab = "") lines(hpc$DateTime, hpc$Sub_metering_2, type="l",col="red") lines(hpc$DateTime, hpc$Sub_metering_3, type="l",col="blue") legend("topright", pch = "_", col = c("black", "blue", "red"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")) plot(hpc$DateTime, hpc$Voltage, type = "l", ylab = "Voltage", xlab = "datetime") plot(hpc$DateTime, hpc$Global_reactive_power, type = "l", ylab = "Global_reactive_power", xlab = "datetime") dev.copy(png, file = "plot4.png") dev.off()
#.libPaths("c:/software/Rpackages") library(tidyverse) library(plyr) #library(spdep) library(gmm) library(emplik) library(BELSpatial) # spatial files ACA #aus_nb<-read.gal("Data/ACA data/SA2_2011_AUST_qv7.gal", override.id = TRUE) #class(aus_nb) #W<-nb2mat(aus_nb,style="B") #rownames(W)<-c() #ind <- upper.tri(W) #W[ind] <- t(W)[ind] #saveRDS(W,"Data/ACA data/W.RDS",version = 2) W<- readRDS("Data/W.RDS") ni<-rowSums(W) # no. of neighbours for each area R<-diag(ni) for(i in 1:nrow(R)) { R[i,which(W[i,]==1)]<- -1 } lung_data<-read_csv("Data/lung.data.csv") cov<-read_csv("Data/SA2 Data_v2.csv") lung_data_final<-lung_data[complete.cases(lung_data),] lung_data_final<-rename(lung_data_final,c("cancer.lung.sa2"="sa2")) lung_data_final2<-inner_join(lung_data_final,cov,by="sa2") lung_data_final2<-lung_data_final2%>% rename(.,c("ra4cat"="regions")) lung_data_final2$regions[lung_data_final2$regions==3]<-2 lung_data_final2$regions[lung_data_final2$regions==4]<-3 # for females y<- exp(lung_data_final2$y.lung.female) n<- length(y) # no. of observations lung_data_final2<-mutate(lung_data_final2,cities=ifelse(lung_data_final2$regions==1,1,0)) lung_data_final2<-mutate(lung_data_final2,regional=ifelse(lung_data_final2$regions==2,1,0)) x<- cbind(1,lung_data_final2$cities,lung_data_final2$regional) p<- dim(x)[2] # no. 
of covariates alpha_1<-1 # hyperparamter for tau prior alpha_2<-0.01 # hyperparamter for tau prior tau_inv_init<- rgamma(1,alpha_1,alpha_2) # using IG prior(1,1) for tau_inv tau_init<- 1/tau_inv_init g<- 10# G prior evaluated at 10 for regression coefficients' prior (Zellner prior) prior_mean_beta<- rep(0,p) # p is the number of regression parameters, in case of one covariate, p=2 beta_init<- rnorm(3,prior_mean_beta, (1/g)*tau_inv_init) wi_init<- 1/length(y) # y be the response variable from the data psi_init <- rep(0,n) var<- exp(lung_data_final2$sd.lung.female) # calculating MELE of Beta, beta_mele wi=wi_init # using gmm package to calculate initial values of beta g<- log(y)~ x[,2]+x[,3] H<-x[,-1] beta_mele<- unname(gel(g,H,c(0,0,0))$coefficients) mu_init<- exp(x%*% beta_mele + psi_init) beta_init<-beta_mele wi_mu<- el.test(y-mu_init,0)$wts # computing el weights using emplik package wi_mu<-wi_mu/sum(wi_mu) # sum(wi) = 1 and wi>0 constraints wi<-wi_mu # fitting BEL BYM model taking rho= 1 library(parallel) cluster<-makeCluster(3) #clusterEvalQ(cl=cluster,.libPaths("c:/software/Rpackages")) clusterEvalQ(cl=cluster,library(BELSpatial)) clusterExport(cl=cluster,varlist = c("y","x","n","p","var","beta_init", "psi_init", "tau_init","R", "wi")) SBEL_BYM_lung_female<-clusterApply(cl=cluster, x=1:3, function(z){BEL_leroux_new(y,x,n,p,var,rho=1,niter=10000, beta_init, psi_init, tau_init,R, wi, sd_psi=0.003, sd_beta=0.0008, sd_tau=0.5)}) save(SBEL_BYM_lung_female,file="Results/SBEL_BYM_lung_female_10000.RData")
/Rscripts/SBEL BYM lung female.R
no_license
Farzana-Jahan/BEL-Meta
R
false
false
3,023
r
#.libPaths("c:/software/Rpackages") library(tidyverse) library(plyr) #library(spdep) library(gmm) library(emplik) library(BELSpatial) # spatial files ACA #aus_nb<-read.gal("Data/ACA data/SA2_2011_AUST_qv7.gal", override.id = TRUE) #class(aus_nb) #W<-nb2mat(aus_nb,style="B") #rownames(W)<-c() #ind <- upper.tri(W) #W[ind] <- t(W)[ind] #saveRDS(W,"Data/ACA data/W.RDS",version = 2) W<- readRDS("Data/W.RDS") ni<-rowSums(W) # no. of neighbours for each area R<-diag(ni) for(i in 1:nrow(R)) { R[i,which(W[i,]==1)]<- -1 } lung_data<-read_csv("Data/lung.data.csv") cov<-read_csv("Data/SA2 Data_v2.csv") lung_data_final<-lung_data[complete.cases(lung_data),] lung_data_final<-rename(lung_data_final,c("cancer.lung.sa2"="sa2")) lung_data_final2<-inner_join(lung_data_final,cov,by="sa2") lung_data_final2<-lung_data_final2%>% rename(.,c("ra4cat"="regions")) lung_data_final2$regions[lung_data_final2$regions==3]<-2 lung_data_final2$regions[lung_data_final2$regions==4]<-3 # for females y<- exp(lung_data_final2$y.lung.female) n<- length(y) # no. of observations lung_data_final2<-mutate(lung_data_final2,cities=ifelse(lung_data_final2$regions==1,1,0)) lung_data_final2<-mutate(lung_data_final2,regional=ifelse(lung_data_final2$regions==2,1,0)) x<- cbind(1,lung_data_final2$cities,lung_data_final2$regional) p<- dim(x)[2] # no. 
of covariates alpha_1<-1 # hyperparamter for tau prior alpha_2<-0.01 # hyperparamter for tau prior tau_inv_init<- rgamma(1,alpha_1,alpha_2) # using IG prior(1,1) for tau_inv tau_init<- 1/tau_inv_init g<- 10# G prior evaluated at 10 for regression coefficients' prior (Zellner prior) prior_mean_beta<- rep(0,p) # p is the number of regression parameters, in case of one covariate, p=2 beta_init<- rnorm(3,prior_mean_beta, (1/g)*tau_inv_init) wi_init<- 1/length(y) # y be the response variable from the data psi_init <- rep(0,n) var<- exp(lung_data_final2$sd.lung.female) # calculating MELE of Beta, beta_mele wi=wi_init # using gmm package to calculate initial values of beta g<- log(y)~ x[,2]+x[,3] H<-x[,-1] beta_mele<- unname(gel(g,H,c(0,0,0))$coefficients) mu_init<- exp(x%*% beta_mele + psi_init) beta_init<-beta_mele wi_mu<- el.test(y-mu_init,0)$wts # computing el weights using emplik package wi_mu<-wi_mu/sum(wi_mu) # sum(wi) = 1 and wi>0 constraints wi<-wi_mu # fitting BEL BYM model taking rho= 1 library(parallel) cluster<-makeCluster(3) #clusterEvalQ(cl=cluster,.libPaths("c:/software/Rpackages")) clusterEvalQ(cl=cluster,library(BELSpatial)) clusterExport(cl=cluster,varlist = c("y","x","n","p","var","beta_init", "psi_init", "tau_init","R", "wi")) SBEL_BYM_lung_female<-clusterApply(cl=cluster, x=1:3, function(z){BEL_leroux_new(y,x,n,p,var,rho=1,niter=10000, beta_init, psi_init, tau_init,R, wi, sd_psi=0.003, sd_beta=0.0008, sd_tau=0.5)}) save(SBEL_BYM_lung_female,file="Results/SBEL_BYM_lung_female_10000.RData")
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot_summclust.R \name{plot.summclust} \alias{plot.summclust} \title{Plotting method for objects of type \code{summclust}} \usage{ \method{plot}{summclust}(x, ...) } \arguments{ \item{x}{An object of type \code{summclust}} \item{...}{other optional function arguments} } \value{ A list containing \item{residual_leverage}{A \code{ggplot} of the residual leverages} \item{coef_leverage}{A \code{ggplot} of the coefficient leverages} \item{coef_beta}{A \code{ggplot} of the leave-one-out cluster jackknife regression coefficients} } \description{ Plots residual leverage, partial leverage and the leave-one-cluster-out regression coefficients } \details{ Note that the function requires \code{ggplot2} to be installed. } \examples{ \donttest{ if(requireNamespace("summclust") && requireNamespace("haven")){ library(summclust) library(haven) nlswork <- read_dta("http://www.stata-press.com/data/r9/nlswork.dta") # drop NAs at the moment nlswork <- nlswork[, c("ln_wage", "grade", "age", "birth_yr", "union", "race", "msp", "ind_code")] nlswork <- na.omit(nlswork) lm_fit <- lm( ln_wage ~ union + race + msp + as.factor(birth_yr) + as.factor(age) + as.factor(grade), data = nlswork) res <- summclust( obj = lm_fit, params = c("msp", "union"), cluster = ~ind_code, ) plot(res) } } } \references{ MacKinnon, James G., Morten Ørregaard Nielsen, and Matthew D. Webb. "Leverage, influence, and the jackknife in clustered regression models: Reliable inference using summclust." arXiv preprint arXiv:2205.03288 (2022). }
/man/plot.summclust.Rd
permissive
s3alfisc/summclust
R
false
true
1,613
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot_summclust.R \name{plot.summclust} \alias{plot.summclust} \title{Plotting method for objects of type \code{summclust}} \usage{ \method{plot}{summclust}(x, ...) } \arguments{ \item{x}{An object of type \code{summclust}} \item{...}{other optional function arguments} } \value{ A list containing \item{residual_leverage}{A \code{ggplot} of the residual leverages} \item{coef_leverage}{A \code{ggplot} of the coefficient leverages} \item{coef_beta}{A \code{ggplot} of the leave-one-out cluster jackknife regression coefficients} } \description{ Plots residual leverage, partial leverage and the leave-one-cluster-out regression coefficients } \details{ Note that the function requires \code{ggplot2} to be installed. } \examples{ \donttest{ if(requireNamespace("summclust") && requireNamespace("haven")){ library(summclust) library(haven) nlswork <- read_dta("http://www.stata-press.com/data/r9/nlswork.dta") # drop NAs at the moment nlswork <- nlswork[, c("ln_wage", "grade", "age", "birth_yr", "union", "race", "msp", "ind_code")] nlswork <- na.omit(nlswork) lm_fit <- lm( ln_wage ~ union + race + msp + as.factor(birth_yr) + as.factor(age) + as.factor(grade), data = nlswork) res <- summclust( obj = lm_fit, params = c("msp", "union"), cluster = ~ind_code, ) plot(res) } } } \references{ MacKinnon, James G., Morten Ørregaard Nielsen, and Matthew D. Webb. "Leverage, influence, and the jackknife in clustered regression models: Reliable inference using summclust." arXiv preprint arXiv:2205.03288 (2022). }
library(iml) ### Name: plot.LocalModel ### Title: Plot Local Model ### Aliases: plot.LocalModel ### ** Examples if (require("randomForest")) { # First we fit a machine learning model on the Boston housing data data("Boston", package = "MASS") X = Boston[-which(names(Boston) == "medv")] rf = randomForest(medv ~ ., data = Boston, ntree = 50) mod = Predictor$new(rf, data = X) # Explain the first instance of the dataset with the LocalModel method: x.interest = X[1,] lemon = LocalModel$new(mod, x.interest = x.interest, k = 2) plot(lemon) }
/data/genthat_extracted_code/iml/examples/plot.LocalModel.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
550
r
library(iml) ### Name: plot.LocalModel ### Title: Plot Local Model ### Aliases: plot.LocalModel ### ** Examples if (require("randomForest")) { # First we fit a machine learning model on the Boston housing data data("Boston", package = "MASS") X = Boston[-which(names(Boston) == "medv")] rf = randomForest(medv ~ ., data = Boston, ntree = 50) mod = Predictor$new(rf, data = X) # Explain the first instance of the dataset with the LocalModel method: x.interest = X[1,] lemon = LocalModel$new(mod, x.interest = x.interest, k = 2) plot(lemon) }
plotData <- function(d, x, y, z = NULL, line = F, spline = F, spar = 1, xlim = c(0, max(d[, x]))) { if(is.null(z)) { z.dat <- rep(1, dim(d)[1]) } else { z.dat <- d[, z] } symbols(x = d[, x], y = d[, y], circles = (sqrt(z.dat / pi)), inches = 1/10, ann = T, bg = "steelblue2", fg = "black", xlim = xlim, ylim = c(0, max(d[, y])), xlab = x, ylab = y, las = 1) title(main = paste('Cor =', round(cor(d[, x], d[, y]), 2))) if(line) { linm <- lm(d[, y] ~ d[, x]) abline(a = linm$coefficients[1], b = linm$coefficients[2], col = "gray50") } if(spline) { spl <- smooth.spline(d[, x], d[, y], spar = spar) lines(spl, col = "red") } saveplot <- recordPlot() return(saveplot) }
/plotData.R
permissive
veghp/R_scripts
R
false
false
729
r
plotData <- function(d, x, y, z = NULL, line = F, spline = F, spar = 1, xlim = c(0, max(d[, x]))) { if(is.null(z)) { z.dat <- rep(1, dim(d)[1]) } else { z.dat <- d[, z] } symbols(x = d[, x], y = d[, y], circles = (sqrt(z.dat / pi)), inches = 1/10, ann = T, bg = "steelblue2", fg = "black", xlim = xlim, ylim = c(0, max(d[, y])), xlab = x, ylab = y, las = 1) title(main = paste('Cor =', round(cor(d[, x], d[, y]), 2))) if(line) { linm <- lm(d[, y] ~ d[, x]) abline(a = linm$coefficients[1], b = linm$coefficients[2], col = "gray50") } if(spline) { spl <- smooth.spline(d[, x], d[, y], spar = spar) lines(spl, col = "red") } saveplot <- recordPlot() return(saveplot) }
#Determine working directory and set path for data dowload Path<-getwd() #Path <‐ "C:/Users/Valeriy/Programming-R/3 Getting and Cleaning Data" setwd(Path) if(!file.exists("./data3")){dir.create("./data3")} #Dowload data Url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip" download.file(Url,destfile="./data3/Dataset.zip") unzip(zipfile="./data3/Dataset.zip",exdir="./data3") #Load libraries library(dplyr) library(tidyr) library(data.table) #Load training and test sets Path <- file.path(Path, "data3", "UCI HAR Dataset") setwd(Path) # subject files SubjectTrain <- tbl_df(read.table(file.path(Path, "train", "subject_train.txt"))) SubjectTest <- tbl_df(read.table(file.path(Path, "test" , "subject_test.txt" ))) # activity files ActivityTrain <- tbl_df(read.table(file.path(Path, "train", "Y_train.txt"))) ActivityTest <- tbl_df(read.table(file.path(Path, "test" , "Y_test.txt" ))) # data files. DataTrain <- tbl_df(read.table(file.path(Path, "train", "X_train.txt" ))) DataTest <- tbl_df(read.table(file.path(Path, "test" , "X_test.txt" ))) #merge the training and the test sets by row allSubjectData <- rbind(SubjectTrain, SubjectTest) setnames(allSubjectData, "V1", "subject") allActivityData<- rbind(ActivityTrain, ActivityTest) setnames(allActivityData, "V1", "activityNum") #combine the DATA training and test files dataTable <- rbind(DataTrain, DataTest) # name variables according to feature information dataFeatures <- tbl_df(read.table(file.path(Path, "features.txt"))) setnames(dataFeatures, names(dataFeatures), c("featureNum", "featureName")) colnames(dataTable) <- dataFeatures$featureName #column names for activity labels activityLabels<- tbl_df(read.table(file.path(Path, "activity_labels.txt"))) setnames(activityLabels, names(activityLabels), c("activityNum","activityName")) # Merge columns alldSubjectActivity<- cbind(allSubjectData, allActivityData) dataTable <- cbind(alldSubjectActivity, dataTable) # Reading "features.txt" and 
extracting only the mean and standard deviation FeaturesMeanStd <- grep("mean\\(\\)|std\\(\\)",dataFeatures$featureName,value=TRUE) # Taking only measurements for the mean and standard deviation and add "subject","activityNum" FeaturesMeanStd <- union(c("subject","activityNum"), FeaturesMeanStd) dataTable<- subset(dataTable,select=FeaturesMeanStd) #enter name of activity into dataTable dataTable <- merge(activityLabels, dataTable , by="activityNum", all.x=TRUE) dataTable$activityName <- as.character(dataTable$activityName) #print merged dataset write.table(dataTable, "mergedData.txt", row.names = F) # Create a tidy data set with variable means sorted by subject and Activity dataTable$activityName <- as.character(dataTable$activityName) dataAggr<- aggregate(. ~ subject - activityName, data = dataTable, mean) dataTable<- tbl_df(arrange(dataAggr,subject,activityName)) # Create a tidy data set write.table(dataTable, "finalData.txt", row.name=FALSE)
/run_analysis.R
no_license
vporoyko/Getting-and-Cleaning-Data-Course-Project
R
false
false
2,971
r
#Determine working directory and set path for data dowload Path<-getwd() #Path <‐ "C:/Users/Valeriy/Programming-R/3 Getting and Cleaning Data" setwd(Path) if(!file.exists("./data3")){dir.create("./data3")} #Dowload data Url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip" download.file(Url,destfile="./data3/Dataset.zip") unzip(zipfile="./data3/Dataset.zip",exdir="./data3") #Load libraries library(dplyr) library(tidyr) library(data.table) #Load training and test sets Path <- file.path(Path, "data3", "UCI HAR Dataset") setwd(Path) # subject files SubjectTrain <- tbl_df(read.table(file.path(Path, "train", "subject_train.txt"))) SubjectTest <- tbl_df(read.table(file.path(Path, "test" , "subject_test.txt" ))) # activity files ActivityTrain <- tbl_df(read.table(file.path(Path, "train", "Y_train.txt"))) ActivityTest <- tbl_df(read.table(file.path(Path, "test" , "Y_test.txt" ))) # data files. DataTrain <- tbl_df(read.table(file.path(Path, "train", "X_train.txt" ))) DataTest <- tbl_df(read.table(file.path(Path, "test" , "X_test.txt" ))) #merge the training and the test sets by row allSubjectData <- rbind(SubjectTrain, SubjectTest) setnames(allSubjectData, "V1", "subject") allActivityData<- rbind(ActivityTrain, ActivityTest) setnames(allActivityData, "V1", "activityNum") #combine the DATA training and test files dataTable <- rbind(DataTrain, DataTest) # name variables according to feature information dataFeatures <- tbl_df(read.table(file.path(Path, "features.txt"))) setnames(dataFeatures, names(dataFeatures), c("featureNum", "featureName")) colnames(dataTable) <- dataFeatures$featureName #column names for activity labels activityLabels<- tbl_df(read.table(file.path(Path, "activity_labels.txt"))) setnames(activityLabels, names(activityLabels), c("activityNum","activityName")) # Merge columns alldSubjectActivity<- cbind(allSubjectData, allActivityData) dataTable <- cbind(alldSubjectActivity, dataTable) # Reading "features.txt" and 
extracting only the mean and standard deviation FeaturesMeanStd <- grep("mean\\(\\)|std\\(\\)",dataFeatures$featureName,value=TRUE) # Taking only measurements for the mean and standard deviation and add "subject","activityNum" FeaturesMeanStd <- union(c("subject","activityNum"), FeaturesMeanStd) dataTable<- subset(dataTable,select=FeaturesMeanStd) #enter name of activity into dataTable dataTable <- merge(activityLabels, dataTable , by="activityNum", all.x=TRUE) dataTable$activityName <- as.character(dataTable$activityName) #print merged dataset write.table(dataTable, "mergedData.txt", row.names = F) # Create a tidy data set with variable means sorted by subject and Activity dataTable$activityName <- as.character(dataTable$activityName) dataAggr<- aggregate(. ~ subject - activityName, data = dataTable, mean) dataTable<- tbl_df(arrange(dataAggr,subject,activityName)) # Create a tidy data set write.table(dataTable, "finalData.txt", row.name=FALSE)
# Copyright 2018 Province of British Columbia # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and limitations under the License. #' Get the valid values for a facet (that you can use in [bcdc_search()]) #' #' @param facet the facet(s) for which to retrieve valid values. Can be one or #' more of: #' `"license_id", "download_audience", "type", "res_format", "sector", "organization"` #' #' @return A data frame of values for the selected facet #' @export #' #' @examples #' \dontrun{ #' bcdc_search_facets("type") #' } bcdc_search_facets <- function(facet = c("license_id", "download_audience", "type", "res_format", "sector", "organization")) { if(!has_internet()) stop("No access to internet", call. = FALSE) facet <- match.arg(facet, several.ok = TRUE) query <- paste0("\"", facet, "\"", collapse = ",") query <- paste0("[", query, "]") cli <- bcdc_http_client(paste0(base_url(), "action/package_search")) r <- cli$get(query = list(facet.field = query, rows = 0)) r$raise_for_status() res <- jsonlite::fromJSON(r$parse("UTF-8")) stopifnot(res$success) facet_list <- res$result$search_facets facet_dfs <- lapply(facet_list, function(x) { x$items$facet <- x$title x$items[, c("facet", setdiff(names(x$items), "facet"))] } ) dplyr::bind_rows(facet_dfs) } #' Return a full list of the names of B.C. Data Catalogue records #' #' @return A character vector of the names of B.C. Data Catalogue records #' @export bcdc_list <- function() { if(!has_internet()) stop("No access to internet", call. 
= FALSE) l_new_ret <- 1 ret <- character() offset <- 0 limit <- 1000 while (l_new_ret) { cli <- bcdc_http_client(paste0(base_url(), "action/package_list")) r <- cli$get(query = list(offset = offset, limit = limit)) r$raise_for_status() res <- jsonlite::fromJSON(r$parse("UTF-8")) stopifnot(res$success) new_ret <- unlist(res$result) ret <- c(ret, new_ret) l_new_ret <- length(new_ret) offset <- offset + limit } ret } #' Search the B.C. Data Catalogue #' #' @param ... search terms #' @param license_id the type of license (see `bcdc_search_facets("license_id")`). #' @param download_audience download audience #' (see `bcdc_search_facets("download_audience")`). Default `"Public"` #' @param type type of resource (see `bcdc_search_facets("type")`) #' @param res_format format of resource (see `bcdc_search_facets("res_format")`) #' @param sector sector of government from which the data comes #' (see `bcdc_search_facets("sector")`) #' @param organization government organization that manages the data #' (see `bcdc_search_facets("organization")`) #' @param n number of results to return. Default `100` #' #' @return A list containing the records that match the search #' @export #' #' @examples #' \dontrun{ #' bcdc_search("forest") #' bcdc_search("regional district", type = "Geographic", res_format = "fgdb") #' } bcdc_search <- function(..., license_id = NULL, download_audience = "Public", type = NULL, res_format=NULL, sector = NULL, organization = NULL, n = 100) { if(!has_internet()) stop("No access to internet", call. 
= FALSE) # TODO: allow terms to be passed as a vector, and allow use of | for OR terms <- paste0(compact(list(...)), collapse = "+") facets <- compact(list(license_id = license_id, download_audience = download_audience, type = type, res_format = res_format, sector = sector, organization = organization )) lapply(names(facets), function(x) { facet_vals <- bcdc_search_facets(x) if (!facets[x] %in% facet_vals$name) { stop(facets[x], " is not a valid value for ", x, call. = FALSE) } }) query <- paste0( terms, ifelse(nzchar(terms), "+", ""), paste(names(facets), facets, sep = ":", collapse = "+")) query <- gsub("\\s+", "%20", query) cli <- bcdc_http_client(paste0(base_url(), "action/package_search")) # Use I(query) to treat query as is, so that things like + and : # aren't encoded as %2B, %3A etc r <- cli$get(query = list(q = I(query), rows = n)) r$raise_for_status res <- jsonlite::fromJSON(r$parse("UTF-8"), simplifyVector = FALSE) stopifnot(res$success) cont <- res$result n_found <- cont$count if(n_found > n){ message("Found ", n_found, " matches. Returning the first ", n, ".\nTo see them all, rerun the search and set the 'n' argument to ", n_found, ".") } ret <- cont$results names(ret) <- vapply(ret, `[[`, "name", FUN.VALUE = character(1)) ret <- lapply(ret, as.bcdc_record) as.bcdc_recordlist(ret) } #' Show a single B.C. Data Catalogue record #' #' @param id the human-readable name, permalink ID, or #' URL of the record. #' #' It is advised to use the permament ID for a record rather than the #' human-readable name to guard against future name changes of the record. #' If you use the human-readable name a warning will be issued once per #' session. You can silence these warnings altogether by setting an option: #' `options("silence_named_get_record_warning" = TRUE)` - which you can put #' in your .Rprofile file so the option persists across sessions. 
#' #' @return A list containing the metadata for the record #' @export #' #' @examples #' \dontrun{ #' bcdc_get_record("https://catalogue.data.gov.bc.ca/dataset/bc-airports") #' bcdc_get_record("bc-airports") #' bcdc_get_record("https://catalogue.data.gov.bc.ca/dataset/76b1b7a3-2112-4444-857a-afccf7b20da8") #' bcdc_get_record("76b1b7a3-2112-4444-857a-afccf7b20da8") #' } bcdc_get_record <- function(id) { if(!has_internet()) stop("No access to internet", call. = FALSE) id <- slug_from_url(id) cli <- bcdc_http_client(paste0(base_url(), "action/package_show")) r <- cli$get(query = list(id = id)) if (r$status_code == 404){ stop(paste0("'", id, "' is not a valid record id or name in the BC data catalogue"), call. = FALSE) } r$raise_for_status() res <- jsonlite::fromJSON(r$parse("UTF-8"), simplifyVector = FALSE) stopifnot(res$success) ret <- res$result if (ret$id != id) { get_record_warn_once( "It is advised to use the permanent id ('", ret$id, "') ", "rather than the name of the record ('", id, "') to guard against future name changes.\n" ) } as.bcdc_record(ret) } format_record <- function(pkg) { pkg$details <- dplyr::bind_rows(pkg$details) # Create a resources data frame res_df <- resource_to_tibble(pkg$resources) res_df$bcdata_available <- other_format_available(res_df) | wfs_available(res_df) pkg$resource_df <- res_df pkg } as.bcdc_record <- function(x) { x <- format_record(x) class(x) <- "bcdc_record" x } as.bcdc_recordlist <- function(x) { class(x) <- "bcdc_recordlist" x }
/R/bcdc_search.R
permissive
steffilazerte/bcdata
R
false
false
7,566
r
# Copyright 2018 Province of British Columbia # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and limitations under the License. #' Get the valid values for a facet (that you can use in [bcdc_search()]) #' #' @param facet the facet(s) for which to retrieve valid values. Can be one or #' more of: #' `"license_id", "download_audience", "type", "res_format", "sector", "organization"` #' #' @return A data frame of values for the selected facet #' @export #' #' @examples #' \dontrun{ #' bcdc_search_facets("type") #' } bcdc_search_facets <- function(facet = c("license_id", "download_audience", "type", "res_format", "sector", "organization")) { if(!has_internet()) stop("No access to internet", call. = FALSE) facet <- match.arg(facet, several.ok = TRUE) query <- paste0("\"", facet, "\"", collapse = ",") query <- paste0("[", query, "]") cli <- bcdc_http_client(paste0(base_url(), "action/package_search")) r <- cli$get(query = list(facet.field = query, rows = 0)) r$raise_for_status() res <- jsonlite::fromJSON(r$parse("UTF-8")) stopifnot(res$success) facet_list <- res$result$search_facets facet_dfs <- lapply(facet_list, function(x) { x$items$facet <- x$title x$items[, c("facet", setdiff(names(x$items), "facet"))] } ) dplyr::bind_rows(facet_dfs) } #' Return a full list of the names of B.C. Data Catalogue records #' #' @return A character vector of the names of B.C. Data Catalogue records #' @export bcdc_list <- function() { if(!has_internet()) stop("No access to internet", call. 
= FALSE) l_new_ret <- 1 ret <- character() offset <- 0 limit <- 1000 while (l_new_ret) { cli <- bcdc_http_client(paste0(base_url(), "action/package_list")) r <- cli$get(query = list(offset = offset, limit = limit)) r$raise_for_status() res <- jsonlite::fromJSON(r$parse("UTF-8")) stopifnot(res$success) new_ret <- unlist(res$result) ret <- c(ret, new_ret) l_new_ret <- length(new_ret) offset <- offset + limit } ret } #' Search the B.C. Data Catalogue #' #' @param ... search terms #' @param license_id the type of license (see `bcdc_search_facets("license_id")`). #' @param download_audience download audience #' (see `bcdc_search_facets("download_audience")`). Default `"Public"` #' @param type type of resource (see `bcdc_search_facets("type")`) #' @param res_format format of resource (see `bcdc_search_facets("res_format")`) #' @param sector sector of government from which the data comes #' (see `bcdc_search_facets("sector")`) #' @param organization government organization that manages the data #' (see `bcdc_search_facets("organization")`) #' @param n number of results to return. Default `100` #' #' @return A list containing the records that match the search #' @export #' #' @examples #' \dontrun{ #' bcdc_search("forest") #' bcdc_search("regional district", type = "Geographic", res_format = "fgdb") #' } bcdc_search <- function(..., license_id = NULL, download_audience = "Public", type = NULL, res_format=NULL, sector = NULL, organization = NULL, n = 100) { if(!has_internet()) stop("No access to internet", call. 
= FALSE) # TODO: allow terms to be passed as a vector, and allow use of | for OR terms <- paste0(compact(list(...)), collapse = "+") facets <- compact(list(license_id = license_id, download_audience = download_audience, type = type, res_format = res_format, sector = sector, organization = organization )) lapply(names(facets), function(x) { facet_vals <- bcdc_search_facets(x) if (!facets[x] %in% facet_vals$name) { stop(facets[x], " is not a valid value for ", x, call. = FALSE) } }) query <- paste0( terms, ifelse(nzchar(terms), "+", ""), paste(names(facets), facets, sep = ":", collapse = "+")) query <- gsub("\\s+", "%20", query) cli <- bcdc_http_client(paste0(base_url(), "action/package_search")) # Use I(query) to treat query as is, so that things like + and : # aren't encoded as %2B, %3A etc r <- cli$get(query = list(q = I(query), rows = n)) r$raise_for_status res <- jsonlite::fromJSON(r$parse("UTF-8"), simplifyVector = FALSE) stopifnot(res$success) cont <- res$result n_found <- cont$count if(n_found > n){ message("Found ", n_found, " matches. Returning the first ", n, ".\nTo see them all, rerun the search and set the 'n' argument to ", n_found, ".") } ret <- cont$results names(ret) <- vapply(ret, `[[`, "name", FUN.VALUE = character(1)) ret <- lapply(ret, as.bcdc_record) as.bcdc_recordlist(ret) } #' Show a single B.C. Data Catalogue record #' #' @param id the human-readable name, permalink ID, or #' URL of the record. #' #' It is advised to use the permament ID for a record rather than the #' human-readable name to guard against future name changes of the record. #' If you use the human-readable name a warning will be issued once per #' session. You can silence these warnings altogether by setting an option: #' `options("silence_named_get_record_warning" = TRUE)` - which you can put #' in your .Rprofile file so the option persists across sessions. 
#' #' @return A list containing the metadata for the record #' @export #' #' @examples #' \dontrun{ #' bcdc_get_record("https://catalogue.data.gov.bc.ca/dataset/bc-airports") #' bcdc_get_record("bc-airports") #' bcdc_get_record("https://catalogue.data.gov.bc.ca/dataset/76b1b7a3-2112-4444-857a-afccf7b20da8") #' bcdc_get_record("76b1b7a3-2112-4444-857a-afccf7b20da8") #' } bcdc_get_record <- function(id) { if(!has_internet()) stop("No access to internet", call. = FALSE) id <- slug_from_url(id) cli <- bcdc_http_client(paste0(base_url(), "action/package_show")) r <- cli$get(query = list(id = id)) if (r$status_code == 404){ stop(paste0("'", id, "' is not a valid record id or name in the BC data catalogue"), call. = FALSE) } r$raise_for_status() res <- jsonlite::fromJSON(r$parse("UTF-8"), simplifyVector = FALSE) stopifnot(res$success) ret <- res$result if (ret$id != id) { get_record_warn_once( "It is advised to use the permanent id ('", ret$id, "') ", "rather than the name of the record ('", id, "') to guard against future name changes.\n" ) } as.bcdc_record(ret) } format_record <- function(pkg) { pkg$details <- dplyr::bind_rows(pkg$details) # Create a resources data frame res_df <- resource_to_tibble(pkg$resources) res_df$bcdata_available <- other_format_available(res_df) | wfs_available(res_df) pkg$resource_df <- res_df pkg } as.bcdc_record <- function(x) { x <- format_record(x) class(x) <- "bcdc_record" x } as.bcdc_recordlist <- function(x) { class(x) <- "bcdc_recordlist" x }
# Internal functions ## Check whether something is a whole number is_whole_number <- function(x){ if(is.integer(x)){ TRUE } else if(is.numeric(x)){ if((x%%1) == 0 ){ TRUE } else { FALSE } } else { FALSE } } is_whole_positive <- function(x){ if(is_whole_number(x) && x >= 0) TRUE else FALSE }
/R/internal_helpers.R
no_license
vishalbelsare/xnet
R
false
false
338
r
# Internal functions ## Check whether something is a whole number is_whole_number <- function(x){ if(is.integer(x)){ TRUE } else if(is.numeric(x)){ if((x%%1) == 0 ){ TRUE } else { FALSE } } else { FALSE } } is_whole_positive <- function(x){ if(is_whole_number(x) && x >= 0) TRUE else FALSE }
library(ape) testtree <- read.tree("6299_0.txt") unrooted_tr <- unroot(testtree) write.tree(unrooted_tr, file="6299_0_unrooted.txt")
/codeml_files/newick_trees_processed/6299_0/rinput.R
no_license
DaniBoo/cyanobacteria_project
R
false
false
135
r
library(ape) testtree <- read.tree("6299_0.txt") unrooted_tr <- unroot(testtree) write.tree(unrooted_tr, file="6299_0_unrooted.txt")
#' @useDynLib processx, .registration = TRUE, .fixes = "c_" NULL ## Workaround an R CMD check false positive dummy_r6 <- function() R6::R6Class #' External process #' #' @description #' Managing external processes from R is not trivial, and this #' class aims to help with this deficiency. It is essentially a small #' wrapper around the `system` base R function, to return the process #' id of the started process, and set its standard output and error #' streams. The process id is then used to manage the process. #' #' @param n Number of characters or lines to read. #' @param grace Currently not used. #' @param close_connections Whether to close standard input, standard #' output, standard error connections and the poll connection, after #' killing the process. #' @param timeout Timeout in milliseconds, for the wait or the I/O #' polling. #' #' @section Polling: #' The `poll_io()` function polls the standard output and standard #' error connections of a process, with a timeout. If there is output #' in either of them, or they are closed (e.g. because the process exits) #' `poll_io()` returns immediately. #' #' In addition to polling a single process, the [poll()] function #' can poll the output of several processes, and returns as soon as any #' of them has generated output (or exited). #' #' @section Cleaning up background processes: #' processx kills processes that are not referenced any more (if `cleanup` #' is set to `TRUE`), or the whole subprocess tree (if `cleanup_tree` is #' also set to `TRUE`). #' #' The cleanup happens when the references of the processes object are #' garbage collected. 
To clean up earlier, you can call the `kill()` or #' `kill_tree()` method of the process(es), from an `on.exit()` expression, #' or an error handler: #' ```r #' process_manager <- function() { #' on.exit({ #' try(p1$kill(), silent = TRUE) #' try(p2$kill(), silent = TRUE) #' }, add = TRUE) #' p1 <- process$new("sleep", "3") #' p2 <- process$new("sleep", "10") #' p1$wait() #' p2$wait() #' } #' process_manager() #' ``` #' #' If you interrupt `process_manager()` or an error happens then both `p1` #' and `p2` are cleaned up immediately. Their connections will also be #' closed. The same happens at a regular exit. #' #' @export #' @examplesIf identical(Sys.getenv("IN_PKGDOWN"), "true") #' p <- process$new("sleep", "2") #' p$is_alive() #' p #' p$kill() #' p$is_alive() #' #' p <- process$new("sleep", "1") #' p$is_alive() #' Sys.sleep(2) #' p$is_alive() process <- R6::R6Class( "process", cloneable = FALSE, public = list( #' @description #' Start a new process in the background, and then return immediately. #' #' @return R6 object representing the process. #' @param command Character scalar, the command to run. #' Note that this argument is not passed to a shell, so no #' tilde-expansion or variable substitution is performed on it. #' It should not be quoted with [base::shQuote()]. See #' [base::normalizePath()] for tilde-expansion. #' @param args Character vector, arguments to the command. They will be #' passed to the process as is, without a shell transforming them, #' They don't need to be escaped. #' @param stdin What to do with the standard input. Possible values: #' * `NULL`: set to the _null device_, i.e. no standard input is #' provided; #' * a file name, use this file as standard input; #' * `"|"`: create a (writeable) connection for stdin. #' * `""` (empty string): inherit it from the main R process. If the #' main R process does not have a standard input stream, e.g. in #' RGui on Windows, then an error is thrown. 
#' @param stdout What to do with the standard output. Possible values: #' * `NULL`: discard it; #' * a string, redirect it to this file; #' * `"|"`: create a connection for it. #' * `""` (empty string): inherit it from the main R process. If the #' main R process does not have a standard output stream, e.g. in #' RGui on Windows, then an error is thrown. #' @param stderr What to do with the standard error. Possible values: #' * `NULL`: discard it; #' * a string, redirect it to this file; #' * `"|"`: create a connection for it; #' * `"2>&1"`: redirect it to the same connection (i.e. pipe or file) #' as `stdout`. `"2>&1"` is a way to keep standard output and error #' correctly interleaved. #' * `""` (empty string): inherit it from the main R process. If the #' main R process does not have a standard error stream, e.g. in #' RGui on Windows, then an error is thrown. #' @param pty Whether to create a pseudo terminal (pty) for the #' background process. This is currently only supported on Unix #' systems, but not supported on Solaris. #' If it is `TRUE`, then the `stdin`, `stdout` and `stderr` arguments #' must be `NULL`. If a pseudo terminal is created, then processx #' will create pipes for standard input and standard output. There is #' no separate pipe for standard error, because there is no way to #' distinguish between stdout and stderr on a pty. Note that the #' standard output connection of the pty is _blocking_, so we always #' poll the standard output connection before reading from it using #' the `$read_output()` method. Also, because `$read_output_lines()` #' could still block if no complete line is available, this function #' always fails if the process has a pty. Use `$read_output()` to #' read from ptys. #' @param pty_options Unix pseudo terminal options, a named list. see #' [default_pty_options()] for details and defaults. #' @param connections A list of processx connections to pass to the #' child process. This is an experimental feature currently. 
#' @param poll_connection Whether to create an extra connection to the #' process that allows polling, even if the standard input and #' standard output are not pipes. If this is `NULL` (the default), #' then this connection will be only created if standard output and #' standard error are not pipes, and `connections` is an empty list. #' If the poll connection is created, you can query it via #' `p$get_poll_connection()` and it is also included in the response #' to `p$poll_io()` and [poll()]. The numeric file descriptor of the #' poll connection comes right after `stderr` (2), and the #' connections listed in `connections`. #' @param env Environment variables of the child process. If `NULL`, #' the parent's environment is inherited. On Windows, many programs #' cannot function correctly if some environment variables are not #' set, so we always set `HOMEDRIVE`, `HOMEPATH`, `LOGONSERVER`, #' `PATH`, `SYSTEMDRIVE`, `SYSTEMROOT`, `TEMP`, `USERDOMAIN`, #' `USERNAME`, `USERPROFILE` and `WINDIR`. To append new environment #' variables to the ones set in the current process, specify #' `"current"` in `env`, without a name, and the appended ones with #' names. The appended ones can overwrite the current ones. #' @param cleanup Whether to kill the process when the `process` #' object is garbage collected. #' @param cleanup_tree Whether to kill the process and its child #' process tree when the `process` object is garbage collected. #' @param wd Working directory of the process. It must exist. #' If `NULL`, then the current working directory is used. #' @param echo_cmd Whether to print the command to the screen before #' running it. #' @param supervise Whether to register the process with a supervisor. #' If `TRUE`, the supervisor will ensure that the process is #' killed when the R process exits. #' @param windows_verbatim_args Whether to omit quoting the arguments #' on Windows. It is ignored on other platforms. 
#' @param windows_hide_window Whether to hide the application's window #' on Windows. It is ignored on other platforms. #' @param windows_detached_process Whether to use the #' `DETACHED_PROCESS` flag on Windows. If this is `TRUE`, then #' the child process will have no attached console, even if the #' parent had one. #' @param encoding The encoding to assume for `stdin`, `stdout` and #' `stderr`. By default the encoding of the current locale is #' used. Note that `processx` always reencodes the output of the #' `stdout` and `stderr` streams in UTF-8 currently. #' If you want to read them without any conversion, on all platforms, #' specify `"UTF-8"` as encoding. #' @param post_process An optional function to run when the process has #' finished. Currently it only runs if `$get_result()` is called. #' It is only run once. initialize = function(command = NULL, args = character(), stdin = NULL, stdout = NULL, stderr = NULL, pty = FALSE, pty_options = list(), connections = list(), poll_connection = NULL, env = NULL, cleanup = TRUE, cleanup_tree = FALSE, wd = NULL, echo_cmd = FALSE, supervise = FALSE, windows_verbatim_args = FALSE, windows_hide_window = FALSE, windows_detached_process = !cleanup, encoding = "", post_process = NULL) process_initialize(self, private, command, args, stdin, stdout, stderr, pty, pty_options, connections, poll_connection, env, cleanup, cleanup_tree, wd, echo_cmd, supervise, windows_verbatim_args, windows_hide_window, windows_detached_process, encoding, post_process), #' @description #' Cleanup method that is called when the `process` object is garbage #' collected. If requested so in the process constructor, then it #' eliminates all processes in the process's subprocess tree. finalize = function() { if (!is.null(private$tree_id) && private$cleanup_tree && ps::ps_is_supported()) self$kill_tree() }, #' @description #' Terminate the process. 
It also terminate all of its child #' processes, except if they have created a new process group (on Unix), #' or job object (on Windows). It returns `TRUE` if the process #' was terminated, and `FALSE` if it was not (because it was #' already finished/dead when `processx` tried to terminate it). kill = function(grace = 0.1, close_connections = TRUE) process_kill(self, private, grace, close_connections), #' @description #' Process tree cleanup. It terminates the process #' (if still alive), together with any child (or grandchild, etc.) #' processes. It uses the _ps_ package, so that needs to be installed, #' and _ps_ needs to support the current platform as well. Process tree #' cleanup works by marking the process with an environment variable, #' which is inherited in all child processes. This allows finding #' descendents, even if they are orphaned, i.e. they are not connected #' to the root of the tree cleanup in the process tree any more. #' `$kill_tree()` returns a named integer vector of the process ids that #' were killed, the names are the names of the processes (e.g. `"sleep"`, #' `"notepad.exe"`, `"Rterm.exe"`, etc.). kill_tree = function(grace = 0.1, close_connections = TRUE) process_kill_tree(self, private, grace, close_connections), #' @description #' Send a signal to the process. On Windows only the #' `SIGINT`, `SIGTERM` and `SIGKILL` signals are interpreted, #' and the special 0 signal. The first three all kill the process. The 0 #' signal returns `TRUE` if the process is alive, and `FALSE` #' otherwise. On Unix all signals are supported that the OS supports, #' and the 0 signal as well. #' @param signal An integer scalar, the id of the signal to send to #' the process. See [tools::pskill()] for the list of signals. signal = function(signal) process_signal(self, private, signal), #' @description #' Send an interrupt to the process. On Unix this is a #' `SIGINT` signal, and it is usually equivalent to pressing CTRL+C at #' the terminal prompt. 
On Windows, it is a CTRL+BREAK keypress. #' Applications may catch these events. By default they will quit. interrupt = function() process_interrupt(self, private), #' @description #' Query the process id. #' @return Integer scalar, the process id of the process. get_pid = function() process_get_pid(self, private), #' @description Check if the process is alive. #' @return Logical scalar. is_alive = function() process_is_alive(self, private), #' @description #' Wait until the process finishes, or a timeout happens. #' Note that if the process never finishes, and the timeout is infinite #' (the default), then R will never regain control. In some rare cases, #' `$wait()` might take a bit longer than specified to time out. This #' happens on Unix, when another package overwrites the processx #' `SIGCHLD` signal handler, after the processx process has started. #' One such package is parallel, if used with fork clusters, e.g. #' through `parallel::mcparallel()`. #' @return It returns the process itself, invisibly. wait = function(timeout = -1) process_wait(self, private, timeout), #' @description #' `$get_exit_status` returns the exit code of the process if it has #' finished and `NULL` otherwise. On Unix, in some rare cases, the exit #' status might be `NA`. This happens if another package (or R itself) #' overwrites the processx `SIGCHLD` handler, after the processx process #' has started. In these cases processx cannot determine the real exit #' status of the process. One such package is parallel, if used with #' fork clusters, e.g. through the `parallel::mcparallel()` function. get_exit_status = function() process_get_exit_status(self, private), #' @description #' `format(p)` or `p$format()` creates a string representation of the #' process, usually for printing. format = function() process_format(self, private), #' @description #' `print(p)` or `p$print()` shows some information about the #' process on the screen, whether it is running and it's process id, etc. 
print = function() process_print(self, private), #' @description #' `$get_start_time()` returns the time when the process was #' started. get_start_time = function() process_get_start_time(self, private), #' @description #' `$is_supervised()` returns whether the process is being tracked by #' supervisor process. is_supervised = function() process_is_supervised(self, private), #' @description #' `$supervise()` if passed `TRUE`, tells the supervisor to start #' tracking the process. If `FALSE`, tells the supervisor to stop #' tracking the process. Note that even if the supervisor is disabled #' for a process, if it was started with `cleanup = TRUE`, the process #' will still be killed when the object is garbage collected. #' @param status Whether to turn on of off the supervisor for this #' process. supervise = function(status) process_supervise(self, private, status), ## Output #' @description #' `$read_output()` reads from the standard output connection of the #' process. If the standard output connection was not requested, then #' then it returns an error. It uses a non-blocking text connection. This #' will work only if `stdout="|"` was used. Otherwise, it will throw an #' error. read_output = function(n = -1) process_read_output(self, private, n), #' @description #' `$read_error()` is similar to `$read_output`, but it reads #' from the standard error stream. read_error = function(n = -1) process_read_error(self, private, n), #' @description #' `$read_output_lines()` reads lines from standard output connection #' of the process. If the standard output connection was not requested, #' then it returns an error. It uses a non-blocking text connection. #' This will work only if `stdout="|"` was used. Otherwise, it will #' throw an error. read_output_lines = function(n = -1) process_read_output_lines(self, private, n), #' @description #' `$read_error_lines()` is similar to `$read_output_lines`, but #' it reads from the standard error stream. 
read_error_lines = function(n = -1) process_read_error_lines(self, private, n), #' @description #' `$is_incomplete_output()` return `FALSE` if the other end of #' the standard output connection was closed (most probably because the #' process exited). It return `TRUE` otherwise. is_incomplete_output = function() process_is_incompelete_output(self, private), #' @description #' `$is_incomplete_error()` return `FALSE` if the other end of #' the standard error connection was closed (most probably because the #' process exited). It return `TRUE` otherwise. is_incomplete_error = function() process_is_incompelete_error(self, private), #' @description #' `$has_input_connection()` return `TRUE` if there is a connection #' object for standard input; in other words, if `stdout="|"`. It returns #' `FALSE` otherwise. has_input_connection = function() process_has_input_connection(self, private), #' @description #' `$has_output_connection()` returns `TRUE` if there is a connection #' object for standard output; in other words, if `stdout="|"`. It returns #' `FALSE` otherwise. has_output_connection = function() process_has_output_connection(self, private), #' @description #' `$has_error_connection()` returns `TRUE` if there is a connection #' object for standard error; in other words, if `stderr="|"`. It returns #' `FALSE` otherwise. has_error_connection = function() process_has_error_connection(self, private), #' @description #' `$has_poll_connection()` return `TRUE` if there is a poll connection, #' `FALSE` otherwise. has_poll_connection = function() process_has_poll_connection(self, private), #' @description #' `$get_input_connection()` returns a connection object, to the #' standard input stream of the process. get_input_connection = function() process_get_input_connection(self, private), #' @description #' `$get_output_connection()` returns a connection object, to the #' standard output stream of the process. 
get_output_connection = function() process_get_output_connection(self, private), #' @description #' `$get_error_conneciton()` returns a connection object, to the #' standard error stream of the process. get_error_connection = function() process_get_error_connection(self, private), #' @description #' `$read_all_output()` waits for all standard output from the process. #' It does not return until the process has finished. #' Note that this process involves waiting for the process to finish, #' polling for I/O and potentially several `readLines()` calls. #' It returns a character scalar. This will return content only if #' `stdout="|"` was used. Otherwise, it will throw an error. read_all_output = function() process_read_all_output(self, private), #' @description #' `$read_all_error()` waits for all standard error from the process. #' It does not return until the process has finished. #' Note that this process involves waiting for the process to finish, #' polling for I/O and potentially several `readLines()` calls. #' It returns a character scalar. This will return content only if #' `stderr="|"` was used. Otherwise, it will throw an error. read_all_error = function() process_read_all_error(self, private), #' @description #' `$read_all_output_lines()` waits for all standard output lines #' from a process. It does not return until the process has finished. #' Note that this process involves waiting for the process to finish, #' polling for I/O and potentially several `readLines()` calls. #' It returns a character vector. This will return content only if #' `stdout="|"` was used. Otherwise, it will throw an error. read_all_output_lines = function() process_read_all_output_lines(self, private), #' @description #' `$read_all_error_lines()` waits for all standard error lines from #' a process. It does not return until the process has finished. 
#' Note that this process involves waiting for the process to finish, #' polling for I/O and potentially several `readLines()` calls. #' It returns a character vector. This will return content only if #' `stderr="|"` was used. Otherwise, it will throw an error. read_all_error_lines = function() process_read_all_error_lines(self, private), #' @description #' `$write_input()` writes the character vector (separated by `sep`) to #' the standard input of the process. It will be converted to the specified #' encoding. This operation is non-blocking, and it will return, even if #' the write fails (because the write buffer is full), or if it suceeds #' partially (i.e. not the full string is written). It returns with a raw #' vector, that contains the bytes that were not written. You can supply #' this raw vector to `$write_input()` again, until it is fully written, #' and then the return value will be `raw(0)` (invisibly). #' #' @param str Character or raw vector to write to the standard input #' of the process. If a character vector with a marked encoding, #' it will be converted to `encoding`. #' @param sep Separator to add between `str` elements if it is a #' character vector. It is ignored if `str` is a raw vector. #' @return Leftover text (as a raw vector), that was not written. write_input = function(str, sep = "\n") process_write_input(self, private, str, sep), #' @description #' `$get_input_file()` if the `stdin` argument was a filename, #' this returns the absolute path to the file. If `stdin` was `"|"` or #' `NULL`, this simply returns that value. get_input_file = function() process_get_input_file(self, private), #' @description #' `$get_output_file()` if the `stdout` argument was a filename, #' this returns the absolute path to the file. If `stdout` was `"|"` or #' `NULL`, this simply returns that value. 
get_output_file = function() process_get_output_file(self, private), #' @description #' `$get_error_file()` if the `stderr` argument was a filename, #' this returns the absolute path to the file. If `stderr` was `"|"` or #' `NULL`, this simply returns that value. get_error_file = function() process_get_error_file(self, private), #' @description #' `$poll_io()` polls the process's connections for I/O. See more in #' the _Polling_ section, and see also the [poll()] function #' to poll on multiple processes. poll_io = function(timeout) process_poll_io(self, private, timeout), #' @description #' `$get_poll_connetion()` returns the poll connection, if the process has #' one. get_poll_connection = function() process_get_poll_connection(self, private), #' @description #' `$get_result()` returns the result of the post processesing function. #' It can only be called once the process has finished. If the process has #' no post-processing function, then `NULL` is returned. get_result = function() process_get_result(self, private), #' @description #' `$as_ps_handle()` returns a [ps::ps_handle] object, corresponding to #' the process. as_ps_handle = function() process_as_ps_handle(self, private), #' @description #' Calls [ps::ps_name()] to get the process name. get_name = function() ps_method(ps::ps_name, self), #' @description #' Calls [ps::ps_exe()] to get the path of the executable. get_exe = function() ps_method(ps::ps_exe, self), #' @description #' Calls [ps::ps_cmdline()] to get the command line. get_cmdline = function() ps_method(ps::ps_cmdline, self), #' @description #' Calls [ps::ps_status()] to get the process status. get_status = function() ps_method(ps::ps_status, self), #' @description #' calls [ps::ps_username()] to get the username. get_username = function() ps_method(ps::ps_username, self), #' @description #' Calls [ps::ps_cwd()] to get the current working directory. 
get_wd = function() ps_method(ps::ps_cwd, self), #' @description #' Calls [ps::ps_cpu_times()] to get CPU usage data. get_cpu_times = function() ps_method(ps::ps_cpu_times, self), #' @description #' Calls [ps::ps_memory_info()] to get memory data. get_memory_info = function() ps_method(ps::ps_memory_info, self), #' @description #' Calls [ps::ps_suspend()] to suspend the process. suspend = function() ps_method(ps::ps_suspend, self), #' @description #' Calls [ps::ps_resume()] to resume a suspended process. resume = function() ps_method(ps::ps_resume, self) ), private = list( command = NULL, # Save 'command' argument here args = NULL, # Save 'args' argument here cleanup = NULL, # cleanup argument cleanup_tree = NULL, # cleanup_tree argument stdin = NULL, # stdin argument or stream stdout = NULL, # stdout argument or stream stderr = NULL, # stderr argument or stream pty = NULL, # whether we should create a PTY pty_options = NULL, # various PTY options pstdin = NULL, # the original stdin argument pstdout = NULL, # the original stdout argument pstderr = NULL, # the original stderr argument cleanfiles = NULL, # which temp stdout/stderr file(s) to clean up wd = NULL, # working directory (or NULL for current) starttime = NULL, # timestamp of start echo_cmd = NULL, # whether to echo the command windows_verbatim_args = NULL, windows_hide_window = NULL, status = NULL, # C file handle supervised = FALSE, # Whether process is tracked by supervisor stdin_pipe = NULL, stdout_pipe = NULL, stderr_pipe = NULL, poll_pipe = NULL, encoding = "", env = NULL, connections = list(), post_process = NULL, post_process_result = NULL, post_process_done = FALSE, tree_id = NULL, get_short_name = function() process_get_short_name(self, private), close_connections = function() process_close_connections(self, private) ) ) ## See the C source code for a discussion about the implementation ## of these methods process_wait <- function(self, private, timeout) { "!DEBUG process_wait 
`private$get_short_name()`" rethrow_call_with_cleanup( c_processx_wait, private$status, as.integer(timeout), private$get_short_name() ) invisible(self) } process_is_alive <- function(self, private) { "!DEBUG process_is_alive `private$get_short_name()`" rethrow_call(c_processx_is_alive, private$status, private$get_short_name()) } process_get_exit_status <- function(self, private) { "!DEBUG process_get_exit_status `private$get_short_name()`" rethrow_call(c_processx_get_exit_status, private$status, private$get_short_name()) } process_signal <- function(self, private, signal) { "!DEBUG process_signal `private$get_short_name()` `signal`" rethrow_call(c_processx_signal, private$status, as.integer(signal), private$get_short_name()) } process_interrupt <- function(self, private) { "!DEBUG process_interrupt `private$get_short_name()`" if (os_type() == "windows") { pid <- as.character(self$get_pid()) st <- run(get_tool("interrupt"), c(pid, "c"), error_on_status = FALSE) if (st$status == 0) TRUE else FALSE } else { rethrow_call(c_processx_interrupt, private$status, private$get_short_name()) } } process_kill <- function(self, private, grace, close_connections) { "!DEBUG process_kill '`private$get_short_name()`', pid `self$get_pid()`" ret <- rethrow_call(c_processx_kill, private$status, as.numeric(grace), private$get_short_name()) if (close_connections) private$close_connections() ret } process_kill_tree <- function(self, private, grace, close_connections) { "!DEBUG process_kill_tree '`private$get_short_name()`', pid `self$get_pid()`" if (!ps::ps_is_supported()) { throw(new_not_implemented_error( "kill_tree is not supported on this platform")) } ret <- get("ps_kill_tree", asNamespace("ps"))(private$tree_id) if (close_connections) private$close_connections() ret } process_get_start_time <- function(self, private) { format_unix_time(private$starttime) } process_get_pid <- function(self, private) { rethrow_call(c_processx_get_pid, private$status) } process_is_supervised <- 
function(self, private) { private$supervised } process_supervise <- function(self, private, status) { if (status && !self$is_supervised()) { supervisor_watch_pid(self$get_pid()) private$supervised <- TRUE } else if (!status && self$is_supervised()) { supervisor_unwatch_pid(self$get_pid()) private$supervised <- FALSE } } process_get_result <- function(self, private) { if (self$is_alive()) throw(new_error("Process is still alive")) if (!private$post_process_done && is.function(private$post_process)) { private$post_process_result <- private$post_process() private$post_process_done <- TRUE } private$post_process_result } process_as_ps_handle <- function(self, private) { ps::ps_handle(self$get_pid(), self$get_start_time()) } ps_method <- function(fun, self) { fun(ps::ps_handle(self$get_pid(), self$get_start_time())) } process_close_connections <- function(self, private) { for (f in c("stdin_pipe", "stdout_pipe", "stderr_pipe", "poll_pipe")) { if (!is.null(p <- private[[f]])) { rethrow_call(c_processx_connection_close, p) } } } #' Default options for pseudo terminals (ptys) #' #' @return Named list of default values of pty options. #' #' Options and default values: #' * `echo` whether to keep the echo on the terminal. `FALSE` turns echo #' off. #' * `rows` the (initial) terminal size, number of rows. #' * `cols` the (initial) terminal size, number of columns. #' #' @export default_pty_options <- function() { list( echo = FALSE, rows = 25L, cols = 80L ) }
/R/process.R
permissive
aviralg/processx
R
false
false
31,166
r
#' @useDynLib processx, .registration = TRUE, .fixes = "c_" NULL ## Workaround an R CMD check false positive dummy_r6 <- function() R6::R6Class #' External process #' #' @description #' Managing external processes from R is not trivial, and this #' class aims to help with this deficiency. It is essentially a small #' wrapper around the `system` base R function, to return the process #' id of the started process, and set its standard output and error #' streams. The process id is then used to manage the process. #' #' @param n Number of characters or lines to read. #' @param grace Currently not used. #' @param close_connections Whether to close standard input, standard #' output, standard error connections and the poll connection, after #' killing the process. #' @param timeout Timeout in milliseconds, for the wait or the I/O #' polling. #' #' @section Polling: #' The `poll_io()` function polls the standard output and standard #' error connections of a process, with a timeout. If there is output #' in either of them, or they are closed (e.g. because the process exits) #' `poll_io()` returns immediately. #' #' In addition to polling a single process, the [poll()] function #' can poll the output of several processes, and returns as soon as any #' of them has generated output (or exited). #' #' @section Cleaning up background processes: #' processx kills processes that are not referenced any more (if `cleanup` #' is set to `TRUE`), or the whole subprocess tree (if `cleanup_tree` is #' also set to `TRUE`). #' #' The cleanup happens when the references of the processes object are #' garbage collected. 
To clean up earlier, you can call the `kill()` or #' `kill_tree()` method of the process(es), from an `on.exit()` expression, #' or an error handler: #' ```r #' process_manager <- function() { #' on.exit({ #' try(p1$kill(), silent = TRUE) #' try(p2$kill(), silent = TRUE) #' }, add = TRUE) #' p1 <- process$new("sleep", "3") #' p2 <- process$new("sleep", "10") #' p1$wait() #' p2$wait() #' } #' process_manager() #' ``` #' #' If you interrupt `process_manager()` or an error happens then both `p1` #' and `p2` are cleaned up immediately. Their connections will also be #' closed. The same happens at a regular exit. #' #' @export #' @examplesIf identical(Sys.getenv("IN_PKGDOWN"), "true") #' p <- process$new("sleep", "2") #' p$is_alive() #' p #' p$kill() #' p$is_alive() #' #' p <- process$new("sleep", "1") #' p$is_alive() #' Sys.sleep(2) #' p$is_alive() process <- R6::R6Class( "process", cloneable = FALSE, public = list( #' @description #' Start a new process in the background, and then return immediately. #' #' @return R6 object representing the process. #' @param command Character scalar, the command to run. #' Note that this argument is not passed to a shell, so no #' tilde-expansion or variable substitution is performed on it. #' It should not be quoted with [base::shQuote()]. See #' [base::normalizePath()] for tilde-expansion. #' @param args Character vector, arguments to the command. They will be #' passed to the process as is, without a shell transforming them, #' They don't need to be escaped. #' @param stdin What to do with the standard input. Possible values: #' * `NULL`: set to the _null device_, i.e. no standard input is #' provided; #' * a file name, use this file as standard input; #' * `"|"`: create a (writeable) connection for stdin. #' * `""` (empty string): inherit it from the main R process. If the #' main R process does not have a standard input stream, e.g. in #' RGui on Windows, then an error is thrown. 
#' @param stdout What to do with the standard output. Possible values: #' * `NULL`: discard it; #' * a string, redirect it to this file; #' * `"|"`: create a connection for it. #' * `""` (empty string): inherit it from the main R process. If the #' main R process does not have a standard output stream, e.g. in #' RGui on Windows, then an error is thrown. #' @param stderr What to do with the standard error. Possible values: #' * `NULL`: discard it; #' * a string, redirect it to this file; #' * `"|"`: create a connection for it; #' * `"2>&1"`: redirect it to the same connection (i.e. pipe or file) #' as `stdout`. `"2>&1"` is a way to keep standard output and error #' correctly interleaved. #' * `""` (empty string): inherit it from the main R process. If the #' main R process does not have a standard error stream, e.g. in #' RGui on Windows, then an error is thrown. #' @param pty Whether to create a pseudo terminal (pty) for the #' background process. This is currently only supported on Unix #' systems, but not supported on Solaris. #' If it is `TRUE`, then the `stdin`, `stdout` and `stderr` arguments #' must be `NULL`. If a pseudo terminal is created, then processx #' will create pipes for standard input and standard output. There is #' no separate pipe for standard error, because there is no way to #' distinguish between stdout and stderr on a pty. Note that the #' standard output connection of the pty is _blocking_, so we always #' poll the standard output connection before reading from it using #' the `$read_output()` method. Also, because `$read_output_lines()` #' could still block if no complete line is available, this function #' always fails if the process has a pty. Use `$read_output()` to #' read from ptys. #' @param pty_options Unix pseudo terminal options, a named list. see #' [default_pty_options()] for details and defaults. #' @param connections A list of processx connections to pass to the #' child process. This is an experimental feature currently. 
#' @param poll_connection Whether to create an extra connection to the #' process that allows polling, even if the standard input and #' standard output are not pipes. If this is `NULL` (the default), #' then this connection will be only created if standard output and #' standard error are not pipes, and `connections` is an empty list. #' If the poll connection is created, you can query it via #' `p$get_poll_connection()` and it is also included in the response #' to `p$poll_io()` and [poll()]. The numeric file descriptor of the #' poll connection comes right after `stderr` (2), and the #' connections listed in `connections`. #' @param env Environment variables of the child process. If `NULL`, #' the parent's environment is inherited. On Windows, many programs #' cannot function correctly if some environment variables are not #' set, so we always set `HOMEDRIVE`, `HOMEPATH`, `LOGONSERVER`, #' `PATH`, `SYSTEMDRIVE`, `SYSTEMROOT`, `TEMP`, `USERDOMAIN`, #' `USERNAME`, `USERPROFILE` and `WINDIR`. To append new environment #' variables to the ones set in the current process, specify #' `"current"` in `env`, without a name, and the appended ones with #' names. The appended ones can overwrite the current ones. #' @param cleanup Whether to kill the process when the `process` #' object is garbage collected. #' @param cleanup_tree Whether to kill the process and its child #' process tree when the `process` object is garbage collected. #' @param wd Working directory of the process. It must exist. #' If `NULL`, then the current working directory is used. #' @param echo_cmd Whether to print the command to the screen before #' running it. #' @param supervise Whether to register the process with a supervisor. #' If `TRUE`, the supervisor will ensure that the process is #' killed when the R process exits. #' @param windows_verbatim_args Whether to omit quoting the arguments #' on Windows. It is ignored on other platforms. 
#' @param windows_hide_window Whether to hide the application's window #' on Windows. It is ignored on other platforms. #' @param windows_detached_process Whether to use the #' `DETACHED_PROCESS` flag on Windows. If this is `TRUE`, then #' the child process will have no attached console, even if the #' parent had one. #' @param encoding The encoding to assume for `stdin`, `stdout` and #' `stderr`. By default the encoding of the current locale is #' used. Note that `processx` always reencodes the output of the #' `stdout` and `stderr` streams in UTF-8 currently. #' If you want to read them without any conversion, on all platforms, #' specify `"UTF-8"` as encoding. #' @param post_process An optional function to run when the process has #' finished. Currently it only runs if `$get_result()` is called. #' It is only run once. initialize = function(command = NULL, args = character(), stdin = NULL, stdout = NULL, stderr = NULL, pty = FALSE, pty_options = list(), connections = list(), poll_connection = NULL, env = NULL, cleanup = TRUE, cleanup_tree = FALSE, wd = NULL, echo_cmd = FALSE, supervise = FALSE, windows_verbatim_args = FALSE, windows_hide_window = FALSE, windows_detached_process = !cleanup, encoding = "", post_process = NULL) process_initialize(self, private, command, args, stdin, stdout, stderr, pty, pty_options, connections, poll_connection, env, cleanup, cleanup_tree, wd, echo_cmd, supervise, windows_verbatim_args, windows_hide_window, windows_detached_process, encoding, post_process), #' @description #' Cleanup method that is called when the `process` object is garbage #' collected. If requested so in the process constructor, then it #' eliminates all processes in the process's subprocess tree. finalize = function() { if (!is.null(private$tree_id) && private$cleanup_tree && ps::ps_is_supported()) self$kill_tree() }, #' @description #' Terminate the process. 
It also terminate all of its child #' processes, except if they have created a new process group (on Unix), #' or job object (on Windows). It returns `TRUE` if the process #' was terminated, and `FALSE` if it was not (because it was #' already finished/dead when `processx` tried to terminate it). kill = function(grace = 0.1, close_connections = TRUE) process_kill(self, private, grace, close_connections), #' @description #' Process tree cleanup. It terminates the process #' (if still alive), together with any child (or grandchild, etc.) #' processes. It uses the _ps_ package, so that needs to be installed, #' and _ps_ needs to support the current platform as well. Process tree #' cleanup works by marking the process with an environment variable, #' which is inherited in all child processes. This allows finding #' descendents, even if they are orphaned, i.e. they are not connected #' to the root of the tree cleanup in the process tree any more. #' `$kill_tree()` returns a named integer vector of the process ids that #' were killed, the names are the names of the processes (e.g. `"sleep"`, #' `"notepad.exe"`, `"Rterm.exe"`, etc.). kill_tree = function(grace = 0.1, close_connections = TRUE) process_kill_tree(self, private, grace, close_connections), #' @description #' Send a signal to the process. On Windows only the #' `SIGINT`, `SIGTERM` and `SIGKILL` signals are interpreted, #' and the special 0 signal. The first three all kill the process. The 0 #' signal returns `TRUE` if the process is alive, and `FALSE` #' otherwise. On Unix all signals are supported that the OS supports, #' and the 0 signal as well. #' @param signal An integer scalar, the id of the signal to send to #' the process. See [tools::pskill()] for the list of signals. signal = function(signal) process_signal(self, private, signal), #' @description #' Send an interrupt to the process. On Unix this is a #' `SIGINT` signal, and it is usually equivalent to pressing CTRL+C at #' the terminal prompt. 
On Windows, it is a CTRL+BREAK keypress. #' Applications may catch these events. By default they will quit. interrupt = function() process_interrupt(self, private), #' @description #' Query the process id. #' @return Integer scalar, the process id of the process. get_pid = function() process_get_pid(self, private), #' @description Check if the process is alive. #' @return Logical scalar. is_alive = function() process_is_alive(self, private), #' @description #' Wait until the process finishes, or a timeout happens. #' Note that if the process never finishes, and the timeout is infinite #' (the default), then R will never regain control. In some rare cases, #' `$wait()` might take a bit longer than specified to time out. This #' happens on Unix, when another package overwrites the processx #' `SIGCHLD` signal handler, after the processx process has started. #' One such package is parallel, if used with fork clusters, e.g. #' through `parallel::mcparallel()`. #' @return It returns the process itself, invisibly. wait = function(timeout = -1) process_wait(self, private, timeout), #' @description #' `$get_exit_status` returns the exit code of the process if it has #' finished and `NULL` otherwise. On Unix, in some rare cases, the exit #' status might be `NA`. This happens if another package (or R itself) #' overwrites the processx `SIGCHLD` handler, after the processx process #' has started. In these cases processx cannot determine the real exit #' status of the process. One such package is parallel, if used with #' fork clusters, e.g. through the `parallel::mcparallel()` function. get_exit_status = function() process_get_exit_status(self, private), #' @description #' `format(p)` or `p$format()` creates a string representation of the #' process, usually for printing. format = function() process_format(self, private), #' @description #' `print(p)` or `p$print()` shows some information about the #' process on the screen, whether it is running and it's process id, etc. 
print = function() process_print(self, private), #' @description #' `$get_start_time()` returns the time when the process was #' started. get_start_time = function() process_get_start_time(self, private), #' @description #' `$is_supervised()` returns whether the process is being tracked by #' supervisor process. is_supervised = function() process_is_supervised(self, private), #' @description #' `$supervise()` if passed `TRUE`, tells the supervisor to start #' tracking the process. If `FALSE`, tells the supervisor to stop #' tracking the process. Note that even if the supervisor is disabled #' for a process, if it was started with `cleanup = TRUE`, the process #' will still be killed when the object is garbage collected. #' @param status Whether to turn on of off the supervisor for this #' process. supervise = function(status) process_supervise(self, private, status), ## Output #' @description #' `$read_output()` reads from the standard output connection of the #' process. If the standard output connection was not requested, then #' then it returns an error. It uses a non-blocking text connection. This #' will work only if `stdout="|"` was used. Otherwise, it will throw an #' error. read_output = function(n = -1) process_read_output(self, private, n), #' @description #' `$read_error()` is similar to `$read_output`, but it reads #' from the standard error stream. read_error = function(n = -1) process_read_error(self, private, n), #' @description #' `$read_output_lines()` reads lines from standard output connection #' of the process. If the standard output connection was not requested, #' then it returns an error. It uses a non-blocking text connection. #' This will work only if `stdout="|"` was used. Otherwise, it will #' throw an error. read_output_lines = function(n = -1) process_read_output_lines(self, private, n), #' @description #' `$read_error_lines()` is similar to `$read_output_lines`, but #' it reads from the standard error stream. 
read_error_lines = function(n = -1) process_read_error_lines(self, private, n), #' @description #' `$is_incomplete_output()` return `FALSE` if the other end of #' the standard output connection was closed (most probably because the #' process exited). It return `TRUE` otherwise. is_incomplete_output = function() process_is_incompelete_output(self, private), #' @description #' `$is_incomplete_error()` return `FALSE` if the other end of #' the standard error connection was closed (most probably because the #' process exited). It return `TRUE` otherwise. is_incomplete_error = function() process_is_incompelete_error(self, private), #' @description #' `$has_input_connection()` return `TRUE` if there is a connection #' object for standard input; in other words, if `stdout="|"`. It returns #' `FALSE` otherwise. has_input_connection = function() process_has_input_connection(self, private), #' @description #' `$has_output_connection()` returns `TRUE` if there is a connection #' object for standard output; in other words, if `stdout="|"`. It returns #' `FALSE` otherwise. has_output_connection = function() process_has_output_connection(self, private), #' @description #' `$has_error_connection()` returns `TRUE` if there is a connection #' object for standard error; in other words, if `stderr="|"`. It returns #' `FALSE` otherwise. has_error_connection = function() process_has_error_connection(self, private), #' @description #' `$has_poll_connection()` return `TRUE` if there is a poll connection, #' `FALSE` otherwise. has_poll_connection = function() process_has_poll_connection(self, private), #' @description #' `$get_input_connection()` returns a connection object, to the #' standard input stream of the process. get_input_connection = function() process_get_input_connection(self, private), #' @description #' `$get_output_connection()` returns a connection object, to the #' standard output stream of the process. 
get_output_connection = function() process_get_output_connection(self, private), #' @description #' `$get_error_conneciton()` returns a connection object, to the #' standard error stream of the process. get_error_connection = function() process_get_error_connection(self, private), #' @description #' `$read_all_output()` waits for all standard output from the process. #' It does not return until the process has finished. #' Note that this process involves waiting for the process to finish, #' polling for I/O and potentially several `readLines()` calls. #' It returns a character scalar. This will return content only if #' `stdout="|"` was used. Otherwise, it will throw an error. read_all_output = function() process_read_all_output(self, private), #' @description #' `$read_all_error()` waits for all standard error from the process. #' It does not return until the process has finished. #' Note that this process involves waiting for the process to finish, #' polling for I/O and potentially several `readLines()` calls. #' It returns a character scalar. This will return content only if #' `stderr="|"` was used. Otherwise, it will throw an error. read_all_error = function() process_read_all_error(self, private), #' @description #' `$read_all_output_lines()` waits for all standard output lines #' from a process. It does not return until the process has finished. #' Note that this process involves waiting for the process to finish, #' polling for I/O and potentially several `readLines()` calls. #' It returns a character vector. This will return content only if #' `stdout="|"` was used. Otherwise, it will throw an error. read_all_output_lines = function() process_read_all_output_lines(self, private), #' @description #' `$read_all_error_lines()` waits for all standard error lines from #' a process. It does not return until the process has finished. 
#' Note that this process involves waiting for the process to finish, #' polling for I/O and potentially several `readLines()` calls. #' It returns a character vector. This will return content only if #' `stderr="|"` was used. Otherwise, it will throw an error. read_all_error_lines = function() process_read_all_error_lines(self, private), #' @description #' `$write_input()` writes the character vector (separated by `sep`) to #' the standard input of the process. It will be converted to the specified #' encoding. This operation is non-blocking, and it will return, even if #' the write fails (because the write buffer is full), or if it suceeds #' partially (i.e. not the full string is written). It returns with a raw #' vector, that contains the bytes that were not written. You can supply #' this raw vector to `$write_input()` again, until it is fully written, #' and then the return value will be `raw(0)` (invisibly). #' #' @param str Character or raw vector to write to the standard input #' of the process. If a character vector with a marked encoding, #' it will be converted to `encoding`. #' @param sep Separator to add between `str` elements if it is a #' character vector. It is ignored if `str` is a raw vector. #' @return Leftover text (as a raw vector), that was not written. write_input = function(str, sep = "\n") process_write_input(self, private, str, sep), #' @description #' `$get_input_file()` if the `stdin` argument was a filename, #' this returns the absolute path to the file. If `stdin` was `"|"` or #' `NULL`, this simply returns that value. get_input_file = function() process_get_input_file(self, private), #' @description #' `$get_output_file()` if the `stdout` argument was a filename, #' this returns the absolute path to the file. If `stdout` was `"|"` or #' `NULL`, this simply returns that value. 
get_output_file = function() process_get_output_file(self, private), #' @description #' `$get_error_file()` if the `stderr` argument was a filename, #' this returns the absolute path to the file. If `stderr` was `"|"` or #' `NULL`, this simply returns that value. get_error_file = function() process_get_error_file(self, private), #' @description #' `$poll_io()` polls the process's connections for I/O. See more in #' the _Polling_ section, and see also the [poll()] function #' to poll on multiple processes. poll_io = function(timeout) process_poll_io(self, private, timeout), #' @description #' `$get_poll_connetion()` returns the poll connection, if the process has #' one. get_poll_connection = function() process_get_poll_connection(self, private), #' @description #' `$get_result()` returns the result of the post processesing function. #' It can only be called once the process has finished. If the process has #' no post-processing function, then `NULL` is returned. get_result = function() process_get_result(self, private), #' @description #' `$as_ps_handle()` returns a [ps::ps_handle] object, corresponding to #' the process. as_ps_handle = function() process_as_ps_handle(self, private), #' @description #' Calls [ps::ps_name()] to get the process name. get_name = function() ps_method(ps::ps_name, self), #' @description #' Calls [ps::ps_exe()] to get the path of the executable. get_exe = function() ps_method(ps::ps_exe, self), #' @description #' Calls [ps::ps_cmdline()] to get the command line. get_cmdline = function() ps_method(ps::ps_cmdline, self), #' @description #' Calls [ps::ps_status()] to get the process status. get_status = function() ps_method(ps::ps_status, self), #' @description #' calls [ps::ps_username()] to get the username. get_username = function() ps_method(ps::ps_username, self), #' @description #' Calls [ps::ps_cwd()] to get the current working directory. 
get_wd = function() ps_method(ps::ps_cwd, self), #' @description #' Calls [ps::ps_cpu_times()] to get CPU usage data. get_cpu_times = function() ps_method(ps::ps_cpu_times, self), #' @description #' Calls [ps::ps_memory_info()] to get memory data. get_memory_info = function() ps_method(ps::ps_memory_info, self), #' @description #' Calls [ps::ps_suspend()] to suspend the process. suspend = function() ps_method(ps::ps_suspend, self), #' @description #' Calls [ps::ps_resume()] to resume a suspended process. resume = function() ps_method(ps::ps_resume, self) ), private = list( command = NULL, # Save 'command' argument here args = NULL, # Save 'args' argument here cleanup = NULL, # cleanup argument cleanup_tree = NULL, # cleanup_tree argument stdin = NULL, # stdin argument or stream stdout = NULL, # stdout argument or stream stderr = NULL, # stderr argument or stream pty = NULL, # whether we should create a PTY pty_options = NULL, # various PTY options pstdin = NULL, # the original stdin argument pstdout = NULL, # the original stdout argument pstderr = NULL, # the original stderr argument cleanfiles = NULL, # which temp stdout/stderr file(s) to clean up wd = NULL, # working directory (or NULL for current) starttime = NULL, # timestamp of start echo_cmd = NULL, # whether to echo the command windows_verbatim_args = NULL, windows_hide_window = NULL, status = NULL, # C file handle supervised = FALSE, # Whether process is tracked by supervisor stdin_pipe = NULL, stdout_pipe = NULL, stderr_pipe = NULL, poll_pipe = NULL, encoding = "", env = NULL, connections = list(), post_process = NULL, post_process_result = NULL, post_process_done = FALSE, tree_id = NULL, get_short_name = function() process_get_short_name(self, private), close_connections = function() process_close_connections(self, private) ) ) ## See the C source code for a discussion about the implementation ## of these methods process_wait <- function(self, private, timeout) { "!DEBUG process_wait 
`private$get_short_name()`"
  # Delegate to the C implementation; rethrow_call_with_cleanup() converts
  # C-level errors into R conditions.
  rethrow_call_with_cleanup(
    c_processx_wait, private$status, as.integer(timeout),
    private$get_short_name()
  )
  # Return the process object invisibly so $wait() can be chained.
  invisible(self)
}

# Is the process still running?  Pure query; answered by the C layer from
# the stored status handle.  (The bare "!DEBUG ..." strings throughout are
# debug trace markers -- presumably consumed by debugme-style tooling.)
process_is_alive <- function(self, private) {
  "!DEBUG process_is_alive `private$get_short_name()`"
  rethrow_call(c_processx_is_alive, private$status, private$get_short_name())
}

# Exit code of the process, queried from the C layer (the R6 method docs
# describe the NULL-while-running and Unix NA corner cases).
process_get_exit_status <- function(self, private) {
  "!DEBUG process_get_exit_status `private$get_short_name()`"
  rethrow_call(c_processx_get_exit_status, private$status,
               private$get_short_name())
}

# Send `signal` (coerced to integer) to the process via the C layer.
process_signal <- function(self, private, signal) {
  "!DEBUG process_signal `private$get_short_name()` `signal`"
  rethrow_call(c_processx_signal, private$status, as.integer(signal),
               private$get_short_name())
}

# Interrupt the process.  On Windows a bundled helper tool is run against
# the pid (the class docs describe this as a CTRL+BREAK keypress) and its
# exit status is mapped to TRUE/FALSE; on Unix the C layer delivers the
# interrupt directly.
process_interrupt <- function(self, private) {
  "!DEBUG process_interrupt `private$get_short_name()`"
  if (os_type() == "windows") {
    pid <- as.character(self$get_pid())
    st <- run(get_tool("interrupt"), c(pid, "c"), error_on_status = FALSE)
    if (st$status == 0) TRUE else FALSE
  } else {
    rethrow_call(c_processx_interrupt, private$status,
                 private$get_short_name())
  }
}

# Kill the process (with a numeric grace period handed to the C layer),
# then optionally close this object's connection handles.
process_kill <- function(self, private, grace, close_connections) {
  "!DEBUG process_kill '`private$get_short_name()`', pid `self$get_pid()`"
  ret <- rethrow_call(c_processx_kill, private$status, as.numeric(grace),
                      private$get_short_name())
  if (close_connections) private$close_connections()
  ret
}

# Kill the whole process tree.  Requires ps support on this platform;
# calls ps's unexported ps_kill_tree() (looked up via asNamespace()) with
# the tree-id marker stored in private$tree_id.
# NOTE(review): `grace` is accepted but not used in this function.
process_kill_tree <- function(self, private, grace, close_connections) {
  "!DEBUG process_kill_tree '`private$get_short_name()`', pid `self$get_pid()`"
  if (!ps::ps_is_supported()) {
    throw(new_not_implemented_error(
      "kill_tree is not supported on this platform"))
  }
  ret <- get("ps_kill_tree", asNamespace("ps"))(private$tree_id)
  if (close_connections) private$close_connections()
  ret
}

# Start time recorded when the process was spawned, formatted from the
# stored unix timestamp.
process_get_start_time <- function(self, private) {
  format_unix_time(private$starttime)
}

# Process id, queried from the C layer.
process_get_pid <- function(self, private) {
  rethrow_call(c_processx_get_pid, private$status)
}

# Whether the process is currently registered with the supervisor (see
# process_supervise()).
process_is_supervised <- function(self, private) {
  private$supervised
}

# Register/unregister the process with the supervisor, keeping the
# private$supervised flag in sync.  No-op if already in the requested
# state.
process_supervise <- function(self, private, status) {
  if (status && !self$is_supervised()) {
    supervisor_watch_pid(self$get_pid())
    private$supervised <- TRUE
  } else if (!status && self$is_supervised()) {
    supervisor_unwatch_pid(self$get_pid())
    private$supervised <- FALSE
  }
}

# Result of the post-processing callback.  Errors while the process is
# still alive; runs the callback at most once and caches its value.
process_get_result <- function(self, private) {
  if (self$is_alive()) throw(new_error("Process is still alive"))
  if (!private$post_process_done && is.function(private$post_process)) {
    private$post_process_result <- private$post_process()
    private$post_process_done <- TRUE
  }
  private$post_process_result
}

# Convert to a ps::ps_handle, identified by pid and start time.
process_as_ps_handle <- function(self, private) {
  ps::ps_handle(self$get_pid(), self$get_start_time())
}

# Helper: apply a ps:: function to a fresh ps_handle for this process.
ps_method <- function(fun, self) {
  fun(ps::ps_handle(self$get_pid(), self$get_start_time()))
}

# Close whichever of the four connection handles exist (stdin/stdout/
# stderr pipes and the poll connection).
process_close_connections <- function(self, private) {
  for (f in c("stdin_pipe", "stdout_pipe", "stderr_pipe", "poll_pipe")) {
    if (!is.null(p <- private[[f]])) {
      rethrow_call(c_processx_connection_close, p)
    }
  }
}

#' Default options for pseudo terminals (ptys)
#'
#' @return Named list of default values of pty options.
#'
#' Options and default values:
#' * `echo` whether to keep the echo on the terminal. `FALSE` turns echo
#'   off.
#' * `rows` the (initial) terminal size, number of rows.
#' * `cols` the (initial) terminal size, number of columns.
#'
#' @export
default_pty_options <- function() {
  list(
    echo = FALSE,
    rows = 25L,
    cols = 80L
  )
}
# Interactive map of the 2016 EU referendum result: % Leave by UK area
# (choropleth from a GeoJSON `Pct_Leave` attribute), with toggleable city
# markers.
#
# Inputs: "uk.geojson" in the working directory.  City coordinates are
# geocoded at run time.
# NOTE(review): mutate_geocode(source = "google") requires a registered
# Google Maps API key in recent ggmap versions -- confirm before running.
library(dplyr); library(leaflet) ; library(rgdal) ; library(rmapshaper) ; library(ggmap)

# Read the area boundaries and simplify to 10% of the vertices so the
# rendered map stays light.
uk <- readOGR("uk.geojson", "OGRGeoJSON")
uk_simplified <- ms_simplify(uk, keep = 0.1)

# Cities to show as labelled markers.
cities <- data.frame(
  city = as.character(c(
    "Aberdeen", "Aldershot", "Barnsley", "Basildon", "Belfast",
    "Birkenhead", "Birmingham", "Blackburn", "Blackpool", "Bournemouth",
    "Bradford", "Brighton", "Bristol", "Burnley", "Cambridge", "Cardiff",
    "Chatham", "Coventry", "Crawley", "Derby", "Doncaster", "Dundee",
    "Edinburgh", "Exeter", "Glasgow", "Gloucester", "Huddersfield",
    "Hull", "Ipswich", "Leeds", "Leicester", "Liverpool", "London",
    "Luton", "Manchester", "Mansfield", "Middlesbrough", "Milton Keynes",
    "Newcastle", "Newport", "Northampton", "Norwich", "Nottingham",
    "Oxford", "Peterborough", "Plymouth", "Portsmouth", "Preston",
    "Reading", "Sheffield", "Slough", "Southampton", "Southend", "Stoke",
    "Sunderland", "Swansea", "Swindon", "Telford", "Wakefield",
    "Warrington", "Wigan", "Worthing", "York")))

# Geocode "<city>, United Kingdom" to add lon/lat columns for the markers.
cities <- mutate(cities, address = paste(city, ", United Kingdom", sep = '')) %>%
  mutate_geocode(address, source = "google")

# Class breaks and sequential palette for the Leave share.
bins <- c(0, 29, 39, 49, 59, 69, Inf)
pal <- colorBin("Blues", domain = uk$Pct_Leave, bins = bins)

# HTML hover labels: area name plus the Leave share to one decimal.
labels <- sprintf(
  "<strong>%s</strong><br/>%g%% Leave",
  uk$name, round(uk$Pct_Leave, 1)
) %>% lapply(htmltools::HTML)

leaflet() %>%
  # FIX: the original setView() centred on Berlin (lng 13.40, lat 52.52,
  # zoom 4), left over from another map; centre on the UK instead, using
  # the same view the "Reset" easyButton below restores.
  setView(lng = -2.935810, lat = 54.898260, zoom = 6) %>%
  addProviderTiles(providers$Stamen.Watercolor) %>%
  addProviderTiles(providers$Stamen.TonerLines,
                   options = providerTileOptions(opacity = 0.35)) %>%
  # Empty tile layer used only to attach the attribution text.
  addTiles(urlTemplate = "",
           attribution = 'Contains National Statistics data © Crown copyright and database right [2017] and OS data © Crown copyright and database right [2017].') %>%
  addPolygons(data = uk_simplified,
              fillColor = ~pal(Pct_Leave),
              weight = 0.5, opacity = 1, color = "white", fillOpacity = 0.8,
              highlight = highlightOptions(weight = 3, color = "#FFFF00",
                                           fillOpacity = 0.7,
                                           bringToFront = TRUE),
              label = labels,
              labelOptions = labelOptions(
                style = list("font-weight" = "normal",
                             # NOTE(review): the literal below contains an
                             # embedded newline (kept byte-identical; CSS
                             # treats it as plain whitespace).
                             padding = "3px 
8px"),
                textsize = "12px", direction = "auto")) %>%
  # City markers, hidden by default (see hideGroup() below).
  addCircleMarkers(data = cities, ~lon, ~lat,
                   radius = 4, color = "black", stroke = TRUE,
                   fillColor = "white", fillOpacity = 1, weight = 1,
                   label = ~city,
                   labelOptions = labelOptions(
                     noHide = TRUE,  # FIX: spell out TRUE, not T
                     direction = 'top', offset = c(0, -25), textOnly = TRUE,
                     style = list('color' = 'white',
                                  'text-shadow' = '0px 1px 1px #000000',
                                  'font-family' = 'Helvetica',
                                  'font-size' = '10px')),
                   group = "Cities") %>%
  addLayersControl(position = 'topleft', overlayGroups = "Cities",
                   options = layersControlOptions(collapsed = FALSE)) %>%
  hideGroup("Cities") %>%
  addLegend(position = "bottomleft",
            colors = RColorBrewer::brewer.pal(6, "Blues"),
            labels = c("0-29%", "30-39%", "40-49%", "50-59%", "60-69%",
                       "70% or more"),
            opacity = 0.8, title = "% voting to Leave") %>%
  # Title and source "legends": colors/labels NULL so only the title shows.
  addLegend(position = "topright", colors = NULL, labels = NULL,
            title = htmltools::HTML("EU referendum results<br><em><small>23 June 2016</em></small>")) %>%
  addLegend(position = "bottomright", colors = NULL, labels = NULL,
            title = htmltools::HTML("<small><a href='http://www.electoralcommission.org.uk/__data/assets/file/0014/212135/EU-referendum-result-data.csv'>Raw data</a> and <a href='https://github.com/rcatlord/spatial/blob/master/article50/leavers.R'>code</a></small>")) %>%
  # Button restoring the default UK-wide view.
  addEasyButton(
    easyButton(
      icon = 'fa-snowflake-o', title = 'Reset', position = "topleft",
      onClick = JS("function(btn, map){ map.setView([54.898260, -2.935810], 6);}")))
/scripts/article50/leavers.R
no_license
rcatlord/spatial
R
false
false
4,349
r
# Interactive map of the 2016 EU referendum result: % Leave by UK area
# (choropleth from a GeoJSON `Pct_Leave` attribute), with toggleable city
# markers.
#
# Inputs: "uk.geojson" in the working directory.  City coordinates are
# geocoded at run time.
# NOTE(review): mutate_geocode(source = "google") requires a registered
# Google Maps API key in recent ggmap versions -- confirm before running.
library(dplyr); library(leaflet) ; library(rgdal) ; library(rmapshaper) ; library(ggmap)

# Read the area boundaries and simplify to 10% of the vertices so the
# rendered map stays light.
uk <- readOGR("uk.geojson", "OGRGeoJSON")
uk_simplified <- ms_simplify(uk, keep = 0.1)

# Cities to show as labelled markers.
cities <- data.frame(
  city = as.character(c(
    "Aberdeen", "Aldershot", "Barnsley", "Basildon", "Belfast",
    "Birkenhead", "Birmingham", "Blackburn", "Blackpool", "Bournemouth",
    "Bradford", "Brighton", "Bristol", "Burnley", "Cambridge", "Cardiff",
    "Chatham", "Coventry", "Crawley", "Derby", "Doncaster", "Dundee",
    "Edinburgh", "Exeter", "Glasgow", "Gloucester", "Huddersfield",
    "Hull", "Ipswich", "Leeds", "Leicester", "Liverpool", "London",
    "Luton", "Manchester", "Mansfield", "Middlesbrough", "Milton Keynes",
    "Newcastle", "Newport", "Northampton", "Norwich", "Nottingham",
    "Oxford", "Peterborough", "Plymouth", "Portsmouth", "Preston",
    "Reading", "Sheffield", "Slough", "Southampton", "Southend", "Stoke",
    "Sunderland", "Swansea", "Swindon", "Telford", "Wakefield",
    "Warrington", "Wigan", "Worthing", "York")))

# Geocode "<city>, United Kingdom" to add lon/lat columns for the markers.
cities <- mutate(cities, address = paste(city, ", United Kingdom", sep = '')) %>%
  mutate_geocode(address, source = "google")

# Class breaks and sequential palette for the Leave share.
bins <- c(0, 29, 39, 49, 59, 69, Inf)
pal <- colorBin("Blues", domain = uk$Pct_Leave, bins = bins)

# HTML hover labels: area name plus the Leave share to one decimal.
labels <- sprintf(
  "<strong>%s</strong><br/>%g%% Leave",
  uk$name, round(uk$Pct_Leave, 1)
) %>% lapply(htmltools::HTML)

leaflet() %>%
  # FIX: the original setView() centred on Berlin (lng 13.40, lat 52.52,
  # zoom 4), left over from another map; centre on the UK instead, using
  # the same view the "Reset" easyButton below restores.
  setView(lng = -2.935810, lat = 54.898260, zoom = 6) %>%
  addProviderTiles(providers$Stamen.Watercolor) %>%
  addProviderTiles(providers$Stamen.TonerLines,
                   options = providerTileOptions(opacity = 0.35)) %>%
  # Empty tile layer used only to attach the attribution text.
  addTiles(urlTemplate = "",
           attribution = 'Contains National Statistics data © Crown copyright and database right [2017] and OS data © Crown copyright and database right [2017].') %>%
  addPolygons(data = uk_simplified,
              fillColor = ~pal(Pct_Leave),
              weight = 0.5, opacity = 1, color = "white", fillOpacity = 0.8,
              highlight = highlightOptions(weight = 3, color = "#FFFF00",
                                           fillOpacity = 0.7,
                                           bringToFront = TRUE),
              label = labels,
              labelOptions = labelOptions(
                style = list("font-weight" = "normal",
                             # NOTE(review): the literal below contains an
                             # embedded newline (kept byte-identical; CSS
                             # treats it as plain whitespace).
                             padding = "3px 
8px"),
                textsize = "12px", direction = "auto")) %>%
  # City markers, hidden by default (see hideGroup() below).
  addCircleMarkers(data = cities, ~lon, ~lat,
                   radius = 4, color = "black", stroke = TRUE,
                   fillColor = "white", fillOpacity = 1, weight = 1,
                   label = ~city,
                   labelOptions = labelOptions(
                     noHide = TRUE,  # FIX: spell out TRUE, not T
                     direction = 'top', offset = c(0, -25), textOnly = TRUE,
                     style = list('color' = 'white',
                                  'text-shadow' = '0px 1px 1px #000000',
                                  'font-family' = 'Helvetica',
                                  'font-size' = '10px')),
                   group = "Cities") %>%
  addLayersControl(position = 'topleft', overlayGroups = "Cities",
                   options = layersControlOptions(collapsed = FALSE)) %>%
  hideGroup("Cities") %>%
  addLegend(position = "bottomleft",
            colors = RColorBrewer::brewer.pal(6, "Blues"),
            labels = c("0-29%", "30-39%", "40-49%", "50-59%", "60-69%",
                       "70% or more"),
            opacity = 0.8, title = "% voting to Leave") %>%
  # Title and source "legends": colors/labels NULL so only the title shows.
  addLegend(position = "topright", colors = NULL, labels = NULL,
            title = htmltools::HTML("EU referendum results<br><em><small>23 June 2016</em></small>")) %>%
  addLegend(position = "bottomright", colors = NULL, labels = NULL,
            title = htmltools::HTML("<small><a href='http://www.electoralcommission.org.uk/__data/assets/file/0014/212135/EU-referendum-result-data.csv'>Raw data</a> and <a href='https://github.com/rcatlord/spatial/blob/master/article50/leavers.R'>code</a></small>")) %>%
  # Button restoring the default UK-wide view.
  addEasyButton(
    easyButton(
      icon = 'fa-snowflake-o', title = 'Reset', position = "topleft",
      onClick = JS("function(btn, map){ map.setView([54.898260, -2.935810], 6);}")))
# Scratch plots
# Exploratory plotting ideas for the bicycle-count app.
# NOTE(review): assumes `hourly_hour_month` and `daily` data frames are
# defined elsewhere (the app's data-prep code) -- confirm before sourcing.

# Text ideas ----
# Link to City site: https://data.cambridgema.gov/dataset/Eco-Totem-Broadway-Bicycle-Counts-by-Date/9yzv-hx4u
# Add info on what to do with the buttons, other actions
# Add link to GitHub repo

# Plot ideas ----
# Daily
# ideas: interactive time series plots with different time units as options on x:
# date, day of week, year
# y: entries, exits, total

# Hourly
# Hour along x
# color for month
# drop-down filter for year... with compare?

# System time zone: set in command line: sudo timedatectl set-timezone America/New_York

# Heat map: columns for months, rows for hour of day, color for count or intensity of riders

# Average hourly count by hour of day, coloured by month, with a loess
# smoother per month.
gp <- ggplot(hourly_hour_month, aes(x = as.numeric(hour), y = total, color = month)) +
  geom_point() +
  geom_smooth(se = F, span = 0.3) +
  xlab('Hour of day') +
  ylab('Average count') +
  theme_bw()
# Interactive version; tooltip restricted to count and month.
ggplotly(gp, tooltip= c('total', 'month'))

# Entries should be West bound -- much lower in 2016-2018, when Longfellow bridge was closed.
# Exits is East bound, going in to boston
daily %>%
  group_by(year) %>%
  summarize(mean(entries), mean(exits))

# Weekly view
# Mean daily total/entries/exits by year and day of week.
day_of_week = daily %>%
  group_by(year, day_of_week) %>%
  dplyr::summarize(total = mean(total, na.rm=T),
                   entries = mean(entries, na.rm=T),
                   exits = mean(exits, na.rm=T))

ggplot(daily, aes(x = day_of_week, y = total, group = year)) +
  geom_point()

# NOTE(review): `text` is not a standard ggplot2 aesthetic -- it appears
# to be carried through so ggplotly can show the date in the tooltip
# (ggplot2 itself may warn about it); confirm intent.
gp <- ggplot(daily, aes(x = day_of_week, y = total, color = as.factor(year))) +
  geom_point(aes(text = date)) +
  xlab('Day of week') +
  theme_bw()
ggplotly(gp, tooltip= c('total', 'date'))
/plotscratch.R
no_license
mirotchnick/BikeCount
R
false
false
1,632
r
# Scratch plots
# Exploratory plotting ideas for the bicycle-count app.
# NOTE(review): assumes `hourly_hour_month` and `daily` data frames are
# defined elsewhere (the app's data-prep code) -- confirm before sourcing.

# Text ideas ----
# Link to City site: https://data.cambridgema.gov/dataset/Eco-Totem-Broadway-Bicycle-Counts-by-Date/9yzv-hx4u
# Add info on what to do with the buttons, other actions
# Add link to GitHub repo

# Plot ideas ----
# Daily
# ideas: interactive time series plots with different time units as options on x:
# date, day of week, year
# y: entries, exits, total

# Hourly
# Hour along x
# color for month
# drop-down filter for year... with compare?

# System time zone: set in command line: sudo timedatectl set-timezone America/New_York

# Heat map: columns for months, rows for hour of day, color for count or intensity of riders

# Average hourly count by hour of day, coloured by month, with a loess
# smoother per month.
gp <- ggplot(hourly_hour_month, aes(x = as.numeric(hour), y = total, color = month)) +
  geom_point() +
  geom_smooth(se = F, span = 0.3) +
  xlab('Hour of day') +
  ylab('Average count') +
  theme_bw()
# Interactive version; tooltip restricted to count and month.
ggplotly(gp, tooltip= c('total', 'month'))

# Entries should be West bound -- much lower in 2016-2018, when Longfellow bridge was closed.
# Exits is East bound, going in to boston
daily %>%
  group_by(year) %>%
  summarize(mean(entries), mean(exits))

# Weekly view
# Mean daily total/entries/exits by year and day of week.
day_of_week = daily %>%
  group_by(year, day_of_week) %>%
  dplyr::summarize(total = mean(total, na.rm=T),
                   entries = mean(entries, na.rm=T),
                   exits = mean(exits, na.rm=T))

ggplot(daily, aes(x = day_of_week, y = total, group = year)) +
  geom_point()

# NOTE(review): `text` is not a standard ggplot2 aesthetic -- it appears
# to be carried through so ggplotly can show the date in the tooltip
# (ggplot2 itself may warn about it); confirm intent.
gp <- ggplot(daily, aes(x = day_of_week, y = total, color = as.factor(year))) +
  geom_point(aes(text = date)) +
  xlab('Day of week') +
  theme_bw()
ggplotly(gp, tooltip= c('total', 'date'))
# USGS GNIS ID piped file, download 2019-09-06, NationalFile_20190901.txt
# FEATURE_ID|FEATURE_NAME|FEATURE_CLASS|STATE_ALPHA|STATE_NUMERIC|COUNTY_NAME|COUNTY_NUMERIC|PRIMARY_LAT_DMS|PRIM_LONG_DMS|PRIM_LAT_DEC|PRIM_LONG_DEC|SOURCE_LAT_DMS|SOURCE_LONG_DMS|SOURCE_LAT_DEC|SOURCE_LONG_DEC|ELEV_IN_M|ELEV_IN_FT|MAP_NAME|DATE_CREATED|DATE_EDITED
#
# Loads the GNIS national file, keeps a handful of columns, builds
# state/county FIPS codes, normalises feature names for matching, and
# saves the result as df.GNIS.RData.
# NOTE(review): uses str_split()/str_squish() (stringr) without a
# library(stringr) call here -- presumably loaded by a driver script;
# confirm.

# Column names come from the pipe-delimited header line of the file.
cols <- readLines(con = file.path("Data","Input","Standard","NationalFile_20190901.txt"), 1)
cols <- as.vector(str_split(cols,"\\|", simplify = TRUE))

# Exploratory sanity check: first 10 rows with all 20 columns.
check <- read.table(file.path("Data","Input","Standard","NationalFile_20190901.txt"),
                    header = TRUE, sep = "|", col.names = cols, nrows = 10,
                    stringsAsFactors = FALSE)

# Keep FEATURE_ID/NAME/CLASS (character), STATE_NUMERIC and COUNTY_NUMERIC
# (integer) and the decimal lat/long (numeric); "NULL" drops the other 13
# of the 20 columns.
colClasses <- c(rep("character", 3), "NULL", "integer","NULL","integer","NULL","NULL",
                "numeric", "numeric", rep("NULL", 9))

# Second exploratory check with the reduced column set.
check <- read.table(file.path("Data","Input","Standard","NationalFile_20190901.txt"),
                    header = TRUE, sep = "|", col.names = cols,
                    colClasses = colClasses, skip = 635, nrows = 10,
                    stringsAsFactors = FALSE)

# Full load; quoting and comment handling are disabled so stray quote or
# '#' characters in the data are read literally.
df.GNIS <- read.table(file.path("Data","Input","Standard","NationalFile_20190901.txt"),
                      header = TRUE, sep = "|", col.names = cols,
                      colClasses = colClasses,
                      # nrows = 471131,
                      stringsAsFactors = FALSE,
                      quote = "",
                      comment.char = "")

# Zero-padded state (2-digit) and combined state+county (5-digit) FIPS.
df.GNIS$STFIPS <- factor(sprintf("%02d",df.GNIS$STATE_NUMERIC))
df.GNIS$FIPS <- factor(sprintf("%05d",1000*df.GNIS$STATE_NUMERIC + df.GNIS$COUNTY_NUMERIC))

# Reorder/drop by position: ID, NAME, CLASS, FIPS, STFIPS, lat, long.
df.GNIS <- df.GNIS[,c(1,2,3,9,8,6,7)]
df.GNIS$FEATURE_CLASS <- factor(df.GNIS$FEATURE_CLASS)
colnames(df.GNIS)[1] <- "GNIS_ID"

# Normalised name for matching: lower case, apostrophes removed, slashes
# and spaced hyphens collapsed to "-", parentheticals stripped, whitespace
# squished.
df.GNIS$name <- gsub("'","",tolower(df.GNIS$FEATURE_NAME))
df.GNIS$name <- gsub("/","-", df.GNIS$name, fixed = TRUE)
df.GNIS$name <- gsub(" - ","-",df.GNIS$name, fixed = TRUE)
df.GNIS$name <- str_squish(sub("[\\(][[:print:]]+[\\)]","", df.GNIS$name))

# Strip "city of "/" city", "township of ", "town of ", "village of "
# prefixes/suffixes.
df.GNIS$name <- sub("(city of )|(( city)$)","", df.GNIS$name)
df.GNIS$name <- sub("(township of )|( (township)$)","", df.GNIS$name)
df.GNIS$name <- sub("(town of )|(( town)$)","", df.GNIS$name)
# NOTE(review): unlike the sibling patterns above, this literal contains
# an embedded newline between "(( " and "village" (kept byte-identical
# here); it looks like a mangled "(( village)$)" -- confirm upstream.
df.GNIS$name <- sub("(village of )|(( 
village)$)","", df.GNIS$name)

# Drop rows whose normalised name still contains "county" when the
# feature class is Civil or Locale.
df.GNIS <- df.GNIS[!(grepl("county", df.GNIS$name) & df.GNIS$FEATURE_CLASS %in% c("Civil","Locale")),]

save(df.GNIS,file = file.path("Data","Input","Standard","df.GNIS.RData"))
/Scripts/Load Input/USGS_GNIS_Data.R
no_license
fahmidah/bridge.collapses
R
false
false
2,444
r
# USGS GNIS ID piped file, download 2019-09-06, NationalFile_20190901.txt
# FEATURE_ID|FEATURE_NAME|FEATURE_CLASS|STATE_ALPHA|STATE_NUMERIC|COUNTY_NAME|COUNTY_NUMERIC|PRIMARY_LAT_DMS|PRIM_LONG_DMS|PRIM_LAT_DEC|PRIM_LONG_DEC|SOURCE_LAT_DMS|SOURCE_LONG_DMS|SOURCE_LAT_DEC|SOURCE_LONG_DEC|ELEV_IN_M|ELEV_IN_FT|MAP_NAME|DATE_CREATED|DATE_EDITED
#
# Loads the GNIS national file, keeps a handful of columns, builds
# state/county FIPS codes, normalises feature names for matching, and
# saves the result as df.GNIS.RData.
# NOTE(review): uses str_split()/str_squish() (stringr) without a
# library(stringr) call here -- presumably loaded by a driver script;
# confirm.

# Column names come from the pipe-delimited header line of the file.
cols <- readLines(con = file.path("Data","Input","Standard","NationalFile_20190901.txt"), 1)
cols <- as.vector(str_split(cols,"\\|", simplify = TRUE))

# Exploratory sanity check: first 10 rows with all 20 columns.
check <- read.table(file.path("Data","Input","Standard","NationalFile_20190901.txt"),
                    header = TRUE, sep = "|", col.names = cols, nrows = 10,
                    stringsAsFactors = FALSE)

# Keep FEATURE_ID/NAME/CLASS (character), STATE_NUMERIC and COUNTY_NUMERIC
# (integer) and the decimal lat/long (numeric); "NULL" drops the other 13
# of the 20 columns.
colClasses <- c(rep("character", 3), "NULL", "integer","NULL","integer","NULL","NULL",
                "numeric", "numeric", rep("NULL", 9))

# Second exploratory check with the reduced column set.
check <- read.table(file.path("Data","Input","Standard","NationalFile_20190901.txt"),
                    header = TRUE, sep = "|", col.names = cols,
                    colClasses = colClasses, skip = 635, nrows = 10,
                    stringsAsFactors = FALSE)

# Full load; quoting and comment handling are disabled so stray quote or
# '#' characters in the data are read literally.
df.GNIS <- read.table(file.path("Data","Input","Standard","NationalFile_20190901.txt"),
                      header = TRUE, sep = "|", col.names = cols,
                      colClasses = colClasses,
                      # nrows = 471131,
                      stringsAsFactors = FALSE,
                      quote = "",
                      comment.char = "")

# Zero-padded state (2-digit) and combined state+county (5-digit) FIPS.
df.GNIS$STFIPS <- factor(sprintf("%02d",df.GNIS$STATE_NUMERIC))
df.GNIS$FIPS <- factor(sprintf("%05d",1000*df.GNIS$STATE_NUMERIC + df.GNIS$COUNTY_NUMERIC))

# Reorder/drop by position: ID, NAME, CLASS, FIPS, STFIPS, lat, long.
df.GNIS <- df.GNIS[,c(1,2,3,9,8,6,7)]
df.GNIS$FEATURE_CLASS <- factor(df.GNIS$FEATURE_CLASS)
colnames(df.GNIS)[1] <- "GNIS_ID"

# Normalised name for matching: lower case, apostrophes removed, slashes
# and spaced hyphens collapsed to "-", parentheticals stripped, whitespace
# squished.
df.GNIS$name <- gsub("'","",tolower(df.GNIS$FEATURE_NAME))
df.GNIS$name <- gsub("/","-", df.GNIS$name, fixed = TRUE)
df.GNIS$name <- gsub(" - ","-",df.GNIS$name, fixed = TRUE)
df.GNIS$name <- str_squish(sub("[\\(][[:print:]]+[\\)]","", df.GNIS$name))

# Strip "city of "/" city", "township of ", "town of ", "village of "
# prefixes/suffixes.
df.GNIS$name <- sub("(city of )|(( city)$)","", df.GNIS$name)
df.GNIS$name <- sub("(township of )|( (township)$)","", df.GNIS$name)
df.GNIS$name <- sub("(town of )|(( town)$)","", df.GNIS$name)
# NOTE(review): unlike the sibling patterns above, this literal contains
# an embedded newline between "(( " and "village" (kept byte-identical
# here); it looks like a mangled "(( village)$)" -- confirm upstream.
df.GNIS$name <- sub("(village of )|(( 
village)$)","", df.GNIS$name)

# Drop rows whose normalised name still contains "county" when the
# feature class is Civil or Locale.
df.GNIS <- df.GNIS[!(grepl("county", df.GNIS$name) & df.GNIS$FEATURE_CLASS %in% c("Civil","Locale")),]

save(df.GNIS,file = file.path("Data","Input","Standard","df.GNIS.RData"))
data("nuts2006") ## Proportional Symbols # Dataser head(nuts0.df) # Countries plot plot(nuts0.spdf) # Population plot on proportional symbols propSymbolsLayer(spdf = nuts0.spdf, df = nuts0.df, var = "pop2008") # Layout plot layoutLayer(title = "Countries Population in Europe", sources = "Eurostat, 2008", scale = NULL, frame = TRUE, col = "black", coltitle = "white", bg = "#D9F5FF", south = TRUE, extent = nuts0.spdf) # Countries plot plot(nuts0.spdf, col = "grey60",border = "grey20", add=TRUE) # Population plot on proportional symbols propSymbolsLayer(spdf = nuts0.spdf, df = nuts0.df, var = "pop2008", k = 0.01, symbols = "square", col = "#920000", legend.pos = "right", legend.title.txt = "Total\npopulation (2008)", legend.style = "c") ## Choropleth Layer head(nuts2.df) nuts2.df$unemprate <- nuts2.df$unemp2008/nuts2.df$act2008*100 choroLayer(spdf = nuts2.spdf, df = nuts2.df, var = "unemprate") choroLayer(spdf = nuts2.spdf, df = nuts2.df, var = "unemprate", method = "quantile", nclass = 8, lwd = 0.5, col = carto.pal(pal1 = "turquoise.pal", n1 = 8), border = "grey40", add = FALSE, legend.pos = "right", legend.title.txt = "Unemployement\nrate (%)", legend.values.rnd = 1) plot(nuts0.spdf, add=T, border = "grey40") layoutLayer(title = "Unemployement in Europe", sources = "Eurostat, 2008", frame = TRUE, col = "black", south = TRUE, coltitle = "white") vignette(topic = "cartography")
/pgm/basic_examples_cartography.R
no_license
rCarto/greece_2016
R
false
false
1,783
r
data("nuts2006") ## Proportional Symbols # Dataser head(nuts0.df) # Countries plot plot(nuts0.spdf) # Population plot on proportional symbols propSymbolsLayer(spdf = nuts0.spdf, df = nuts0.df, var = "pop2008") # Layout plot layoutLayer(title = "Countries Population in Europe", sources = "Eurostat, 2008", scale = NULL, frame = TRUE, col = "black", coltitle = "white", bg = "#D9F5FF", south = TRUE, extent = nuts0.spdf) # Countries plot plot(nuts0.spdf, col = "grey60",border = "grey20", add=TRUE) # Population plot on proportional symbols propSymbolsLayer(spdf = nuts0.spdf, df = nuts0.df, var = "pop2008", k = 0.01, symbols = "square", col = "#920000", legend.pos = "right", legend.title.txt = "Total\npopulation (2008)", legend.style = "c") ## Choropleth Layer head(nuts2.df) nuts2.df$unemprate <- nuts2.df$unemp2008/nuts2.df$act2008*100 choroLayer(spdf = nuts2.spdf, df = nuts2.df, var = "unemprate") choroLayer(spdf = nuts2.spdf, df = nuts2.df, var = "unemprate", method = "quantile", nclass = 8, lwd = 0.5, col = carto.pal(pal1 = "turquoise.pal", n1 = 8), border = "grey40", add = FALSE, legend.pos = "right", legend.title.txt = "Unemployement\nrate (%)", legend.values.rnd = 1) plot(nuts0.spdf, add=T, border = "grey40") layoutLayer(title = "Unemployement in Europe", sources = "Eurostat, 2008", frame = TRUE, col = "black", south = TRUE, coltitle = "white") vignette(topic = "cartography")
context("indices") invisible(connect()) test_that("index_get", { if (!es_version() < 120) { a <- index_get(index = 'shakespeare') expect_equal(names(a), "shakespeare") expect_is(a, "list") expect_is(a$shakespeare, "list") expect_equal(length(a$shakespeare$aliases), 0) expect_error(index_get("adfadfadsfasdfadfasdfsf"), 'no such index||IndexMissingException') } }) test_that("index_exists", { expect_true(index_exists(index = 'shakespeare')) expect_false(index_exists(index = 'asdfasdfadfasdfasfasdf')) }) test_that("index_create", { ind <- "stuff_yy" invisible(tryCatch(index_delete(index = ind, verbose = FALSE), error = function(e) e)) a <- index_create(index = ind, verbose = FALSE) expect_true(a[[1]]) expect_named(a, expected = "acknowledged") expect_is(a, "list") expect_error(index_create("/"), "Invalid index name") }) test_that("index_create fails on illegal characters", { expect_error(index_create("a\\b"), "Invalid index name") expect_error(index_create("a/b"), "Invalid index name") expect_error(index_create("a*b"), "Invalid index name") expect_error(index_create("a?b"), "Invalid index name") expect_error(index_create("a\"b"), "Invalid index name") expect_error(index_create("a<b"), "Invalid index name") expect_error(index_create("a>b"), "Invalid index name") expect_error(index_create("a|b"), "Invalid index name") expect_error(index_create("a,b"), "Invalid index name") expect_error(index_create("a b"), "Invalid index name") }) test_that("index_delete", { nm <- "stuff_zz" invisible(tryCatch(index_delete(index = nm, verbose = FALSE), error = function(e) e)) a <- index_create(index = nm, verbose = FALSE) b <- index_delete(nm, verbose = FALSE) expect_true(b[[1]]) expect_named(b, expected = "acknowledged") expect_is(b, "list") expect_error(index_delete("adfadfafafasdfasdfasfasfasfd", verbose=FALSE), "no such index||IndexMissingException") }) # test_that("index_close, index_open", { # invisible(tryCatch(index_delete('test_close_open', verbose = FALSE), error = function(e) 
e)) # index_create('test_close_open', verbose = FALSE) # index_open('test_close_open') # # expect_true(index_close('test_close_open')[[1]]) # expect_true(index_open('test_close_open')[[1]]) # expect_error(index_close("adfadfafafasdfasdfasfasfasfd"), "Not Found") # expect_error(index_open("adfadfafafasdfasdfasfasfasfd"), "Not Found") # }) test_that("index_stats", { a <- index_stats('shakespeare') expect_is(a, "list") expect_named(a$indices, "shakespeare") expect_error(index_stats("adfadfafafasdfasdfasfasfasfd", verbose=FALSE), "no such index||IndexMissingException") }) test_that("index_segments", { a <- index_segments('shakespeare') expect_is(a, "list") expect_named(a$indices, "shakespeare") expect_error(index_segments("adfadfafafasdfasdfasfasfasfd", verbose=FALSE), "no such index||IndexMissingException") }) test_that("index_recovery", { if (!es_version() < 110) { a <- index_recovery('shakespeare') expect_is(a, "list") expect_named(a$shakespeare, "shards") expect_error(index_recovery("adfadfafafasdfasdfasfasfasfd", verbose=FALSE), "no such index||IndexMissingException") } }) ## cleanup ----------------------------------- invisible(index_delete("stuff_yy", verbose = FALSE)) # invisible(index_delete('test_close_open', verbose = FALSE))
/tests/testthat/test-indices.R
permissive
irichgreen/elastic
R
false
false
3,395
r
context("indices") invisible(connect()) test_that("index_get", { if (!es_version() < 120) { a <- index_get(index = 'shakespeare') expect_equal(names(a), "shakespeare") expect_is(a, "list") expect_is(a$shakespeare, "list") expect_equal(length(a$shakespeare$aliases), 0) expect_error(index_get("adfadfadsfasdfadfasdfsf"), 'no such index||IndexMissingException') } }) test_that("index_exists", { expect_true(index_exists(index = 'shakespeare')) expect_false(index_exists(index = 'asdfasdfadfasdfasfasdf')) }) test_that("index_create", { ind <- "stuff_yy" invisible(tryCatch(index_delete(index = ind, verbose = FALSE), error = function(e) e)) a <- index_create(index = ind, verbose = FALSE) expect_true(a[[1]]) expect_named(a, expected = "acknowledged") expect_is(a, "list") expect_error(index_create("/"), "Invalid index name") }) test_that("index_create fails on illegal characters", { expect_error(index_create("a\\b"), "Invalid index name") expect_error(index_create("a/b"), "Invalid index name") expect_error(index_create("a*b"), "Invalid index name") expect_error(index_create("a?b"), "Invalid index name") expect_error(index_create("a\"b"), "Invalid index name") expect_error(index_create("a<b"), "Invalid index name") expect_error(index_create("a>b"), "Invalid index name") expect_error(index_create("a|b"), "Invalid index name") expect_error(index_create("a,b"), "Invalid index name") expect_error(index_create("a b"), "Invalid index name") }) test_that("index_delete", { nm <- "stuff_zz" invisible(tryCatch(index_delete(index = nm, verbose = FALSE), error = function(e) e)) a <- index_create(index = nm, verbose = FALSE) b <- index_delete(nm, verbose = FALSE) expect_true(b[[1]]) expect_named(b, expected = "acknowledged") expect_is(b, "list") expect_error(index_delete("adfadfafafasdfasdfasfasfasfd", verbose=FALSE), "no such index||IndexMissingException") }) # test_that("index_close, index_open", { # invisible(tryCatch(index_delete('test_close_open', verbose = FALSE), error = function(e) 
e)) # index_create('test_close_open', verbose = FALSE) # index_open('test_close_open') # # expect_true(index_close('test_close_open')[[1]]) # expect_true(index_open('test_close_open')[[1]]) # expect_error(index_close("adfadfafafasdfasdfasfasfasfd"), "Not Found") # expect_error(index_open("adfadfafafasdfasdfasfasfasfd"), "Not Found") # }) test_that("index_stats", { a <- index_stats('shakespeare') expect_is(a, "list") expect_named(a$indices, "shakespeare") expect_error(index_stats("adfadfafafasdfasdfasfasfasfd", verbose=FALSE), "no such index||IndexMissingException") }) test_that("index_segments", { a <- index_segments('shakespeare') expect_is(a, "list") expect_named(a$indices, "shakespeare") expect_error(index_segments("adfadfafafasdfasdfasfasfasfd", verbose=FALSE), "no such index||IndexMissingException") }) test_that("index_recovery", { if (!es_version() < 110) { a <- index_recovery('shakespeare') expect_is(a, "list") expect_named(a$shakespeare, "shards") expect_error(index_recovery("adfadfafafasdfasdfasfasfasfd", verbose=FALSE), "no such index||IndexMissingException") } }) ## cleanup ----------------------------------- invisible(index_delete("stuff_yy", verbose = FALSE)) # invisible(index_delete('test_close_open', verbose = FALSE))
newletter <- function(x){ rando <- sample(letters[1:26], 1) return(rando) }
/newRPackage/R/newletter.R
no_license
hmichalak/NewHomework6
R
false
false
80
r
newletter <- function(x){ rando <- sample(letters[1:26], 1) return(rando) }
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/KDE_Function.R \name{KDE_height} \alias{KDE_height} \title{Single-Point One-Dimensional Gausssian Kernel Density Estimate} \usage{ KDE_height(p, d, bw) } \arguments{ \item{p}{A real number} \item{d}{A vector of observations} \item{bw}{Bandwidth} } \value{ A numeric density estimate } \description{ Computes the one-dimensional guassian KDE for a single point. }
/man/KDE_height.Rd
no_license
phstallworth/spptrees
R
false
false
452
rd
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/KDE_Function.R \name{KDE_height} \alias{KDE_height} \title{Single-Point One-Dimensional Gausssian Kernel Density Estimate} \usage{ KDE_height(p, d, bw) } \arguments{ \item{p}{A real number} \item{d}{A vector of observations} \item{bw}{Bandwidth} } \value{ A numeric density estimate } \description{ Computes the one-dimensional guassian KDE for a single point. }
#reading the file household_power_consumption.txt from the working directory f <- read.csv("household_power_consumption.txt", sep=";") #converting date and time f$Date <- strptime(paste(f$Date, f$Time), format = "%d/%m/%Y %H:%M:%S") #subsetting the date range f01 <- subset(f, as.Date(Date) == as.Date("2007-02-01")) f02 <- subset(f, as.Date(Date) == as.Date("2007-02-02")) f <- rbind(f01, f02) #drawing an actual histogram hist(as.numeric(f$Global_active_power), col="red", main = "Global Active Power", xlab = "Global Active Power (kilowatts)") dev.copy(png, file="plot1.png", width=480, height=480) dev.off()
/plot1.R
no_license
kacyk73/ExData_Plotting1
R
false
false
630
r
#reading the file household_power_consumption.txt from the working directory f <- read.csv("household_power_consumption.txt", sep=";") #converting date and time f$Date <- strptime(paste(f$Date, f$Time), format = "%d/%m/%Y %H:%M:%S") #subsetting the date range f01 <- subset(f, as.Date(Date) == as.Date("2007-02-01")) f02 <- subset(f, as.Date(Date) == as.Date("2007-02-02")) f <- rbind(f01, f02) #drawing an actual histogram hist(as.numeric(f$Global_active_power), col="red", main = "Global Active Power", xlab = "Global Active Power (kilowatts)") dev.copy(png, file="plot1.png", width=480, height=480) dev.off()
########################## #Problem 5: Insurance Data ########################## library(glmnet) library(corrplot) library(MASS) insurancedata = read.table("InsuranceData.txt", header=T) head(insurancedata) #pull out non-numeric fields and put the newpol column in front because it will #be our "Y". This makes it easier to interpret the correlation matrices insuranceNumeric = insurancedata[,c(6, 2:5, 8)] head(insuranceNumeric) #########Normal Linear Regression: #[on a training set (30 out of 47 obs)] lmFit = lm(newpol ~ ., data=insuranceNumeric[1:30, ]) # train on the first 30 summary(lmFit) plot(lmFit) #goes through several plots that test model assumptions #explicitly create the training and test sets: xTrain = insuranceNumeric[1:30, 2:6] yTrain = insuranceNumeric[1:30, 1] xTest = insuranceNumeric[31:47, 2:6] yTest = insuranceNumeric[31:47, 1] #How well does this model predict on the test data: yHat = predict(lmFit, xTest) #feed xTest data into lmFit model to get predicted y values yHat #confirm that you only have predicted y values for the test set (obs. 31:47) rse = sqrt(sum((yTest - yHat)^2) / 24) #compute predicted rmse using degrees of freedom #specified in lm (lmFit) rse #visually compare a few of the predicted and actual values: head(yHat) head(yTrain) ######################################## #Lasso Regularized Regression with no CV: ########################################## #create training and test sets. we will use the training set to get lambda then get #the predicted rmse from the test set. xTrain = insuranceNumeric[1:30, 2:6] yTrain = insuranceNumeric[1:30, 1] xTest = insuranceNumeric[31:47, 2:6] yTest = insuranceNumeric[31:47, 1] lassoFit = glmnet(as.matrix(xTrain), yTrain) #glmnet on the training data lassoFit plot(lassoFit, label=T) #plot shows you which betas are coming into be important and what #their coeff are. As lambda increases you get more and more var. 
contributions until all are #included #here we can manually look for the best value of lambda summary(lassoFit) # not much help :( print(lassoFit) # shows you # of variables involved in the model (indirectly via DF; # DF = n - #variables - 1); # %dev column is like R^2 (% of variance explained); lambda value. Keep in mind that the model #that explains the most (highest %dev) is not necessarily the best model, though for our #purposes here, we will assume it is. Here, it says lambda = 0.004723 is the best. ########################### #Lasso Regression with CV: ########################### #Use lasso with cross validation on the training set to find an even better lambda: cvFit <- cv.glmnet(as.matrix(xTrain), yTrain) lassoFit = glmnet(as.matrix(xTrain), yTrain, lambda = cvFit$lambda.min, set.seed(523)) #glmnet on the #training data lassoFit # Let's look at the coefficients for the model at a specific lambda. We will choose the #lambda chosen by CV (3 lines of code above): coef(lassoFit, s=0.3108) #this gives us the coefficients for the variables included in the #model at a certain value of lambda. Note: s = lambda #graph different values of lambda vs. rse: par(mar=c(3, 3, 3, 3)) #sets the margins so you can see the graph better plot(cvFit) #here we are looking at the log of the lambda. we are looking for what the #coefficients are where we have the best lambda. we are looking for the lambda that gives the #minimum residual error on the crossvalidated sets (least prediction error), and we can ask #R to find it for us (though it doesn't explicity tell us what the value is). #? 
this plot doesn't really match up with the optimized value of lambda chosen by CV above #compute rse on the training dataset yHat = predict(lassoFit, as.matrix(xTrain), s=0.3108) rse = sqrt(sum((yTrain - yHat)^2) / 24) rse #visually compare a few of the predicted and actual values: head(yHat) head(yTrain) #compute rse on the test dataset (predicted rse) yPredict = predict(lassoFit, as.matrix(xTest), s=0.3108) rsePredict = sqrt(sum((yTest - yPredict)^2) / 24) rsePredict #visually compare a few of the predicted and actual values: head(yHat) head(yTrain) ################################################## #part 4: plot the residuals vs. predicted values ################################################# resid = yTest - yPredict yep<- data.frame(resid, yPredict) yep library(ggplot2) library(gcookbook) graph <- ggplot(yep, aes(x=yPredict, y=resid)) + geom_point() + theme_bw() graph + xlab("Predicted Values") + ylab("Residuals") + geom_hline(yintercept=0) plot(yPredict, resid, xlab="Predicted", ylab="Residuals") abline(0, 0) #puts the horizon on the graph ######################################################## #Lasso Regularized Regression with CV in a different way: ######################################################### #create training and test sets. we will use CV on the training set to get lambda then get #the predicted rmse from the test set. xTrain = insuranceNumeric[1:30, 2:6] yTrain = insuranceNumeric[1:30, 1] xTest = insuranceNumeric[31:47, 2:6] yTest = insuranceNumeric[31:47, 1] # One of the nice things about this function is its # ability to test with cross validation to choose the lamba. Gets you an optimized lambda cvfit = cv.glmnet(as.matrix(xTrain), yTrain) #cross-validated glmnet on the training data cvfit par(mar=c(3, 3, 3, 3)) #sets the margins so you can see the graph better plot(cvfit) #here we are looking at the log of the lambda. we are looking for what the #coefficients are where we have the best lambda. 
we are looking for the lambda that gives the #minimum residual error on the crossvalidated sets (least prediction error), and we can ask #R to find it for us (though it doesn't explicity tell us what the value is) and give us the #model: coef(cvfit, s="lambda.min") # Note we are getting a lot more variable contributions here #than we would expect given the correlation matrix #we want to know which lambda gives the lowest rse on the predict set yPredict = predict(cvfit, newx=as.matrix(xTest), s="lambda.min") rsePredict = sqrt(sum((yTest - yPredict)^2) / 24) rsePredict #the rse here (on the predict set) should be way lower than that given by the lm #why isn't it? head(insuranceNumeric) ###################################### #Problem 6: Employment Data ###################################### library(car) library(corrplot) library(ggplot2) library(MASS) employ =read.csv("EmploymentData.csv") head(employ) employNumeric = employ[2:10] head(employNumeric) # Check out the correlation of the original data corM = cor(employNumeric) corrplot(corM, method="ellipse") #note: multicollinearity is crazy strong corrplot(corM, method="ellipse", order="AOE") print(corM) # Check out the covariance of the original data covM = cov(employNumeric) print(covM) ################# PCA: p = princomp(employNumeric) summary(p) plot(p) abline(1,0, col="red") #allows you to notice which PCs have a SD above 1 for the cut-off #princomp() calculates the rotated matrix for you so you don't have to multiply it out by #hand: p$scores p$sdev p$loadings #this is a matrix of the eigen vectors, and it tries to hide the things #that aren't contributing ############# Rotate the components (PCA Factor Analysis): library(psych) p2 = psych::principal(employNumeric, rotate="varimax", nfactors=2, scores=TRUE) print(p2$loadings, cutoff=.4, sort=F) #the goal of varimax is to get the loadings to be either #a 1 or a 0, amd a cutoff of .4 helps achieve this print(p2$loadings, cutoff=.4, sort=T) p2$loadings #note that 
this uses a much lower cutoff p2$scores #look at new scores (how each sample scales along each PC). NOTE: these scores are #centered => they will always be around a mean of zero (so don't try to compare them directly #to the data multiplied by the rotation matirx) #The rows are in the same order as the original data so you can compare the original values to #their new scores on the PCs #create a data frame that contains the countries and their new scores: x = as.matrix(p2$scores) #first start with a matrix so you can reverse the signs g = x*-1 #I am doing this because the sign is trivial, and it makes better interpretive sense #if they are opposite of what R has defaulted them to s = as.data.frame(g) s$Country <-employ[, 1] #attach the country names as a field called 'Country' s = s[, c(3, 1, 2)] #reorder them to put the country name first s attach(s) #allows you to call variables without naming the dataset first (e.g. RC1 vs. s$RC1) #(adds the variable names to R's current session) s[order(RC1),] #sort the dataframe rows by highest and lowest values of RC1 s[order(RC2),] #sort the dataframe rows by highest and lowest values of RC2 #See how original data values rank according to Agr, the primary component of RC1 head(employ) attach(employ) employ[order(Agr),] #See how original data values rank according to Min, the primary component of RC2 employ[order(Min), c(1, 3, 2, 4:10)] #just reordered the columns so that Min is up front # Run a correlation test to see which correlations are significant library(psych) options("scipen"=100, "digits"=5) round(cor(employNumeric), 2) #gives you the correlation matrix; rounds it off to 2 decimals MCorrTest = corr.test(employNumeric, adjust="none") #this tests correlations for #statistical significance. gives you a set of P values. adjust="none" => makes it so that the # correlation matrix is perfectly symmetric MCorrTest #remember, P < 0.1 is significant because we were asked to use a 90% C.L. 
M = MCorrTest$p #this shows you the p values without rounding M #need to use the un-rounded version for the MTest below: # Now, for each element, see if it is < .1 (or whatever significance) and set the entry to # true = significant or false. keep in mind, we are running a massive number #of T-tests here, and the chances of making a type I error are high, so it is better to be # a bit more stringent and choose a high C.L. MTest = ifelse(M < .1, T, F) MTest # Now lets see how many significant correlations there are for each variable. We can do # this by summing the columns of the matrix colSums(MTest) - 1 # We have to subtract 1 for the diagonal elements (self-correlation) #we can use this to see if there are any variables that are overcorrelated (correlated with a #ton of other variables at a statistically significant level) #Based upon the above test, Agr is collinear with more than 65% of the other predictors. This #could be considered overcorrelation, and thus the variation seen in Agr may already be #accounted for in the other variables. 
This may justify its removal from the model, and we can #see if removing it has the added bonus of making our PCs more easily interpretable: head(employNumeric) p3 = psych::principal(employNumeric[, 2:9], rotate="varimax", nfactors=2, scores=TRUE) print(p3$loadings, cutoff=.4, sort=F) ######################################## # Problem 7: Census Data ######################################## census =read.csv("CensusData.csv") head(census) ################# PCA: p = princomp(census) summary(p) plot(p) abline(1,0, col="red") #allows you to notice which PCs have a SD above 1 for the cut-off p$loadings #create a dataset with a scaled median home value: census2 <- as.matrix(census) smhv <- (census2[, 5] * 1/100000) census2 = as.data.frame(census2) census2$SMedianHomeVal <- smhv census2 = census2[, c(1:4, 6)] head(census2) #PCA with scaled median home value: p2 = princomp(census2) summary(p2) plot(p2) abline(1,0, col="red") #allows you to notice which PCs have a SD above 1 for the cut-off p2$loadings #PCA with correlation matrix (standardized values): p3 = princomp(census, cor=T) summary(p3) plot(p3) abline(1,0, col="red") p3$loadings head(p3$scores) #note how standarization makes the values all fall within a small and similar #range #look at correlations corrplot(cor(census), method = 'ellipse', order='AOE') # Run a correlation test to see which correlations are significant library(psych) options("scipen"=100, "digits"=5) round(cor(census), 2) #gives you the correlation matrix; rounds it off to 2 decimals MCorrTest = corr.test(census, adjust="none") #this tests correlations for #statistical significance. gives you a set of P values. adjust="none" => makes it so that the # correlation matrix is perfectly symmetric MCorrTest #remember, P < 0.05 is significant because we were asked to use a 95% C.L. 
M = MCorrTest$p #this shows you the p values without rounding M #need to use the un-rounded version for the MTest below: # Now, for each element, see if it is < .05 (or whatever significance) and set the entry to # true = significant or false. keep in mind, we are running a massive number #of T-tests here, and the chances of making a type I error are high, so it is better to be # a bit more stringent and choose a high C.L. MTest = ifelse(M < .05, T, F) MTest # Now lets see how many significant correlations there are for each variable. We can do # this by summing the columns of the matrix colSums(MTest) - 1 # We have to subtract 1 for the diagonal elements (self-correlation) #we can use this to see if there are any variables that are overcorrelated (correlated with a #ton of other variables at a statistically significant level) ############################## # Problem 8: Track Record Data ############################## track = read.table("TrackRecordData.txt", header=T) head(track) #transform hours to seconds: trackseconds<- transform(track, m800=m800*60, m1500=m1500*60, m5000=m5000*60, m10000=m10000*60, Marathon=Marathon*60) head(trackseconds) tracknumeric = trackseconds[, 2:9] ################################# #Non-Standardized Computations: #PCA with covariance matrix (non-standardized values): p = princomp(tracknumeric, cor=F) summary(p) plot(p) abline(1,0, col="red") p$loadings #Rotate the components (PCA Factor Analysis): library(psych) pROTATED = psych::principal(tracknumeric, covar=TRUE, rotate="varimax", nfactors=2, scores=TRUE) print(pROTATED$loadings, cutoff=.4, sort=F) #the goal of varimax is to get the loadings to be #either a 1 or a 0, and a cutoff of .4 helps achieve this print(pROTATED$loadings, cutoff=.4, sort=T) pROTATED$loadings #note that this uses a much lower cutoff ############################### #Standardized Computations: #PCA with correlation matrix (standardized values): library(psych) pstandardized = princomp(tracknumeric, cor=T) 
summary(pstandardized) plot(pstandardized) abline(1,0, col="red") pstandardized$loadings #STANDARDIZED PCA Factor Analysis (PC rotation): pstandardizedROTATED1 = psych::principal(tracknumeric, covar=FALSE, rotate="varimax", nfactors=1, scores=TRUE) print(pstandardizedROTATED1$loadings, cutoff=.4, sort=T) #the goal of varimax is to get the loadings to be either #a 1 or a 0, amd a cutoff of .4 helps achieve this #################### pstandardizedROTATED2 = psych::principal(tracknumeric, covar=FALSE, rotate="varimax", nfactors=2, scores=TRUE) print(pstandardizedROTATED2$loadings, cutoff=.4, sort=T) #the goal of varimax is to get the loadings to be either #a 1 or a 0, amd a cutoff of .4 helps achieve this ##################### pstandardizedROTATED3 = psych::principal(tracknumeric, covar=FALSE, rotate="varimax", nfactors=3, scores=TRUE) print(pstandardizedROTATED3$loadings, cutoff=.4, sort=T) #the goal of varimax is to get the loadings to be either #a 1 or a 0, amd a cutoff of .4 helps achieve this #################### pstandardizedROTATED4 = psych::principal(tracknumeric, covar=FALSE, rotate="varimax", nfactors=4, scores=TRUE) print(pstandardizedROTATED4$loadings, cutoff=.4, sort=T) #the goal of varimax is to get the loadings to be either #a 1 or a 0, amd a cutoff of .4 helps achieve this #################### pstandardizedROTATED5 = psych::principal(tracknumeric, covar=FALSE, rotate="varimax", nfactors=5, scores=TRUE) print(pstandardizedROTATED5$loadings, cutoff=.4, sort=T) #the goal of varimax is to get the loadings to be either #a 1 or a 0, amd a cutoff of .4 helps achieve this ##################### pstandardizedROTATED3b = psych::principal(tracknumeric, covar=FALSE, rotate="varimax", nfactors=3, scores=TRUE) print(pstandardizedROTATED3b$loadings, cutoff=.7, sort=T) #use a cutoff of 0.7 instead ##################### #Let's see if anything is over-correlated, and if it is, if removing it will #enhance the interpretibility of the PCs #################### # Check out 
the correlation of the original data corM = cor(tracknumeric) corrplot(corM, method="ellipse") #note: multicollinearity is crazy strong corrplot(corM, method="ellipse", order="AOE") print(corM) # Check out the covariance of the original data covM = cov(tracknumeric) print(covM) # Run a correlation test to see which correlations are significant library(psych) options("scipen"=100, "digits"=5) round(cor(tracknumeric), 2) #gives you the correlation matrix; rounds it off to 2 decimals MCorrTest = corr.test(tracknumeric, adjust="none") #this tests correlations for #statistical significance. gives you a set of P values. adjust="none" => makes it so that the # correlation matrix is perfectly symmetric MCorrTest #remember, P < 0.01 is significant because we were asked to use a 99% C.L. M = MCorrTest$p #this shows you the p values without rounding M #need to use the un-rounded version for the MTest below: # Now, for each element, see if it is < .01 (or whatever significance) and set the entry to # true = significant or false. keep in mind, we are running a massive number #of T-tests here, and the chances of making a type I error are high, so it is better to be # a bit more stringent and choose a high C.L. MTest = ifelse(M < .01, T, F) MTest # Now lets see how many significant correlations there are for each variable. 
We can do # this by summing the columns of the matrix colSums(MTest) - 1 # We have to subtract 1 for the diagonal elements (self-correlation) #we can use this to see if there are any variables that are overcorrelated (correlated with a #ton of other variables at a statistically significant level) ########### #Try PCA with a few overcorrelated variables removed: ########## track2 = tracknumeric[, c(1, 3:5)] pstandardizedROTATEDa = psych::principal(track2, covar=FALSE, rotate="varimax", nfactors=1, scores=TRUE) print(pstandardizedROTATEDa$loadings, cutoff=.7, sort=T) ############ pstandardizedROTATEDb = psych::principal(track2, covar=FALSE, rotate="varimax", nfactors=2, scores=TRUE) print(pstandardizedROTATEDb$loadings, cutoff=.7, sort=T) ############# pstandardizedROTATEDc = psych::principal(track2, covar=FALSE, rotate="varimax", nfactors=3, scores=TRUE) print(pstandardizedROTATEDc$loadings, cutoff=.7, sort=T) ############ pstandardizedROTATEDd = psych::principal(track2, covar=FALSE, rotate="varimax", nfactors=4, scores=TRUE) print(pstandardizedROTATEDd$loadings, cutoff=.7, sort=T) ########################## #Common Factor Analysis: ########################## cfa2 = factanal(tracknumeric, 2) #using 2 components print(cfa2$loadings, cutoff=.7, sort=T) #note here that the % variance is calculated differently # in CFA so it won't be the same as it PCAFA, but they should be close. summary(cfa2) #not helpful :( #Common Factor Analysis doesn't have a nice plot like a scree plot :( ################# cfa3 = factanal(tracknumeric, 3) print(cfa3$loadings, cutoff=.7, sort=T) ################# cfa4 = factanal(tracknumeric, 4) print(cfa4$loadings, cutoff=.7, sort=T) ################# cfa5 = factanal(tracknumeric, 5) print(cfa5$loadings, cutoff=.7, sort=T)
/R Code.R
no_license
rchesak/PCA-CFA-Regularized-Regression
R
false
false
20,503
r
########################## #Problem 5: Insurance Data ########################## library(glmnet) library(corrplot) library(MASS) insurancedata = read.table("InsuranceData.txt", header=T) head(insurancedata) #pull out non-numeric fields and put the newpol column in front because it will #be our "Y". This makes it easier to interpret the correlation matrices insuranceNumeric = insurancedata[,c(6, 2:5, 8)] head(insuranceNumeric) #########Normal Linear Regression: #[on a training set (30 out of 47 obs)] lmFit = lm(newpol ~ ., data=insuranceNumeric[1:30, ]) # train on the first 30 summary(lmFit) plot(lmFit) #goes through several plots that test model assumptions #explicitly create the training and test sets: xTrain = insuranceNumeric[1:30, 2:6] yTrain = insuranceNumeric[1:30, 1] xTest = insuranceNumeric[31:47, 2:6] yTest = insuranceNumeric[31:47, 1] #How well does this model predict on the test data: yHat = predict(lmFit, xTest) #feed xTest data into lmFit model to get predicted y values yHat #confirm that you only have predicted y values for the test set (obs. 31:47) rse = sqrt(sum((yTest - yHat)^2) / 24) #compute predicted rmse using degrees of freedom #specified in lm (lmFit) rse #visually compare a few of the predicted and actual values: head(yHat) head(yTrain) ######################################## #Lasso Regularized Regression with no CV: ########################################## #create training and test sets. we will use the training set to get lambda then get #the predicted rmse from the test set. xTrain = insuranceNumeric[1:30, 2:6] yTrain = insuranceNumeric[1:30, 1] xTest = insuranceNumeric[31:47, 2:6] yTest = insuranceNumeric[31:47, 1] lassoFit = glmnet(as.matrix(xTrain), yTrain) #glmnet on the training data lassoFit plot(lassoFit, label=T) #plot shows you which betas are coming into be important and what #their coeff are. As lambda increases you get more and more var. 
contributions until all are #included #here we can manually look for the best value of lambda summary(lassoFit) # not much help :( print(lassoFit) # shows you # of variables involved in the model (indirectly via DF; # DF = n - #variables - 1); # %dev column is like R^2 (% of variance explained); lambda value. Keep in mind that the model #that explains the most (highest %dev) is not necessarily the best model, though for our #purposes here, we will assume it is. Here, it says lambda = 0.004723 is the best. ########################### #Lasso Regression with CV: ########################### #Use lasso with cross validation on the training set to find an even better lambda: cvFit <- cv.glmnet(as.matrix(xTrain), yTrain) lassoFit = glmnet(as.matrix(xTrain), yTrain, lambda = cvFit$lambda.min, set.seed(523)) #glmnet on the #training data lassoFit # Let's look at the coefficients for the model at a specific lambda. We will choose the #lambda chosen by CV (3 lines of code above): coef(lassoFit, s=0.3108) #this gives us the coefficients for the variables included in the #model at a certain value of lambda. Note: s = lambda #graph different values of lambda vs. rse: par(mar=c(3, 3, 3, 3)) #sets the margins so you can see the graph better plot(cvFit) #here we are looking at the log of the lambda. we are looking for what the #coefficients are where we have the best lambda. we are looking for the lambda that gives the #minimum residual error on the crossvalidated sets (least prediction error), and we can ask #R to find it for us (though it doesn't explicity tell us what the value is). #? 
this plot doesn't really match up with the optimized value of lambda chosen by CV above #compute rse on the training dataset yHat = predict(lassoFit, as.matrix(xTrain), s=0.3108) rse = sqrt(sum((yTrain - yHat)^2) / 24) rse #visually compare a few of the predicted and actual values: head(yHat) head(yTrain) #compute rse on the test dataset (predicted rse) yPredict = predict(lassoFit, as.matrix(xTest), s=0.3108) rsePredict = sqrt(sum((yTest - yPredict)^2) / 24) rsePredict #visually compare a few of the predicted and actual values: head(yHat) head(yTrain) ################################################## #part 4: plot the residuals vs. predicted values ################################################# resid = yTest - yPredict yep<- data.frame(resid, yPredict) yep library(ggplot2) library(gcookbook) graph <- ggplot(yep, aes(x=yPredict, y=resid)) + geom_point() + theme_bw() graph + xlab("Predicted Values") + ylab("Residuals") + geom_hline(yintercept=0) plot(yPredict, resid, xlab="Predicted", ylab="Residuals") abline(0, 0) #puts the horizon on the graph ######################################################## #Lasso Regularized Regression with CV in a different way: ######################################################### #create training and test sets. we will use CV on the training set to get lambda then get #the predicted rmse from the test set. xTrain = insuranceNumeric[1:30, 2:6] yTrain = insuranceNumeric[1:30, 1] xTest = insuranceNumeric[31:47, 2:6] yTest = insuranceNumeric[31:47, 1] # One of the nice things about this function is its # ability to test with cross validation to choose the lamba. Gets you an optimized lambda cvfit = cv.glmnet(as.matrix(xTrain), yTrain) #cross-validated glmnet on the training data cvfit par(mar=c(3, 3, 3, 3)) #sets the margins so you can see the graph better plot(cvfit) #here we are looking at the log of the lambda. we are looking for what the #coefficients are where we have the best lambda. 
we are looking for the lambda that gives the #minimum residual error on the crossvalidated sets (least prediction error), and we can ask #R to find it for us (though it doesn't explicity tell us what the value is) and give us the #model: coef(cvfit, s="lambda.min") # Note we are getting a lot more variable contributions here #than we would expect given the correlation matrix #we want to know which lambda gives the lowest rse on the predict set yPredict = predict(cvfit, newx=as.matrix(xTest), s="lambda.min") rsePredict = sqrt(sum((yTest - yPredict)^2) / 24) rsePredict #the rse here (on the predict set) should be way lower than that given by the lm #why isn't it? head(insuranceNumeric) ###################################### #Problem 6: Employment Data ###################################### library(car) library(corrplot) library(ggplot2) library(MASS) employ =read.csv("EmploymentData.csv") head(employ) employNumeric = employ[2:10] head(employNumeric) # Check out the correlation of the original data corM = cor(employNumeric) corrplot(corM, method="ellipse") #note: multicollinearity is crazy strong corrplot(corM, method="ellipse", order="AOE") print(corM) # Check out the covariance of the original data covM = cov(employNumeric) print(covM) ################# PCA: p = princomp(employNumeric) summary(p) plot(p) abline(1,0, col="red") #allows you to notice which PCs have a SD above 1 for the cut-off #princomp() calculates the rotated matrix for you so you don't have to multiply it out by #hand: p$scores p$sdev p$loadings #this is a matrix of the eigen vectors, and it tries to hide the things #that aren't contributing ############# Rotate the components (PCA Factor Analysis): library(psych) p2 = psych::principal(employNumeric, rotate="varimax", nfactors=2, scores=TRUE) print(p2$loadings, cutoff=.4, sort=F) #the goal of varimax is to get the loadings to be either #a 1 or a 0, amd a cutoff of .4 helps achieve this print(p2$loadings, cutoff=.4, sort=T) p2$loadings #note that 
this uses a much lower cutoff p2$scores #look at new scores (how each sample scales along each PC). NOTE: these scores are #centered => they will always be around a mean of zero (so don't try to compare them directly #to the data multiplied by the rotation matirx) #The rows are in the same order as the original data so you can compare the original values to #their new scores on the PCs #create a data frame that contains the countries and their new scores: x = as.matrix(p2$scores) #first start with a matrix so you can reverse the signs g = x*-1 #I am doing this because the sign is trivial, and it makes better interpretive sense #if they are opposite of what R has defaulted them to s = as.data.frame(g) s$Country <-employ[, 1] #attach the country names as a field called 'Country' s = s[, c(3, 1, 2)] #reorder them to put the country name first s attach(s) #allows you to call variables without naming the dataset first (e.g. RC1 vs. s$RC1) #(adds the variable names to R's current session) s[order(RC1),] #sort the dataframe rows by highest and lowest values of RC1 s[order(RC2),] #sort the dataframe rows by highest and lowest values of RC2 #See how original data values rank according to Agr, the primary component of RC1 head(employ) attach(employ) employ[order(Agr),] #See how original data values rank according to Min, the primary component of RC2 employ[order(Min), c(1, 3, 2, 4:10)] #just reordered the columns so that Min is up front # Run a correlation test to see which correlations are significant library(psych) options("scipen"=100, "digits"=5) round(cor(employNumeric), 2) #gives you the correlation matrix; rounds it off to 2 decimals MCorrTest = corr.test(employNumeric, adjust="none") #this tests correlations for #statistical significance. gives you a set of P values. adjust="none" => makes it so that the # correlation matrix is perfectly symmetric MCorrTest #remember, P < 0.1 is significant because we were asked to use a 90% C.L. 
M = MCorrTest$p #this shows you the p values without rounding M #need to use the un-rounded version for the MTest below: # Now, for each element, see if it is < .1 (or whatever significance) and set the entry to # true = significant or false. keep in mind, we are running a massive number #of T-tests here, and the chances of making a type I error are high, so it is better to be # a bit more stringent and choose a high C.L. MTest = ifelse(M < .1, T, F) MTest # Now lets see how many significant correlations there are for each variable. We can do # this by summing the columns of the matrix colSums(MTest) - 1 # We have to subtract 1 for the diagonal elements (self-correlation) #we can use this to see if there are any variables that are overcorrelated (correlated with a #ton of other variables at a statistically significant level) #Based upon the above test, Agr is collinear with more than 65% of the other predictors. This #could be considered overcorrelation, and thus the variation seen in Agr may already be #accounted for in the other variables. 
This may justify its removal from the model, and we can #see if removing it has the added bonus of making our PCs more easily interpretable: head(employNumeric) p3 = psych::principal(employNumeric[, 2:9], rotate="varimax", nfactors=2, scores=TRUE) print(p3$loadings, cutoff=.4, sort=F) ######################################## # Problem 7: Census Data ######################################## census =read.csv("CensusData.csv") head(census) ################# PCA: p = princomp(census) summary(p) plot(p) abline(1,0, col="red") #allows you to notice which PCs have a SD above 1 for the cut-off p$loadings #create a dataset with a scaled median home value: census2 <- as.matrix(census) smhv <- (census2[, 5] * 1/100000) census2 = as.data.frame(census2) census2$SMedianHomeVal <- smhv census2 = census2[, c(1:4, 6)] head(census2) #PCA with scaled median home value: p2 = princomp(census2) summary(p2) plot(p2) abline(1,0, col="red") #allows you to notice which PCs have a SD above 1 for the cut-off p2$loadings #PCA with correlation matrix (standardized values): p3 = princomp(census, cor=T) summary(p3) plot(p3) abline(1,0, col="red") p3$loadings head(p3$scores) #note how standarization makes the values all fall within a small and similar #range #look at correlations corrplot(cor(census), method = 'ellipse', order='AOE') # Run a correlation test to see which correlations are significant library(psych) options("scipen"=100, "digits"=5) round(cor(census), 2) #gives you the correlation matrix; rounds it off to 2 decimals MCorrTest = corr.test(census, adjust="none") #this tests correlations for #statistical significance. gives you a set of P values. adjust="none" => makes it so that the # correlation matrix is perfectly symmetric MCorrTest #remember, P < 0.05 is significant because we were asked to use a 95% C.L. 
M = MCorrTest$p #this shows you the p values without rounding M #need to use the un-rounded version for the MTest below: # Now, for each element, see if it is < .05 (or whatever significance) and set the entry to # true = significant or false. keep in mind, we are running a massive number #of T-tests here, and the chances of making a type I error are high, so it is better to be # a bit more stringent and choose a high C.L. MTest = ifelse(M < .05, T, F) MTest # Now lets see how many significant correlations there are for each variable. We can do # this by summing the columns of the matrix colSums(MTest) - 1 # We have to subtract 1 for the diagonal elements (self-correlation) #we can use this to see if there are any variables that are overcorrelated (correlated with a #ton of other variables at a statistically significant level) ############################## # Problem 8: Track Record Data ############################## track = read.table("TrackRecordData.txt", header=T) head(track) #transform hours to seconds: trackseconds<- transform(track, m800=m800*60, m1500=m1500*60, m5000=m5000*60, m10000=m10000*60, Marathon=Marathon*60) head(trackseconds) tracknumeric = trackseconds[, 2:9] ################################# #Non-Standardized Computations: #PCA with covariance matrix (non-standardized values): p = princomp(tracknumeric, cor=F) summary(p) plot(p) abline(1,0, col="red") p$loadings #Rotate the components (PCA Factor Analysis): library(psych) pROTATED = psych::principal(tracknumeric, covar=TRUE, rotate="varimax", nfactors=2, scores=TRUE) print(pROTATED$loadings, cutoff=.4, sort=F) #the goal of varimax is to get the loadings to be #either a 1 or a 0, and a cutoff of .4 helps achieve this print(pROTATED$loadings, cutoff=.4, sort=T) pROTATED$loadings #note that this uses a much lower cutoff ############################### #Standardized Computations: #PCA with correlation matrix (standardized values): library(psych) pstandardized = princomp(tracknumeric, cor=T) 
summary(pstandardized) plot(pstandardized) abline(1,0, col="red") pstandardized$loadings #STANDARDIZED PCA Factor Analysis (PC rotation): pstandardizedROTATED1 = psych::principal(tracknumeric, covar=FALSE, rotate="varimax", nfactors=1, scores=TRUE) print(pstandardizedROTATED1$loadings, cutoff=.4, sort=T) #the goal of varimax is to get the loadings to be either #a 1 or a 0, amd a cutoff of .4 helps achieve this #################### pstandardizedROTATED2 = psych::principal(tracknumeric, covar=FALSE, rotate="varimax", nfactors=2, scores=TRUE) print(pstandardizedROTATED2$loadings, cutoff=.4, sort=T) #the goal of varimax is to get the loadings to be either #a 1 or a 0, amd a cutoff of .4 helps achieve this ##################### pstandardizedROTATED3 = psych::principal(tracknumeric, covar=FALSE, rotate="varimax", nfactors=3, scores=TRUE) print(pstandardizedROTATED3$loadings, cutoff=.4, sort=T) #the goal of varimax is to get the loadings to be either #a 1 or a 0, amd a cutoff of .4 helps achieve this #################### pstandardizedROTATED4 = psych::principal(tracknumeric, covar=FALSE, rotate="varimax", nfactors=4, scores=TRUE) print(pstandardizedROTATED4$loadings, cutoff=.4, sort=T) #the goal of varimax is to get the loadings to be either #a 1 or a 0, amd a cutoff of .4 helps achieve this #################### pstandardizedROTATED5 = psych::principal(tracknumeric, covar=FALSE, rotate="varimax", nfactors=5, scores=TRUE) print(pstandardizedROTATED5$loadings, cutoff=.4, sort=T) #the goal of varimax is to get the loadings to be either #a 1 or a 0, amd a cutoff of .4 helps achieve this ##################### pstandardizedROTATED3b = psych::principal(tracknumeric, covar=FALSE, rotate="varimax", nfactors=3, scores=TRUE) print(pstandardizedROTATED3b$loadings, cutoff=.7, sort=T) #use a cutoff of 0.7 instead ##################### #Let's see if anything is over-correlated, and if it is, if removing it will #enhance the interpretibility of the PCs #################### # Check out 
the correlation of the original data corM = cor(tracknumeric) corrplot(corM, method="ellipse") #note: multicollinearity is crazy strong corrplot(corM, method="ellipse", order="AOE") print(corM) # Check out the covariance of the original data covM = cov(tracknumeric) print(covM) # Run a correlation test to see which correlations are significant library(psych) options("scipen"=100, "digits"=5) round(cor(tracknumeric), 2) #gives you the correlation matrix; rounds it off to 2 decimals MCorrTest = corr.test(tracknumeric, adjust="none") #this tests correlations for #statistical significance. gives you a set of P values. adjust="none" => makes it so that the # correlation matrix is perfectly symmetric MCorrTest #remember, P < 0.01 is significant because we were asked to use a 99% C.L. M = MCorrTest$p #this shows you the p values without rounding M #need to use the un-rounded version for the MTest below: # Now, for each element, see if it is < .01 (or whatever significance) and set the entry to # true = significant or false. keep in mind, we are running a massive number #of T-tests here, and the chances of making a type I error are high, so it is better to be # a bit more stringent and choose a high C.L. MTest = ifelse(M < .01, T, F) MTest # Now lets see how many significant correlations there are for each variable. 
We can do # this by summing the columns of the matrix colSums(MTest) - 1 # We have to subtract 1 for the diagonal elements (self-correlation) #we can use this to see if there are any variables that are overcorrelated (correlated with a #ton of other variables at a statistically significant level) ########### #Try PCA with a few overcorrelated variables removed: ########## track2 = tracknumeric[, c(1, 3:5)] pstandardizedROTATEDa = psych::principal(track2, covar=FALSE, rotate="varimax", nfactors=1, scores=TRUE) print(pstandardizedROTATEDa$loadings, cutoff=.7, sort=T) ############ pstandardizedROTATEDb = psych::principal(track2, covar=FALSE, rotate="varimax", nfactors=2, scores=TRUE) print(pstandardizedROTATEDb$loadings, cutoff=.7, sort=T) ############# pstandardizedROTATEDc = psych::principal(track2, covar=FALSE, rotate="varimax", nfactors=3, scores=TRUE) print(pstandardizedROTATEDc$loadings, cutoff=.7, sort=T) ############ pstandardizedROTATEDd = psych::principal(track2, covar=FALSE, rotate="varimax", nfactors=4, scores=TRUE) print(pstandardizedROTATEDd$loadings, cutoff=.7, sort=T) ########################## #Common Factor Analysis: ########################## cfa2 = factanal(tracknumeric, 2) #using 2 components print(cfa2$loadings, cutoff=.7, sort=T) #note here that the % variance is calculated differently # in CFA so it won't be the same as it PCAFA, but they should be close. summary(cfa2) #not helpful :( #Common Factor Analysis doesn't have a nice plot like a scree plot :( ################# cfa3 = factanal(tracknumeric, 3) print(cfa3$loadings, cutoff=.7, sort=T) ################# cfa4 = factanal(tracknumeric, 4) print(cfa4$loadings, cutoff=.7, sort=T) ################# cfa5 = factanal(tracknumeric, 5) print(cfa5$loadings, cutoff=.7, sort=T)
#' \pkg{guess}: adjusts estimates of learning for guessing-related bias.
#'
#' Package-level documentation stub. Implements the method discussed in
#' \url{http://gsood.com/research/papers/guess.pdf}
#' @name guess
#' @docType package
#' @importFrom stats chisq.test sd
#' @importFrom Rsolnp solnp
NULL
/R/guess.R
permissive
soodoku/guess
R
false
false
267
r
#' \pkg{guess} adjust estimates of learning for guessing related bias. #' #' It implements the method discussed in \url{http://gsood.com/research/papers/guess.pdf} #' @name guess #' @docType package #' @importFrom stats chisq.test sd #' @importFrom Rsolnp solnp NULL
#===============================================================
# Bootstrap null-hypothesis test of PI3K-AKT pathway activity.
#
# Display the current working directory
getwd()
# If necessary, change the path below to the directory where the data files
# are stored. "." means current directory. On Windows use a forward slash /
# instead of the usual \.
workingDir <- "C:/Users/hwang1/Documents/2016_analysis/hgg_pathway_activity_nullhypothesis testing"
setwd(workingDir)
dir()

# Read log2 fold changes (NTRK vs PDGFRA) for all quantified phospho-sites.
act <- read.delim("pathway_activity.txt", header = TRUE, sep = "\t")
head(act)
z <- act$LogFC

# Bootstrap: build a null distribution of 10000 activities, each the
# summarized score of 22 randomly sampled fold changes. The observed
# PI3K-AKT activity (aP) is 1.45. Among the 22 components of the pathway
# with differential phospho change, the functional annotation (Ci) of
# proteins 4, 8 and 21 is -1; the remaining proteins are +1.
n_boot <- 10000
# Preallocate a numeric vector instead of growing a list with c() — the
# original append-in-loop pattern is O(n^2); the sampling order is
# unchanged, so the RNG stream and results are identical.
aP <- numeric(n_boot)
for (i in seq_len(n_boot)) {
  a <- sample(z, 22)
  # Flip the sign of the negatively annotated components (Ci = -1).
  a[4] <- -a[4]
  a[8] <- -a[8]
  a[21] <- -a[21]
  aP[i] <- sum(a) / sqrt(length(a))
}
length(aP)

# Empirical one-sided p-value: fraction of null activities exceeding the
# observed activity of 1.45.
pvalue <- sum(aP > 1.45) / n_boot
pvalue
/JUMPn/JUMPna/PI3K_AKT_Activity.r
no_license
xd-self/HGG_Source_Code
R
false
false
1,036
r
#=============================================================== # Display the current working directory getwd(); # If necessary, change the path below to the directory where the data files are stored. # "." means current directory. On Windows use a forward slash / instead of the usual \. workingDir = "C:/Users/hwang1/Documents/2016_analysis/hgg_pathway_activity_nullhypothesis testing"; setwd(workingDir); dir() # read log2FC between NTRK and PDGFRA act <- read.delim("pathway_activity.txt", header = TRUE, sep = "\t") head(act) z <- act$LogFC #Bootstrap to generate a list consists of 10000 acitivites each come # from summarization of 22 randomly sampled FCs # aP is 1.45 #Among the 22 compenents of PI3K-AKT pathway that have DE phos #change, functional annation (Ci) for protein 4,8,21 is -1, the #rest of proteins are +1 aP =list() for (i in 1:10000) { a =sample(z, 22) a[4] <- -a[4] a[8] <- -a[8] a[21] <- -a[21] aPi <- sum(a)/sqrt(length(a)) aP = c(aP, aPi) } length(aP) pvalue = sum(aP > 1.45)/10000 pvalue
# Scrape the British Champs roller-derby league tables and tidy them into a
# single data frame with one row per team and a `tier` label.

# load the relevant library
library(XML)

# choose the relevant URL
derbyURL <- "http://www.britishchamps.com/tables/"

# get the page content
webPage <- htmlTreeParse(derbyURL, useInternal = TRUE)

# get the tiers! they are all denoted by h5 tags
tiers <- xpathSApply(webPage, "//h5", xmlValue)

# get out the table headings (first 9 columns of the season table)
tableNames <- xpathSApply(webPage, "//table[@class='leagueengine_season_table']/tr/th", xmlValue)[1:9]

# flatten all table cells into a data frame and delete the blank second
# column (72 rows = total teams across every tier's table)
longList <- xpathApply(webPage, "//table/tr/td", xmlValue)
bigTable <- data.frame(matrix(unlist(longList), nrow = 72, byrow = TRUE))[, -2]
names(bigTable) <- tableNames
bigTable$tier <- NA

# associate each row with the name of its tier: every time the position
# column ('#') resets to 1 we have entered the next tier's table
tierNum <- 0
for (i in seq_len(nrow(bigTable))) {
  if (bigTable$'#'[i] == 1) {
    tierNum <- tierNum + 1
  }
  bigTable$tier[i] <- tiers[tierNum]
}

# the points column parsed as a factor; change it into a numeric so we can
# analyse it!
bigTable$F <- as.numeric(as.character(bigTable$F))

# Let's take a look at our table of leagues, ready for analysis
bigTable
/derbyleagues.R
no_license
thisisnic/xml-parsing
R
false
false
991
r
# load the relevant library library(XML) # choose the relevant URL derbyURL="http://www.britishchamps.com/tables/" # get the content webPage=htmlTreeParse(derbyURL,useInternal=TRUE) # get the tiers! they are all denoted by h5 tags tiers=xpathSApply(webPage,"//h5",xmlValue) # get out the table headings tableNames=xpathSApply(webPage,"//table[@class='leagueengine_season_table']/tr/th",xmlValue)[1:9] # splits data into a data frame and delete the blank second column longList=xpathApply(webPage,"//table/tr/td",xmlValue) bigTable=data.frame(matrix(unlist(longList), nrow=72, byrow=T))[,-2] names(bigTable)=tableNames bigTable$tier=NA # associate the name of the tier with the tierNum=0 for(i in 1:nrow(bigTable)){ if(bigTable$'#'[i]==1){ tierNum=tierNum+1; } bigTable$tier[i]=tiers[tierNum] } # change the factor into a numeric so we can analyse it! bigTable$F=as.numeric(as.character(bigTable$F)) # Let's take a look at our table of leagues, ready for analysis bigTable
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/ops-%m+%.r \name{rollback} \alias{rollback} \title{Roll back date to last day of previous month} \usage{ rollback(dates, roll_to_first = FALSE, preserve_hms = TRUE) } \arguments{ \item{dates}{A POSIXct, POSIXlt or Date class object.} \item{roll_to_first}{Rollback to the first day of the month instead of the last day of the previous month} \item{preserve_hms}{Retains the same hour, minute, and second information? If FALSE, the new date will be at 00:00:00.} } \value{ A date-time object of class POSIXlt, POSIXct or Date, whose day has been adjusted to the last day of the previous month, or to the first day of the month. } \description{ rollback changes a date to the last day of the previous month or to the first day of the month. Optionally, the new date can retain the same hour, minute, and second information. } \examples{ date <- ymd("2010-03-03") # "2010-03-03 UTC" rollback(date) # "2010-02-28 UTC" dates <- date + months(0:2) # "2010-03-03 UTC" "2010-04-03 UTC" "2010-05-03 UTC" rollback(dates) # "2010-02-28 UTC" "2010-03-31 UTC" "2010-04-30 UTC" date <- ymd_hms("2010-03-03 12:44:22") rollback(date) # "2010-02-28 12:44:22 UTC" rollback(date, roll_to_first = TRUE) # "2010-03-01 12:44:22 UTC" rollback(date, preserve_hms = FALSE) # "2010-02-28 UTC" rollback(date, roll_to_first = TRUE, preserve_hms = FALSE) # "2010-03-01 UTC" }
/man/rollback.Rd
no_license
fieldryand/lubridate
R
false
false
1,437
rd
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/ops-%m+%.r \name{rollback} \alias{rollback} \title{Roll back date to last day of previous month} \usage{ rollback(dates, roll_to_first = FALSE, preserve_hms = TRUE) } \arguments{ \item{dates}{A POSIXct, POSIXlt or Date class object.} \item{roll_to_first}{Rollback to the first day of the month instead of the last day of the previous month} \item{preserve_hms}{Retains the same hour, minute, and second information? If FALSE, the new date will be at 00:00:00.} } \value{ A date-time object of class POSIXlt, POSIXct or Date, whose day has been adjusted to the last day of the previous month, or to the first day of the month. } \description{ rollback changes a date to the last day of the previous month or to the first day of the month. Optionally, the new date can retain the same hour, minute, and second information. } \examples{ date <- ymd("2010-03-03") # "2010-03-03 UTC" rollback(date) # "2010-02-28 UTC" dates <- date + months(0:2) # "2010-03-03 UTC" "2010-04-03 UTC" "2010-05-03 UTC" rollback(dates) # "2010-02-28 UTC" "2010-03-31 UTC" "2010-04-30 UTC" date <- ymd_hms("2010-03-03 12:44:22") rollback(date) # "2010-02-28 12:44:22 UTC" rollback(date, roll_to_first = TRUE) # "2010-03-01 12:44:22 UTC" rollback(date, preserve_hms = FALSE) # "2010-02-28 UTC" rollback(date, roll_to_first = TRUE, preserve_hms = FALSE) # "2010-03-01 UTC" }
#========================normlized crime in ANDHRA PARDESH=====================
# Diverging bar chart of district-level crimes against women in Andhra
# Pradesh, expressed as z-scores around the state mean.
library(ggplot2)
theme_set(theme_bw())
library(dplyr)

table <- read.csv("E:/project/crimes.csv")

# Total crimes against women per district, restricted to Andhra Pradesh.
data2 <- table %>%
  group_by(DISTRICT) %>%
  filter(STATE.UT == "ANDHRA PRADESH") %>%
  summarise(all_crimes = sum(Rape, Kidnapping.and.Abduction, Dowry.Deaths,
                             Assault.on.women.with.intent.to.outrage.her.modesty,
                             Insult.to.modesty.of.Women,
                             Cruelty.by.Husband.or.his.Relatives,
                             Importation.of.Girls))
View(data2)
# BUG FIX: the original called print(all_crimes), but `all_crimes` only
# exists as a column of data2, not as a standalone object, so the script
# aborted here with "object 'all_crimes' not found".
print(data2$all_crimes)

data <- data.frame(data2)
print(data2)

# z-score of each district's total relative to the state mean.
data$avg <- round((data$all_crimes - mean(data$all_crimes)) / sd(data$all_crimes), 2)
data$chart_type <- ifelse(data$avg < 0, "below", "above")
data <- data[order(data$avg), ]

ggplot(data, aes(x = DISTRICT, y = avg)) +
  geom_bar(stat = 'identity', aes(fill = chart_type), width = 0.5) +
  scale_fill_manual(name = "crimes", labels = c("above avg", "below avg"),
                    values = c("above" = "#00ba38", "below" = "#f8766d")) +
  labs(subtitle = "normlized crimes in ANDHRA PRADESH",
       title = "DIverging Bars") +
  coord_flip()
/Q4/1/district_1.r
no_license
PurvishaThakkar/datascience-project-in-R-studio
R
false
false
1,056
r
#========================normlized crime in AANDHRA PARDESH===================== library(ggplot2) theme_set(theme_bw()) library(dplyr) library(ggplot2) table=read.csv("E:/project/crimes.csv") data2=table %>% group_by(DISTRICT) %>% filter(STATE.UT== "ANDHRA PRADESH") %>% summarise(all_crimes=sum(Rape,Kidnapping.and.Abduction,Dowry.Deaths,Assault.on.women.with.intent.to.outrage.her.modesty,Insult.to.modesty.of.Women,Cruelty.by.Husband.or.his.Relatives,Importation.of.Girls)) View(data2) print(all_crimes) data=data.frame(data2) print(data2) data$avg = round((data$all_crimes - mean(data$all_crimes))/sd(data$all_crimes),2) data$chart_type = ifelse(data$avg < 0,"below","above") data=data[order(data$avg),] ggplot(data,aes(x=DISTRICT,y=avg))+ geom_bar(stat = 'identity',aes(fill=chart_type),width = 0.5)+ scale_fill_manual(name="crimes",labels=c("above avg","below avg"), values = c("above"="#00ba38","below"="#f8766d"))+ labs(subtitle = "normlized crimes in ANDHRA PRADESH", title = "DIverging Bars")+ coord_flip()
# Plot the most frequent terms of a document-term matrix as a horizontal
# bar chart.
#
# dtm     - a document-term matrix (slam simple triplet matrix)
# records - index vector selecting which rows of the frequency table to plot
# title   - plot title
dtmPlot <- function(dtm, records, title) {
  # total frequency of every term, most frequent first
  term_totals <- sort(col_sums(dtm), decreasing = TRUE)
  freq <- data.frame(word = names(term_totals), freq = term_totals)

  # BAR PLOT: bars ordered by frequency, flipped to horizontal
  ggplot(freq[records, ], aes(x = reorder(word, freq), y = freq)) +
    geom_bar(stat = "identity", fill = "blue", alpha = 0.6) +
    coord_flip() +
    ggtitle(title)
}
/Analyse/RCode/fn_dtmPlot.R
no_license
tonyjward/trends-in-data-science
R
false
false
493
r
dtmPlot <- function(dtm, records, title) { freq <- dtm %>% #removeSparseTerms(sparse) %>% col_sums() %>% sort(decreasing = TRUE) %>% data.frame(word = names(.), freq = .) # BAR PLOT ggplot(freq[records,], aes(x = reorder(word, freq), y = freq)) + geom_bar(stat= "identity", fill = "blue", alpha = 0.6) + coord_flip() + ggtitle(title) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/1_validateFunc_headers.R \name{L_validateFunction} \alias{L_validateFunction} \title{Logical validation function} \usage{ L_validateFunction(value) } \arguments{ \item{value}{value to validate} } \description{ Logical validation function }
/man/L_validateFunction.Rd
permissive
LE2P/rBSRN
R
false
true
318
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/1_validateFunc_headers.R \name{L_validateFunction} \alias{L_validateFunction} \title{Logical validation function} \usage{ L_validateFunction(value) } \arguments{ \item{value}{value to validate} } \description{ Logical validation function }
#' ---
#' title: "Processing ALE data"
#' author: "Michael Cysouw"
#' date: "`r Sys.Date()`"
#' ---

# make html-version of this manual with:
# rmarkdown::render("manualData.R")

# load functions
library(qlcData)
library(qlcMatrix)
source("code/prepareData.R")

# ===============
# error checking based on clusters on original input
reportMissing(1:346)

# make raw profile: read the word lists of all 346 ALE maps.
# lapply (not sapply) so the result is always a list regardless of the
# shape of the individual word lists; only unlist(all) is used below.
all <- lapply(1:346, read.ALE.words)
write.profile(unlist(all), file = "sandbox/ALEprofile.txt", normalize = "NFD", sep = "")

# make graphemic profile
# remove stress, parenthesis, ellipsis...
words <- gsub("['ˌ…\\(\\)†‿]", "", unlist(all))
tok <- tokenize(words
    , profile = "data/ALEprofileNFDgraphemes.tsv"
    , file.out = "sandbox/ALEgraphemes"
    , sep = " "
    , silent = TRUE
    , normalize = "NFD"
    , regex = TRUE
    )
write.profile(tok$strings$tokenized, sep = " ", file = "sandbox/ALEgraphemes.txt")

# reduced profile, only minimal reduction in graphemes
# most graphemes arise because of combinations
tok <- tokenize(unlist(all)
    , profile = "data/ALEprofileNFDreduce.tsv"
    , sep = ""
    , silent = TRUE
    , normalize = "NFD"
    , regex = FALSE
    , transliterate = "Reduce"
    )
tok <- tokenize(tok$strings$transliterated
    , profile = "data/ALEprofileNFDgraphemes.tsv"
    , file.out = "sandbox/ALEgraphemes_reduce"
    , sep = " "
    , silent = TRUE
    , normalize = "NFD"
    , regex = TRUE
    )
write.profile(tok$strings$tokenized, sep = " ", file = "sandbox/ALEgraphemes_reduce.txt")

# =======================
# reduce and tokenize input, and simplify to simple IPA, make profile of
# grapheme clusters; add quick and dirty cognate clustering and alignment
# using lingPy
tmp <- prepare(346, cutoff = 0.85)

# to see all intermediate steps, try this
# tmp <- prepare(1, full = TRUE, cutoff = 0.85)

# many alignments
sapply(c(8, 9), prepare)

# show Session Info
# sessionInfo()
/manualData.R
no_license
SimonGreenhill/ALE
R
false
false
1,905
r
#' --- #' title: "Processing ALE data" #' author: "Michael Cysouw" #' date: "`r Sys.Date()`" #' --- # make html-version of this manual with: # rmarkdown::render("manualData.R") # load functions library(qlcData) library(qlcMatrix) source("code/prepareData.R") # =============== # error checking based on clusters on original input reportMissing(1:346) # make raw profile all <- sapply(1:346, read.ALE.words) write.profile(unlist(all), file = "sandbox/ALEprofile.txt", normalize = "NFD", sep = "") # make graphemic profile # remove stress, parenthesis, ellipsis... words <- gsub("['ˌ…\\(\\)†‿]","",unlist(all)) tok <- tokenize(words , profile = "data/ALEprofileNFDgraphemes.tsv" , file.out = "sandbox/ALEgraphemes" , sep = " " , silent = TRUE , normalize = "NFD" , regex = TRUE ) write.profile(tok$strings$tokenized, sep = " ", file = "sandbox/ALEgraphemes.txt") # reduced profile, only minimal reduction in graphemes # most graphemes arise because of combinations tok <- tokenize(unlist(all) , profile = "data/ALEprofileNFDreduce.tsv" , sep = "" , silent = TRUE , normalize = "NFD" , regex = FALSE , transliterate = "Reduce" ) tok <- tokenize(tok$strings$transliterated , profile = "data/ALEprofileNFDgraphemes.tsv" , file.out = "sandbox/ALEgraphemes_reduce" , sep = " " , silent = TRUE , normalize = "NFD" , regex = TRUE ) write.profile(tok$strings$tokenized, sep = " ", file = "sandbox/ALEgraphemes_reduce.txt") # ======================= # reduce and tokenize input, and simplify to simple IPA, make profile of grapheme clusters # add quick and dirty cognate clustering and alignment using lingPy tmp <- prepare(346, cutoff = 0.85) # to see all intermediate steps, try this # tmp <- prepare(1, full = TRUE, cutoff = 0.85) # many alignments sapply(c(8,9), prepare) # show Session Info # sessionInfo()
## --------------------------------------------------
## seqmtplot(): mean-time plot of a sequence object
## --------------------------------------------------

# Thin convenience wrapper around seqplot() with type fixed to "mt".
# All arguments, including those captured by `...`, are forwarded
# unchanged to seqplot().
seqmtplot <- function(seqdata, group = NULL, main = "auto", ...) {
  seqplot(seqdata,
          group = group,
          type  = "mt",
          main  = main,
          ...)
}
/R/seqmtplot.R
no_license
cran/TraMineR
R
false
false
273
r
## ============================================ ## PLOT OF THE STATES DISTRIBUTION BY TIME UNIT ## ============================================ seqmtplot <- function(seqdata, group = NULL, main = "auto", ...) { seqplot(seqdata, group=group, type="mt", main=main, ...) }
##### Optimizing the 2-D Branin function with a multi-point proposal #####

library(mlrMBO)
library(ggplot2)
library(smoof)

set.seed(2) # FIXME: does not work for seed == 1

configureMlr(show.learner.output = FALSE)
pause <- interactive()

# Objective: the standard 2-D Branin test function.
obj.fun <- makeBraninFunction()

# MBO control: propose 5 points per iteration, mean-response infill,
# 10 iterations, multi-point proposal via the moimbo method.
ctrl <- makeMBOControl(propose.points = 5L)
ctrl <- setMBOControlInfill(ctrl, crit = makeMBOInfillCritMeanResponse())
ctrl <- setMBOControlTermination(ctrl, iters = 10L)
ctrl <- setMBOControlMultiPoint(
  ctrl,
  method           = "moimbo",
  moimbo.objective = "ei.dist",
  moimbo.dist      = "nearest.neighbor",
  moimbo.maxit     = 200L
)

# lrn = makeMBOLearner(ctrl, obj.fun)
# FIXME: Remove lrn after #314 is fixed
lrn <- makeLearner("regr.km", predict.type = "se")

# Initial design: 10 points from a maximin Latin hypercube.
design <- generateDesign(10L, getParamSet(obj.fun), fun = lhs::maximinLHS)

run <- exampleRun(obj.fun, design = design, learner = lrn,
                  control = ctrl, points.per.dim = 50L, show.info = TRUE)

print(run)
plotExampleRun(run, pause = pause, gg.objects = list(theme_bw()))
/inst/examples/ex_2d_numeric_multipoint.R
no_license
mb706/mlrMBO
R
false
false
969
r
##### optimizing branin in 2D with multipoint proposal ##### library(mlrMBO) library(ggplot2) library(smoof) set.seed(2) # FIXME: does not work for seed == 1 configureMlr(show.learner.output = FALSE) pause = interactive() obj.fun = makeBraninFunction() ctrl = makeMBOControl(propose.points = 5L) ctrl = setMBOControlInfill(ctrl, crit = makeMBOInfillCritMeanResponse()) ctrl = setMBOControlTermination(ctrl, iters = 10L) ctrl = setMBOControlMultiPoint(ctrl, method = "moimbo", moimbo.objective = "ei.dist", moimbo.dist = "nearest.neighbor", moimbo.maxit = 200L ) #lrn = makeMBOLearner(ctrl, obj.fun) #FIXME: Remove lrn after #314 is fixed lrn = makeLearner("regr.km", predict.type = "se") design = generateDesign(10L, getParamSet(obj.fun), fun = lhs::maximinLHS) run = exampleRun(obj.fun, design = design, learner = lrn, control = ctrl, points.per.dim = 50L, show.info = TRUE) print(run) plotExampleRun(run, pause = pause, gg.objects = list(theme_bw()))
# Unzip every archive found in the WBI "zipped data" folder into the
# "raw data" folder.
setwd("~/WBI")  # NOTE(review): the paths below are absolute, so this setwd()
                # does not affect the rest of the script; consider removing it.

zip_dir <- "C:\\Users\\C6728215\\Documents\\WBI\\zipped data"
raw_dir <- "C:\\Users\\C6728215\\Documents\\WBI\\raw data"

zipped_files <- list.files(zip_dir)

# Iterate over the file names directly: the original
# `for (i in 1:length(zipped_files))` would run with indices 1 and 0
# when the folder is empty.
for (zf in zipped_files) {
  unzip(zipfile = file.path(zip_dir, zf), exdir = raw_dir)
}
/wdi-data-scripts/unzip files.R
no_license
cmoore9123/wdi_dashboard
R
false
false
330
r
setwd("~/WBI") zipped_files <- list.files("C:\\Users\\C6728215\\Documents\\WBI\\zipped data\\") for (i in 1:length(zipped_files)) { unzip(zipfile = paste0("C:\\Users\\C6728215\\Documents\\WBI\\zipped data\\", zipped_files[i]), exdir = "C:\\Users\\C6728215\\Documents\\WBI\\raw data") }
# Impute the first 20 columns of the input table with gradient boosting:
# each target column is modelled from the remaining predictor columns
# (21 .. ncol) on the complete rows, and its missing entries are filled
# with the model's predictions.

X <- read.table("gbm_input.csv", header = TRUE, sep = ",")

library(gbm)

n_col <- ncol(X)
for (target in 1:20) {
  # Rows with NA in the target column are the ones to predict.
  missing  <- is.na(X[, target])
  observed <- !missing

  # Fit a Gaussian GBM on the observed rows.
  fit <- gbm.fit(X[observed, 21:n_col], X[observed, target],
                 n.trees = 10000, distribution = "gaussian",
                 interaction.depth = 3, shrinkage = 0.01)

  # Fill the gaps using all 10000 trees.
  X[missing, target] <- predict.gbm(fit, X[missing, 21:n_col], 10000)
}

# Write the completed table back out.
write.table(X, "gbm_output.csv", sep = ",", row.names = FALSE)
/source/gbm/basic_gbm.r
no_license
pgnepal/GEFCOM2012
R
false
false
522
r
# Read data file X <- read.table('gbm_input.csv', header = TRUE, sep = ',') # Gradient boost it library(gbm) for (index in 1:20) { # Work out which bits need to be predicted test <- is.na(X[,index]) train <- !test # Go GBM! my_gbm <- gbm.fit(X[train,21:dim(X)[2]], X[train,index], n.trees=10000, distribution="gaussian", interaction.depth=3, shrinkage=0.01) X[test,index] <- predict.gbm(my_gbm,X[test,21:dim(X)[2]],10000) } # Write output write.table(X, 'gbm_output.csv', sep = ',', row.names = FALSE)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/conn_HDFS.R \name{hdfsConn} \alias{hdfsConn} \title{Connect to Data Source on HDFS} \usage{ hdfsConn(loc, type = "sequence", autoYes = FALSE, reset = FALSE, verbose = TRUE) } \arguments{ \item{loc}{location on HDFS for the data source} \item{type}{the type of data ("map", "sequence", "text")} \item{autoYes}{automatically answer "yes" to questions about creating a path on HDFS} \item{reset}{should existing metadata for this object be overwritten?} \item{verbose}{logical - print messages about what is being done} } \value{ a "kvConnection" object of class "hdfsConn" } \description{ Connect to a data source on HDFS } \details{ This simply creates a "connection" to a directory on HDFS (which need not have data in it). To actually do things with this data, see \code{\link{ddo}}, etc. } \examples{ \dontrun{ # connect to empty HDFS directory conn <- hdfsConn("/test/irisSplit") # add some data addData(conn, list(list("1", iris[1:10,]))) addData(conn, list(list("2", iris[11:110,]))) addData(conn, list(list("3", iris[111:150,]))) # represent it as a distributed data frame hdd <- ddf(conn) } } \seealso{ \code{\link{addData}}, \code{\link{ddo}}, \code{\link{ddf}}, \code{\link{localDiskConn}} } \author{ Ryan Hafen }
/man/hdfsConn.Rd
permissive
hafen/datadr
R
false
true
1,324
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/conn_HDFS.R \name{hdfsConn} \alias{hdfsConn} \title{Connect to Data Source on HDFS} \usage{ hdfsConn(loc, type = "sequence", autoYes = FALSE, reset = FALSE, verbose = TRUE) } \arguments{ \item{loc}{location on HDFS for the data source} \item{type}{the type of data ("map", "sequence", "text")} \item{autoYes}{automatically answer "yes" to questions about creating a path on HDFS} \item{reset}{should existing metadata for this object be overwritten?} \item{verbose}{logical - print messages about what is being done} } \value{ a "kvConnection" object of class "hdfsConn" } \description{ Connect to a data source on HDFS } \details{ This simply creates a "connection" to a directory on HDFS (which need not have data in it). To actually do things with this data, see \code{\link{ddo}}, etc. } \examples{ \dontrun{ # connect to empty HDFS directory conn <- hdfsConn("/test/irisSplit") # add some data addData(conn, list(list("1", iris[1:10,]))) addData(conn, list(list("2", iris[11:110,]))) addData(conn, list(list("3", iris[111:150,]))) # represent it as a distributed data frame hdd <- ddf(conn) } } \seealso{ \code{\link{addData}}, \code{\link{ddo}}, \code{\link{ddf}}, \code{\link{localDiskConn}} } \author{ Ryan Hafen }
# Fit a cross-validated elastic net (alpha = 0.85) on the central
# nervous system training set and append the fitted path summary to the
# model's log file.  Column 1 is the response; columns 4..ncol are the
# predictors.
library(glmnet)

dat <- read.table("./TrainingSet/RF/central_nervous_system.csv",
                  header = TRUE, sep = ",")

predictors <- as.matrix(dat[, 4:ncol(dat)])
response   <- as.matrix(dat[, 1])

set.seed(123)
fit <- cv.glmnet(predictors, response,
                 nfolds = 10, type.measure = "mae", alpha = 0.85,
                 family = "gaussian", standardize = FALSE)

# Log the glmnet path (one line per lambda) to the classifier's text file.
sink("./Model/EN/Classifier/central_nervous_system/central_nervous_system_088.txt",
     append = TRUE)
print(fit$glmnet.fit)
sink()
/Model/EN/Classifier/central_nervous_system/central_nervous_system_088.R
no_license
leon1003/QSMART
R
false
false
399
r
library(glmnet) mydata = read.table("./TrainingSet/RF/central_nervous_system.csv",head=T,sep=",") x = as.matrix(mydata[,4:ncol(mydata)]) y = as.matrix(mydata[,1]) set.seed(123) glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.85,family="gaussian",standardize=FALSE) sink('./Model/EN/Classifier/central_nervous_system/central_nervous_system_088.txt',append=TRUE) print(glm$glmnet.fit) sink()
# Settlement continuity across pottery groups.
# NOTE(review): `set_base`, `labs_chrono` and `add_region()` are defined
# elsewhere in the project (as are the tidyverse attachments) -- verify
# against the scripts that source this file.

# `cont`: for each settlement id and chronological facet, flag (value = 1)
# whether the settlement was also occupied in the immediately preceding
# facet -- presumably a per-settlement continuity indicator; confirm.
cont <- set_base$period2 %>%
  select(-period_label, -period, -chrono) %>%
  distinct(id, facet) %>%
  mutate(pres = 1L) %>%
  # Presence/absence matrix over all facets, then back to long form so
  # absent facets get explicit 0 rows.
  pivot_wider(names_from = facet, values_from = pres, values_fill = 0L) %>%
  pivot_longer(c(starts_with("TRB"), starts_with("SBK"), starts_with("LgK"), "Proto Ene."),
               names_to = "facet") %>%
  mutate(facet = factor(facet, levels = labs_chrono$facets)) %>%
  arrange(id, facet) %>%
  group_by(id) %>%
  # prev = 1 when present now AND present in the previous facet;
  # the second if_else() clears the NA produced by lag() on the first facet.
  mutate(prev = if_else(value == 1, lag(value), 0L),
         prev = if_else(is.na(prev), 0L, prev)) %>%
  ungroup(id) %>%
  select(-value) %>%
  mutate(variable = "cont") %>%
  rename(value = prev)

# `n_set`: total settlement count per region and facet (denominator).
n_set <- set_base$period2 %>%
  select(-starts_with("period"), -chrono) %>%
  add_region() %>%
  group_by(reg, facet) %>%
  summarise(sum = n())

# Percentage of continuing settlements per region/facet, plotted as a
# bar chart faceted by region.
cont %>%
  add_region() %>%
  select(-variable) %>%
  group_by(reg, facet) %>%
  summarise(n = sum(value, na.rm = TRUE)) %>%
  full_join(n_set) %>%
  mutate(
    n = if_else(is.na(n), 0L, n),
    sum = if_else(is.na(sum), 0L, sum),
    perc = (n / sum) * 100,
    # Drop zero and 0/0 percentages so empty facets plot as gaps.
    perc = if_else(perc == 0 | is.nan(perc), NA_real_, perc),
    reg = if_else(reg == "B", "East Bohemia", "Morava river catchment"),
    # chrono_label = labs_chrono$chrono2[chrono],
    # chrono_label = factor(chrono_label, levels = labs_chrono$chrono2)
    facet = factor(facet, levels = labs_chrono$facets)
  ) %>%
  ggplot(aes(facet, perc)) +
  geom_col(color = "black", fill = "white", position = "dodge", show.legend = FALSE) +
  facet_wrap(vars(reg), ncol = 1) +
  labs(x = "Pottery group", y = "Continuity of settlements (%)") +
  theme_bw() +
  theme(axis.text.x = element_text(angle = -90, hjust = 0))

ggsave(here::here("groups_chrono_continuity2.svg"), width = 7, height = 5)
/analysis/code/continuity.R
permissive
petrpajdla/settlements
R
false
false
1,748
r
cont <- set_base$period2 %>% select(-period_label, -period, -chrono) %>% distinct(id, facet) %>% mutate(pres = 1L) %>% pivot_wider(names_from = facet, values_from = pres, values_fill = 0L) %>% pivot_longer(c(starts_with("TRB"), starts_with("SBK"), starts_with("LgK"), "Proto Ene."), names_to = "facet") %>% mutate(facet = factor(facet, levels = labs_chrono$facets)) %>% arrange(id, facet) %>% group_by(id) %>% mutate(prev = if_else(value == 1, lag(value), 0L), prev = if_else(is.na(prev), 0L, prev)) %>% ungroup(id) %>% select(-value) %>% mutate(variable = "cont") %>% rename(value = prev) n_set <- set_base$period2 %>% select(-starts_with("period"), -chrono) %>% add_region() %>% group_by(reg, facet) %>% summarise(sum = n()) cont %>% add_region() %>% select(-variable) %>% group_by(reg, facet) %>% summarise(n = sum(value, na.rm = TRUE)) %>% full_join(n_set) %>% mutate( n = if_else(is.na(n), 0L, n), sum = if_else(is.na(sum), 0L, sum), perc = (n / sum) * 100, perc = if_else(perc == 0 | is.nan(perc), NA_real_, perc), reg = if_else(reg == "B", "East Bohemia", "Morava river catchment"), # chrono_label = labs_chrono$chrono2[chrono], # chrono_label = factor(chrono_label, levels = labs_chrono$chrono2) facet = factor(facet, levels = labs_chrono$facets) ) %>% ggplot(aes(facet, perc)) + geom_col(color = "black", fill = "white", position = "dodge", show.legend = FALSE) + facet_wrap(vars(reg), ncol = 1) + labs(x = "Pottery group", y = "Continuity of settlements (%)") + theme_bw() + theme(axis.text.x = element_text(angle = -90, hjust = 0)) ggsave(here::here("groups_chrono_continuity2.svg"), width = 7, height = 5)
set.seed(14)

# Application to the `seeds` dataset: compare several clustering
# procedures against the known class labels via ClassAgree().
data("seeds")
X  <- as.matrix(seeds[, 1:7])
id <- as.numeric(seeds[, 8])

n <- nrow(X)   # number of observations
p <- ncol(X)   # number of variables
K <- max(id)   # number of classes

# Traditional K-means; its partition seeds all the following models.
M.K   <- kmeans(X, K)
id.km <- M.K$cluster
ClassAgree(id.km, id)

# Manly K-means (la initialized at 0.1).
M.MK <- Manly.Kmeans(X, id = id.km, la = matrix(0.1, K, p))
ClassAgree(M.MK$id, id)

# Gaussian mixture model (la fixed at 0).
M.Gauss <- Manly.EM(X, id = id.km, la = matrix(0, K, p))
ClassAgree(M.Gauss$id, id)

# Manly mixture via the EM algorithm (la initialized at 0.1).
M.EM <- Manly.EM(X, id = id.km, la = matrix(0.1, K, p))
ClassAgree(M.EM$id, id)

# Forward selection starting from the Gaussian model.
M.F <- Manly.select(X, M.Gauss, method = "forward", silent = TRUE)
ClassAgree(M.F$id, id)

# Backward selection starting from the full Manly model.
M.B <- Manly.select(X, M.EM, method = "backward", silent = TRUE)
ClassAgree(M.B$id, id)
/demo/seeds.R
no_license
cran/ManlyMix
R
false
false
833
r
set.seed(14) #Application to dataset seeds data("seeds") X <- as.matrix(seeds[,1:7]) id <- as.numeric(seeds[,8]) n <- dim(X)[1] p <- dim(X)[2] K <- max(id) #run the traditional K-means algorithm M.K <- kmeans(X, K) id.km <- M.K$cluster ClassAgree(id.km, id) #run the Manly K-means algorithm M.MK <- Manly.Kmeans(X, id = id.km, la = matrix(0.1, K, p)) ClassAgree(M.MK$id, id) #run Gaussian mixture model M.Gauss <- Manly.EM(X, id = id.km, la = matrix(0, K, p)) ClassAgree(M.Gauss$id, id) #run the EM algorithm M.EM <- Manly.EM(X, id = id.km, la = matrix(0.1, K, p)) ClassAgree(M.EM$id, id) #run the forward selection M.F <- Manly.select(X, M.Gauss, method = "forward", silent = TRUE) ClassAgree(M.F$id, id) #run the backward algorithm M.B <- Manly.select(X, M.EM, method = "backward", silent = TRUE) ClassAgree(M.B$id, id)
# Leave-one-out cross-validation of the censored Gaussian-process model:
# each observed (non-censored) site is held out in turn, the model is
# refit on the rest, and 90%/95% posterior predictive intervals for the
# held-out site are collected (for coverage assessment).
# NOTE(review): rm(list = ls()) and setwd() in a script are discouraged;
# kept as-is here to preserve the original workflow.
rm(list = ls())
mydir <- "/home/hazraa/Documents/0_arsenic_small/real_revision/impute"
setwd(mydir)

# Presumably loads Y, Y.all, S and X (used below) -- verify against
# the file that creates clean_data3.Rdata.
load("clean_data3.Rdata")
available.cases <- sum(!is.na(Y[ , 1]))

#--------------------
library(parallel)
library(doParallel)

ncores <- 20
cl <- makeCluster(ncores)
registerDoParallel(cl)

# One parallel task per observed case; each worker reloads the data and
# sources the model code, since cluster workers start with a fresh session.
coverage.all <- parLapply(cl, 1:available.cases, function(rep.no){

  library(fields)
  load("clean_data3.Rdata")

  total.cases <- nrow(S)
  censored.cases <- sum(is.na(Y[ , 1]))
  avoid.cases <- which(is.na(Y[ , 1]))
  available.cases <- sum(!is.na(Y[ , 1]))
  keep.cases <- which(!is.na(Y[ , 1]))

  # mcmc.GP() and its helpers come from these project files.
  source("auxfunctions.R")
  source("update_params.R")
  source("mcmc.R")

  # Hold out the rep.no-th observed case; t(as.matrix(...)) keeps the
  # single test case as a 1-row matrix.
  test.cases <- keep.cases[rep.no]
  Y.test <- t(as.matrix(Y[test.cases, ]))
  S.test <- t(as.matrix(S[test.cases, ]))
  X.test <- t(as.matrix(X[test.cases, ]))

  train.cases <- sort(setdiff(1:nrow(S), test.cases))
  Y.train <- Y.all[train.cases, ]
  S.train <- S[train.cases, ]
  X.train <- X[train.cases, ]

  # Detection limit: values below 0.5 (on the original scale) are
  # censored and set to log(0.5).
  censor.val <- log(0.5)
  censor.cases <- which(is.na(Y.train[ , 1]))
  Y.train[censor.cases, 1] <- censor.val

  iters <- 4000
  burn <- 2000
  # NOTE(review): `miles= F` -- prefer the full `miles = FALSE`.
  fit.observed.GP <- mcmc.GP(Y.train, S.train, S.test, X.train, X.test,
                             cutoff = censor.val, censor.cases,
                             beta.init = NULL, Sigma.init = NULL, latent.init = NULL,
                             phi.init = 0.1 * max(rdist.earth(S, miles= F)), r.init = 0.5,
                             # priors
                             sd.beta = 100, shape.Sigma = 0.01, rate.Sigma = 0.01,
                             phi.upper = 0.5 * max(rdist.earth(S, miles= F)),
                             # mcmc settings
                             iters = iters, burn = burn, thin = 5)

  # Posterior predictive intervals for the held-out case, per response
  # variable, discarding the burn-in draws.
  cis90 <- sapply(1:ncol(Y.test), function(var.no){
    cis <- quantile(fit.observed.GP$Y.pred[-c(1:burn), 1, var.no],
                    probs = c(0.05, 0.95))
    cis})

  cis95 <- sapply(1:ncol(Y.test), function(var.no){
    cis <- quantile(fit.observed.GP$Y.pred[-c(1:burn), 1, var.no],
                    probs = c(0.025, 0.975))
    cis})

  print(rep.no)
  list(cis90 = cis90, cis95 = cis95)})

stopCluster(cl)

save(coverage.all, file = "coverage.all.Rdata")

# Dead code from earlier experiments with alternative test-set designs
# (kept for reference).
# set.seed(rep.no)
# test.cases <- sort(sample(rest.cases, ntest))
# distmat.censor.rest <- rdist(S[censor.cases, ], S[rest.cases, ])
# test.cases <- rest.cases[which(colMeans(distmat.censor.rest) <= sort(colMeans(distmat.censor.rest))[ntest])]
# censor.val <- as.vector(quantile(Y.all[ , 1], probs = censor.level))
# censor.cases <- which(Y.all[ , 1] <= censor.val)
# rest.cases <- setdiff(1:nrow(S), censor.cases)
# set.seed(rep.no)
# test.cases <- sort(sample(rest.cases, ntest))
# train.cases <- sort(setdiff(rest.cases, test.cases))
# final.train.cases <- sort(union(censor.cases, train.cases))
# Y.train <- Y.all[final.train.cases, ]
# Y.test <- Y.all[test.cases, ]
# S.train <- S[final.train.cases, ]
# S.test <- S[test.cases, ]
# X.train <- X[final.train.cases, ]
# X.test <- X[test.cases, ]
# censor.cases <- which(Y.train[ , 1] <= censor.val)
/data_application/crossvalidation/run_fit_mcmc.R
no_license
arnabstatswithR/Arsenic-contamination-mapping
R
false
false
3,062
r
rm(list = ls()) mydir <- "/home/hazraa/Documents/0_arsenic_small/real_revision/impute" setwd(mydir) load("clean_data3.Rdata") available.cases <- sum(!is.na(Y[ , 1])) #-------------------- library(parallel) library(doParallel) ncores <- 20 cl <- makeCluster(ncores) registerDoParallel(cl) coverage.all <- parLapply(cl, 1:available.cases, function(rep.no){ library(fields) load("clean_data3.Rdata") total.cases <- nrow(S) censored.cases <- sum(is.na(Y[ , 1])) avoid.cases <- which(is.na(Y[ , 1])) available.cases <- sum(!is.na(Y[ , 1])) keep.cases <- which(!is.na(Y[ , 1])) source("auxfunctions.R") source("update_params.R") source("mcmc.R") test.cases <- keep.cases[rep.no] Y.test <- t(as.matrix(Y[test.cases, ])) S.test <- t(as.matrix(S[test.cases, ])) X.test <- t(as.matrix(X[test.cases, ])) train.cases <- sort(setdiff(1:nrow(S), test.cases)) Y.train <- Y.all[train.cases, ] S.train <- S[train.cases, ] X.train <- X[train.cases, ] censor.val <- log(0.5) censor.cases <- which(is.na(Y.train[ , 1])) Y.train[censor.cases, 1] <- censor.val iters <- 4000 burn <- 2000 fit.observed.GP <- mcmc.GP(Y.train, S.train, S.test, X.train, X.test, cutoff = censor.val, censor.cases, beta.init = NULL, Sigma.init = NULL, latent.init = NULL, phi.init = 0.1 * max(rdist.earth(S, miles= F)), r.init = 0.5, # priors sd.beta = 100, shape.Sigma = 0.01, rate.Sigma = 0.01, phi.upper = 0.5 * max(rdist.earth(S, miles= F)), # mcmc settings iters = iters, burn = burn, thin = 5) cis90 <- sapply(1:ncol(Y.test), function(var.no){ cis <- quantile(fit.observed.GP$Y.pred[-c(1:burn), 1, var.no], probs = c(0.05, 0.95)) cis}) cis95 <- sapply(1:ncol(Y.test), function(var.no){ cis <- quantile(fit.observed.GP$Y.pred[-c(1:burn), 1, var.no], probs = c(0.025, 0.975)) cis}) print(rep.no) list(cis90 = cis90, cis95 = cis95)}) stopCluster(cl) save(coverage.all, file = "coverage.all.Rdata") # set.seed(rep.no) # test.cases <- sort(sample(rest.cases, ntest)) # distmat.censor.rest <- rdist(S[censor.cases, ], S[rest.cases, ]) # 
test.cases <- rest.cases[which(colMeans(distmat.censor.rest) <= sort(colMeans(distmat.censor.rest))[ntest])] # censor.val <- as.vector(quantile(Y.all[ , 1], probs = censor.level)) # censor.cases <- which(Y.all[ , 1] <= censor.val) # rest.cases <- setdiff(1:nrow(S), censor.cases) # set.seed(rep.no) # test.cases <- sort(sample(rest.cases, ntest)) # train.cases <- sort(setdiff(rest.cases, test.cases)) # final.train.cases <- sort(union(censor.cases, train.cases)) # Y.train <- Y.all[final.train.cases, ] # Y.test <- Y.all[test.cases, ] # S.train <- S[final.train.cases, ] # S.test <- S[test.cases, ] # X.train <- X[final.train.cases, ] # X.test <- X[test.cases, ] # censor.cases <- which(Y.train[ , 1] <= censor.val)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/NBA.R \docType{data} \name{NBA} \alias{NBA} \title{NBA Players stats since 1950} \format{ A data frame with 24691 rows and 53 variables: \describe{ \item{Player}{name} \item{Pos}{Position} ... } } \source{ \url{https://www.kaggle.com/drgilermo/nba-players-stats/data} } \usage{ NBA } \description{ The data-set contains aggregate individual statistics for 67 NBA seasons. from basic box-score attributes such as points, assists, rebounds etc., to more advanced money-ball like features such as Value Over Replacement. } \keyword{datasets}
/man/NBA.Rd
no_license
SelinaDing/NBAplayer
R
false
true
617
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/NBA.R \docType{data} \name{NBA} \alias{NBA} \title{NBA Players stats since 1950} \format{ A data frame with 24691 rows and 53 variables: \describe{ \item{Player}{name} \item{Pos}{Position} ... } } \source{ \url{https://www.kaggle.com/drgilermo/nba-players-stats/data} } \usage{ NBA } \description{ The data-set contains aggregate individual statistics for 67 NBA seasons. from basic box-score attributes such as points, assists, rebounds etc., to more advanced money-ball like features such as Value Over Replacement. } \keyword{datasets}
# Auto-extracted example script for the CIProp() help page
# (presumably generated from the Rd file -- do not edit by hand).
library(tigerstats)


### Name: CIProp
### Title: Confidence Intervals (for one population proportion)
### Aliases: CIProp

### ** Examples

## Not run:
##D if (require(manipulate)) CIProp()
## End(Not run)
/data/genthat_extracted_code/tigerstats/examples/CIProp.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
211
r
library(tigerstats) ### Name: CIProp ### Title: Confidence Intervals (for one population proportion) ### Aliases: CIProp ### ** Examples ## Not run: ##D if (require(manipulate)) CIProp() ## End(Not run)
/lecture_4/lecture4-script_koko.R
no_license
kovacskokokornel/Rcoding_CEU
R
false
false
10,044
r
# Based on 1_Table1A_latinName_vs_ASFIS from 2016.
# Cross-check the latin species names in the EUMAP Table 1A linkage file
# against the ASFIS species list, correct/expand the names that do not
# match, derive a reporting name (first latin name of each list), and
# write the corrected linkage table back out.
library(plyr)

input_path <- "Q:/scientific-projects/eu-data-collection/Work_Plan/2019/scripts/EUMAP_table_1a/input"

linkage <- read.table(file.path(input_path, 'EUMAP_Table1A_Linkage_EUROSTAT and EC_TAC_V0.csv'),
                      sep = ";", header = TRUE, colClasses = ("character"))

# There is something wrong with the input file
# BUGFIX(review): the original subset `share` before `share` existed; it
# can only come from `linkage`, so derive it explicitly.
share <- subset(linkage, latinName != "Elasmobranchii")

# BUGFIX(review): the original read from the undefined `sharePath`;
# `input_path` is the only path defined in this script -- confirm the
# ASFIS file lives there.
ASFIS <- read.table(file.path(input_path, 'ASFIS_sp_Feb_2012.txt'),
                    header = TRUE, sep = "\t", as.is = TRUE)
ASFIS$latinName <- ASFIS$Scientific_name

# Compare latinName to ASFIS: left-join and collect the non-matches.
asfis <- merge(share, ASFIS, all.x = TRUE, by.x = c("latinName"), by.y = c("latinName"))
asfisNo <- subset(asfis, is.na(Scientific_name))

# Manual corrections found by running the spp program: names are the
# original literals, values the corrected/expanded ASFIS literals.
# (Replaces the original 15-level nested ifelse() -- same mapping.)
fix_latin <- c(
  "Ammodytidae" = "Ammodytes spp,Ammodytes tobianus",
  "Anarhichas spp." = "Anarhichas spp,Anarhichas minor,Anarhichas denticulatus,Anarhichas lupus",
  "Aphanopus spp." = "Aphanopus spp,Aphanopus intermedius,Aphanopus carbo",
  "Argentina spp." = "Argentina spp,Argentina sphyraena,Argentina silus,Argentina kagoshimae,Argentina elongata",
  "Aristeomorpha foliacea" = "Aristaeomorpha foliacea",
  "Beryx spp." = "Beryx spp,Beryx splendens,Beryx decadactylus",
  "Illex spp., Todarodes spp." = "Illex spp,Todarodes spp,Illex oxygonius,Illex argentinus,Illex coindetii,Illex illecebrosus,Todarodes angolensis,Todarodes pacificus,Todarodes filippovae,Todarodes sagittatus",
  "Capros aper" = "Capros aper,Caproidae",
  "Pandalus spp." = "Pandalus spp,Pandalus amplus,Pandalus nipponensis,Pandalus kessleri,Pandalus goniurus,Pandalus danae,Pandalus montagui,Pandalus jordani,Pandalus borealis,Pandalus platyceros,Pandalus hypsinotus",
  "Scomber colias" = "Scomber japonicus",
  "Scomber spp." = "Scomber spp,Scomber australasicus,Scomber scombrus,Scomber japonicus",
  "Solea vulgaris" = "Solea solea",
  "Trigla lucerna" = "Chelidonichthys lucerna",
  "Trisopterus esmarki" = "Trisopterus esmarkii",
  "Trisopterus spp." = "Trisopterus spp,Trisopterus luscus,Trisopterus minutus,Trisopterus esmarkii"
)
asfis$latinName_new <- asfis$latinName
hit <- asfis$latinName %in% names(fix_latin)
asfis$latinName_new[hit] <- unname(fix_latin[asfis$latinName[hit]])

# NOTE(review): "NA" is the *string* "NA" here, not a missing value;
# preserved from the original, but verify this is intended.
asfis$latinComm <- ifelse(is.na(asfis$X3A_CODE), "latinName_old not in ASFIS", "NA")
names(asfis)

# Swap the columns: keep the original literal as latinName_old and
# promote the corrected literal to latinName.
share_new <- rename(asfis, c("latinName" = "latinName_old", "latinName_new" = "latinName"))
names(share)
names(share_new)

# Reporting name = first entry of the comma-separated latin-name list.
# (seq_len() is safe for an empty table, unlike 1:nrow().)
for (i in seq_len(nrow(share_new))) {
  share_new$reportingName[i] <- unlist(strsplit(share_new$latinName[i], split = ","))[1]
}

share_new <- share_new[, c("region", "sppName", "latinName", "stockID", "area",
                           "areaBis", "TAC.area.description", "reportingName",
                           "latinName_old")]

# Re-check the corrected names against ASFIS; asfis_newNo holds any
# remaining non-matches.
asfis_new <- merge(share_new, ASFIS, all.x = TRUE,
                   by.x = c("latinName"), by.y = c("latinName"))
asfis_newNo <- subset(asfis_new, is.na(Scientific_name))

# BUGFIX(review): also replaced the undefined `sharePath` here.
write.table(share_new,
            file.path(input_path, "EUMAP_Table1A_Linkage_EUROSTAT and EC_TAC_DNK_v3.csv"),
            sep = ";", row.names = FALSE)
/1_table_1a_linkage_table_correcting_latinName.R
no_license
KirstenBirchHaakansson/EUMAP_git
R
false
false
3,600
r
#Based on 1_Table1A_latinName_vs_ASFIS from 2016 library(plyr) input_path <- "Q:/scientific-projects/eu-data-collection/Work_Plan/2019/scripts/EUMAP_table_1a/input" linkage <- read.table(file.path(input_path, 'EUMAP_Table1A_Linkage_EUROSTAT and EC_TAC_V0.csv'), sep = ";", header = T, colClasses = ("character")) #There is something wrong with the input file share<-subset(share, latinName!="Elasmobranchii") ASFIS <- read.table(file.path(sharePath,'ASFIS_sp_Feb_2012.txt'), header=TRUE, sep="\t", as.is=TRUE) ASFIS$latinName<-ASFIS$Scientific_name #Compare latinName to ASFIS asfis<-merge(share,ASFIS,all.x=T,by.x=c("latinName"),by.y=c("latinName")) asfisNo<-subset(asfis,is.na(Scientific_name)) #run spp program to find the correct species asfis$latinName_new<-ifelse(asfis$latinName=="Ammodytidae", "Ammodytes spp,Ammodytes tobianus", ifelse(asfis$latinName=="Anarhichas spp.", "Anarhichas spp,Anarhichas minor,Anarhichas denticulatus,Anarhichas lupus", ifelse(asfis$latinName=="Aphanopus spp.", "Aphanopus spp,Aphanopus intermedius,Aphanopus carbo", ifelse(asfis$latinName=="Argentina spp.","Argentina spp,Argentina sphyraena,Argentina silus,Argentina kagoshimae,Argentina elongata", ifelse(asfis$latinName=="Aristeomorpha foliacea", "Aristaeomorpha foliacea", ifelse(asfis$latinName=="Beryx spp.", "Beryx spp,Beryx splendens,Beryx decadactylus", ifelse(asfis$latinName=="Illex spp., Todarodes spp.", "Illex spp,Todarodes spp,Illex oxygonius,Illex argentinus,Illex coindetii,Illex illecebrosus,Todarodes angolensis,Todarodes pacificus,Todarodes filippovae,Todarodes sagittatus", ifelse(asfis$latinName=="Capros aper", "Capros aper,Caproidae", ifelse(asfis$latinName=="Pandalus spp.", "Pandalus spp,Pandalus amplus,Pandalus nipponensis,Pandalus kessleri,Pandalus goniurus,Pandalus danae,Pandalus montagui,Pandalus jordani,Pandalus borealis,Pandalus platyceros,Pandalus hypsinotus", ifelse(asfis$latinName=="Scomber colias", "Scomber japonicus", ifelse(asfis$latinName=="Scomber spp.", "Scomber 
spp,Scomber australasicus,Scomber scombrus,Scomber japonicus", ifelse(asfis$latinName=="Solea vulgaris","Solea solea", ifelse(asfis$latinName=="Trigla lucerna", "Chelidonichthys lucerna", ifelse(asfis$latinName=="Trisopterus esmarki", "Trisopterus esmarkii", ifelse(asfis$latinName=="Trisopterus spp.", "Trisopterus spp,Trisopterus luscus,Trisopterus minutus,Trisopterus esmarkii", asfis$latinName))))))))))))))) asfis$latinComm<-ifelse(is.na(asfis$X3A_CODE),"latinName_old not in ASFIS", "NA") names(asfis) share_new<-rename(asfis, c("latinName"="latinName_old", "latinName_new"="latinName")) names(share) names(share_new) for (i in 1:nrow(share_new)) { share_new$reportingName[i] <- unlist(strsplit(share_new$latinName[i], split=","))[1] } share_new<-share_new[, c("region","sppName","latinName","stockID","area","areaBis","TAC.area.description","reportingName","latinName_old")] asfis_new<-merge(share_new,ASFIS,all.x=T,by.x=c("latinName"),by.y=c("latinName")) asfis_newNo<-subset(asfis_new,is.na(Scientific_name)) write.table(share_new,file.path(sharePath,"EUMAP_Table1A_Linkage_EUROSTAT and EC_TAC_DNK_v3.csv"), sep=";",row.names=FALSE)
\name{showDF} \alias{showDF} \title{Create an HTML table using DT package with fixed columns} \usage{ showDF(data, ...) } \arguments{ \item{data}{data object (either a matrix or a data frame).} \item{...}{Additional arguments used by dDT::atatable() function.} } \value{ returns an object of \code{datatables} and \code{htmlwidget}. } \description{ Create an HTML table using DT package with fixed columns } \examples{ showDF(iris) }
/man/showDF.Rd
no_license
tgirke/systemPipeR
R
false
false
435
rd
\name{showDF} \alias{showDF} \title{Create an HTML table using DT package with fixed columns} \usage{ showDF(data, ...) } \arguments{ \item{data}{data object (either a matrix or a data frame).} \item{...}{Additional arguments used by dDT::atatable() function.} } \value{ returns an object of \code{datatables} and \code{htmlwidget}. } \description{ Create an HTML table using DT package with fixed columns } \examples{ showDF(iris) }
## Asociamos los literales de calle de catastro a los de Ivima para cada calle de este último listado. ## así podremos llevar el año de construcción de catastro a Ivima para utilizarlo como predictor en el ## modelo. También asociaremos las coordenadas xy de catastro para poblar el barrio en Ivima desde el ## shape de barrios library(data.table) library(stringr) library(tidyr) library(rgdal) library(readxl) clean.data.dir <- '../../../data/clean' file.fincas.ivima <- paste(clean.data.dir, "/IVIMA/fincas_ivima.csv", sep = "") file.fincas.cat <- paste(clean.data.dir, "/ficheros_preparados/BI_28_900_U_2016-01-23.csv.gz", sep = "") file.tipos.via.cat <- paste(clean.data.dir, "/CAT/tipo_via_cat.csv", sep = "") file.calles.cruzadas <- paste(clean.data.dir, "/IVIMA/calles_cruzadas.csv", sep = "") ruta.shp <- str_c(clean.data.dir, '/SHP/Barrios Madrid') # El objetivo es normalizar las direcciones de Ivima y catastro para poder relacionar los datos de ambas fuentes en base a # tipo_via, nombre_via, numero_finca, letra finca fincas_ivima <- data.table(read.table(file = file.fincas.ivima, header = T, comment.char = "", sep = "^")) fincas_catastro <- data.table(read.table(gzfile(file.fincas.cat), header = T, sep = '^', encoding = 'utf-8')) fincas_catastro <- data.table(fincas_catastro) fincas_ivima <- data.table(fincas_ivima) # Separamos el tipo de via del nombre de la via en los datos IVIMA fincas_ivima$tipo_via <- data.frame(str_split(fincas_ivima$Calle, " ", n=2, simplify = T))[[1]] fincas_ivima$nombre_via <- data.frame(str_split(fincas_ivima$Calle, " ", n=2, simplify = T))[[2]] ## Preparamos los tipos de via de catastro con su descripción en formato tabla tipos_via_cat <- data.table(read.csv(file.tipos.via.cat, header = T, sep = ';')) tipos_via_cat <- cbind(tipos_via_cat, data.table(str_split(tipos_via_cat$descripcion, ",", simplify = T))) tipos_via_cat$V1 <- str_trim(tipos_via_cat$V1) tipos_via_cat$V2 <- str_trim(tipos_via_cat$V2) tipos_via_cat <- 
melt(tipos_via_cat,c("cod_tipo_via", "descripcion"), na.rm = T) tipos_via_cat <- tipos_via_cat[value != ""] tipos_via_cat$variable <- NULL colnames(tipos_via_cat) <- c("cod_tipo_via", "descripcion", "tipo_via") # Eliminamos las tildes del tipo de via Ivima y del nombre de via fincas_ivima$tipo_via <- chartr('ÁÉÍÓÚ','AEIOU', fincas_ivima$tipo_via) fincas_ivima$nombre_via <- chartr('ÁÉÍÓÚ','AEIOU', fincas_ivima$nombre_via) ## Verificamos que los tipos de via de IVIMA y catastro coinciden sus literales tipos_via_ivima <- data.table(table(fincas_ivima$tipo_via)) merge(tipos_via_ivima, tipos_via_cat,all.x = T, by.x = "V1", by.y = "tipo_via") ## Hay 1 finca sin tipo de via y 7 en "BULEVAR", que no existe en castastro que no podremos usar en el modelo ## Llevamos el tipo de via de catastro a la tabla de IVIMA tipos_via_cat$descripcion <- NULL colnames(tipos_via_cat) = c("tipo_via_cat", "tipo_via") fincas_ivima <- merge(fincas_ivima, tipos_via_cat, all.x = T, by.x = "tipo_via", by.y = "tipo_via") ## Eliminamos las filas de Ivima que no tienen tipo de via correcto de catastro fincas_ivima <- drop_na(fincas_ivima, tipo_via_cat) ## Preparamos las matrices para cruzar los literales de calle de catastro y de ivima callejero_ivima <- fincas_ivima[, .N, by = .(tipo_via_cat, nombre_via)] callejero_cat <- fincas_catastro[, .N, by = .(tipo_via, nombre_via)] colnames(callejero_ivima) <- c("tipo_via", "nombre_via_ivima", "N") colnames(callejero_cat) <- c("tipo_via", "nombre_via_cat", "N") callejero_ivima$tipo_via <- as.character(callejero_ivima$tipo_via) callejero_ivima$nombre_via_ivima <- as.character(callejero_ivima$nombre_via_ivima) callejero_cat$tipo_via <- as.character(callejero_cat$tipo_via) callejero_cat$nombre_via_cat <- as.character(callejero_cat$nombre_via_cat) ### Hay que poner un if, el proceso manual sólo se ejecuta si no lo hemos ejecutado aún, creando el fichero de salida ### si el fichero de salida ya existe, no ejecutamos el proceso manual. 
### Goal: build "calles_cruzadas", which maps the Catastro street literals to
### the IVIMA ones so the IVIMA property table can be enriched.
if (file.exists(file.calles.cruzadas) == F) {
  cruce_directo <- merge(callejero_ivima, callejero_cat, all.x = T, by.x = c("tipo_via", "nombre_via_ivima"), by.y = c("tipo_via", "nombre_via_cat"))
  ## Separate the streets whose Catastro and IVIMA literals are identical.
  calles_cruzadas <- cruce_directo[is.na(cruce_directo$N.y) == F]
  calles_pendientes <- cruce_directo[is.na(cruce_directo$N.y) == T]
  calles_pendientes$N.x <- NULL
  calles_pendientes$N.y <- NULL
  calles_cruzadas$N.x <- NULL
  calles_cruzadas$N.y <- NULL
  calles_cruzadas$nombre_via_cat <- calles_cruzadas$nombre_via_ivima
  # Cartesian candidate matrix: every pending IVIMA street paired with every
  # Catastro street of the same street type.
  matriz_lev <- merge(calles_pendientes, callejero_cat, all.x = T, by.x = c("tipo_via"), by.y = c("tipo_via"), allow.cartesian = T)
  matriz_lev$N <- NULL
  # Normalised edit distance per IVIMA street.
  # NOTE(review): max(c(nchar(...), nchar(...))) evaluates to one scalar over
  # the whole group, not per candidate pair, and adist() on two vectors
  # returns a matrix -- presumably a per-pair normalisation was intended;
  # confirm before reusing.
  matriz_lev <- matriz_lev[, distancia := adist(nombre_via_ivima, nombre_via_cat)/max(c(nchar(nombre_via_ivima), nchar(nombre_via_cat))), by = "nombre_via_ivima"]
  # 'ties' relies on partial matching of rank()'s 'ties.method' argument.
  matriz_lev <- matriz_lev[, ranking := rank(distancia, ties = "random"), by = "nombre_via_ivima"]
  ## Candidate matches are reviewed manually/visually: mark the correct ones,
  ## append them to calles_cruzadas, remove them from the matrix and repeat
  ## with the next ranking until the normalisation is good enough.
  matriz_analisis <- matriz_lev[ranking==1]
  if (nrow(matriz_analisis) >0) {
    # NOTE(review): result of order() subset is discarded (no-op); the actual
    # ordering is done by setorder() below.
    matriz_analisis[order(matriz_analisis$ranking)]
    setcolorder(matriz_analisis, c("tipo_via","nombre_via_ivima", "nombre_via_cat", "ranking", "distancia"))
    setorder(matriz_analisis, tipo_via, nombre_via_ivima, ranking)
    # Every row starts flagged correcta=1; during the interactive edit(),
    # manually blank the 1 on rows that do not match.
    matriz_analisis$correcta <- 1
    matriz_analisis <- edit(matriz_analisis)
    calles_cruzadas <- rbind(calles_cruzadas, matriz_analisis[is.na(matriz_analisis$correcta) == F, c("tipo_via", "nombre_via_ivima", "nombre_via_cat")])
  }
  # Drop the already-matched streets and review the ranking-2 candidates.
  matriz_analisis <- matriz_lev[ranking==2]
  if (nrow(matriz_analisis)>0) {
    calles_cruzadas$mark <- 1
    matriz_analisis <- merge(matriz_analisis, calles_cruzadas, all.x = T, by.x = c("tipo_via", "nombre_via_ivima"), by.y = c("tipo_via", "nombre_via_ivima"))
    matriz_analisis <- matriz_analisis[is.na(matriz_analisis$mark) == T, .(tipo_via,nombre_via_ivima, nombre_via_cat.x, distancia, ranking)]
    colnames(matriz_analisis) <- c("tipo_via", "nombre_via_ivima", "nombre_via_cat", "distancia", "ranking")
    matriz_analisis$correcta <- 1
    matriz_analisis <- edit(matriz_analisis)
    matriz_analisis$mark <- 1
    calles_cruzadas <- rbind(calles_cruzadas, matriz_analisis[is.na(matriz_analisis$correcta) == F, c("tipo_via", "nombre_via_ivima", "nombre_via_cat", "mark")])
  }
  # Drop the already-matched streets and review the ranking-3 candidates.
  matriz_analisis <- matriz_lev[ranking==3]
  if (nrow(matriz_analisis)>0){
    calles_cruzadas$mark <- 1
    matriz_analisis <- merge(matriz_analisis, calles_cruzadas, all.x = T, by.x = c("tipo_via", "nombre_via_ivima"), by.y = c("tipo_via", "nombre_via_ivima"))
    matriz_analisis <- matriz_analisis[is.na(matriz_analisis$mark) == T, .(tipo_via,nombre_via_ivima, nombre_via_cat.x, distancia, ranking)]
    colnames(matriz_analisis) <- c("tipo_via", "nombre_via_ivima", "nombre_via_cat", "distancia", "ranking")
    matriz_analisis$correcta <- 1
    matriz_analisis <- edit(matriz_analisis)
    matriz_analisis$mark <- 1
    calles_cruzadas <- rbind(calles_cruzadas, matriz_analisis[is.na(matriz_analisis$correcta) == F, c("tipo_via", "nombre_via_ivima", "nombre_via_cat", "mark")])
  }
  ## The gain from going further down the ranking is very small. Stop
  ## iterating and persist the result.
  calles_cruzadas$mark <- NULL
  write.csv(calles_cruzadas, file.calles.cruzadas, row.names = F)
} else {
  calles_cruzadas <- data.table(read.table(file = file.calles.cruzadas, header = T, comment.char = "", sep = ","))
}
colnames(calles_cruzadas) <- c("tipo_via_cat", "nombre_via", "nombre_via_cat")
# Enrich the IVIMA table with the matched Catastro street name.
fincas.ivima.enriquecidas <- merge(fincas_ivima, calles_cruzadas, by.x = c("tipo_via_cat", "nombre_via"), by.y=c("tipo_via_cat", "nombre_via"))
# Normalised door number in both sources: zero-padded number + letter.
fincas.ivima.enriquecidas$numfinca <- str_c(str_pad(fincas.ivima.enriquecidas$num_pol,width = 4,side = 'left',pad = '0'), fincas.ivima.enriquecidas$letra)
fincas_catastro$numfinca <- str_c(str_pad(fincas_catastro$num_pol1, width = 4, side = 'left', pad = '0'), fincas_catastro$bis)
# One row per doorway, keeping the most recent construction year when a
# doorway appears with several (x, y, anio_mejor) combinations.
portalero.catastro <- fincas_catastro[, .N, by = .(tipo_via, nombre_via, numfinca, x_coor, y_coor, anio_mejor)]
portalero.catastro <- portalero.catastro[, ranking := rank(-anio_mejor, ties = "random"), by = c("tipo_via","nombre_via","numfinca")]
portalero.catastro <- portalero.catastro[ranking==1]
portalero.ivima <- fincas.ivima.enriquecidas[, .N, by = .(tipo_via_cat, nombre_via_cat, numfinca)]
## Enrich the Catastro doorway table with the neighbourhood ("barrio"),
## obtained by spatial join with the Madrid neighbourhoods shapefile.
## The ED50 layer is available at
## http://www.madrid.org/nomecalles/DescargaBDTCorte.icm
## (territorial delimitations, neighbourhoods).
## Load the neighbourhood layer.
## The layer is in projection EPSG:23030, ED50/UTM30.
barrios.shp <- readOGR(dsn = ruta.shp, layer = "200001465", encoding = "latin-1")
proj4string(barrios.shp) <- CRS("+init=epsg:23030")
# Build the point layer from the x/y of the Catastro doorway table.
### Shape directly importable into GIS (only doorways with coordinates),
### projection EPSG:25830.
portalero.cat.con.coor <- portalero.catastro[portalero.catastro$x_coor != 0,]
coordenadas <- as.matrix(portalero.cat.con.coor[,.(x_coor, y_coor)])
capa.puntos <- SpatialPointsDataFrame(coordenadas, portalero.cat.con.coor, proj4string = CRS("+init=epsg:25830"), coords.nrs = c(4, 5), match.ID = T)
capa.puntos <- spTransform(capa.puntos, CRS("+init=epsg:23030"))
## Attach the neighbourhood attributes to the doorway data frame
## (point-in-polygon spatial join).
portales.con.barrio <- over(capa.puntos, barrios.shp)
portales.con.barrio$indice <- rownames(portales.con.barrio)
capa.puntos$indice <- rownames(capa.puntos@data)
portales.con.barrio <- merge(capa.puntos@data, portales.con.barrio, by.x = "indice", by.y = "indice")
## Drop uninteresting columns, populate the neighbourhood and keep only
## residential ('V') Catastro units.
fincas_catastro <- fincas_catastro[clave_grupo_BI=='V',.(parcela_cat, cvia_DGC, tipo_via, nombre_via, num_pol1, bis, num_pol2, bis2, Km, bloque, escalera, planta, puerta, dir_resto, m2_BI, m2_solares_sin_div_hor, coef_finca, garage, anio_mejor, numfinca)]
portales.con.barrio$idbarrio <- data.frame(str_split(portales.con.barrio$DESBDT, " ", n=2, simplify = T))[[1]]
portales.con.barrio$desbarrio <- data.frame(str_split(portales.con.barrio$DESBDT, " ", n=2, simplify = T))[[2]]
portales.con.barrio <- data.table(portales.con.barrio[,c("tipo_via", "nombre_via", "numfinca", "x_coor", "y_coor", "idbarrio", "desbarrio")])
fincas_catastro <- merge(fincas_catastro, portales.con.barrio, by.x = c("tipo_via", "nombre_via", "numfinca"), by.y = c("tipo_via", "nombre_via", "numfinca"))
fincas.ivima.enriquecidas <- merge(fincas.ivima.enriquecidas,
                                   portales.con.barrio, by.x =c("tipo_via_cat", "nombre_via_cat", "numfinca"), by.y =c("tipo_via", "nombre_via", "numfinca") )
fincas.ivima.enriquecidas <- fincas.ivima.enriquecidas[,.(tipo_via_cat, nombre_via_cat, numfinca, metros, habitaciones, Garaje, Precio, eur_metro, eur_metro_round, planta_cat, num_pol, letra, x_coor, y_coor, idbarrio, desbarrio)]
# Most recent construction year per doorway, carried to the IVIMA table.
anio.max.finca <- fincas_catastro[, .(anio_max = max(anio_mejor)), by = .(tipo_via, nombre_via, numfinca)]
fincas.ivima.enriquecidas <- merge(fincas.ivima.enriquecidas, anio.max.finca, by.x =c("tipo_via_cat", "nombre_via_cat", "numfinca"), by.y =c("tipo_via", "nombre_via", "numfinca") )
plantas_cat <- fincas_catastro[,.N, by = .(planta)]
plantas_ivima <- fincas.ivima.enriquecidas[,.N, by = .(planta_cat)]
## IVIMA has little floor variety. To extend the model to every Catastro
## dwelling it is better to convert the categorical floor ("planta") into an
## integer height ("altura") so the model generalises.
## Catastro floors are mapped to a number, later carried to the IVIMA units.
## The number of floors per building is computed so attics can be given a
## floor number. Floor literals that start with a digit are numeric; of the
## rest, those starting with A are attics and everything else is treated as
## ground floor.
## Extract the numbers.
plantas_cat$primer_caracter <- str_sub(plantas_cat$planta,1,1)
plantas_cat$primer_caracter <- str_extract(plantas_cat$primer_caracter, '[a-zA-Z]')
plantas_cat$altura <- ifelse(is.na(plantas_cat$primer_caracter)==F,plantas_cat$primer_caracter,str_extract(plantas_cat$planta, '[+-]*[0-9]+'))
plantas_cat[grep('[B-Z]', altura), altura:='00']
## Attic height must be computed relative to its own building.
plantas_cat[,c('N','primer_caracter'):=NULL]
fincas_catastro <- merge(fincas_catastro, plantas_cat, by.x = 'planta', by.y = 'planta')
plantas_cat <- fincas_catastro[,.N, by = .(tipo_via, nombre_via, numfinca, altura)]
# Number of distinct heights per building, used as the attic's floor number.
plantas_cat <- plantas_cat[,.N, by = .(tipo_via, nombre_via, numfinca)]
setnames(plantas_cat, 'N', 'alt_max')
plantas_cat$atico <- 'A'
fincas_catastro <- merge(fincas_catastro, plantas_cat, all.x = T, by.x=c('tipo_via', 'nombre_via', 'numfinca','altura'), by.y = c('tipo_via', 'nombre_via', 'numfinca','atico'))
fincas_catastro[altura == 'A', altura:=as.character(alt_max)]
fincas_catastro$alt_max <- NULL
fincas_catastro$altura <- as.integer(fincas_catastro$altura)
plantas_cat <- fincas_catastro[,.N, by = .(tipo_via, nombre_via, numfinca,planta, altura)]
plantas_cat$N <- NULL
setnames(plantas_cat, c('planta','tipo_via','nombre_via'), c('planta_cat','tipo_via_cat','nombre_via_cat'))
## Convert the IVIMA floor to a number with the same criterion as Catastro;
## only attic heights are recovered from Catastro.
fincas.ivima.enriquecidas$primer_caracter_planta <- str_sub(fincas.ivima.enriquecidas$planta_cat,1,1)
fincas.ivima.enriquecidas$primer_caracter_planta <- str_extract(fincas.ivima.enriquecidas$primer_caracter_planta, '[a-zA-Z]')
# NOTE(review): '$planta' appears to rely on data.frame partial matching
# against the 'planta_cat' column -- confirm, and prefer the full name.
fincas.ivima.enriquecidas$altura <- ifelse(is.na(fincas.ivima.enriquecidas$primer_caracter_planta)==F, fincas.ivima.enriquecidas$primer_caracter_planta, str_extract(fincas.ivima.enriquecidas$planta, '[+-]*[0-9]+'))
fincas.ivima.enriquecidas[grep('[B-Z]', altura), altura:='00']
fincas.ivima.enriquecidas$primer_caracter_planta <- NULL
aticos.cat <- plantas_cat[grep('^A',planta_cat)]
aticos.cat$planta_cat <- 'A'
setnames(aticos.cat, c('altura','planta_cat'), c('altura_num','altura'))
fincas.ivima.enriquecidas <- merge(fincas.ivima.enriquecidas, aticos.cat, all.x=T, by.x=c('tipo_via_cat', 'nombre_via_cat', 'numfinca', 'altura'), by.y=c('tipo_via_cat', 'nombre_via_cat', 'numfinca', 'altura'))
fincas.ivima.enriquecidas$altura_num <- as.character(fincas.ivima.enriquecidas$altura_num)
fincas.ivima.enriquecidas[is.na(altura_num)==T & altura != 'A', altura_num := altura]
fincas.ivima.enriquecidas$altura_num <- as.numeric(fincas.ivima.enriquecidas$altura_num)
# Fix the neighbourhood literal: the shapefile literals contain badly encoded
# characters. The neighbourhood list comes from
# http://www.madrid.org/iestadis/fijas/clasificaciones/descarga/cobar15.xls
listado.barrios <- data.table(read_excel(str_c(clean.data.dir, '/SHP/Barrios Madrid/cobar15.xls'),sheet = 'cobar15'))
listado.barrios$idbarrio <- str_c(listado.barrios$`Código distrito`, listado.barrios$`Código barrio`)
listado.barrios <- listado.barrios[, .N, by = .(idbarrio, `Literal barrio`)]
listado.barrios$N <- NULL
setnames(listado.barrios, 'Literal barrio', 'barrio')
fincas_catastro <- merge(fincas_catastro, listado.barrios, by.x = 'idbarrio', by.y = 'idbarrio')
fincas_catastro$desbarrio <- NULL
# Keep only rows with a usable height.
fincas_catastro <- fincas_catastro[is.na(fincas_catastro$altura) == F,]
fincas.ivima.enriquecidas <- merge(fincas.ivima.enriquecidas, listado.barrios, by.x = 'idbarrio', by.y = 'idbarrio')
fincas.ivima.enriquecidas$desbarrio <- NULL
fincas.ivima.enriquecidas <- fincas.ivima.enriquecidas[is.na(fincas.ivima.enriquecidas$altura_num) == F,]
fincas.ivima.enriquecidas$idbarrio <- as.factor(fincas.ivima.enriquecidas$idbarrio)
## Replace the Catastro street-type abbreviation with a recognisable literal.
tipos_via_cat <- data.table(read.csv(file.tipos.via.cat, header = T, sep = ';'))
tipos_via_cat$descripcion <- str_replace(tipos_via_cat$descripcion, ', ', '-')
colnames(tipos_via_cat) = c('tipo_via', 'descripcion')
fincas_catastro <- merge(fincas_catastro, tipos_via_cat, by.x = 'tipo_via', by.y ='tipo_via')
# Persist the model-ready tables.
write.csv(fincas_catastro, str_c(clean.data.dir, '/modelo/fincas_catastro.csv'), row.names = F, fileEncoding = 'UTF-8')
write.csv(fincas.ivima.enriquecidas, str_c(clean.data.dir, '/modelo/fincas_ivima.csv'), row.names = F, fileEncoding = 'UTF-8')
/code/clean/R/01_Cruce_calles_fecha_edificios.R
no_license
gachet/Proyecto_MDS
R
false
false
17,448
r
## Link Catastro street literals to the IVIMA ones for every street in the
## IVIMA listing, so the Catastro construction year can be carried over to
## IVIMA and used as a predictor in the model. The Catastro x/y coordinates
## are also attached so the neighbourhood ("barrio") can be populated from
## the neighbourhoods shapefile.
library(data.table)
library(stringr)
library(tidyr)
library(rgdal)
library(readxl)

# Input/output locations (relative to the project layout).
clean.data.dir <- '../../../data/clean'
file.fincas.ivima <- paste(clean.data.dir, "/IVIMA/fincas_ivima.csv", sep = "")
file.fincas.cat <- paste(clean.data.dir, "/ficheros_preparados/BI_28_900_U_2016-01-23.csv.gz", sep = "")
file.tipos.via.cat <- paste(clean.data.dir, "/CAT/tipo_via_cat.csv", sep = "")
file.calles.cruzadas <- paste(clean.data.dir, "/IVIMA/calles_cruzadas.csv", sep = "")
ruta.shp <- str_c(clean.data.dir, '/SHP/Barrios Madrid')

# Goal: normalise IVIMA and Catastro addresses so both sources can be joined
# on (tipo_via, nombre_via, numero_finca, letra finca).
fincas_ivima <- data.table(read.table(file = file.fincas.ivima, header = T, comment.char = "", sep = "^"))
fincas_catastro <- data.table(read.table(gzfile(file.fincas.cat), header = T, sep = '^', encoding = 'utf-8'))
fincas_catastro <- data.table(fincas_catastro)
fincas_ivima <- data.table(fincas_ivima)

# Split the street type from the street name in the IVIMA data.
fincas_ivima$tipo_via <- data.frame(str_split(fincas_ivima$Calle, " ", n=2, simplify = T))[[1]]
fincas_ivima$nombre_via <- data.frame(str_split(fincas_ivima$Calle, " ", n=2, simplify = T))[[2]]

## Prepare the Catastro street types with their description in table form.
tipos_via_cat <- data.table(read.csv(file.tipos.via.cat, header = T, sep = ';'))
tipos_via_cat <- cbind(tipos_via_cat, data.table(str_split(tipos_via_cat$descripcion, ",", simplify = T)))
tipos_via_cat$V1 <- str_trim(tipos_via_cat$V1)
tipos_via_cat$V2 <- str_trim(tipos_via_cat$V2)
tipos_via_cat <-
  melt(tipos_via_cat,c("cod_tipo_via", "descripcion"), na.rm = T)
# Keep only non-empty street-type literals and rename to a clean schema.
tipos_via_cat <- tipos_via_cat[value != ""]
tipos_via_cat$variable <- NULL
colnames(tipos_via_cat) <- c("cod_tipo_via", "descripcion", "tipo_via")

# Strip accents from the IVIMA street type and street name so literals can be
# compared against Catastro.
fincas_ivima$tipo_via <- chartr('ÁÉÍÓÚ','AEIOU', fincas_ivima$tipo_via)
fincas_ivima$nombre_via <- chartr('ÁÉÍÓÚ','AEIOU', fincas_ivima$nombre_via)

## Check that the IVIMA and Catastro street-type literals match.
tipos_via_ivima <- data.table(table(fincas_ivima$tipo_via))
# Interactive sanity check: the merge result is printed, not stored.
merge(tipos_via_ivima, tipos_via_cat,all.x = T, by.x = "V1", by.y = "tipo_via")
## There is 1 property without street type and 7 on "BULEVAR", which does not
## exist in Catastro; those cannot be used in the model.

## Carry the Catastro street type over to the IVIMA table.
tipos_via_cat$descripcion <- NULL
colnames(tipos_via_cat) = c("tipo_via_cat", "tipo_via")
fincas_ivima <- merge(fincas_ivima, tipos_via_cat, all.x = T, by.x = "tipo_via", by.y = "tipo_via")

## Drop IVIMA rows without a valid Catastro street type.
fincas_ivima <- drop_na(fincas_ivima, tipo_via_cat)

## Build the tables used to cross the Catastro and IVIMA street literals.
callejero_ivima <- fincas_ivima[, .N, by = .(tipo_via_cat, nombre_via)]
callejero_cat <- fincas_catastro[, .N, by = .(tipo_via, nombre_via)]
colnames(callejero_ivima) <- c("tipo_via", "nombre_via_ivima", "N")
colnames(callejero_cat) <- c("tipo_via", "nombre_via_cat", "N")
callejero_ivima$tipo_via <- as.character(callejero_ivima$tipo_via)
callejero_ivima$nombre_via_ivima <- as.character(callejero_ivima$nombre_via_ivima)
callejero_cat$tipo_via <- as.character(callejero_cat$tipo_via)
callejero_cat$nombre_via_cat <- as.character(callejero_cat$nombre_via_cat)

### The manual matching below only runs if it has not been run before (i.e.
### the output file does not exist yet); otherwise the saved result is reused.
### Goal: build "calles_cruzadas", which maps the Catastro street literals to
### the IVIMA ones so the IVIMA property table can be enriched.
if (file.exists(file.calles.cruzadas) == F) {
  cruce_directo <- merge(callejero_ivima, callejero_cat, all.x = T, by.x = c("tipo_via", "nombre_via_ivima"), by.y = c("tipo_via", "nombre_via_cat"))
  ## Separate the streets whose Catastro and IVIMA literals are identical.
  calles_cruzadas <- cruce_directo[is.na(cruce_directo$N.y) == F]
  calles_pendientes <- cruce_directo[is.na(cruce_directo$N.y) == T]
  calles_pendientes$N.x <- NULL
  calles_pendientes$N.y <- NULL
  calles_cruzadas$N.x <- NULL
  calles_cruzadas$N.y <- NULL
  calles_cruzadas$nombre_via_cat <- calles_cruzadas$nombre_via_ivima
  # Cartesian candidate matrix: every pending IVIMA street paired with every
  # Catastro street of the same street type.
  matriz_lev <- merge(calles_pendientes, callejero_cat, all.x = T, by.x = c("tipo_via"), by.y = c("tipo_via"), allow.cartesian = T)
  matriz_lev$N <- NULL
  # Normalised edit distance per IVIMA street.
  # NOTE(review): max(c(nchar(...), nchar(...))) evaluates to one scalar over
  # the whole group, not per candidate pair, and adist() on two vectors
  # returns a matrix -- presumably a per-pair normalisation was intended;
  # confirm before reusing.
  matriz_lev <- matriz_lev[, distancia := adist(nombre_via_ivima, nombre_via_cat)/max(c(nchar(nombre_via_ivima), nchar(nombre_via_cat))), by = "nombre_via_ivima"]
  # 'ties' relies on partial matching of rank()'s 'ties.method' argument.
  matriz_lev <- matriz_lev[, ranking := rank(distancia, ties = "random"), by = "nombre_via_ivima"]
  ## Candidate matches are reviewed manually/visually: mark the correct ones,
  ## append them to calles_cruzadas, remove them from the matrix and repeat
  ## with the next ranking until the normalisation is good enough.
  matriz_analisis <- matriz_lev[ranking==1]
  if (nrow(matriz_analisis) >0) {
    # NOTE(review): result of order() subset is discarded (no-op); the actual
    # ordering is done by setorder() below.
    matriz_analisis[order(matriz_analisis$ranking)]
    setcolorder(matriz_analisis, c("tipo_via","nombre_via_ivima", "nombre_via_cat", "ranking", "distancia"))
    setorder(matriz_analisis, tipo_via, nombre_via_ivima, ranking)
    # Every row starts flagged correcta=1; during the interactive edit(),
    # manually blank the 1 on rows that do not match.
    matriz_analisis$correcta <- 1
    matriz_analisis <- edit(matriz_analisis)
    calles_cruzadas <- rbind(calles_cruzadas, matriz_analisis[is.na(matriz_analisis$correcta) == F, c("tipo_via", "nombre_via_ivima", "nombre_via_cat")])
  }
  # Drop the already-matched streets and review the ranking-2 candidates.
  matriz_analisis <- matriz_lev[ranking==2]
  if (nrow(matriz_analisis)>0) {
    calles_cruzadas$mark <- 1
    matriz_analisis <- merge(matriz_analisis, calles_cruzadas, all.x = T, by.x = c("tipo_via", "nombre_via_ivima"), by.y = c("tipo_via", "nombre_via_ivima"))
    matriz_analisis <- matriz_analisis[is.na(matriz_analisis$mark) == T, .(tipo_via,nombre_via_ivima, nombre_via_cat.x, distancia, ranking)]
    colnames(matriz_analisis) <- c("tipo_via", "nombre_via_ivima", "nombre_via_cat", "distancia", "ranking")
    matriz_analisis$correcta <- 1
    matriz_analisis <- edit(matriz_analisis)
    matriz_analisis$mark <- 1
    calles_cruzadas <- rbind(calles_cruzadas, matriz_analisis[is.na(matriz_analisis$correcta) == F, c("tipo_via", "nombre_via_ivima", "nombre_via_cat", "mark")])
  }
  # Drop the already-matched streets and review the ranking-3 candidates.
  matriz_analisis <- matriz_lev[ranking==3]
  if (nrow(matriz_analisis)>0){
    calles_cruzadas$mark <- 1
    matriz_analisis <- merge(matriz_analisis, calles_cruzadas, all.x = T, by.x = c("tipo_via", "nombre_via_ivima"), by.y = c("tipo_via", "nombre_via_ivima"))
    matriz_analisis <- matriz_analisis[is.na(matriz_analisis$mark) == T, .(tipo_via,nombre_via_ivima, nombre_via_cat.x, distancia, ranking)]
    colnames(matriz_analisis) <- c("tipo_via", "nombre_via_ivima", "nombre_via_cat", "distancia", "ranking")
    matriz_analisis$correcta <- 1
    matriz_analisis <- edit(matriz_analisis)
    matriz_analisis$mark <- 1
    calles_cruzadas <- rbind(calles_cruzadas, matriz_analisis[is.na(matriz_analisis$correcta) == F, c("tipo_via", "nombre_via_ivima", "nombre_via_cat", "mark")])
  }
  ## The gain from going further down the ranking is very small. Stop
  ## iterating and persist the result.
  calles_cruzadas$mark <- NULL
  write.csv(calles_cruzadas, file.calles.cruzadas, row.names = F)
} else {
  calles_cruzadas <- data.table(read.table(file = file.calles.cruzadas, header = T, comment.char = "", sep = ","))
}
colnames(calles_cruzadas) <- c("tipo_via_cat", "nombre_via", "nombre_via_cat")
# Enrich the IVIMA table with the matched Catastro street name.
fincas.ivima.enriquecidas <- merge(fincas_ivima, calles_cruzadas, by.x = c("tipo_via_cat", "nombre_via"), by.y=c("tipo_via_cat", "nombre_via"))
# Normalised door number in both sources: zero-padded number + letter.
fincas.ivima.enriquecidas$numfinca <- str_c(str_pad(fincas.ivima.enriquecidas$num_pol,width = 4,side = 'left',pad = '0'), fincas.ivima.enriquecidas$letra)
fincas_catastro$numfinca <- str_c(str_pad(fincas_catastro$num_pol1, width = 4, side = 'left', pad = '0'), fincas_catastro$bis)
# One row per doorway, keeping the most recent construction year when a
# doorway appears with several (x, y, anio_mejor) combinations.
portalero.catastro <- fincas_catastro[, .N, by = .(tipo_via, nombre_via, numfinca, x_coor, y_coor, anio_mejor)]
portalero.catastro <- portalero.catastro[, ranking := rank(-anio_mejor, ties = "random"), by = c("tipo_via","nombre_via","numfinca")]
portalero.catastro <- portalero.catastro[ranking==1]
portalero.ivima <- fincas.ivima.enriquecidas[, .N, by = .(tipo_via_cat, nombre_via_cat, numfinca)]
## Enrich the Catastro doorway table with the neighbourhood ("barrio"),
## obtained by spatial join with the Madrid neighbourhoods shapefile.
## The ED50 layer is available at
## http://www.madrid.org/nomecalles/DescargaBDTCorte.icm
## (territorial delimitations, neighbourhoods).
## Load the neighbourhood layer.
## The layer is in projection EPSG:23030, ED50/UTM30.
barrios.shp <- readOGR(dsn = ruta.shp, layer = "200001465", encoding = "latin-1")
proj4string(barrios.shp) <- CRS("+init=epsg:23030")
# Build the point layer from the x/y of the Catastro doorway table.
### Shape directly importable into GIS (only doorways with coordinates),
### projection EPSG:25830.
portalero.cat.con.coor <- portalero.catastro[portalero.catastro$x_coor != 0,]
coordenadas <- as.matrix(portalero.cat.con.coor[,.(x_coor, y_coor)])
capa.puntos <- SpatialPointsDataFrame(coordenadas, portalero.cat.con.coor, proj4string = CRS("+init=epsg:25830"), coords.nrs = c(4, 5), match.ID = T)
capa.puntos <- spTransform(capa.puntos, CRS("+init=epsg:23030"))
## Attach the neighbourhood attributes to the doorway data frame
## (point-in-polygon spatial join).
portales.con.barrio <- over(capa.puntos, barrios.shp)
portales.con.barrio$indice <- rownames(portales.con.barrio)
capa.puntos$indice <- rownames(capa.puntos@data)
portales.con.barrio <- merge(capa.puntos@data, portales.con.barrio, by.x = "indice", by.y = "indice")
## Drop uninteresting columns, populate the neighbourhood and keep only
## residential ('V') Catastro units.
fincas_catastro <- fincas_catastro[clave_grupo_BI=='V',.(parcela_cat, cvia_DGC, tipo_via, nombre_via, num_pol1, bis, num_pol2, bis2, Km, bloque, escalera, planta, puerta, dir_resto, m2_BI, m2_solares_sin_div_hor, coef_finca, garage, anio_mejor, numfinca)]
portales.con.barrio$idbarrio <- data.frame(str_split(portales.con.barrio$DESBDT, " ", n=2, simplify = T))[[1]]
portales.con.barrio$desbarrio <- data.frame(str_split(portales.con.barrio$DESBDT, " ", n=2, simplify = T))[[2]]
portales.con.barrio <- data.table(portales.con.barrio[,c("tipo_via", "nombre_via", "numfinca", "x_coor", "y_coor", "idbarrio", "desbarrio")])
fincas_catastro <- merge(fincas_catastro, portales.con.barrio, by.x = c("tipo_via", "nombre_via", "numfinca"), by.y = c("tipo_via", "nombre_via", "numfinca"))
fincas.ivima.enriquecidas <- merge(fincas.ivima.enriquecidas,
                                   portales.con.barrio, by.x =c("tipo_via_cat", "nombre_via_cat", "numfinca"), by.y =c("tipo_via", "nombre_via", "numfinca") )
fincas.ivima.enriquecidas <- fincas.ivima.enriquecidas[,.(tipo_via_cat, nombre_via_cat, numfinca, metros, habitaciones, Garaje, Precio, eur_metro, eur_metro_round, planta_cat, num_pol, letra, x_coor, y_coor, idbarrio, desbarrio)]
# Most recent construction year per doorway, carried to the IVIMA table.
anio.max.finca <- fincas_catastro[, .(anio_max = max(anio_mejor)), by = .(tipo_via, nombre_via, numfinca)]
fincas.ivima.enriquecidas <- merge(fincas.ivima.enriquecidas, anio.max.finca, by.x =c("tipo_via_cat", "nombre_via_cat", "numfinca"), by.y =c("tipo_via", "nombre_via", "numfinca") )
plantas_cat <- fincas_catastro[,.N, by = .(planta)]
plantas_ivima <- fincas.ivima.enriquecidas[,.N, by = .(planta_cat)]
## IVIMA has little floor variety. To extend the model to every Catastro
## dwelling it is better to convert the categorical floor ("planta") into an
## integer height ("altura") so the model generalises.
## Catastro floors are mapped to a number, later carried to the IVIMA units.
## The number of floors per building is computed so attics can be given a
## floor number. Floor literals that start with a digit are numeric; of the
## rest, those starting with A are attics and everything else is treated as
## ground floor.
## Extract the numbers.
plantas_cat$primer_caracter <- str_sub(plantas_cat$planta,1,1)
plantas_cat$primer_caracter <- str_extract(plantas_cat$primer_caracter, '[a-zA-Z]')
plantas_cat$altura <- ifelse(is.na(plantas_cat$primer_caracter)==F,plantas_cat$primer_caracter,str_extract(plantas_cat$planta, '[+-]*[0-9]+'))
plantas_cat[grep('[B-Z]', altura), altura:='00']
## Attic height must be computed relative to its own building.
plantas_cat[,c('N','primer_caracter'):=NULL]
fincas_catastro <- merge(fincas_catastro, plantas_cat, by.x = 'planta', by.y = 'planta')
plantas_cat <- fincas_catastro[,.N, by = .(tipo_via, nombre_via, numfinca, altura)]
# Number of distinct heights per building, used as the attic's floor number.
plantas_cat <- plantas_cat[,.N, by = .(tipo_via, nombre_via, numfinca)]
setnames(plantas_cat, 'N', 'alt_max')
plantas_cat$atico <- 'A'
fincas_catastro <- merge(fincas_catastro, plantas_cat, all.x = T, by.x=c('tipo_via', 'nombre_via', 'numfinca','altura'), by.y = c('tipo_via', 'nombre_via', 'numfinca','atico'))
fincas_catastro[altura == 'A', altura:=as.character(alt_max)]
fincas_catastro$alt_max <- NULL
fincas_catastro$altura <- as.integer(fincas_catastro$altura)
plantas_cat <- fincas_catastro[,.N, by = .(tipo_via, nombre_via, numfinca,planta, altura)]
plantas_cat$N <- NULL
setnames(plantas_cat, c('planta','tipo_via','nombre_via'), c('planta_cat','tipo_via_cat','nombre_via_cat'))
## Convert the IVIMA floor to a number with the same criterion as Catastro;
## only attic heights are recovered from Catastro.
fincas.ivima.enriquecidas$primer_caracter_planta <- str_sub(fincas.ivima.enriquecidas$planta_cat,1,1)
fincas.ivima.enriquecidas$primer_caracter_planta <- str_extract(fincas.ivima.enriquecidas$primer_caracter_planta, '[a-zA-Z]')
# NOTE(review): '$planta' appears to rely on data.frame partial matching
# against the 'planta_cat' column -- confirm, and prefer the full name.
fincas.ivima.enriquecidas$altura <- ifelse(is.na(fincas.ivima.enriquecidas$primer_caracter_planta)==F, fincas.ivima.enriquecidas$primer_caracter_planta, str_extract(fincas.ivima.enriquecidas$planta, '[+-]*[0-9]+'))
fincas.ivima.enriquecidas[grep('[B-Z]', altura), altura:='00']
fincas.ivima.enriquecidas$primer_caracter_planta <- NULL
aticos.cat <- plantas_cat[grep('^A',planta_cat)]
aticos.cat$planta_cat <- 'A'
setnames(aticos.cat, c('altura','planta_cat'), c('altura_num','altura'))
fincas.ivima.enriquecidas <- merge(fincas.ivima.enriquecidas, aticos.cat, all.x=T, by.x=c('tipo_via_cat', 'nombre_via_cat', 'numfinca', 'altura'), by.y=c('tipo_via_cat', 'nombre_via_cat', 'numfinca', 'altura'))
fincas.ivima.enriquecidas$altura_num <- as.character(fincas.ivima.enriquecidas$altura_num)
fincas.ivima.enriquecidas[is.na(altura_num)==T & altura != 'A', altura_num := altura]
fincas.ivima.enriquecidas$altura_num <- as.numeric(fincas.ivima.enriquecidas$altura_num)
# Fix the neighbourhood literal: the shapefile literals contain badly encoded
# characters. The neighbourhood list comes from
# http://www.madrid.org/iestadis/fijas/clasificaciones/descarga/cobar15.xls
listado.barrios <- data.table(read_excel(str_c(clean.data.dir, '/SHP/Barrios Madrid/cobar15.xls'),sheet = 'cobar15'))
listado.barrios$idbarrio <- str_c(listado.barrios$`Código distrito`, listado.barrios$`Código barrio`)
listado.barrios <- listado.barrios[, .N, by = .(idbarrio, `Literal barrio`)]
listado.barrios$N <- NULL
setnames(listado.barrios, 'Literal barrio', 'barrio')
fincas_catastro <- merge(fincas_catastro, listado.barrios, by.x = 'idbarrio', by.y = 'idbarrio')
fincas_catastro$desbarrio <- NULL
# Keep only rows with a usable height.
fincas_catastro <- fincas_catastro[is.na(fincas_catastro$altura) == F,]
fincas.ivima.enriquecidas <- merge(fincas.ivima.enriquecidas, listado.barrios, by.x = 'idbarrio', by.y = 'idbarrio')
fincas.ivima.enriquecidas$desbarrio <- NULL
fincas.ivima.enriquecidas <- fincas.ivima.enriquecidas[is.na(fincas.ivima.enriquecidas$altura_num) == F,]
fincas.ivima.enriquecidas$idbarrio <- as.factor(fincas.ivima.enriquecidas$idbarrio)
## Replace the Catastro street-type abbreviation with a recognisable literal.
tipos_via_cat <- data.table(read.csv(file.tipos.via.cat, header = T, sep = ';'))
tipos_via_cat$descripcion <- str_replace(tipos_via_cat$descripcion, ', ', '-')
colnames(tipos_via_cat) = c('tipo_via', 'descripcion')
fincas_catastro <- merge(fincas_catastro, tipos_via_cat, by.x = 'tipo_via', by.y ='tipo_via')
# Persist the model-ready tables.
write.csv(fincas_catastro, str_c(clean.data.dir, '/modelo/fincas_catastro.csv'), row.names = F, fileEncoding = 'UTF-8')
write.csv(fincas.ivima.enriquecidas, str_c(clean.data.dir, '/modelo/fincas_ivima.csv'), row.names = F, fileEncoding = 'UTF-8')
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/localize.R \name{localize} \alias{localize} \alias{delocalize} \alias{install} \alias{repositories} \alias{add_libpaths} \title{Copy packages, folders, or files to or from google buckets.} \usage{ localize(source, destination, dry = TRUE) delocalize(source, destination, unlink = FALSE, dry = TRUE) install( pkgs, lib = .libPaths()[1], ..., version = BiocManager::version(), binary_base_url = "https://storage.googleapis.com/bioconductor_docker/packages", verbose = getOption("verbose") ) repositories( version = BiocManager::version(), binary_base_url = "https://storage.googleapis.com/bioconductor_docker/packages" ) add_libpaths(paths) } \arguments{ \item{source}{`character(1)`, a google storage bucket or local file system directory location.} \item{destination}{`character(1)`, a google storage bucket or local file system directory location.} \item{dry}{`logical(1)`, when `TRUE` (default), return the consequences of the operation without actually performing the operation.} \item{unlink}{`logical(1)` remove (unlink) the file or directory in `source`. Default: `FALSE`.} \item{pkgs}{`character()` packages to install from binary repository.} \item{lib}{`character(1)` library path (directory) in which to install `pkgs`; defaults to `.libPaths()[1]`.} \item{...}{additional arguments, passed to `install.packages()`.} \item{version}{`character(1)` or `package_version` Bioconductor version, e.g., "3.12".} \item{binary_base_url}{`character(1)` host and base path for binary package 'CRAN-style' repository; not usually required by the end-user.} \item{verbose}{`logical(1)` report on package installation progress?} \item{paths}{`character()`: vector of directories to add to `.libPaths()`. Paths that do not exist will be created.} } \value{ `localize()`: exit status of function `gsutil_rsync()`. `delocalize()`: exit status of function `gsutil_rsync()` `install()`: return value of `install.packages()`. 
`repositories()`: character() of binary (if available), Bioconductor, and CRAN
repositories.

`add_libpaths()`: updated .libPaths(), invisibly.
}
\description{
`localize()`: recursively synchronizes files from a Google storage bucket
(`source`) to the local file system (`destination`). This command acts
recursively on the `source` directory, and does not delete files in
`destination` that are not in `source`.

`delocalize()`: synchronize files from a local file system (`source`) to a
Google storage bucket (`destination`). This command acts recursively on the
`source` directory, and does not delete files in `destination` that are not
in `source`.

`install()`: install R / Bioconductor packages, using fast pre-built 'binary'
libraries if available.

`repositories()`: repositories to search for binary (if available),
Bioconductor, and CRAN packages.

`add_libpaths()`: Add local library paths to `.libPaths()`.
}
\details{
`repositories()` prepends an additional repository URI to
`BiocManager::repositories()`. The URI is formed by concatenating
`binary_base_url`, the environment variables `TERRA_R_PLATFORM` and the
'major' and 'minor' components of `TERRA_R_PLATFORM_BINARY_VERSION` and
`BiocManager::version()`. The URI is only prepended if a CRAN-style
repository exists at that location, with binary package tar.gz content
described by `src/contrib/PACKAGES.gz`.
}
\examples{
\dontrun{install(c('BiocParallel', 'BiocGenerics'))}
repositories()
\dontrun{add_libpaths("/tmp/host-site-library")}
}
/man/localize.Rd
no_license
vjcitn/AnVIL
R
false
true
3,616
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/localize.R \name{localize} \alias{localize} \alias{delocalize} \alias{install} \alias{repositories} \alias{add_libpaths} \title{Copy packages, folders, or files to or from google buckets.} \usage{ localize(source, destination, dry = TRUE) delocalize(source, destination, unlink = FALSE, dry = TRUE) install( pkgs, lib = .libPaths()[1], ..., version = BiocManager::version(), binary_base_url = "https://storage.googleapis.com/bioconductor_docker/packages", verbose = getOption("verbose") ) repositories( version = BiocManager::version(), binary_base_url = "https://storage.googleapis.com/bioconductor_docker/packages" ) add_libpaths(paths) } \arguments{ \item{source}{`character(1)`, a google storage bucket or local file system directory location.} \item{destination}{`character(1)`, a google storage bucket or local file system directory location.} \item{dry}{`logical(1)`, when `TRUE` (default), return the consequences of the operation without actually performing the operation.} \item{unlink}{`logical(1)` remove (unlink) the file or directory in `source`. Default: `FALSE`.} \item{pkgs}{`character()` packages to install from binary repository.} \item{lib}{`character(1)` library path (directory) in which to install `pkgs`; defaults to `.libPaths()[1]`.} \item{...}{additional arguments, passed to `install.packages()`.} \item{version}{`character(1)` or `package_version` Bioconductor version, e.g., "3.12".} \item{binary_base_url}{`character(1)` host and base path for binary package 'CRAN-style' repository; not usually required by the end-user.} \item{verbose}{`logical(1)` report on package installation progress?} \item{paths}{`character()`: vector of directories to add to `.libPaths()`. Paths that do not exist will be created.} } \value{ `localize()`: exit status of function `gsutil_rsync()`. `delocalize()`: exit status of function `gsutil_rsync()` `install()`: return value of `install.packages()`. 
`repositories()`: character() of binary (if available), Bioconductor, and CRAN repositories. `add_libpaths()`: updated .libPaths(), invisibly. } \description{ `localize()`: recursively synchronizes files from a Google storage bucket (`source`) to the local file system (`destination`). This command acts recursively on the `source` directory, and does not delete files in `destination` that are not in `source`. `delocalize()`: synchronize files from a local file system (`source`) to a Google storage bucket (`destination`). This command acts recursively on the `source` directory, and does not delete files in `destination` that are not in `source`. `install()`: install R / Bioconductor packages, using fast pre-built 'binary' libraries if available. `repositories()`: repositories to search for binary (if available), Bioconductor, and CRAN packages. `add_libpaths()`: Add local library paths to `.libPaths()`. } \details{ `repositories()` prepends an additional repository URI to `BiocManager::repositories()`. The URI is formed by concatenating `binary_base_url`, the environment variables `TERRA_R_PLATFORM` and the 'major' and 'minor' components of `TERRA_R_PLATFORM_BINARY_VERSION` and `BiocManager::version()`. The URI is only prepended if a CRAN-style repository exists at that location, with binary package tar.gz content described by `src/contrib/PACKAGES.gz`. } \examples{ \dontrun{install(c('BiocParallel', 'BiocGenerics'))} repositories() \dontrun{add_libpaths("/tmp/host-site-library")} }
#' Get all visits
#'
#' @param account_id your account id
#' @param start_date start date (yyyy-mm-dd)
#' @param end_date end date (yyyy-mm-dd)
#' @param lead_id lead id. NULL by default
#'
#' @return data frame of all visits for all the leads (by default) or for one
#'   specific lead (if lead_id is provided)
#' @export
#' @importFrom rlang .data
#'
#' @examples
#' \dontrun{
#'
#' get_visits(
#'   account_id = "12345",
#'   start_date = "2020-11-01",
#'   end_date = "2020-11-30"
#' )
#' }
get_visits <- function(account_id = NULL,
                       start_date = NULL,
                       end_date = NULL,
                       lead_id = NULL) {
  # Validate required arguments early, before any network traffic.
  if (is.null(account_id)) {
    stop("account_id is missing", call. = FALSE)
  }
  if (is.null(start_date)) {
    stop("start_date is missing", call. = FALSE)
  }
  if (is.null(end_date)) {
    stop("end_date is missing", call. = FALSE)
  }

  # Endpoint differs depending on whether we scope to a single lead.
  if (is.null(lead_id)) {
    path <- glue::glue("accounts/{account_id}/visits")
  } else {
    path <- glue::glue("accounts/{account_id}/leads/{lead_id}/visits")
  }

  # The API is paginated: follow `links$next` until it is absent (NULL).
  # `""` is a sentinel meaning "first page not fetched yet".
  next_page <- ""
  results <- list()

  while (!is.null(next_page)) {
    if (next_page == "") {
      # First page: build the URL ourselves, asking for the largest page size.
      url <- httr::modify_url(
        base_url(),
        path = path,
        query = list(
          start_date = start_date,
          end_date = end_date,
          `page[size]` = 100
        )
      )
    } else {
      # Subsequent pages: the API returns a fully-formed URL in `links$next`,
      # which we already captured in `next_page` on the previous iteration.
      url <- next_page
    }

    content <- call_api(url)
    next_page <- content$links$`next`
    results <- c(results, content$data)
  }

  # Flatten each visit record (its id plus all attributes) into one row.
  results %>%
    purrr::map(~ c(id = .$id, .$attributes)) %>%
    tibble::tibble(visit = .) %>%
    tidyr::unnest_wider(visit)
}
/R/visits.R
permissive
henrywangnl/RLeadfeeder
R
false
false
1,577
r
#' Get all visits
#'
#' Fetches every visit page from the API and returns the combined result.
#'
#' @param account_id your account id
#' @param start_date start date (yyyy-mm-dd)
#' @param end_date end date (yyyy-mm-dd)
#' @param lead_id lead id. NULL by default
#'
#' @return data frame of all visits for all the leads (by default) or for one
#'   specific lead (if lead_id is provided)
#' @export
#' @importFrom rlang .data
#'
#' @examples
#' \dontrun{
#'
#' get_visits(
#'   account_id = "12345",
#'   start_date = "2020-11-01",
#'   end_date = "2020-11-30"
#' )
#' }
get_visits <- function(account_id = NULL,
                       start_date = NULL,
                       end_date = NULL,
                       lead_id = NULL) {
  # Required arguments: fail fast with the same messages as before.
  if (is.null(account_id)) stop("account_id is missing")
  if (is.null(start_date)) stop("start_date is missing")
  if (is.null(end_date)) stop("end_date is missing")

  # Scope the endpoint to one lead when a lead_id was supplied.
  endpoint <- if (is.null(lead_id)) {
    glue::glue("accounts/{account_id}/visits")
  } else {
    glue::glue("accounts/{account_id}/leads/{lead_id}/visits")
  }

  # Build the first page URL; later pages come back fully formed
  # in the response's `links$next` field.
  url <- httr::modify_url(
    base_url(),
    path = endpoint,
    query = list(start_date = start_date,
                 end_date = end_date,
                 `page[size]` = 100)
  )

  collected <- list()
  repeat {
    page <- call_api(url)
    collected <- c(collected, page$data)
    url <- page$links$`next`
    if (is.null(url)) break  # no more pages
  }

  # One row per visit: the record id plus its attribute fields.
  collected %>%
    purrr::map(~ c(id = .$id, .$attributes)) %>%
    tibble::tibble(visit = .) %>%
    tidyr::unnest_wider(visit)
}