content
large_stringlengths
0
6.46M
path
large_stringlengths
3
331
license_type
large_stringclasses
2 values
repo_name
large_stringlengths
5
125
language
large_stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
4
6.46M
extension
large_stringclasses
75 values
text
stringlengths
0
6.46M
## Helpers for reading and pre-processing a Proteome Discoverer
## peptide-level xlsx export for single-cell proteomics analysis.

readData <- function(file.xlsx) {
  # Read the export, keeping only the columns needed downstream:
  # annotated sequence, protein accessions, file id, isolation
  # interference and the reporter-ion abundances.
  require(readxl)
  require(stringr)
  nms <- names(read_excel(file.xlsx, n_max=0))
  datix <- which(grepl('Annotated Sequence|^Protein Accessions|File ID|Isolation Interference|Abundance', nms))
  # mark unwanted columns as 'skip' so read_excel never loads them
  col_types_data <- ifelse(seq_along(nms) %in% datix, 'guess', 'skip')
  dat.df <- as.data.frame(read_excel(file.xlsx, col_types=col_types_data))
  colnames(dat.df) <- str_replace_all(colnames(dat.df), ' ', '')
  # peptide sequences can repeat; disambiguate row names with ';N' suffixes
  rownames(dat.df) <- make.unique(dat.df$AnnotatedSequence, sep=';')
  dat.df <- dat.df[-grep('AnnotatedSequence', colnames(dat.df))]
  # keep only the first accession of each '; '-separated list
  # (vapply instead of sapply: guaranteed character(1) per element)
  dat.df$ProteinAccessions <- vapply(dat.df$ProteinAccessions,
                                     function(s) strsplit(s, split='; ')[[1]][1],
                                     character(1), USE.NAMES=FALSE)
  return(dat.df)
}

readAnnotation <- function(file.xlsx) {
  # Read only the sequence/accession columns, producing a peptide ->
  # protein annotation table.
  require(readxl)
  require(stringr)
  nms <- names(read_excel(file.xlsx, n_max=0))
  annotix <- which(grepl('Annotated Sequence|^Protein Accessions', nms))
  col_types_annot <- ifelse(seq_along(nms) %in% annotix, 'guess', 'skip')
  annot.df <- as.data.frame(read_excel(file.xlsx, col_types=col_types_annot))
  colnames(annot.df) <- str_replace_all(colnames(annot.df), ' ', '')
  annot.df$ProteinAccessions <- vapply(annot.df$ProteinAccessions,
                                       function(s) strsplit(s, split='; ')[[1]][1],
                                       character(1), USE.NAMES=FALSE)
  # same disambiguation scheme as the row names produced by readData()
  annot.df$uniqAnnotatedSequence <- make.unique(annot.df$AnnotatedSequence, sep=';')
  return(annot.df)
}

filterIsolationInterference <- function(df, percent=70) {
  # Keep rows whose isolation interference is below `percent` (and not
  # NA), then drop the interference column itself.
  percent <- as.numeric(percent)
  ix <- which(grepl('IsolationInterference', colnames(df)))
  isolationinterference <- df[, ix]
  df <- df[isolationinterference < percent & !is.na(isolationinterference), ]
  df[-ix]
}

filterChannels <- function(df, carrierORempty, ...) {
  # Drop columns whose names match any of the given channel patterns
  # (carrier/empty channels); extra patterns may be passed via `...`.
  carrierORempty <- c(carrierORempty, unlist(list(...)))
  cix <- grep(paste(carrierORempty, collapse='|'), colnames(df))
  # guard: `df[, -integer(0)]` would drop EVERY column, not none
  if (length(cix) > 0) df <- df[, -cix]
  df
}

filterRows <- function(df) {
  # Remove rows that are entirely NA.
  emptyrows <- apply(df, 1, function(x) all(is.na(x)))
  df[!emptyrows, ]
}

partitionMSruns <- function(df) {
  # Split by MS run (FileID), prefix each run's abundance columns with
  # the run id, and drop the FileID column from each partition.
  runix <- which(grepl('FileID', colnames(df)))
  partition.lst <- split(df, factor(df[, runix, drop=TRUE]))
  lapply(partition.lst, function(xdf) {
    prefix <- unique(xdf[, runix, drop=TRUE])
    abundix <- which(grepl('Abundance:', colnames(xdf)))
    colnames(xdf)[abundix] <- gsub('Abundance:', paste0(prefix, '_'), colnames(xdf)[abundix])
    xdf[-runix]
  })
}

partitionProteins <- function(df) {
  # Split peptides by their (first) protein accession.
  split(df, factor(df$ProteinAccessions))
}

extrapolateMissing <- function(df) {
  # df is ctrl or treatment partial data.frame
  # Return df untouched when complete; impute a single-row df.
  # NOTE(review): a df with NAs and more than one row falls through and
  # returns NULL -- presumably handled by the caller; confirm.
  if (!anyNA(df)) {
    return(df)
  } else if (nrow(df) == 1) {
    fillRowColumn(df)
  }
}

fillRowColumn <- function(df) {
  # Impute each row that has at least two observed values; rows with
  # fewer observations are returned unchanged.
  res <- apply(df, 1, function(v) {
    if (sum(!is.na(v)) >= 2) sampleAndReplace(v) else v
  })
  # apply() over rows returns one column per input row, so transpose back
  return(t(res))
}

sampleAndReplace <- function(x) {
  # Replace the NAs in x with draws from N(mean, sd = 10% of mean) of
  # the observed values.
  numbermissing <- sum(is.na(x))
  mn <- mean(x[!is.na(x)])
  stdev <- mn * 0.1
  x[is.na(x)] <- rnorm(numbermissing, mean=mn, sd=stdev)
  return(x)
}
/SCMS.R
no_license
harvardinformatics/SingleCellProteomics
R
false
false
3,210
r
## Helpers for reading and pre-processing a Proteome Discoverer
## peptide-level xlsx export for single-cell proteomics analysis.

readData <- function(file.xlsx) {
  # Read the export, keeping only the columns needed downstream:
  # annotated sequence, protein accessions, file id, isolation
  # interference and the reporter-ion abundances.
  require(readxl)
  require(stringr)
  nms <- names(read_excel(file.xlsx, n_max=0))
  datix <- which(grepl('Annotated Sequence|^Protein Accessions|File ID|Isolation Interference|Abundance', nms))
  # mark unwanted columns as 'skip' so read_excel never loads them
  col_types_data <- ifelse(seq_along(nms) %in% datix, 'guess', 'skip')
  dat.df <- as.data.frame(read_excel(file.xlsx, col_types=col_types_data))
  colnames(dat.df) <- str_replace_all(colnames(dat.df), ' ', '')
  # peptide sequences can repeat; disambiguate row names with ';N' suffixes
  rownames(dat.df) <- make.unique(dat.df$AnnotatedSequence, sep=';')
  dat.df <- dat.df[-grep('AnnotatedSequence', colnames(dat.df))]
  # keep only the first accession of each '; '-separated list
  # (vapply instead of sapply: guaranteed character(1) per element)
  dat.df$ProteinAccessions <- vapply(dat.df$ProteinAccessions,
                                     function(s) strsplit(s, split='; ')[[1]][1],
                                     character(1), USE.NAMES=FALSE)
  return(dat.df)
}

readAnnotation <- function(file.xlsx) {
  # Read only the sequence/accession columns, producing a peptide ->
  # protein annotation table.
  require(readxl)
  require(stringr)
  nms <- names(read_excel(file.xlsx, n_max=0))
  annotix <- which(grepl('Annotated Sequence|^Protein Accessions', nms))
  col_types_annot <- ifelse(seq_along(nms) %in% annotix, 'guess', 'skip')
  annot.df <- as.data.frame(read_excel(file.xlsx, col_types=col_types_annot))
  colnames(annot.df) <- str_replace_all(colnames(annot.df), ' ', '')
  annot.df$ProteinAccessions <- vapply(annot.df$ProteinAccessions,
                                       function(s) strsplit(s, split='; ')[[1]][1],
                                       character(1), USE.NAMES=FALSE)
  # same disambiguation scheme as the row names produced by readData()
  annot.df$uniqAnnotatedSequence <- make.unique(annot.df$AnnotatedSequence, sep=';')
  return(annot.df)
}

filterIsolationInterference <- function(df, percent=70) {
  # Keep rows whose isolation interference is below `percent` (and not
  # NA), then drop the interference column itself.
  percent <- as.numeric(percent)
  ix <- which(grepl('IsolationInterference', colnames(df)))
  isolationinterference <- df[, ix]
  df <- df[isolationinterference < percent & !is.na(isolationinterference), ]
  df[-ix]
}

filterChannels <- function(df, carrierORempty, ...) {
  # Drop columns whose names match any of the given channel patterns
  # (carrier/empty channels); extra patterns may be passed via `...`.
  carrierORempty <- c(carrierORempty, unlist(list(...)))
  cix <- grep(paste(carrierORempty, collapse='|'), colnames(df))
  # guard: `df[, -integer(0)]` would drop EVERY column, not none
  if (length(cix) > 0) df <- df[, -cix]
  df
}

filterRows <- function(df) {
  # Remove rows that are entirely NA.
  emptyrows <- apply(df, 1, function(x) all(is.na(x)))
  df[!emptyrows, ]
}

partitionMSruns <- function(df) {
  # Split by MS run (FileID), prefix each run's abundance columns with
  # the run id, and drop the FileID column from each partition.
  runix <- which(grepl('FileID', colnames(df)))
  partition.lst <- split(df, factor(df[, runix, drop=TRUE]))
  lapply(partition.lst, function(xdf) {
    prefix <- unique(xdf[, runix, drop=TRUE])
    abundix <- which(grepl('Abundance:', colnames(xdf)))
    colnames(xdf)[abundix] <- gsub('Abundance:', paste0(prefix, '_'), colnames(xdf)[abundix])
    xdf[-runix]
  })
}

partitionProteins <- function(df) {
  # Split peptides by their (first) protein accession.
  split(df, factor(df$ProteinAccessions))
}

extrapolateMissing <- function(df) {
  # df is ctrl or treatment partial data.frame
  # Return df untouched when complete; impute a single-row df.
  # NOTE(review): a df with NAs and more than one row falls through and
  # returns NULL -- presumably handled by the caller; confirm.
  if (!anyNA(df)) {
    return(df)
  } else if (nrow(df) == 1) {
    fillRowColumn(df)
  }
}

fillRowColumn <- function(df) {
  # Impute each row that has at least two observed values; rows with
  # fewer observations are returned unchanged.
  res <- apply(df, 1, function(v) {
    if (sum(!is.na(v)) >= 2) sampleAndReplace(v) else v
  })
  # apply() over rows returns one column per input row, so transpose back
  return(t(res))
}

sampleAndReplace <- function(x) {
  # Replace the NAs in x with draws from N(mean, sd = 10% of mean) of
  # the observed values.
  numbermissing <- sum(is.na(x))
  mn <- mean(x[!is.na(x)])
  stdev <- mn * 0.1
  x[is.na(x)] <- rnorm(numbermissing, mean=mn, sd=stdev)
  return(x)
}
## ---- TrueValues ----------------------------------------
# Pull the "true" simulation quantities out of every simulated object in
# `sim_obj` (list-column `obj`), one row per design/replicate/id group.
trueValue <- sim_obj %>%
  group_by(design, replicate, id) %>%
  transmute(
    # scalar simulation parameters stored inside each obj
    p = map_dbl(obj, "p"),
    m = map_dbl(obj, "m"),
    n = map_dbl(obj, "n"),
    # list-columns: minimum error and true coefficient vector of each obj
    minerror = map(obj, "minerror"),
    trueBeta = map(obj, "beta"),
    # test set bundled as a data.frame; I() keeps the matrix columns intact
    testData = map(obj, ~data.frame(x = I(.x$testX), y = I(.x$testY))),
    # empirical covariance of the test predictors
    sigmaTest = map(obj, ~cov(.x[["testX"]])),
    # true covariance with the first m rows/columns of Sigma removed
    sigma = map(obj, ~with(.x, Sigma[-c(1:m), -c(1:m)]))
  )
/scripts/example1/06-true-values.r
no_license
therimalaya/simrel-m
R
false
false
497
r
## ---- TrueValues ----------------------------------------
# Pull the "true" simulation quantities out of every simulated object in
# `sim_obj` (list-column `obj`), one row per design/replicate/id group.
trueValue <- sim_obj %>%
  group_by(design, replicate, id) %>%
  transmute(
    # scalar simulation parameters stored inside each obj
    p = map_dbl(obj, "p"),
    m = map_dbl(obj, "m"),
    n = map_dbl(obj, "n"),
    # list-columns: minimum error and true coefficient vector of each obj
    minerror = map(obj, "minerror"),
    trueBeta = map(obj, "beta"),
    # test set bundled as a data.frame; I() keeps the matrix columns intact
    testData = map(obj, ~data.frame(x = I(.x$testX), y = I(.x$testY))),
    # empirical covariance of the test predictors
    sigmaTest = map(obj, ~cov(.x[["testX"]])),
    # true covariance with the first m rows/columns of Sigma removed
    sigma = map(obj, ~with(.x, Sigma[-c(1:m), -c(1:m)]))
  )
# Build the lodown catalog for the CDC Behavioral Risk Factor
# Surveillance System (BRFSS): one row per available survey year with
# the data-file url, the sas importation-script url, the database table
# name and the survey-design variable names for that year.
get_catalog_brfss <-
    function( data_name = "brfss" , output_dir , ... ){

        # scrape the annual-data index page for available years
        data_page <- readLines( "https://www.cdc.gov/brfss/annual_data/annual_data.htm" )

        # extract every four-digit year linked from the page
        available_years <-
            sort( unique( gsub( "(.*)/brfss/annual_data/annual_([0-9][0-9][0-9][0-9]).htm(.*)" , "\\2" ,
                grep( "annual_data/annual_([0-9][0-9][0-9][0-9]).htm" , data_page , value = TRUE ) ) ) )

        # the main data-file location changed format several times over the years
        path_to_files <-
            ifelse( available_years < 1990 ,
                paste0( "ftp://ftp.cdc.gov/pub/data/Brfss/CDBRFS" , substr( available_years , 3 , 4 ) , "_XPT.zip" ) ,
            ifelse( available_years < 2002 ,
                paste0( "ftp://ftp.cdc.gov/pub/data/Brfss/CDBRFS" , substr( available_years , 3 , 4 ) , "XPT.zip" ) ,
            ifelse( available_years >= 2012 ,
                paste0( "https://www.cdc.gov/brfss/annual_data/" , available_years , "/files/LLCP" , available_years , "ASC.ZIP" ) ,
            ifelse( available_years == 2011 ,
                "ftp://ftp.cdc.gov/pub/data/brfss/LLCP2011ASC.ZIP" ,
                paste0( "ftp://ftp.cdc.gov/pub/data/brfss/cdbrfs" ,
                    ifelse( available_years == 2002 , available_years , substr( available_years , 3 , 4 ) ) ,
                    "asc.zip" ) ) ) ) )

        # sas importation scripts only exist from 2002 onward
        sas_files <-
            ifelse( available_years < 2002 , NA ,
            ifelse( available_years >= 2012 ,
                paste0( "https://www.cdc.gov/brfss/annual_data/" , available_years , "/files/sasout" , substr( available_years , 3 , 4 ) , "_llcp.sas" ) ,
            ifelse( available_years == 2011 ,
                "https://www.cdc.gov/brfss/annual_data/2011/sasout11_llcp.sas" ,
                paste0( "https://www.cdc.gov/brfss/annual_data/" , available_years , "/files/sasout" , substr( available_years , 3 , 4 ) ,
                    ifelse( available_years > 2006 , ".SAS" , ".sas" ) ) ) ) )

        # one catalog row per year; the weight/psu/strata variable names
        # changed at fixed points in the series (18 and 27 years in)
        catalog <-
            data.frame(
                year = available_years ,
                db_tablename = paste0( 'x' , available_years ) ,
                full_url = path_to_files ,
                sas_ri = sas_files ,
                dbfolder = paste0( output_dir , "/MonetDB" ) ,
                weight = c( rep( 'x_finalwt' , 18 ) , rep( 'xfinalwt' , 9 ) , rep( 'xllcpwt' , length( available_years ) - 27 ) ) ,
                psu = c( rep( 'x_psu' , 18 ) , rep( 'xpsu' , length( available_years ) - 18 ) ) ,
                strata = c( rep( 'x_ststr' , 18 ) , rep( 'xststr' , length( available_years ) - 18 ) ) ,
                design_filename = paste0( output_dir , "/" , available_years , " design.rds" ) ,
                stringsAsFactors = FALSE
            )

        catalog
    }

# Download and import every catalogued BRFSS year into a MonetDBLite
# database, then build a database-backed survey design per year and
# save it as an .rds file.  Returns the catalog with a `case_count`
# column added.
lodown_brfss <-
    function( data_name = "brfss" , catalog , ... ){

        tf <- tempfile() ; impfile <- tempfile() ; sasfile <- tempfile() ; csvfile <- tempfile()

        for ( i in seq_len( nrow( catalog ) ) ){

            # open the connection to the monetdblite database
            db <- DBI::dbConnect( MonetDBLite::MonetDBLite() , catalog[ i , 'dbfolder' ] )

            # download the file
            cachaca( catalog[ i , "full_url" ] , tf , mode = 'wb' )

            unzipped_files <- unzip_warn_fail( tf , exdir = paste0( tempdir() , "/unzips" ) )

            # pre-2002 years have no sas script: import the transport file directly
            if( is.na( catalog[ i , 'sas_ri' ] ) ){

                # read the sas transport file into r
                x <- foreign::read.xport( unzipped_files )

                # convert all column names in the table to all lowercase
                names( x ) <- tolower( names( x ) )

                # do not allow this illegal sql column name
                names( x )[ names( x ) == 'level' ] <- 'level_'

                # immediately export the data table to a comma separated value (.csv) file,
                # also stored on the local hard drive
                write.csv( x , csvfile , row.names = FALSE )

                # count the total number of records in the table
                # rows to check then read
                rtctr <- nrow( x )

                # prepare to handle errors if they occur (and they do occur)
                # reset all try-error objects
                first.attempt <- second.attempt <- NULL

                # first try to read the csv file into the monet database with NAs for NA strings
                first.attempt <-
                    try(
                        DBI::dbWriteTable( db , catalog[ i , 'db_tablename' ] , csvfile , na.strings = "NA" , nrow.check = rtctr , lower.case.names = TRUE ) ,
                        silent = TRUE
                    )

                # if the dbWriteTable() function returns an error instead of working properly..
                if( class( first.attempt ) == "try-error" ) {

                    # try re-exporting the csv file (overwriting the original csv file)
                    # using "" for the NA strings
                    write.csv( x , csvfile , row.names = FALSE , na = "" )

                    # try to remove the data table from the monet database
                    try( DBI::dbRemoveTable( db , catalog[ i , 'db_tablename' ] ) , silent = TRUE )

                    # and re-try reading the csv file directly into the monet database, this time with a different NA string setting
                    second.attempt <-
                        try(
                            DBI::dbWriteTable( db , catalog[ i , 'db_tablename' ] , csvfile , na.strings = "" , nrow.check = rtctr , lower.case.names = TRUE ) ,
                            silent = TRUE
                        )
                }

                # if that still doesn't work, import the table manually
                if( class( second.attempt ) == "try-error" ) {

                    # try to remove the data table from the monet database
                    try( DBI::dbRemoveTable( db , catalog[ i , 'db_tablename' ] ) , silent = TRUE )

                    # determine the class of each element of the brfss data table (it's either numeric or its not)
                    colTypes <- ifelse( sapply( x , class ) == 'numeric' , 'DOUBLE PRECISION' , 'VARCHAR(255)' )

                    # combine the column names with their respective types,
                    # into a single character vector containing every field
                    colDecl <- paste( names( x ) , colTypes )

                    # build the full sql CREATE TABLE string that will be used
                    # to create the data table in the monet database
                    sql.create <-
                        sprintf(
                            paste( "CREATE TABLE" , catalog[ i , 'db_tablename' ] , "(%s)" ) ,
                            paste( colDecl , collapse = ", " )
                        )

                    # create the table in the database
                    DBI::dbSendQuery( db , sql.create )

                    # now build the sql command that will copy all records from the csv file (still on the local hard disk)
                    # into the monet database, using the structure that's just been defined by the sql.create object above
                    sql.update <-
                        paste0( "copy " , rtctr , " offset 2 records into " , catalog[ i , 'db_tablename' ] , " from '" , csvfile , "' using delimiters ',' null as ''" )

                    # run the sql command
                    DBI::dbSendQuery( db , sql.update )

                }

            } else {

                # read the entire sas importation script into memory
                z <- readLines( catalog[ i , 'sas_ri' ] )

                # throw out a few columns that cause importation trouble with monetdb
                if ( catalog[ i , 'year' ] == 2009 ) z <- z[ -159:-168 ]
                if ( catalog[ i , 'year' ] == 2011 ) z <- z[ !grepl( "CHILDAGE" , z ) ]
                if ( catalog[ i , 'year' ] == 2013 ) z[ 361:362 ] <- c( "_FRTLT1z 2259" , "_VEGLT1z 2260" )
                if ( catalog[ i , 'year' ] == 2014 ) z[ 86 ] <- "COLGHOUS $ 64"

                if( catalog[ i , 'year' ] == 2015 ){
                    z <- gsub( "\\\f" , "" , z )
                    z <- gsub( "_FRTLT1 2056" , "_FRTLT1_ 2056" , z )
                    z <- gsub( "_VEGLT1 2057" , "_VEGLT1_ 2057" , z )
                }

                # replace all underscores in variable names with x's
                z <- gsub( "_" , "x" , z , fixed = TRUE )

                # throw out these three fields, which overlap other fields and therefore are not supported by SAScii
                # (see the details section at the bottom of page 9 of http://cran.r-project.org/web/packages/SAScii/SAScii.pdf for more detail)
                z <- z[ !grepl( "SEQNO" , z ) ]
                z <- z[ !grepl( "IDATE" , z ) ]
                z <- z[ !grepl( "PHONENUM" , z ) ]

                # remove all special characters
                z <- gsub( "\t" , " " , z , fixed = TRUE )
                z <- gsub( "\f" , " " , z , fixed = TRUE )

                # re-write the sas importation script to a file on the local hard drive
                writeLines( z , impfile )

                # if it's 2013 or beyond..
                if ( catalog[ i , 'year' ] >= 2013 ){

                    # create a read connection..
                    incon <- file( unzipped_files , "r")

                    # ..and a write connection
                    outcon <- file( sasfile , "w" )

                    # read through every line
                    while( length( line <- readLines( incon , 1 , skipNul = TRUE ) ) > 0 ){

                        # remove the stray slash
                        line <- gsub( "\\" , " " , line , fixed = TRUE )

                        # remove the stray everythings
                        line <- gsub( "[^[:alnum:]///' \\.]" , " " , line )

                        # mac/unix converts some weird characters to two digits
                        # while windows converts them to one. deal with it.
                        line <- iconv( line , "" , "ASCII" , sub = "abcxyz" )
                        line <- gsub( "abcxyzabcxyz" , " " , line )
                        line <- gsub( "abcxyz" , " " , line )

                        # write the result to the output connection
                        writeLines( line , outcon )
                    }

                    # remove the original
                    file.remove( unzipped_files )

                    # redirect the local filename to the new file
                    unzipped_files <- sasfile

                    # close both connections
                    close( outcon )
                    close( incon )
                }

                # actually run the read.SAScii.monetdb() function
                # and import the current fixed-width file into the monet database
                read_SAScii_monetdb (
                    unzipped_files ,
                    impfile ,
                    beginline = 70 ,
                    zipped = F ,    # the ascii file is no longer stored in a zipped file
                    tl = TRUE ,     # convert all column names to lowercase
                    tablename = catalog[ i , 'db_tablename' ] ,    # the table will be stored in the monet database as bYYYY.. for example, 2010 will be stored as the 'b2010' table
                    connection = db
                )
            }

            # add a column containing all ones to the current table
            DBI::dbSendQuery( db , paste0( 'alter table ' , catalog[ i , 'db_tablename' ] , ' add column one int' ) )
            DBI::dbSendQuery( db , paste0( 'UPDATE ' , catalog[ i , 'db_tablename' ] , ' SET one = 1' ) )

            # create a database-backed complex sample design object
            brfss_design <-
                survey::svydesign(
                    weight = as.formula( paste( "~" , catalog[ i , 'weight' ] ) ) ,
                    nest = TRUE ,
                    strata = as.formula( paste( "~" , catalog[ i , 'strata' ] ) ) ,
                    id = as.formula( paste( "~" , catalog[ i , 'psu' ] ) ) ,
                    data = catalog[ i , 'db_tablename' ] ,
                    dbtype = "MonetDBLite" ,
                    dbname = catalog[ i , 'dbfolder' ]
                )

            # save the complex sample survey design
            # into a single r data file (.rds) that can now be
            # analyzed quicker than anything else.
            saveRDS( brfss_design , file = catalog[ i , 'design_filename' ] )

            # add the number of records to the catalog
            catalog[ i , 'case_count' ] <- nrow( brfss_design )

            # repeat.
            # disconnect from the current monet database
            DBI::dbDisconnect( db , shutdown = TRUE )

            # delete the temporary files
            suppressWarnings( file.remove( tf , impfile , unzipped_files , sasfile , csvfile ) )

            cat( paste0( data_name , " catalog entry " , i , " of " , nrow( catalog ) , " stored in '" , catalog[ i , 'db_tablename' ] , "'\r\n\n" ) )
        }

        catalog
    }
/R/brfss.R
no_license
jjsjaime/lodown
R
false
false
10,849
r
# Build the lodown catalog for the CDC Behavioral Risk Factor
# Surveillance System (BRFSS): one row per available survey year with
# the data-file url, the sas importation-script url, the database table
# name and the survey-design variable names for that year.
get_catalog_brfss <-
    function( data_name = "brfss" , output_dir , ... ){

        # scrape the annual-data index page for available years
        data_page <- readLines( "https://www.cdc.gov/brfss/annual_data/annual_data.htm" )

        # extract every four-digit year linked from the page
        available_years <-
            sort( unique( gsub( "(.*)/brfss/annual_data/annual_([0-9][0-9][0-9][0-9]).htm(.*)" , "\\2" ,
                grep( "annual_data/annual_([0-9][0-9][0-9][0-9]).htm" , data_page , value = TRUE ) ) ) )

        # the main data-file location changed format several times over the years
        path_to_files <-
            ifelse( available_years < 1990 ,
                paste0( "ftp://ftp.cdc.gov/pub/data/Brfss/CDBRFS" , substr( available_years , 3 , 4 ) , "_XPT.zip" ) ,
            ifelse( available_years < 2002 ,
                paste0( "ftp://ftp.cdc.gov/pub/data/Brfss/CDBRFS" , substr( available_years , 3 , 4 ) , "XPT.zip" ) ,
            ifelse( available_years >= 2012 ,
                paste0( "https://www.cdc.gov/brfss/annual_data/" , available_years , "/files/LLCP" , available_years , "ASC.ZIP" ) ,
            ifelse( available_years == 2011 ,
                "ftp://ftp.cdc.gov/pub/data/brfss/LLCP2011ASC.ZIP" ,
                paste0( "ftp://ftp.cdc.gov/pub/data/brfss/cdbrfs" ,
                    ifelse( available_years == 2002 , available_years , substr( available_years , 3 , 4 ) ) ,
                    "asc.zip" ) ) ) ) )

        # sas importation scripts only exist from 2002 onward
        sas_files <-
            ifelse( available_years < 2002 , NA ,
            ifelse( available_years >= 2012 ,
                paste0( "https://www.cdc.gov/brfss/annual_data/" , available_years , "/files/sasout" , substr( available_years , 3 , 4 ) , "_llcp.sas" ) ,
            ifelse( available_years == 2011 ,
                "https://www.cdc.gov/brfss/annual_data/2011/sasout11_llcp.sas" ,
                paste0( "https://www.cdc.gov/brfss/annual_data/" , available_years , "/files/sasout" , substr( available_years , 3 , 4 ) ,
                    ifelse( available_years > 2006 , ".SAS" , ".sas" ) ) ) ) )

        # one catalog row per year; the weight/psu/strata variable names
        # changed at fixed points in the series (18 and 27 years in)
        catalog <-
            data.frame(
                year = available_years ,
                db_tablename = paste0( 'x' , available_years ) ,
                full_url = path_to_files ,
                sas_ri = sas_files ,
                dbfolder = paste0( output_dir , "/MonetDB" ) ,
                weight = c( rep( 'x_finalwt' , 18 ) , rep( 'xfinalwt' , 9 ) , rep( 'xllcpwt' , length( available_years ) - 27 ) ) ,
                psu = c( rep( 'x_psu' , 18 ) , rep( 'xpsu' , length( available_years ) - 18 ) ) ,
                strata = c( rep( 'x_ststr' , 18 ) , rep( 'xststr' , length( available_years ) - 18 ) ) ,
                design_filename = paste0( output_dir , "/" , available_years , " design.rds" ) ,
                stringsAsFactors = FALSE
            )

        catalog
    }

# Download and import every catalogued BRFSS year into a MonetDBLite
# database, then build a database-backed survey design per year and
# save it as an .rds file.  Returns the catalog with a `case_count`
# column added.
lodown_brfss <-
    function( data_name = "brfss" , catalog , ... ){

        tf <- tempfile() ; impfile <- tempfile() ; sasfile <- tempfile() ; csvfile <- tempfile()

        for ( i in seq_len( nrow( catalog ) ) ){

            # open the connection to the monetdblite database
            db <- DBI::dbConnect( MonetDBLite::MonetDBLite() , catalog[ i , 'dbfolder' ] )

            # download the file
            cachaca( catalog[ i , "full_url" ] , tf , mode = 'wb' )

            unzipped_files <- unzip_warn_fail( tf , exdir = paste0( tempdir() , "/unzips" ) )

            # pre-2002 years have no sas script: import the transport file directly
            if( is.na( catalog[ i , 'sas_ri' ] ) ){

                # read the sas transport file into r
                x <- foreign::read.xport( unzipped_files )

                # convert all column names in the table to all lowercase
                names( x ) <- tolower( names( x ) )

                # do not allow this illegal sql column name
                names( x )[ names( x ) == 'level' ] <- 'level_'

                # immediately export the data table to a comma separated value (.csv) file,
                # also stored on the local hard drive
                write.csv( x , csvfile , row.names = FALSE )

                # count the total number of records in the table
                # rows to check then read
                rtctr <- nrow( x )

                # prepare to handle errors if they occur (and they do occur)
                # reset all try-error objects
                first.attempt <- second.attempt <- NULL

                # first try to read the csv file into the monet database with NAs for NA strings
                first.attempt <-
                    try(
                        DBI::dbWriteTable( db , catalog[ i , 'db_tablename' ] , csvfile , na.strings = "NA" , nrow.check = rtctr , lower.case.names = TRUE ) ,
                        silent = TRUE
                    )

                # if the dbWriteTable() function returns an error instead of working properly..
                if( class( first.attempt ) == "try-error" ) {

                    # try re-exporting the csv file (overwriting the original csv file)
                    # using "" for the NA strings
                    write.csv( x , csvfile , row.names = FALSE , na = "" )

                    # try to remove the data table from the monet database
                    try( DBI::dbRemoveTable( db , catalog[ i , 'db_tablename' ] ) , silent = TRUE )

                    # and re-try reading the csv file directly into the monet database, this time with a different NA string setting
                    second.attempt <-
                        try(
                            DBI::dbWriteTable( db , catalog[ i , 'db_tablename' ] , csvfile , na.strings = "" , nrow.check = rtctr , lower.case.names = TRUE ) ,
                            silent = TRUE
                        )
                }

                # if that still doesn't work, import the table manually
                if( class( second.attempt ) == "try-error" ) {

                    # try to remove the data table from the monet database
                    try( DBI::dbRemoveTable( db , catalog[ i , 'db_tablename' ] ) , silent = TRUE )

                    # determine the class of each element of the brfss data table (it's either numeric or its not)
                    colTypes <- ifelse( sapply( x , class ) == 'numeric' , 'DOUBLE PRECISION' , 'VARCHAR(255)' )

                    # combine the column names with their respective types,
                    # into a single character vector containing every field
                    colDecl <- paste( names( x ) , colTypes )

                    # build the full sql CREATE TABLE string that will be used
                    # to create the data table in the monet database
                    sql.create <-
                        sprintf(
                            paste( "CREATE TABLE" , catalog[ i , 'db_tablename' ] , "(%s)" ) ,
                            paste( colDecl , collapse = ", " )
                        )

                    # create the table in the database
                    DBI::dbSendQuery( db , sql.create )

                    # now build the sql command that will copy all records from the csv file (still on the local hard disk)
                    # into the monet database, using the structure that's just been defined by the sql.create object above
                    sql.update <-
                        paste0( "copy " , rtctr , " offset 2 records into " , catalog[ i , 'db_tablename' ] , " from '" , csvfile , "' using delimiters ',' null as ''" )

                    # run the sql command
                    DBI::dbSendQuery( db , sql.update )

                }

            } else {

                # read the entire sas importation script into memory
                z <- readLines( catalog[ i , 'sas_ri' ] )

                # throw out a few columns that cause importation trouble with monetdb
                if ( catalog[ i , 'year' ] == 2009 ) z <- z[ -159:-168 ]
                if ( catalog[ i , 'year' ] == 2011 ) z <- z[ !grepl( "CHILDAGE" , z ) ]
                if ( catalog[ i , 'year' ] == 2013 ) z[ 361:362 ] <- c( "_FRTLT1z 2259" , "_VEGLT1z 2260" )
                if ( catalog[ i , 'year' ] == 2014 ) z[ 86 ] <- "COLGHOUS $ 64"

                if( catalog[ i , 'year' ] == 2015 ){
                    z <- gsub( "\\\f" , "" , z )
                    z <- gsub( "_FRTLT1 2056" , "_FRTLT1_ 2056" , z )
                    z <- gsub( "_VEGLT1 2057" , "_VEGLT1_ 2057" , z )
                }

                # replace all underscores in variable names with x's
                z <- gsub( "_" , "x" , z , fixed = TRUE )

                # throw out these three fields, which overlap other fields and therefore are not supported by SAScii
                # (see the details section at the bottom of page 9 of http://cran.r-project.org/web/packages/SAScii/SAScii.pdf for more detail)
                z <- z[ !grepl( "SEQNO" , z ) ]
                z <- z[ !grepl( "IDATE" , z ) ]
                z <- z[ !grepl( "PHONENUM" , z ) ]

                # remove all special characters
                z <- gsub( "\t" , " " , z , fixed = TRUE )
                z <- gsub( "\f" , " " , z , fixed = TRUE )

                # re-write the sas importation script to a file on the local hard drive
                writeLines( z , impfile )

                # if it's 2013 or beyond..
                if ( catalog[ i , 'year' ] >= 2013 ){

                    # create a read connection..
                    incon <- file( unzipped_files , "r")

                    # ..and a write connection
                    outcon <- file( sasfile , "w" )

                    # read through every line
                    while( length( line <- readLines( incon , 1 , skipNul = TRUE ) ) > 0 ){

                        # remove the stray slash
                        line <- gsub( "\\" , " " , line , fixed = TRUE )

                        # remove the stray everythings
                        line <- gsub( "[^[:alnum:]///' \\.]" , " " , line )

                        # mac/unix converts some weird characters to two digits
                        # while windows converts them to one. deal with it.
                        line <- iconv( line , "" , "ASCII" , sub = "abcxyz" )
                        line <- gsub( "abcxyzabcxyz" , " " , line )
                        line <- gsub( "abcxyz" , " " , line )

                        # write the result to the output connection
                        writeLines( line , outcon )
                    }

                    # remove the original
                    file.remove( unzipped_files )

                    # redirect the local filename to the new file
                    unzipped_files <- sasfile

                    # close both connections
                    close( outcon )
                    close( incon )
                }

                # actually run the read.SAScii.monetdb() function
                # and import the current fixed-width file into the monet database
                read_SAScii_monetdb (
                    unzipped_files ,
                    impfile ,
                    beginline = 70 ,
                    zipped = F ,    # the ascii file is no longer stored in a zipped file
                    tl = TRUE ,     # convert all column names to lowercase
                    tablename = catalog[ i , 'db_tablename' ] ,    # the table will be stored in the monet database as bYYYY.. for example, 2010 will be stored as the 'b2010' table
                    connection = db
                )
            }

            # add a column containing all ones to the current table
            DBI::dbSendQuery( db , paste0( 'alter table ' , catalog[ i , 'db_tablename' ] , ' add column one int' ) )
            DBI::dbSendQuery( db , paste0( 'UPDATE ' , catalog[ i , 'db_tablename' ] , ' SET one = 1' ) )

            # create a database-backed complex sample design object
            brfss_design <-
                survey::svydesign(
                    weight = as.formula( paste( "~" , catalog[ i , 'weight' ] ) ) ,
                    nest = TRUE ,
                    strata = as.formula( paste( "~" , catalog[ i , 'strata' ] ) ) ,
                    id = as.formula( paste( "~" , catalog[ i , 'psu' ] ) ) ,
                    data = catalog[ i , 'db_tablename' ] ,
                    dbtype = "MonetDBLite" ,
                    dbname = catalog[ i , 'dbfolder' ]
                )

            # save the complex sample survey design
            # into a single r data file (.rds) that can now be
            # analyzed quicker than anything else.
            saveRDS( brfss_design , file = catalog[ i , 'design_filename' ] )

            # add the number of records to the catalog
            catalog[ i , 'case_count' ] <- nrow( brfss_design )

            # repeat.
            # disconnect from the current monet database
            DBI::dbDisconnect( db , shutdown = TRUE )

            # delete the temporary files
            suppressWarnings( file.remove( tf , impfile , unzipped_files , sasfile , csvfile ) )

            cat( paste0( data_name , " catalog entry " , i , " of " , nrow( catalog ) , " stored in '" , catalog[ i , 'db_tablename' ] , "'\r\n\n" ) )
        }

        catalog
    }
# Auto-generated runbg worker script: loads the saved session image,
# reloads the compiled model libraries and runs a multi-start fit on
# this node, saving the result for the collector.

# attach the packages recorded in the parent session; try() keeps the
# script alive if one of them is unavailable on the worker
try(library(bindrcpp))
try(library(conveniencefunctions))
try(library(magrittr))
try(library(forcats))
try(library(dplyr))
try(library(purrr))
try(library(readr))
try(library(tidyr))
try(library(tibble))
try(library(ggplot2))
try(library(tidyverse))
try(library(stringr))
try(library(dMod))
try(library(cOde))
try(library(stats))
try(library(graphics))
try(library(grDevices))
try(library(utils))
try(library(datasets))
try(library(methods))
try(library(base))

setwd('~/tmp_f3uew_1_folder')
rm(list = ls())
load('tmp_f3uew.RData')

# reload the compiled ODE shared objects.
# fix: the previous pattern '.so' was an unanchored regex with an
# unescaped dot, so it also matched any file name containing "Xso";
# anchor and escape it to match only *.so files
files <- list.files(pattern = '\\.so$')
for (f in files) dyn.load(f)

.node <- 1

# run the multi-start fit; try() ensures a result (even an error) is
# always captured and saved below
.runbgOutput <- try({
  mstrust(obj, mypars, "test_fits", result_path = "test_fits", sd = 5,
          fits = 20, cores = detectFreeCores(), blather = TRUE)
})

save(.runbgOutput, file = 'tmp_f3uew_1_result.RData')
/fit/limaxfit/tmp_f3uew_1.R
no_license
matthiaskoenig/methacetin_fitting
R
false
false
811
r
# Auto-generated runbg worker script: loads the saved session image,
# reloads the compiled model libraries and runs a multi-start fit on
# this node, saving the result for the collector.

# attach the packages recorded in the parent session; try() keeps the
# script alive if one of them is unavailable on the worker
try(library(bindrcpp))
try(library(conveniencefunctions))
try(library(magrittr))
try(library(forcats))
try(library(dplyr))
try(library(purrr))
try(library(readr))
try(library(tidyr))
try(library(tibble))
try(library(ggplot2))
try(library(tidyverse))
try(library(stringr))
try(library(dMod))
try(library(cOde))
try(library(stats))
try(library(graphics))
try(library(grDevices))
try(library(utils))
try(library(datasets))
try(library(methods))
try(library(base))

setwd('~/tmp_f3uew_1_folder')
rm(list = ls())
load('tmp_f3uew.RData')

# reload the compiled ODE shared objects.
# fix: the previous pattern '.so' was an unanchored regex with an
# unescaped dot, so it also matched any file name containing "Xso";
# anchor and escape it to match only *.so files
files <- list.files(pattern = '\\.so$')
for (f in files) dyn.load(f)

.node <- 1

# run the multi-start fit; try() ensures a result (even an error) is
# always captured and saved below
.runbgOutput <- try({
  mstrust(obj, mypars, "test_fits", result_path = "test_fits", sd = 5,
          fits = 20, cores = detectFreeCores(), blather = TRUE)
})

save(.runbgOutput, file = 'tmp_f3uew_1_result.RData')
# Build a "wNorth" weight raster for the km grid: inside the overlap
# region each cell is weighted by pAspen / (pAspen + 1 - pForest);
# elsewhere the weight is 0 in the Grassland region and 1 otherwise.
library(cure4insect)
load_common_data()

# grab internal (non-exported) package environments
.c4if=cure4insect:::.c4if
.c4is=cure4insect:::.c4is

# NOTE(review): raster() and .make_raster() are called below without an
# explicit library(raster) / cure4insect::: qualifier -- presumably
# attached/exported by cure4insect; confirm before re-running.
rpa <- raster(system.file("extdata/pAspen.tif", package="cure4insect"))
KT <- .c4if$KT

# local data: km-grid lookup table and the overlap-region definition
load("d:/abmi/AB_data_v2018/data/analysis/kgrid_table_km.Rdata")
load("d:/abmi/AB_data_v2019/misc/overlap/OverlapReg.RData")
rownames(OverlapReg) <- OverlapReg$LinkID

# sanity check: the two km-grid tables must line up row-for-row
all(rownames(KT) == rownames(kgrid))

# blended weight inside the overlap region
OverlapReg$pAspen <- kgrid[rownames(OverlapReg), "pAspen"]
OverlapReg$wN <- OverlapReg$pAspen / (OverlapReg$pAspen + (1-OverlapReg$pForest))

# default weight: 0 in Grassland, 1 everywhere else; then overwrite the
# overlap cells with their blended weight
kgrid$wN <- ifelse(KT$reg_nr == "Grassland", 0, 1)
kgrid[rownames(OverlapReg), "wN"] <- OverlapReg$wN

rol <- .make_raster(kgrid$wN, kgrid, rpa)
plot(rol)
writeRaster(rol, "~/repos/cure4insect/inst/extdata/wNorth.tif")

# alternative weighting kept for reference (disabled)
#kgrid$wN2 <- ifelse(KT$reg_nr == "Grassland", 0, 1)
#kgrid[rownames(OverlapReg), "wN2"] <- OverlapReg$pAspen
#rol2 <- .make_raster(kgrid$wN2, kgrid, rpa)
#plot(rol2)

## Drat
# publish the package tarball to the local drat repository
library(drat)
options("dratRepo"="~/repos/ABbiodiversity-drat")
# only on setup!
# pruneRepo(remove=TRUE)
insertPackage("d:/abmi/AB_data_v2019/misc/cure4insect_0.1-1.tar.gz")
/birds/overlap-region.R
no_license
psolymos/abmianalytics
R
false
false
1,101
r
# Build a "wNorth" weight raster for the km grid: inside the overlap
# region each cell is weighted by pAspen / (pAspen + 1 - pForest);
# elsewhere the weight is 0 in the Grassland region and 1 otherwise.
library(cure4insect)
load_common_data()

# grab internal (non-exported) package environments
.c4if=cure4insect:::.c4if
.c4is=cure4insect:::.c4is

# NOTE(review): raster() and .make_raster() are called below without an
# explicit library(raster) / cure4insect::: qualifier -- presumably
# attached/exported by cure4insect; confirm before re-running.
rpa <- raster(system.file("extdata/pAspen.tif", package="cure4insect"))
KT <- .c4if$KT

# local data: km-grid lookup table and the overlap-region definition
load("d:/abmi/AB_data_v2018/data/analysis/kgrid_table_km.Rdata")
load("d:/abmi/AB_data_v2019/misc/overlap/OverlapReg.RData")
rownames(OverlapReg) <- OverlapReg$LinkID

# sanity check: the two km-grid tables must line up row-for-row
all(rownames(KT) == rownames(kgrid))

# blended weight inside the overlap region
OverlapReg$pAspen <- kgrid[rownames(OverlapReg), "pAspen"]
OverlapReg$wN <- OverlapReg$pAspen / (OverlapReg$pAspen + (1-OverlapReg$pForest))

# default weight: 0 in Grassland, 1 everywhere else; then overwrite the
# overlap cells with their blended weight
kgrid$wN <- ifelse(KT$reg_nr == "Grassland", 0, 1)
kgrid[rownames(OverlapReg), "wN"] <- OverlapReg$wN

rol <- .make_raster(kgrid$wN, kgrid, rpa)
plot(rol)
writeRaster(rol, "~/repos/cure4insect/inst/extdata/wNorth.tif")

# alternative weighting kept for reference (disabled)
#kgrid$wN2 <- ifelse(KT$reg_nr == "Grassland", 0, 1)
#kgrid[rownames(OverlapReg), "wN2"] <- OverlapReg$pAspen
#rol2 <- .make_raster(kgrid$wN2, kgrid, rpa)
#plot(rol2)

## Drat
# publish the package tarball to the local drat repository
library(drat)
options("dratRepo"="~/repos/ABbiodiversity-drat")
# only on setup!
# pruneRepo(remove=TRUE)
insertPackage("d:/abmi/AB_data_v2019/misc/cure4insect_0.1-1.tar.gz")
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/triad-tallies.R \name{triad_tallies} \alias{triad_tallies} \alias{connectedTriples} \alias{oneTiedTriads} \alias{twoTiedTriads} \alias{threeTiedTriads} \title{Triad tallies} \usage{ connectedTriples(bigraph, graph = actor_projection(bigraph, name = "id")) oneTiedTriads(graph) twoTiedTriads(graph) threeTiedTriads(bigraph, graph = actor_projection(bigraph, name = "id")) } \arguments{ \item{bigraph}{The ambient affiliation network from which \code{graph} is projected} \item{graph}{A one-mode network} } \description{ These functions are called by the full triad census to handle triads of different types using the projection onto actor nodes. The name of each function indicates the number of edges that appear among the three actors of the triad in the projection. (Zero-edge triads do not need to be tallied; the total number of triads is easily calculated, and the difference between this number and the total number of triads with edges gives the number of triads without.) } \seealso{ Other triad census functions: \code{\link{project_census}}, \code{\link{triad_census}}, \code{\link{triad_closure_from_census}} }
/man/triad_tallies.Rd
no_license
petershan1119/bitriad
R
false
true
1,234
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/triad-tallies.R \name{triad_tallies} \alias{triad_tallies} \alias{connectedTriples} \alias{oneTiedTriads} \alias{twoTiedTriads} \alias{threeTiedTriads} \title{Triad tallies} \usage{ connectedTriples(bigraph, graph = actor_projection(bigraph, name = "id")) oneTiedTriads(graph) twoTiedTriads(graph) threeTiedTriads(bigraph, graph = actor_projection(bigraph, name = "id")) } \arguments{ \item{bigraph}{The ambient affiliation network from which \code{graph} is projected} \item{graph}{A one-mode network} } \description{ These functions are called by the full triad census to handle triads of different types using the projection onto actor nodes. The name of each function indicates the number of edges that appear among the three actors of the triad in the projection. (Zero-edge triads do not need to be tallied; the total number of triads is easily calculated, and the difference between this number and the total number of triads with edges gives the number of triads without.) } \seealso{ Other triad census functions: \code{\link{project_census}}, \code{\link{triad_census}}, \code{\link{triad_closure_from_census}} }
library('ggplot2') values = c(c(0.025443, 0.005587),c(0.032057, 0.007040),c(0.0435947111, 0.00957403611),c(0.05449057777, 0.0119669277777),c(0.103089066666, 0.0226398666)) service=c(rep("0.25/0.5", 2),rep("0.5/1", 2),rep("1/2", 2),rep("2/4", 2), rep("4/8", 2)) resource=rep(c("vCPU", "memory"),5) data=data.frame(service,resource,values) ggplot(data, aes(fill=resource, y=values, x=service)) + geom_bar(stat="identity") + ylab("Price [$]") + ylim(0, 0.15) + xlab("AWS Fargate vCPU/memory [cores/GB]") + theme (text = element_text(size=12)) + scale_fill_manual("resource", values = c("vCPU" = "#C77CFF", "memory" = "#7CAE00")) ggsave(paste("ellipsoids_fargate_price.pdf"), width = 12, height = 9, units = "cm")
/1_ellipsoids/fargate_price.R
no_license
burkat/paper
R
false
false
715
r
library('ggplot2') values = c(c(0.025443, 0.005587),c(0.032057, 0.007040),c(0.0435947111, 0.00957403611),c(0.05449057777, 0.0119669277777),c(0.103089066666, 0.0226398666)) service=c(rep("0.25/0.5", 2),rep("0.5/1", 2),rep("1/2", 2),rep("2/4", 2), rep("4/8", 2)) resource=rep(c("vCPU", "memory"),5) data=data.frame(service,resource,values) ggplot(data, aes(fill=resource, y=values, x=service)) + geom_bar(stat="identity") + ylab("Price [$]") + ylim(0, 0.15) + xlab("AWS Fargate vCPU/memory [cores/GB]") + theme (text = element_text(size=12)) + scale_fill_manual("resource", values = c("vCPU" = "#C77CFF", "memory" = "#7CAE00")) ggsave(paste("ellipsoids_fargate_price.pdf"), width = 12, height = 9, units = "cm")
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/spatial_interaction_spot.R \name{do_permuttest_random_spot} \alias{do_permuttest_random_spot} \title{do_permuttest_random_spot} \usage{ do_permuttest_random_spot( sel_int, other_ints, select_ind, other_ind, name = "perm_1", proximityMat, expr_residual, set_seed = TRUE, seed_number = 1234 ) } \description{ calculate random values for spots } \keyword{internal}
/man/do_permuttest_random_spot.Rd
permissive
RubD/Giotto
R
false
true
458
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/spatial_interaction_spot.R \name{do_permuttest_random_spot} \alias{do_permuttest_random_spot} \title{do_permuttest_random_spot} \usage{ do_permuttest_random_spot( sel_int, other_ints, select_ind, other_ind, name = "perm_1", proximityMat, expr_residual, set_seed = TRUE, seed_number = 1234 ) } \description{ calculate random values for spots } \keyword{internal}
library(bfp) set.seed (234) ## setting where the error occured beta0 <- 1 alpha1 <- 1 alpha2 <- c (1, 1) delta1 <- 1 sampleSize <- c (40, 100) sigma2 <- c (4, 3, 2, 1) hyperpara <- c (3.05, 3.4, 3.7, 3.95) h <- 1 i <- 4 j <- 4 thisN <- sampleSize[h] x <- matrix (runif (thisN * 3, 1, 4), nrow = thisN, ncol = 3) # predictor values w <- matrix (rbinom (thisN * 2, size = 1, prob = 0.5), nrow = thisN, ncol = 2) covData <- data.frame (x = x, w = w)# start data frame x1tr <- alpha1 * x[,1]^2 x2tr <- cbind (x[,2]^-0.5, x[,2]^-0.5 * log (x[,2])) %*% alpha2 w1tr <- delta1 * w[,1] predictorTerms <- x1tr + x2tr + w1tr # linear predictor thisPriorSpecs <- list(a = hyperpara[i], modelPrior="sparse") covData$y <- predictorTerms + rnorm (thisN, 0, sqrt (sigma2[j])) ## try it: modelNow <- BayesMfp (y ~ bfp (x.1) + bfp (x.2) + bfp (x.3) + uc (w.1) + uc (w.2), data = covData, priorSpecs = thisPriorSpecs, method = "exhaustive" )
/fuzzedpackages/bfp/tests/testSimulation.R
no_license
akhikolla/testpackages
R
false
false
1,058
r
library(bfp) set.seed (234) ## setting where the error occured beta0 <- 1 alpha1 <- 1 alpha2 <- c (1, 1) delta1 <- 1 sampleSize <- c (40, 100) sigma2 <- c (4, 3, 2, 1) hyperpara <- c (3.05, 3.4, 3.7, 3.95) h <- 1 i <- 4 j <- 4 thisN <- sampleSize[h] x <- matrix (runif (thisN * 3, 1, 4), nrow = thisN, ncol = 3) # predictor values w <- matrix (rbinom (thisN * 2, size = 1, prob = 0.5), nrow = thisN, ncol = 2) covData <- data.frame (x = x, w = w)# start data frame x1tr <- alpha1 * x[,1]^2 x2tr <- cbind (x[,2]^-0.5, x[,2]^-0.5 * log (x[,2])) %*% alpha2 w1tr <- delta1 * w[,1] predictorTerms <- x1tr + x2tr + w1tr # linear predictor thisPriorSpecs <- list(a = hyperpara[i], modelPrior="sparse") covData$y <- predictorTerms + rnorm (thisN, 0, sqrt (sigma2[j])) ## try it: modelNow <- BayesMfp (y ~ bfp (x.1) + bfp (x.2) + bfp (x.3) + uc (w.1) + uc (w.2), data = covData, priorSpecs = thisPriorSpecs, method = "exhaustive" )
% Generated by roxygen2 (4.0.0): do not edit by hand \name{gtmPosteriorMode} \alias{gtmPosteriorMode} \title{Posterior-mode projection of the samples used for training the given GTM.} \usage{ gtmPosteriorMode(model) } \arguments{ \item{model}{an instance of \code{gtm}} } \value{ a LxN matrix whose rows represent the projection of the N samples into the L-dimensional latent-space } \description{ Posterior-mode projection of the samples used for training the given GTM. } \seealso{ \code{\link{computeResponsibilities}} }
/man/gtmPosteriorMode.Rd
no_license
afwlehmann/gentopmap
R
false
false
527
rd
% Generated by roxygen2 (4.0.0): do not edit by hand \name{gtmPosteriorMode} \alias{gtmPosteriorMode} \title{Posterior-mode projection of the samples used for training the given GTM.} \usage{ gtmPosteriorMode(model) } \arguments{ \item{model}{an instance of \code{gtm}} } \value{ a LxN matrix whose rows represent the projection of the N samples into the L-dimensional latent-space } \description{ Posterior-mode projection of the samples used for training the given GTM. } \seealso{ \code{\link{computeResponsibilities}} }
# =================== # Tests for lav2nlsem # =================== library(nlsem) # ordinary lms # ============ lms_model <- ' xi1 =~ x1 + x2 + x3 xi2 =~ x4 + x5 + x6 eta1 =~ y1 + y2 + y3 eta2 =~ y4 + y5 + y6 eta1 ~ xi2 + xi1:xi2 eta2 ~ xi1 + eta1 ' lav2nlsem(lms_model) # ordinary lms with different inputs for interaction # ================================================== # "none" # -- lms_model <- ' xi1 =~ x1 + x2 + x3 xi2 =~ x4 + x5 + x6 eta =~ y1 + y2 + y3 eta ~ xi1 + xi2 ' lav2nlsem(lms_model) # not all interactions with xi defined # ------------------------------------ lms_model <- ' xi1 =~ x1 + x2 + x3 xi2 =~ x4 + x5 + x6 xi3 =~ x7 + x8 + x9 eta =~ y1 + y2 + y3 eta ~ xi1 + xi2 + xi1:xi1 + xi1:xi2 ' lav2nlsem(lms_model) # semm model # =========== semm_model <- specify_sem(num.x=6, num.y=3, num.xi=2, num.eta=1, xi="x1-x3,x4-x6", eta="y1-y3", num.classes=2, interaction="none") lav_model <- ' class: 1 xi1 =~ x1 + x2 + x3 xi2 =~ x4 + x5 + x6 eta =~ y1 + y2 + y3 eta ~ xi1 + xi2 class: 2 xi1 =~ x1 + x2 + x3 xi2 =~ x4 + x5 + x6 eta =~ y1 + y2 + y3 eta ~ xi1 + xi2 ' lav_semm_model <- lav2nlsem(lav_model) # identical(semm_model, lav_semm_model) # nsemm model # =========== lav_model <- ' class: 1 xi1 =~ x1 + x2 + x3 xi2 =~ x4 + x5 + x6 eta =~ y1 + y2 + y3 eta ~ xi1 + xi2 + xi1:xi2 class: 2 xi1 =~ x1 + x2 + x3 xi2 =~ x4 + x5 + x6 eta =~ y1 + y2 + y3 eta ~ xi1 + xi2 + xi1:xi2 class: 3 xi1 =~ x1 + x2 + x3 xi2 =~ x4 + x5 + x6 eta =~ y1 + y2 + y3 eta ~ xi1 + xi2 + xi1:xi1 ' nsemm_model <- lav2nlsem(lav_model, constraints="direct1") ## with restrictions # nsemm model # =========== lav_model2 <- ' class: 1 xi1 =~ NA*x1 + x2 + x3 xi2 =~ x4 + x5 + x6 eta =~ y1 + y2 + y3 eta ~ xi1 + xi2 + xi1:xi2 class: 2 xi1 =~ 0*x1 + x2 + x3 xi2 =~ x4 + x5 + x6 eta =~ y1 + y2 + y3 eta ~ xi1 + xi2 + xi1:xi2 class: 3 xi1 =~ x1 + x2 + x3 xi2 =~ x4 + 1*x5 + x6 eta =~ y1 + y2 + y3 eta ~ xi1 + 0*xi2 + xi1:xi1 ' nsemm_model2 <- lav2nlsem(lav_model2, constraints="direct1") nsemm_model2$matrices # with 
parameter labels (which should be ignored) # =========== lav_model <- ' class: 1 xi1 =~ t1*x1 + x2 + x3 xi2 =~ x4 + x5 + x6 eta =~ y1 + y2 + y3 eta ~ xi1 + xi2 + xi1:xi2 class: 2 xi1 =~ t2*x1 + x2 + x3 xi2 =~ x4 + x5 + x6 eta =~ y1 + y2 + y3 eta ~ xi1 + xi2 + xi1:xi2 ' model <- lav2nlsem(lav_model, constraints="direct1") model$matrices
/tests/tests_for_lav2nlsem.R
no_license
cran/nlsem
R
false
false
2,555
r
# =================== # Tests for lav2nlsem # =================== library(nlsem) # ordinary lms # ============ lms_model <- ' xi1 =~ x1 + x2 + x3 xi2 =~ x4 + x5 + x6 eta1 =~ y1 + y2 + y3 eta2 =~ y4 + y5 + y6 eta1 ~ xi2 + xi1:xi2 eta2 ~ xi1 + eta1 ' lav2nlsem(lms_model) # ordinary lms with different inputs for interaction # ================================================== # "none" # -- lms_model <- ' xi1 =~ x1 + x2 + x3 xi2 =~ x4 + x5 + x6 eta =~ y1 + y2 + y3 eta ~ xi1 + xi2 ' lav2nlsem(lms_model) # not all interactions with xi defined # ------------------------------------ lms_model <- ' xi1 =~ x1 + x2 + x3 xi2 =~ x4 + x5 + x6 xi3 =~ x7 + x8 + x9 eta =~ y1 + y2 + y3 eta ~ xi1 + xi2 + xi1:xi1 + xi1:xi2 ' lav2nlsem(lms_model) # semm model # =========== semm_model <- specify_sem(num.x=6, num.y=3, num.xi=2, num.eta=1, xi="x1-x3,x4-x6", eta="y1-y3", num.classes=2, interaction="none") lav_model <- ' class: 1 xi1 =~ x1 + x2 + x3 xi2 =~ x4 + x5 + x6 eta =~ y1 + y2 + y3 eta ~ xi1 + xi2 class: 2 xi1 =~ x1 + x2 + x3 xi2 =~ x4 + x5 + x6 eta =~ y1 + y2 + y3 eta ~ xi1 + xi2 ' lav_semm_model <- lav2nlsem(lav_model) # identical(semm_model, lav_semm_model) # nsemm model # =========== lav_model <- ' class: 1 xi1 =~ x1 + x2 + x3 xi2 =~ x4 + x5 + x6 eta =~ y1 + y2 + y3 eta ~ xi1 + xi2 + xi1:xi2 class: 2 xi1 =~ x1 + x2 + x3 xi2 =~ x4 + x5 + x6 eta =~ y1 + y2 + y3 eta ~ xi1 + xi2 + xi1:xi2 class: 3 xi1 =~ x1 + x2 + x3 xi2 =~ x4 + x5 + x6 eta =~ y1 + y2 + y3 eta ~ xi1 + xi2 + xi1:xi1 ' nsemm_model <- lav2nlsem(lav_model, constraints="direct1") ## with restrictions # nsemm model # =========== lav_model2 <- ' class: 1 xi1 =~ NA*x1 + x2 + x3 xi2 =~ x4 + x5 + x6 eta =~ y1 + y2 + y3 eta ~ xi1 + xi2 + xi1:xi2 class: 2 xi1 =~ 0*x1 + x2 + x3 xi2 =~ x4 + x5 + x6 eta =~ y1 + y2 + y3 eta ~ xi1 + xi2 + xi1:xi2 class: 3 xi1 =~ x1 + x2 + x3 xi2 =~ x4 + 1*x5 + x6 eta =~ y1 + y2 + y3 eta ~ xi1 + 0*xi2 + xi1:xi1 ' nsemm_model2 <- lav2nlsem(lav_model2, constraints="direct1") nsemm_model2$matrices # with 
parameter labels (which should be ignored) # =========== lav_model <- ' class: 1 xi1 =~ t1*x1 + x2 + x3 xi2 =~ x4 + x5 + x6 eta =~ y1 + y2 + y3 eta ~ xi1 + xi2 + xi1:xi2 class: 2 xi1 =~ t2*x1 + x2 + x3 xi2 =~ x4 + x5 + x6 eta =~ y1 + y2 + y3 eta ~ xi1 + xi2 + xi1:xi2 ' model <- lav2nlsem(lav_model, constraints="direct1") model$matrices
% Generated by roxygen2 (4.0.1): do not edit by hand \name{list.clean} \alias{list.clean} \title{Clean a list by a function} \usage{ list.clean(.data, fun = is.null, recursive = FALSE) } \arguments{ \item{.data}{\code{list}} \item{fun}{A logical \code{function} for clean} \item{recursive}{\code{logical}. Should the list be cleaned recursively?} } \description{ Clean a list by a function } \examples{ \dontrun{ x <- list(a=NULL,b=NULL,c=NULL,d=1,e=2) list.clean(x) } }
/man/list.clean.Rd
permissive
timelyportfolio/rlist
R
false
false
474
rd
% Generated by roxygen2 (4.0.1): do not edit by hand \name{list.clean} \alias{list.clean} \title{Clean a list by a function} \usage{ list.clean(.data, fun = is.null, recursive = FALSE) } \arguments{ \item{.data}{\code{list}} \item{fun}{A logical \code{function} for clean} \item{recursive}{\code{logical}. Should the list be cleaned recursively?} } \description{ Clean a list by a function } \examples{ \dontrun{ x <- list(a=NULL,b=NULL,c=NULL,d=1,e=2) list.clean(x) } }
var1<-sample(5) var2<-var1/10 var3<-c("My","Name","Is","Anthony","Gonsalvis") res<-data.frame(var1,var2,var3) names(res)<-c("var Int","var Float","var String") write.csv(res,"imp.csv",row.names=FALSE) res1<-read.csv("imp.csv") print(res1)
/DataMining-Lab-Vatsa/R/1/impexp.R
no_license
sreevatsabellary/Data_Mining_Lab
R
false
false
240
r
var1<-sample(5) var2<-var1/10 var3<-c("My","Name","Is","Anthony","Gonsalvis") res<-data.frame(var1,var2,var3) names(res)<-c("var Int","var Float","var String") write.csv(res,"imp.csv",row.names=FALSE) res1<-read.csv("imp.csv") print(res1)
### ========================================================================= ### Variant QA (as opposed to the calling filters) ### ------------------------------------------------------------------------- qaVariants <- function(x, qa.filters = VariantQAFilters(...), ...) { softFilter(x, qa.filters) } VariantQAFilters <- function(fisher.strand.p.value = 1e-4, min.mdfne = 10L) { FilterRules(c(mdfne = MedianDistFromNearestEndFilter(min.mdfne), fisherStrand = StrandFETFilter(fisher.strand.p.value))) } ## With new gmapR, this is only necessary for filtering the ref N's. ## In theory, we could keep positions with ref N and at least two alts. ## But this is OK for now. NonNRefFilter <- function() { function(x) { as.character(ref(x)) != 'N' } } ## Drops the ref rows (should not be necessary) NonRefFilter <- function() { function(x) { !is.na(alt(x)) } } ReadPosCountFilter <- function(read.pos.count = 1L) { function(x) { (if (!is.null(x$ncycles)) x$ncycles else x$n.read.pos) >= read.pos.count } } StrandFETFilter <- function(p.value = 1e-4) { function(x) { p <- with(mcols(x), fisher_p_vectorized(count.plus.ref, count.minus.ref, (count.plus.ref + count.plus), (count.minus.ref + count.minus))) p > p.value } } InternalReadPosBinFilter <- function(min.count = 1L) { function(x) { read_pos_columns <- grep("readPosCount", colnames(mcols(x)), value = TRUE) alt_columns <- grep("ref", read_pos_columns, invert = TRUE, value = TRUE) internal_columns <- tail(head(alt_columns, -1), -1) if (length(internal_columns) > 0L) rowSums(as.matrix(mcols(x)[internal_columns])) >= min.count else rep.int(TRUE, length(x)) } } t.test_welch <- function(m1, m2, s1, s2, n1, n2) { s <- s1 / n1 + s2 / n2 t <- (m1 - m2) / sqrt(s) v <- s^2 / ((s1 / n1)^2 / (n1 - 1L) + (s2 / n2)^2 / (n2 - 1L)) pt(-abs(t), v) * 2L } ReadPositionTTestFilter <- function(p.value.cutoff = 1e-4) { function(x) { p <- with(mcols(x), t.test_welch(read.pos.mean, read.pos.mean.ref, read.pos.var, read.pos.var.ref, rawAltDepth(x), 
rawTotalDepth(x))) ans <- p > p.value.cutoff ans[is.na(ans)] <- TRUE ans } } DistanceToNearestFilter <- function(min.dist = 10L) { function(x) { mcols(distanceToNearest(x))$distance > min.dist } } NeighborCountFilter <- function(max.count = 2L, window.size = 75L) { function(x) { countOverlaps(resize(x, window.size, fix = "center"), x) <= max.count } } IndelsNotSupportedFilter <- function() { function(x) { nzchar(ref(x)) & nzchar(alt(x)) } } MaskFilter <- function(mask) { function(x) { !overlapsAny(x, mask, ignore.strand = TRUE) } } MedianDistFromNearestEndFilter <- function(min.mdfne) { function(x) { x$mdfne >= min.mdfne } }
/R/qaVariants.R
no_license
gmbecker/VariantTools
R
false
false
2,982
r
### ========================================================================= ### Variant QA (as opposed to the calling filters) ### ------------------------------------------------------------------------- qaVariants <- function(x, qa.filters = VariantQAFilters(...), ...) { softFilter(x, qa.filters) } VariantQAFilters <- function(fisher.strand.p.value = 1e-4, min.mdfne = 10L) { FilterRules(c(mdfne = MedianDistFromNearestEndFilter(min.mdfne), fisherStrand = StrandFETFilter(fisher.strand.p.value))) } ## With new gmapR, this is only necessary for filtering the ref N's. ## In theory, we could keep positions with ref N and at least two alts. ## But this is OK for now. NonNRefFilter <- function() { function(x) { as.character(ref(x)) != 'N' } } ## Drops the ref rows (should not be necessary) NonRefFilter <- function() { function(x) { !is.na(alt(x)) } } ReadPosCountFilter <- function(read.pos.count = 1L) { function(x) { (if (!is.null(x$ncycles)) x$ncycles else x$n.read.pos) >= read.pos.count } } StrandFETFilter <- function(p.value = 1e-4) { function(x) { p <- with(mcols(x), fisher_p_vectorized(count.plus.ref, count.minus.ref, (count.plus.ref + count.plus), (count.minus.ref + count.minus))) p > p.value } } InternalReadPosBinFilter <- function(min.count = 1L) { function(x) { read_pos_columns <- grep("readPosCount", colnames(mcols(x)), value = TRUE) alt_columns <- grep("ref", read_pos_columns, invert = TRUE, value = TRUE) internal_columns <- tail(head(alt_columns, -1), -1) if (length(internal_columns) > 0L) rowSums(as.matrix(mcols(x)[internal_columns])) >= min.count else rep.int(TRUE, length(x)) } } t.test_welch <- function(m1, m2, s1, s2, n1, n2) { s <- s1 / n1 + s2 / n2 t <- (m1 - m2) / sqrt(s) v <- s^2 / ((s1 / n1)^2 / (n1 - 1L) + (s2 / n2)^2 / (n2 - 1L)) pt(-abs(t), v) * 2L } ReadPositionTTestFilter <- function(p.value.cutoff = 1e-4) { function(x) { p <- with(mcols(x), t.test_welch(read.pos.mean, read.pos.mean.ref, read.pos.var, read.pos.var.ref, rawAltDepth(x), 
rawTotalDepth(x))) ans <- p > p.value.cutoff ans[is.na(ans)] <- TRUE ans } } DistanceToNearestFilter <- function(min.dist = 10L) { function(x) { mcols(distanceToNearest(x))$distance > min.dist } } NeighborCountFilter <- function(max.count = 2L, window.size = 75L) { function(x) { countOverlaps(resize(x, window.size, fix = "center"), x) <= max.count } } IndelsNotSupportedFilter <- function() { function(x) { nzchar(ref(x)) & nzchar(alt(x)) } } MaskFilter <- function(mask) { function(x) { !overlapsAny(x, mask, ignore.strand = TRUE) } } MedianDistFromNearestEndFilter <- function(min.mdfne) { function(x) { x$mdfne >= min.mdfne } }
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/pls.R \name{pls} \alias{pls} \title{Partial Least Squares regression} \usage{ pls(x, y, ncomp = 15, center = T, scale = F, cv = NULL, x.test = NULL, y.test = NULL, method = "simpls", alpha = 0.05, coeffs.ci = NULL, coeffs.alpha = 0.1, info = "", light = F, ncomp.selcrit = "min") } \arguments{ \item{x}{matrix with predictors.} \item{y}{matrix with responses.} \item{ncomp}{maximum number of components to calculate.} \item{center}{logical, center or not predictors and response values.} \item{scale}{logical, scale (standardize) or not predictors and response values.} \item{cv}{number of segments for cross-validation (if cv = 1, full cross-validation will be used).} \item{x.test}{matrix with predictors for test set.} \item{y.test}{matrix with responses for test set.} \item{method}{method for calculating PLS model.} \item{alpha}{significance level for calculating statistical limits for residuals.} \item{coeffs.ci}{method to calculate p-values and confidence intervals for regression coefficients (so far only jack-knifing is availavle: \code{='jk'}).} \item{coeffs.alpha}{significance level for calculating confidence intervals for regression coefficients.} \item{info}{short text with information about the model.} \item{light}{run normal or light (faster) version of PLS without calculationg some performance statistics.} \item{ncomp.selcrit}{criterion for selecting optimal number of components (\code{'min'} for first local minimum of RMSECV and \code{'wold'} for Wold's rule.)} } \value{ Returns an object of \code{pls} class with following fields: \item{ncomp }{number of components included to the model.} \item{ncomp.selected }{selected (optimal) number of components.} \item{xloadings }{matrix with loading values for x decomposition.} \item{yloadings }{matrix with loading values for y decomposition.} \item{weights }{matrix with PLS weights.} \item{selratio }{array with selectivity ratio 
values.} \item{vipscores }{matrix with VIP scores values.} \item{coeffs }{object of class \code{\link{regcoeffs}} with regression coefficients calculated for each component.} \item{info }{information about the model, provided by user when build the model.} \item{calres }{an object of class \code{\link{plsres}} with PLS results for a calibration data.} \item{testres }{an object of class \code{\link{plsres}} with PLS results for a test data, if it was provided.} \item{cvres }{an object of class \code{\link{plsres}} with PLS results for cross-validation, if this option was chosen.} } \description{ \code{pls} is used to calibrate, validate and use of partial least squares (PLS) regression model. } \details{ So far only SIMPLS method [1] is available, more coming soon. Implementation works both with one and multiple response variables. Like in \code{\link{pca}}, \code{pls} uses number of components (\code{ncomp}) as a minimum of number of objects - 1, number of x variables and the default or provided value. Regression coefficients, predictions and other results are calculated for each set of components from 1 to \code{ncomp}: 1, 1:2, 1:3, etc. The optimal number of components, (\code{ncomp.selected}), is found using Wold's R criterion, but can be adjusted by user using function (\code{\link{selectCompNum.pls}}). The selected optimal number of components is used for all default operations - predictions, plots, etc. Selectivity ratio [2] and VIP scores [3] are calculated for any PLS model authomatically, however while selectivity ratio values are calculated for all computed components, the VIP scores are computed only for selected components (to save calculation time) and recalculated every time when \code{selectCompNum()} is called for the model. Calculation of confidence intervals and p-values for regression coefficients are available only by jack-knifing so far. See help for \code{\link{regcoeffs}} objects for details. 
} \examples{ ### Examples of using PLS model class library(mdatools) ## 1. Make a PLS model for concentration of first component ## using full-cross validation and automatic detection of ## optimal number of components and show an overview data(simdata) x = simdata$spectra.c y = simdata$conc.c[, 1] model = pls(x, y, ncomp = 8, cv = 1) summary(model) plot(model) ## 2. Make a PLS model for concentration of first component ## using test set and 10 segment cross-validation and show overview data(simdata) x = simdata$spectra.c y = simdata$conc.c[, 1] x.t = simdata$spectra.t y.t = simdata$conc.t[, 1] model = pls(x, y, ncomp = 8, cv = 10, x.test = x.t, y.test = y.t) model = selectCompNum(model, 2) summary(model) plot(model) ## 3. Make a PLS model for concentration of first component ## using only test set validation and show overview data(simdata) x = simdata$spectra.c y = simdata$conc.c[, 1] x.t = simdata$spectra.t y.t = simdata$conc.t[, 1] model = pls(x, y, ncomp = 6, x.test = x.t, y.test = y.t) model = selectCompNum(model, 2) summary(model) plot(model) ## 4. Show variance and error plots for a PLS model par(mfrow = c(2, 2)) plotXCumVariance(model, type = 'h') plotYCumVariance(model, type = 'b', show.labels = TRUE, legend.position = 'bottomright') plotRMSE(model) plotRMSE(model, type = 'h', show.labels = TRUE) par(mfrow = c(1, 1)) ## 5. Show scores plots for a PLS model par(mfrow = c(2, 2)) plotXScores(model) plotXScores(model, comp = c(1, 3), show.labels = TRUE) plotXYScores(model) plotXYScores(model, comp = 2, show.labels = TRUE) par(mfrow = c(1, 1)) ## 6. Show loadings and coefficients plots for a PLS model par(mfrow = c(2, 2)) plotXLoadings(model) plotXLoadings(model, comp = c(1, 2), type = 'l') plotXYLoadings(model, comp = c(1, 2), legend.position = 'topleft') plotRegcoeffs(model) par(mfrow = c(1, 1)) ## 7. 
Show predictions and residuals plots for a PLS model par(mfrow = c(2, 2)) plotXResiduals(model, show.label = TRUE) plotYResiduals(model, show.label = TRUE) plotPredictions(model) plotPredictions(model, ncomp = 4, xlab = 'C, reference', ylab = 'C, predictions') par(mfrow = c(1, 1)) ## 8. Selectivity ratio and VIP scores plots par(mfrow = c(2, 2)) plotSelectivityRatio(model) plotSelectivityRatio(model, ncomp = 1) par(mfrow = c(1, 1)) ## 9. Variable selection with selectivity ratio selratio = getSelectivityRatio(model) selvar = !(selratio < 8) xsel = x[, selvar] modelsel = pls(xsel, y, ncomp = 6, cv = 1) modelsel = selectCompNum(modelsel, 3) summary(model) summary(modelsel) ## 10. Calculate average spectrum and show the selected variables i = 1:ncol(x) ms = apply(x, 2, mean) par(mfrow = c(2, 2)) plot(i, ms, type = 'p', pch = 16, col = 'red', main = 'Original variables') plotPredictions(model) plot(i, ms, type = 'p', pch = 16, col = 'lightgray', main = 'Selected variables') points(i[selvar], ms[selvar], col = 'red', pch = 16) plotPredictions(modelsel) par(mfrow = c(1, 1)) } \author{ Sergey Kucheryavskiy (svkucheryavski@gmail.com) } \references{ 1. S. de Jong, Chemometrics and Intelligent Laboratory Systems 18 (1993) 251-263. 2. Tarja Rajalahti et al. Chemometrics and Laboratory Systems, 95 (2009), 35-48. 3. Il-Gyo Chong, Chi-Hyuck Jun. Chemometrics and Laboratory Systems, 78 (2005), 103-112. } \seealso{ Methods for \code{pls} objects: \tabular{ll}{ \code{print} \tab prints information about a \code{pls} object.\cr \code{\link{summary.pls}} \tab shows performance statistics for the model.\cr \code{\link{plot.pls}} \tab shows plot overview of the model.\cr \code{\link{pls.simpls}} \tab implementation of SIMPLS algorithm.\cr \code{\link{predict.pls}} \tab applies PLS model to a new data.\cr \code{\link{selectCompNum.pls}} \tab set number of optimal components in the model.\cr \code{\link{plotPredictions.pls}} \tab shows predicted vs. 
measured plot.\cr \code{\link{plotRegcoeffs.pls}} \tab shows regression coefficients plot.\cr \code{\link{plotXScores.pls}} \tab shows scores plot for x decomposition.\cr \code{\link{plotXYScores.pls}} \tab shows scores plot for x and y decomposition.\cr \code{\link{plotXLoadings.pls}} \tab shows loadings plot for x decomposition.\cr \code{\link{plotXYLoadings.pls}} \tab shows loadings plot for x and y decomposition.\cr \code{\link{plotRMSE.pls}} \tab shows RMSE plot.\cr \code{\link{plotXVariance.pls}} \tab shows explained variance plot for x decomposition.\cr \code{\link{plotYVariance.pls}} \tab shows explained variance plot for y decomposition.\cr \code{\link{plotXCumVariance.pls}} \tab shows cumulative explained variance plot for y decomposition.\cr \code{\link{plotYCumVariance.pls}} \tab shows cumulative explained variance plot for y decomposition.\cr \code{\link{plotXResiduals.pls}} \tab shows T2 vs. Q plot for x decomposition.\cr \code{\link{plotYResiduals.pls}} \tab shows residuals plot for y values.\cr \code{\link{plotSelectivityRatio.pls}} \tab shows plot with selectivity ratio values.\cr \code{\link{plotVIPScores.pls}} \tab shows plot with VIP scores values.\cr \code{\link{getSelectivityRatio.pls}} \tab returns vector with selectivity ratio values.\cr \code{\link{getVIPScores.pls}} \tab returns vector with VIP scores values.\cr \code{\link{getRegcoeffs.pls}} \tab returns matrix with regression coefficients.\cr } Most of the methods for plotting data (except loadings and regression coefficients) are also available for PLS results (\code{\link{plsres}}) objects. There is also a randomization test for PLS-regression (\code{\link{randtest}}). }
/man/pls.Rd
no_license
zeehio/mdatools
R
false
false
9,493
rd
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/pls.R \name{pls} \alias{pls} \title{Partial Least Squares regression} \usage{ pls(x, y, ncomp = 15, center = T, scale = F, cv = NULL, x.test = NULL, y.test = NULL, method = "simpls", alpha = 0.05, coeffs.ci = NULL, coeffs.alpha = 0.1, info = "", light = F, ncomp.selcrit = "min") } \arguments{ \item{x}{matrix with predictors.} \item{y}{matrix with responses.} \item{ncomp}{maximum number of components to calculate.} \item{center}{logical, center or not predictors and response values.} \item{scale}{logical, scale (standardize) or not predictors and response values.} \item{cv}{number of segments for cross-validation (if cv = 1, full cross-validation will be used).} \item{x.test}{matrix with predictors for test set.} \item{y.test}{matrix with responses for test set.} \item{method}{method for calculating PLS model.} \item{alpha}{significance level for calculating statistical limits for residuals.} \item{coeffs.ci}{method to calculate p-values and confidence intervals for regression coefficients (so far only jack-knifing is availavle: \code{='jk'}).} \item{coeffs.alpha}{significance level for calculating confidence intervals for regression coefficients.} \item{info}{short text with information about the model.} \item{light}{run normal or light (faster) version of PLS without calculationg some performance statistics.} \item{ncomp.selcrit}{criterion for selecting optimal number of components (\code{'min'} for first local minimum of RMSECV and \code{'wold'} for Wold's rule.)} } \value{ Returns an object of \code{pls} class with following fields: \item{ncomp }{number of components included to the model.} \item{ncomp.selected }{selected (optimal) number of components.} \item{xloadings }{matrix with loading values for x decomposition.} \item{yloadings }{matrix with loading values for y decomposition.} \item{weights }{matrix with PLS weights.} \item{selratio }{array with selectivity ratio 
values.} \item{vipscores }{matrix with VIP scores values.} \item{coeffs }{object of class \code{\link{regcoeffs}} with regression coefficients calculated for each component.} \item{info }{information about the model, provided by user when build the model.} \item{calres }{an object of class \code{\link{plsres}} with PLS results for a calibration data.} \item{testres }{an object of class \code{\link{plsres}} with PLS results for a test data, if it was provided.} \item{cvres }{an object of class \code{\link{plsres}} with PLS results for cross-validation, if this option was chosen.} } \description{ \code{pls} is used to calibrate, validate and use of partial least squares (PLS) regression model. } \details{ So far only SIMPLS method [1] is available, more coming soon. Implementation works both with one and multiple response variables. Like in \code{\link{pca}}, \code{pls} uses number of components (\code{ncomp}) as a minimum of number of objects - 1, number of x variables and the default or provided value. Regression coefficients, predictions and other results are calculated for each set of components from 1 to \code{ncomp}: 1, 1:2, 1:3, etc. The optimal number of components, (\code{ncomp.selected}), is found using Wold's R criterion, but can be adjusted by user using function (\code{\link{selectCompNum.pls}}). The selected optimal number of components is used for all default operations - predictions, plots, etc. Selectivity ratio [2] and VIP scores [3] are calculated for any PLS model authomatically, however while selectivity ratio values are calculated for all computed components, the VIP scores are computed only for selected components (to save calculation time) and recalculated every time when \code{selectCompNum()} is called for the model. Calculation of confidence intervals and p-values for regression coefficients are available only by jack-knifing so far. See help for \code{\link{regcoeffs}} objects for details. 
} \examples{ ### Examples of using PLS model class library(mdatools) ## 1. Make a PLS model for concentration of first component ## using full-cross validation and automatic detection of ## optimal number of components and show an overview data(simdata) x = simdata$spectra.c y = simdata$conc.c[, 1] model = pls(x, y, ncomp = 8, cv = 1) summary(model) plot(model) ## 2. Make a PLS model for concentration of first component ## using test set and 10 segment cross-validation and show overview data(simdata) x = simdata$spectra.c y = simdata$conc.c[, 1] x.t = simdata$spectra.t y.t = simdata$conc.t[, 1] model = pls(x, y, ncomp = 8, cv = 10, x.test = x.t, y.test = y.t) model = selectCompNum(model, 2) summary(model) plot(model) ## 3. Make a PLS model for concentration of first component ## using only test set validation and show overview data(simdata) x = simdata$spectra.c y = simdata$conc.c[, 1] x.t = simdata$spectra.t y.t = simdata$conc.t[, 1] model = pls(x, y, ncomp = 6, x.test = x.t, y.test = y.t) model = selectCompNum(model, 2) summary(model) plot(model) ## 4. Show variance and error plots for a PLS model par(mfrow = c(2, 2)) plotXCumVariance(model, type = 'h') plotYCumVariance(model, type = 'b', show.labels = TRUE, legend.position = 'bottomright') plotRMSE(model) plotRMSE(model, type = 'h', show.labels = TRUE) par(mfrow = c(1, 1)) ## 5. Show scores plots for a PLS model par(mfrow = c(2, 2)) plotXScores(model) plotXScores(model, comp = c(1, 3), show.labels = TRUE) plotXYScores(model) plotXYScores(model, comp = 2, show.labels = TRUE) par(mfrow = c(1, 1)) ## 6. Show loadings and coefficients plots for a PLS model par(mfrow = c(2, 2)) plotXLoadings(model) plotXLoadings(model, comp = c(1, 2), type = 'l') plotXYLoadings(model, comp = c(1, 2), legend.position = 'topleft') plotRegcoeffs(model) par(mfrow = c(1, 1)) ## 7. 
Show predictions and residuals plots for a PLS model par(mfrow = c(2, 2)) plotXResiduals(model, show.label = TRUE) plotYResiduals(model, show.label = TRUE) plotPredictions(model) plotPredictions(model, ncomp = 4, xlab = 'C, reference', ylab = 'C, predictions') par(mfrow = c(1, 1)) ## 8. Selectivity ratio and VIP scores plots par(mfrow = c(2, 2)) plotSelectivityRatio(model) plotSelectivityRatio(model, ncomp = 1) par(mfrow = c(1, 1)) ## 9. Variable selection with selectivity ratio selratio = getSelectivityRatio(model) selvar = !(selratio < 8) xsel = x[, selvar] modelsel = pls(xsel, y, ncomp = 6, cv = 1) modelsel = selectCompNum(modelsel, 3) summary(model) summary(modelsel) ## 10. Calculate average spectrum and show the selected variables i = 1:ncol(x) ms = apply(x, 2, mean) par(mfrow = c(2, 2)) plot(i, ms, type = 'p', pch = 16, col = 'red', main = 'Original variables') plotPredictions(model) plot(i, ms, type = 'p', pch = 16, col = 'lightgray', main = 'Selected variables') points(i[selvar], ms[selvar], col = 'red', pch = 16) plotPredictions(modelsel) par(mfrow = c(1, 1)) } \author{ Sergey Kucheryavskiy (svkucheryavski@gmail.com) } \references{ 1. S. de Jong, Chemometrics and Intelligent Laboratory Systems 18 (1993) 251-263. 2. Tarja Rajalahti et al. Chemometrics and Laboratory Systems, 95 (2009), 35-48. 3. Il-Gyo Chong, Chi-Hyuck Jun. Chemometrics and Laboratory Systems, 78 (2005), 103-112. } \seealso{ Methods for \code{pls} objects: \tabular{ll}{ \code{print} \tab prints information about a \code{pls} object.\cr \code{\link{summary.pls}} \tab shows performance statistics for the model.\cr \code{\link{plot.pls}} \tab shows plot overview of the model.\cr \code{\link{pls.simpls}} \tab implementation of SIMPLS algorithm.\cr \code{\link{predict.pls}} \tab applies PLS model to a new data.\cr \code{\link{selectCompNum.pls}} \tab set number of optimal components in the model.\cr \code{\link{plotPredictions.pls}} \tab shows predicted vs. 
measured plot.\cr \code{\link{plotRegcoeffs.pls}} \tab shows regression coefficients plot.\cr \code{\link{plotXScores.pls}} \tab shows scores plot for x decomposition.\cr \code{\link{plotXYScores.pls}} \tab shows scores plot for x and y decomposition.\cr \code{\link{plotXLoadings.pls}} \tab shows loadings plot for x decomposition.\cr \code{\link{plotXYLoadings.pls}} \tab shows loadings plot for x and y decomposition.\cr \code{\link{plotRMSE.pls}} \tab shows RMSE plot.\cr \code{\link{plotXVariance.pls}} \tab shows explained variance plot for x decomposition.\cr \code{\link{plotYVariance.pls}} \tab shows explained variance plot for y decomposition.\cr \code{\link{plotXCumVariance.pls}} \tab shows cumulative explained variance plot for y decomposition.\cr \code{\link{plotYCumVariance.pls}} \tab shows cumulative explained variance plot for y decomposition.\cr \code{\link{plotXResiduals.pls}} \tab shows T2 vs. Q plot for x decomposition.\cr \code{\link{plotYResiduals.pls}} \tab shows residuals plot for y values.\cr \code{\link{plotSelectivityRatio.pls}} \tab shows plot with selectivity ratio values.\cr \code{\link{plotVIPScores.pls}} \tab shows plot with VIP scores values.\cr \code{\link{getSelectivityRatio.pls}} \tab returns vector with selectivity ratio values.\cr \code{\link{getVIPScores.pls}} \tab returns vector with VIP scores values.\cr \code{\link{getRegcoeffs.pls}} \tab returns matrix with regression coefficients.\cr } Most of the methods for plotting data (except loadings and regression coefficients) are also available for PLS results (\code{\link{plsres}}) objects. There is also a randomization test for PLS-regression (\code{\link{randtest}}). }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/discovr_data.R \docType{data} \name{zhang_sample} \alias{zhang_sample} \title{Zhang et al. (2013) (subsample)} \format{ A tibble with 52 rows and 4 variables } \source{ \href{https://www.discovr.rocks/csv/zhang_2013_subsample.csv}{www.discovr.rocks/csv/zhang_2013_subsample.csv} } \usage{ zhang_sample } \description{ A dataset from Field, A. P. (2022). Discovering statistics using R and RStudio (2nd ed.). London: Sage. } \details{ Statistics and maths anxiety are common and affect people's performance on maths and stats assignments; women in particular can lack confidence in mathematics (Field, 2010). Zhang, Schmader and Hall (2013) did an intriguing study in which students completed a maths test in which some put their own name on the test booklet, whereas others were given a booklet that already had either a male or female name on. Participants in the latter two conditions were told that they would use this other person's name for the purpose of the test. Women who completed the test using a different name performed significantly better than those who completed the test using their own name. (There were no such significant effects for men.) The data are a random subsample of Zhang et al.'s data with the following variables: \itemize{ \item \strong{id}: participant ID \item \strong{sex}: participant's biological sex \item \strong{name_type}: the booklet condition to which the participant was allocated: Female fake name, Male fake name or Own name \item \strong{accuracy}: the participant's score on the maths test } } \references{ \itemize{ \item Field, A. P. (2010). Teaching Statistics. In D. Upton & A. Trapp (Eds.), \emph{Teaching Psychology in Higher Education} (pp. 134-163). Chichester, UK: Wiley-Blackwell. \item Zhang, S., Schmader, T., & Hall, W. M. (2013). L'eggo My Ego: Reducing the Gender Gap in Math by Unlinking the Self from Performance. 
\emph{Self and Identity}, \emph{12}, 400-412. \doi{10.1080/15298868.2012.687012} } } \keyword{datasets}
/man/zhang_sample.Rd
no_license
KristenDowns/discovr
R
false
true
2,061
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/discovr_data.R \docType{data} \name{zhang_sample} \alias{zhang_sample} \title{Zhang et al. (2013) (subsample)} \format{ A tibble with 52 rows and 4 variables } \source{ \href{https://www.discovr.rocks/csv/zhang_2013_subsample.csv}{www.discovr.rocks/csv/zhang_2013_subsample.csv} } \usage{ zhang_sample } \description{ A dataset from Field, A. P. (2022). Discovering statistics using R and RStudio (2nd ed.). London: Sage. } \details{ Statistics and maths anxiety are common and affect people's performance on maths and stats assignments; women in particular can lack confidence in mathematics (Field, 2010). Zhang, Schmader and Hall (2013) did an intriguing study in which students completed a maths test in which some put their own name on the test booklet, whereas others were given a booklet that already had either a male or female name on. Participants in the latter two conditions were told that they would use this other person's name for the purpose of the test. Women who completed the test using a different name performed significantly better than those who completed the test using their own name. (There were no such significant effects for men.) The data are a random subsample of Zhang et al.'s data with the following variables: \itemize{ \item \strong{id}: participant ID \item \strong{sex}: participant's biological sex \item \strong{name_type}: the booklet condition to which the participant was allocated: Female fake name, Male fake name or Own name \item \strong{accuracy}: the participant's score on the maths test } } \references{ \itemize{ \item Field, A. P. (2010). Teaching Statistics. In D. Upton & A. Trapp (Eds.), \emph{Teaching Psychology in Higher Education} (pp. 134-163). Chichester, UK: Wiley-Blackwell. \item Zhang, S., Schmader, T., & Hall, W. M. (2013). L'eggo My Ego: Reducing the Gender Gap in Math by Unlinking the Self from Performance. 
\emph{Self and Identity}, \emph{12}, 400-412. \doi{10.1080/15298868.2012.687012} } } \keyword{datasets}
# Data set:
# https://data.cms.gov/Medicare-Inpatient/Inpatient-Prospective-Payment-System-IPPS-Provider/97k6-zzx3
library(shiny)
library(dplyr)
library(ggplot2)
library(plotly)

# Read in data from the same directory the app.R file is in.
payments <- readr::read_csv("healthcare_payments.csv")

# Unique Medicare Severity Diagnosis Related Group (MS-DRG) values.
drgs <- unique(payments$`DRG Definition`)

# User interface: a multi-select DRG filter on top, three result tabs below.
ui <- fluidPage(
  # application title
  titlePanel("CMS Indiana Provider Summary"),
  # filter by DRG (multiple selections allowed)
  fluidRow(selectInput("drg", "Select DRG", choices = drgs, multiple = TRUE)),
  fluidRow(
    tabsetPanel(
      tabPanel("Table",
               # show table of payment data
               tableOutput("table")
      ),
      tabPanel("Histogram",
               # show histogram of Total Discharges
               plotOutput("hist")
      ),
      tabPanel("Boxplot",
               # show boxplot of the two payment metrics
               plotlyOutput("box")
      )
    )
  )
)

# Define server logic.
server <- function(input, output) {

  # Reactive subset of `payments` restricted to the selected DRGs.
  # When nothing is selected, all DRGs are included.
  filter_data <- reactive({
    # BUG FIX: the original used
    #   drg <- ifelse(is.null(input$drg), drgs, input$drg)
    # ifelse() is vectorized and returns a result the same length as its
    # (scalar) condition, so it silently kept only the FIRST DRG of either
    # branch, breaking multi-select. A plain if/else keeps the full vector.
    drg <- if (is.null(input$drg)) drgs else input$drg
    # filter by DRG values
    payments %>%
      dplyr::filter(`DRG Definition` %in% drg) %>%
      dplyr::select(c(1, 3, 8:12))
  })

  # Table of the filtered payment data.
  output$table <- renderTable({
    filter_data()
  })

  # Histogram of Total Discharges for the filtered data.
  output$hist <- renderPlot({
    x <- filter_data() %>%
      dplyr::pull(`Total Discharges`)
    hist(x, xlab = "Total Discharges", main = "")
  })

  # Interactive boxplot comparing average total vs. Medicare payments.
  output$box <- renderPlotly({
    df <- filter_data() %>%
      tidyr::pivot_longer(c("Average Total Payments", "Average Medicare Payments"),
                          names_to = "metric")
    g <- ggplot(df, aes(x = metric, y = value)) +
      geom_boxplot()
    ggplotly(g)
  })
}

# Run the app.
shinyApp(ui = ui, server = server)
/1_intro/D_boxplot/app.R
no_license
NateByers/shinytraining
R
false
false
2,314
r
# Data set:
# https://data.cms.gov/Medicare-Inpatient/Inpatient-Prospective-Payment-System-IPPS-Provider/97k6-zzx3
library(shiny)
library(dplyr)
library(ggplot2)
library(plotly)

# Read in data from the same directory the app.R file is in.
payments <- readr::read_csv("healthcare_payments.csv")

# Unique Medicare Severity Diagnosis Related Group (MS-DRG) values.
drgs <- unique(payments$`DRG Definition`)

# User interface: a multi-select DRG filter on top, three result tabs below.
ui <- fluidPage(
  # application title
  titlePanel("CMS Indiana Provider Summary"),
  # filter by DRG (multiple selections allowed)
  fluidRow(selectInput("drg", "Select DRG", choices = drgs, multiple = TRUE)),
  fluidRow(
    tabsetPanel(
      tabPanel("Table",
               # show table of payment data
               tableOutput("table")
      ),
      tabPanel("Histogram",
               # show histogram of Total Discharges
               plotOutput("hist")
      ),
      tabPanel("Boxplot",
               # show boxplot of the two payment metrics
               plotlyOutput("box")
      )
    )
  )
)

# Define server logic.
server <- function(input, output) {

  # Reactive subset of `payments` restricted to the selected DRGs.
  # When nothing is selected, all DRGs are included.
  filter_data <- reactive({
    # BUG FIX: the original used
    #   drg <- ifelse(is.null(input$drg), drgs, input$drg)
    # ifelse() is vectorized and returns a result the same length as its
    # (scalar) condition, so it silently kept only the FIRST DRG of either
    # branch, breaking multi-select. A plain if/else keeps the full vector.
    drg <- if (is.null(input$drg)) drgs else input$drg
    # filter by DRG values
    payments %>%
      dplyr::filter(`DRG Definition` %in% drg) %>%
      dplyr::select(c(1, 3, 8:12))
  })

  # Table of the filtered payment data.
  output$table <- renderTable({
    filter_data()
  })

  # Histogram of Total Discharges for the filtered data.
  output$hist <- renderPlot({
    x <- filter_data() %>%
      dplyr::pull(`Total Discharges`)
    hist(x, xlab = "Total Discharges", main = "")
  })

  # Interactive boxplot comparing average total vs. Medicare payments.
  output$box <- renderPlotly({
    df <- filter_data() %>%
      tidyr::pivot_longer(c("Average Total Payments", "Average Medicare Payments"),
                          names_to = "metric")
    g <- ggplot(df, aes(x = metric, y = value)) +
      geom_boxplot()
    ggplotly(g)
  })
}

# Run the app.
shinyApp(ui = ui, server = server)
#' Law-Watkinson model for projecting abundances,
#' with a global alpha and no covariate effects
#'
#' @param lambda named numeric lambda value.
#' @param alpha_intra included for compatibility, not used in this model.
#' @param alpha_inter single numeric value; the global alpha applied to
#'   every species' abundance.
#' @param lambda_cov included for compatibility, not used in this model.
#' @param alpha_cov included for compatibility, not used in this model.
#' @param abundance named numeric vector of abundances in the previous timestep.
#' @param covariates included for compatibility, not used in this model.
#'
#' @return numeric abundance projected one timestep
#' @export
LW_project_alpha_global_lambdacov_none_alphacov_none <- function(lambda,
                                                                 alpha_intra,
                                                                 alpha_inter,
                                                                 lambda_cov,
                                                                 alpha_cov,
                                                                 abundance,
                                                                 covariates){
  alpha <- alpha_inter
  # Competition term: 1 plus the alpha-exponentiated abundance of every
  # species. Vectorized replacement of the original 1:length(abundance)
  # accumulation loop; also safe for a zero-length abundance vector, where
  # 1:length() would have iterated over c(1, 0).
  term <- 1 + sum(abundance^alpha)
  # NOTE(review): Law-Watkinson growth is conventionally a ratio,
  # lambda * N / (1 + sum(N^alpha)); the additive (lambda + term) form is
  # kept here byte-for-byte to preserve the original behavior -- confirm
  # against the intended model definition.
  expected_abund <- (lambda + term) * abundance[names(lambda)]
  expected_abund
}
/R/LW_project_alpha_global_lambdacov_none_alphacov_none.R
permissive
RadicalCommEcol/cxr
R
false
false
1,456
r
#' Law-Watkinson model for projecting abundances,
#' with a global alpha and no covariate effects
#'
#' @param lambda named numeric lambda value.
#' @param alpha_intra included for compatibility, not used in this model.
#' @param alpha_inter single numeric value; the global alpha applied to
#'   every species' abundance.
#' @param lambda_cov included for compatibility, not used in this model.
#' @param alpha_cov included for compatibility, not used in this model.
#' @param abundance named numeric vector of abundances in the previous timestep.
#' @param covariates included for compatibility, not used in this model.
#'
#' @return numeric abundance projected one timestep
#' @export
LW_project_alpha_global_lambdacov_none_alphacov_none <- function(lambda,
                                                                 alpha_intra,
                                                                 alpha_inter,
                                                                 lambda_cov,
                                                                 alpha_cov,
                                                                 abundance,
                                                                 covariates){
  alpha <- alpha_inter
  # Competition term: 1 plus the alpha-exponentiated abundance of every
  # species. Vectorized replacement of the original 1:length(abundance)
  # accumulation loop; also safe for a zero-length abundance vector, where
  # 1:length() would have iterated over c(1, 0).
  term <- 1 + sum(abundance^alpha)
  # NOTE(review): Law-Watkinson growth is conventionally a ratio,
  # lambda * N / (1 + sum(N^alpha)); the additive (lambda + term) form is
  # kept here byte-for-byte to preserve the original behavior -- confirm
  # against the intended model definition.
  expected_abund <- (lambda + term) * abundance[names(lambda)]
  expected_abund
}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/update_study_annotations.R \name{.update_view_data} \alias{.update_view_data} \title{Replace/update table contents = input data must have ROW_ID, ROW_VERSION, ETAG columns to update.} \usage{ .update_view_data(table_id, new_data) } \arguments{ \item{table_id}{The synapse id of the table to update.} \item{new_data}{The updated table.} } \description{ Replace/update table contents = input data must have ROW_ID, ROW_VERSION, ETAG columns to update. }
/man/dot-update_view_data.Rd
permissive
BrunoGrandePhD/nfportalutils
R
false
true
531
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/update_study_annotations.R \name{.update_view_data} \alias{.update_view_data} \title{Replace/update table contents = input data must have ROW_ID, ROW_VERSION, ETAG columns to update.} \usage{ .update_view_data(table_id, new_data) } \arguments{ \item{table_id}{The synapse id of the table to update.} \item{new_data}{The updated table.} } \description{ Replace/update table contents = input data must have ROW_ID, ROW_VERSION, ETAG columns to update. }
# Load the database from the Excel workbook (sheet "Datos").
library(readxl)
BD <- read_excel("Datos final.xlsx", sheet = "Datos")

# Build the final-period column: rows are sorted by final_time and the
# period counter p increases whenever consecutive rows fall on consecutive
# days and the current day of week is 1 or 4.
library(lubridate)
BD<-BD[order(BD$final_time),]
p=2
for (i in 1:length(BD$final_time)){
  BD$Periodo_final[i]<-p
  # NOTE(review): BD$final_time[i+1] reads one past the end on the last
  # iteration, making these comparisons NA -- confirm the loop bound.
  if(yday(BD$final_time[i+1])-yday(BD$final_time[i])==1 && wday(BD$final_time[i])==1) p=p+1
  if(yday(BD$final_time[i+1])-yday(BD$final_time[i])==1 && wday(BD$final_time[i])==4) p=p+1
}
BD$Periodo_final[1]<-1

# Drop outlier rows: keep everything before the first positive outlier flag.
BD<-BD[order(BD$outlier),]
BD<-BD[1:(min(which(BD$outlier>0))-1),]

# Keep only the columns of interest
# (child_id, route_id, child_route_num, difference, Periodo_final).
BD<-BD[,c(14,17,18,22,29)]

# Add per-route difference and observation-indicator columns.
BD$obs1<-0
BD$obs2<-0
BD$obs3<-0
BD$obs4<-0
BD$diff1<-0
BD$diff2<-0
BD$diff3<-0
BD$diff4<-0
# Spread each row's difference into the column matching its route_id
# and flag the observation for that route.
for(i in 1:length(BD$child_id)){
  if(BD$route_id[i]==1){
    BD$diff1[i]<-BD$difference[i]
    BD$obs1[i]<-1
  }
  if(BD$route_id[i]==2){
    BD$diff2[i]<-BD$difference[i]
    BD$obs2[i]<-1
  }
  if(BD$route_id[i]==3){
    BD$diff3[i]<-BD$difference[i]
    BD$obs3[i]<-1
  }
  if(BD$route_id[i]==4){
    BD$diff4[i]<-BD$difference[i]
    BD$obs4[i]<-1
  }
}

# Vector of unique clients.
library(dplyr)
clientes<-distinct(BD,child_id)

# Initial parameters for theta, sigma, xi and thetar
# (standard deviations derived from the shape/scale pairs below).
#library(invgamma)
param1<-c(1.05,10)
param2<-c(1.05,3)
param<-c(0,sqrt(param1[2]/(param1[1]-1)),sqrt(param2[2]/(param2[1]-1)),0)

# Build the empty forecast table: a sentinel row of zeros, then one block
# of (max period + 1) NA rows per client, appended in the loop below.
Base_pronostico<-data.frame(client=0,route_num=0,period=0,route=0,count_period=0,diff_1=0,diff_2=0,diff_3=0,diff_4=0,mean_rhat=0, max_rhat=0,theta=0,se_mean=0,sigma=0, xi=0,theta_1=0,theta_2=0,theta_3=0,theta_4=0)
for (j in clientes$child_id){
  l<-max(BD$Periodo_final)+1
  Base_pronostico<-rbind(Base_pronostico,list(rep(j,l),rep(NA,l),(1:l),rep(NA,l),rep(NA,l),rep(NA,l),rep(NA,l),rep(NA,l),rep(NA,l),rep(NA,l),rep(NA,l),rep(NA,l),rep(NA,l),rep(NA,l),rep(NA,l),rep(NA,l),rep(NA,l),rep(NA,l),rep(NA,l)))
}
# Drop the sentinel zero row.
Base_pronostico<-Base_pronostico[-1,]

# Counters used by the downstream server script.
f<-0
count_clientes<- 0

# Persist the whole workspace for the server run.
save.image(file =
'mydata_server_9.RData')
/BD vacia.R
no_license
AngelicaValdes/Tesis
R
false
false
2,141
r
# Load the database from the Excel workbook (sheet "Datos").
library(readxl)
BD <- read_excel("Datos final.xlsx", sheet = "Datos")

# Build the final-period column: rows are sorted by final_time and the
# period counter p increases whenever consecutive rows fall on consecutive
# days and the current day of week is 1 or 4.
library(lubridate)
BD<-BD[order(BD$final_time),]
p=2
for (i in 1:length(BD$final_time)){
  BD$Periodo_final[i]<-p
  # NOTE(review): BD$final_time[i+1] reads one past the end on the last
  # iteration, making these comparisons NA -- confirm the loop bound.
  if(yday(BD$final_time[i+1])-yday(BD$final_time[i])==1 && wday(BD$final_time[i])==1) p=p+1
  if(yday(BD$final_time[i+1])-yday(BD$final_time[i])==1 && wday(BD$final_time[i])==4) p=p+1
}
BD$Periodo_final[1]<-1

# Drop outlier rows: keep everything before the first positive outlier flag.
BD<-BD[order(BD$outlier),]
BD<-BD[1:(min(which(BD$outlier>0))-1),]

# Keep only the columns of interest
# (child_id, route_id, child_route_num, difference, Periodo_final).
BD<-BD[,c(14,17,18,22,29)]

# Add per-route difference and observation-indicator columns.
BD$obs1<-0
BD$obs2<-0
BD$obs3<-0
BD$obs4<-0
BD$diff1<-0
BD$diff2<-0
BD$diff3<-0
BD$diff4<-0
# Spread each row's difference into the column matching its route_id
# and flag the observation for that route.
for(i in 1:length(BD$child_id)){
  if(BD$route_id[i]==1){
    BD$diff1[i]<-BD$difference[i]
    BD$obs1[i]<-1
  }
  if(BD$route_id[i]==2){
    BD$diff2[i]<-BD$difference[i]
    BD$obs2[i]<-1
  }
  if(BD$route_id[i]==3){
    BD$diff3[i]<-BD$difference[i]
    BD$obs3[i]<-1
  }
  if(BD$route_id[i]==4){
    BD$diff4[i]<-BD$difference[i]
    BD$obs4[i]<-1
  }
}

# Vector of unique clients.
library(dplyr)
clientes<-distinct(BD,child_id)

# Initial parameters for theta, sigma, xi and thetar
# (standard deviations derived from the shape/scale pairs below).
#library(invgamma)
param1<-c(1.05,10)
param2<-c(1.05,3)
param<-c(0,sqrt(param1[2]/(param1[1]-1)),sqrt(param2[2]/(param2[1]-1)),0)

# Build the empty forecast table: a sentinel row of zeros, then one block
# of (max period + 1) NA rows per client, appended in the loop below.
Base_pronostico<-data.frame(client=0,route_num=0,period=0,route=0,count_period=0,diff_1=0,diff_2=0,diff_3=0,diff_4=0,mean_rhat=0, max_rhat=0,theta=0,se_mean=0,sigma=0, xi=0,theta_1=0,theta_2=0,theta_3=0,theta_4=0)
for (j in clientes$child_id){
  l<-max(BD$Periodo_final)+1
  Base_pronostico<-rbind(Base_pronostico,list(rep(j,l),rep(NA,l),(1:l),rep(NA,l),rep(NA,l),rep(NA,l),rep(NA,l),rep(NA,l),rep(NA,l),rep(NA,l),rep(NA,l),rep(NA,l),rep(NA,l),rep(NA,l),rep(NA,l),rep(NA,l),rep(NA,l),rep(NA,l),rep(NA,l)))
}
# Drop the sentinel zero row.
Base_pronostico<-Base_pronostico[-1,]

# Counters used by the downstream server script.
f<-0
count_clientes<- 0

# Persist the whole workspace for the server run.
save.image(file =
'mydata_server_9.RData')
### Show rows from a database table.
###
### table:  name of the table to query.
### fields: comma-separated column list; when empty (the default),
###         all columns are selected.
###
### Returns the query result as a data frame. The connection obtained
### from conn() is always closed, even when the query fails.
showdata <- function(table, fields = "") {
  con <- conn()
  # Close the connection on every exit path; the original only called
  # dbDisconnect() after a successful query, leaking the connection
  # whenever dbGetQuery() errored.
  on.exit(dbDisconnect(con), add = TRUE)
  # A scalar condition wants plain if/else, not the vectorized ifelse()
  # (which also eagerly evaluates both branches).
  sql <- if (nchar(fields) == 0) {
    paste0("select * from ", table)
  } else {
    paste0("select ", fields, " from ", table)
  }
  # NOTE(review): the SQL is built by string concatenation; `table` and
  # `fields` must come from trusted code, otherwise this is injectable.
  dbGetQuery(con, sql)
}
/R/showdata.R
no_license
trinhdangmau/bio-labs
R
false
false
343
r
### Show rows from a database table.
###
### table:  name of the table to query.
### fields: comma-separated column list; when empty (the default),
###         all columns are selected.
###
### Returns the query result as a data frame. The connection obtained
### from conn() is always closed, even when the query fails.
showdata <- function(table, fields = "") {
  con <- conn()
  # Close the connection on every exit path; the original only called
  # dbDisconnect() after a successful query, leaking the connection
  # whenever dbGetQuery() errored.
  on.exit(dbDisconnect(con), add = TRUE)
  # A scalar condition wants plain if/else, not the vectorized ifelse()
  # (which also eagerly evaluates both branches).
  sql <- if (nchar(fields) == 0) {
    paste0("select * from ", table)
  } else {
    paste0("select ", fields, " from ", table)
  }
  # NOTE(review): the SQL is built by string concatenation; `table` and
  # `fields` must come from trusted code, otherwise this is injectable.
  dbGetQuery(con, sql)
}
#########
# Function to run ODEs
# with random generation of parameters
# raina & christina
########

# Data frame of random (Latin hypercube) parameter sets, one row per run.
s1 <- read.csv("sensitivity/lhc/lhc_humans_dd_R0vary_phi0.9.csv", header = TRUE)
param.list <- apply(s1, 1, as.list)  # convert each row into a named parameter list
times <- seq(0, 20, by = 0.01)       # time steps are constant for each run

# Cluster version of the run function.
#
# x:     a named list of model parameters (one element of param.list)
# times: numeric vector of time points to solve over
#
# Returns a list with the final state (S/I/R for both host classes .c and
# .m), the final prevalence in each class, the number of solved time steps,
# the trapezoid-rule area under each prevalence curve, and the peak
# prevalence in each class.
super_ode <- function(x, times){
  library(deSolve)  # ode() solver
  library(pracma)   # trapz() for area under the prevalence curves

  # Two-host SIR-type model; births are density dependent and occur only
  # while the class is under its habitat-split carrying capacity
  # (phi = fraction of habitat for the .m class).
  core.matrix.model <- function(Time, State, Parameters) {
    with(as.list(c(State, Parameters)), {
      N.c = S.c + I.c + R.c
      N.m = S.m + I.m + R.m
      dS.c <- ifelse((1 - N.c/(k.c*(1.01-phi))) > 0,  # IF UNDER CARRYING CAPACITY THEN..
                     ((N.c)*(b.c)*(1 - N.c/(k.c*(1.01-phi))) - ((beta.c*S.c*I.c)/N.c^kappa + (epsilon*psi*beta.m*S.c*I.m)/(N.c+epsilon*N.m)^kappa) - d.c*S.c + sigma.c*R.c),
                     # ELSE... NO BIRTHS
                     (- ((beta.c*S.c*I.c)/N.c^kappa + (epsilon*psi*beta.m*S.c*I.m)/(N.c+epsilon*N.m)^kappa) - d.c*S.c + sigma.c*R.c))
      # BUG FIX: the cross-species infection term entering I.c used
      # epsilon*phi*beta.m while the matching loss term in dS.c (and both
      # .m equations) uses epsilon*psi*beta.m, so infections removed from
      # S.c did not equal infections added to I.c. psi is used consistently.
      dI.c <- (beta.c*S.c*I.c)/N.c^kappa + (epsilon*psi*beta.m*S.c*I.m)/(N.c+epsilon*N.m)^kappa - I.c*(alpha2.c+d.c+gamma.c)
      dR.c <- I.c*gamma.c - R.c*(d.c+sigma.c)
      dS.m <- ifelse((1 - N.m/(k.m*phi)) > 0,
                     ((N.m)*(b.m)*(1 - N.m/(k.m*phi)) - ((beta.m*S.m*I.m)/N.m^kappa + (epsilon*psi*beta.c*S.m*I.c)/(epsilon*N.c+N.m)^kappa) - d.m*S.m + sigma.m*R.m),
                     - ((beta.m*S.m*I.m)/N.m^kappa + (epsilon*psi*beta.c*S.m*I.c)/(epsilon*N.c+N.m)^kappa) - d.m*S.m + sigma.m*R.m)
      dI.m <- ((beta.m*S.m*I.m)/N.m^kappa + (epsilon*psi*beta.c*S.m*I.c)/(epsilon*N.c+N.m)^kappa) - I.m*(alpha2.m+d.m+gamma.m)
      dR.m <- I.m*gamma.m - R.m*(d.m+sigma.m)
      return(list(c(dS.c, dI.c, dR.c, dS.m, dI.m, dR.m)))
    })
  }

  # Start with one infected .c individual and both populations at 90% of
  # their habitat-split carrying capacities.
  initial.values = c(S.c=(1.01-x[['phi']])*x[['k.c']]*0.9, I.c=1, R.c=0,
                     S.m=x[['phi']]*x[['k.m']]*0.9, I.m=0, R.m=0)
  out = as.data.frame(ode(func=core.matrix.model, y=initial.values,
                          parms=x, times=times, method = 'ode45'))

  # The solver may stop early, so summarize at the last solved step.
  # (Local renamed from `max` to avoid shadowing base::max.)
  n.last <- min(c(nrow(out), length(times)))
  out.vec = c(out[n.last, 2:7],
              (out[n.last,3]/(out[n.last,2]+out[n.last,3]+out[n.last,4])),  # final .c prevalence
              (out[n.last,6]/(out[n.last,5]+out[n.last,6]+out[n.last,7])),  # final .m prevalence
              n.last,
              trapz(out$time, out[,3]/(out[,2]+out[,3]+out[,4])),  # AUC of .c prevalence
              trapz(out$time, out[,6]/(out[,5]+out[,6]+out[,7])),  # AUC of .m prevalence
              max(out[,3]/(out[,2]+out[,3]+out[,4])),              # peak .c prevalence
              max(out[,6]/(out[,5]+out[,6]+out[,7]))               # peak .m prevalence
  )
  return(out.vec)
}
#super_ode(param.list[[100]],times)
/sensitivity/sensitivity_func_cluster_SI.R
no_license
cfaustus/core_matrix_publish
R
false
false
2,716
r
#########
# Function to run ODEs
# with random generation of parameters
# raina & christina
########

# Data frame of random (Latin hypercube) parameter sets, one row per run.
s1 <- read.csv("sensitivity/lhc/lhc_humans_dd_R0vary_phi0.9.csv", header = TRUE)
param.list <- apply(s1, 1, as.list)  # convert each row into a named parameter list
times <- seq(0, 20, by = 0.01)       # time steps are constant for each run

# Cluster version of the run function.
#
# x:     a named list of model parameters (one element of param.list)
# times: numeric vector of time points to solve over
#
# Returns a list with the final state (S/I/R for both host classes .c and
# .m), the final prevalence in each class, the number of solved time steps,
# the trapezoid-rule area under each prevalence curve, and the peak
# prevalence in each class.
super_ode <- function(x, times){
  library(deSolve)  # ode() solver
  library(pracma)   # trapz() for area under the prevalence curves

  # Two-host SIR-type model; births are density dependent and occur only
  # while the class is under its habitat-split carrying capacity
  # (phi = fraction of habitat for the .m class).
  core.matrix.model <- function(Time, State, Parameters) {
    with(as.list(c(State, Parameters)), {
      N.c = S.c + I.c + R.c
      N.m = S.m + I.m + R.m
      dS.c <- ifelse((1 - N.c/(k.c*(1.01-phi))) > 0,  # IF UNDER CARRYING CAPACITY THEN..
                     ((N.c)*(b.c)*(1 - N.c/(k.c*(1.01-phi))) - ((beta.c*S.c*I.c)/N.c^kappa + (epsilon*psi*beta.m*S.c*I.m)/(N.c+epsilon*N.m)^kappa) - d.c*S.c + sigma.c*R.c),
                     # ELSE... NO BIRTHS
                     (- ((beta.c*S.c*I.c)/N.c^kappa + (epsilon*psi*beta.m*S.c*I.m)/(N.c+epsilon*N.m)^kappa) - d.c*S.c + sigma.c*R.c))
      # BUG FIX: the cross-species infection term entering I.c used
      # epsilon*phi*beta.m while the matching loss term in dS.c (and both
      # .m equations) uses epsilon*psi*beta.m, so infections removed from
      # S.c did not equal infections added to I.c. psi is used consistently.
      dI.c <- (beta.c*S.c*I.c)/N.c^kappa + (epsilon*psi*beta.m*S.c*I.m)/(N.c+epsilon*N.m)^kappa - I.c*(alpha2.c+d.c+gamma.c)
      dR.c <- I.c*gamma.c - R.c*(d.c+sigma.c)
      dS.m <- ifelse((1 - N.m/(k.m*phi)) > 0,
                     ((N.m)*(b.m)*(1 - N.m/(k.m*phi)) - ((beta.m*S.m*I.m)/N.m^kappa + (epsilon*psi*beta.c*S.m*I.c)/(epsilon*N.c+N.m)^kappa) - d.m*S.m + sigma.m*R.m),
                     - ((beta.m*S.m*I.m)/N.m^kappa + (epsilon*psi*beta.c*S.m*I.c)/(epsilon*N.c+N.m)^kappa) - d.m*S.m + sigma.m*R.m)
      dI.m <- ((beta.m*S.m*I.m)/N.m^kappa + (epsilon*psi*beta.c*S.m*I.c)/(epsilon*N.c+N.m)^kappa) - I.m*(alpha2.m+d.m+gamma.m)
      dR.m <- I.m*gamma.m - R.m*(d.m+sigma.m)
      return(list(c(dS.c, dI.c, dR.c, dS.m, dI.m, dR.m)))
    })
  }

  # Start with one infected .c individual and both populations at 90% of
  # their habitat-split carrying capacities.
  initial.values = c(S.c=(1.01-x[['phi']])*x[['k.c']]*0.9, I.c=1, R.c=0,
                     S.m=x[['phi']]*x[['k.m']]*0.9, I.m=0, R.m=0)
  out = as.data.frame(ode(func=core.matrix.model, y=initial.values,
                          parms=x, times=times, method = 'ode45'))

  # The solver may stop early, so summarize at the last solved step.
  # (Local renamed from `max` to avoid shadowing base::max.)
  n.last <- min(c(nrow(out), length(times)))
  out.vec = c(out[n.last, 2:7],
              (out[n.last,3]/(out[n.last,2]+out[n.last,3]+out[n.last,4])),  # final .c prevalence
              (out[n.last,6]/(out[n.last,5]+out[n.last,6]+out[n.last,7])),  # final .m prevalence
              n.last,
              trapz(out$time, out[,3]/(out[,2]+out[,3]+out[,4])),  # AUC of .c prevalence
              trapz(out$time, out[,6]/(out[,5]+out[,6]+out[,7])),  # AUC of .m prevalence
              max(out[,3]/(out[,2]+out[,3]+out[,4])),              # peak .c prevalence
              max(out[,6]/(out[,5]+out[,6]+out[,7]))               # peak .m prevalence
  )
  return(out.vec)
}
#super_ode(param.list[[100]],times)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reorder.R \name{ran} \alias{ran} \title{Derives the range of a vector} \usage{ ran(x) } \arguments{ \item{x}{vector with unknown range} } \value{ the range of the vector } \description{ Derives the range of a vector } \examples{ custom_vector = c(5,2,7,9,4) ran(custom_vector) }
/man/ran.Rd
no_license
dzimmermann-tgm/yinyang
R
false
true
358
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reorder.R \name{ran} \alias{ran} \title{Derives the range of a vector} \usage{ ran(x) } \arguments{ \item{x}{vector with unknown range} } \value{ the range of the vector } \description{ Derives the range of a vector } \examples{ custom_vector = c(5,2,7,9,4) ran(custom_vector) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/print.svyContTable.R \name{print.svyContTable} \alias{print.svyContTable} \title{Format and print \code{svyContTable} class objects} \usage{ \method{print}{svyContTable}( x, digits = 2, pDigits = 3, quote = FALSE, missing = FALSE, explain = TRUE, printToggle = TRUE, noSpaces = FALSE, nonnormal = NULL, minMax = FALSE, insertLevel = FALSE, test = TRUE, smd = FALSE, ... ) } \arguments{ \item{x}{Object returned by \code{\link{svyCreateContTable}} function.} \item{digits}{Number of digits to print in the table.} \item{pDigits}{Number of digits to print for p-values (also used for standardized mean differences).} \item{quote}{Whether to show everything in quotes. The default is FALSE. If TRUE, everything including the row and column names are quoted so that you can copy it to Excel easily.} \item{missing}{Whether to show missing data information.} \item{explain}{Whether to add explanation to the variable names, i.e., (mean (SD) or median [IQR]) is added to the variable names.} \item{printToggle}{Whether to print the output. If FALSE, no output is created, and a matrix is invisibly returned.} \item{noSpaces}{Whether to remove spaces added for alignment. Use this option if you prefer to align numbers yourself in other software.} \item{nonnormal}{A character vector to specify the variables for which the p-values should be those of nonparametric tests. By default all p-values are from normal assumption-based tests (oneway.test).} \item{minMax}{Whether to use [min,max] instead of [p25,p75] for nonnormal variables. The default is FALSE.} \item{insertLevel}{Whether to add an empty level column to the left of strata.} \item{test}{Whether to show p-values. TRUE by default. If FALSE, only the numerical summaries are shown.} \item{smd}{Whether to show standardized mean differences. FALSE by default. If there are more than one contrasts, the average of all possible standardized mean differences is shown. 
For individual contrasts, use \code{summary}.}

\item{...}{For compatibility with generic. Ignored.}
}
\value{
A matrix object containing what you see is also invisibly returned. This can
be assigned a name and exported via \code{write.csv}.
}
\description{
\code{print} method for the \code{svyContTable} class objects created by
\code{\link{CreateContTable}} function.
}
\examples{
## See the examples for svyCreateTableOne()
}
\seealso{
\code{\link{svyCreateTableOne}}, \code{\link{svyCreateCatTable}},
\code{\link{summary.svyCatTable}}
}
\author{
Kazuki Yoshida
}
/man/print.svyContTable.Rd
no_license
JhuangLabTools/tableone
R
false
true
2,608
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/print.svyContTable.R \name{print.svyContTable} \alias{print.svyContTable} \title{Format and print \code{svyContTable} class objects} \usage{ \method{print}{svyContTable}( x, digits = 2, pDigits = 3, quote = FALSE, missing = FALSE, explain = TRUE, printToggle = TRUE, noSpaces = FALSE, nonnormal = NULL, minMax = FALSE, insertLevel = FALSE, test = TRUE, smd = FALSE, ... ) } \arguments{ \item{x}{Object returned by \code{\link{svyCreateContTable}} function.} \item{digits}{Number of digits to print in the table.} \item{pDigits}{Number of digits to print for p-values (also used for standardized mean differences).} \item{quote}{Whether to show everything in quotes. The default is FALSE. If TRUE, everything including the row and column names are quoted so that you can copy it to Excel easily.} \item{missing}{Whether to show missing data information.} \item{explain}{Whether to add explanation to the variable names, i.e., (mean (SD) or median [IQR]) is added to the variable names.} \item{printToggle}{Whether to print the output. If FALSE, no output is created, and a matrix is invisibly returned.} \item{noSpaces}{Whether to remove spaces added for alignment. Use this option if you prefer to align numbers yourself in other software.} \item{nonnormal}{A character vector to specify the variables for which the p-values should be those of nonparametric tests. By default all p-values are from normal assumption-based tests (oneway.test).} \item{minMax}{Whether to use [min,max] instead of [p25,p75] for nonnormal variables. The default is FALSE.} \item{insertLevel}{Whether to add an empty level column to the left of strata.} \item{test}{Whether to show p-values. TRUE by default. If FALSE, only the numerical summaries are shown.} \item{smd}{Whether to show standardized mean differences. FALSE by default. If there are more than one contrasts, the average of all possible standardized mean differences is shown. 
For individual contrasts, use \code{summary}.}

\item{...}{For compatibility with generic. Ignored.}
}
\value{
A matrix object containing what you see is also invisibly returned. This can
be assigned a name and exported via \code{write.csv}.
}
\description{
\code{print} method for the \code{svyContTable} class objects created by
\code{\link{CreateContTable}} function.
}
\examples{
## See the examples for svyCreateTableOne()
}
\seealso{
\code{\link{svyCreateTableOne}}, \code{\link{svyCreateCatTable}},
\code{\link{summary.svyCatTable}}
}
\author{
Kazuki Yoshida
}
# Caching the inverse of a matrix. See test_cachematrix.R for some unit tests.

# Wrap a matrix in a closure that can memoize its inverse.
# Returns a list of accessor functions sharing the matrix `x` and the
# cached inverse via their enclosing environment.
makeCacheMatrix <- function(x = matrix()) {
  # Cache slot for the inverse; NULL means "not computed yet".
  cached_inverse <- NULL
  list(
    # Read the wrapped matrix.
    get = function() x,
    # Replace the wrapped matrix and invalidate the stale inverse.
    set = function(new_x) {
      x <<- new_x
      cached_inverse <<- NULL
    },
    # Read the cached inverse (NULL when not yet computed).
    getInverse = function() cached_inverse,
    # Store a freshly computed inverse.
    setInverse = function(new_x_inv) cached_inverse <<- new_x_inv
  )
}

# Compute (or fetch from cache) the inverse of the special "matrix"
# produced by makeCacheMatrix. Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getInverse()
  if (!is.null(cached)) {
    # The cached value is being used, inform the user.
    message("returning cached inverse")
    return(cached)
  }
  # First request since the matrix was (re)set: compute and remember it.
  message("computing matrix inverse")
  result <- solve(x$get(), ...)
  x$setInverse(result)
  result
}
/cachematrix.R
no_license
yasserglez/ProgrammingAssignment2
R
false
false
1,783
r
# Caching the inverse of a matrix. See test_cachematrix.R for some unit tests.

# Wrap a matrix in a closure that can memoize its inverse.
# Returns a list of accessor functions sharing the matrix `x` and the
# cached inverse via their enclosing environment.
makeCacheMatrix <- function(x = matrix()) {
  # Cache slot for the inverse; NULL means "not computed yet".
  cached_inverse <- NULL
  list(
    # Read the wrapped matrix.
    get = function() x,
    # Replace the wrapped matrix and invalidate the stale inverse.
    set = function(new_x) {
      x <<- new_x
      cached_inverse <<- NULL
    },
    # Read the cached inverse (NULL when not yet computed).
    getInverse = function() cached_inverse,
    # Store a freshly computed inverse.
    setInverse = function(new_x_inv) cached_inverse <<- new_x_inv
  )
}

# Compute (or fetch from cache) the inverse of the special "matrix"
# produced by makeCacheMatrix. Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getInverse()
  if (!is.null(cached)) {
    # The cached value is being used, inform the user.
    message("returning cached inverse")
    return(cached)
  }
  # First request since the matrix was (re)set: compute and remember it.
  message("computing matrix inverse")
  result <- solve(x$get(), ...)
  x$setInverse(result)
  result
}
## Put comments here that give an overall description of what your ## functions that cache the inverse of a matrix ## This function creates a special kind of matrix object that can cache its inverse makeCacheMatrix <- function(x=matrix()){ ## Initializing the inverse property inv <- NULL ## Method to set the matrix set <- function(y){ matrix <<- y inv <<- NULL } ## Method to get the matrix get <- function(){ ## returning matrix matrix } ## Method to set the inverse of the matrix setInverse <- function(inverse) { ## storing inverse inv <<- inverse } ## Method to get the inverse of the matrix getInverse <- function() { ## returns the inverse inv } ## Returns the list of methods list(set = set, get = get, setInverse = setInverse, getInverse = getInverse) } ## This function computes the inverse of the special matrix returned by the "makeCacheMatrix" function. ## - If the inverse has already been calculated (and the matrix has not changed), then the "cachesolve" should retrieve the inverse from the cache. cacheSolve <- function(x, ...) { ## getting a matrix that is the inverse of 'x' inv <- x$getInverse() ## returns if the inverse has already been calculated (i.e. if !is.null(m)==TRUE) if(!is.null(inv)) { message("getting cached data") return(inv) } ## getting the matrix from our object data <- x$get() ## calculating the inverse by using matrix multiplication m <- solve(data) %*% data ## storing the inverse to the object to future usage x$setInverse(m) ## returning a matrix that is the inverse of 'x' m }
/cachematrix.R
no_license
pc1pranav/ProgrammingAssignment2
R
false
false
2,051
r
## Put comments here that give an overall description of what your ## functions that cache the inverse of a matrix ## This function creates a special kind of matrix object that can cache its inverse makeCacheMatrix <- function(x=matrix()){ ## Initializing the inverse property inv <- NULL ## Method to set the matrix set <- function(y){ matrix <<- y inv <<- NULL } ## Method to get the matrix get <- function(){ ## returning matrix matrix } ## Method to set the inverse of the matrix setInverse <- function(inverse) { ## storing inverse inv <<- inverse } ## Method to get the inverse of the matrix getInverse <- function() { ## returns the inverse inv } ## Returns the list of methods list(set = set, get = get, setInverse = setInverse, getInverse = getInverse) } ## This function computes the inverse of the special matrix returned by the "makeCacheMatrix" function. ## - If the inverse has already been calculated (and the matrix has not changed), then the "cachesolve" should retrieve the inverse from the cache. cacheSolve <- function(x, ...) { ## getting a matrix that is the inverse of 'x' inv <- x$getInverse() ## returns if the inverse has already been calculated (i.e. if !is.null(m)==TRUE) if(!is.null(inv)) { message("getting cached data") return(inv) } ## getting the matrix from our object data <- x$get() ## calculating the inverse by using matrix multiplication m <- solve(data) %*% data ## storing the inverse to the object to future usage x$setInverse(m) ## returning a matrix that is the inverse of 'x' m }
#' SPARQL As A Backend For DBI #' #' A simple implementation of R's DBI Interface. This provides access to SPARQL via DBI. #' #' @name SPARQLDBI #' @docType package #' @import methods DBI #' @importFrom dplyr tbl_df NULL
/R/SPARQLDBI-package.R
no_license
yutannihilation/SPARQLDBI
R
false
false
221
r
#' SPARQL As A Backend For DBI #' #' A simple implementation of R's DBI Interface. This provides access to SPARQL via DBI. #' #' @name SPARQLDBI #' @docType package #' @import methods DBI #' @importFrom dplyr tbl_df NULL
# library required library(data.tree) ##### Manual Data upload and target variable creation ##### # both test and train should be appended into a single dataset so that the format for both datasets is consistent data<-read.csv("C:/Users/jasksing/Desktop/IN-orbit/adult-training.csv/adult-training.csv") #### Create Variable List for Discredtization #### generateVarList(data,"C:/Users/Jaskaran Singh/Desktop/Inorbit/varList1.csv") ### varScale # O = Ordinal # N = Nominal # C = Continous ### This step will generate a csv output containing all variables in the dataset. ### Please enter Y in selection for the target # Run the function to load the edited varilist back to R varlist_check(path) # divide data into test and train #dataValid=dataTrain dataTrain<-data dataValid<-data[sample(1:nrow(data),size = 10000,replace = F),] #### computing the gini index and variable imporatnce for the first split gini_index(Data = dataTrain,dataTest = dataValid,varList = varList,Node = 1 ,numeric_round_off=T,round_off_digits=2,range_for_decimal=10) # running gini for split # If your split node is 5 then the child nodes would be 2x i.e 10 and 2x+1 i.e 11 gini_split(Data = dataTrain,dataTest = dataValid,var_name = "relationship",Node = 1, mini_split=50,mini_node=100,cut_off=0.5,numeric_round_off=T,round_off_digits=2, range_for_decimal=10) # validation reults y <- as.character(varList[varList$selection%in%"T","varName"]) p<-table(dataValid[,y],dataValid[,"y_pred"]) p # Computing precision, recall and accuracy precision<-p[2,2]/(p[2,1]+p[2,2]) recall<-p[2,2]/(p[1,2]+p[2,2]) accuracy<-(p[1,1]+p[2,2])/(p[1,1]+p[1,2]+p[2,1]+p[2,2]) output <- as.data.frame(cbind(precision,recall,accuracy)) output
/Calling_Function_rpart.R
no_license
sabihul/sabih-code
R
false
false
1,793
r
# library required library(data.tree) ##### Manual Data upload and target variable creation ##### # both test and train should be appended into a single dataset so that the format for both datasets is consistent data<-read.csv("C:/Users/jasksing/Desktop/IN-orbit/adult-training.csv/adult-training.csv") #### Create Variable List for Discredtization #### generateVarList(data,"C:/Users/Jaskaran Singh/Desktop/Inorbit/varList1.csv") ### varScale # O = Ordinal # N = Nominal # C = Continous ### This step will generate a csv output containing all variables in the dataset. ### Please enter Y in selection for the target # Run the function to load the edited varilist back to R varlist_check(path) # divide data into test and train #dataValid=dataTrain dataTrain<-data dataValid<-data[sample(1:nrow(data),size = 10000,replace = F),] #### computing the gini index and variable imporatnce for the first split gini_index(Data = dataTrain,dataTest = dataValid,varList = varList,Node = 1 ,numeric_round_off=T,round_off_digits=2,range_for_decimal=10) # running gini for split # If your split node is 5 then the child nodes would be 2x i.e 10 and 2x+1 i.e 11 gini_split(Data = dataTrain,dataTest = dataValid,var_name = "relationship",Node = 1, mini_split=50,mini_node=100,cut_off=0.5,numeric_round_off=T,round_off_digits=2, range_for_decimal=10) # validation reults y <- as.character(varList[varList$selection%in%"T","varName"]) p<-table(dataValid[,y],dataValid[,"y_pred"]) p # Computing precision, recall and accuracy precision<-p[2,2]/(p[2,1]+p[2,2]) recall<-p[2,2]/(p[1,2]+p[2,2]) accuracy<-(p[1,1]+p[2,2])/(p[1,1]+p[1,2]+p[2,1]+p[2,2]) output <- as.data.frame(cbind(precision,recall,accuracy)) output
#for list.files() exam <- read.csv("csv_exam.csv") exam <- exam %>% mutate(avg = (math + english + science)/3) exam for ( i in min(exam$class):max(exam$class) ) { print(exam %>% filter(class ==i) %>% filter(avg==max(avg))) #전체 반에서 1등인 사람들 출력 #for문은 출력시 print() 필수 } #group_by exam %>% group_by(class) %>% summarise(mean = mean(avg)) list.files() subway <- read.csv("1-4호선승하차승객수.csv") View(subway) #라인별 승하차 수 구하기 subway %>% group_by(line_no) %>% summarise(total_in = sum(in.), total_out = sum(out)) # install.packages("foreign") -> 다른 형식 파일 가져오기 가능 ex) sas install.packages("foreign") install.packages("ggplot2") install.packages("dplyr") library(foreign) library(ggplot2) library(dplyr) list.files() korean <- read.spss("Koweps_hpda12_2017_beta1.sav", to.data.frame = T) #to.data.frame : spss -> data.fram형태로 변경 View(korean) korean <- korean %>% rename(gender = h12_g3, birth = h12_g4, income = h12_pers_income1, code_job = h12_eco9) #변수(컬럼)이름 변경: rename( 이름 = "" ) #형식 체크: class(korea$gender) #빈도수 체크: table(korean$gender) class(korean$gender) table(korean$gender) #남: 1, 여: 2 korean$gender <- ifelse( korean$gender == 1 ,'male', 'female') table(korean$gender) # qplot : 빈도를 시각화 하는 그래프(정규분포도) # +a : 모든 데이터들은 정규분포를 따른다. 많이 몰려 있는 수는 따로 있다 qplot(korea$income) #x축: 연봉, y축:사람수 summary(korean$income) #NA'S출력: 군데군데 비어(NULL)있는 데이터 #NA 이유1. 데이터 유실, 실수, 일부러(민감해서) => 통계불가능 => 결측치 제거 필요 #결측치는 FALSE 일 때, 데이터가 있는 상태 korean$income <- ifelse(korea$income == 0, NA, korean$income) #income에서 0 제거 korean$income #is.na: 결측치면 TRUE, 결측치 아니면 FALSE is.na(korean$income) is.na(korean$gender) # 연봉 결측치 없는 행만 출력 => 성별별 연봉 출력 korean <- korean %>% filter( !is.na(korean$income) ) korean gen_income <- korean %>% group_by(gender) %>% summarise(mean_income = mean(income)) View(gen_income) #ggplot이용해 시각화 : ggplot(data, asc(x축 = ?, y축 =?) 
+ 시각화 형식) ggplot(data = gen_income, aes(x=gender, y=mean_income)) + geom_col() #left_join 이용해 직업별 연봉 출력 code_job<- read("code_job.csv") View(code_job) korea <- left_join(korea, code_job) #left join job_income <- korea %>% filter(!is.na(job) & !is.na(income)) %>% group_by(job) %>% summarise(mean_income = mean(income)) View(job_income) Top10 <- job_income %>% arrange(desc(job_income$mean_income)) %>% head(10) #head(10)이라고 할때 출력 ggplot(data = Top10, aes(x=reorder(job,mean_income), y=mean_income)) + geom_col() + coord_flip()
/basic_step/Data_graph.R
no_license
m-cahe/r
R
false
false
2,931
r
#for list.files() exam <- read.csv("csv_exam.csv") exam <- exam %>% mutate(avg = (math + english + science)/3) exam for ( i in min(exam$class):max(exam$class) ) { print(exam %>% filter(class ==i) %>% filter(avg==max(avg))) #전체 반에서 1등인 사람들 출력 #for문은 출력시 print() 필수 } #group_by exam %>% group_by(class) %>% summarise(mean = mean(avg)) list.files() subway <- read.csv("1-4호선승하차승객수.csv") View(subway) #라인별 승하차 수 구하기 subway %>% group_by(line_no) %>% summarise(total_in = sum(in.), total_out = sum(out)) # install.packages("foreign") -> 다른 형식 파일 가져오기 가능 ex) sas install.packages("foreign") install.packages("ggplot2") install.packages("dplyr") library(foreign) library(ggplot2) library(dplyr) list.files() korean <- read.spss("Koweps_hpda12_2017_beta1.sav", to.data.frame = T) #to.data.frame : spss -> data.fram형태로 변경 View(korean) korean <- korean %>% rename(gender = h12_g3, birth = h12_g4, income = h12_pers_income1, code_job = h12_eco9) #변수(컬럼)이름 변경: rename( 이름 = "" ) #형식 체크: class(korea$gender) #빈도수 체크: table(korean$gender) class(korean$gender) table(korean$gender) #남: 1, 여: 2 korean$gender <- ifelse( korean$gender == 1 ,'male', 'female') table(korean$gender) # qplot : 빈도를 시각화 하는 그래프(정규분포도) # +a : 모든 데이터들은 정규분포를 따른다. 많이 몰려 있는 수는 따로 있다 qplot(korea$income) #x축: 연봉, y축:사람수 summary(korean$income) #NA'S출력: 군데군데 비어(NULL)있는 데이터 #NA 이유1. 데이터 유실, 실수, 일부러(민감해서) => 통계불가능 => 결측치 제거 필요 #결측치는 FALSE 일 때, 데이터가 있는 상태 korean$income <- ifelse(korea$income == 0, NA, korean$income) #income에서 0 제거 korean$income #is.na: 결측치면 TRUE, 결측치 아니면 FALSE is.na(korean$income) is.na(korean$gender) # 연봉 결측치 없는 행만 출력 => 성별별 연봉 출력 korean <- korean %>% filter( !is.na(korean$income) ) korean gen_income <- korean %>% group_by(gender) %>% summarise(mean_income = mean(income)) View(gen_income) #ggplot이용해 시각화 : ggplot(data, asc(x축 = ?, y축 =?) 
+ 시각화 형식) ggplot(data = gen_income, aes(x=gender, y=mean_income)) + geom_col() #left_join 이용해 직업별 연봉 출력 code_job<- read("code_job.csv") View(code_job) korea <- left_join(korea, code_job) #left join job_income <- korea %>% filter(!is.na(job) & !is.na(income)) %>% group_by(job) %>% summarise(mean_income = mean(income)) View(job_income) Top10 <- job_income %>% arrange(desc(job_income$mean_income)) %>% head(10) #head(10)이라고 할때 출력 ggplot(data = Top10, aes(x=reorder(job,mean_income), y=mean_income)) + geom_col() + coord_flip()
checkxes <- function(xes) { if (class(xes)[1] != "jobjRef" || ! xes %instanceof% "org.processmining.xestools.XEStools") stop("wrong value of xes argument - should be object returned by xes.init") } #' Supporting function - transform map of traces into dataframe #' @param res javaref object containing map of flatXTraces #' @return data.frame with traces createdf <- function(res) { # we will create data frame with following columns: name, duration, startTime, endTime, eventCount, resource, role, eventRepetition valuesa <- res$toArray() name <- sapply(valuesa, function(item) { .jcall(item, "S", "getConceptName") } ) duration <- sapply(valuesa, function(item) { .jcall(item, "J", "getDuration") } ) eventcount <- sapply(valuesa, function(item) { .jcall(item, "I", "getEventCount") } ) resource <- sapply(valuesa, function(item) { .jcall(item, "S", "getOrgResource") } ) role <- sapply(valuesa, function(item) { .jcall(item, "S", "getOrgRole") } ) event_repetitions <- sapply(valuesa, function(item) { .jcall(item, "I", "getEventRepetitions") } ) df <- J("java.time.format.DateTimeFormatter")$ofPattern("yyyy-MM-dd HH:mm:ss.z") ts <- sapply(valuesa, function(item) { ldt <- .jcall(item, "Ljava/time/ZonedDateTime;", "getStartTime"); .jcall(ldt, "S", "format", df) } ) start_time <- with_tz(ymd_hms(ts), tzone=Sys.timezone()) ts <- sapply(valuesa, function(item) { ldt <- .jcall(item, "Ljava/time/ZonedDateTime;", "getEndTime"); .jcall(ldt, "S", "format", df) } ) end_time <- with_tz(ymd_hms(ts), tzone = Sys.timezone()) data.frame( trace = name, duration = duration, eventcount = eventcount, resource = resource, role = role, event_repetitions = event_repetitions, start_time = start_time, end_time = end_time) } #' Create filter for xestool calls based on provided parameters #' @param resources list of resources present in trace's event to pass filter. MULTI for multiple resources per trace #' @param groups list of resources present in trace's event to pass filter. 
MULTI for multiple groups per trace #' @param roles list of resources present in trace's event to pass filter. MULTI for multiple roles per trace #' @param eventcount range of events per trace to pass filter #' @param tracestart range of trace start dates to pass filter #' @param traceend range of trace end dates to pass filter #' @param eventnames list of events trace should contain (at least one) to pass filter #' @param tracestartwday list of trace start DoW (at least one) to pass filter #' @param traceendwday list of trace end DoW (at least one) to pass filter#' #' @param transitions list of event transitions statuses to pass filter #' @param tracenames list of trace names to pass filter #' #' @return filter object (hashmap) xes.processfilter <- function(resources = NULL, groups = NULL, roles = NULL, eventcount = NULL, tracestart = NULL, traceend = NULL, eventnames = NULL, tracestartwday = NULL, traceendwday = NULL, transitions = NULL, tracenames = NULL ) { filter <- .jnew('java.util.HashMap') if ( !is.null(resources) ) { type <- J("org.processmining.xestools.XEStools")$FilterType$RESOURCE_LIST filter$put( type, J("com.google.common.collect.Lists")$newArrayList(.jarray(resources)) ) } if ( !is.null(groups) ) { type <- J("org.processmining.xestools.XEStools")$FilterType$GROUP_LIST filter$put( type, J("com.google.common.collect.Lists")$newArrayList(.jarray(groups)) ) } if ( !is.null(roles) ) { type <- J("org.processmining.xestools.XEStools")$FilterType$ROLE_LIST filter$put( type, J("com.google.common.collect.Lists")$newArrayList(.jarray(resources)) ) } if ( !is.null(eventnames) ) { type <- J("org.processmining.xestools.XEStools")$FilterType$EVENT_NAME_LIST filter$put( type, J("com.google.common.collect.Lists")$newArrayList(.jarray(eventnames)) ) } if ( !is.null(tracestartwday) ) { type <- J("org.processmining.xestools.XEStools")$FilterType$TRACE_START_WEEKDAY_LIST filter$put( type, J("com.google.common.collect.Lists")$newArrayList(.jarray(tracestartwday)) ) } if 
( !is.null(traceendwday) ) { type <- J("org.processmining.xestools.XEStools")$FilterType$TRACE_END_WEEKDAY_LIST filter$put( type, J("com.google.common.collect.Lists")$newArrayList(.jarray(traceendwday)) ) } if ( !is.null(transitions) ) { type <- J("org.processmining.xestools.XEStools")$FilterType$LIFECYCLE_TRANSITION_LIST filter$put( type, J("com.google.common.collect.Lists")$newArrayList(.jarray(transitions)) ) } if ( !is.null(tracenames) ) { type <- J("org.processmining.xestools.XEStools")$FilterType$TRACE_NAME_LIST filter$put( type, J("com.google.common.collect.Lists")$newArrayList(.jarray(tracenames)) ) } if (!is.null(eventcount)) { type <- J("org.processmining.xestools.XEStools")$FilterType$EVENT_COUNT_RANGE if ( "min" %in% names(eventcount) && "max" %in% names(eventcount)) { min <- new (J("java.lang.Integer"), as.character(eventcount["min"])) max <- new (J("java.lang.Integer"), as.character(eventcount["max"])) range <- .jcall(J("com.google.common.collect.Range"), "Lcom/google/common/collect/Range;", "closed", .jcast(min, "java.lang.Comparable"), .jcast(max, "java.lang.Comparable") ) filter$put(type, range) } else if ("min" %in% names(eventcount) && !"max" %in% names(eventcount)) { min <- new (J("java.lang.Integer"), as.character(eventcount["min"])) range <- .jcall(J("com.google.common.collect.Range"), "Lcom/google/common/collect/Range;", "atLeast", .jcast(min, "java.lang.Comparable") ) filter$put(type, range) } else if (!"min" %in% names(eventcount) && "max" %in% names(eventcount)) { max <- new (J("java.lang.Integer"), as.character(eventcount["max"])) range <- .jcall(J("com.google.common.collect.Range"), "Lcom/google/common/collect/Range;", "atMost", .jcast(max, "java.lang.Comparable") ) filter$put(type, range) } } if (!is.null(tracestart)) { type <- J("org.processmining.xestools.XEStools")$FilterType$TRACE_START_RANGE jzd <- J("java.time.ZonedDateTime") if ( "min" %in% names(tracestart) && "max" %in% names(tracestart)) { min <- jzd$parse(sub("(\\d\\d)$", 
":\\1", format(tracestart["min"],"%Y-%m-%dT%H:%M:%S.000%z"))) max <- jzd$parse(sub("(\\d\\d)$", ":\\1", format(tracestart["max"],"%Y-%m-%dT%H:%M:%S.000%z"))) range <- .jcall(J("com.google.common.collect.Range"), "Lcom/google/common/collect/Range;", "closed", .jcast(min, "java.lang.Comparable"), .jcast(max, "java.lang.Comparable") ) filter$put(type, range) } else if ("min" %in% names(tracestart) && !"max" %in% names(tracestart)) { min <- jzd$parse(sub("(\\d\\d)$", ":\\1", format(tracestart["min"],"%Y-%m-%dT%H:%M:%S.000%z"))) range <- .jcall(J("com.google.common.collect.Range"), "Lcom/google/common/collect/Range;", "atLeast", .jcast(min, "java.lang.Comparable") ) filter$put(type, range) } else if (!"min" %in% names(tracestart) && "max" %in% names(tracestart)) { max <- jzd$parse(sub("(\\d\\d)$", ":\\1", format(tracestart["max"],"%Y-%m-%dT%H:%M:%S.000%z"))) range <- .jcall(J("com.google.common.collect.Range"), "Lcom/google/common/collect/Range;", "atMost", .jcast(max, "java.lang.Comparable") ) filter$put(type, range) } } if (!is.null(traceend)) { type <- J("org.processmining.xestools.XEStools")$FilterType$TRACE_END_RANGE jzd <- J("java.time.ZonedDateTime") if ( "min" %in% names(traceend) && "max" %in% names(traceend)) { min <- jzd$parse(sub("(\\d\\d)$", ":\\1", format(traceend["min"],"%Y-%m-%dT%H:%M:%S.000%z"))) max <- jzd$parse(sub("(\\d\\d)$", ":\\1", format(traceend["max"],"%Y-%m-%dT%H:%M:%S.000%z"))) range <- .jcall(J("com.google.common.collect.Range"), "Lcom/google/common/collect/Range;", "closed", .jcast(min, "java.lang.Comparable"), .jcast(max, "java.lang.Comparable") ) filter$put(type, range) } else if ("min" %in% names(traceend) && !"max" %in% names(traceend)) { min <- jzd$parse(sub("(\\d\\d)$", ":\\1", format(traceend["min"],"%Y-%m-%dT%H:%M:%S.000%z"))) range <- .jcall(J("com.google.common.collect.Range"), "Lcom/google/common/collect/Range;", "atLeast", .jcast(min, "java.lang.Comparable") ) filter$put(type, range) } else if (!"min" %in% names(traceend) && 
"max" %in% names(traceend)) { max <- jzd$parse(sub("(\\d\\d)$", ":\\1", format(traceend["max"],"%Y-%m-%dT%H:%M:%S.000%z"))) range <- .jcall(J("com.google.common.collect.Range"), "Lcom/google/common/collect/Range;", "atMost", .jcast(max, "java.lang.Comparable") ) filter$put(type, range) } } return (filter) }
/R/utilities.R
no_license
nicksi/rxes
R
false
false
10,753
r
checkxes <- function(xes) { if (class(xes)[1] != "jobjRef" || ! xes %instanceof% "org.processmining.xestools.XEStools") stop("wrong value of xes argument - should be object returned by xes.init") } #' Supporting function - transform map of traces into dataframe #' @param res javaref object containing map of flatXTraces #' @return data.frame with traces createdf <- function(res) { # we will create data frame with following columns: name, duration, startTime, endTime, eventCount, resource, role, eventRepetition valuesa <- res$toArray() name <- sapply(valuesa, function(item) { .jcall(item, "S", "getConceptName") } ) duration <- sapply(valuesa, function(item) { .jcall(item, "J", "getDuration") } ) eventcount <- sapply(valuesa, function(item) { .jcall(item, "I", "getEventCount") } ) resource <- sapply(valuesa, function(item) { .jcall(item, "S", "getOrgResource") } ) role <- sapply(valuesa, function(item) { .jcall(item, "S", "getOrgRole") } ) event_repetitions <- sapply(valuesa, function(item) { .jcall(item, "I", "getEventRepetitions") } ) df <- J("java.time.format.DateTimeFormatter")$ofPattern("yyyy-MM-dd HH:mm:ss.z") ts <- sapply(valuesa, function(item) { ldt <- .jcall(item, "Ljava/time/ZonedDateTime;", "getStartTime"); .jcall(ldt, "S", "format", df) } ) start_time <- with_tz(ymd_hms(ts), tzone=Sys.timezone()) ts <- sapply(valuesa, function(item) { ldt <- .jcall(item, "Ljava/time/ZonedDateTime;", "getEndTime"); .jcall(ldt, "S", "format", df) } ) end_time <- with_tz(ymd_hms(ts), tzone = Sys.timezone()) data.frame( trace = name, duration = duration, eventcount = eventcount, resource = resource, role = role, event_repetitions = event_repetitions, start_time = start_time, end_time = end_time) } #' Create filter for xestool calls based on provided parameters #' @param resources list of resources present in trace's event to pass filter. MULTI for multiple resources per trace #' @param groups list of resources present in trace's event to pass filter. 
MULTI for multiple groups per trace #' @param roles list of resources present in trace's event to pass filter. MULTI for multiple roles per trace #' @param eventcount range of events per trace to pass filter #' @param tracestart range of trace start dates to pass filter #' @param traceend range of trace end dates to pass filter #' @param eventnames list of events trace should contain (at least one) to pass filter #' @param tracestartwday list of trace start DoW (at least one) to pass filter #' @param traceendwday list of trace end DoW (at least one) to pass filter#' #' @param transitions list of event transitions statuses to pass filter #' @param tracenames list of trace names to pass filter #' #' @return filter object (hashmap) xes.processfilter <- function(resources = NULL, groups = NULL, roles = NULL, eventcount = NULL, tracestart = NULL, traceend = NULL, eventnames = NULL, tracestartwday = NULL, traceendwday = NULL, transitions = NULL, tracenames = NULL ) { filter <- .jnew('java.util.HashMap') if ( !is.null(resources) ) { type <- J("org.processmining.xestools.XEStools")$FilterType$RESOURCE_LIST filter$put( type, J("com.google.common.collect.Lists")$newArrayList(.jarray(resources)) ) } if ( !is.null(groups) ) { type <- J("org.processmining.xestools.XEStools")$FilterType$GROUP_LIST filter$put( type, J("com.google.common.collect.Lists")$newArrayList(.jarray(groups)) ) } if ( !is.null(roles) ) { type <- J("org.processmining.xestools.XEStools")$FilterType$ROLE_LIST filter$put( type, J("com.google.common.collect.Lists")$newArrayList(.jarray(resources)) ) } if ( !is.null(eventnames) ) { type <- J("org.processmining.xestools.XEStools")$FilterType$EVENT_NAME_LIST filter$put( type, J("com.google.common.collect.Lists")$newArrayList(.jarray(eventnames)) ) } if ( !is.null(tracestartwday) ) { type <- J("org.processmining.xestools.XEStools")$FilterType$TRACE_START_WEEKDAY_LIST filter$put( type, J("com.google.common.collect.Lists")$newArrayList(.jarray(tracestartwday)) ) } if 
( !is.null(traceendwday) ) { type <- J("org.processmining.xestools.XEStools")$FilterType$TRACE_END_WEEKDAY_LIST filter$put( type, J("com.google.common.collect.Lists")$newArrayList(.jarray(traceendwday)) ) } if ( !is.null(transitions) ) { type <- J("org.processmining.xestools.XEStools")$FilterType$LIFECYCLE_TRANSITION_LIST filter$put( type, J("com.google.common.collect.Lists")$newArrayList(.jarray(transitions)) ) } if ( !is.null(tracenames) ) { type <- J("org.processmining.xestools.XEStools")$FilterType$TRACE_NAME_LIST filter$put( type, J("com.google.common.collect.Lists")$newArrayList(.jarray(tracenames)) ) } if (!is.null(eventcount)) { type <- J("org.processmining.xestools.XEStools")$FilterType$EVENT_COUNT_RANGE if ( "min" %in% names(eventcount) && "max" %in% names(eventcount)) { min <- new (J("java.lang.Integer"), as.character(eventcount["min"])) max <- new (J("java.lang.Integer"), as.character(eventcount["max"])) range <- .jcall(J("com.google.common.collect.Range"), "Lcom/google/common/collect/Range;", "closed", .jcast(min, "java.lang.Comparable"), .jcast(max, "java.lang.Comparable") ) filter$put(type, range) } else if ("min" %in% names(eventcount) && !"max" %in% names(eventcount)) { min <- new (J("java.lang.Integer"), as.character(eventcount["min"])) range <- .jcall(J("com.google.common.collect.Range"), "Lcom/google/common/collect/Range;", "atLeast", .jcast(min, "java.lang.Comparable") ) filter$put(type, range) } else if (!"min" %in% names(eventcount) && "max" %in% names(eventcount)) { max <- new (J("java.lang.Integer"), as.character(eventcount["max"])) range <- .jcall(J("com.google.common.collect.Range"), "Lcom/google/common/collect/Range;", "atMost", .jcast(max, "java.lang.Comparable") ) filter$put(type, range) } } if (!is.null(tracestart)) { type <- J("org.processmining.xestools.XEStools")$FilterType$TRACE_START_RANGE jzd <- J("java.time.ZonedDateTime") if ( "min" %in% names(tracestart) && "max" %in% names(tracestart)) { min <- jzd$parse(sub("(\\d\\d)$", 
":\\1", format(tracestart["min"],"%Y-%m-%dT%H:%M:%S.000%z"))) max <- jzd$parse(sub("(\\d\\d)$", ":\\1", format(tracestart["max"],"%Y-%m-%dT%H:%M:%S.000%z"))) range <- .jcall(J("com.google.common.collect.Range"), "Lcom/google/common/collect/Range;", "closed", .jcast(min, "java.lang.Comparable"), .jcast(max, "java.lang.Comparable") ) filter$put(type, range) } else if ("min" %in% names(tracestart) && !"max" %in% names(tracestart)) { min <- jzd$parse(sub("(\\d\\d)$", ":\\1", format(tracestart["min"],"%Y-%m-%dT%H:%M:%S.000%z"))) range <- .jcall(J("com.google.common.collect.Range"), "Lcom/google/common/collect/Range;", "atLeast", .jcast(min, "java.lang.Comparable") ) filter$put(type, range) } else if (!"min" %in% names(tracestart) && "max" %in% names(tracestart)) { max <- jzd$parse(sub("(\\d\\d)$", ":\\1", format(tracestart["max"],"%Y-%m-%dT%H:%M:%S.000%z"))) range <- .jcall(J("com.google.common.collect.Range"), "Lcom/google/common/collect/Range;", "atMost", .jcast(max, "java.lang.Comparable") ) filter$put(type, range) } } if (!is.null(traceend)) { type <- J("org.processmining.xestools.XEStools")$FilterType$TRACE_END_RANGE jzd <- J("java.time.ZonedDateTime") if ( "min" %in% names(traceend) && "max" %in% names(traceend)) { min <- jzd$parse(sub("(\\d\\d)$", ":\\1", format(traceend["min"],"%Y-%m-%dT%H:%M:%S.000%z"))) max <- jzd$parse(sub("(\\d\\d)$", ":\\1", format(traceend["max"],"%Y-%m-%dT%H:%M:%S.000%z"))) range <- .jcall(J("com.google.common.collect.Range"), "Lcom/google/common/collect/Range;", "closed", .jcast(min, "java.lang.Comparable"), .jcast(max, "java.lang.Comparable") ) filter$put(type, range) } else if ("min" %in% names(traceend) && !"max" %in% names(traceend)) { min <- jzd$parse(sub("(\\d\\d)$", ":\\1", format(traceend["min"],"%Y-%m-%dT%H:%M:%S.000%z"))) range <- .jcall(J("com.google.common.collect.Range"), "Lcom/google/common/collect/Range;", "atLeast", .jcast(min, "java.lang.Comparable") ) filter$put(type, range) } else if (!"min" %in% names(traceend) && 
"max" %in% names(traceend)) { max <- jzd$parse(sub("(\\d\\d)$", ":\\1", format(traceend["max"],"%Y-%m-%dT%H:%M:%S.000%z"))) range <- .jcall(J("com.google.common.collect.Range"), "Lcom/google/common/collect/Range;", "atMost", .jcast(max, "java.lang.Comparable") ) filter$put(type, range) } } return (filter) }
\name{zrecode} \alias{zrecode} \title{ recode lai bien chuoi ban dau } \description{ } \usage{ zrecode(varName, dName, dtafile = NULL) } \arguments{ \item{varName}{ te^n bie^'n } \item{dName}{ te^n dataset } \item{dtafile}{te^n ta^.p tin csv (bo? underscore o+? dda^`u va` .csv } } \details{ %% ~~ If necessary, more details than the description above ~~ } \value{ %% ~Describe the value returned %% If it is a LIST, use %% \item{comp1 }{Description of 'comp1'} %% \item{comp2 }{Description of 'comp2'} %% ... } \references{ %% ~put references to the literature/web site here ~ } \author{ %% ~~who you are~~ } \note{ %% ~~further notes~~ } %% ~Make other sections like Warning with \section{Warning }{....} ~ \seealso{ %% ~~objects to See Also as \code{\link{help}}, ~~~ } \examples{ ##---- Should be DIRECTLY executable !! ---- ##-- ==> Define data, use random, ##-- or do help(data=index) for the standard data sets. ## Lenh sau day nham ma hoa lai cac chuoi cua bien emp2 trong dataset solieu ## Quy tac bien doi nam trong tap tin csv _danhsachkhangsinh.csv ## zrecode("emp2","solieu","danhsachkhangsinh") ## tab(~emp2,data=solieu) ## Lenh sau day nham ma hoa lai cac chuoi cua bien lydo trong dataset solieu ## Quy tac bien doi nam trong tap tin csv _lydo.csv ## zrecode("lydo","solieu") ## tab(~emp2,data=solieu) }
/man/zrecode.Rd
no_license
gvdovandzung/thongke
R
false
false
1,349
rd
\name{zrecode} \alias{zrecode} \title{ recode lai bien chuoi ban dau } \description{ } \usage{ zrecode(varName, dName, dtafile = NULL) } \arguments{ \item{varName}{ te^n bie^'n } \item{dName}{ te^n dataset } \item{dtafile}{te^n ta^.p tin csv (bo? underscore o+? dda^`u va` .csv } } \details{ %% ~~ If necessary, more details than the description above ~~ } \value{ %% ~Describe the value returned %% If it is a LIST, use %% \item{comp1 }{Description of 'comp1'} %% \item{comp2 }{Description of 'comp2'} %% ... } \references{ %% ~put references to the literature/web site here ~ } \author{ %% ~~who you are~~ } \note{ %% ~~further notes~~ } %% ~Make other sections like Warning with \section{Warning }{....} ~ \seealso{ %% ~~objects to See Also as \code{\link{help}}, ~~~ } \examples{ ##---- Should be DIRECTLY executable !! ---- ##-- ==> Define data, use random, ##-- or do help(data=index) for the standard data sets. ## Lenh sau day nham ma hoa lai cac chuoi cua bien emp2 trong dataset solieu ## Quy tac bien doi nam trong tap tin csv _danhsachkhangsinh.csv ## zrecode("emp2","solieu","danhsachkhangsinh") ## tab(~emp2,data=solieu) ## Lenh sau day nham ma hoa lai cac chuoi cua bien lydo trong dataset solieu ## Quy tac bien doi nam trong tap tin csv _lydo.csv ## zrecode("lydo","solieu") ## tab(~emp2,data=solieu) }
#training run #call tfruns with a different working directory.
/training-run.R
no_license
jlad521/melenoma_classifier
R
false
false
67
r
#training run #call tfruns with a different working directory.
testlist <- list(A = structure(c(2.17107980817984e+205, 9.53801100427184e+295 ), .Dim = 1:2), B = structure(c(2.19477802979261e+294, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L))) result <- do.call(multivariance:::match_rows,testlist) str(result)
/multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613123497-test.R
no_license
akhikolla/updatedatatype-list3
R
false
false
323
r
testlist <- list(A = structure(c(2.17107980817984e+205, 9.53801100427184e+295 ), .Dim = 1:2), B = structure(c(2.19477802979261e+294, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L))) result <- do.call(multivariance:::match_rows,testlist) str(result)
con<-"household_power_consumption.txt" library("data.table") data<-fread(con,header=TRUE,sep=";",na.strings="?",colClasses="character") dataset<-subset(data,Date=='1/2/2007' | Date == '2/2/2007',select=c(1:9)) x<-as.POSIXct(paste(as.Date(dataset$Date,format="%d/%m/%Y"), dataset$Time), format="%Y-%m-%d %H:%M:%S") y<-dataset$Global_active_power png(filename="plot2.png",height=480,width=480) plot(x,y,type="l",xlab="",ylab="Global Active Power (kilowatts)") dev.off()
/plot2.R
no_license
karthiksaikumar/ExData_Plotting1
R
false
false
469
r
con<-"household_power_consumption.txt" library("data.table") data<-fread(con,header=TRUE,sep=";",na.strings="?",colClasses="character") dataset<-subset(data,Date=='1/2/2007' | Date == '2/2/2007',select=c(1:9)) x<-as.POSIXct(paste(as.Date(dataset$Date,format="%d/%m/%Y"), dataset$Time), format="%Y-%m-%d %H:%M:%S") y<-dataset$Global_active_power png(filename="plot2.png",height=480,width=480) plot(x,y,type="l",xlab="",ylab="Global Active Power (kilowatts)") dev.off()
#' Calculate plot-level distances to most recent common ancestors #' #' Given a picante-style community data matrix (sites are rows, species are columns), #' and a phylogeny, calculate the distances of sets of taxa to their MRCA. #' #' @param samp A picante-style community data matrix with sites as rows, and #' species as columns. #' @param tree An ape-style phylogeny. #' @param pairwise Whether to use the MRCA of all taxa in the sample, or the MRCA of each #' pairwise comparison in the sample. See details. #' #' @details Experimental metrics! This function calculates two simple but potentially #' useful measures. The first, accessed by setting pairwise to FALSE, #' is the mean branch length between a set of taxa and their most recent common ancestor #' (MRCA). I have not seen this used in the literature before, but it seems likely I'm #' wrong. This metric was not tested in our recent Ecography review, but given #' certain data structures, it seems potentially useful. In other cases, the MRCA will #' often simply be the root of the tree, and the metric will perhaps be of less use. #' Large values of the version of distMRCA correspond to taxa with a #' distant MRCA, while small values correspond to taxa with a more recent MRCA. #' Given an ultrametric tree, the mean distance #' between a set of taxa and a single ancestor is of course equal to the distance between #' one of those taxa and the ancestor. However, in case an ultrametric tree is passed #' to the function, I do define it as the mean distance between all present taxa #' and their MRCA. It will throw a warning if a non-ultrametric tree is passed along. #' #' The second measure calculated by this function is accessed by setting pairwise to TRUE. #' Here, per plot, the metric finds the distance of the MRCA of each pairwise taxon #' comparison from the root. The value returned per plot is then the mean of these #' distances. DANGER. 
Because this second option calculates all #' pairwise comparisons, the time it takes to run grows exponentially with the size of the #' community data matrix. For instance, on my personal computer, #' pairwise distMRCA was calculated in 0.2 seconds #' for a CDM with 16 plots containing between 10 and 25 species each. However, for a CDM #' with 100 plots containing between 25 and 55 species, it took 42s. #' In contrast to the first flavor of this metric, large values of this metric #' correspond to plots where the taxa present are more recently derived, while small #' values correspond to plots where the taxa are less recently derived (average common #' ancestor closer to the root). To make these measures more comparable, it may be better #' subtract the final values from the total tree height (with caveat about ultrametric #' tree above). It would also be easy to derive an abundance weighted version of this #' function. UPDATE. It appears that this second form is yet another (slower) way of #' deriving the calculation of MPD/PSV. #' #' @return A vector of distMRCA values. #' #' @export #' #' @importFrom ape getMRCA #' #' @references Miller, E. T. 2016. Random thoughts. #' #' @examples #' #simulate tree with birth-death process #' tree <- geiger::sim.bdtree(b=0.1, d=0, stop="taxa", n=50) #' #' sim.abundances <- round(rlnorm(5000, meanlog=2, sdlog=1)) + 1 #' #' cdm <- simulateComm(tree, richness.vector=10:25, abundances=sim.abundances) #' #' results <- distMRCA(cdm, tree, pairwise=FALSE) distMRCA <- function(samp, tree, pairwise) { #warn if tree is not ultrametric if(!is.ultrametric(tree)) { warning("Tree is not ultrametric. Will affect metric values.") } #find the distances between all nodes allDists <- dist.nodes(tree) #coerce samp to a matrix. 
this is useful because when you subset each row to those #species that are actually present, it keeps species' names associated with numbers samp <- as.matrix(samp) #if there are issues with speed of this function, create some simple functions here #and instead apply them over the matrix. in the mean time, run this for loop and save #results into this vector results <- c() #if pairwise is false, just get the MRCA of all the taxa in the sample if(pairwise==FALSE) { for(i in 1:dim(samp)[1]) { #each plot, subset to those taxa present, then find the MRCA taxa <- samp[i,][samp[i,]!=0] MRCA <- getMRCA(tree, names(taxa)) #subset allDists to those between the tips and the MRCA mrcaDists <- allDists[MRCA, 1:length(tree$tip.label)] #find the mean and save the results results[i] <- mean(mrcaDists) } } #if pairwise is TRUE, get the MRCA of all taxon pairs in the sample, then take the #mean of that else if(pairwise==TRUE) { #outer i-level of loop runs over plots for(i in 1:dim(samp)[1]) { #each plot, subset to those taxa present, then find the MRCA taxa <- samp[i,][samp[i,]!=0] #quickly get all pairwise comparisons with this neat trick allPairs <- outer(names(taxa), names(taxa), function(x, y) paste(x, y, sep=",")) allPairs <- allPairs[upper.tri(allPairs)] #this is imperfect (unnecessary extra step), but stringsplit then lapply #mrca over it. unlist and run a for loop over it temp <- strsplit(allPairs, ",") MRCAs <- unlist(lapply(temp, function(x) getMRCA(tree, x))) #set up a vector to save sample mean MRCA into mrcaDists <- c() #now, per plot, this finds the distance of each MRCA from the root for(j in 1:length(MRCAs)) { #subset allDists to find the relevant distance (this is calculating the #distance between that node and the ROOT) mrcaDists[j] <- allDists[length(tree$tip.label)+1, MRCAs[j]] } #find the mean of all the pairwise MRCA dists in the sample results[i] <- mean(mrcaDists) } } else { stop("pairwise must be either TRUE or FALSE") } results }
/R/distMRCA.R
no_license
eliotmiller/metricTester
R
false
false
5,837
r
#' Calculate plot-level distances to most recent common ancestors #' #' Given a picante-style community data matrix (sites are rows, species are columns), #' and a phylogeny, calculate the distances of sets of taxa to their MRCA. #' #' @param samp A picante-style community data matrix with sites as rows, and #' species as columns. #' @param tree An ape-style phylogeny. #' @param pairwise Whether to use the MRCA of all taxa in the sample, or the MRCA of each #' pairwise comparison in the sample. See details. #' #' @details Experimental metrics! This function calculates two simple but potentially #' useful measures. The first, accessed by setting pairwise to FALSE, #' is the mean branch length between a set of taxa and their most recent common ancestor #' (MRCA). I have not seen this used in the literature before, but it seems likely I'm #' wrong. This metric was not tested in our recent Ecography review, but given #' certain data structures, it seems potentially useful. In other cases, the MRCA will #' often simply be the root of the tree, and the metric will perhaps be of less use. #' Large values of the version of distMRCA correspond to taxa with a #' distant MRCA, while small values correspond to taxa with a more recent MRCA. #' Given an ultrametric tree, the mean distance #' between a set of taxa and a single ancestor is of course equal to the distance between #' one of those taxa and the ancestor. However, in case an ultrametric tree is passed #' to the function, I do define it as the mean distance between all present taxa #' and their MRCA. It will throw a warning if a non-ultrametric tree is passed along. #' #' The second measure calculated by this function is accessed by setting pairwise to TRUE. #' Here, per plot, the metric finds the distance of the MRCA of each pairwise taxon #' comparison from the root. The value returned per plot is then the mean of these #' distances. DANGER. 
Because this second option calculates all #' pairwise comparisons, the time it takes to run grows exponentially with the size of the #' community data matrix. For instance, on my personal computer, #' pairwise distMRCA was calculated in 0.2 seconds #' for a CDM with 16 plots containing between 10 and 25 species each. However, for a CDM #' with 100 plots containing between 25 and 55 species, it took 42s. #' In contrast to the first flavor of this metric, large values of this metric #' correspond to plots where the taxa present are more recently derived, while small #' values correspond to plots where the taxa are less recently derived (average common #' ancestor closer to the root). To make these measures more comparable, it may be better #' subtract the final values from the total tree height (with caveat about ultrametric #' tree above). It would also be easy to derive an abundance weighted version of this #' function. UPDATE. It appears that this second form is yet another (slower) way of #' deriving the calculation of MPD/PSV. #' #' @return A vector of distMRCA values. #' #' @export #' #' @importFrom ape getMRCA #' #' @references Miller, E. T. 2016. Random thoughts. #' #' @examples #' #simulate tree with birth-death process #' tree <- geiger::sim.bdtree(b=0.1, d=0, stop="taxa", n=50) #' #' sim.abundances <- round(rlnorm(5000, meanlog=2, sdlog=1)) + 1 #' #' cdm <- simulateComm(tree, richness.vector=10:25, abundances=sim.abundances) #' #' results <- distMRCA(cdm, tree, pairwise=FALSE) distMRCA <- function(samp, tree, pairwise) { #warn if tree is not ultrametric if(!is.ultrametric(tree)) { warning("Tree is not ultrametric. Will affect metric values.") } #find the distances between all nodes allDists <- dist.nodes(tree) #coerce samp to a matrix. 
this is useful because when you subset each row to those #species that are actually present, it keeps species' names associated with numbers samp <- as.matrix(samp) #if there are issues with speed of this function, create some simple functions here #and instead apply them over the matrix. in the mean time, run this for loop and save #results into this vector results <- c() #if pairwise is false, just get the MRCA of all the taxa in the sample if(pairwise==FALSE) { for(i in 1:dim(samp)[1]) { #each plot, subset to those taxa present, then find the MRCA taxa <- samp[i,][samp[i,]!=0] MRCA <- getMRCA(tree, names(taxa)) #subset allDists to those between the tips and the MRCA mrcaDists <- allDists[MRCA, 1:length(tree$tip.label)] #find the mean and save the results results[i] <- mean(mrcaDists) } } #if pairwise is TRUE, get the MRCA of all taxon pairs in the sample, then take the #mean of that else if(pairwise==TRUE) { #outer i-level of loop runs over plots for(i in 1:dim(samp)[1]) { #each plot, subset to those taxa present, then find the MRCA taxa <- samp[i,][samp[i,]!=0] #quickly get all pairwise comparisons with this neat trick allPairs <- outer(names(taxa), names(taxa), function(x, y) paste(x, y, sep=",")) allPairs <- allPairs[upper.tri(allPairs)] #this is imperfect (unnecessary extra step), but stringsplit then lapply #mrca over it. unlist and run a for loop over it temp <- strsplit(allPairs, ",") MRCAs <- unlist(lapply(temp, function(x) getMRCA(tree, x))) #set up a vector to save sample mean MRCA into mrcaDists <- c() #now, per plot, this finds the distance of each MRCA from the root for(j in 1:length(MRCAs)) { #subset allDists to find the relevant distance (this is calculating the #distance between that node and the ROOT) mrcaDists[j] <- allDists[length(tree$tip.label)+1, MRCAs[j]] } #find the mean of all the pairwise MRCA dists in the sample results[i] <- mean(mrcaDists) } } else { stop("pairwise must be either TRUE or FALSE") } results }
library(datasets) Sys.setlocale(category = "LC_ALL", locale = "English") create_plot1 <- function(directory) { #set the directory in parameter as the working folder setwd(directory) if(!file.exists("./data/household_power_consumption.txt")){ print("Directory not correct, please input the path where the file <household_power_consumption.txt> is.") } else{ #load the household_power_consumption.txt file dfHousePower <- read.table("./data/household_power_consumption.txt", sep=";", header =TRUE, na.strings = "?") #subsetting on the 2007-02-01 and 2007-02-02 dfSubHousePower <- dfHousePower[ as.Date(dfHousePower$Date,"%d/%m/%Y") == as.Date("2007-02-01", "%Y-%m-%d") | as.Date(dfHousePower$Date,"%d/%m/%Y") == as.Date("2007-02-02", "%Y-%m-%d") ,] #export by using a png file type png("plot1.png", width = 480, height = 480, units = "px", #bg = "white" bg = "transparent" ) par(mar= c(4, 4, 2, 1)) #draw the histogram hist(dfSubHousePower$Global_active_power, col="red", main="Global Active Power", xlab="Global Active Power (killowatts)" ) #export the histogramme in the PNG dev.off() } }
/Plot1.R
no_license
jscaze/ExData_Plotting1
R
false
false
1,621
r
library(datasets) Sys.setlocale(category = "LC_ALL", locale = "English") create_plot1 <- function(directory) { #set the directory in parameter as the working folder setwd(directory) if(!file.exists("./data/household_power_consumption.txt")){ print("Directory not correct, please input the path where the file <household_power_consumption.txt> is.") } else{ #load the household_power_consumption.txt file dfHousePower <- read.table("./data/household_power_consumption.txt", sep=";", header =TRUE, na.strings = "?") #subsetting on the 2007-02-01 and 2007-02-02 dfSubHousePower <- dfHousePower[ as.Date(dfHousePower$Date,"%d/%m/%Y") == as.Date("2007-02-01", "%Y-%m-%d") | as.Date(dfHousePower$Date,"%d/%m/%Y") == as.Date("2007-02-02", "%Y-%m-%d") ,] #export by using a png file type png("plot1.png", width = 480, height = 480, units = "px", #bg = "white" bg = "transparent" ) par(mar= c(4, 4, 2, 1)) #draw the histogram hist(dfSubHousePower$Global_active_power, col="red", main="Global Active Power", xlab="Global Active Power (killowatts)" ) #export the histogramme in the PNG dev.off() } }
library(tidyverse) # metadata metadata <- read.csv("results/R1R2-EBPR-MAGs-table.csv") # vitamin annotations vitamin_list <- read.csv("results/2013_binning/annotations/vitamin-list.csv", header = FALSE) colnames(vitamin_list) <- c("ko", "pathway") vitamin_annotations <- read.delim("results/2013_binning/annotations/R1R2-vitamin-annotations.txt", sep = "\t", header=FALSE) colnames(vitamin_annotations) <- c("locus_tag", "ko", "annotation") vitamin_table <- left_join(vitamin_annotations, vitamin_list) vitamin_table$Bin <- gsub("\\_.*", "", vitamin_table$locus_tag) vitamin_results <- vitamin_table %>% select(Bin, ko, annotation, pathway) %>% arrange(Bin) vitamin_info <- left_join(vitamin_results, metadata) %>% select(Bin, Code, ko, annotation, pathway) # amino acid annotations amino_list <- read.csv("results/2013_binning/annotations/amino_acid_list.csv", header=FALSE) colnames(amino_list) <- c("ko", "pathway") amino_annotations <- read.delim("results/2013_binning/annotations/R1R2-amino-acid-annotations.txt", sep="\t", header=FALSE) colnames(amino_annotations) <- c("locus_tag", "ko", "annotation") amino_table <- left_join(amino_annotations, amino_list) amino_table$Bin <- gsub("\\_.*", "", amino_table$locus_tag) amino_results <- amino_table %>% select(Bin, ko, annotation, pathway) %>% arrange(Bin) amino_info <- left_join(amino_results, metadata) %>% select(Bin, Code, ko, annotation, pathway) # IIA tables IIA_amino_table <- amino_info %>% filter(Code=="CAPIIA") %>% arrange(pathway) IIA_vitamin_table <- vitamin_info %>% filter(Code=="CAPIIA") %>% arrange(pathway) # IA tables IA_amino_table <- amino_info %>% filter(Code=="CAPIA") %>% arrange(pathway) IA_vitamin_table <- vitamin_info %>% filter(Code=="CAPIA") %>% arrange(pathway) # Totals vitamin_totals <- vitamin_info %>% group_by(Code) %>% count() %>% arrange(n) amino_totals <- amino_info %>% group_by(Code) %>% count() # vitamins biotin_count <- vitamin_info %>% group_by(Code) %>% filter(pathway=='biotin ') %>% count() %>% 
arrange(n) cobalamin_count <- vitamin_info %>% group_by(Code) %>% filter(pathway=="cobalamin") %>% count() %>% arrange(n) THF_count <- vitamin_info %>% group_by(Code) %>% filter(pathway=="THF") %>% count() %>% arrange(n) thiamine_count <- vitamin_info %>% group_by(Code) %>% filter(pathway=="thiamine") %>% count() %>% arrange(n) ################################################## # Expression data added ################################################## vitamin_expression <- read.delim("results/2013_binning/annotations/vitamin-expression-table.csv", sep=";", header=FALSE) amino_expression <- read.delim("results/2013_binning/annotations/amino-expression-table.csv", sep=';', header=FALSE) names <- c("locus_tag", "t1", "t2", "t3", "t4", "t5", "t6", "ko", "Bin") colnames(vitamin_expression) <- c(names) colnames(amino_expression) <- c(names) # vitamin table vitamin_pathways <- left_join(vitamin_expression, vitamin_list) vitamin_expression_table <- left_join(vitamin_pathways, metadata) %>% select(Bin, Code, t1, t2, t3, t4, t5, t6, ko, pathway) %>% arrange(Code) CAP_vitmain_expression <- vitamin_expression_table %>% filter(Code=="CAPIIA"| Code =='CAPIA') %>% arrange(Code,pathway) # amino acid table amino_pathways <- left_join(amino_expression, amino_list) amino_expression_table <- left_join(amino_pathways, metadata) %>% select(Bin, Code, t1, t2, t3, t4, t5, t6, ko, pathway) %>% arrange(Code) CAP_amino_expression <- amino_expression_table %>% filter(Code=="CAPIIA" | Code=="CAPIA") %>% arrange(Code,pathway) # top 20 expressed genomes top_20 <- c("CAPIIA", "RUN1", "BAC3", "CAULO1", "HYPHO1", "PSEUDO1", "RHODO1", "CHIT1", "FLAVO1", "GEMMA1", "TET2", "CAPIA", "RAM1", "OBS1", "TET1", "ALPHA1", "RUBRI1", "LEAD1", "ZOO1") top_15 <- c("CAPIIA", "RUN1", "BAC3", "CAULO1", "HYPHO1", "PSEUDO1", "RHODO1", "CHIT1", "FLAVO1", "GEMMA1", "TET2", "CAPIA", "RAM1", "OBS1", "TET1") top_20_vitamin_expression <- vitamin_expression_table %>% filter(Code %in% top_20) %>% arrange(Code,pathway) 
top_20_amino_expression <- amino_expression_table %>% filter(Code %in% top_20) %>% arrange(Code, pathway) top_15_vitamin_expression <- vitamin_expression_table %>% filter(Code %in% top_15) %>% arrange(Code, pathway) top_15_amino_expression <- amino_expression_table %>% filter(Code %in% top_15) %>% arrange(Code, pathway) # Totals for top 15 vitamin_top_15_totals <- vitamin_info %>% filter(Code %in% top_15) %>% group_by(Code) %>% count() %>% arrange(n) amino_top_15_totals <- amino_info %>% filter(Code %in% top_15) %>% group_by(Code) %>% count() %>% arrange(n) amino_top_15_descriptions <- amino_info %>% filter(Code %in% top_15) %>% group_by(Code) %>% arrange(Code, pathway) amino_top_15_totals write.csv(amino_top_15_descriptions, "results/2013_binning/annotations/manual_annotations/top15_amino_descriptions.csv", quote=FALSE, row.names = FALSE)
/scripts/AA-vitamin-count-expression-explore.R
no_license
elizabethmcd/EBPR-MAGs
R
false
false
5,322
r
library(tidyverse) # metadata metadata <- read.csv("results/R1R2-EBPR-MAGs-table.csv") # vitamin annotations vitamin_list <- read.csv("results/2013_binning/annotations/vitamin-list.csv", header = FALSE) colnames(vitamin_list) <- c("ko", "pathway") vitamin_annotations <- read.delim("results/2013_binning/annotations/R1R2-vitamin-annotations.txt", sep = "\t", header=FALSE) colnames(vitamin_annotations) <- c("locus_tag", "ko", "annotation") vitamin_table <- left_join(vitamin_annotations, vitamin_list) vitamin_table$Bin <- gsub("\\_.*", "", vitamin_table$locus_tag) vitamin_results <- vitamin_table %>% select(Bin, ko, annotation, pathway) %>% arrange(Bin) vitamin_info <- left_join(vitamin_results, metadata) %>% select(Bin, Code, ko, annotation, pathway) # amino acid annotations amino_list <- read.csv("results/2013_binning/annotations/amino_acid_list.csv", header=FALSE) colnames(amino_list) <- c("ko", "pathway") amino_annotations <- read.delim("results/2013_binning/annotations/R1R2-amino-acid-annotations.txt", sep="\t", header=FALSE) colnames(amino_annotations) <- c("locus_tag", "ko", "annotation") amino_table <- left_join(amino_annotations, amino_list) amino_table$Bin <- gsub("\\_.*", "", amino_table$locus_tag) amino_results <- amino_table %>% select(Bin, ko, annotation, pathway) %>% arrange(Bin) amino_info <- left_join(amino_results, metadata) %>% select(Bin, Code, ko, annotation, pathway) # IIA tables IIA_amino_table <- amino_info %>% filter(Code=="CAPIIA") %>% arrange(pathway) IIA_vitamin_table <- vitamin_info %>% filter(Code=="CAPIIA") %>% arrange(pathway) # IA tables IA_amino_table <- amino_info %>% filter(Code=="CAPIA") %>% arrange(pathway) IA_vitamin_table <- vitamin_info %>% filter(Code=="CAPIA") %>% arrange(pathway) # Totals vitamin_totals <- vitamin_info %>% group_by(Code) %>% count() %>% arrange(n) amino_totals <- amino_info %>% group_by(Code) %>% count() # vitamins biotin_count <- vitamin_info %>% group_by(Code) %>% filter(pathway=='biotin ') %>% count() %>% 
arrange(n) cobalamin_count <- vitamin_info %>% group_by(Code) %>% filter(pathway=="cobalamin") %>% count() %>% arrange(n) THF_count <- vitamin_info %>% group_by(Code) %>% filter(pathway=="THF") %>% count() %>% arrange(n) thiamine_count <- vitamin_info %>% group_by(Code) %>% filter(pathway=="thiamine") %>% count() %>% arrange(n) ################################################## # Expression data added ################################################## vitamin_expression <- read.delim("results/2013_binning/annotations/vitamin-expression-table.csv", sep=";", header=FALSE) amino_expression <- read.delim("results/2013_binning/annotations/amino-expression-table.csv", sep=';', header=FALSE) names <- c("locus_tag", "t1", "t2", "t3", "t4", "t5", "t6", "ko", "Bin") colnames(vitamin_expression) <- c(names) colnames(amino_expression) <- c(names) # vitamin table vitamin_pathways <- left_join(vitamin_expression, vitamin_list) vitamin_expression_table <- left_join(vitamin_pathways, metadata) %>% select(Bin, Code, t1, t2, t3, t4, t5, t6, ko, pathway) %>% arrange(Code) CAP_vitmain_expression <- vitamin_expression_table %>% filter(Code=="CAPIIA"| Code =='CAPIA') %>% arrange(Code,pathway) # amino acid table amino_pathways <- left_join(amino_expression, amino_list) amino_expression_table <- left_join(amino_pathways, metadata) %>% select(Bin, Code, t1, t2, t3, t4, t5, t6, ko, pathway) %>% arrange(Code) CAP_amino_expression <- amino_expression_table %>% filter(Code=="CAPIIA" | Code=="CAPIA") %>% arrange(Code,pathway) # top 20 expressed genomes top_20 <- c("CAPIIA", "RUN1", "BAC3", "CAULO1", "HYPHO1", "PSEUDO1", "RHODO1", "CHIT1", "FLAVO1", "GEMMA1", "TET2", "CAPIA", "RAM1", "OBS1", "TET1", "ALPHA1", "RUBRI1", "LEAD1", "ZOO1") top_15 <- c("CAPIIA", "RUN1", "BAC3", "CAULO1", "HYPHO1", "PSEUDO1", "RHODO1", "CHIT1", "FLAVO1", "GEMMA1", "TET2", "CAPIA", "RAM1", "OBS1", "TET1") top_20_vitamin_expression <- vitamin_expression_table %>% filter(Code %in% top_20) %>% arrange(Code,pathway) 
top_20_amino_expression <- amino_expression_table %>% filter(Code %in% top_20) %>% arrange(Code, pathway) top_15_vitamin_expression <- vitamin_expression_table %>% filter(Code %in% top_15) %>% arrange(Code, pathway) top_15_amino_expression <- amino_expression_table %>% filter(Code %in% top_15) %>% arrange(Code, pathway) # Totals for top 15 vitamin_top_15_totals <- vitamin_info %>% filter(Code %in% top_15) %>% group_by(Code) %>% count() %>% arrange(n) amino_top_15_totals <- amino_info %>% filter(Code %in% top_15) %>% group_by(Code) %>% count() %>% arrange(n) amino_top_15_descriptions <- amino_info %>% filter(Code %in% top_15) %>% group_by(Code) %>% arrange(Code, pathway) amino_top_15_totals write.csv(amino_top_15_descriptions, "results/2013_binning/annotations/manual_annotations/top15_amino_descriptions.csv", quote=FALSE, row.names = FALSE)
context("anatFDR") gf = read.csv("/tmp/rminctestdata/CIVET_TEST.csv") gf = civet.getAllFilenames(gf,"ID","POND","/tmp/rminctestdata/CIVET","TRUE","1.1.12") gf = civet.readAllCivetFiles("/tmp/rminctestdata/AAL.csv",gf) sink("/dev/null"); rmincLm = anatLm(~ Sex,gf,gf$lobeThickness); sink(); lobeThickness = gf$lobeThickness[,1] Age = gf$Age Sex = gf$Sex rLm = summary(lm(lobeThickness~Sex)) rLmFDR1 = p.adjust( pt2(rmincLm[,5],attr(rmincLm,"df")[[2]]),"fdr") rLmFDR2 = p.adjust( pt2(rmincLm[,6],attr(rmincLm,"df")[[3]]),"fdr") sink("/dev/null"); rmincFDR = anatFDR(rmincLm); sink(); test_that("anatFDR Two Factors",{ expect_that(rLmFDR1[1],is_equivalent_to(rmincFDR[1,2])) expect_that(rLmFDR1[2],is_equivalent_to(rmincFDR[2,2])) expect_that(rLmFDR1[3],is_equivalent_to(rmincFDR[3,2])) expect_that(rLmFDR2[1],is_equivalent_to(rmincFDR[1,3])) expect_that(rLmFDR2[2],is_equivalent_to(rmincFDR[2,3])) expect_that(rLmFDR2[3],is_equivalent_to(rmincFDR[3,3])) }) sink("/dev/null"); rmincLm = anatLm(~ Age*Sex,gf,gf$lobeThickness); sink(); lobeThickness = gf$lobeThickness[,1] Age = gf$Age Sex = gf$Sex rLm = summary(lm(lobeThickness~Age*Sex)) rLmFDR1 = p.adjust( pt2(rmincLm[,7],attr(rmincLm,"df")[[2]]),"fdr") rLmFDR2 = p.adjust( pt2(rmincLm[,8],attr(rmincLm,"df")[[3]]),"fdr") rLmFDR3 = p.adjust( pt2(rmincLm[,9],attr(rmincLm,"df")[[4]]),"fdr") rLmFDR4 = p.adjust( pt2(rmincLm[,10],attr(rmincLm,"df")[[5]]),"fdr") sink("/dev/null"); rmincFDR = anatFDR(rmincLm); sink(); test_that("anatFDR Interaction",{ expect_that(rLmFDR1[1],is_equivalent_to(rmincFDR[1,2])) expect_that(rLmFDR1[2],is_equivalent_to(rmincFDR[2,2])) expect_that(rLmFDR1[3],is_equivalent_to(rmincFDR[3,2])) expect_that(rLmFDR2[1],is_equivalent_to(rmincFDR[1,3])) expect_that(rLmFDR2[2],is_equivalent_to(rmincFDR[2,3])) expect_that(rLmFDR2[3],is_equivalent_to(rmincFDR[3,3])) expect_that(rLmFDR3[1],is_equivalent_to(rmincFDR[1,4])) expect_that(rLmFDR3[2],is_equivalent_to(rmincFDR[2,4])) 
expect_that(rLmFDR3[3],is_equivalent_to(rmincFDR[3,4])) expect_that(rLmFDR4[1],is_equivalent_to(rmincFDR[1,5])) expect_that(rLmFDR4[2],is_equivalent_to(rmincFDR[2,5])) expect_that(rLmFDR4[3],is_equivalent_to(rmincFDR[3,5])) }) sink("/dev/null"); rmincLm = anatLm(~ Primary.Diagnosis,gf,gf$lobeThickness); sink(); lobeThickness = gf$lobeThickness[,1] Primary.Diagnosis = gf$Primary.Diagnosis rLm = summary(lm(lobeThickness~Primary.Diagnosis)) rLmFDR1 = p.adjust( pt2(rmincLm[,6],attr(rmincLm,"df")[[2]]),"fdr") rLmFDR2 = p.adjust( pt2(rmincLm[,7],attr(rmincLm,"df")[[3]]),"fdr") rLmFDR3 = p.adjust( pt2(rmincLm[,8],attr(rmincLm,"df")[[4]]),"fdr") sink("/dev/null"); rmincFDR = anatFDR(rmincLm); sink(); test_that("anatFDR Three Factors",{ expect_that(rLmFDR1[1],is_equivalent_to(rmincFDR[1,2])) expect_that(rLmFDR1[2],is_equivalent_to(rmincFDR[2,2])) expect_that(rLmFDR1[3],is_equivalent_to(rmincFDR[3,2])) expect_that(rLmFDR2[1],is_equivalent_to(rmincFDR[1,3])) expect_that(rLmFDR2[2],is_equivalent_to(rmincFDR[2,3])) expect_that(rLmFDR2[3],is_equivalent_to(rmincFDR[3,3])) expect_that(rLmFDR3[1],is_equivalent_to(rmincFDR[1,4])) expect_that(rLmFDR3[2],is_equivalent_to(rmincFDR[2,4])) expect_that(rLmFDR3[3],is_equivalent_to(rmincFDR[3,4])) }) sink("/dev/null"); rmincLm = anatLm(~Primary.Diagnosis*Age,gf,gf$lobeThickness); sink(); lobeThickness = gf$lobeThickness[,1] Primary.Diagnosis = gf$Primary.Diagnosis rLm = summary(lm(lobeThickness~Primary.Diagnosis*Age)) rLmFDR1 = p.adjust( pt2(rmincLm[,9],attr(rmincLm,"df")[[2]]),"fdr") rLmFDR2 = p.adjust( pt2(rmincLm[,10],attr(rmincLm,"df")[[3]]),"fdr") rLmFDR3 = p.adjust( pt2(rmincLm[,11],attr(rmincLm,"df")[[4]]),"fdr") rLmFDR4 = p.adjust( pt2(rmincLm[,12],attr(rmincLm,"df")[[5]]),"fdr") rLmFDR5 = p.adjust( pt2(rmincLm[,13],attr(rmincLm,"df")[[6]]),"fdr") rLmFDR6 = p.adjust( pt2(rmincLm[,14],attr(rmincLm,"df")[[7]]),"fdr") sink("/dev/null"); rmincFDR = anatFDR(rmincLm); sink(); test_that("anatFDR Three Factors Interaction",{ 
expect_that(rLmFDR1[1],is_equivalent_to(rmincFDR[1,2])) expect_that(rLmFDR1[2],is_equivalent_to(rmincFDR[2,2])) expect_that(rLmFDR1[3],is_equivalent_to(rmincFDR[3,2])) expect_that(rLmFDR2[1],is_equivalent_to(rmincFDR[1,3])) expect_that(rLmFDR2[2],is_equivalent_to(rmincFDR[2,3])) expect_that(rLmFDR2[3],is_equivalent_to(rmincFDR[3,3])) expect_that(rLmFDR3[1],is_equivalent_to(rmincFDR[1,4])) expect_that(rLmFDR3[2],is_equivalent_to(rmincFDR[2,4])) expect_that(rLmFDR3[3],is_equivalent_to(rmincFDR[3,4])) expect_that(rLmFDR4[1],is_equivalent_to(rmincFDR[1,5])) expect_that(rLmFDR4[2],is_equivalent_to(rmincFDR[2,5])) expect_that(rLmFDR4[3],is_equivalent_to(rmincFDR[3,5])) expect_that(rLmFDR5[1],is_equivalent_to(rmincFDR[1,6])) expect_that(rLmFDR5[2],is_equivalent_to(rmincFDR[2,6])) expect_that(rLmFDR5[3],is_equivalent_to(rmincFDR[3,6])) expect_that(rLmFDR6[1],is_equivalent_to(rmincFDR[1,7])) expect_that(rLmFDR6[2],is_equivalent_to(rmincFDR[2,7])) expect_that(rLmFDR6[3],is_equivalent_to(rmincFDR[3,7])) })
/inst/tests/test_anatFDR.R
no_license
bcdarwin/RMINC
R
false
false
5,005
r
# Tests for anatFDR: the FDR-adjusted q-values returned by anatFDR() must match
# p.adjust(pt2(t, df), "fdr") computed directly from the t-statistic columns of
# the anatLm model. (Dead reference fits via summary(lm(...)) and their unused
# local variable copies were removed; they were never compared against anything.)
context("anatFDR")

gf = read.csv("/tmp/rminctestdata/CIVET_TEST.csv")
gf = civet.getAllFilenames(gf, "ID", "POND", "/tmp/rminctestdata/CIVET", "TRUE", "1.1.12")
gf = civet.readAllCivetFiles("/tmp/rminctestdata/AAL.csv", gf)

# Fit an anatLm model on lobe thickness, silencing its console output.
fit_silently = function(formula) {
  sink("/dev/null")
  model = anatLm(formula, gf, gf$lobeThickness)
  sink()
  model
}

# Reference FDR values for one model term: two-sided p-values from the
# t statistics in column tcol, using the degrees of freedom stored at
# index dfidx of the model's "df" attribute, adjusted with BH FDR.
reference_fdr = function(model, tcol, dfidx) {
  p.adjust(pt2(model[, tcol], attr(model, "df")[[dfidx]]), "fdr")
}

# Run anatFDR on `model` and compare its q-values against the reference
# for every term. Term j's t statistics live in model column tcols[j], its
# degrees of freedom at attr(model, "df")[[j + 1]], and its q-values in
# column j + 1 of the anatFDR result. The first three structures (rows)
# of each term are checked, matching the original hand-written expectations.
check_anatFDR = function(model, tcols, label) {
  sink("/dev/null")
  rmincFDR = anatFDR(model)
  sink()
  test_that(label, {
    for (j in seq_along(tcols)) {
      expected = reference_fdr(model, tcols[j], j + 1)
      for (i in 1:3) {
        expect_that(expected[i], is_equivalent_to(rmincFDR[i, j + 1]))
      }
    }
  })
}

# Same four model specifications as before, with the same t-statistic columns.
check_anatFDR(fit_silently(~ Sex), 5:6, "anatFDR Two Factors")
check_anatFDR(fit_silently(~ Age * Sex), 7:10, "anatFDR Interaction")
check_anatFDR(fit_silently(~ Primary.Diagnosis), 6:8, "anatFDR Three Factors")
check_anatFDR(fit_silently(~ Primary.Diagnosis * Age), 9:14, "anatFDR Three Factors Interaction")
#' Least-squares initial parameter estimates for an exponential semi-variogram.
#'
#' Fits the exponential model gamma(h) = A * (1 - exp(-h / a)) to empirical
#' semi-variogram values by minimising the sum of squared residuals. The fitted
#' model is fixed (exponential). The resulting parameters are used as starting
#' values for the fit.variogram adjustment called by the krige function when
#' running in automatic mode.
#'
#' @param dist numeric vector of distances (lags).
#' @param gamma numeric vector of empirical semi-variogram values at `dist`.
#' @return named numeric vector `c(psill, range)`, or NULL when the optimiser
#'   fails to converge.
.init.exp.model.variogram = function(dist, gamma) {
  # Exponential variogram model. (A dead first expression using the practical
  # range, A*(1 - exp(-3*h/a)), was removed: its value was discarded and only
  # the second expression was ever returned.)
  gammaFunc = function(h, A, a) {
    A * (1 - exp(-h / a))
  }
  # Sum of squared residuals between the model and the empirical values.
  objectif = function(par, dist, gamma) {
    A = par[1]
    a = par[2]
    sum((gammaFunc(dist, A, a) - gamma)^2)
  }
  # Starting values: sill ~ mean empirical gamma, range ~ half the maximum lag.
  # The lower bound keeps the range strictly positive for L-BFGS-B.
  par = c(mean(gamma), max(dist) / 2)
  opt = optim(par, objectif, dist = dist, gamma = gamma,
              lower = c(-Inf, 0.02), method = "L-BFGS-B")
  if (opt$convergence != 0) return(NULL)
  c(psill = opt$par[1], range = opt$par[2])
}
/prevR/R/init.exp.model.variogram.r
no_license
ingted/R-Examples
R
false
false
1,008
r
#' Least-squares initial parameter estimates for an exponential semi-variogram.
#'
#' Fits the exponential model gamma(h) = A * (1 - exp(-h / a)) to empirical
#' semi-variogram values by minimising the sum of squared residuals. The fitted
#' model is fixed (exponential). The resulting parameters are used as starting
#' values for the fit.variogram adjustment called by the krige function when
#' running in automatic mode.
#'
#' @param dist numeric vector of distances (lags).
#' @param gamma numeric vector of empirical semi-variogram values at `dist`.
#' @return named numeric vector `c(psill, range)`, or NULL when the optimiser
#'   fails to converge.
.init.exp.model.variogram = function(dist, gamma) {
  # Exponential variogram model. (A dead first expression using the practical
  # range, A*(1 - exp(-3*h/a)), was removed: its value was discarded and only
  # the second expression was ever returned.)
  gammaFunc = function(h, A, a) {
    A * (1 - exp(-h / a))
  }
  # Sum of squared residuals between the model and the empirical values.
  objectif = function(par, dist, gamma) {
    A = par[1]
    a = par[2]
    sum((gammaFunc(dist, A, a) - gamma)^2)
  }
  # Starting values: sill ~ mean empirical gamma, range ~ half the maximum lag.
  # The lower bound keeps the range strictly positive for L-BFGS-B.
  par = c(mean(gamma), max(dist) / 2)
  opt = optim(par, objectif, dist = dist, gamma = gamma,
              lower = c(-Inf, 0.02), method = "L-BFGS-B")
  if (opt$convergence != 0) return(NULL)
  c(psill = opt$par[1], range = opt$par[2])
}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/binned_tabulate.R \name{binned_tabulate} \alias{binned_tabulate} \alias{binned_tabulate.default} \alias{binned_tabulate.ff} \title{Fast tabulating in different bins} \usage{ binned_tabulate(x, bin, nbins = max(bin), nlevels = nlevels(x), ...) \method{binned_tabulate}{default}(x, bin, nbins = max(bin), nlevels = nlevels(x), ...) \method{binned_tabulate}{ff}(x, bin, nbins = max(bin), nlevels = nlevels(x), ...) } \arguments{ \item{x}{\code{factor} or \code{integer} vector with the data to be tabulated} \item{bin}{\code{integer} vector with the bin number for each data point} \item{nbins}{\code{integer} maximum bin number} \item{nlevels}{\code{integer} number of levels used in x} \item{...}{used by binned_tabulate.ff} } \value{ \code{numeric} matrix where each row is a bin and each column a level } \description{ \code{binned_tabulate} implements fast tabulating for given bins by calling c-code. It also returns the number of NA's per bin. Please note that incorrect use of this function may crash your R-session. The values of \code{bin} must be between \code{1} and \code{nbins} and may not contain \code{NA}. The values of \code{x} must be between \code{1} and \code{nlevels}. }
/pkg/man/binned_tabulate.Rd
no_license
namanpaul/ffbase
R
false
true
1,276
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/binned_tabulate.R \name{binned_tabulate} \alias{binned_tabulate} \alias{binned_tabulate.default} \alias{binned_tabulate.ff} \title{Fast tabulating in different bins} \usage{ binned_tabulate(x, bin, nbins = max(bin), nlevels = nlevels(x), ...) \method{binned_tabulate}{default}(x, bin, nbins = max(bin), nlevels = nlevels(x), ...) \method{binned_tabulate}{ff}(x, bin, nbins = max(bin), nlevels = nlevels(x), ...) } \arguments{ \item{x}{\code{factor} or \code{integer} vector with the data to be tabulated} \item{bin}{\code{integer} vector with the bin number for each data point} \item{nbins}{\code{integer} maximum bin number} \item{nlevels}{\code{integer} number of levels used in x} \item{...}{used by binned_tabulate.ff} } \value{ \code{numeric} matrix where each row is a bin and each column a level } \description{ \code{binned_tabulate} implements fast tabulating for given bins by calling c-code. It also returns the number of NA's per bin. Please note that incorrect use of this function may crash your R-session. The values of \code{bin} must be between \code{1} and \code{nbins} and may not contain \code{NA}. The values of \code{x} must be between \code{1} and \code{nlevels}. }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/loglikelihoods.R \name{normal_ll} \alias{normal_ll} \title{Gaussian log likelihood} \usage{ normal_ll(y, X, par) } \arguments{ \item{y}{binary outcome} \item{X}{design matrix} \item{par}{vector of gaussian scale parameter followed by model coefficients} } \value{ a scalar quantity proportional to a normal log-likelihood with linear parameterization, given y, X, and par } \description{ Gaussian log likelihood }
/R/metropolis/man/normal_ll.Rd
no_license
alexpkeil1/mcmc
R
false
true
490
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/loglikelihoods.R \name{normal_ll} \alias{normal_ll} \title{Gaussian log likelihood} \usage{ normal_ll(y, X, par) } \arguments{ \item{y}{binary outcome} \item{X}{design matrix} \item{par}{vector of gaussian scale parameter followed by model coefficients} } \value{ a scalar quantity proportional to a normal log-likelihood with linear parameterization, given y, X, and par } \description{ Gaussian log likelihood }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/preprocessing.R \name{text_to_word_sequence} \alias{text_to_word_sequence} \title{Split a sentence into a list of words.} \usage{ text_to_word_sequence(text, filters = "!\\"#$\%&()*+,-./:;<=>?@[\\\\]^_`{|}~\\t\\n", lower = TRUE, split = " ") } \arguments{ \item{text}{a string} \item{filters}{vector (or concatenation) of characters to filter out, such as punctuation.} \item{lower}{boolean. Whether to set the text to lowercase.} \item{split}{string. Separator for word splitting.} } \description{ Split a sentence into a list of words. } \references{ Chollet, Francois. 2015. \href{https://keras.io/}{Keras: Deep Learning library for Theano and TensorFlow}. } \seealso{ Other preprocessing: \code{\link{Tokenizer}}, \code{\link{expand_dims}}, \code{\link{img_to_array}}, \code{\link{load_img}}, \code{\link{one_hot}}, \code{\link{pad_sequences}} } \author{ Taylor B. Arnold, \email{taylor.arnold@acm.org} }
/man/text_to_word_sequence.Rd
no_license
Yannael/kerasR
R
false
true
1,000
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/preprocessing.R \name{text_to_word_sequence} \alias{text_to_word_sequence} \title{Split a sentence into a list of words.} \usage{ text_to_word_sequence(text, filters = "!\\"#$\%&()*+,-./:;<=>?@[\\\\]^_`{|}~\\t\\n", lower = TRUE, split = " ") } \arguments{ \item{text}{a string} \item{filters}{vector (or concatenation) of characters to filter out, such as punctuation.} \item{lower}{boolean. Whether to set the text to lowercase.} \item{split}{string. Separator for word splitting.} } \description{ Split a sentence into a list of words. } \references{ Chollet, Francois. 2015. \href{https://keras.io/}{Keras: Deep Learning library for Theano and TensorFlow}. } \seealso{ Other preprocessing: \code{\link{Tokenizer}}, \code{\link{expand_dims}}, \code{\link{img_to_array}}, \code{\link{load_img}}, \code{\link{one_hot}}, \code{\link{pad_sequences}} } \author{ Taylor B. Arnold, \email{taylor.arnold@acm.org} }
library(keras)

# #load(file = "data/clean_data.RData")

# Drop identifier / leaky / mostly-missing columns, encode RainToday as 0/1,
# and remove rows containing missing values.
clean_data = function(data) {
  data$X = NULL
  data$Date = NULL
  data$Evaporation = NULL
  data$Sunshine = NULL
  data$WindDir3pm = NULL
  data$WindDir9am = NULL
  data$WindGustDir = NULL
  data$RainTomorrow = NULL
  data$RainToday = ifelse(data$RainToday == "Yes", 1, 0)
  data = na.omit(data)
  return(data)
}

darwin_train = read.csv('data/darwin_train.csv')
x_train = clean_data(darwin_train)
# BUG FIX: the test set used to be read into x_test but then cleaned from the
# undefined symbol `darwin_test`; read and clean the same object.
darwin_test = read.csv('data/darwin_test.csv')
x_test = clean_data(darwin_test)

# https://blogs.rstudio.com/tensorflow/posts/2017-12-20-time-series-forecasting-with-recurrent-neural-networks/
# Parameters:
# lookback = how many days back each sample window reaches
# step     = sampling interval within the window (1 = every day)
# delay    = how many days ahead the prediction target lies
lookback <- 5
step <- 1
delay <- 1
batch_size = 128

# combine data
data_train = data.matrix(x_train)
data_test = data.matrix(x_test)

# normalize data
# train_data <- data_train[1:2000,]
# mean <- apply(train_data, 2, mean)
# std <- apply(train_data, 2, sd)
# data_train <- scale(data_train, center = mean, scale = std)
# data_test <- scale(data_test, center = mean, scale = std)

# Generator yielding (samples, targets) batches of sliding windows over `data`.
generator <- function(data, lookback, delay, min_index, max_index,
                      shuffle = FALSE, batch_size = 128, step = 1) {
  if (is.null(max_index))
    max_index <- nrow(data) - delay - 1
  i <- min_index + lookback
  function() {
    if (shuffle) {
      rows <- sample(c((min_index+lookback):max_index), size = batch_size)
    } else {
      # Wrap around once the end of the usable index range is reached.
      if (i + batch_size >= max_index)
        i <<- min_index + lookback
      rows <- c(i:min(i+batch_size-1, max_index))
      i <<- i + length(rows)
    }
    samples <- array(0, dim = c(length(rows),
                                lookback / step,
                                dim(data)[[-1]]))
    targets <- array(0, dim = c(length(rows)))
    for (j in 1:length(rows)) {
      indices <- seq(rows[[j]] - lookback, rows[[j]]-1,
                     length.out = dim(samples)[[2]])
      samples[j,,] <- data[indices,]
      # Target: last column of the row `delay` steps ahead.
      targets[[j]] <- data[rows[[j]] + delay, ncol(data)]
    }
    list(samples, targets)
  }
}

train_gen <- generator(
  data_train,
  lookback = lookback,
  delay = delay,
  min_index = 1,
  max_index = 2000,
  step = step,
  batch_size = batch_size
)

val_gen = generator(
  data_train,
  lookback = lookback,
  delay = delay,
  min_index = 2001,
  max_index = NULL,
  step = step,
  batch_size = batch_size
)

test_gen <- generator(
  data_test,
  lookback = lookback,
  delay = delay,
  min_index = 1,
  max_index = NULL,
  step = step,
  batch_size = batch_size
)

# How many steps to draw from val_gen in order to see the entire validation set
val_steps <- (nrow(data_train) - 2001 - lookback) / batch_size

# How many steps to draw from test_gen in order to see the entire test set
test_steps <- (nrow(data_test) - 1 - lookback) / batch_size

# clear session data
k_clear_session()

# create model
# (a dead assignment `shape = input_shape = c(lookback / step, dim(data)[-1])`
# that referenced the unrelated base object `data` — and was immediately
# overwritten — has been removed)
shape = input_shape = list(NULL, dim(data_train)[[-1]])

model = keras_model_sequential()
# layer_dense(model, units = 2, input_shape = shape)
layer_lstm(model, units = 50, input_shape = shape,
           batch_size = batch_size, stateful = TRUE)
layer_dropout(model, rate = 0.2)
layer_dense(model, units = 1, activation = 'sigmoid')

compile(model,
  loss = 'binary_crossentropy',
  optimizer = 'adam',
  metrics = c('accuracy')
)
summary(model)

# train model
history = fit_generator(
  model,
  train_gen,
  steps_per_epoch = 2000 / batch_size,
  epochs = 30,
  validation_data = val_gen,
  validation_steps = val_steps
)

# BUG FIX: `test_gen()[1]` returned a one-element list; `[[1]]` extracts the
# samples array that predict() expects.
tg = test_gen()[[1]]
out = predict(model, tg, batch_size = batch_size)

# save model
save_model_hdf5(model, "lstm_darwin_rain.h5")
/p4/model/Darwin_LSTM_Classification.R
no_license
maxneuds/data_mining_lecture
R
false
false
4,056
r
library(keras)

# #load(file = "data/clean_data.RData")

# Drop identifier / leaky / mostly-missing columns, encode RainToday as 0/1,
# and remove rows containing missing values.
clean_data = function(data) {
  data$X = NULL
  data$Date = NULL
  data$Evaporation = NULL
  data$Sunshine = NULL
  data$WindDir3pm = NULL
  data$WindDir9am = NULL
  data$WindGustDir = NULL
  data$RainTomorrow = NULL
  data$RainToday = ifelse(data$RainToday == "Yes", 1, 0)
  data = na.omit(data)
  return(data)
}

darwin_train = read.csv('data/darwin_train.csv')
x_train = clean_data(darwin_train)
# BUG FIX: the test set used to be read into x_test but then cleaned from the
# undefined symbol `darwin_test`; read and clean the same object.
darwin_test = read.csv('data/darwin_test.csv')
x_test = clean_data(darwin_test)

# https://blogs.rstudio.com/tensorflow/posts/2017-12-20-time-series-forecasting-with-recurrent-neural-networks/
# Parameters:
# lookback = how many days back each sample window reaches
# step     = sampling interval within the window (1 = every day)
# delay    = how many days ahead the prediction target lies
lookback <- 5
step <- 1
delay <- 1
batch_size = 128

# combine data
data_train = data.matrix(x_train)
data_test = data.matrix(x_test)

# normalize data
# train_data <- data_train[1:2000,]
# mean <- apply(train_data, 2, mean)
# std <- apply(train_data, 2, sd)
# data_train <- scale(data_train, center = mean, scale = std)
# data_test <- scale(data_test, center = mean, scale = std)

# Generator yielding (samples, targets) batches of sliding windows over `data`.
generator <- function(data, lookback, delay, min_index, max_index,
                      shuffle = FALSE, batch_size = 128, step = 1) {
  if (is.null(max_index))
    max_index <- nrow(data) - delay - 1
  i <- min_index + lookback
  function() {
    if (shuffle) {
      rows <- sample(c((min_index+lookback):max_index), size = batch_size)
    } else {
      # Wrap around once the end of the usable index range is reached.
      if (i + batch_size >= max_index)
        i <<- min_index + lookback
      rows <- c(i:min(i+batch_size-1, max_index))
      i <<- i + length(rows)
    }
    samples <- array(0, dim = c(length(rows),
                                lookback / step,
                                dim(data)[[-1]]))
    targets <- array(0, dim = c(length(rows)))
    for (j in 1:length(rows)) {
      indices <- seq(rows[[j]] - lookback, rows[[j]]-1,
                     length.out = dim(samples)[[2]])
      samples[j,,] <- data[indices,]
      # Target: last column of the row `delay` steps ahead.
      targets[[j]] <- data[rows[[j]] + delay, ncol(data)]
    }
    list(samples, targets)
  }
}

train_gen <- generator(
  data_train,
  lookback = lookback,
  delay = delay,
  min_index = 1,
  max_index = 2000,
  step = step,
  batch_size = batch_size
)

val_gen = generator(
  data_train,
  lookback = lookback,
  delay = delay,
  min_index = 2001,
  max_index = NULL,
  step = step,
  batch_size = batch_size
)

test_gen <- generator(
  data_test,
  lookback = lookback,
  delay = delay,
  min_index = 1,
  max_index = NULL,
  step = step,
  batch_size = batch_size
)

# How many steps to draw from val_gen in order to see the entire validation set
val_steps <- (nrow(data_train) - 2001 - lookback) / batch_size

# How many steps to draw from test_gen in order to see the entire test set
test_steps <- (nrow(data_test) - 1 - lookback) / batch_size

# clear session data
k_clear_session()

# create model
# (a dead assignment `shape = input_shape = c(lookback / step, dim(data)[-1])`
# that referenced the unrelated base object `data` — and was immediately
# overwritten — has been removed)
shape = input_shape = list(NULL, dim(data_train)[[-1]])

model = keras_model_sequential()
# layer_dense(model, units = 2, input_shape = shape)
layer_lstm(model, units = 50, input_shape = shape,
           batch_size = batch_size, stateful = TRUE)
layer_dropout(model, rate = 0.2)
layer_dense(model, units = 1, activation = 'sigmoid')

compile(model,
  loss = 'binary_crossentropy',
  optimizer = 'adam',
  metrics = c('accuracy')
)
summary(model)

# train model
history = fit_generator(
  model,
  train_gen,
  steps_per_epoch = 2000 / batch_size,
  epochs = 30,
  validation_data = val_gen,
  validation_steps = val_steps
)

# BUG FIX: `test_gen()[1]` returned a one-element list; `[[1]]` extracts the
# samples array that predict() expects.
tg = test_gen()[[1]]
out = predict(model, tg, batch_size = batch_size)

# save model
save_model_hdf5(model, "lstm_darwin_rain.h5")
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/biglm_mapper.R \name{update_levels} \alias{update_levels} \title{Standardize levels of factor variables.} \usage{ update_levels(data, levels_union) } \arguments{ \item{data}{Dataset to standardize} \item{levels_union}{Named list. Names should be variable names, values should be character vectors.} } \value{ \code{data} with variables in \code{levels_union} converted to factors with levels specified in \code{levels_union} } \description{ Standardize levels of factor variables. } \examples{ update_levels(d, list(var1=c("a","b","c"), var2=c("x","y"))) }
/man/update_levels.Rd
no_license
gregobad/biglm-mapper
R
false
true
636
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/biglm_mapper.R \name{update_levels} \alias{update_levels} \title{Standardize levels of factor variables.} \usage{ update_levels(data, levels_union) } \arguments{ \item{data}{Dataset to standardize} \item{levels_union}{Named list. Names should be variable names, values should be character vectors.} } \value{ \code{data} with variables in \code{levels_union} converted to factors with levels specified in \code{levels_union} } \description{ Standardize levels of factor variables. } \examples{ update_levels(d, list(var1=c("a","b","c"), var2=c("x","y"))) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/px_build_index.R \name{px_build_index} \alias{px_build_index} \title{Indexing function on bgzipped pairs file.} \usage{ px_build_index(filename, preset = "", sc = 0, bc = 0, ec = 0, sc2 = 0, bc2 = 0, ec2 = 0, delimiter = "\\t", comment_char = "#", region_split_character = "|", line_skip = 0, force = FALSE) } \arguments{ \item{filename}{a pairs file, or a bgzipped text file (sometextfile.gz) with an index file sometextfile.gz.px2 in the same folder.} \item{preset}{one of the following strings: 'gff', 'bed', 'sam', 'vcf', 'psltbl' (1D-indexing) or 'pairs', 'merged_nodups', 'old_merged_nodups' (2D-indexing). If preset is '', at least some of the custom parameters must be given instead (sc, bc, ec, sc2, bc2, ec2, delimiter, comment_char, line_skip). (default '')} \item{sc}{first sequence (chromosome) column index (1-based). Zero (0) means not specified. If preset is given, preset overrides sc. If preset is not given, this one is required. (default 0)} \item{bc}{first start position column index (1-based). Zero (0) means not specified. If preset is given, preset overrides bc. If preset is not given, this one is required. (default 0)} \item{ec}{first end position column index (1-based). Zero (0) means not specified. If preset is given, preset overrides ec. (default 0)} \item{sc2}{second sequence (chromosome) column index (1-based). Zero (0) means not specified. If preset is given, preset overrides sc2. If sc, bc are specified but not sc2 and bc2, it is 1D-indexed. (default 0)} \item{bc2}{second start position column index (1-based). Zero (0) means not specified. If preset is given, preset overrides bc2. (default 0)} \item{ec2}{second end position column index (1-based). Zero (0) means not specified. If preset is given, preset overrides ec2. (default 0)} \item{delimiter}{delimiter (e.g. '\\t' or ' ') (default '\\t'). 
If preset is given, preset overrides delimiter.} \item{comment_char}{comment character. Lines beginning with this character are skipped when creating an index. If preset is given, preset overrides comment_char (default '#')} \item{region_split_character}{region_split_character (default '|'). This option overrides preset. (All presets have default region_split_character ('|')). This parameter can be useful when one's chromosome names contain character '|'.} \item{line_skip}{number of lines to skip in the beginning. (default 0)} \item{force}{If TRUE, overwrite existing index file. If FALSE, do not overwrite unless the index file is older than the bgzipped file. (default FALSE)} } \description{ This function creates a pairix (px2) index a bgzipped text file. Either a preset or a set of custom parameters (column indices, comment_char, line_skip) must be specified. } \examples{ filename = system.file(".","test_4dn.pairs.gz", package="Rpairix") px_build_index(filename, force=TRUE) px_query(filename, 'chr22|chr22') px_build_index(filename, 'pairs', force=TRUE) px_build_index(filename, sc=2, bc=3, ec=3, sc2=4, bc2=5, ec2=5, force=TRUE) px_build_index(filename, region_split_character='^', force=TRUE) px_query(filename, 'chr22^chr22') } \keyword{index} \keyword{pairix}
/man/px_build_index.Rd
permissive
j1z0/Rpairix
R
false
true
3,218
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/px_build_index.R \name{px_build_index} \alias{px_build_index} \title{Indexing function on bgzipped pairs file.} \usage{ px_build_index(filename, preset = "", sc = 0, bc = 0, ec = 0, sc2 = 0, bc2 = 0, ec2 = 0, delimiter = "\\t", comment_char = "#", region_split_character = "|", line_skip = 0, force = FALSE) } \arguments{ \item{filename}{a pairs file, or a bgzipped text file (sometextfile.gz) with an index file sometextfile.gz.px2 in the same folder.} \item{preset}{one of the following strings: 'gff', 'bed', 'sam', 'vcf', 'psltbl' (1D-indexing) or 'pairs', 'merged_nodups', 'old_merged_nodups' (2D-indexing). If preset is '', at least some of the custom parameters must be given instead (sc, bc, ec, sc2, bc2, ec2, delimiter, comment_char, line_skip). (default '')} \item{sc}{first sequence (chromosome) column index (1-based). Zero (0) means not specified. If preset is given, preset overrides sc. If preset is not given, this one is required. (default 0)} \item{bc}{first start position column index (1-based). Zero (0) means not specified. If preset is given, preset overrides bc. If preset is not given, this one is required. (default 0)} \item{ec}{first end position column index (1-based). Zero (0) means not specified. If preset is given, preset overrides ec. (default 0)} \item{sc2}{second sequence (chromosome) column index (1-based). Zero (0) means not specified. If preset is given, preset overrides sc2. If sc, bc are specified but not sc2 and bc2, it is 1D-indexed. (default 0)} \item{bc2}{second start position column index (1-based). Zero (0) means not specified. If preset is given, preset overrides bc2. (default 0)} \item{ec2}{second end position column index (1-based). Zero (0) means not specified. If preset is given, preset overrides ec2. (default 0)} \item{delimiter}{delimiter (e.g. '\\t' or ' ') (default '\\t'). 
If preset is given, preset overrides delimiter.} \item{comment_char}{comment character. Lines beginning with this character are skipped when creating an index. If preset is given, preset overrides comment_char (default '#')} \item{region_split_character}{region_split_character (default '|'). This option overrides preset. (All presets have default region_split_character ('|')). This parameter can be useful when one's chromosome names contain character '|'.} \item{line_skip}{number of lines to skip in the beginning. (default 0)} \item{force}{If TRUE, overwrite existing index file. If FALSE, do not overwrite unless the index file is older than the bgzipped file. (default FALSE)} } \description{ This function creates a pairix (px2) index a bgzipped text file. Either a preset or a set of custom parameters (column indices, comment_char, line_skip) must be specified. } \examples{ filename = system.file(".","test_4dn.pairs.gz", package="Rpairix") px_build_index(filename, force=TRUE) px_query(filename, 'chr22|chr22') px_build_index(filename, 'pairs', force=TRUE) px_build_index(filename, sc=2, bc=3, ec=3, sc2=4, bc2=5, ec2=5, force=TRUE) px_build_index(filename, region_split_character='^', force=TRUE) px_query(filename, 'chr22^chr22') } \keyword{index} \keyword{pairix}
#' Remove outliers/bad subjects
#'
#' This function takes as input two dataframes. The first dataframe is the
#' original dataframe containing all IDs; the second dataframe contains only
#' those IDs to be removed. The removed records can optionally be saved to a
#' csv file before filtering.
#'
#' @param x dataframe containing all subjects
#' @param remove dataframe that contains the subjects to be removed
#' @param output.dir optional directory path in which to save the removed
#'   records (created if it does not exist). If NULL, nothing is saved.
#' @param output.file file name for the saved removed records
#' @param id column name containing subject IDs
#' @return \code{x} with all rows whose ID appears in \code{remove} dropped
#' @keywords remove
#' @export
#' @examples
#' \dontrun{
#' remove_save(data, remove = data_remove,
#'             output.dir = "data/remove", output.file = "flanker_removed.csv")
#' }
remove_save <- function(x, remove, output.dir = NULL, output.file = NULL,
                        id = "Subject") {
  # Work with a canonical "Subject" column name internally.
  colnames(x)[which(colnames(x) == id)] <- "Subject"
  if (nrow(remove) > 0) {
    # Save the removed records first, if an output location was supplied.
    # (Previously dir.create(NULL) errored when output.dir kept its default.)
    if (!is.null(output.dir) && !is.null(output.file)) {
      dir.create(output.dir, showWarnings = FALSE)
      readr::write_csv(remove, paste(output.dir, output.file, sep = "/"), na = "")
    }
    colnames(remove)[which(colnames(remove) == id)] <- "Subject"
    subj.remove <- unique(remove$Subject)
    ## Remove them!
    x <- dplyr::filter(x, !(Subject %in% subj.remove))
  }
  # Restore the caller's ID column name.
  colnames(x)[which(colnames(x) == "Subject")] <- id
  return(x)
}
/R/remove_save.R
no_license
dr-JT/datawrangling
R
false
false
1,090
r
#' Remove outliers/bad subjects
#'
#' This function takes as input two dataframes. The first dataframe is the
#' original dataframe containing all IDs; the second dataframe contains only
#' those IDs to be removed. The removed records can optionally be saved to a
#' csv file before filtering.
#'
#' @param x dataframe containing all subjects
#' @param remove dataframe that contains the subjects to be removed
#' @param output.dir optional directory path in which to save the removed
#'   records (created if it does not exist). If NULL, nothing is saved.
#' @param output.file file name for the saved removed records
#' @param id column name containing subject IDs
#' @return \code{x} with all rows whose ID appears in \code{remove} dropped
#' @keywords remove
#' @export
#' @examples
#' \dontrun{
#' remove_save(data, remove = data_remove,
#'             output.dir = "data/remove", output.file = "flanker_removed.csv")
#' }
remove_save <- function(x, remove, output.dir = NULL, output.file = NULL,
                        id = "Subject") {
  # Work with a canonical "Subject" column name internally.
  colnames(x)[which(colnames(x) == id)] <- "Subject"
  if (nrow(remove) > 0) {
    # Save the removed records first, if an output location was supplied.
    # (Previously dir.create(NULL) errored when output.dir kept its default.)
    if (!is.null(output.dir) && !is.null(output.file)) {
      dir.create(output.dir, showWarnings = FALSE)
      readr::write_csv(remove, paste(output.dir, output.file, sep = "/"), na = "")
    }
    colnames(remove)[which(colnames(remove) == id)] <- "Subject"
    subj.remove <- unique(remove$Subject)
    ## Remove them!
    x <- dplyr::filter(x, !(Subject %in% subj.remove))
  }
  # Restore the caller's ID column name.
  colnames(x)[which(colnames(x) == "Subject")] <- id
  return(x)
}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/cadtools.R \name{basindelin_big} \alias{basindelin_big} \title{Delineates hydrological basins for large datasets} \usage{ basindelin_big(dem, dirout = NA, trace = TRUE) } \arguments{ \item{dem}{a raster object of elevations.} \item{dirout}{an optional character vector containing a single path directory for temporarily storing tiles. Deleted after use. Tilde expansion (see \code{\link[=path.expand]{path.expand()}}) is done.} \item{trace}{a logical value indicating whether to plot and report on progress.} } \value{ a raster object with individual basins numbered sequentially as integers. } \description{ \code{basindelin_big} is for use with large digital elevation datasets, to delineate hydrological or cold-air drainage basins. } \details{ \code{basindelin_big} divides the large dataset into tiles and then uses \code{\link[=basindelin]{basindelin()}} to delineate basins for each tile before mosaicing back together and merging basins along tile edges if not seperated by a boundary \enumerate{ \item If \code{dirout} is unspecified, then a directory \code{basinsout} is temporarily created within the working directory. If \code{trace} is TRUE (the default) then progress is tracked during three stages: (1) the basins of each tile are plotted, (2) basins after mosaicing, but prior to merging are plotted and (3) on each merge iteration, the number of basins to merge is printed and processed basin is plotted. } } \examples{ library(raster) basins <- basindelin_big(dtm1m) plot(basins, main = "Basins") } \seealso{ \code{\link[=basindelin]{basindelin()}} for working with smaller datasets. }
/man/basindelin_big.Rd
no_license
ackuyucu/microclima
R
false
true
1,685
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/cadtools.R \name{basindelin_big} \alias{basindelin_big} \title{Delineates hydrological basins for large datasets} \usage{ basindelin_big(dem, dirout = NA, trace = TRUE) } \arguments{ \item{dem}{a raster object of elevations.} \item{dirout}{an optional character vector containing a single path directory for temporarily storing tiles. Deleted after use. Tilde expansion (see \code{\link[=path.expand]{path.expand()}}) is done.} \item{trace}{a logical value indicating whether to plot and report on progress.} } \value{ a raster object with individual basins numbered sequentially as integers. } \description{ \code{basindelin_big} is for use with large digital elevation datasets, to delineate hydrological or cold-air drainage basins. } \details{ \code{basindelin_big} divides the large dataset into tiles and then uses \code{\link[=basindelin]{basindelin()}} to delineate basins for each tile before mosaicing back together and merging basins along tile edges if not seperated by a boundary \enumerate{ \item If \code{dirout} is unspecified, then a directory \code{basinsout} is temporarily created within the working directory. If \code{trace} is TRUE (the default) then progress is tracked during three stages: (1) the basins of each tile are plotted, (2) basins after mosaicing, but prior to merging are plotted and (3) on each merge iteration, the number of basins to merge is printed and processed basin is plotted. } } \examples{ library(raster) basins <- basindelin_big(dtm1m) plot(basins, main = "Basins") } \seealso{ \code{\link[=basindelin]{basindelin()}} for working with smaller datasets. }
library(repo)

### Name: repo_tags
### Title: List all tags
### Aliases: repo_tags

### ** Examples

# Open a fresh repository inside a temporary directory.
repo_dir <- file.path(tempdir(), "example_repo")
repository <- repo_open(repo_dir, TRUE)

# Store two items, each carrying a couple of tags.
repository$put(1, "item1", "Sample item 1", c("repo_tags", "tag1"))
repository$put(2, "item2", "Sample item 2", c("repo_tags", "tag2"))

# List every tag present in the repository.
repository$tags()

# Remove the temporary repository again.
unlink(repo_dir, TRUE)
/data/genthat_extracted_code/repo/examples/repo_tags.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
428
r
library(repo)

### Name: repo_tags
### Title: List all tags
### Aliases: repo_tags

### ** Examples

# Open a fresh repository inside a temporary directory.
repo_dir <- file.path(tempdir(), "example_repo")
repository <- repo_open(repo_dir, TRUE)

# Store two items, each carrying a couple of tags.
repository$put(1, "item1", "Sample item 1", c("repo_tags", "tag1"))
repository$put(2, "item2", "Sample item 2", c("repo_tags", "tag2"))

# List every tag present in the repository.
repository$tags()

# Remove the temporary repository again.
unlink(repo_dir, TRUE)
#' Simulate from a Bernoulli distribution #' #'The sim_bern_mean function has the same general structure as sim_regression() #'but simulates data from a Bernoulli distribution and returns the sample average. #' @param n number of observations. If length(n) > 1, the length is taken to be the number required. #' @param prob probability of success on each trial. #' #' @return #' @export #' @importFrom tibble tibble #' @importFrom magrittr "%>%" #' @import dplyr #' @import purrr #' @examples sim_bern_mean = function(n, prob) { sim_data = tibble( y = rbinom(n, 1, prob) ) tibble( samp_avg = mean(sim_data %>% pull(y)) ) }
/R/sim_bern_mean.R
no_license
jackyan0320/example.package
R
false
false
641
r
#' Simulate from a Bernoulli distribution #' #'The sim_bern_mean function has the same general structure as sim_regression() #'but simulates data from a Bernoulli distribution and returns the sample average. #' @param n number of observations. If length(n) > 1, the length is taken to be the number required. #' @param prob probability of success on each trial. #' #' @return #' @export #' @importFrom tibble tibble #' @importFrom magrittr "%>%" #' @import dplyr #' @import purrr #' @examples sim_bern_mean = function(n, prob) { sim_data = tibble( y = rbinom(n, 1, prob) ) tibble( samp_avg = mean(sim_data %>% pull(y)) ) }
setwd("osmFISH_AllenSSp/") library(Seurat) library(ggplot2) osmFISH <- readRDS("data/seurat_objects/osmFISH_Cortex.rds") osmFISH.imputed <- readRDS("data/seurat_objects/osmFISH_Cortex_imputed.rds") allen <- readRDS("data/seurat_objects/allen_brain_SSp.rds") genes.leaveout <- intersect(rownames(osmFISH),rownames(allen)) Imp_genes <- matrix(0,nrow = length(genes.leaveout),ncol = dim(osmFISH@assays$RNA)[2]) rownames(Imp_genes) <- genes.leaveout anchor_time <- vector(mode= "numeric") Transfer_time <- vector(mode= "numeric") run_imputation <- function(ref.obj, query.obj, feature.remove) { message(paste0('removing ', feature.remove)) features <- setdiff(rownames(query.obj), feature.remove) DefaultAssay(ref.obj) <- 'RNA' DefaultAssay(query.obj) <- 'RNA' start_time <- Sys.time() anchors <- FindTransferAnchors( reference = ref.obj, query = query.obj, features = features, dims = 1:30, reduction = 'cca' ) end_time <- Sys.time() anchor_time <<- c(anchor_time,as.numeric(difftime(end_time,start_time,units = 'secs'))) refdata <- GetAssayData( object = ref.obj, assay = 'RNA', slot = 'data' ) start_time <- Sys.time() imputation <- TransferData( anchorset = anchors, refdata = refdata, weight.reduction = 'pca' ) query.obj[['seq']] <- imputation end_time <- Sys.time() Transfer_time <<- c(Transfer_time,as.numeric(difftime(end_time,start_time,units = 'secs'))) return(query.obj) } for(i in 1:length(genes.leaveout)) { imputed.ss2 <- run_imputation(ref.obj = allen, query.obj = osmFISH, feature.remove = genes.leaveout[[i]]) osmFISH[['ss2']] <- imputed.ss2[, colnames(osmFISH)][['seq']] Imp_genes[genes.leaveout[[i]],] = as.vector(osmFISH@assays$ss2[genes.leaveout[i],]) } write.csv(Imp_genes,file = 'Results/Seurat_LeaveOneOut.csv') write.csv(anchor_time,file = 'Results/Seurat_anchor_time.csv',row.names = FALSE) write.csv(Transfer_time,file = 'Results/Seurat_transfer_time.csv',row.names = FALSE) # show genes not in the osmFISH dataset DefaultAssay(osmFISH.imputed) <- "ss2" new.genes <- 
c('Tesc', 'Pvrl3', 'Grm2') Imp_New_genes <- matrix(0,nrow = length(new.genes),ncol = dim(osmFISH.imputed@assays$ss2)[2]) rownames(Imp_New_genes) <- new.genes for(i in 1:length(new.genes)) { Imp_New_genes[new.genes[[i]],] = as.vector(osmFISH.imputed@assays$ss2[new.genes[i],]) } write.csv(Imp_New_genes,file = 'Results/Seurat_New_genes.csv')
/benchmark/osmFISH_AllenSSp/Seurat/osmFISH_integration.R
permissive
c4chow/SpaGE
R
false
false
2,499
r
setwd("osmFISH_AllenSSp/") library(Seurat) library(ggplot2) osmFISH <- readRDS("data/seurat_objects/osmFISH_Cortex.rds") osmFISH.imputed <- readRDS("data/seurat_objects/osmFISH_Cortex_imputed.rds") allen <- readRDS("data/seurat_objects/allen_brain_SSp.rds") genes.leaveout <- intersect(rownames(osmFISH),rownames(allen)) Imp_genes <- matrix(0,nrow = length(genes.leaveout),ncol = dim(osmFISH@assays$RNA)[2]) rownames(Imp_genes) <- genes.leaveout anchor_time <- vector(mode= "numeric") Transfer_time <- vector(mode= "numeric") run_imputation <- function(ref.obj, query.obj, feature.remove) { message(paste0('removing ', feature.remove)) features <- setdiff(rownames(query.obj), feature.remove) DefaultAssay(ref.obj) <- 'RNA' DefaultAssay(query.obj) <- 'RNA' start_time <- Sys.time() anchors <- FindTransferAnchors( reference = ref.obj, query = query.obj, features = features, dims = 1:30, reduction = 'cca' ) end_time <- Sys.time() anchor_time <<- c(anchor_time,as.numeric(difftime(end_time,start_time,units = 'secs'))) refdata <- GetAssayData( object = ref.obj, assay = 'RNA', slot = 'data' ) start_time <- Sys.time() imputation <- TransferData( anchorset = anchors, refdata = refdata, weight.reduction = 'pca' ) query.obj[['seq']] <- imputation end_time <- Sys.time() Transfer_time <<- c(Transfer_time,as.numeric(difftime(end_time,start_time,units = 'secs'))) return(query.obj) } for(i in 1:length(genes.leaveout)) { imputed.ss2 <- run_imputation(ref.obj = allen, query.obj = osmFISH, feature.remove = genes.leaveout[[i]]) osmFISH[['ss2']] <- imputed.ss2[, colnames(osmFISH)][['seq']] Imp_genes[genes.leaveout[[i]],] = as.vector(osmFISH@assays$ss2[genes.leaveout[i],]) } write.csv(Imp_genes,file = 'Results/Seurat_LeaveOneOut.csv') write.csv(anchor_time,file = 'Results/Seurat_anchor_time.csv',row.names = FALSE) write.csv(Transfer_time,file = 'Results/Seurat_transfer_time.csv',row.names = FALSE) # show genes not in the osmFISH dataset DefaultAssay(osmFISH.imputed) <- "ss2" new.genes <- 
c('Tesc', 'Pvrl3', 'Grm2') Imp_New_genes <- matrix(0,nrow = length(new.genes),ncol = dim(osmFISH.imputed@assays$ss2)[2]) rownames(Imp_New_genes) <- new.genes for(i in 1:length(new.genes)) { Imp_New_genes[new.genes[[i]],] = as.vector(osmFISH.imputed@assays$ss2[new.genes[i],]) } write.csv(Imp_New_genes,file = 'Results/Seurat_New_genes.csv')
library(rvest) library(XML) library(magrittr) # Amazon Reviews ############################# aurl <- "https://www.amazon.in/JBL-C100SI-Ear-Headphones-Black/product-reviews/B01DEWVZ2C/ref=cm_cr_getr_d_paging_btm_1?showViewpoints=1&pageNumber=" amazon_reviews <- NULL for (i in 1:30){ murl <- read_html(as.character(paste(aurl,10,""))) rev <- murl %>% html_nodes(".review-text") %>% html_text() amazon_reviews <- c(amazon_reviews,rev) } write.table(amazon_reviews,"JBLearphones.txt") write.csv(amazon_reviews,"JBLearphones.csv") getwd() # Snapdeal reviews ############################# surl_1 <- "https://www.snapdeal.com/product/sandisk-cruzer-blade-32gb-pen/28912/reviews?page=" surl_2 <- "&sortBy=HELPFUL#defRevPDP" snapdeal_reviews <- NULL for (i in 1:30){ surl <- read_html(as.character(paste(surl_1,surl_2,sep=as.character(i)))) srev <- surl %>% html_nodes("#defaultReviewsCard p") %>% html_text() snapdeal_reviews <- c(snapdeal_reviews,srev) } write.table(snapdeal_reviews,"sandisk.txt") getwd() write.csv(snapdeal_reviews,"sandisk.csv") getwd() # Sample urls # url = http://www.amazon.in/product-reviews/B01LXMHNMQ/ref=cm_cr_getr_d_paging_btm_4?ie=UTF8&reviewerType=all_reviews&showViewpoints=1&sortBy=recent&pageNumber=1 # url = http://www.amazon.in/Moto-G5-GB-Fine-Gold/product-reviews/B01N7JUH7P/ref=cm_cr_getr_d_paging_btm_3?showViewpoints=1&pageNumber=1 # url = http://www.amazon.in/Honor-6X-Grey-32GB/product-reviews/B01FM7JGT6/ref=cm_cr_arp_d_paging_btm_3?showViewpoints=1&pageNumber=1 ########## Extracting reviews from a travel website ################### a<-10 rev<-NULL url1<-"https://www.tripadvisor.in/Hotel_Review-g147399-d2354539-Reviews-or" url2<-"-The_Venetian_on_Grace_Bay-Providenciales_Turks_and_Caicos.html#REVIEWS" for(i in 0:8){ url<-read_html(as.character(paste(url1,i*a,url2,sep=""))) ping<-url %>% html_nodes(".partial_entry") %>% html_text() rev<-c(rev,ping) } write.table(rev,"travel.txt")
/text_min2.R
no_license
MGPraveen07/Text-mining
R
false
false
2,029
r
library(rvest) library(XML) library(magrittr) # Amazon Reviews ############################# aurl <- "https://www.amazon.in/JBL-C100SI-Ear-Headphones-Black/product-reviews/B01DEWVZ2C/ref=cm_cr_getr_d_paging_btm_1?showViewpoints=1&pageNumber=" amazon_reviews <- NULL for (i in 1:30){ murl <- read_html(as.character(paste(aurl,10,""))) rev <- murl %>% html_nodes(".review-text") %>% html_text() amazon_reviews <- c(amazon_reviews,rev) } write.table(amazon_reviews,"JBLearphones.txt") write.csv(amazon_reviews,"JBLearphones.csv") getwd() # Snapdeal reviews ############################# surl_1 <- "https://www.snapdeal.com/product/sandisk-cruzer-blade-32gb-pen/28912/reviews?page=" surl_2 <- "&sortBy=HELPFUL#defRevPDP" snapdeal_reviews <- NULL for (i in 1:30){ surl <- read_html(as.character(paste(surl_1,surl_2,sep=as.character(i)))) srev <- surl %>% html_nodes("#defaultReviewsCard p") %>% html_text() snapdeal_reviews <- c(snapdeal_reviews,srev) } write.table(snapdeal_reviews,"sandisk.txt") getwd() write.csv(snapdeal_reviews,"sandisk.csv") getwd() # Sample urls # url = http://www.amazon.in/product-reviews/B01LXMHNMQ/ref=cm_cr_getr_d_paging_btm_4?ie=UTF8&reviewerType=all_reviews&showViewpoints=1&sortBy=recent&pageNumber=1 # url = http://www.amazon.in/Moto-G5-GB-Fine-Gold/product-reviews/B01N7JUH7P/ref=cm_cr_getr_d_paging_btm_3?showViewpoints=1&pageNumber=1 # url = http://www.amazon.in/Honor-6X-Grey-32GB/product-reviews/B01FM7JGT6/ref=cm_cr_arp_d_paging_btm_3?showViewpoints=1&pageNumber=1 ########## Extracting reviews from a travel website ################### a<-10 rev<-NULL url1<-"https://www.tripadvisor.in/Hotel_Review-g147399-d2354539-Reviews-or" url2<-"-The_Venetian_on_Grace_Bay-Providenciales_Turks_and_Caicos.html#REVIEWS" for(i in 0:8){ url<-read_html(as.character(paste(url1,i*a,url2,sep=""))) ping<-url %>% html_nodes(".partial_entry") %>% html_text() rev<-c(rev,ping) } write.table(rev,"travel.txt")
library(lubridate) library(magrittr) library(ggplot2) library(ggpubr) library(grid) library(gridExtra) library(scales) library(dplyr) library(plyr) library(tidyr) library(zoo) # Here we assess whether or not climatic factors act more generally to enhance transmission, rather than as specific triggers # for epidemic onset # Figure S3 # Loading data ------------------------------------------------------------ mean_fortnightly_climate_30years<-read.csv("./dat/raw/mean_fortnightly_climate_30years.csv") epi_table<-read.csv("./dat/raw/epi_table.csv") raw_table<-read.csv("./dat/raw/raw_data.csv") raw_table<-raw_table%>% dplyr::mutate(fortnights_since_start_of_year = lubridate::yday(specimen_date)%/%14+1)%>% dplyr::group_by(city,year,assumed_antigenic_variant,fortnights_since_start_of_year)%>% dplyr::summarise(count=n()) cities<-c("ADELAIDE","BRISBANE","MELBOURNE","PERTH","SYDNEY") epi_table$city<-factor(epi_table$city,levels = cities) # function to return mean climate values ---------------------------------- mean_climate_over_epi<-function(x){ #x<-as.data.frame(x) if(x$epi_alarm=="N"){ return(data.frame(x, mean_epi_ah=NA, mean_epi_temp=NA)) } fortnights<-seq(x$start,x$end,1) temp_clim1<-subset(mean_fortnightly_climate_30years,city ==as.character(x$city)) temp_clim<-temp_clim1%>%subset(.,year==x$year & fortnights_since_start_of_year%in% fortnights) #roll_mean_AH<-temp_clim1$mean_AH%>%rollmean(.,k=length(fortnights)) #roll_mean_temp<-temp_clim1$mean_temp%>%rollmean(.,k=length(fortnights)) temp_clim<-temp_clim%>%dplyr::summarise(mean_epi_ah = mean(mean_AH), mean_epi_temp = mean(mean_temp)#, #z_score_mean_epi_ah = (mean_epi_ah-mean(roll_mean_AH,na.rm=TRUE))/sd(roll_mean_AH,na.rm=TRUE), #z_score_mean_epi_temp = (mean_epi_temp-mean(roll_mean_temp,na.rm=TRUE))/sd(roll_mean_temp,na.rm=TRUE) ) return(data.frame(x,temp_clim)) } early_climate<-function(x){ #mean climate over start to peak fortnight x<-as.data.frame(x) if(x$epi_alarm=="N"){ return(data.frame(x, early_ah=NA, 
early_temp=NA)) } temp<-raw_table%>%subset(.,city==x$city & year==x$year & assumed_antigenic_variant==as.character(x$reference_strain) & fortnights_since_start_of_year %in%c(x$start+1:x$end)) peak_fortnight<-temp$fortnights_since_start_of_year[which.max(temp$count)] fortnights<-seq(x$start,peak_fortnight,1) temp_clim1<-subset(mean_fortnightly_climate_30years,city ==as.character(x$city)) #roll_mean_AH<-temp_clim1$mean_AH%>%rollmean(.,k=length(fortnights)) #roll_mean_temp<-temp_clim1$mean_temp%>%rollmean(.,k=length(fortnights)) temp_clim<-temp_clim1%>%subset(.,year==x$year & fortnights_since_start_of_year%in% fortnights) temp_clim<-temp_clim%>%dplyr::summarise(early_ah = mean(mean_AH), early_temp = mean(mean_temp)#, #z_score_early_ah = (early_ah-mean(roll_mean_AH,na.rm=TRUE))/sd(roll_mean_AH,na.rm=TRUE), #z_score_early_temp = (early_temp-mean(roll_mean_temp,na.rm=TRUE))/sd(roll_mean_temp,na.rm=TRUE) ) } # getting climate for each epidemic --------------------------------------- epi_table_with_clim<-adply(epi_table%>%subset(.,epi_alarm=="Y" & year!=2009),1,mean_climate_over_epi) epi_table_with_clim<-adply(epi_table_with_clim,1,early_climate) # calculating z score for epidemic size and climate ---------------------- #in order to make it comparable between cities epi_table_with_clim<-epi_table_with_clim%>% dplyr::mutate(scaled_incidence_city = incidence_per_mil/mean_epi_size, log_incidence = log(incidence_per_mil)) mean_size_subtype_city<-epi_table_with_clim%>% dplyr::group_by(city,subtype)%>% dplyr::summarise(mean_epi_size_sc = mean(log_incidence,na.rm=TRUE)) epi_table_with_clim<-left_join(epi_table_with_clim,mean_size_subtype_city) epi_table_with_clim<-epi_table_with_clim%>% dplyr::mutate(scaled_incidence_subtype_city = log_incidence-mean_epi_size_sc) epi_table_with_clim<-epi_table_with_clim%>% dplyr::group_by(city,subtype)%>% dplyr::mutate(incidence_z_score_subtype_city = ifelse(epi_alarm=="Y", (log_incidence-mean(log_incidence,na.rm=TRUE))/sd(log_incidence,na.rm = 
TRUE), NA)) epi_table_with_clim<-epi_table_with_clim%>% dplyr::group_by(city)%>% dplyr::mutate(z_score_mean_epi_ah = ifelse(epi_alarm=="Y", (mean_epi_ah-mean(mean_epi_ah,na.rm=TRUE))/sd(mean_epi_ah,na.rm=TRUE))) # Mean AH over epidemic period -------------------------------------------- mean_epi_ah_plot<-epi_table_with_clim%>% subset(.,year!=2009 & epi_alarm=="Y")%>% ggplot(.,aes(x=mean_epi_ah,y= scaled_incidence_subtype_city))+ geom_jitter(aes(group = city, color=city), position=position_jitter(width=0.1,height=0.05),alpha=0.6,size=3.5)+ stat_cor(method = "pearson",size=8)+ scale_color_manual(name = "City", values=c("ADELAIDE"="#CC79A7", "BRISBANE"="#009E73", "MELBOURNE"="#56B4E9", "PERTH"="#999999", "SYDNEY"="#E69F00"))+ scale_x_continuous(breaks=seq(6,16,2),limits = c(5,16))+ scale_y_continuous(breaks=seq(-4,2,1),limits = c(-4,2))+ xlab(expression(paste("Mean Absolute Humidity over Epidemic Period "," (g/",m^{3},")",sep="")))+ ylab("Lab confirmed incidence") + theme_bw()+ theme(strip.background = element_blank(), strip.text = element_text(size=25), axis.title=element_text(size=18), axis.text.x =element_text(size=16,margin=margin(t=7,r=0,b=0,l=0)), axis.text.y =element_text(size=16,margin=margin(t=0,r=7,b=0,l=0)), axis.ticks.length = unit(0.4,"cm"), panel.border = element_rect(colour = "black"), legend.title=element_text(size=20), legend.text=element_text(size=17), panel.grid.major = element_blank(), panel.grid.minor = element_blank()) # mean temp over epidemic period ------------------------------------------------- mean_epi_temp_plot<-epi_table_with_clim%>% subset(.,year!=2009 & epi_alarm=="Y")%>% ggplot(.,aes(x=mean_epi_temp,y= scaled_incidence_subtype_city))+ geom_jitter(aes(group = subtype, color=city), position=position_jitter(width=0.1,height=0.05),alpha=0.6,size=3.5)+ stat_cor(method = "pearson",size=8)+ scale_color_manual(name = "City", values=c("ADELAIDE"="#CC79A7", "BRISBANE"="#009E73", "MELBOURNE"="#56B4E9", "PERTH"="#999999", "SYDNEY"="#E69F00"))+ 
scale_x_continuous(breaks=seq(8,24,4),limits = c(8,24))+ scale_y_continuous(breaks=seq(-4,2,1),limits = c(-4,2))+ xlab(expression(paste("Mean Temperature over Epidemic Period (",degree,"C)",sep="")))+ ylab("Lab confirmed incidence") + theme_bw()+ theme(strip.background = element_blank(), strip.text = element_text(size=25), axis.title=element_text(size=18), axis.text.x =element_text(size=16,margin=margin(t=7,r=0,b=0,l=0)), axis.text.y =element_text(size=16,margin=margin(t=0,r=7,b=0,l=0)), axis.ticks.length = unit(0.4,"cm"), panel.border = element_rect(colour = "black"), legend.title=element_text(size=20), legend.text=element_text(size=17), panel.grid.major = element_blank(), panel.grid.minor = element_blank()) # mean AH over early epidemic --------------------------------------------- early_epi_ah_plot<-epi_table_with_clim%>% subset(.,year!=2009 & epi_alarm=="Y")%>% ggplot(.,aes(x=early_ah,y= scaled_incidence_subtype_city))+ geom_jitter(aes(group = city, color=city), position=position_jitter(width=0.1,height=0.05),alpha=0.6,size=3.5)+ stat_cor(method = "pearson",size=8)+ scale_color_manual(name = "City", values=c("ADELAIDE"="#CC79A7", "BRISBANE"="#009E73", "MELBOURNE"="#56B4E9", "PERTH"="#999999", "SYDNEY"="#E69F00"))+ scale_x_continuous(breaks=seq(6,16,2),limits = c(5,16))+ scale_y_continuous(breaks=seq(-4,2,1),limits = c(-4,2))+ xlab(expression(paste("Mean Absolute Humidity over Early Epidemic "," (g/",m^{3},")",sep="")))+ ylab("Lab confirmed incidence") + theme_bw()+ theme(strip.background = element_blank(), strip.text = element_text(size=25), axis.title=element_text(size=18), axis.text.x =element_text(size=16,margin=margin(t=7,r=0,b=0,l=0)), axis.text.y =element_text(size=16,margin=margin(t=0,r=7,b=0,l=0)), axis.ticks.length = unit(0.4,"cm"), panel.border = element_rect(colour = "black"), legend.title=element_text(size=20), legend.text=element_text(size=17), panel.grid.major = element_blank(), panel.grid.minor = element_blank()) # early temp over epidemic period 
------------------------------------------------- early_epi_temp_plot<-epi_table_with_clim%>% subset(.,year!=2009 & epi_alarm=="Y")%>% ggplot(.,aes(x=early_temp,y= scaled_incidence_subtype_city))+ geom_jitter(aes(group = subtype, color=city), position=position_jitter(width=0.1,height=0.05),alpha=0.6,size=3.5)+ stat_cor(method = "pearson",size=8)+ scale_color_manual(name = "City", values=c("ADELAIDE"="#CC79A7", "BRISBANE"="#009E73", "MELBOURNE"="#56B4E9", "PERTH"="#999999", "SYDNEY"="#E69F00"))+ scale_x_continuous(breaks=seq(8,24,4),limits = c(8,24))+ scale_y_continuous(breaks=seq(-4,2,1),limits = c(-4,2))+ xlab(expression(paste("Mean Temperature over Early Epidemic (",degree,"C)",sep="")))+ ylab("Lab confirmed incidence") + theme_bw()+ theme(strip.background = element_blank(), strip.text = element_text(size=25), axis.title=element_text(size=18), axis.text.x =element_text(size=16,margin=margin(t=7,r=0,b=0,l=0)), axis.text.y =element_text(size=16,margin=margin(t=0,r=7,b=0,l=0)), axis.ticks.length = unit(0.4,"cm"), panel.border = element_rect(colour = "black"), legend.title=element_text(size=20), legend.text=element_text(size=17), panel.grid.major = element_blank(), panel.grid.minor = element_blank()) # save plot --------------------------------------------------------------- #extract legend #https://github.com/hadley/ggplot2/wiki/Share-a-legend-between-two-ggplot2-graphs g_legend<-function(a.gplot){ tmp <- ggplot_gtable(ggplot_build(a.gplot)) leg <- which(sapply(tmp$grobs, function(x) x$name) == "guide-box") legend <- tmp$grobs[[leg]] return(legend)} mylegend<-g_legend(mean_epi_temp_plot) yaxis_common<- textGrob(expression(paste("Lab confirmed incidence (",10^{-6},")")), gp = gpar(fontsize = 25), rot = 90, vjust = 1) figS3<-ggarrange(yaxis_common, ggarrange(mean_epi_temp_plot+ theme(plot.margin=margin(1.5,0.5,0,0,"cm"), legend.position="none",axis.title.y = element_blank()), mean_epi_ah_plot + theme(plot.margin=margin(1.5,0,0.5,0,"cm"), 
legend.position="none",axis.title.y = element_blank()), early_epi_temp_plot + theme(plot.margin=margin(1.5,0.5,0,0,"cm"), legend.position="none",axis.title.y = element_blank()), early_epi_ah_plot + theme(plot.margin=margin(1.5,0,0.5,0,"cm"), legend.position="none",axis.title.y = element_blank()), labels = c("a","b","c","d"), font.label = list(size=22), nrow=2,ncol=2,align = "hv"), mylegend, ncol=3,nrow = 1,widths=c(1,14,2)) figS3<-grid.arrange(yaxis_common, arrangeGrob(mean_epi_temp_plot+ theme(legend.position="none",axis.title.y = element_blank()), mean_epi_ah_plot + theme(legend.position="none",axis.title.y = element_blank()), early_epi_temp_plot + theme(legend.position="none",axis.title.y = element_blank()), early_epi_ah_plot + theme(legend.position="none",axis.title.y = element_blank()),nrow=2), mylegend, ncol=3,widths=c(1,14,2)) ggsave(plot = figS3,"./figures/supp/figure_S3.png", width=18, height=12,limitsize=FALSE)
/supplementary information analysis scripts/mean climate over epidemic.R
no_license
m-vieira/australian_seasonal_flu
R
false
false
13,312
r
library(lubridate) library(magrittr) library(ggplot2) library(ggpubr) library(grid) library(gridExtra) library(scales) library(dplyr) library(plyr) library(tidyr) library(zoo) # Here we assess whether or not climatic factors act more generally to enhance transmission, rather than as specific triggers # for epidemic onset # Figure S3 # Loading data ------------------------------------------------------------ mean_fortnightly_climate_30years<-read.csv("./dat/raw/mean_fortnightly_climate_30years.csv") epi_table<-read.csv("./dat/raw/epi_table.csv") raw_table<-read.csv("./dat/raw/raw_data.csv") raw_table<-raw_table%>% dplyr::mutate(fortnights_since_start_of_year = lubridate::yday(specimen_date)%/%14+1)%>% dplyr::group_by(city,year,assumed_antigenic_variant,fortnights_since_start_of_year)%>% dplyr::summarise(count=n()) cities<-c("ADELAIDE","BRISBANE","MELBOURNE","PERTH","SYDNEY") epi_table$city<-factor(epi_table$city,levels = cities) # function to return mean climate values ---------------------------------- mean_climate_over_epi<-function(x){ #x<-as.data.frame(x) if(x$epi_alarm=="N"){ return(data.frame(x, mean_epi_ah=NA, mean_epi_temp=NA)) } fortnights<-seq(x$start,x$end,1) temp_clim1<-subset(mean_fortnightly_climate_30years,city ==as.character(x$city)) temp_clim<-temp_clim1%>%subset(.,year==x$year & fortnights_since_start_of_year%in% fortnights) #roll_mean_AH<-temp_clim1$mean_AH%>%rollmean(.,k=length(fortnights)) #roll_mean_temp<-temp_clim1$mean_temp%>%rollmean(.,k=length(fortnights)) temp_clim<-temp_clim%>%dplyr::summarise(mean_epi_ah = mean(mean_AH), mean_epi_temp = mean(mean_temp)#, #z_score_mean_epi_ah = (mean_epi_ah-mean(roll_mean_AH,na.rm=TRUE))/sd(roll_mean_AH,na.rm=TRUE), #z_score_mean_epi_temp = (mean_epi_temp-mean(roll_mean_temp,na.rm=TRUE))/sd(roll_mean_temp,na.rm=TRUE) ) return(data.frame(x,temp_clim)) } early_climate<-function(x){ #mean climate over start to peak fortnight x<-as.data.frame(x) if(x$epi_alarm=="N"){ return(data.frame(x, early_ah=NA, 
early_temp=NA)) } temp<-raw_table%>%subset(.,city==x$city & year==x$year & assumed_antigenic_variant==as.character(x$reference_strain) & fortnights_since_start_of_year %in%c(x$start+1:x$end)) peak_fortnight<-temp$fortnights_since_start_of_year[which.max(temp$count)] fortnights<-seq(x$start,peak_fortnight,1) temp_clim1<-subset(mean_fortnightly_climate_30years,city ==as.character(x$city)) #roll_mean_AH<-temp_clim1$mean_AH%>%rollmean(.,k=length(fortnights)) #roll_mean_temp<-temp_clim1$mean_temp%>%rollmean(.,k=length(fortnights)) temp_clim<-temp_clim1%>%subset(.,year==x$year & fortnights_since_start_of_year%in% fortnights) temp_clim<-temp_clim%>%dplyr::summarise(early_ah = mean(mean_AH), early_temp = mean(mean_temp)#, #z_score_early_ah = (early_ah-mean(roll_mean_AH,na.rm=TRUE))/sd(roll_mean_AH,na.rm=TRUE), #z_score_early_temp = (early_temp-mean(roll_mean_temp,na.rm=TRUE))/sd(roll_mean_temp,na.rm=TRUE) ) } # getting climate for each epidemic --------------------------------------- epi_table_with_clim<-adply(epi_table%>%subset(.,epi_alarm=="Y" & year!=2009),1,mean_climate_over_epi) epi_table_with_clim<-adply(epi_table_with_clim,1,early_climate) # calculating z score for epidemic size and climate ---------------------- #in order to make it comparable between cities epi_table_with_clim<-epi_table_with_clim%>% dplyr::mutate(scaled_incidence_city = incidence_per_mil/mean_epi_size, log_incidence = log(incidence_per_mil)) mean_size_subtype_city<-epi_table_with_clim%>% dplyr::group_by(city,subtype)%>% dplyr::summarise(mean_epi_size_sc = mean(log_incidence,na.rm=TRUE)) epi_table_with_clim<-left_join(epi_table_with_clim,mean_size_subtype_city) epi_table_with_clim<-epi_table_with_clim%>% dplyr::mutate(scaled_incidence_subtype_city = log_incidence-mean_epi_size_sc) epi_table_with_clim<-epi_table_with_clim%>% dplyr::group_by(city,subtype)%>% dplyr::mutate(incidence_z_score_subtype_city = ifelse(epi_alarm=="Y", (log_incidence-mean(log_incidence,na.rm=TRUE))/sd(log_incidence,na.rm = 
TRUE), NA)) epi_table_with_clim<-epi_table_with_clim%>% dplyr::group_by(city)%>% dplyr::mutate(z_score_mean_epi_ah = ifelse(epi_alarm=="Y", (mean_epi_ah-mean(mean_epi_ah,na.rm=TRUE))/sd(mean_epi_ah,na.rm=TRUE))) # Mean AH over epidemic period -------------------------------------------- mean_epi_ah_plot<-epi_table_with_clim%>% subset(.,year!=2009 & epi_alarm=="Y")%>% ggplot(.,aes(x=mean_epi_ah,y= scaled_incidence_subtype_city))+ geom_jitter(aes(group = city, color=city), position=position_jitter(width=0.1,height=0.05),alpha=0.6,size=3.5)+ stat_cor(method = "pearson",size=8)+ scale_color_manual(name = "City", values=c("ADELAIDE"="#CC79A7", "BRISBANE"="#009E73", "MELBOURNE"="#56B4E9", "PERTH"="#999999", "SYDNEY"="#E69F00"))+ scale_x_continuous(breaks=seq(6,16,2),limits = c(5,16))+ scale_y_continuous(breaks=seq(-4,2,1),limits = c(-4,2))+ xlab(expression(paste("Mean Absolute Humidity over Epidemic Period "," (g/",m^{3},")",sep="")))+ ylab("Lab confirmed incidence") + theme_bw()+ theme(strip.background = element_blank(), strip.text = element_text(size=25), axis.title=element_text(size=18), axis.text.x =element_text(size=16,margin=margin(t=7,r=0,b=0,l=0)), axis.text.y =element_text(size=16,margin=margin(t=0,r=7,b=0,l=0)), axis.ticks.length = unit(0.4,"cm"), panel.border = element_rect(colour = "black"), legend.title=element_text(size=20), legend.text=element_text(size=17), panel.grid.major = element_blank(), panel.grid.minor = element_blank()) # mean temp over epidemic period ------------------------------------------------- mean_epi_temp_plot<-epi_table_with_clim%>% subset(.,year!=2009 & epi_alarm=="Y")%>% ggplot(.,aes(x=mean_epi_temp,y= scaled_incidence_subtype_city))+ geom_jitter(aes(group = subtype, color=city), position=position_jitter(width=0.1,height=0.05),alpha=0.6,size=3.5)+ stat_cor(method = "pearson",size=8)+ scale_color_manual(name = "City", values=c("ADELAIDE"="#CC79A7", "BRISBANE"="#009E73", "MELBOURNE"="#56B4E9", "PERTH"="#999999", "SYDNEY"="#E69F00"))+ 
scale_x_continuous(breaks=seq(8,24,4),limits = c(8,24))+ scale_y_continuous(breaks=seq(-4,2,1),limits = c(-4,2))+ xlab(expression(paste("Mean Temperature over Epidemic Period (",degree,"C)",sep="")))+ ylab("Lab confirmed incidence") + theme_bw()+ theme(strip.background = element_blank(), strip.text = element_text(size=25), axis.title=element_text(size=18), axis.text.x =element_text(size=16,margin=margin(t=7,r=0,b=0,l=0)), axis.text.y =element_text(size=16,margin=margin(t=0,r=7,b=0,l=0)), axis.ticks.length = unit(0.4,"cm"), panel.border = element_rect(colour = "black"), legend.title=element_text(size=20), legend.text=element_text(size=17), panel.grid.major = element_blank(), panel.grid.minor = element_blank()) # mean AH over early epidemic --------------------------------------------- early_epi_ah_plot<-epi_table_with_clim%>% subset(.,year!=2009 & epi_alarm=="Y")%>% ggplot(.,aes(x=early_ah,y= scaled_incidence_subtype_city))+ geom_jitter(aes(group = city, color=city), position=position_jitter(width=0.1,height=0.05),alpha=0.6,size=3.5)+ stat_cor(method = "pearson",size=8)+ scale_color_manual(name = "City", values=c("ADELAIDE"="#CC79A7", "BRISBANE"="#009E73", "MELBOURNE"="#56B4E9", "PERTH"="#999999", "SYDNEY"="#E69F00"))+ scale_x_continuous(breaks=seq(6,16,2),limits = c(5,16))+ scale_y_continuous(breaks=seq(-4,2,1),limits = c(-4,2))+ xlab(expression(paste("Mean Absolute Humidity over Early Epidemic "," (g/",m^{3},")",sep="")))+ ylab("Lab confirmed incidence") + theme_bw()+ theme(strip.background = element_blank(), strip.text = element_text(size=25), axis.title=element_text(size=18), axis.text.x =element_text(size=16,margin=margin(t=7,r=0,b=0,l=0)), axis.text.y =element_text(size=16,margin=margin(t=0,r=7,b=0,l=0)), axis.ticks.length = unit(0.4,"cm"), panel.border = element_rect(colour = "black"), legend.title=element_text(size=20), legend.text=element_text(size=17), panel.grid.major = element_blank(), panel.grid.minor = element_blank()) # early temp over epidemic period 
------------------------------------------------- early_epi_temp_plot<-epi_table_with_clim%>% subset(.,year!=2009 & epi_alarm=="Y")%>% ggplot(.,aes(x=early_temp,y= scaled_incidence_subtype_city))+ geom_jitter(aes(group = subtype, color=city), position=position_jitter(width=0.1,height=0.05),alpha=0.6,size=3.5)+ stat_cor(method = "pearson",size=8)+ scale_color_manual(name = "City", values=c("ADELAIDE"="#CC79A7", "BRISBANE"="#009E73", "MELBOURNE"="#56B4E9", "PERTH"="#999999", "SYDNEY"="#E69F00"))+ scale_x_continuous(breaks=seq(8,24,4),limits = c(8,24))+ scale_y_continuous(breaks=seq(-4,2,1),limits = c(-4,2))+ xlab(expression(paste("Mean Temperature over Early Epidemic (",degree,"C)",sep="")))+ ylab("Lab confirmed incidence") + theme_bw()+ theme(strip.background = element_blank(), strip.text = element_text(size=25), axis.title=element_text(size=18), axis.text.x =element_text(size=16,margin=margin(t=7,r=0,b=0,l=0)), axis.text.y =element_text(size=16,margin=margin(t=0,r=7,b=0,l=0)), axis.ticks.length = unit(0.4,"cm"), panel.border = element_rect(colour = "black"), legend.title=element_text(size=20), legend.text=element_text(size=17), panel.grid.major = element_blank(), panel.grid.minor = element_blank()) # save plot --------------------------------------------------------------- #extract legend #https://github.com/hadley/ggplot2/wiki/Share-a-legend-between-two-ggplot2-graphs g_legend<-function(a.gplot){ tmp <- ggplot_gtable(ggplot_build(a.gplot)) leg <- which(sapply(tmp$grobs, function(x) x$name) == "guide-box") legend <- tmp$grobs[[leg]] return(legend)} mylegend<-g_legend(mean_epi_temp_plot) yaxis_common<- textGrob(expression(paste("Lab confirmed incidence (",10^{-6},")")), gp = gpar(fontsize = 25), rot = 90, vjust = 1) figS3<-ggarrange(yaxis_common, ggarrange(mean_epi_temp_plot+ theme(plot.margin=margin(1.5,0.5,0,0,"cm"), legend.position="none",axis.title.y = element_blank()), mean_epi_ah_plot + theme(plot.margin=margin(1.5,0,0.5,0,"cm"), 
legend.position="none",axis.title.y = element_blank()), early_epi_temp_plot + theme(plot.margin=margin(1.5,0.5,0,0,"cm"), legend.position="none",axis.title.y = element_blank()), early_epi_ah_plot + theme(plot.margin=margin(1.5,0,0.5,0,"cm"), legend.position="none",axis.title.y = element_blank()), labels = c("a","b","c","d"), font.label = list(size=22), nrow=2,ncol=2,align = "hv"), mylegend, ncol=3,nrow = 1,widths=c(1,14,2)) figS3<-grid.arrange(yaxis_common, arrangeGrob(mean_epi_temp_plot+ theme(legend.position="none",axis.title.y = element_blank()), mean_epi_ah_plot + theme(legend.position="none",axis.title.y = element_blank()), early_epi_temp_plot + theme(legend.position="none",axis.title.y = element_blank()), early_epi_ah_plot + theme(legend.position="none",axis.title.y = element_blank()),nrow=2), mylegend, ncol=3,widths=c(1,14,2)) ggsave(plot = figS3,"./figures/supp/figure_S3.png", width=18, height=12,limitsize=FALSE)
x <- 1 if (x == 1){ print("X is equal to one!") } if ( (x %% 2) == 0 ){ print("X is even") } else { print("X is Not even") } if (is.matrix(x) == TRUE){ print("X is a Matrix") } else { print("X is NOT a Matrix") } x <- c(3,7,1) one <- x[1] two <- x[2] three <- x[3] print(one) print(two) print(three) x <- c(20, 10, 1) one <- x[1] two <- x[2] three <- x[3] max <- 0 if(one > max){ max <- one } if(two > max){ max <- two } if(three > max){ max <- three } library(stringr) print(str_c("Max number is: ", max)) print(cat("Max number is: ", max)) cat("Max number is: ", max) #cat("It can't be done with numbers like: ", max)
/conditionals.R
no_license
dladowitz/R_examples
R
false
false
666
r
x <- 1 if (x == 1){ print("X is equal to one!") } if ( (x %% 2) == 0 ){ print("X is even") } else { print("X is Not even") } if (is.matrix(x) == TRUE){ print("X is a Matrix") } else { print("X is NOT a Matrix") } x <- c(3,7,1) one <- x[1] two <- x[2] three <- x[3] print(one) print(two) print(three) x <- c(20, 10, 1) one <- x[1] two <- x[2] three <- x[3] max <- 0 if(one > max){ max <- one } if(two > max){ max <- two } if(three > max){ max <- three } library(stringr) print(str_c("Max number is: ", max)) print(cat("Max number is: ", max)) cat("Max number is: ", max) #cat("It can't be done with numbers like: ", max)
# Exercise 2: working with data frames # Create a vector of 100 employees ("Employee 1", "Employee 2", ... "Employee 100") # Hint: use the `paste()` function and vector recycling to add a number to the word # "Employee" employees <- paste("Employee", 1:100) # Create a vector of 100 random salaries for the year 2017 # Use the `runif()` function to pick random numbers between 40000 and 50000 salaries_2017 <- runif(100, min = 40000, max = 50000) # Create a vector of 100 annual salary adjustments between -5000 and 10000. # (A negative number represents a salary decrease due to corporate greed) # Again use the `runif()` function to pick 100 random numbers in that range. adjustments <- runif(100, min = -5000, max = 10000) # Create a data frame `salaries` by combining the 3 vectors you just made # Remember to set `stringsAsFactors=FALSE`! salaries <- data.frame(employees, salaries_2017, adjustments, stringsAsFactors = FALSE) View(salaries) # Add a column to the `salaries` data frame that represents each person's # salary in 2018 (e.g., with the salary adjustment added in). salaries$salaries_2018 <- salaries_2017 + adjustments View(salaries) # Add a column to the `salaries` data frame that has a value of `TRUE` if the # person got a raise (their salary went up) salaries$raise <- salaries$adjustments > 0 View(salaries) ### Retrieve values from your data frame to answer the following questions ### Note that you should get the value as specific as possible (e.g., a single ### cell rather than the whole row!) # What was the 2018 salary of Employee 57 salaries[salaries$employees == "Employee 57", "salaries_2018"] # How many employees got a raise? nrow(salaries[salaries$raise == TRUE,]) # What was the dollar value of the highest raise? salaries[ salaries$adjustments == max(salaries$adjustments), "adjustments"] # What was the "name" of the employee who received the highest raise? 
salaries[ salaries$adjustments == max(salaries$adjustments), "employees"] # What was the largest decrease in salaries between the two years? # What was the name of the employee who recieved largest decrease in salary? # What was the average salary change? # For people who did not get a raise, how much money did they lose on average? paycut <- salaries[salaries$raise == FALSE, ] mean(paycut$adjustments) ## Consider: do the above averages match what you expected them to be based on ## how you generated the salaries? # Write a .csv file of your salary data to your working directory
/chapter-10-exercises/exercise-2/exercise.R
permissive
LunaLan88/book-exercises
R
false
false
2,504
r
# Exercise 2: working with data frames # Create a vector of 100 employees ("Employee 1", "Employee 2", ... "Employee 100") # Hint: use the `paste()` function and vector recycling to add a number to the word # "Employee" employees <- paste("Employee", 1:100) # Create a vector of 100 random salaries for the year 2017 # Use the `runif()` function to pick random numbers between 40000 and 50000 salaries_2017 <- runif(100, min = 40000, max = 50000) # Create a vector of 100 annual salary adjustments between -5000 and 10000. # (A negative number represents a salary decrease due to corporate greed) # Again use the `runif()` function to pick 100 random numbers in that range. adjustments <- runif(100, min = -5000, max = 10000) # Create a data frame `salaries` by combining the 3 vectors you just made # Remember to set `stringsAsFactors=FALSE`! salaries <- data.frame(employees, salaries_2017, adjustments, stringsAsFactors = FALSE) View(salaries) # Add a column to the `salaries` data frame that represents each person's # salary in 2018 (e.g., with the salary adjustment added in). salaries$salaries_2018 <- salaries_2017 + adjustments View(salaries) # Add a column to the `salaries` data frame that has a value of `TRUE` if the # person got a raise (their salary went up) salaries$raise <- salaries$adjustments > 0 View(salaries) ### Retrieve values from your data frame to answer the following questions ### Note that you should get the value as specific as possible (e.g., a single ### cell rather than the whole row!) # What was the 2018 salary of Employee 57 salaries[salaries$employees == "Employee 57", "salaries_2018"] # How many employees got a raise? nrow(salaries[salaries$raise == TRUE,]) # What was the dollar value of the highest raise? salaries[ salaries$adjustments == max(salaries$adjustments), "adjustments"] # What was the "name" of the employee who received the highest raise? 
salaries[ salaries$adjustments == max(salaries$adjustments), "employees"] # What was the largest decrease in salaries between the two years? # What was the name of the employee who recieved largest decrease in salary? # What was the average salary change? # For people who did not get a raise, how much money did they lose on average? paycut <- salaries[salaries$raise == FALSE, ] mean(paycut$adjustments) ## Consider: do the above averages match what you expected them to be based on ## how you generated the salaries? # Write a .csv file of your salary data to your working directory
#caching the inverse of a matric ## This function computes the inverse of the special "matrix" returned by makeCacheMatrix above. #If the inverse has already been calculated (and the matrix has not changed), then the cachesolve #should retrieve the inverse from the cache. ## This function creates a special "matrix" object that can cache its inverse. makeCacheMatrix <- function(x = matrix()) { inv <- NULL set <- function(y) { x <<- y inv <<- NULL } get <- function() x setinverse <- function(inverse) inv <<- inverse getinverse <- function() inv list(set = set, get = get, setinverse = setinverse, getinverse = getinverse) } ## This function computes the inverse of the special "matrix" created by ## makeCacheMatrix above. If the inverse has already been calculated (and the ## matrix has not changed), then it should retrieve the inverse from the cache. cacheSolve <- function(x, ...) { inv <- x$getinverse() if(!is.null(inv)) { message("getting cached data") return(inv) } data <- x$get() inv <- solve(data, ...) x$setinverse(inv) inv }
/cachematrix.R
no_license
Chiranthi-Yasora/ProgrammingAssignment2
R
false
false
1,115
r
#caching the inverse of a matric ## This function computes the inverse of the special "matrix" returned by makeCacheMatrix above. #If the inverse has already been calculated (and the matrix has not changed), then the cachesolve #should retrieve the inverse from the cache. ## This function creates a special "matrix" object that can cache its inverse. makeCacheMatrix <- function(x = matrix()) { inv <- NULL set <- function(y) { x <<- y inv <<- NULL } get <- function() x setinverse <- function(inverse) inv <<- inverse getinverse <- function() inv list(set = set, get = get, setinverse = setinverse, getinverse = getinverse) } ## This function computes the inverse of the special "matrix" created by ## makeCacheMatrix above. If the inverse has already been calculated (and the ## matrix has not changed), then it should retrieve the inverse from the cache. cacheSolve <- function(x, ...) { inv <- x$getinverse() if(!is.null(inv)) { message("getting cached data") return(inv) } data <- x$get() inv <- solve(data, ...) x$setinverse(inv) inv }
#' Takes weighted soil layers, creates stack, and sums values to create single soil raster for that parameter #' @param raster_list a list of weighted soil rasters #' @return a single raster layer that sums the soil values including the weights #' for the velox extraction stage #' @examples #' \dontrun{stack_and_sum_soil()} #' @note This is a pretty simple function but I wanted to keep this separate from the other preparation stages. stack_and_sum_soil <- function(raster_list){ filtered_list <- length_filter(raster_list) soil_stack <- lapply(filtered_list[[1]], function(data_layer){ return(sum(raster::stack(data_layer))) }) soil_list <- c(soil_stack, unlist(filtered_list[[2]])) return(soil_list) }
/R/stack_and_sum_soil.R
permissive
liandi93/soilgrids
R
false
false
730
r
#' Takes weighted soil layers, creates stack, and sums values to create single soil raster for that parameter #' @param raster_list a list of weighted soil rasters #' @return a single raster layer that sums the soil values including the weights #' for the velox extraction stage #' @examples #' \dontrun{stack_and_sum_soil()} #' @note This is a pretty simple function but I wanted to keep this separate from the other preparation stages. stack_and_sum_soil <- function(raster_list){ filtered_list <- length_filter(raster_list) soil_stack <- lapply(filtered_list[[1]], function(data_layer){ return(sum(raster::stack(data_layer))) }) soil_list <- c(soil_stack, unlist(filtered_list[[2]])) return(soil_list) }
## This R file has two functions makeCacheMatrix and cacheSolve ## makeCacheMatrix - this function creates the matrix and set the values in the matrix as well as the inverse makeCacheMatrix <- function(x = matrix()) { #initialize inverse to NULL inverseMatrix <- NULL set <- function(y) { x <<- y inverseMatrix <<- NULL } #returns the matrix get <- function() x #sets the inverse of the matrix setInverse <- function(invP) inverseMatrix<<- invP #gets the inverse of the matrix getInverse <- function() inverseMatrix list(set = set, get = get, setInverse = setInverse, getInverse = getInverse) } ## cacheSolve - this function returns the inverse of the matrix if it is present the cache. ## If there is no copy in the cache then it calculates the inverse using the solve function and ## stores the inverse in the cache cacheSolve <- function(x, ...) { ## Return a matrix that is the inverse of 'x' inverseMatrix = x$getInverse() if ( !is.null(inverseMatrix)){ message("Getting cached data") return (inverseMatrix) } #no cached data calculate inverse myMatrix = x$get() inverseMatrix = solve(myMatrix,...) x$setInverse(inverseMatrix) return (inverseMatrix) }
/cachematrix.R
no_license
aseethar/ProgrammingAssignment2
R
false
false
1,236
r
## This R file has two functions makeCacheMatrix and cacheSolve ## makeCacheMatrix - this function creates the matrix and set the values in the matrix as well as the inverse makeCacheMatrix <- function(x = matrix()) { #initialize inverse to NULL inverseMatrix <- NULL set <- function(y) { x <<- y inverseMatrix <<- NULL } #returns the matrix get <- function() x #sets the inverse of the matrix setInverse <- function(invP) inverseMatrix<<- invP #gets the inverse of the matrix getInverse <- function() inverseMatrix list(set = set, get = get, setInverse = setInverse, getInverse = getInverse) } ## cacheSolve - this function returns the inverse of the matrix if it is present the cache. ## If there is no copy in the cache then it calculates the inverse using the solve function and ## stores the inverse in the cache cacheSolve <- function(x, ...) { ## Return a matrix that is the inverse of 'x' inverseMatrix = x$getInverse() if ( !is.null(inverseMatrix)){ message("Getting cached data") return (inverseMatrix) } #no cached data calculate inverse myMatrix = x$get() inverseMatrix = solve(myMatrix,...) x$setInverse(inverseMatrix) return (inverseMatrix) }
library(plyr) library(tidyverse) library(magrittr) library(ggplot2) library(drc) source("scripts/geom_indicator.R") load("intermediate_data/image_scores.Rdata") meta_well_scores <- image_scores %>% dplyr::rename(master_plate_id = parent_plate_barcode) %>% dplyr::distinct( master_plate_id, Compound, dose_nM, row, column) viral_intensity_well_scores <- image_scores %>% dplyr::select( master_plate_id=parent_plate_barcode, Compound, dose_nM, Image_Count_Cells, Image_Classify_Positive_PctObjectsPerBin) %>% dplyr::group_by(master_plate_id, dose_nM) %>% dplyr::mutate( cell_count_baseline = mean(Image_Count_Cells), prob_pose_baseline = mean(Image_Classify_Positive_PctObjectsPerBin[which(Compound == "Negative Control")])) %>% dplyr::ungroup() %>% dplyr::group_by(master_plate_id, dose_nM, Compound) %>% dplyr::mutate( normed_cell_count = Image_Count_Cells / cell_count_baseline, normed_prob_pos = Image_Classify_Positive_PctObjectsPerBin / prob_pose_baseline) %>% dplyr::summarize( mean_normed_prob_pos = mean(normed_prob_pos), sem_normed_prob_pos = sd(normed_prob_pos)/sqrt(dplyr::n()), mean_normed_cell_count = mean(normed_cell_count), sem_normed_cell_count = sd(normed_cell_count)/sqrt(dplyr::n())) %>% dplyr::ungroup() Compound <- "Niclosamide" compound_scores <- viral_intensity_well_scores %>% dplyr::filter(Compound == !!Compound) %>% dplyr::select( master_plate_id, Compound, dose_nM, viral_intensity_prob_pos = mean_normed_prob_pos, viral_intensity_prob_pos_sem = sem_normed_prob_pos, cell_count = mean_normed_cell_count, cell_count_sem = sem_normed_cell_count) p <- ggplot2::ggplot(data=compound_scores) + ggplot2::theme_bw() + ggplot2::geom_errorbar( mapping=ggplot2::aes( x=dose_nM, ymin=cell_count-cell_count_sem, ymax=cell_count+cell_count_sem), color="black", width=.1) + ggplot2::geom_line( mapping=ggplot2::aes(x=dose_nM, y=cell_count), color="black", size=1.4) + ggplot2::geom_errorbar( mapping=ggplot2::aes( x=dose_nM, ymin=viral_intensity_prob_pos-viral_intensity_prob_pos_sem, 
ymax=viral_intensity_prob_pos+viral_intensity_prob_pos_sem), color="red", width=.1) + ggplot2::geom_line( mapping=ggplot2::aes(x=dose_nM, y=viral_intensity_prob_pos), color="red", size=1.3) + ggplot2::scale_x_log10( "Drug Dose", breaks=c(50, 250, 500, 1000, 2000), labels=c("50nM", "250nM", "0.5uM", "1uM", "2uM")) + ggplot2::scale_y_continuous("Normalized Intensity") ggplot2::ggsave( "product/figures/dose_response/Niclosamide_dose_response_200501.pdf", width=3, height=2) ####################### compound_scores <- compound_scores %>% dplyr::mutate( log_dose = log10(dose_nM) - 9) fit <- drc::drm( formula=viral_intensity_prob_pos ~ log_dose, data=compound_scores, fct=drc::L.4(fixed=c(NA, 0, NA, NA))) log_dose <- seq(log10(50)-9, log10(2000)-9, length.out=200) pred_value <- predict(fit, expand.grid(log_dose, 1)) fit_data <- data.frame(log_dose, pred_value) %>% dplyr::mutate( slope=fit$coefficients[1], bottom=0, top=fit$coefficients[2], ic50=fit$coefficients[3], dose_nM = 10^(log_dose + 9)) p <- ggplot2::ggplot() + ggplot2::theme_bw() + ggplot2::geom_errorbar( data=compound_scores, mapping=ggplot2::aes( x=dose_nM, ymin=cell_count-cell_count_sem, ymax=cell_count+cell_count_sem), color="black", width=.1) + ggplot2::geom_line( data=compound_scores, mapping=ggplot2::aes(x=dose_nM, y=cell_count), color="black", size=1.4) + ggplot2::geom_errorbar( data=compound_scores, mapping=ggplot2::aes( x=dose_nM, ymin=viral_intensity_prob_pos-viral_intensity_prob_pos_sem, ymax=viral_intensity_prob_pos+viral_intensity_prob_pos_sem), color="red", width=.1) + ggplot2::geom_point( data=compound_scores, mapping=ggplot2::aes( x=dose_nM, y=viral_intensity_prob_pos), color="red") + ggplot2::geom_line( data=fit_data, mapping=ggplot2::aes(x=dose_nM, y=pred_value), color="red", size=1.3) + MPStats::geom_indicator( data=data.frame(ic50=paste0("IC50: ", signif(10^(fit_data$ic50[1] + 9), 3), " nM")), mapping=ggplot2::aes(indicator=ic50), group=1) + ggplot2::scale_x_log10( "Drug Dose", breaks=c(50, 250, 
500, 1000, 2000), labels=c("50nM", "250nM", "0.5uM", "1uM", "2uM")) + ggplot2::scale_y_continuous("Normalized Intensity") ggplot2::ggsave( "product/figures/dose_response/Niclosamide_dose_response_fit_200501.pdf", width=3, height=2)
/SARS-CoV-2/scripts/4_dose_response_niclosamide.R
permissive
jilimcaoco/MPProjects
R
false
false
5,137
r
library(plyr) library(tidyverse) library(magrittr) library(ggplot2) library(drc) source("scripts/geom_indicator.R") load("intermediate_data/image_scores.Rdata") meta_well_scores <- image_scores %>% dplyr::rename(master_plate_id = parent_plate_barcode) %>% dplyr::distinct( master_plate_id, Compound, dose_nM, row, column) viral_intensity_well_scores <- image_scores %>% dplyr::select( master_plate_id=parent_plate_barcode, Compound, dose_nM, Image_Count_Cells, Image_Classify_Positive_PctObjectsPerBin) %>% dplyr::group_by(master_plate_id, dose_nM) %>% dplyr::mutate( cell_count_baseline = mean(Image_Count_Cells), prob_pose_baseline = mean(Image_Classify_Positive_PctObjectsPerBin[which(Compound == "Negative Control")])) %>% dplyr::ungroup() %>% dplyr::group_by(master_plate_id, dose_nM, Compound) %>% dplyr::mutate( normed_cell_count = Image_Count_Cells / cell_count_baseline, normed_prob_pos = Image_Classify_Positive_PctObjectsPerBin / prob_pose_baseline) %>% dplyr::summarize( mean_normed_prob_pos = mean(normed_prob_pos), sem_normed_prob_pos = sd(normed_prob_pos)/sqrt(dplyr::n()), mean_normed_cell_count = mean(normed_cell_count), sem_normed_cell_count = sd(normed_cell_count)/sqrt(dplyr::n())) %>% dplyr::ungroup() Compound <- "Niclosamide" compound_scores <- viral_intensity_well_scores %>% dplyr::filter(Compound == !!Compound) %>% dplyr::select( master_plate_id, Compound, dose_nM, viral_intensity_prob_pos = mean_normed_prob_pos, viral_intensity_prob_pos_sem = sem_normed_prob_pos, cell_count = mean_normed_cell_count, cell_count_sem = sem_normed_cell_count) p <- ggplot2::ggplot(data=compound_scores) + ggplot2::theme_bw() + ggplot2::geom_errorbar( mapping=ggplot2::aes( x=dose_nM, ymin=cell_count-cell_count_sem, ymax=cell_count+cell_count_sem), color="black", width=.1) + ggplot2::geom_line( mapping=ggplot2::aes(x=dose_nM, y=cell_count), color="black", size=1.4) + ggplot2::geom_errorbar( mapping=ggplot2::aes( x=dose_nM, ymin=viral_intensity_prob_pos-viral_intensity_prob_pos_sem, 
ymax=viral_intensity_prob_pos+viral_intensity_prob_pos_sem), color="red", width=.1) + ggplot2::geom_line( mapping=ggplot2::aes(x=dose_nM, y=viral_intensity_prob_pos), color="red", size=1.3) + ggplot2::scale_x_log10( "Drug Dose", breaks=c(50, 250, 500, 1000, 2000), labels=c("50nM", "250nM", "0.5uM", "1uM", "2uM")) + ggplot2::scale_y_continuous("Normalized Intensity") ggplot2::ggsave( "product/figures/dose_response/Niclosamide_dose_response_200501.pdf", width=3, height=2) ####################### compound_scores <- compound_scores %>% dplyr::mutate( log_dose = log10(dose_nM) - 9) fit <- drc::drm( formula=viral_intensity_prob_pos ~ log_dose, data=compound_scores, fct=drc::L.4(fixed=c(NA, 0, NA, NA))) log_dose <- seq(log10(50)-9, log10(2000)-9, length.out=200) pred_value <- predict(fit, expand.grid(log_dose, 1)) fit_data <- data.frame(log_dose, pred_value) %>% dplyr::mutate( slope=fit$coefficients[1], bottom=0, top=fit$coefficients[2], ic50=fit$coefficients[3], dose_nM = 10^(log_dose + 9)) p <- ggplot2::ggplot() + ggplot2::theme_bw() + ggplot2::geom_errorbar( data=compound_scores, mapping=ggplot2::aes( x=dose_nM, ymin=cell_count-cell_count_sem, ymax=cell_count+cell_count_sem), color="black", width=.1) + ggplot2::geom_line( data=compound_scores, mapping=ggplot2::aes(x=dose_nM, y=cell_count), color="black", size=1.4) + ggplot2::geom_errorbar( data=compound_scores, mapping=ggplot2::aes( x=dose_nM, ymin=viral_intensity_prob_pos-viral_intensity_prob_pos_sem, ymax=viral_intensity_prob_pos+viral_intensity_prob_pos_sem), color="red", width=.1) + ggplot2::geom_point( data=compound_scores, mapping=ggplot2::aes( x=dose_nM, y=viral_intensity_prob_pos), color="red") + ggplot2::geom_line( data=fit_data, mapping=ggplot2::aes(x=dose_nM, y=pred_value), color="red", size=1.3) + MPStats::geom_indicator( data=data.frame(ic50=paste0("IC50: ", signif(10^(fit_data$ic50[1] + 9), 3), " nM")), mapping=ggplot2::aes(indicator=ic50), group=1) + ggplot2::scale_x_log10( "Drug Dose", breaks=c(50, 250, 
500, 1000, 2000), labels=c("50nM", "250nM", "0.5uM", "1uM", "2uM")) + ggplot2::scale_y_continuous("Normalized Intensity") ggplot2::ggsave( "product/figures/dose_response/Niclosamide_dose_response_fit_200501.pdf", width=3, height=2)
\name{parse_and_save} \alias{parse_and_save} \title{Parse the input Rd file and save the roxygen documentation into a file} \usage{ parse_and_save(path, file, usage = FALSE) } \arguments{ \item{path}{the path of the Rd file} \item{file}{the path to save the roxygen documentation} \item{usage}{logical: whether to include the usage section in the output} } \value{ a character vector if \code{file} is not specified, or write the vector into a file } \description{ Parse the input Rd file and save the roxygen documentation into a file } \author{ Hadley Wickham; modified by Yihui Xie <\url{http://yihui.name}> }
/man/parse_and_save.Rd
no_license
aalfons/Rd2roxygen
R
false
false
635
rd
\name{parse_and_save} \alias{parse_and_save} \title{Parse the input Rd file and save the roxygen documentation into a file} \usage{ parse_and_save(path, file, usage = FALSE) } \arguments{ \item{path}{the path of the Rd file} \item{file}{the path to save the roxygen documentation} \item{usage}{logical: whether to include the usage section in the output} } \value{ a character vector if \code{file} is not specified, or write the vector into a file } \description{ Parse the input Rd file and save the roxygen documentation into a file } \author{ Hadley Wickham; modified by Yihui Xie <\url{http://yihui.name}> }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/SampleTau.r \name{Sample_tau} \alias{Sample_tau} \title{Sample the dispersal parameter, tau} \usage{ Sample_tau(tau, j, m, nT, r, lambda, K, coords, N, h_tau, h_lambda, mu_tau, sigma_tau) } \arguments{ \item{tau}{dispersal parameter (vector of length m)} \item{j}{MC iteration step} \item{m}{number of spatial locations} \item{nT}{number of time points} \item{r}{growth rate (scalar)} \item{lambda}{intensity matrix (m x nT)} \item{K}{carrying capacity (scalar)} \item{coords}{matrix of spatial co-ordinates of locations (longitude, latitude)} \item{N}{true number of organisms (m x nT)} \item{h_tau}{M-H step value for tau} \item{h_lambda}{M-H step value for lambda} \item{mu_tau}{prior mean for tau} \item{sigma_tau}{prior variance matrix for tau} } \description{ Simulates from the posterior distribution of the dispersal parameter, tau. }
/man/Sample_tau.Rd
no_license
pkuhnert/HSTMM
R
false
true
935
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/SampleTau.r \name{Sample_tau} \alias{Sample_tau} \title{Sample the dispersal parameter, tau} \usage{ Sample_tau(tau, j, m, nT, r, lambda, K, coords, N, h_tau, h_lambda, mu_tau, sigma_tau) } \arguments{ \item{tau}{dispersal parameter (vector of length m)} \item{j}{MC iteration step} \item{m}{number of spatial locations} \item{nT}{number of time points} \item{r}{growth rate (scalar)} \item{lambda}{intensity matrix (m x nT)} \item{K}{carrying capacity (scalar)} \item{coords}{matrix of spatial co-ordinates of locations (longitude, latitude)} \item{N}{true number of organisms (m x nT)} \item{h_tau}{M-H step value for tau} \item{h_lambda}{M-H step value for lambda} \item{mu_tau}{prior mean for tau} \item{sigma_tau}{prior variance matrix for tau} } \description{ Simulates from the posterior distribution of the dispersal parameter, tau. }
## File Name: tam_linking_2studies_create_M_SD.R ## File Version: 0.05 tam_linking_2studies_create_M_SD <- function(M1, SD1, M2, SD2, trafo_persons) { G1 <- length(M1) G2 <- length(M2) GT <- G1+G2 M_SD <- matrix( 0 , nrow=GT , ncol=2 ) colnames(M_SD) <- c("M", "SD") # M_SD <- as.data.frame(M_SD) rownames(M_SD) <- tam_linking_2studies_create_M_SD_rownames(G1=G1, G2=G2) ind1 <- seq(1,G1) M_SD[ ind1 , "M"] <- M1 M_SD[ ind1 , "SD"] <- SD1 ind2 <- G1 + seq(1,G2) M_SD[ ind2 , "M"] <- M2 M_SD[ ind2 , "SD"] <- SD2 #-- transformations M_SD[ind2, "SD"] <- M_SD[ind2, "SD"] * trafo_persons["a"] M_SD[ind2, "M"] <- M_SD[ind2, "M"] * trafo_persons["a"] + trafo_persons["b"] attr( M_SD, "N_groups") <- c(G1, G2) #--- output return(M_SD) }
/R/tam_linking_2studies_create_M_SD.R
no_license
yaozeyang90/TAM
R
false
false
753
r
## File Name: tam_linking_2studies_create_M_SD.R ## File Version: 0.05 tam_linking_2studies_create_M_SD <- function(M1, SD1, M2, SD2, trafo_persons) { G1 <- length(M1) G2 <- length(M2) GT <- G1+G2 M_SD <- matrix( 0 , nrow=GT , ncol=2 ) colnames(M_SD) <- c("M", "SD") # M_SD <- as.data.frame(M_SD) rownames(M_SD) <- tam_linking_2studies_create_M_SD_rownames(G1=G1, G2=G2) ind1 <- seq(1,G1) M_SD[ ind1 , "M"] <- M1 M_SD[ ind1 , "SD"] <- SD1 ind2 <- G1 + seq(1,G2) M_SD[ ind2 , "M"] <- M2 M_SD[ ind2 , "SD"] <- SD2 #-- transformations M_SD[ind2, "SD"] <- M_SD[ind2, "SD"] * trafo_persons["a"] M_SD[ind2, "M"] <- M_SD[ind2, "M"] * trafo_persons["a"] + trafo_persons["b"] attr( M_SD, "N_groups") <- c(G1, G2) #--- output return(M_SD) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/as_numeric.R \name{as_numeric} \alias{as_numeric} \alias{to_numeric} \alias{as_numeric.data.frame} \title{Convert factors to numeric variables} \usage{ as_numeric(x, ...) to_numeric(x, ...) \method{as_numeric}{data.frame}(x, ..., start.at = NULL, keep.labels = TRUE, use.labels = FALSE) } \arguments{ \item{x}{A vector or data frame.} \item{...}{Optional, unquoted names of variables that should be selected for further processing. Required, if \code{x} is a data frame (and no vector) and only selected variables from \code{x} should be processed. You may also use functions like \code{:} or tidyselect's select-helpers. See 'Examples'.} \item{start.at}{Starting index, i.e. the lowest numeric value of the variable's value range. By default, this argument is \code{NULL}, hence the lowest value of the returned numeric variable corresponds to the lowest factor level (if factor levels are numeric) or to \code{1} (if factor levels are not numeric).} \item{keep.labels}{Logical, if \code{TRUE}, former factor levels will be added as value labels. For numeric factor levels, values labels will be used, if present. See 'Examples' and \code{\link{set_labels}} for more details.} \item{use.labels}{Logical, if \code{TRUE} and \code{x} has numeric value labels, the values defined in the labels (right-hand side of \code{labels}, for instance \code{labels = c(null = 0, one = 1)}) will be set as numeric values (instead of consecutive factor level numbers). See 'Examples'.} } \value{ A numeric variable with values ranging either from \code{start.at} to \code{start.at} + length of factor levels, or to the corresponding factor levels (if these were numeric). If \code{x} is a data frame, the complete data frame \code{x} will be returned, where variables specified in \code{...} are coerced to numeric; if \code{...} is not specified, applies to all variables in the data frame. 
} \description{ This function converts (replaces) factor levels with the related factor level index number, thus the factor is converted to a numeric variable. } \examples{ data(efc) test <- as_label(efc$e42dep) table(test) table(as_numeric(test)) hist(as_numeric(test, start.at = 0)) # set lowest value of new variable to "5". table(as_numeric(test, start.at = 5)) # numeric factor keeps values dummy <- factor(c("3", "4", "6")) table(as_numeric(dummy)) # do not drop unused factor levels dummy <- ordered(c(rep("No", 5), rep("Maybe", 3)), levels = c("Yes", "No", "Maybe")) as_numeric(dummy) # non-numeric factor is converted to numeric # starting at 1 dummy <- factor(c("D", "F", "H")) table(as_numeric(dummy)) # for numeric factor levels, value labels will be used, if present dummy1 <- factor(c("3", "4", "6")) dummy1 <- set_labels(dummy1, labels = c("first", "2nd", "3rd")) dummy1 as_numeric(dummy1) # for non-numeric factor levels, these will be used. # value labels will be ignored dummy2 <- factor(c("D", "F", "H")) dummy2 <- set_labels(dummy2, labels = c("first", "2nd", "3rd")) dummy2 as_numeric(dummy2) # easily coerce specific variables in a data frame to numeric # and keep other variables, with their class preserved data(efc) efc$e42dep <- as.factor(efc$e42dep) efc$e16sex <- as.factor(efc$e16sex) efc$e17age <- as.factor(efc$e17age) # convert back "sex" and "age" into numeric head(as_numeric(efc, e16sex, e17age)) x <- factor(c("None", "Little", "Some", "Lots")) x <- set_labels(x, labels = c(None = "0.5", Little = "1.3", Some = "1.8", Lots = ".2") ) x as_numeric(x) as_numeric(x, use.labels = TRUE) as_numeric(x, use.labels = TRUE, keep.labels = FALSE) }
/man/as_numeric.Rd
no_license
strengejacke/sjlabelled
R
false
true
3,716
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/as_numeric.R \name{as_numeric} \alias{as_numeric} \alias{to_numeric} \alias{as_numeric.data.frame} \title{Convert factors to numeric variables} \usage{ as_numeric(x, ...) to_numeric(x, ...) \method{as_numeric}{data.frame}(x, ..., start.at = NULL, keep.labels = TRUE, use.labels = FALSE) } \arguments{ \item{x}{A vector or data frame.} \item{...}{Optional, unquoted names of variables that should be selected for further processing. Required, if \code{x} is a data frame (and no vector) and only selected variables from \code{x} should be processed. You may also use functions like \code{:} or tidyselect's select-helpers. See 'Examples'.} \item{start.at}{Starting index, i.e. the lowest numeric value of the variable's value range. By default, this argument is \code{NULL}, hence the lowest value of the returned numeric variable corresponds to the lowest factor level (if factor levels are numeric) or to \code{1} (if factor levels are not numeric).} \item{keep.labels}{Logical, if \code{TRUE}, former factor levels will be added as value labels. For numeric factor levels, values labels will be used, if present. See 'Examples' and \code{\link{set_labels}} for more details.} \item{use.labels}{Logical, if \code{TRUE} and \code{x} has numeric value labels, the values defined in the labels (right-hand side of \code{labels}, for instance \code{labels = c(null = 0, one = 1)}) will be set as numeric values (instead of consecutive factor level numbers). See 'Examples'.} } \value{ A numeric variable with values ranging either from \code{start.at} to \code{start.at} + length of factor levels, or to the corresponding factor levels (if these were numeric). If \code{x} is a data frame, the complete data frame \code{x} will be returned, where variables specified in \code{...} are coerced to numeric; if \code{...} is not specified, applies to all variables in the data frame. 
} \description{ This function converts (replaces) factor levels with the related factor level index number, thus the factor is converted to a numeric variable. } \examples{ data(efc) test <- as_label(efc$e42dep) table(test) table(as_numeric(test)) hist(as_numeric(test, start.at = 0)) # set lowest value of new variable to "5". table(as_numeric(test, start.at = 5)) # numeric factor keeps values dummy <- factor(c("3", "4", "6")) table(as_numeric(dummy)) # do not drop unused factor levels dummy <- ordered(c(rep("No", 5), rep("Maybe", 3)), levels = c("Yes", "No", "Maybe")) as_numeric(dummy) # non-numeric factor is converted to numeric # starting at 1 dummy <- factor(c("D", "F", "H")) table(as_numeric(dummy)) # for numeric factor levels, value labels will be used, if present dummy1 <- factor(c("3", "4", "6")) dummy1 <- set_labels(dummy1, labels = c("first", "2nd", "3rd")) dummy1 as_numeric(dummy1) # for non-numeric factor levels, these will be used. # value labels will be ignored dummy2 <- factor(c("D", "F", "H")) dummy2 <- set_labels(dummy2, labels = c("first", "2nd", "3rd")) dummy2 as_numeric(dummy2) # easily coerce specific variables in a data frame to numeric # and keep other variables, with their class preserved data(efc) efc$e42dep <- as.factor(efc$e42dep) efc$e16sex <- as.factor(efc$e16sex) efc$e17age <- as.factor(efc$e17age) # convert back "sex" and "age" into numeric head(as_numeric(efc, e16sex, e17age)) x <- factor(c("None", "Little", "Some", "Lots")) x <- set_labels(x, labels = c(None = "0.5", Little = "1.3", Some = "1.8", Lots = ".2") ) x as_numeric(x) as_numeric(x, use.labels = TRUE) as_numeric(x, use.labels = TRUE, keep.labels = FALSE) }
#' Sample size for a two-sample equivalence test of means
#'
#' Computes the group-1 sample size for establishing equivalence of two means
#' with unequal allocation (n1 = k * n2), following the standard normal-
#' approximation formula (Chow, Shao & Wang, "Sample Size Calculations in
#' Clinical Research").
#'
#' @param alpha Type I error rate of each one-sided test.
#' @param beta Type II error rate (power = 1 - beta).
#' @param sigma Common standard deviation of the two groups.
#' @param k Allocation ratio between the groups, n1 = k * n2.
#' @param delta True difference between the two means.
#' @param margin Equivalence margin; its absolute value is used, so the sign
#'   of the supplied margin does not matter.
#' @return The (possibly non-integer) sample size for group 1; group 2's size
#'   can be recovered as n1 / k. Callers should round up as appropriate.
TwoSampleMean.Equivalence <- function(alpha, beta, sigma, k, delta, margin) {
  # Group-2 size from the normal-approximation equivalence formula.
  # (The original contained a stray bare `n2` expression here; inside a
  # function that evaluates invisibly and does nothing, so it was removed.)
  n2 <- (qnorm(1 - alpha) + qnorm(1 - beta / 2))^2 * sigma^2 * (1 + 1 / k) /
    (delta - abs(margin))^2
  # Group 1 is k times as large as group 2.
  n1 <- k * n2
  n1
}
/R/TwoSampleMean.Equivalence.R
no_license
cran/TrialSize
R
false
false
197
r
TwoSampleMean.Equivalence <- function(alpha,beta,sigma,k,delta,margin){ n2<-(qnorm(1-alpha)+qnorm(1-beta/2))^2*sigma^2*(1+1/k)/(delta-abs(margin))^2 n2 n1<-k*n2 return(n1) }
# Lab exercise: draw 100 values (with replacement) from the set {1, 2, 3, 4}
# and inspect them both as a raw integer vector and as a factor.
# NOTE: no seed is set, so the draws differ on every run.
num <- c(1, 2, 3, 4)                            # candidate values to draw from
v1 <- sample(num, size = 100, replace = TRUE)   # 100 random draws
v1_factor <- factor(v1)                         # same data, as a categorical
print(v1)
print(v1_factor)
/Lab3_exercise1.R
no_license
emilyOberH/R-language
R
false
false
98
r
num = c(1,2,3,4) v1 <- sample(num, 100, replace=TRUE) v1_factor <- factor(v1) v1 v1_factor
# Auto-extracted example script from the eDMA package documentation.
# Simulates data from a Dynamic Linear Model; the exact sequence of RNG calls
# below matters for reproducibility under the fixed seed, so statement order
# must not be changed.
library(eDMA)

### Name: SimulateDLM
### Title: Simulate from DLM of West and Harrison (1999).
### Aliases: SimulateDLM

### ** Examples

set.seed(7892)             # fixed seed so the simulated series is reproducible

iT <- 500                  # number of time points
iK <- 3                    # number of regressors (an intercept is added below)
dV <- 0.1                  # observation variance
mW <- diag(iK + 1) * 0.01  # state-evolution covariance (intercept + iK slopes)
dPhi <- 1                  # state-equation autoregressive coefficient
vBeta0 <- rep(0, iK + 1)   # initial coefficient (state) vector

# Design matrix: leading column of ones (intercept) plus iK Gaussian regressors.
mX <- cbind(1, matrix(rnorm(iT * (iK)), iT, iK))

lOut <- SimulateDLM(iT, mX, vBeta0, mW, dV, dPhi)
vY <- lOut$vY              # extract the simulated response series
/data/genthat_extracted_code/eDMA/examples/SimulateDLM.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
365
r
library(eDMA) ### Name: SimulateDLM ### Title: Simulate from DLM of West and Harrison (1999). ### Aliases: SimulateDLM ### ** Examples set.seed(7892) iT <- 500 iK <- 3 dV <- 0.1 mW <- diag(iK + 1) * 0.01 dPhi <- 1 vBeta0 <- rep(0, iK + 1) mX <- cbind(1, matrix(rnorm(iT * (iK)), iT, iK)) lOut <- SimulateDLM(iT, mX, vBeta0, mW, dV, dPhi) vY <- lOut$vY
## link
# Test script for circos.genomicLink(): draw random links between regions of
# two random BED-like data frames on an ideogram layout.
# NOTE(review): no set.seed() here, so sampled regions and colours differ per
# run; circos.par()/circos.clear() manage package-global state, so statement
# order must be preserved.
bed1 = generateRandomBed(nr = 100)
bed1 = bed1[sample(nrow(bed1), 20), ]   # keep 20 randomly chosen regions
bed2 = generateRandomBed(nr = 100)
bed2 = bed2[sample(nrow(bed2), 20), ]   # 20 random target regions for links

# Global circos settings must be set before initializing the layout.
circos.par("default.track.height" = 0.1, cell.padding = c(0, 0, 0, 0))
circos.initializeWithIdeogram()
# One link per row pair; random colour per link, no border.
circos.genomicLink(bed1, bed2, col = sample(1:5, 20, replace = TRUE),
    border = NA)
circos.clear()   # reset circos global state so later plots start clean
/test/genomic_functions/test_genomicLink.R
permissive
jokergoo/circlize
R
false
false
368
r
## link bed1 = generateRandomBed(nr = 100) bed1 = bed1[sample(nrow(bed1), 20), ] bed2 = generateRandomBed(nr = 100) bed2 = bed2[sample(nrow(bed2), 20), ] circos.par("default.track.height" = 0.1, cell.padding = c(0, 0, 0, 0)) circos.initializeWithIdeogram() circos.genomicLink(bed1, bed2, col = sample(1:5, 20, replace = TRUE), border = NA) circos.clear()
# Exploratory Data Analysis - Plot 1
# Reads two days (2880 minutes) of the UCI "Individual household electric
# power consumption" data and saves a histogram of Global Active Power to
# plot1.png.
#
# skip/nrows jump straight to the rows for 2007-02-01 and 2007-02-02;
# "?" encodes missing values in the raw file.
# NOTE(review): with skip = 66636, header = TRUE treats the first non-skipped
# line (a data row) as the header; verify the skip count accounts for this.
data <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
                   na.strings = "?", skip = 66636, nrows = 2880,
                   colClasses = c("character", "character", "numeric",
                                  "numeric", "numeric", "numeric", "numeric",
                                  "numeric", "numeric"))
# The real header line was skipped, so restore the column names by hand.
colnames(data) <- c("CDate", "CTime", "Global_active_power",
                    "Global_reactive_power", "Voltage", "Global_intensity",
                    "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
# Combine the separate date and time columns into a single timestamp.
data$DateTime <- strptime(paste(data$CDate, data$CTime),
                          format = "%d/%m/%Y %H:%M:%S")

# Render directly into the PNG device instead of dev.copy()-ing the screen
# device: dev.copy can clip/rescale the plot and fails in headless sessions.
png("plot1.png", width = 480, height = 480)
hist(data$Global_active_power, col = "red", main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)")
dev.off()
/plot1.R
no_license
ngoc-hien/ExData_Plotting1
R
false
false
682
r
data <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?", skip = 66636, nrows = 2880, colClasses = c("character", "character","numeric","numeric","numeric","numeric","numeric","numeric","numeric")) colnames(data) <- c("CDate","CTime","Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3") data$DateTime <- strptime(paste(data$CDate, data$CTime), format = "%d/%m/%Y %H:%M:%S") hist(data$Global_active_power, col = "red", main = "Global Active Power", xlab = "Global Active Power (kilowatts)") dev.copy(png, "plot1.png") dev.off()
# Body-fat modelling script: compares lasso-selected, backward-stepwise, and
# full linear models for BODYFAT on the cleaned bodyfat data, then tabulates
# fit statistics and confidence intervals for the chosen lasso model.
# NOTE(review): attach() is used so bare column names (BODYFAT, HEIGHT, ...)
# resolve against bodyfat.clean; this shadows the workspace and is fragile.
source('code/clean.R')
attach(bodyfat.clean)
par(mfrow = c(1, 1))
#bodyfat.clean <- scale(bodyfat.clean)
#bodyfat.clean=bodyfat[-c(42,39),-1]
### Simple linear model
#bodyfat.clean <- bodyfat.clean[,-1]
#bodyfat.clean[,c(2:15)] <- scale(bodyfat.clean[,c(2:15)])
# Consider all variables and fit a simple linear model, there is a relative large R-square.
# But most of the variables are not significant.
library(car)
# NOTE(review): `lm.fat` is not defined anywhere in this script before use —
# presumably a full model fitted in an earlier version (cf. lm.full below).
# This line errors as written; confirm the intended model object.
vif(lm.fat)
### Lasso
## sampling
# Drop the first two columns (id and response) for the predictor matrix.
x <- as.matrix(bodyfat.clean[,-c(1,2)])
y <- as.vector(BODYFAT)
## ridge regression
library(glmnet)
# Decreasing lambda grid spanning 10^10 down to 10^-2.
grid <- 10^seq(10,-2,length = 100)
#ridge.mod <- glmnet(x, y, alpha = 0, lambda = grid)
#plot(ridge.mod, main = "The ridge")
par(mfrow = c(1, 1))
## the lasso
library(lars)
(laa <- lars(x, y))
summary(laa)
# alpha = 1 selects the lasso penalty in glmnet.
lasso.mod<-glmnet(x, y, alpha = 1, lambda = grid)
plot(lasso.mod, main = "The lasso")
# Candidate models on lasso-selected variables; note the second call
# overwrites lm.lasso (the 4-predictor model is the one kept).
summary(lm.lasso <- lm(formula = BODYFAT ~ HEIGHT + ABDOMEN + WRIST + AGE + NECK,
    data = bodyfat.clean[, -2]))
summary(lm.lasso <- lm(formula = BODYFAT ~ HEIGHT + ABDOMEN + WRIST + AGE,
    data = bodyfat.clean[, -2]))
library(glmnet)
## cross-validation
# Seed fixed so the 10-fold CV split (and hence best.lambda) is reproducible.
set.seed(1000)
cv.out <- cv.glmnet(as.matrix(bodyfat.clean[,-c(1,2)]), as.vector(BODYFAT),
    alpha = 1, nfolds = 10)
plot(cv.out)
(best.lambda <- cv.out$lambda.min)
# Training MSE of the lasso fit at the CV-chosen lambda.
lasso.pred <- predict(lasso.mod, s = best.lambda, newx = x)
mean((lasso.pred - y)^2)
##
out <- glmnet(x, y, alpha = 1, lambda = grid)
lasso.coef <- predict(out,type = "coefficients",s = best.lambda)
lasso.coef
# Fit linear model
# Backward selection with BIC penalty (k = log(n), n = 250 observations).
lm.backward <- lm(formula = BODYFAT ~ ., data = bodyfat.clean[, -2])
summary(step(lm.backward, direction = "backward", k = log(250) ))
lm.backward <- lm(formula = BODYFAT ~ ABDOMEN + WRIST + AGE,
    data = bodyfat.clean[, -2])
plot(lm.backward, which = 1)
summary(step(lm.lasso, direction = "backward", k = log(250) ))
lm.lasso.step <- lm(formula = BODYFAT ~ HEIGHT + ABDOMEN + WRIST,
    data = bodyfat.clean[, -2])
finaldata <- as.data.frame(cbind(BODYFAT, HEIGHT, ABDOMEN, FOREARM, WRIST))
plot(finaldata)
lm.full <- lm(formula = BODYFAT ~ ., data = bodyfat.clean[, -2])
### Summary
# Side-by-side comparison table of the four candidate models.
Adj.r.squared <- round(c(summary(lm.lasso)$adj.r.squared,
                         summary(lm.backward)$adj.r.squared,
                         summary(lm.lasso.step)$adj.r.squared,
                         summary(lm.full)$adj.r.squared) , 4)# Adjusted R^2
AIC <- round(c(AIC(lm.lasso), AIC(lm.backward), AIC(lm.lasso.step),
               AIC(lm.full)) , 4)# AIC
BIC <- round(c(BIC(lm.lasso), BIC(lm.backward), BIC(lm.lasso.step),
               BIC(lm.full)) , 4)# BIC
MSE <- round(c(mean((lm.lasso$fitted.values - y)^2),
               mean((lm.backward$fitted.values - y)^2),
               mean((lm.lasso.step$fitted.values - y)^2),
               mean((lm.full$fitted.values - y)^2)) , 4) # MSE
Variable <- c('HEIGHT, ABDOMEN, WRIST, AGE','HEIGHT, ABDOMEN, WRIST',
              'ABDOMEN, WRIST, AGE','All')
summary <- rbind(Adj.r.squared, AIC, BIC, MSE, Variable)
colnames(summary) <- c('Lasso','Backward','Lasso.step','Full')
rownames(summary) <- c('Adjusted R^2', 'AIC', 'BIC', 'MSE','Variables')
summary
# Wald 95% confidence intervals for the kept lasso model's coefficients.
summary.lasso <- summary(lm.lasso)
CI.upper <- summary.lasso$coefficients[,1] +
    summary.lasso$coefficients[,2]*qnorm(0.975)
CI.lower <- summary.lasso$coefficients[,1] -
    summary.lasso$coefficients[,2]*qnorm(0.975)
cbind(round(summary.lasso$coefficients, 4),
      'Confidence Intervals' = paste0('(',round(CI.lower,4), ', ',
                                      round(CI.upper,4),')'))
/code/model.R
no_license
Vesna0130/Data-Science-Practicum-BodyFat
R
false
false
3,393
r
source('code/clean.R') attach(bodyfat.clean) par(mfrow = c(1, 1)) #bodyfat.clean <- scale(bodyfat.clean) #bodyfat.clean=bodyfat[-c(42,39),-1] ### Simple linear model #bodyfat.clean <- bodyfat.clean[,-1] #bodyfat.clean[,c(2:15)] <- scale(bodyfat.clean[,c(2:15)]) # Consider all variables and fit a simple linear model, there is a relative large R-square. # But most of the variables are not significant. library(car) vif(lm.fat) ### Lasso ## sampling x <- as.matrix(bodyfat.clean[,-c(1,2)]) y <- as.vector(BODYFAT) ## ridge regression library(glmnet) grid <- 10^seq(10,-2,length = 100) #ridge.mod <- glmnet(x, y, alpha = 0, lambda = grid) #plot(ridge.mod, main = "The ridge") par(mfrow = c(1, 1)) ## the lasso library(lars) (laa <- lars(x, y)) summary(laa) lasso.mod<-glmnet(x, y, alpha = 1, lambda = grid) plot(lasso.mod, main = "The lasso") summary(lm.lasso <- lm(formula = BODYFAT ~ HEIGHT + ABDOMEN + WRIST + AGE + NECK, data = bodyfat.clean[, -2])) summary(lm.lasso <- lm(formula = BODYFAT ~ HEIGHT + ABDOMEN + WRIST + AGE, data = bodyfat.clean[, -2])) library(glmnet) ## cross-validation set.seed(1000) cv.out <- cv.glmnet(as.matrix(bodyfat.clean[,-c(1,2)]), as.vector(BODYFAT), alpha = 1, nfolds = 10) plot(cv.out) (best.lambda <- cv.out$lambda.min) lasso.pred <- predict(lasso.mod, s = best.lambda, newx = x) mean((lasso.pred - y)^2) ## out <- glmnet(x, y, alpha = 1, lambda = grid) lasso.coef <- predict(out,type = "coefficients",s = best.lambda) lasso.coef # Fit linear model lm.backward <- lm(formula = BODYFAT ~ ., data = bodyfat.clean[, -2]) summary(step(lm.backward, direction = "backward", k = log(250) )) lm.backward <- lm(formula = BODYFAT ~ ABDOMEN + WRIST + AGE, data = bodyfat.clean[, -2]) plot(lm.backward, which = 1) summary(step(lm.lasso, direction = "backward", k = log(250) )) lm.lasso.step <- lm(formula = BODYFAT ~ HEIGHT + ABDOMEN + WRIST, data = bodyfat.clean[, -2]) finaldata <- as.data.frame(cbind(BODYFAT, HEIGHT, ABDOMEN, FOREARM, WRIST)) plot(finaldata) lm.full <- 
lm(formula = BODYFAT ~ ., data = bodyfat.clean[, -2]) ### Summary Adj.r.squared <- round(c(summary(lm.lasso)$adj.r.squared, summary(lm.backward)$adj.r.squared, summary(lm.lasso.step)$adj.r.squared, summary(lm.full)$adj.r.squared) , 4)# Adjusted R^2 AIC <- round(c(AIC(lm.lasso), AIC(lm.backward), AIC(lm.lasso.step), AIC(lm.full)) , 4)# AIC BIC <- round(c(BIC(lm.lasso), BIC(lm.backward), BIC(lm.lasso.step), BIC(lm.full)) , 4)# BIC MSE <- round(c(mean((lm.lasso$fitted.values - y)^2), mean((lm.backward$fitted.values - y)^2), mean((lm.lasso.step$fitted.values - y)^2), mean((lm.full$fitted.values - y)^2)) , 4) # MSE Variable <- c('HEIGHT, ABDOMEN, WRIST, AGE','HEIGHT, ABDOMEN, WRIST','ABDOMEN, WRIST, AGE','All') summary <- rbind(Adj.r.squared, AIC, BIC, MSE, Variable) colnames(summary) <- c('Lasso','Backward','Lasso.step','Full') rownames(summary) <- c('Adjusted R^2', 'AIC', 'BIC', 'MSE','Variables') summary summary.lasso <- summary(lm.lasso) CI.upper <- summary.lasso$coefficients[,1] + summary.lasso$coefficients[,2]*qnorm(0.975) CI.lower <- summary.lasso$coefficients[,1] - summary.lasso$coefficients[,2]*qnorm(0.975) cbind(round(summary.lasso$coefficients, 4), 'Confidence Intervals' = paste0('(',round(CI.lower,4), ', ', round(CI.upper,4),')'))
#' Combine terms in a regression model
#'
#' The function combines terms from a regression model, and replaces the terms
#' with a single row in the output table. The p-value is calculated using
#' [stats::anova()].
#'
#' @param x a `tbl_regression` object
#' @param formula_update formula update passed to the [stats::update]
#' function's `formula.=` argument.
#' This updated formula is used to construct a reduced model, and is
#' subsequently passed to [stats::anova()] to calculate the p-value for the
#' group of removed terms. See the [stats::update] help file for proper syntax.
#' @param label Optional string argument labeling the combined rows
#' @param ... Additional arguments passed to [stats::anova]
#' @inheritParams add_global_p
#' @author Daniel D. Sjoberg
#' @family tbl_regression tools
#' @return `tbl_regression` object
#' @export
#'
#' @examples
#' # Example 1 ----------------------------------
#' # Logistic Regression Example, LRT p-value
#' combine_terms_ex1 <-
#'   glm(
#'     response ~ marker + I(marker^2) + grade,
#'     trial[c("response", "marker", "grade")] %>% na.omit(), # keep complete cases only!
#'     family = binomial
#'   ) %>%
#'   tbl_regression(label = grade ~ "Grade", exponentiate = TRUE) %>%
#'   # collapse non-linear terms to a single row in output using anova
#'   combine_terms(
#'     formula_update = . ~ . - marker - I(marker^2),
#'     label = "Marker (non-linear terms)",
#'     test = "LRT"
#'   )
#' @section Example Output:
#' \if{html}{Example 1}
#'
#' \if{html}{\figure{combine_terms_ex1.png}{options: width=45\%}}
combine_terms <- function(x, formula_update, label = NULL, quiet = NULL, ...) {
  # Record this call so the returned object keeps a full provenance trail.
  updated_call_list <- c(x$call_list, list(combine_terms = match.call()))

  # setting defaults -----------------------------------------------------------
  # `%||%` falls through: explicit argument, then theme setting, then FALSE.
  quiet <- quiet %||% get_theme_element("pkgwide-lgl:quiet") %||% FALSE

  # checking input -------------------------------------------------------------
  if (!inherits(x, "tbl_regression")) {
    stop("`x` input must be class `tbl_regression`", call. = FALSE)
  }
  if (!is.null(label) && !rlang::is_string(label)) {
    stop(paste(
      "`label` argument must be a string of length one."
    ), call. = FALSE)
  }

  # creating updated model object ----------------------------------------------
  # Build a human-readable version of the update() call purely for messaging.
  expr_update <- rlang::expr(stats::update(x$model_obj, formula. = !!formula_update)) %>%
    deparse() %>%
    paste(collapse = "") %>%
    stringr::str_squish()
  if (quiet == FALSE) {
    rlang::inform(glue("combine_terms: Creating a reduced model with\n  `reduced_model <- {expr_update}`"))
  }
  reduced_model <- stats::update(x$model_obj, formula. = formula_update)

  tryCatch(
    {
      # Again deparse the anova() call only so the user can see what was run.
      expr_anova <- rlang::expr(stats::anova(x$model_obj, reduced_model, !!!list(...))) %>%
        deparse() %>%
        paste(collapse = "") %>%
        stringr::str_squish()
      if (quiet == FALSE) {
        rlang::inform(glue(
          "combine_terms: Calculating p-value comparing full and reduced models with\n",
          "  `{expr_anova}`"
        ))
      }
      anova <- stats::anova(x$model_obj, reduced_model, ...)
    },
    error = function(e) {
      err_msg <- paste(
        "There was error calculating the p-value in the",
        "'anova()' function.\n",
        "There are two common causes for an error during the calculation:\n",
        "1. The model type is not supported by 'anova()'.\n",
        "2. The number of observations used to estimate the full and reduced",
        "models is different.\n\n",
        as.character(e)
      )
      stop(err_msg, call. = FALSE)
    }
  )

  # extracting p-value from anova object ---------------------------------------
  # Different model classes name the p-value column "Pr(>...)" or "P(>...)".
  df_anova <- as_tibble(anova) %>%
    select(starts_with("Pr(>"), starts_with("P(>"))

  # if no column was selected, print error
  if (ncol(df_anova) == 0) {
    stop(paste(
      "The output from `anova()` did not contain a p-value.\n",
      "A common source of this error is not specifying the `test=` argument.\n",
      "For example, to get the LRT p-value for a logistic regression estimated with `glm()`,\n",
      "include the argument `test = \"LRT\"` in the `combine_terms()` call."
    ), call. = FALSE)
  }

  # The comparison row of the anova table is the last one.
  anova_p <- df_anova %>%
    slice(n()) %>%
    pull()
  # if no p-value returned in p-value column
  if (is.na(anova_p)) {
    stop("The output from `anova()` did not contain a p-value.", call. = FALSE)
  }

  # tbl'ing the new model object ------------------------------------------------
  # Re-run tbl_regression on the reduced model, forwarding the original call's
  # inputs; include/show_single_row are wrapped in tidyselect expressions so
  # variables dropped by the formula update are silently excluded.
  new_model_tbl <-
    rlang::call2(
      "tbl_regression",
      x = reduced_model, # updated model object
      label = x$inputs$label,
      exponentiate = x$inputs$exponentiate,
      include = rlang::expr(intersect(any_of(!!x$inputs$include), everything())),
      show_single_row = rlang::expr(intersect(any_of(!!x$inputs$show_single_row), everything())),
      conf.level = x$inputs$conf.level,
      intercept = x$inputs$intercept,
      estimate_fun = x$inputs$estimate_fun,
      pvalue_fun = x$inputs$pvalue_fun,
      tidy_fun = x$inputs$tidy_fun
    ) %>%
    eval()

  # updating original tbl object ------------------------------------------------
  # adding p-value column, if it is not already there
  if (!"p.value" %in% names(x$table_body)) {
    # adding p.value to table_body
    x$table_body <- mutate(x$table_body, p.value = NA_real_)
    x <- modify_table_styling(
      x,
      columns = "p.value",
      label = "**p-value**",
      hide = FALSE,
      fmt_fun = x$inputs$pvalue_fun %||%
        getOption("gtsummary.pvalue_fun", default = style_pvalue)
    )
  }

  # replacing the combined rows with a single row
  # Rows present in the reduced model's table get collapse_row = FALSE; rows
  # that exist only in the full model (the removed terms) get NA -> TRUE.
  table_body <-
    x$table_body %>%
    left_join(
      new_model_tbl$table_body %>%
        select(
          .data$variable, .data$var_type,
          .data$reference_row, .data$row_type, .data$label
        ) %>%
        mutate(collapse_row = FALSE),
      by = c("variable", "var_type", "row_type", "reference_row", "label")
    ) %>%
    # marking rows on tbl that will be reduced to a single row
    mutate(collapse_row = ifelse(is.na(.data$collapse_row), TRUE, .data$collapse_row)) %>%
    group_by(.data$collapse_row) %>%
    # Keep all surviving rows, but only the first of the collapsed group.
    filter(.data$collapse_row == FALSE |
      (dplyr::row_number() == 1 & .data$collapse_row == TRUE)) %>%
    # updating column values for collapsed rows
    mutate_at(
      vars(.data$estimate, .data$conf.low, .data$conf.high, .data$ci),
      ~ ifelse(.data$collapse_row == TRUE, NA, .)
    ) %>%
    mutate(
      p.value = ifelse(.data$collapse_row == TRUE, !!anova_p, .data$p.value),
      row_type = ifelse(.data$collapse_row == TRUE, "label", .data$row_type)
    ) %>%
    ungroup()

  # adding variable label, if specified ----------------------------------------
  if (!is.null(label)) {
    table_body <-
      table_body %>%
      mutate(label = ifelse(.data$collapse_row == TRUE, !!label, .data$label))
  }

  # writing over the table_body in x -------------------------------------------
  x$table_body <-
    table_body %>%
    select(-.data$collapse_row)

  # returning updated tbl object -----------------------------------------------
  x$call_list <- updated_call_list
  x
}
/R/combine_terms.R
permissive
eliascrapa/gtsummary
R
false
false
7,229
r
#' Combine terms in a regression model #' #' The function combines terms from a regression model, and replaces the terms #' with a single row in the output table. The p-value is calculated using #' [stats::anova()]. #' #' @param x a `tbl_regression` object #' @param formula_update formula update passed to the [stats::update]. #' This updated formula is used to construct a reduced model, and is #' subsequently passed to [stats::anova()] to calculate the p-value for the #' group of removed terms. See the [stats::update] help file for proper syntax. #' function's `formula.=` argument #' @param label Option string argument labeling the combined rows #' @param ... Additional arguments passed to [stats::anova] #' @inheritParams add_global_p #' @author Daniel D. Sjoberg #' @family tbl_regression tools #' @return `tbl_regression` object #' @export #' #' @examples #' # Example 1 ---------------------------------- #' # Logistic Regression Example, LRT p-value #' combine_terms_ex1 <- #' glm( #' response ~ marker + I(marker^2) + grade, #' trial[c("response", "marker", "grade")] %>% na.omit(), # keep complete cases only! #' family = binomial #' ) %>% #' tbl_regression(label = grade ~ "Grade", exponentiate = TRUE) %>% #' # collapse non-linear terms to a single row in output using anova #' combine_terms( #' formula_update = . ~ . - marker - I(marker^2), #' label = "Marker (non-linear terms)", #' test = "LRT" #' ) #' @section Example Output: #' \if{html}{Example 1} #' #' \if{html}{\figure{combine_terms_ex1.png}{options: width=45\%}} combine_terms <- function(x, formula_update, label = NULL, quiet = NULL, ...) 
{ updated_call_list <- c(x$call_list, list(combine_terms = match.call())) # setting defaults ----------------------------------------------------------- quiet <- quiet %||% get_theme_element("pkgwide-lgl:quiet") %||% FALSE # checking input ------------------------------------------------------------- if (!inherits(x, "tbl_regression")) { stop("`x` input must be class `tbl_regression`", call. = FALSE) } if (!is.null(label) && !rlang::is_string(label)) { stop(paste( "`label` argument must be a string of length one." ), call. = FALSE) } # creating updated model object ---------------------------------------------- expr_update <- rlang::expr(stats::update(x$model_obj, formula. = !!formula_update)) %>% deparse() %>% paste(collapse = "") %>% stringr::str_squish() if (quiet == FALSE) { rlang::inform(glue("combine_terms: Creating a reduced model with\n `reduced_model <- {expr_update}`")) } reduced_model <- stats::update(x$model_obj, formula. = formula_update) tryCatch( { expr_anova <- rlang::expr(stats::anova(x$model_obj, reduced_model, !!!list(...))) %>% deparse() %>% paste(collapse = "") %>% stringr::str_squish() if (quiet == FALSE) { rlang::inform(glue( "combine_terms: Calculating p-value comparing full and reduced models with\n", " `{expr_anova}`" )) } anova <- stats::anova(x$model_obj, reduced_model, ...) }, error = function(e) { err_msg <- paste( "There was error calculating the p-value in the", "'anova()' function.\n", "There are two common causes for an error during the calculation:\n", "1. The model type is not supported by 'anova()'.\n", "2. The number of observations used to estimate the full and reduced", "models is different.\n\n", as.character(e) ) stop(err_msg, call. 
= FALSE) } ) # extracting p-value from anova object --------------------------------------- df_anova <- as_tibble(anova) %>% select(starts_with("Pr(>"), starts_with("P(>")) # if no column was selected, print error if (ncol(df_anova) == 0) { stop(paste( "The output from `anova()` did not contain a p-value.\n", "A common source of this error is not specifying the `test=` argument.\n", "For example, to get the LRT p-value for a logistic regression estimated with `glm()`,\n", "include the argument `test = \"LRT\"` in the `combine_terms()` call." ), call. = FALSE) } anova_p <- df_anova %>% slice(n()) %>% pull() # if no p-value returned in p-value column if (is.na(anova_p)) { stop("The output from `anova()` did not contain a p-value.", call. = FALSE) } # tbl'ing the new model object ----------------------------------------------- new_model_tbl <- rlang::call2( "tbl_regression", x = reduced_model, # updated model object label = x$inputs$label, exponentiate = x$inputs$exponentiate, include = rlang::expr(intersect(any_of(!!x$inputs$include), everything())), show_single_row = rlang::expr(intersect(any_of(!!x$inputs$show_single_row), everything())), conf.level = x$inputs$conf.level, intercept = x$inputs$intercept, estimate_fun = x$inputs$estimate_fun, pvalue_fun = x$inputs$pvalue_fun, tidy_fun = x$inputs$tidy_fun ) %>% eval() # updating original tbl object ----------------------------------------------- # adding p-value column, if it is not already there if (!"p.value" %in% names(x$table_body)) { # adding p.value to table_body x$table_body <- mutate(x$table_body, p.value = NA_real_) x <- modify_table_styling( x, columns = "p.value", label = "**p-value**", hide = FALSE, fmt_fun = x$inputs$pvalue_fun %||% getOption("gtsummary.pvalue_fun", default = style_pvalue) ) } # replacing the combined rows with a single row table_body <- x$table_body %>% left_join( new_model_tbl$table_body %>% select( .data$variable, .data$var_type, .data$reference_row, .data$row_type, .data$label ) %>% 
mutate(collapse_row = FALSE), by = c("variable", "var_type", "row_type", "reference_row", "label") ) %>% # marking rows on tbl that will be reduced to a single row mutate(collapse_row = ifelse(is.na(.data$collapse_row), TRUE, .data$collapse_row)) %>% group_by(.data$collapse_row) %>% filter(.data$collapse_row == FALSE | (dplyr::row_number() == 1 & .data$collapse_row == TRUE)) %>% # updating column values for collapsed rows mutate_at( vars(.data$estimate, .data$conf.low, .data$conf.high, .data$ci), ~ ifelse(.data$collapse_row == TRUE, NA, .) ) %>% mutate( p.value = ifelse(.data$collapse_row == TRUE, !!anova_p, .data$p.value), row_type = ifelse(.data$collapse_row == TRUE, "label", .data$row_type) ) %>% ungroup() # adding variable label, if specified ---------------------------------------- if (!is.null(label)) { table_body <- table_body %>% mutate(label = ifelse(.data$collapse_row == TRUE, !!label, .data$label)) } # writing over the table_body in x ------------------------------------------- x$table_body <- table_body %>% select(-.data$collapse_row) # returning updated tbl object ----------------------------------------------- x$call_list <- updated_call_list x }
# Auto-extracted example script from the metRology package documentation.
# Demonstrates gplot(), a bare-bones grouped type-"h" plot, on Mandel's h
# statistics from an interlaboratory study.
library(metRology)

### Name: gplot
### Title: Grouped plots of type "h"
### Aliases: gplot
### Keywords: hplot

### ** Examples

data(RMstudy)
# Mandel's between-laboratory consistency statistic h for the measurement
# columns 2:9, grouped by laboratory.
h <- with(RMstudy, mandel.h(RMstudy[2:9], g=Lab))
# las = 2 rotates axis labels perpendicular to the axis for readability.
gplot(h, las=2)
#Note the absence of indicator lines, title etc.
#compared to plot(h)
/data/genthat_extracted_code/metRology/examples/gplot.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
303
r
library(metRology) ### Name: gplot ### Title: Grouped plots of type "h" ### Aliases: gplot ### Keywords: hplot ### ** Examples data(RMstudy) h <- with(RMstudy, mandel.h(RMstudy[2:9], g=Lab)) gplot(h, las=2) #Note the absence of indicator lines, title etc. #compared to plot(h)
# Exploratory test script for internal query-building functions of the
# mirhostgenes package (joinQueryOnTables, .buildQuery). It exercises joins
# between miRNA/host-gene tables with varying start tables and compares row
# counts / NA counts of the resulting SQL result sets. The results are
# inspected interactively (auto-printed), not asserted.
## check namespace.
# Detach a package (and unload its namespace) if it is currently loaded, so
# the subsequent library() calls start from a clean state.
detachem <- function(x){
    NS <- loadedNamespaces()
    if(any(NS==x)){
        pkgn <- paste0("package:", x)
        detach(pkgn, unload=TRUE, character.only=TRUE)
    }
}
Pkgs <- c("MirhostDb.Hsapiens.v75.v20", "mirhostgenes", "ensembldb")
tmp <- sapply(Pkgs, detachem)
tmp <- sapply(Pkgs, library, character.only=TRUE)
# Annotation database object exported by the MirhostDb package.
DB <- MirhostDb.Hsapiens.v75.v20

######********************************************
## testing the queries. that's really an internal function that should never be called
## directly.

#####
## 1)
tojoin <- c("pre_mirna", "host_gene")
## adds host_tx
Q <- mirhostgenes:::joinQueryOnTables(DB, tojoin, join="left join")
Q
library(RSQLite)
R1 <- dbGetQuery(dbconn(DB), paste("select * from", Q))
## OK.
## the same, but starting from the host_gene table!
Q <- mirhostgenes:::joinQueryOnTables(DB, tojoin, join="left join",
                                      start.table="host_gene")
Q
R2 <- dbGetQuery(dbconn(DB), paste("select * from", Q))
## compare the results
nrow(R1)
nrow(R2)
## have more for R1 where we started with pre_mirna
sum(is.na(R1$gene_id))
sum(is.na(R2$gene_id))

#####
## 2)
tojoin <- c("mat_mirna", "host_gene")
Q <- mirhostgenes:::joinQueryOnTables(DB, tojoin, join="left join")
Q
R1 <- dbGetQuery(dbconn(DB), paste("select * from", Q))
## the same but starting from host_gene
Q <- mirhostgenes:::joinQueryOnTables(DB, tojoin, join="left join",
                                      start.table="host_gene")
Q
R2 <- dbGetQuery(dbconn(DB), paste("select * from", Q))
## what if we started from mat_mirna?
Q <- mirhostgenes:::joinQueryOnTables(DB, tojoin, join="left join",
                                      start.table="mat_mirna")
Q
## compare the results
nrow(R1)
nrow(R2)
## have more for R1 where we started with pre_mirna
sum(is.na(R1$gene_id))
sum(is.na(R2$gene_id))

#####
## 3)
tojoin <- c("mirfam", "mat_mirna")
Q <- mirhostgenes:::joinQueryOnTables(DB, tojoin, join="left join")
Q
R1 <- dbGetQuery(dbconn(DB), paste("select * from", Q))
## start from mirfam
Q <- mirhostgenes:::joinQueryOnTables(DB, tojoin, join="left join",
                                      start.table="mirfam")
Q
R2 <- dbGetQuery(dbconn(DB), paste("select * from", Q))
## compare the results
nrow(R1)
nrow(R2)
## have more for R1 where we started with pre_mirna
sum(is.na(R1$mirfam_id))
sum(is.na(R2$mirfam_id))

#####
## 4)
## adds pre_mirna, host_tx
tojoin <- c("mat_mirna", "mirfam", "host_gene")
## adds pre_mirna, host_tx
Q <- mirhostgenes:::joinQueryOnTables(DB, tojoin, join="left join")
Q
R1 <- dbGetQuery(dbconn(DB), paste("select * from", Q))
## same but start from mirfam...
Q <- mirhostgenes:::joinQueryOnTables(DB, tojoin, join="left join",
                                      start.table="mirfam")
Q
R2 <- dbGetQuery(dbconn(DB), paste("select * from", Q))
## we're supposed to get less in R2
## compare the results
nrow(R1)
nrow(R2)
## have more for R1 where we started with pre_mirna
sum(is.na(R1$mirfam_id))
sum(is.na(R2$mirfam_id))

#####
## 5)
##
tojoin <- c("mat_mirna", "array_feature")
## adds pre_mirna, host_tx
Q <- mirhostgenes:::joinQueryOnTables(DB, tojoin, join="left join")
Q
tojoin <- c( "host_gene", "array_feature" )
Q <- mirhostgenes:::joinQueryOnTables(DB, tojoin, join="left join")
Q
##

######********************************************
## testing the queries to see whether we get what we want...
## Note: these functions are internal.
Query <- mirhostgenes:::.buildQuery(DB, columns=c("gene_name", "sequence",
                                        "pre_mirna_name", "pre_mirna_id",
                                        "mat_mirna_name"),
                                    filter=list(SeqendFilter(123, condition=">",
                                                             feature="mat_mirna"),
                                                GenenameFilter("SMC4")),
                                    order.by="mat_mirna_seq_end")
Query
Res <- dbGetQuery(dbconn(DB), Query)
Res
## now we want to get all mature_mirnas for host gene SMC4, along with its host transcripts, ordered by mat_mirna_seq_start.
Query <- mirhostgenes:::.buildQuery(DB, columns=c("tx_id", "gene_name",
                                        "pre_mirna_id", "pre_mirna_name",
                                        "mat_mirna_name",
                                        "mat_mirna_seq_start"),
                                    filter=list(GenenameFilter("SMC4")),
                                    order.by="mat_mirna_seq_start")
Query
Res <- dbGetQuery(dbconn(DB), Query)
Res
## get all pre_mirnas
Query <- mirhostgenes:::.buildQuery(DB, columns=c("pre_mirna_id", "pre_mirna_name"))
Allpres <- dbGetQuery(dbconn(DB), Query)
nrow(Allpres)
length(unique(Allpres$pre_mirna_id))
## now build a query joining pre_mirnas with host_tx
Query <- mirhostgenes:::.buildQuery(DB, columns=c("pre_mirna_id",
                                        "pre_mirna_name", "tx_id"))
Res.join <- dbGetQuery(dbconn(DB), Query)
nrow(Res.join)
length(unique(Res.join$pre_mirna_id))
## what if we used a full join??? doesn't work, only left joins!!!
Query <- mirhostgenes:::.buildQuery(DB, columns=c("pre_mirna_id",
                                        "pre_mirna_name", "tx_id"),
                                    join="left join")
Res.left <- dbGetQuery(dbconn(DB), Query)
nrow(Res.left)
length(unique(Res.left$pre_mirna_id))
sum(is.na(Res.left$tx_id))
## OK, so we get also some without tx_id
## if we say now that we want to start with host_tx we will end up with the same as above.
Query <- mirhostgenes:::.buildQuery(DB, columns=c("pre_mirna_id",
                                        "pre_mirna_name", "tx_id"),
                                    join="left join", start.table="host_tx")
Res.join <- dbGetQuery(dbconn(DB), Query)
nrow(Res.join)
length(unique(Res.join$pre_mirna_id))
Query <- mirhostgenes:::.buildQuery(DB, columns=c("pre_mirna_id",
                                        "pre_mirna_name", "tx_id",
                                        "mirfam_name"))
Query
Res <- dbGetQuery(dbconn(DB), Query)
nrow(Res)
length(unique(Res$pre_mirna_id))
length(unique(Res$mirfam_name))
## what if we used a left join here?
# NOTE(review): named pattern=/replacement= push Query into gsub's `x` slot,
# so this does run; but replacing every "join" would also turn an existing
# "left join" into "left left join" — verify Query contains only plain joins.
Query <- gsub(Query, pattern="join", replacement="left join")
Res.left <- dbGetQuery(dbconn(DB), Query)
nrow(Res.left)
length(unique(Res.left$pre_mirna_id))
length(unique(Res.left$mirfam_name))
dbGetQuery(dbconn(DB), "select count(distinct mirfam_name) from mirfam")
dbGetQuery(dbconn(DB), "select count(distinct pre_mirna_id) from mirfam")
dbGetQuery(dbconn(DB), "select count(distinct pre_mirna_id) from pre_mirna")
## so, with all left joins we get as much as possible... it's just important from which table we start...

##*************************************************
##
##     Internal functions and methods.
##
##*************************************************
cat("Testing internal stuff...")
mirhostgenes:::tablesByDegree(DB)
mirhostgenes:::.buildQuery(DB, columns=c("pre_mirna_id", "array_id", "feature_id"))
mirhostgenes:::.buildQuery(DB, columns=c("pre_mirna_id", "database", "gene_id"))
mirhostgenes:::.buildQuery(DB, columns=c("gene_id", "tx_biotype"))
mirhostgenes:::.buildQuery(DB, columns=c("gene_id", "tx_id"))
mirhostgenes:::.buildQuery(DB, columns=c("gene_id", "tx_id", "gene_name"))
mirhostgenes:::.buildQuery(DB, columns=listColumns(DB, "host_gene"))
## what if we use a GeneidFilter and columns gene_id, tx_id, it should
## filter on tx.gene_id
mirhostgenes:::.buildQuery(DB, columns=c("gene_id", "tx_id"),
                           filter=list(GeneidFilter("a")))
## if we have more attrs from the gene table it should be host_gene.gene_id
mirhostgenes:::.buildQuery(DB, columns=c("gene_id", "tx_id", "gene_name",
                               "gene_biotype"),
                           filter=list(GeneidFilter("a")))
cat("done.\n")
/inst/test/testQueries.R
permissive
ctoste/mirhostgenes
R
false
false
7,667
r
## check namespace. detachem <- function(x){ NS <- loadedNamespaces() if(any(NS==x)){ pkgn <- paste0("package:", x) detach(pkgn, unload=TRUE, character.only=TRUE) } } Pkgs <- c("MirhostDb.Hsapiens.v75.v20", "mirhostgenes", "ensembldb") tmp <- sapply(Pkgs, detachem) tmp <- sapply(Pkgs, library, character.only=TRUE) DB <- MirhostDb.Hsapiens.v75.v20 ######******************************************** ## testing the queries. that's really an internal function that should never be called ## directly. ##### ## 1) tojoin <- c("pre_mirna", "host_gene") ## adds host_tx Q <- mirhostgenes:::joinQueryOnTables(DB, tojoin, join="left join") Q library(RSQLite) R1 <- dbGetQuery(dbconn(DB), paste("select * from", Q)) ## OK. ## the same, but starting from the host_gene table! Q <- mirhostgenes:::joinQueryOnTables(DB, tojoin, join="left join", start.table="host_gene") Q R2 <- dbGetQuery(dbconn(DB), paste("select * from", Q)) ## compare the results nrow(R1) nrow(R2) ## have more for R1 where we started with pre_mirna sum(is.na(R1$gene_id)) sum(is.na(R2$gene_id)) ##### ## 2) tojoin <- c("mat_mirna", "host_gene") Q <- mirhostgenes:::joinQueryOnTables(DB, tojoin, join="left join") Q R1 <- dbGetQuery(dbconn(DB), paste("select * from", Q)) ## the same but starting from host_gene Q <- mirhostgenes:::joinQueryOnTables(DB, tojoin, join="left join", start.table="host_gene") Q R2 <- dbGetQuery(dbconn(DB), paste("select * from", Q)) ## what if we started from mat_mirna? 
Q <- mirhostgenes:::joinQueryOnTables(DB, tojoin, join="left join", start.table="mat_mirna") Q ## compare the results nrow(R1) nrow(R2) ## have more for R1 where we started with pre_mirna sum(is.na(R1$gene_id)) sum(is.na(R2$gene_id)) ##### ## 3) tojoin <- c("mirfam", "mat_mirna") Q <- mirhostgenes:::joinQueryOnTables(DB, tojoin, join="left join") Q R1 <- dbGetQuery(dbconn(DB), paste("select * from", Q)) ## start from mirfam Q <- mirhostgenes:::joinQueryOnTables(DB, tojoin, join="left join", start.table="mirfam") Q R2 <- dbGetQuery(dbconn(DB), paste("select * from", Q)) ## compare the results nrow(R1) nrow(R2) ## have more for R1 where we started with pre_mirna sum(is.na(R1$mirfam_id)) sum(is.na(R2$mirfam_id)) ##### ## 4) ## adds pre_mirna, host_tx tojoin <- c("mat_mirna", "mirfam", "host_gene") ## adds pre_mirna, host_tx Q <- mirhostgenes:::joinQueryOnTables(DB, tojoin, join="left join") Q R1 <- dbGetQuery(dbconn(DB), paste("select * from", Q)) ## same but start from mirfam... Q <- mirhostgenes:::joinQueryOnTables(DB, tojoin, join="left join", start.table="mirfam") Q R2 <- dbGetQuery(dbconn(DB), paste("select * from", Q)) ## we're supposed to get less in R2 ## compare the results nrow(R1) nrow(R2) ## have more for R1 where we started with pre_mirna sum(is.na(R1$mirfam_id)) sum(is.na(R2$mirfam_id)) ##### ## 5) ## tojoin <- c("mat_mirna", "array_feature") ## adds pre_mirna, host_tx Q <- mirhostgenes:::joinQueryOnTables(DB, tojoin, join="left join") Q tojoin <- c( "host_gene", "array_feature" ) Q <- mirhostgenes:::joinQueryOnTables(DB, tojoin, join="left join") Q ## ######******************************************** ## testing the queries to see whether we get what we want... ## Note: these functions are internal. 
Query <- mirhostgenes:::.buildQuery(DB, columns=c("gene_name", "sequence", "pre_mirna_name", "pre_mirna_id", "mat_mirna_name"), filter=list(SeqendFilter(123, condition=">", feature="mat_mirna"), GenenameFilter("SMC4")), order.by="mat_mirna_seq_end") Query Res <- dbGetQuery(dbconn(DB), Query) Res ## now we want to get all mature_mirnas for host gene SMC4, along with its host transcripts, ordered by mat_mirna_seq_start. Query <- mirhostgenes:::.buildQuery(DB, columns=c("tx_id", "gene_name", "pre_mirna_id", "pre_mirna_name", "mat_mirna_name", "mat_mirna_seq_start"), filter=list(GenenameFilter("SMC4")), order.by="mat_mirna_seq_start") Query Res <- dbGetQuery(dbconn(DB), Query) Res ## get all pre_mirnas Query <- mirhostgenes:::.buildQuery(DB, columns=c("pre_mirna_id", "pre_mirna_name")) Allpres <- dbGetQuery(dbconn(DB), Query) nrow(Allpres) length(unique(Allpres$pre_mirna_id)) ## now build a query joining pre_mirnas with host_tx Query <- mirhostgenes:::.buildQuery(DB, columns=c("pre_mirna_id", "pre_mirna_name", "tx_id")) Res.join <- dbGetQuery(dbconn(DB), Query) nrow(Res.join) length(unique(Res.join$pre_mirna_id)) ## what if we used a full join??? doesn't work, only left joins!!! Query <- mirhostgenes:::.buildQuery(DB, columns=c("pre_mirna_id", "pre_mirna_name", "tx_id"), join="left join") Res.left <- dbGetQuery(dbconn(DB), Query) nrow(Res.left) length(unique(Res.left$pre_mirna_id)) sum(is.na(Res.left$tx_id)) ## OK, so we get also some without tx_id ## if we say now that we want to start with host_tx we will end up with the same as above. 
Query <- mirhostgenes:::.buildQuery(DB, columns=c("pre_mirna_id", "pre_mirna_name", "tx_id"), join="left join", start.table="host_tx") Res.join <- dbGetQuery(dbconn(DB), Query) nrow(Res.join) length(unique(Res.join$pre_mirna_id)) Query <- mirhostgenes:::.buildQuery(DB, columns=c("pre_mirna_id", "pre_mirna_name", "tx_id", "mirfam_name")) Query Res <- dbGetQuery(dbconn(DB), Query) nrow(Res) length(unique(Res$pre_mirna_id)) length(unique(Res$mirfam_name)) ## what if we used a left join here? Query <- gsub(Query, pattern="join", replacement="left join") Res.left <- dbGetQuery(dbconn(DB), Query) nrow(Res.left) length(unique(Res.left$pre_mirna_id)) length(unique(Res.left$mirfam_name)) dbGetQuery(dbconn(DB), "select count(distinct mirfam_name) from mirfam") dbGetQuery(dbconn(DB), "select count(distinct pre_mirna_id) from mirfam") dbGetQuery(dbconn(DB), "select count(distinct pre_mirna_id) from pre_mirna") ## so, with all left joins we get as much as possible... it's just important from which table we start... ##************************************************* ## ## Internal functions and methods. 
## ##************************************************* cat("Testing internal stuff...") mirhostgenes:::tablesByDegree(DB) mirhostgenes:::.buildQuery(DB, columns=c("pre_mirna_id", "array_id", "feature_id")) mirhostgenes:::.buildQuery(DB, columns=c("pre_mirna_id", "database", "gene_id")) mirhostgenes:::.buildQuery(DB, columns=c("gene_id", "tx_biotype")) mirhostgenes:::.buildQuery(DB, columns=c("gene_id", "tx_id")) mirhostgenes:::.buildQuery(DB, columns=c("gene_id", "tx_id", "gene_name")) mirhostgenes:::.buildQuery(DB, columns=listColumns(DB, "host_gene")) ## what if we use a GeneidFilter and columns gene_id, tx_id, it should ## filter on tx.gene_id mirhostgenes:::.buildQuery(DB, columns=c("gene_id", "tx_id"), filter=list(GeneidFilter("a"))) ## if we have more attrs from the gene table it should be host_gene.gene_id mirhostgenes:::.buildQuery(DB, columns=c("gene_id", "tx_id", "gene_name", "gene_biotype"), filter=list(GeneidFilter("a"))) cat("done.\n")
library(tidyverse) library(raster) library(ggspatial) library(sf) library(patchwork) library(ggExtra) library(cowplot) chm <- raster("out/scaled_chm_mosaic.tif") theme_set(theme_minimal() + theme(panel.grid.minor = element_blank())) test_preds <- read_csv("out/test-set-checks.csv") %>% mutate(gamma_12 = plogis(-6 + 40 * chm), gamma_21 = plogis(6 - 40 * chm)) # final model switched state order (so we plot true gamma_12 vs. estimated gamma_21) # (there are no constraints to guarantee that state 1 or 2 is "in transit" or "foraging") p1 <- test_preds %>% ggplot(aes(gamma_12, pred_gamma_21)) + geom_point(color = NA) + stat_density_2d(geom = "raster", aes(fill = stat(density)), contour = FALSE) + scale_fill_viridis_c(option = "A") + xlab(expression(paste("True ", gamma["1,2"]))) + ylab(expression(paste("Estimated ", gamma["1,2"]))) + theme(legend.position = "none") + coord_equal() + ggtitle("(a)") p1m <- ggMarginal(p1, type = "histogram") p2 <- test_preds %>% ggplot(aes(gamma_21, pred_gamma_12)) + geom_point(color = NA) + stat_density_2d(geom = "raster", aes(fill = stat(density)), contour = FALSE) + scale_fill_viridis_c(option = "A") + xlab(expression(paste("True ", gamma["2,1"]))) + ylab(expression(paste("Estimated ", gamma["2,1"]))) + theme(legend.position = "none") + coord_equal() + ggtitle("(b)") p2m <- ggMarginal(p2, type = "histogram") transition_densities <- cowplot::plot_grid(p1m, p2m) ggsave("fig/transition-densities.png", plot = transition_densities, width = 6, height = 3.5) # Plot some rows of interest ----------------------------------------------- plot_df_row <- function(df) { stopifnot(nrow(df) == 1) dir <- file.path("out", "trajectories", "test", df$directory) chip <- dir %>% list.files(pattern = sprintf("%03d", df$t), full.names = TRUE) %>% grep("chip.*.tiff$", ., value = TRUE) %>% raster::stack() bbox <- st_read(file.path(dir, "chip_bboxes.gpkg"))[df$t, ] rgb_plot <- ggplot() + ggspatial::layer_spatial(data = chip) + theme_void() rgb_plot } plots <- 
test_preds %>% top_n(9, pred_gamma_12) %>% arrange(pred_gamma_12) %>% mutate(id = 1:n()) %>% split(.$id) %>% lapply(plot_df_row) plots[[1]] <- plots[[1]] + ggtitle("(a) Highest Pr(transition to \"in transit\")") plots_21 <- test_preds %>% top_n(9, pred_gamma_21) %>% arrange(pred_gamma_21) %>% mutate(id = 1:n()) %>% split(.$id) %>% lapply(plot_df_row) plots_21[[1]] <- plots_21[[1]] + ggtitle("(b) Highest Pr(transition to \"foraging\")") top_prob_plots <- wrap_plots(c(plots, plots_21), nrow = 2) ggsave("fig/top-prob-chips.png", top_prob_plots, width = 8, height = 2.5)
/R/s05-visualize-test-set.R
permissive
PepSalehi/neuralecology
R
false
false
2,709
r
library(tidyverse) library(raster) library(ggspatial) library(sf) library(patchwork) library(ggExtra) library(cowplot) chm <- raster("out/scaled_chm_mosaic.tif") theme_set(theme_minimal() + theme(panel.grid.minor = element_blank())) test_preds <- read_csv("out/test-set-checks.csv") %>% mutate(gamma_12 = plogis(-6 + 40 * chm), gamma_21 = plogis(6 - 40 * chm)) # final model switched state order (so we plot true gamma_12 vs. estimated gamma_21) # (there are no constraints to guarantee that state 1 or 2 is "in transit" or "foraging") p1 <- test_preds %>% ggplot(aes(gamma_12, pred_gamma_21)) + geom_point(color = NA) + stat_density_2d(geom = "raster", aes(fill = stat(density)), contour = FALSE) + scale_fill_viridis_c(option = "A") + xlab(expression(paste("True ", gamma["1,2"]))) + ylab(expression(paste("Estimated ", gamma["1,2"]))) + theme(legend.position = "none") + coord_equal() + ggtitle("(a)") p1m <- ggMarginal(p1, type = "histogram") p2 <- test_preds %>% ggplot(aes(gamma_21, pred_gamma_12)) + geom_point(color = NA) + stat_density_2d(geom = "raster", aes(fill = stat(density)), contour = FALSE) + scale_fill_viridis_c(option = "A") + xlab(expression(paste("True ", gamma["2,1"]))) + ylab(expression(paste("Estimated ", gamma["2,1"]))) + theme(legend.position = "none") + coord_equal() + ggtitle("(b)") p2m <- ggMarginal(p2, type = "histogram") transition_densities <- cowplot::plot_grid(p1m, p2m) ggsave("fig/transition-densities.png", plot = transition_densities, width = 6, height = 3.5) # Plot some rows of interest ----------------------------------------------- plot_df_row <- function(df) { stopifnot(nrow(df) == 1) dir <- file.path("out", "trajectories", "test", df$directory) chip <- dir %>% list.files(pattern = sprintf("%03d", df$t), full.names = TRUE) %>% grep("chip.*.tiff$", ., value = TRUE) %>% raster::stack() bbox <- st_read(file.path(dir, "chip_bboxes.gpkg"))[df$t, ] rgb_plot <- ggplot() + ggspatial::layer_spatial(data = chip) + theme_void() rgb_plot } plots <- 
test_preds %>% top_n(9, pred_gamma_12) %>% arrange(pred_gamma_12) %>% mutate(id = 1:n()) %>% split(.$id) %>% lapply(plot_df_row) plots[[1]] <- plots[[1]] + ggtitle("(a) Highest Pr(transition to \"in transit\")") plots_21 <- test_preds %>% top_n(9, pred_gamma_21) %>% arrange(pred_gamma_21) %>% mutate(id = 1:n()) %>% split(.$id) %>% lapply(plot_df_row) plots_21[[1]] <- plots_21[[1]] + ggtitle("(b) Highest Pr(transition to \"foraging\")") top_prob_plots <- wrap_plots(c(plots, plots_21), nrow = 2) ggsave("fig/top-prob-chips.png", top_prob_plots, width = 8, height = 2.5)
?auto.arima() ?sarima library(forecast) library(lmtest) library(astsa) library(fpp2) library(ggplot2) library(lubridate) library(FitAR) options(warn=-1) auto.arima(PM10.ts.train,allowdrift = F) #ok sarima(PM10.ts.train_2,3,0,1,1,1,0,52) #ok auto.arima(PM10.ts.train_3) #ok auto.arima(PM2.5.ts.train) #ok auto.arima(PM2.5.ts.train_2,allowdrift = F) #ok auto.arima(PM2.5.ts.train_3) #ok ##################SARIMA model PM10 all stations############# sarima_pm10_1<- arima(x=PM10.ts.train, order = c(0,0,0), seasonal = list(order=c(1,1,0), period=52)) sarima_pred_pm10_1<-forecast(sarima_pm10_1,h=57,level= c(95,95)) sarima_pred_pm10_1_mean<-sarima_pred_pm10_1$mean sarima_pm10_2<- arima(x=PM10.ts.train_2, order = c(3,0,1), seasonal = list(order=c(1,1,0), period=52)) sarima_pred_pm10_2<-forecast(sarima_pm10_2,h=57,level= c(95,95)) sarima_pred_pm10_2_mean<-sarima_pred_pm10_2$mean sarima_pm10_3<- arima(x=PM10.ts.train_3, order = c(2,0,2), seasonal = list(order=c(1,1,0), period=52)) sarima_pred_pm10_3<-forecast(sarima_pm10_3,h=57,level= c(95,95)) sarima_pred_pm10_3_mean<-sarima_pred_pm10_3$mean par(mfrow=c(3,1)) ##########PLOT of PM10 CAMS-1############# plot(PM10.ts, type="l",col="red", ylab="PM10",lwd=3, main="CAMS1 weekly PM10 data (time series)") par(new=TRUE) lines(sarima_pred_pm10_1_mean, xlab="", ylab="", lty=2,col="blue",lwd=4) legend(2018.1,400,legend=c("Actual", "Predicted"), col=c("red", "blue"), lty=1:2, cex=1,bty="n") grid() ##########PLOT of PM10 CAMS-2############# plot(PM10.ts_2, type="l",col="red", ylab="PM10",lwd=3, main="CAMS2 weekly PM10 data (time series)") par(new=TRUE) lines(sarima_pred_pm10_2_mean, xlab="", ylab="", lty=2,col="blue",lwd=4) legend(2018.4,600,legend=c("Actual", "Predicted"), col=c("red", "blue"), lty=1:2, cex=1,bty="n") grid() ##########PLOT of PM10 CAMS-3############# plot(PM10.ts_3, type="l",col="red", ylab="PM10",lwd=3, main="CAMS3 weekly PM10 data (time series)") par(new=TRUE) lines(sarima_pred_pm10_3_mean, xlab="", ylab="", 
lty=2,col="blue",lwd=4) legend(2018.2,500,legend=c("Actual", "Predicted"), col=c("red", "blue"), lty=1:2, cex=1,bty = "n") grid() ###################### ?legend() library(ggpubr) library(ggfortify) library(changepoint) library(strucchange) library(ggpmisc) ##################SARIMA model PM2.5 all station############# sarima_pm2.5_1<- arima(x=PM2.5.ts.train, order = c(4,0,0), seasonal = list(order=c(1,1,0), period=52)) sarima_pred_pm2.5_1<-forecast(sarima_pm2.5_1,h=57,level= c(95,95)) sarima_pred_pm2.5_1_mean<-sarima_pred_pm2.5_1$mean sarima_pm2.5_2<- arima(x=PM2.5.ts.train_2, order = c(0,0,0), seasonal = list(order=c(1,1,0), period=52)) sarima_pred_pm2.5_2<-forecast(sarima_pm2.5_2,h=57,level= c(95,95)) sarima_pred_pm2.5_2_mean<-sarima_pred_pm2.5_2$mean sarima_pm2.5_3<- arima(x=PM2.5.ts.train_3, order = c(4,0,0), seasonal = list(order=c(1,1,0), period=52)) sarima_pred_pm2.5_3<-forecast(sarima_pm2.5_3,h=57,level= c(95,95)) sarima_pred_pm2.5_3_mean<-sarima_pred_pm2.5_3$mean par(mfrow=c(3,1)) ##########PLOT of PM2.5 CAMS-1####### plot(PM2.5.ts, type="l",col="orange3", ylab="PM2.5",lwd=2, main="CAMS1 weekly PM2.5 data (time series)") par(new=TRUE) lines(sarima_pred_pm2.5_1_mean, xlab="", ylab="", lty=2,col="midnightblue",lwd=3) legend(2018.3,250,legend=c("Actual", "Predicted"), col=c("orange3", "midnightblue"), lty=1:2, cex=.7) grid() ##########PLOT of PM2.5 CAMS-2####### plot(PM2.5.ts_2, type="l",col="orange3", ylab="PM2.5",lwd=2, main="CAMS2 weekly PM2.5 data (time series)") par(new=TRUE) lines(sarima_pred_pm2.5_2_mean, xlab="", ylab="", lty=2,col="midnightblue",lwd=3) legend(2018.4,350,legend=c("Actual", "Predicted"), col=c("orange3", "midnightblue"), lty=1:2, cex=.7) grid() ##########PLOT of PM2.5 CAMS-3####### plot(PM2.5.ts_3, type="l",col="orange3", ylab="PM2.5",lwd=2, main="CAMS3 weekly PM2.5 data (time series)") par(new=TRUE) lines(sarima_pred_pm2.5_3_mean, xlab="", ylab="", lty=2,col="midnightblue",lwd=3) legend(2018.4,275,legend=c("Actual", "Predicted"), 
col=c("orange3", "midnightblue"), lty=1:2, cex=.7) grid() coeftest(sarima_pm10_3)
/Time_Series_Analysis/auto_arima.R
no_license
ishmamshahid/Time-Series-Analysis-of-Particulate-Matter-of-Dhaka-city
R
false
false
4,381
r
?auto.arima() ?sarima library(forecast) library(lmtest) library(astsa) library(fpp2) library(ggplot2) library(lubridate) library(FitAR) options(warn=-1) auto.arima(PM10.ts.train,allowdrift = F) #ok sarima(PM10.ts.train_2,3,0,1,1,1,0,52) #ok auto.arima(PM10.ts.train_3) #ok auto.arima(PM2.5.ts.train) #ok auto.arima(PM2.5.ts.train_2,allowdrift = F) #ok auto.arima(PM2.5.ts.train_3) #ok ##################SARIMA model PM10 all stations############# sarima_pm10_1<- arima(x=PM10.ts.train, order = c(0,0,0), seasonal = list(order=c(1,1,0), period=52)) sarima_pred_pm10_1<-forecast(sarima_pm10_1,h=57,level= c(95,95)) sarima_pred_pm10_1_mean<-sarima_pred_pm10_1$mean sarima_pm10_2<- arima(x=PM10.ts.train_2, order = c(3,0,1), seasonal = list(order=c(1,1,0), period=52)) sarima_pred_pm10_2<-forecast(sarima_pm10_2,h=57,level= c(95,95)) sarima_pred_pm10_2_mean<-sarima_pred_pm10_2$mean sarima_pm10_3<- arima(x=PM10.ts.train_3, order = c(2,0,2), seasonal = list(order=c(1,1,0), period=52)) sarima_pred_pm10_3<-forecast(sarima_pm10_3,h=57,level= c(95,95)) sarima_pred_pm10_3_mean<-sarima_pred_pm10_3$mean par(mfrow=c(3,1)) ##########PLOT of PM10 CAMS-1############# plot(PM10.ts, type="l",col="red", ylab="PM10",lwd=3, main="CAMS1 weekly PM10 data (time series)") par(new=TRUE) lines(sarima_pred_pm10_1_mean, xlab="", ylab="", lty=2,col="blue",lwd=4) legend(2018.1,400,legend=c("Actual", "Predicted"), col=c("red", "blue"), lty=1:2, cex=1,bty="n") grid() ##########PLOT of PM10 CAMS-2############# plot(PM10.ts_2, type="l",col="red", ylab="PM10",lwd=3, main="CAMS2 weekly PM10 data (time series)") par(new=TRUE) lines(sarima_pred_pm10_2_mean, xlab="", ylab="", lty=2,col="blue",lwd=4) legend(2018.4,600,legend=c("Actual", "Predicted"), col=c("red", "blue"), lty=1:2, cex=1,bty="n") grid() ##########PLOT of PM10 CAMS-3############# plot(PM10.ts_3, type="l",col="red", ylab="PM10",lwd=3, main="CAMS3 weekly PM10 data (time series)") par(new=TRUE) lines(sarima_pred_pm10_3_mean, xlab="", ylab="", 
lty=2,col="blue",lwd=4) legend(2018.2,500,legend=c("Actual", "Predicted"), col=c("red", "blue"), lty=1:2, cex=1,bty = "n") grid() ###################### ?legend() library(ggpubr) library(ggfortify) library(changepoint) library(strucchange) library(ggpmisc) ##################SARIMA model PM2.5 all station############# sarima_pm2.5_1<- arima(x=PM2.5.ts.train, order = c(4,0,0), seasonal = list(order=c(1,1,0), period=52)) sarima_pred_pm2.5_1<-forecast(sarima_pm2.5_1,h=57,level= c(95,95)) sarima_pred_pm2.5_1_mean<-sarima_pred_pm2.5_1$mean sarima_pm2.5_2<- arima(x=PM2.5.ts.train_2, order = c(0,0,0), seasonal = list(order=c(1,1,0), period=52)) sarima_pred_pm2.5_2<-forecast(sarima_pm2.5_2,h=57,level= c(95,95)) sarima_pred_pm2.5_2_mean<-sarima_pred_pm2.5_2$mean sarima_pm2.5_3<- arima(x=PM2.5.ts.train_3, order = c(4,0,0), seasonal = list(order=c(1,1,0), period=52)) sarima_pred_pm2.5_3<-forecast(sarima_pm2.5_3,h=57,level= c(95,95)) sarima_pred_pm2.5_3_mean<-sarima_pred_pm2.5_3$mean par(mfrow=c(3,1)) ##########PLOT of PM2.5 CAMS-1####### plot(PM2.5.ts, type="l",col="orange3", ylab="PM2.5",lwd=2, main="CAMS1 weekly PM2.5 data (time series)") par(new=TRUE) lines(sarima_pred_pm2.5_1_mean, xlab="", ylab="", lty=2,col="midnightblue",lwd=3) legend(2018.3,250,legend=c("Actual", "Predicted"), col=c("orange3", "midnightblue"), lty=1:2, cex=.7) grid() ##########PLOT of PM2.5 CAMS-2####### plot(PM2.5.ts_2, type="l",col="orange3", ylab="PM2.5",lwd=2, main="CAMS2 weekly PM2.5 data (time series)") par(new=TRUE) lines(sarima_pred_pm2.5_2_mean, xlab="", ylab="", lty=2,col="midnightblue",lwd=3) legend(2018.4,350,legend=c("Actual", "Predicted"), col=c("orange3", "midnightblue"), lty=1:2, cex=.7) grid() ##########PLOT of PM2.5 CAMS-3####### plot(PM2.5.ts_3, type="l",col="orange3", ylab="PM2.5",lwd=2, main="CAMS3 weekly PM2.5 data (time series)") par(new=TRUE) lines(sarima_pred_pm2.5_3_mean, xlab="", ylab="", lty=2,col="midnightblue",lwd=3) legend(2018.4,275,legend=c("Actual", "Predicted"), 
col=c("orange3", "midnightblue"), lty=1:2, cex=.7) grid() coeftest(sarima_pm10_3)
#' Convert a data.frame with one or more coordinate columns to an sf object #' #' @description #' - [coords_to_sf()]: Convert a data frame with coordinates into a simple #' feature object #' - [check_coords()]: Check if a provided vector with coordinate column names #' are valid for the provided data frame #' - [separate_coords()]: Separate coordinates from a single combined column #' into two columns #' - [format_coords()]: Format coordinates as numeric values and remove missing #' coordinates from a data.frame #' - [has_coords()]: Suggests a coordinate pair by comparing common values to #' the column names for a provided data.frame #' - [rev_coords()]: Reverse a vector of coordinate names if the text "lat" or #' "y" appears in the first position #' #' @param x A data.frame with one or more coordinate columns. If coordinates are #' contained within a single column, coord must be length 1 and a length 2 #' into parameter must be provided. #' @param coords Coordinate columns for input data.frame or output sf object (if #' geometry is 'centroid' or 'point') Default: c("lon", "lat"). #' @param remove_coords For [df_to_sf()], if `TRUE`, remove the coordinate columns #' after converting a data frame to simple feature object; defaults to #' `FALSE`. #' @param crs Coordinate reference system used by the coordinates in the #' provided data frame. 
#' @inheritParams rlang::args_error_context #' @seealso [is_geo_coords()] #' @export #' @importFrom sf st_as_sf #' @importFrom rlang caller_env has_length coords_to_sf <- function(x, coords = c("lon", "lat"), into = NULL, sep = ",", rev = FALSE, remove_coords = FALSE, crs = 4326, call = caller_env()) { if (!is_null(into) && has_length(into, 2) && has_length(coords, 1)) { x <- separate_coords(x = x, coords = coords, into = into, sep = sep) coords <- into } else { coords <- check_coords(x = x, coords = coords, rev = rev) } if (identical(has_coords(x, coords), character(0))) { cli_warn( c("{.arg x} can't be converted to a {.cls sf} object.", " " = "Returning a {.cls {class(x)}} object." ), call = call ) return(x) } x <- format_coords(x, coords = coords, call = call) sf::st_as_sf( x, coords = c(coords[[1]], coords[[2]]), agr = "constant", crs = crs, stringsAsFactors = FALSE, remove = remove_coords ) } #' @rdname coords_to_sf #' @name check_coords #' @param default Default coordinate values; defaults to `c("lon", "lat")`. #' @param rev If `TRUE`, reverse `c("lat", "lon")` coords to `c("lon", "lat")`. #' [check_coords()] only. #' @export check_coords <- function(x = NULL, coords = NULL, default = c("lon", "lat"), rev = FALSE, call = caller_env()) { # If x is a data frame if (!is_null(x) && is.data.frame(x)) { x_has_coords <- has_coords(x, coords = coords, value = FALSE) if (x_has_coords) { coords <- has_coords(x, coords = coords, value = TRUE) } else if (!is_null(coords) && !identical(has_coords(x, default), character(0))) { cli_warn( c( "{.arg coords} ({.val {coords}}) can't be found in {.arg x}.", " " = "Replacing {.arg coords} with {.arg default} ({.val {default}})." ), call = call ) coords <- NULL } } # If x is NULL or not a data.frame check_coords just validates coord pairs or # sets a default value coords <- coords %||% default cli_abort_ifnot( # FIXME: What about the coord_col value where coordinates are split in two? 
"{.arg coords} must be a length 2 {.cls character} or {.cls numeric} vector.", condition = length(coords) == 2 && (is.character(coords) || is.numeric(coords)), call = call ) if (isTRUE(rev)) { coords <- rev_coords(coords) } coords } #' @rdname coords_to_sf #' @name rev_coords #' @param pattern Pattern passed by [rev_coords()] to [grepl()] used to match #' vectors that are reversed. Defaults to `c("lat", "^y")`. #' @param ignore.case If `TRUE`, pattern matching is not case sensitive. #' @export rev_coords <- function(coords, pattern = c("lat", "^y"), ignore.case = TRUE) { if (grepl( pattern = paste0(pattern, collapse = "|"), x = coords[1], ignore.case = ignore.case )) { return(rev(coords)) } coords } #' @rdname coords_to_sf #' @name has_coords #' @param value If `TRUE`, return the value of the coordinate column names. Used #' by [has_coords()]. #' @export #' @importFrom dplyr case_when #' @importFrom rlang has_name has_coords <- function(x, coords = NULL, value = TRUE) { stopifnot( !is_null(x) && is.data.frame(x) ) x_names <- names(x) x <- set_snakecaseish_names(x) x_coords <- NULL x_coords <- dplyr::case_when( all(coords %in% x_names) ~ coords, all(has_name(x, coords)) ~ coords, has_name(x, "lon") ~ c("lon", "lat"), has_name(x, "long") ~ c("long", "lat"), has_name(x, "lng") ~ c("lng", "lat"), has_name(x, "longitude") ~ c("longitude", "latitude"), has_name(x, "y") ~ c("y", "x"), has_name(x, "geo_y") ~ c("geo_y", "geo_x"), has_name(x, "geo_longitude") ~ c("geo_longitude", "geo_latitude") ) x_has_coords <- grep( paste0(paste0("^", x_coords, "$"), collapse = "|"), x_names, ignore.case = TRUE, value = value ) if (value) { return(x_has_coords) } has_same_len(x_has_coords, x_coords) } #' @rdname coords_to_sf #' @name format_coords #' @param keep_missing If `TRUE`, keep rows with missing coordinate values. #' Defaults to `FALSE` which filters out rows with missing coordinates. 
#' @export format_coords <- function(x, coords = c("lon", "lat"), keep_missing = FALSE, call = caller_env()) { cli_abort_ifnot( "{.arg coords} can't be {.val NULL} or {.val character(0)}.", condition = !is_null(coords) && !identical(coords, character(0)), call = call ) cli_abort_ifnot( "{.arg x} must be a {.cls data.frame} with columns named {.val {coords}}.", condition = is.data.frame(x) && all(has_name(x, coords)), call = call ) lon <- coords[[1]] lat <- coords[[2]] if (!all(is.numeric(x[[lon]])) || !all(is.numeric(x[[lat]]))) { x[[lon]] <- as.numeric(x[[lon]]) x[[lat]] <- as.numeric(x[[lat]]) } missing_coords <- (is.na(x[[lon]]) | is.na(x[[lat]])) n_missing_coords <- sum(missing_coords) has_missing_coords <- n_missing_coords > 0 if (n_missing_coords == nrow(x)) { cli_abort( "{.arg x} must have one or more coordinate pairs in column{?s} {.val {coords}}.", call = call ) } if (has_missing_coords && !keep_missing) { # Exclude rows with missing coordinates cli_alert_info( "Removing {.val {n_missing_coords}} row{?s} with missing coordinates." ) return(x[!missing_coords, ]) } if (has_missing_coords) { cli_alert_info( "{.arg x} has {.val {n_missing_coords}} row{?s} with missing coordinates." ) } x } #' @rdname coords_to_sf #' @name separate_coords #' @param into If coords is a single column name with both longitude and #' latitude, `into` is used as the names of the new columns that coords is #' separated into. Passed to [tidyr::separate()]. #' @param sep If coords is a single column name with both longitude and #' latitude, `sep` is used as the separator between coordinate values. Passed #' to [tidyr::separate()]. 
#' @export #' @importFrom dplyr mutate across all_of separate_coords <- function(x, coords, into = c("lon", "lat"), sep = ",") { into <- check_coords(x = NULL, coords = into) check_installed(c("tidyr", "readr")) x <- tidyr::separate( x, col = dplyr::all_of(coords), into = into, sep = sep ) dplyr::mutate( x, dplyr::across( .cols = dplyr::all_of(into), ~ readr::parse_number(.x) ) ) }
/R/coords_to_sf.R
permissive
elipousson/sfext
R
false
false
8,234
r
#' Convert a data.frame with one or more coordinate columns to an sf object #' #' @description #' - [coords_to_sf()]: Convert a data frame with coordinates into a simple #' feature object #' - [check_coords()]: Check if a provided vector with coordinate column names #' are valid for the provided data frame #' - [separate_coords()]: Separate coordinates from a single combined column #' into two columns #' - [format_coords()]: Format coordinates as numeric values and remove missing #' coordinates from a data.frame #' - [has_coords()]: Suggests a coordinate pair by comparing common values to #' the column names for a provided data.frame #' - [rev_coords()]: Reverse a vector of coordinate names if the text "lat" or #' "y" appears in the first position #' #' @param x A data.frame with one or more coordinate columns. If coordinates are #' contained within a single column, coord must be length 1 and a length 2 #' into parameter must be provided. #' @param coords Coordinate columns for input data.frame or output sf object (if #' geometry is 'centroid' or 'point') Default: c("lon", "lat"). #' @param remove_coords For [df_to_sf()], if `TRUE`, remove the coordinate columns #' after converting a data frame to simple feature object; defaults to #' `FALSE`. #' @param crs Coordinate reference system used by the coordinates in the #' provided data frame. 
#' @inheritParams rlang::args_error_context #' @seealso [is_geo_coords()] #' @export #' @importFrom sf st_as_sf #' @importFrom rlang caller_env has_length coords_to_sf <- function(x, coords = c("lon", "lat"), into = NULL, sep = ",", rev = FALSE, remove_coords = FALSE, crs = 4326, call = caller_env()) { if (!is_null(into) && has_length(into, 2) && has_length(coords, 1)) { x <- separate_coords(x = x, coords = coords, into = into, sep = sep) coords <- into } else { coords <- check_coords(x = x, coords = coords, rev = rev) } if (identical(has_coords(x, coords), character(0))) { cli_warn( c("{.arg x} can't be converted to a {.cls sf} object.", " " = "Returning a {.cls {class(x)}} object." ), call = call ) return(x) } x <- format_coords(x, coords = coords, call = call) sf::st_as_sf( x, coords = c(coords[[1]], coords[[2]]), agr = "constant", crs = crs, stringsAsFactors = FALSE, remove = remove_coords ) } #' @rdname coords_to_sf #' @name check_coords #' @param default Default coordinate values; defaults to `c("lon", "lat")`. #' @param rev If `TRUE`, reverse `c("lat", "lon")` coords to `c("lon", "lat")`. #' [check_coords()] only. #' @export check_coords <- function(x = NULL, coords = NULL, default = c("lon", "lat"), rev = FALSE, call = caller_env()) { # If x is a data frame if (!is_null(x) && is.data.frame(x)) { x_has_coords <- has_coords(x, coords = coords, value = FALSE) if (x_has_coords) { coords <- has_coords(x, coords = coords, value = TRUE) } else if (!is_null(coords) && !identical(has_coords(x, default), character(0))) { cli_warn( c( "{.arg coords} ({.val {coords}}) can't be found in {.arg x}.", " " = "Replacing {.arg coords} with {.arg default} ({.val {default}})." ), call = call ) coords <- NULL } } # If x is NULL or not a data.frame check_coords just validates coord pairs or # sets a default value coords <- coords %||% default cli_abort_ifnot( # FIXME: What about the coord_col value where coordinates are split in two? 
"{.arg coords} must be a length 2 {.cls character} or {.cls numeric} vector.", condition = length(coords) == 2 && (is.character(coords) || is.numeric(coords)), call = call ) if (isTRUE(rev)) { coords <- rev_coords(coords) } coords } #' @rdname coords_to_sf #' @name rev_coords #' @param pattern Pattern passed by [rev_coords()] to [grepl()] used to match #' vectors that are reversed. Defaults to `c("lat", "^y")`. #' @param ignore.case If `TRUE`, pattern matching is not case sensitive. #' @export rev_coords <- function(coords, pattern = c("lat", "^y"), ignore.case = TRUE) { if (grepl( pattern = paste0(pattern, collapse = "|"), x = coords[1], ignore.case = ignore.case )) { return(rev(coords)) } coords } #' @rdname coords_to_sf #' @name has_coords #' @param value If `TRUE`, return the value of the coordinate column names. Used #' by [has_coords()]. #' @export #' @importFrom dplyr case_when #' @importFrom rlang has_name has_coords <- function(x, coords = NULL, value = TRUE) { stopifnot( !is_null(x) && is.data.frame(x) ) x_names <- names(x) x <- set_snakecaseish_names(x) x_coords <- NULL x_coords <- dplyr::case_when( all(coords %in% x_names) ~ coords, all(has_name(x, coords)) ~ coords, has_name(x, "lon") ~ c("lon", "lat"), has_name(x, "long") ~ c("long", "lat"), has_name(x, "lng") ~ c("lng", "lat"), has_name(x, "longitude") ~ c("longitude", "latitude"), has_name(x, "y") ~ c("y", "x"), has_name(x, "geo_y") ~ c("geo_y", "geo_x"), has_name(x, "geo_longitude") ~ c("geo_longitude", "geo_latitude") ) x_has_coords <- grep( paste0(paste0("^", x_coords, "$"), collapse = "|"), x_names, ignore.case = TRUE, value = value ) if (value) { return(x_has_coords) } has_same_len(x_has_coords, x_coords) } #' @rdname coords_to_sf #' @name format_coords #' @param keep_missing If `TRUE`, keep rows with missing coordinate values. #' Defaults to `FALSE` which filters out rows with missing coordinates. 
#' @export format_coords <- function(x, coords = c("lon", "lat"), keep_missing = FALSE, call = caller_env()) { cli_abort_ifnot( "{.arg coords} can't be {.val NULL} or {.val character(0)}.", condition = !is_null(coords) && !identical(coords, character(0)), call = call ) cli_abort_ifnot( "{.arg x} must be a {.cls data.frame} with columns named {.val {coords}}.", condition = is.data.frame(x) && all(has_name(x, coords)), call = call ) lon <- coords[[1]] lat <- coords[[2]] if (!all(is.numeric(x[[lon]])) || !all(is.numeric(x[[lat]]))) { x[[lon]] <- as.numeric(x[[lon]]) x[[lat]] <- as.numeric(x[[lat]]) } missing_coords <- (is.na(x[[lon]]) | is.na(x[[lat]])) n_missing_coords <- sum(missing_coords) has_missing_coords <- n_missing_coords > 0 if (n_missing_coords == nrow(x)) { cli_abort( "{.arg x} must have one or more coordinate pairs in column{?s} {.val {coords}}.", call = call ) } if (has_missing_coords && !keep_missing) { # Exclude rows with missing coordinates cli_alert_info( "Removing {.val {n_missing_coords}} row{?s} with missing coordinates." ) return(x[!missing_coords, ]) } if (has_missing_coords) { cli_alert_info( "{.arg x} has {.val {n_missing_coords}} row{?s} with missing coordinates." ) } x } #' @rdname coords_to_sf #' @name separate_coords #' @param into If coords is a single column name with both longitude and #' latitude, `into` is used as the names of the new columns that coords is #' separated into. Passed to [tidyr::separate()]. #' @param sep If coords is a single column name with both longitude and #' latitude, `sep` is used as the separator between coordinate values. Passed #' to [tidyr::separate()]. 
#' @export #' @importFrom dplyr mutate across all_of separate_coords <- function(x, coords, into = c("lon", "lat"), sep = ",") { into <- check_coords(x = NULL, coords = into) check_installed(c("tidyr", "readr")) x <- tidyr::separate( x, col = dplyr::all_of(coords), into = into, sep = sep ) dplyr::mutate( x, dplyr::across( .cols = dplyr::all_of(into), ~ readr::parse_number(.x) ) ) }
options(digits=4) generateData <- function(n){ w1 <- rbinom(n, size=1, prob=0.5) w2 <- rbinom(n, size=1, prob=0.65) w3 <- round(runif(n, min=0, max=4), digits=3) w4 <- round(runif(n, min=0, max=5), digits=3) A <- rbinom(n, size=1, prob= plogis(-0.4 + 0.2*w2 + 0.15*w3 + 0.2*w4 + 0.15*w2*w4)) Y <- rbinom(n, size=1, prob= plogis(-1 + A -0.1*w1 + 0.3*w2 + 0.25*w3 + 0.2*w4 + 0.15*w2*w4)) # counterfactual Y.1 <- rbinom(n, size=1, prob= plogis(-1 + 1 -0.1*w1 + 0.3*w2 + 0.25*w3 + 0.2*w4 + 0.15*w2*w4)) Y.0 <- rbinom(n, size=1, prob= plogis(-1 + 0 -0.1*w1 + 0.3*w2 + 0.25*w3 + 0.2*w4 + 0.15*w2*w4)) # return data.frame data.frame(w1, w2, w3, w4, A, Y, Y.1, Y.0) } set.seed(7777) ObsData <- generateData(n=10000) True_Psi <- mean(ObsData$Y.1-ObsData$Y.0); cat(" True_Psi:", True_Psi) Bias_Psi <- lm(data=ObsData, Y~ A + w1 + w2 + w3 + w4) cat("\n") cat("\n Naive_Biased_Psi:",summary(Bias_Psi)$coef[2, 1]) Naive_Bias <- ((summary(Bias_Psi)$coef[2, 1])-True_Psi); cat("\n Naives bias:", Naive_Bias) Naive_Relative_Bias <- (((summary(Bias_Psi)$coef[2, 1])-True_Psi)/True_Psi)*100; cat("\n Relative Naives bias:", Naive_Relative_Bias,"%")
/src/tmle/generateData.r
no_license
gfrascadorio/gfrascadorio.github.io
R
false
false
1,163
r
options(digits=4) generateData <- function(n){ w1 <- rbinom(n, size=1, prob=0.5) w2 <- rbinom(n, size=1, prob=0.65) w3 <- round(runif(n, min=0, max=4), digits=3) w4 <- round(runif(n, min=0, max=5), digits=3) A <- rbinom(n, size=1, prob= plogis(-0.4 + 0.2*w2 + 0.15*w3 + 0.2*w4 + 0.15*w2*w4)) Y <- rbinom(n, size=1, prob= plogis(-1 + A -0.1*w1 + 0.3*w2 + 0.25*w3 + 0.2*w4 + 0.15*w2*w4)) # counterfactual Y.1 <- rbinom(n, size=1, prob= plogis(-1 + 1 -0.1*w1 + 0.3*w2 + 0.25*w3 + 0.2*w4 + 0.15*w2*w4)) Y.0 <- rbinom(n, size=1, prob= plogis(-1 + 0 -0.1*w1 + 0.3*w2 + 0.25*w3 + 0.2*w4 + 0.15*w2*w4)) # return data.frame data.frame(w1, w2, w3, w4, A, Y, Y.1, Y.0) } set.seed(7777) ObsData <- generateData(n=10000) True_Psi <- mean(ObsData$Y.1-ObsData$Y.0); cat(" True_Psi:", True_Psi) Bias_Psi <- lm(data=ObsData, Y~ A + w1 + w2 + w3 + w4) cat("\n") cat("\n Naive_Biased_Psi:",summary(Bias_Psi)$coef[2, 1]) Naive_Bias <- ((summary(Bias_Psi)$coef[2, 1])-True_Psi); cat("\n Naives bias:", Naive_Bias) Naive_Relative_Bias <- (((summary(Bias_Psi)$coef[2, 1])-True_Psi)/True_Psi)*100; cat("\n Relative Naives bias:", Naive_Relative_Bias,"%")
\name{src} \alias{is.src} \alias{src} \title{Create a "src" object} \usage{ src(subclass, ...) is.src(x) } \arguments{ \item{subclass}{name of subclass. "src" is an abstract base class, so you must supply this value. \code{src_} is automatically prepended to the class name} \item{...}{fields used by object} \item{x}{object to test for "src"-ness.} } \description{ \code{src} is the standard constructor for srcs and \code{is.src} tests. } \keyword{internal}
/dplyr/man/src.Rd
no_license
radfordneal/R-package-mods
R
false
false
474
rd
\name{src} \alias{is.src} \alias{src} \title{Create a "src" object} \usage{ src(subclass, ...) is.src(x) } \arguments{ \item{subclass}{name of subclass. "src" is an abstract base class, so you must supply this value. \code{src_} is automatically prepended to the class name} \item{...}{fields used by object} \item{x}{object to test for "src"-ness.} } \description{ \code{src} is the standard constructor for srcs and \code{is.src} tests. } \keyword{internal}
#Cleanup environment rm(list = ls()) # free memory gc() setwd("C:/Users/gmanish/Downloads/openminds/code/R/") library(magrittr) foo_foo <- little_bunny() #Intermediate Steps foo_foo_1 <- hop(foo_foo, through = forest) foo_foo_2 <- scoop(foo_foo_1, up = field_mice) foo_foo_3 <- bop(foo_foo_2, on = head) diamonds <- ggplot2::diamonds diamonds2 <- diamonds %>% dplyr::mutate(price_per_carat = price / carat) pryr::object_size(diamonds) #> 3.46 MB pryr::object_size(diamonds2) #> 3.89 MB pryr::object_size(diamonds, diamonds2) #> 3.89 MB diamonds$carat[1] <- NA pryr::object_size(diamonds) #> 3.46 MB pryr::object_size(diamonds2) #> 3.89 MB pryr::object_size(diamonds, diamonds2) #> 4.32 MB #Overwrite the Original foo_foo <- hop(foo_foo, through = forest) foo_foo <- scoop(foo_foo, up = field_mice) foo_foo <- bop(foo_foo, on = head) bop( scoop( hop(foo_foo, through = forest), up = field_mice ), on = head ) #Use the Pipe foo_foo %>% hop(through = forest) %>% scoop(up = field_mouse) %>% bop(on = head) my_pipe <- function(.) { . <- hop(., through = forest) . <- scoop(., up = field_mice) bop(., on = head) } my_pipe(foo_foo) assign("x", 10) x #> [1] 10 "x" %>% assign(100) x #> [1] 10 env <- environment() "x" %>% assign(100, envir = env) x #> [1] 100 tryCatch(stop("!"), error = function(e) "An error") #> [1] "An error" stop("!") %>% tryCatch(error = function(e) "An error") #> Error in eval(expr, envir, enclos): ! #Other Tools from magrittr rnorm(100) %>% matrix(ncol = 2) %>% plot() %>% str() #> NULL rnorm(100) %>% matrix(ncol = 2) %T>% plot() %>% str() #> num [1:50, 1:2] -0.387 -0.785 -1.057 -0.796 -1.756 ... mtcars %$% cor(disp, mpg) #> [1] -0.848 mtcars <- mtcars %>% transform(cyl = cyl * 2) mtcars %<>% transform(cyl = cyl * 2)
/13.Pipes_with_magrittr.R
no_license
sumanth048/R
R
false
false
1,763
r
#Cleanup environment rm(list = ls()) # free memory gc() setwd("C:/Users/gmanish/Downloads/openminds/code/R/") library(magrittr) foo_foo <- little_bunny() #Intermediate Steps foo_foo_1 <- hop(foo_foo, through = forest) foo_foo_2 <- scoop(foo_foo_1, up = field_mice) foo_foo_3 <- bop(foo_foo_2, on = head) diamonds <- ggplot2::diamonds diamonds2 <- diamonds %>% dplyr::mutate(price_per_carat = price / carat) pryr::object_size(diamonds) #> 3.46 MB pryr::object_size(diamonds2) #> 3.89 MB pryr::object_size(diamonds, diamonds2) #> 3.89 MB diamonds$carat[1] <- NA pryr::object_size(diamonds) #> 3.46 MB pryr::object_size(diamonds2) #> 3.89 MB pryr::object_size(diamonds, diamonds2) #> 4.32 MB #Overwrite the Original foo_foo <- hop(foo_foo, through = forest) foo_foo <- scoop(foo_foo, up = field_mice) foo_foo <- bop(foo_foo, on = head) bop( scoop( hop(foo_foo, through = forest), up = field_mice ), on = head ) #Use the Pipe foo_foo %>% hop(through = forest) %>% scoop(up = field_mouse) %>% bop(on = head) my_pipe <- function(.) { . <- hop(., through = forest) . <- scoop(., up = field_mice) bop(., on = head) } my_pipe(foo_foo) assign("x", 10) x #> [1] 10 "x" %>% assign(100) x #> [1] 10 env <- environment() "x" %>% assign(100, envir = env) x #> [1] 100 tryCatch(stop("!"), error = function(e) "An error") #> [1] "An error" stop("!") %>% tryCatch(error = function(e) "An error") #> Error in eval(expr, envir, enclos): ! #Other Tools from magrittr rnorm(100) %>% matrix(ncol = 2) %>% plot() %>% str() #> NULL rnorm(100) %>% matrix(ncol = 2) %T>% plot() %>% str() #> num [1:50, 1:2] -0.387 -0.785 -1.057 -0.796 -1.756 ... mtcars %$% cor(disp, mpg) #> [1] -0.848 mtcars <- mtcars %>% transform(cyl = cyl * 2) mtcars %<>% transform(cyl = cyl * 2)
## Trials for Financial data # source('~/Desktop/GERGM_Development/Financial_Data_Application.R') #set year YEAR = 1 #use this to open a shell in CentOS 6 that can be used to run the scripts #scl enable devtoolset-1.1 bash PKG_CPPFLAGS = "-std=c++0x" Sys.setenv(PKG_CPPFLAGS = PKG_CPPFLAGS) ## This is working directly out of the GERGM folder in Github setwd("~/Desktop/GERGM_Development") library(RcppArmadillo) Rcpp::sourceCpp('Scripts/MH_Sampler_Normal_Together.cpp') source('Scripts/GERGM.R', echo=F) Report <- function(x){print(x)} load("Data/Transformed_Country_Lending_Data_1980-2005.Rdata") RUN_MODEL = T EVALUATE_OUTPUT = F if(RUN_MODEL){ # First model: transform.data only takes the mean of the realized values # The ERGM part of the model is a transitive.triads + reciprocity model net <- log_adjacency_matrix_list[[YEAR]] formula.obj = net ~ in2star + out2star+ ttriads shape = 0.05 sample_every = 2000 seed = 123 CUSTOM = F MH.fit <- gergm(formula.obj, directed = TRUE, seed = seed, transform.data = NULL, method = "Metropolis", max.num.iterations = 10, mc.num.iterations = 100, nsim = 2000000, MCMC.burnin = 200000, tolerance = 0.001, shape.parameter = shape, together = 1, thin = 1/sample_every, weights = c(0.03,0.03,0.03), gain.factor = 0.5) #Simulate MH to see if we have any goodness of fit? 
MH.sims <- simulate.gergm(MH.fit, 2000000, seed = seed, method = "Metropolis", MCMC.burnin = 10000, thin = 1/sample_every, together = 1) True.stats <- MH.fit@stats[1,] temp.stats <- MH.sims$Statistics #Re weighting the recip and ttriads to correct level #temp.stats$recip = temp.stats$recip ^ (1/0.03) temp.stats$ttriads = temp.stats$ttriads ^ (1/0.03) temp.stats$in2stars = temp.stats$in2stars ^ (1/0.03) temp.stats$out2stars = temp.stats$out2stars ^ (1/0.03) ## plot goodness of fit indx = order(True.stats) #setwd("~/Desktop/GERGM_Development/Output") pdf(file = paste("MH_GOF_",YEAR,".pdf",sep = ""), width = 7, height = 5) par(oma=c(1,1,1,1)) par(mar=c(4,4.5,2,1)) spacing = seq(1,401, by = 15) par(mfrow = c(1,1)) boxplot(log(temp.stats)[indx], ylab = "Log value", cex.axis = 1.5, cex.lab = 1.5, main = paste("Metropolis-Hastings", YEAR), cex.main = 1.5) lines(log(True.stats)[indx], type = "b", lwd = 3, pch = 5, lty = 8, col = "blue") legend("bottomright", "Observed", lwd = 3, pch = 5, lty = 8, col = "blue", cex = 1.25) dev.off() save(MH.fit, MH.sims, file = paste("Financial_Data_RTIO_",YEAR,".RData",sep = "")) #setwd("~/Desktop/GERGM_Development") } if(EVALUATE_OUTPUT){ #### take a look at our saved output setwd("~/Desktop/GERGM_Development") load("~/Desktop/GERGM_Development/Financial_model_1.RData") #look at the MH parameter estimates MH.fit temp <- MH.fit@theta.coef z.stats <- as.numeric(temp[1,]/temp[2,]) 2*pnorm(-abs(z.stats[1])) ## 0.01065612 2*pnorm(-abs(z.stats[2])) ## 0.214362 }
/Financial_Data_Application.R
no_license
matthewjdenny/GERGM_Development
R
false
false
3,046
r
## Trials for Financial data # source('~/Desktop/GERGM_Development/Financial_Data_Application.R') #set year YEAR = 1 #use this to open a shell in CentOS 6 that can be used to run the scripts #scl enable devtoolset-1.1 bash PKG_CPPFLAGS = "-std=c++0x" Sys.setenv(PKG_CPPFLAGS = PKG_CPPFLAGS) ## This is working directly out of the GERGM folder in Github setwd("~/Desktop/GERGM_Development") library(RcppArmadillo) Rcpp::sourceCpp('Scripts/MH_Sampler_Normal_Together.cpp') source('Scripts/GERGM.R', echo=F) Report <- function(x){print(x)} load("Data/Transformed_Country_Lending_Data_1980-2005.Rdata") RUN_MODEL = T EVALUATE_OUTPUT = F if(RUN_MODEL){ # First model: transform.data only takes the mean of the realized values # The ERGM part of the model is a transitive.triads + reciprocity model net <- log_adjacency_matrix_list[[YEAR]] formula.obj = net ~ in2star + out2star+ ttriads shape = 0.05 sample_every = 2000 seed = 123 CUSTOM = F MH.fit <- gergm(formula.obj, directed = TRUE, seed = seed, transform.data = NULL, method = "Metropolis", max.num.iterations = 10, mc.num.iterations = 100, nsim = 2000000, MCMC.burnin = 200000, tolerance = 0.001, shape.parameter = shape, together = 1, thin = 1/sample_every, weights = c(0.03,0.03,0.03), gain.factor = 0.5) #Simulate MH to see if we have any goodness of fit? 
MH.sims <- simulate.gergm(MH.fit, 2000000, seed = seed, method = "Metropolis", MCMC.burnin = 10000, thin = 1/sample_every, together = 1) True.stats <- MH.fit@stats[1,] temp.stats <- MH.sims$Statistics #Re weighting the recip and ttriads to correct level #temp.stats$recip = temp.stats$recip ^ (1/0.03) temp.stats$ttriads = temp.stats$ttriads ^ (1/0.03) temp.stats$in2stars = temp.stats$in2stars ^ (1/0.03) temp.stats$out2stars = temp.stats$out2stars ^ (1/0.03) ## plot goodness of fit indx = order(True.stats) #setwd("~/Desktop/GERGM_Development/Output") pdf(file = paste("MH_GOF_",YEAR,".pdf",sep = ""), width = 7, height = 5) par(oma=c(1,1,1,1)) par(mar=c(4,4.5,2,1)) spacing = seq(1,401, by = 15) par(mfrow = c(1,1)) boxplot(log(temp.stats)[indx], ylab = "Log value", cex.axis = 1.5, cex.lab = 1.5, main = paste("Metropolis-Hastings", YEAR), cex.main = 1.5) lines(log(True.stats)[indx], type = "b", lwd = 3, pch = 5, lty = 8, col = "blue") legend("bottomright", "Observed", lwd = 3, pch = 5, lty = 8, col = "blue", cex = 1.25) dev.off() save(MH.fit, MH.sims, file = paste("Financial_Data_RTIO_",YEAR,".RData",sep = "")) #setwd("~/Desktop/GERGM_Development") } if(EVALUATE_OUTPUT){ #### take a look at our saved output setwd("~/Desktop/GERGM_Development") load("~/Desktop/GERGM_Development/Financial_model_1.RData") #look at the MH parameter estimates MH.fit temp <- MH.fit@theta.coef z.stats <- as.numeric(temp[1,]/temp[2,]) 2*pnorm(-abs(z.stats[1])) ## 0.01065612 2*pnorm(-abs(z.stats[2])) ## 0.214362 }
basec <- read.csv("census.csv") basec <- basec[-1] #busca os valores categoricos existentes na coluna table(basec$sex) #troca os atributos de categoricos por numericos basec$sex <- factor(basec$sex, levels = c(" Female", " Male"), labels = c(0, 1)) #basec$workclass <- factor(basec$workclass, levels = unique(basec$workclass), labels = c(1:ifelse(length(unique(basec$workclass)) > 2, length(unique(basec$workclass)), 2))) #basec$education <- factor(basec$education, levels = unique(basec$education), labels = c(1:ifelse(length(unique(basec$education)) > 2, length(unique(basec$education)), 2))) #basec$marital.status <- factor(basec$marital.status, levels = unique(basec$marital.status), labels = c(1:ifelse(length(unique(basec$marital.status)) > 2, length(unique(basec$marital.status)), 2))) #basec$occupation <- factor(basec$occupation, levels = unique(basec$occupation), labels = c(1:ifelse(length(unique(basec$occupation)) > 2, length(unique(basec$occupation)), 2))) #basec$relationship <- factor(basec$relationship, levels = unique(basec$relationship), labels = c(1:ifelse(length(unique(basec$relationship)) > 2, length(unique(basec$relationship)), 2))) #basec$race <- factor(basec$race, levels = unique(basec$race), labels = c(1:ifelse(length(unique(basec$race)) > 2, length(unique(basec$race)), 2))) #basec$native.country <- factor(basec$native.country, levels = unique(basec$native.country), labels = c(1:ifelse(length(unique(basec$native.country)) > 2, length(unique(basec$native.country)), 2))) #basec$income <- factor(basec$income, levels = unique(basec$income), labels = c(1:length( unique(basec$income)))) basec$workclass = factor(basec$workclass, levels = c(' Federal-gov', ' Local-gov', ' Private', ' Self-emp-inc', ' Self-emp-not-inc', ' State-gov', ' Without-pay'), labels = c(1, 2, 3, 4, 5, 6, 7)) basec$education = factor(basec$education, levels = c(' 10th', ' 11th', ' 12th', ' 1st-4th', ' 5th-6th', ' 7th-8th', ' 9th', ' Assoc-acdm', ' Assoc-voc', ' Bachelors', ' Doctorate', ' 
HS-grad', ' Masters', ' Preschool', ' Prof-school', ' Some-college'), labels = c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)) basec$marital.status = factor(basec$marital.status, levels = c(' Divorced', ' Married-AF-spouse', ' Married-civ-spouse', ' Married-spouse-absent', ' Never-married', ' Separated', ' Widowed'), labels = c(1, 2, 3, 4, 5, 6, 7)) basec$occupation = factor(basec$occupation, levels = c(' Adm-clerical', ' Armed-Forces', ' Craft-repair', ' Exec-managerial', ' Farming-fishing', ' Handlers-cleaners', ' Machine-op-inspct', ' Other-service', ' Priv-house-serv', ' Prof-specialty', ' Protective-serv', ' Sales', ' Tech-support', ' Transport-moving'), labels = c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14)) basec$relationship = factor(basec$relationship, levels = c(' Husband', ' Not-in-family', ' Other-relative', ' Own-child', ' Unmarried', ' Wife'), labels = c(1, 2, 3, 4, 5, 6)) basec$race = factor(basec$race, levels = c(' Amer-Indian-Eskimo', ' Asian-Pac-Islander', ' Black', ' Other', ' White'), labels = c(1, 2, 3, 4, 5)) basec$native.country = factor(basec$native.country, levels = c(' Cambodia', ' Canada', ' China', ' Columbia', ' Cuba', ' Dominican-Republic', ' Ecuador', ' El-Salvador', ' England', ' France', ' Germany', ' Greece', ' Guatemala', ' Haiti', ' Holand-Netherlands', ' Honduras', ' Hong', ' Hungary', ' India', ' Iran', ' Ireland', ' Italy', ' Jamaica', ' Japan', ' Laos', ' Mexico', ' Nicaragua', ' Outlying-US(Guam-USVI-etc)', ' Peru', ' Philippines', ' Poland', ' Portugal', ' Puerto-Rico', ' Scotland', ' South', ' Taiwan', ' Thailand', ' Trinadad&Tobago', ' United-States', ' Vietnam', ' Yugoslavia'), labels = c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41)) basec$income = factor(basec$income, levels = c(' <=50K', ' >50K'), labels = c(0, 1)) #escalona as colunas que são numericas basec[which(sapply(basec,is.numeric))] <- 
scale(basec[which(sapply(basec,is.numeric))]) library(caTools) set.seed(1) divisao_c <- sample.split(basec$income, SplitRatio = 0.85) base_treinamento_c <- subset(basec, divisao_c == TRUE) base_teste_c <- subset(basec, divisao_c == FALSE) library(e1071) library(caret) classificador_nb_census <- naiveBayes(x <- base_treinamento_c[-15], y <- base_treinamento_c$income) previsoes_nb_census <- predict(classificador_nb_census, newdata = base_teste_c[-15]) matriz_confusao_nb_census <- table(base_teste_c[,15], previsoes_nb_census) #acerto de 82.14% confusionMatrix(matriz_confusao_nb_census)
/curso_ML/naive_bayes_census.R
no_license
henrikots/dados-credito-curso
R
false
false
4,601
r
basec <- read.csv("census.csv") basec <- basec[-1] #busca os valores categoricos existentes na coluna table(basec$sex) #troca os atributos de categoricos por numericos basec$sex <- factor(basec$sex, levels = c(" Female", " Male"), labels = c(0, 1)) #basec$workclass <- factor(basec$workclass, levels = unique(basec$workclass), labels = c(1:ifelse(length(unique(basec$workclass)) > 2, length(unique(basec$workclass)), 2))) #basec$education <- factor(basec$education, levels = unique(basec$education), labels = c(1:ifelse(length(unique(basec$education)) > 2, length(unique(basec$education)), 2))) #basec$marital.status <- factor(basec$marital.status, levels = unique(basec$marital.status), labels = c(1:ifelse(length(unique(basec$marital.status)) > 2, length(unique(basec$marital.status)), 2))) #basec$occupation <- factor(basec$occupation, levels = unique(basec$occupation), labels = c(1:ifelse(length(unique(basec$occupation)) > 2, length(unique(basec$occupation)), 2))) #basec$relationship <- factor(basec$relationship, levels = unique(basec$relationship), labels = c(1:ifelse(length(unique(basec$relationship)) > 2, length(unique(basec$relationship)), 2))) #basec$race <- factor(basec$race, levels = unique(basec$race), labels = c(1:ifelse(length(unique(basec$race)) > 2, length(unique(basec$race)), 2))) #basec$native.country <- factor(basec$native.country, levels = unique(basec$native.country), labels = c(1:ifelse(length(unique(basec$native.country)) > 2, length(unique(basec$native.country)), 2))) #basec$income <- factor(basec$income, levels = unique(basec$income), labels = c(1:length( unique(basec$income)))) basec$workclass = factor(basec$workclass, levels = c(' Federal-gov', ' Local-gov', ' Private', ' Self-emp-inc', ' Self-emp-not-inc', ' State-gov', ' Without-pay'), labels = c(1, 2, 3, 4, 5, 6, 7)) basec$education = factor(basec$education, levels = c(' 10th', ' 11th', ' 12th', ' 1st-4th', ' 5th-6th', ' 7th-8th', ' 9th', ' Assoc-acdm', ' Assoc-voc', ' Bachelors', ' Doctorate', ' 
HS-grad', ' Masters', ' Preschool', ' Prof-school', ' Some-college'), labels = c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)) basec$marital.status = factor(basec$marital.status, levels = c(' Divorced', ' Married-AF-spouse', ' Married-civ-spouse', ' Married-spouse-absent', ' Never-married', ' Separated', ' Widowed'), labels = c(1, 2, 3, 4, 5, 6, 7)) basec$occupation = factor(basec$occupation, levels = c(' Adm-clerical', ' Armed-Forces', ' Craft-repair', ' Exec-managerial', ' Farming-fishing', ' Handlers-cleaners', ' Machine-op-inspct', ' Other-service', ' Priv-house-serv', ' Prof-specialty', ' Protective-serv', ' Sales', ' Tech-support', ' Transport-moving'), labels = c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14)) basec$relationship = factor(basec$relationship, levels = c(' Husband', ' Not-in-family', ' Other-relative', ' Own-child', ' Unmarried', ' Wife'), labels = c(1, 2, 3, 4, 5, 6)) basec$race = factor(basec$race, levels = c(' Amer-Indian-Eskimo', ' Asian-Pac-Islander', ' Black', ' Other', ' White'), labels = c(1, 2, 3, 4, 5)) basec$native.country = factor(basec$native.country, levels = c(' Cambodia', ' Canada', ' China', ' Columbia', ' Cuba', ' Dominican-Republic', ' Ecuador', ' El-Salvador', ' England', ' France', ' Germany', ' Greece', ' Guatemala', ' Haiti', ' Holand-Netherlands', ' Honduras', ' Hong', ' Hungary', ' India', ' Iran', ' Ireland', ' Italy', ' Jamaica', ' Japan', ' Laos', ' Mexico', ' Nicaragua', ' Outlying-US(Guam-USVI-etc)', ' Peru', ' Philippines', ' Poland', ' Portugal', ' Puerto-Rico', ' Scotland', ' South', ' Taiwan', ' Thailand', ' Trinadad&Tobago', ' United-States', ' Vietnam', ' Yugoslavia'), labels = c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41)) basec$income = factor(basec$income, levels = c(' <=50K', ' >50K'), labels = c(0, 1)) #escalona as colunas que são numericas basec[which(sapply(basec,is.numeric))] <- 
scale(basec[which(sapply(basec,is.numeric))]) library(caTools) set.seed(1) divisao_c <- sample.split(basec$income, SplitRatio = 0.85) base_treinamento_c <- subset(basec, divisao_c == TRUE) base_teste_c <- subset(basec, divisao_c == FALSE) library(e1071) library(caret) classificador_nb_census <- naiveBayes(x <- base_treinamento_c[-15], y <- base_treinamento_c$income) previsoes_nb_census <- predict(classificador_nb_census, newdata = base_teste_c[-15]) matriz_confusao_nb_census <- table(base_teste_c[,15], previsoes_nb_census) #acerto de 82.14% confusionMatrix(matriz_confusao_nb_census)
#This is the R code for Applied Unsupervised Learning with R, lesson 6 #load the dataset for US Arrests data(USArrests) head(USArrests) #create a boxplot for rape arrests boxplot(USArrests$Rape) #find the highest values (in this case, the outliers) highest<-USArrests[which(USArrests$Rape>40),] print(highest) #load the rivers dataset data(rivers) head(rivers) boxplot(rivers) #perform a logarithm transformation and look at the resulting boxplot log_rivers<-log(rivers) boxplot(log_rivers) #find the interquartile range of the rivers data iqr<-unname(quantile(rivers,.75)-quantile(rivers,.25)) #find the upper and lower limits for non-outlier data upper_limit<-unname(quantile(rivers,.75)+1.5*iqr) lower_limit<-unname(quantile(rivers,.25)-1.5*iqr) #classify outliers as points outside our defined upper and lower limits rivers[which(rivers>upper_limit | rivers<lower_limit)] #define different limits based on preference upper_limit<-unname(quantile(rivers,.75)+3*iqr) lower_limit<-unname(quantile(rivers,.25)-3*iqr) #load rivers data data(rivers) #get the standard deviation of rivers data st_dev<-sd(rivers) #define upper and lower limits based on standard deviation upper_limit<-mean(rivers)+2*st_dev lower_limit<-mean(rivers)-2*st_dev #classify outliers based on upper and lower limits upper_outliers<-rivers[which(rivers>upper_limit)] lower_outliers<-rivers[which(rivers<lower_limit)] #rename columns of height-weight data names(raw)<-c('index','height','weight') #plot the height-weight data and observe the patterns plot(raw$height,raw$weight) #calculate the centroid of the data centroid<-c(mean(raw$height),mean(raw$weight)) #example of the distance between a given point and the centroid example_distance<-raw[1,c('height','weight')]-centroid #get the covariance matrix of the data, and that matrix's inverse cov_mat<-cov(raw[,c('height','weight')]) inv_cov_mat<-solve(cov_mat) #get the mahalanobis distance between our point and the centroid of the dataset: 
mahalanobis_dist<-t(matrix(as.numeric(example_distance)))%*% matrix(inv_cov_mat,nrow=2) %*% matrix(as.numeric(example_distance)) #run a loop that calculates a mahalanobis distance for every point all_distances<-NULL k<-1 while(k<=nrow(raw)){ the_distance<-raw[k,c('height','weight')]-centroid mahalanobis_dist<-t(matrix(as.numeric(the_distance)))%*% matrix(inv_cov_mat,nrow=2) %*% matrix(as.numeric(the_distance)) all_distances<-c(all_distances,mahalanobis_dist) k<-k+1 } #plot all observations that have particularly high mahalanobis distances plot(raw$height,raw$weight) points(raw$height[which(all_distances>quantile(all_distances,.9))],raw$weight[which(all_distances>quantile(all_distances,.9))],col='red',pch=19) #seasonality modeling #clean the raw seasonality data names(raw)<-c('month','sales') raw<-raw[1:108,] raw$period<-1:nrow(raw) #find the time trend in the data timetrend<-lm(sales~period+I(log(period)),data=raw) #obtain fitted values for the time trend raw$timetrend<-predict(timetrend,raw) #de-trend the data and plot the result raw$withouttimetrend<-raw$sales-raw$timetrend plot(raw$withouttimetrend,type='o') #create a matrix of monthly data, and get the mean value for each month seasonsmatrix = t(matrix(data = raw$withouttimetrend, nrow = 12)) seasons = colMeans(seasonsmatrix, na.rm = T) #create a 9-year repetition of the monthly averages raw$seasons<-c(rep(seasons,9)) #obtain de-trended, de-cycled data and plot it raw$error<-raw$sales-raw$timetrend-raw$seasons plot(raw$error,type='o') #plot all elements of seasonality modeling together par(mfrow=c(3,1)) plot(raw$timetrend,type='o') plot(raw$seasons,type='o') plot(raw$error,type='o') #get the standard deviation of the de-trended, de-cycled data stdev<-sd(raw$error) #find which data points are more than 2 standard deviations away from the mean of the data high_outliers<-which(raw$error>(mean(raw$error)+2*sd(raw$error))) low_outliers<-which(raw$error<(mean(raw$error)-2*sd(raw$error))) #Examine the observations 
that we have classified as outliers. raw[high_outliers,] raw[low_outliers,] #plot the points that we have classified as outliers plot(raw$period,raw$sales,type='o') points(raw$period[high_outliers],raw$sales[high_outliers],pch=19,col='red') points(raw$period[low_outliers],raw$sales[low_outliers],pch=19,col='blue')
/lesson6.R
permissive
SmithaShivakumar/Unsupervised-Learning-with-R
R
false
false
4,318
r
# This is the R code for Applied Unsupervised Learning with R, lesson 6

# Load the dataset for US Arrests
data(USArrests)
head(USArrests)

# Create a boxplot for rape arrests
boxplot(USArrests$Rape)

# Find the highest values (in this case, the outliers)
highest <- USArrests[which(USArrests$Rape > 40), ]
print(highest)

# Load the rivers dataset
data(rivers)
head(rivers)
boxplot(rivers)

# Perform a logarithm transformation and look at the resulting boxplot
log_rivers <- log(rivers)
boxplot(log_rivers)

# Find the interquartile range of the rivers data
iqr <- unname(quantile(rivers, .75) - quantile(rivers, .25))

# Find the upper and lower limits for non-outlier data
upper_limit <- unname(quantile(rivers, .75) + 1.5 * iqr)
lower_limit <- unname(quantile(rivers, .25) - 1.5 * iqr)

# Classify outliers as points outside our defined upper and lower limits
rivers[which(rivers > upper_limit | rivers < lower_limit)]

# Define different (more permissive) limits based on preference
upper_limit <- unname(quantile(rivers, .75) + 3 * iqr)
lower_limit <- unname(quantile(rivers, .25) - 3 * iqr)

# Load rivers data
data(rivers)

# Get the standard deviation of rivers data
st_dev <- sd(rivers)

# Define upper and lower limits based on standard deviation
upper_limit <- mean(rivers) + 2 * st_dev
lower_limit <- mean(rivers) - 2 * st_dev

# Classify outliers based on upper and lower limits
upper_outliers <- rivers[which(rivers > upper_limit)]
lower_outliers <- rivers[which(rivers < lower_limit)]

# Rename columns of height-weight data
# NOTE(review): `raw` is assumed to hold the height-weight data, loaded
# earlier in the workflow -- confirm it is read in before this point
names(raw) <- c('index', 'height', 'weight')

# Plot the height-weight data and observe the patterns
plot(raw$height, raw$weight)

# Calculate the centroid of the data
centroid <- c(mean(raw$height), mean(raw$weight))

# Example of the distance between a given point and the centroid
example_distance <- raw[1, c('height', 'weight')] - centroid

# Get the covariance matrix of the data, and that matrix's inverse
cov_mat <- cov(raw[, c('height', 'weight')])
inv_cov_mat <- solve(cov_mat)

# Get the (squared) mahalanobis distance between our point and the centroid
mahalanobis_dist <- t(matrix(as.numeric(example_distance))) %*%
  matrix(inv_cov_mat, nrow = 2) %*%
  matrix(as.numeric(example_distance))

# Compute the squared mahalanobis distance for every point at once with the
# vectorized stats::mahalanobis(), replacing the original while loop that
# grew `all_distances` with c() on every iteration (O(n^2) copying);
# the values are identical to the manual t(d) %*% inv_cov %*% d above
all_distances <- mahalanobis(raw[, c('height', 'weight')], centroid, cov_mat)

# Plot all observations that have particularly high mahalanobis distances
plot(raw$height, raw$weight)
high_md <- which(all_distances > quantile(all_distances, .9))
points(raw$height[high_md], raw$weight[high_md], col = 'red', pch = 19)

# Seasonality modeling

# Clean the raw seasonality data
# NOTE(review): `raw` is reused here for the monthly sales series -- it must
# be reloaded with that data before this section is run
names(raw) <- c('month', 'sales')
raw <- raw[1:108, ]
raw$period <- 1:nrow(raw)

# Find the time trend in the data
timetrend <- lm(sales ~ period + I(log(period)), data = raw)

# Obtain fitted values for the time trend
raw$timetrend <- predict(timetrend, raw)

# De-trend the data and plot the result
raw$withouttimetrend <- raw$sales - raw$timetrend
plot(raw$withouttimetrend, type = 'o')

# Create a matrix of monthly data, and get the mean value for each month
seasonsmatrix <- t(matrix(data = raw$withouttimetrend, nrow = 12))
seasons <- colMeans(seasonsmatrix, na.rm = TRUE)

# Repeat the monthly averages to cover the whole series; length.out
# generalizes the original hard-coded rep(seasons, 9) (9 years x 12 months)
# and stays correct if the series length changes
raw$seasons <- rep(seasons, length.out = nrow(raw))

# Obtain de-trended, de-cycled data and plot it
raw$error <- raw$sales - raw$timetrend - raw$seasons
plot(raw$error, type = 'o')

# Plot all elements of seasonality modeling together
par(mfrow = c(3, 1))
plot(raw$timetrend, type = 'o')
plot(raw$seasons, type = 'o')
plot(raw$error, type = 'o')

# Get the standard deviation of the de-trended, de-cycled data
stdev <- sd(raw$error)

# Find which data points are more than 2 standard deviations away from the
# mean of the data (reuse `stdev` instead of recomputing sd() twice)
high_outliers <- which(raw$error > (mean(raw$error) + 2 * stdev))
low_outliers <- which(raw$error < (mean(raw$error) - 2 * stdev))

# Examine the observations that we have classified as outliers
raw[high_outliers, ]
raw[low_outliers, ]

# Plot the points that we have classified as outliers
plot(raw$period, raw$sales, type = 'o')
points(raw$period[high_outliers], raw$sales[high_outliers], pch = 19, col = 'red')
points(raw$period[low_outliers], raw$sales[low_outliers], pch = 19, col = 'blue')
# Exploration + 2-D embedding (t-SNE / UMAP via smallvis) of Chile's CASEN
# household survey, aggregated per comuna (municipality).
library(tidyverse)
library(haven)
# devtools::install_github("jlmelville/smallvis/smallvis")
library(smallvis)

# Create the local data directory; try() so an already-existing dir is not fatal
try(dir.create("data/casen"))

# Download the survey archive only if it is not already on disk
# NOTE(review): the URL points at the 2015 archive but the file read below is
# the 2017 SPSS export -- confirm the intended survey year
url <- "http://observatorio.ministeriodesarrollosocial.gob.cl/casen-multidimensional/casen/docs/casen_2015_spss.rar"
file <- file.path("data", "casen", basename(url))
if(!file.exists(file)){
  download.file(url, file)
}

# archive already decompressed --------------------------------------------

casen <- read_sav("data/casen/Casen 2017.sav")
casen
glimpse(casen)
str(casen)

# survey questions ---------------------------------------------------------

# Each SPSS column stores its question wording in the "label" attribute;
# collect them into a column-name / question lookup table
str(casen$o13)
names(casen$o13)
attr(casen$o13, "label")
pregs <- map_chr(casen, ~ attr(.x, "label"))
pregs <- data_frame(col = names(casen)) %>%
  mutate(preg = pregs)
pregs

# pregs %>%
#   writexl::write_xlsx("data/casen/casen_preguntas_17.xlsx")

# selected variables -------------------------------------------------------

vars <- c("region", "comuna", "tot_hog", "tot_par", "tot_nuc", "tot_per",
          "sexo", "edad", "ecivil", "pareja", "e1", "e2a", "o1", "o2",
          "y1", "y2d", "y3a", "y27a", "y27b", "y27c", "y27d", "y27e",
          "s4", "s5", "s12", "s13", "r1a", "v1", "v2", "v8", "v10", "v11",
          "v13", "v18", "v19", "v23", "v26", "y0101", "y0301", "ESC",
          "educ", "depen", "hacinamiento")
# Keep only the variables that actually exist in this survey year
vars <- intersect(vars, names(casen))
# NOTE(review): passing a bare external vector to select() is deprecated
# tidyselect behaviour; all_of(vars) is the explicit modern form
casen <- select(casen, vars)
gc()
glimpse(casen)

# cluster -------------------------------------------------------------------

# casendict <- casen %>%
#   distinct(region, comuna) %>%
#   mutate(comuna_lbl = as_factor(comuna))

# Convert labelled SPSS columns to factors
casen <- casen %>%
  mutate_if(is.labelled, as_factor)

# These columns are numeric quantities; round-trip through character to
# drop the factor level coding and recover the numbers
casen <- casen %>%
  mutate_at(vars(y1, y2d, s4, s5, v8), function(x) as.numeric(as.character(x)))

# casen %>%
#   map(class) %>%
#   unlist() %>%
#   table()
#
# casen <- casen %>%
#   select_if(negate(is.character))

# numeric part of casen (grouping keeps region/comuna through select_if)
casen_num <- casen %>%
  group_by(region, comuna) %>%
  select_if(is.numeric) %>%
  # mutate_all(replace_na, 0) %>%
  ungroup()
names(casen_num)

# factor part of casen: explicit NA level, then lump rare levels into "Otra"
# for every column except region/comuna
casen_fac <- casen %>%
  group_by(region, comuna) %>%
  select_if(is.factor) %>%
  ungroup() %>%
  mutate_all(fct_explicit_na, na_level = "NA") %>%
  mutate_at(vars(-1, -2), fct_lump, other_level = "Otra")

# Prefix every factor level with its column name so level names stay unique
# after the per-level spread further below
nms <- names(casen_fac)
casen_fac <- map2_dfc(casen_fac, nms, function(v, n){
  levels(v) <- paste(n, levels(v), sep = "_")
  v
})

# region/comuna back to plain strings (strip the prefix just added)
casen_fac <- casen_fac %>%
  mutate_at(vars(1, 2), as.character) %>%
  mutate_at(vars(1, 2), str_remove, "^.*_")

# per comuna ----------------------------------------------------------------

# Numeric variables: mean per comuna, remaining NAs set to 0
casen_num_c <- casen_num %>%
  group_by(region, comuna) %>%
  summarise_all(mean, na.rm = TRUE) %>%
  mutate_all(replace_na, 0) %>%
  ungroup()

# Turn one factor column into a one-row data frame of level proportions,
# wrapped in a list so summarise_all() can carry it as a list-column
table_list <- function(x) {
  # x <- sample(LETTERS[1:5], 20, 1:5, replace = TRUE)
  t <- table(x)
  data_frame(
    cat = names(t),
    val = as.numeric(prop.table(t))
  ) %>%
    spread(cat, val) %>%
    list()
}

# Factor variables: level proportions per comuna
casen_fac_c <- casen_fac %>%
  group_by(region, comuna) %>%
  summarise_all(table_list) %>%
  unnest()

casen_c <- left_join(casen_num_c, casen_fac_c)
# NOTE(review): this immediately overwrites the join above and keeps only the
# numeric summary -- confirm whether dropping the factor proportions is
# intentional
casen_c <- casen_num_c
glimpse(casen_c)

# 2-D embeddings of the comuna-level summary
tsne_casen_c <- smallvis(casen_c, perplexity = 25,verbose = TRUE)
umap_casen_c <- smallvis(casen_c, method = "umap", perplexity = 25, eta = 0.01)

# t-SNE map, one panel per region
casen_c %>%
  bind_cols(as.data.frame(tsne_casen_c)) %>%
  ggplot(aes(V1, V2, color = region, label = comuna)) +
  geom_point() +
  ggrepel::geom_text_repel(size = 2) +
  facet_wrap(~region, scales = "free")

# UMAP map, one panel per region
casen_c %>%
  bind_cols(as.data.frame(umap_casen_c)) %>%
  ggplot(aes(V1, V2, color = region, label = comuna)) +
  geom_point() +
  ggrepel::geom_text_repel(size = 2, color = "black") +
  scale_color_viridis_d() +
  facet_wrap(~region)
/R/casen.R
no_license
jbkunst/intro-r-workshop
R
false
false
3,964
r
library(tidyverse) library(haven) # devtools::install_github("jlmelville/smallvis/smallvis") library(smallvis) try(dir.create("data/casen")) url <- "http://observatorio.ministeriodesarrollosocial.gob.cl/casen-multidimensional/casen/docs/casen_2015_spss.rar" file <- file.path("data", "casen", basename(url)) if(!file.exists(file)){ download.file(url, file) } # ya descomprimido el archivo --------------------------------------------- casen <- read_sav("data/casen/Casen 2017.sav") casen glimpse(casen) str(casen) # preguntas --------------------------------------------------------------- str(casen$o13) names(casen$o13) attr(casen$o13, "label") pregs <- map_chr(casen, ~ attr(.x, "label")) pregs <- data_frame(col = names(casen)) %>% mutate(preg = pregs) pregs # pregs %>% # writexl::write_xlsx("data/casen/casen_preguntas_17.xlsx") # variables seleccionadas ------------------------------------------------- vars <- c("region", "comuna", "tot_hog", "tot_par", "tot_nuc", "tot_per", "sexo", "edad", "ecivil", "pareja", "e1", "e2a", "o1", "o2", "y1", "y2d", "y3a", "y27a", "y27b", "y27c", "y27d", "y27e", "s4", "s5", "s12", "s13", "r1a", "v1", "v2", "v8", "v10", "v11", "v13", "v18", "v19", "v23", "v26", "y0101", "y0301", "ESC", "educ", "depen", "hacinamiento") vars <- intersect(vars, names(casen)) casen <- select(casen, vars) gc() glimpse(casen) # cluster ----------------------------------------------------------------- # casendict <- casen %>% # distinct(region, comuna) %>% # mutate(comuna_lbl = as_factor(comuna)) casen <- casen %>% mutate_if(is.labelled, as_factor) casen <- casen %>% mutate_at(vars(y1, y2d, s4, s5, v8), function(x) as.numeric(as.character(x))) # casen %>% # map(class) %>% # unlist() %>% # table() # # casen <- casen %>% # select_if(negate(is.character)) # casen num casen_num <- casen %>% group_by(region, comuna) %>% select_if(is.numeric) %>% # mutate_all(replace_na, 0) %>% ungroup() names(casen_num) # casen factor casen_fac <- casen %>% group_by(region, comuna) 
%>% select_if(is.factor) %>% ungroup() %>% mutate_all(fct_explicit_na, na_level = "NA") %>% mutate_at(vars(-1, -2), fct_lump, other_level = "Otra") nms <- names(casen_fac) casen_fac <- map2_dfc(casen_fac, nms, function(v, n){ levels(v) <- paste(n, levels(v), sep = "_") v }) casen_fac <- casen_fac %>% mutate_at(vars(1, 2), as.character) %>% mutate_at(vars(1, 2), str_remove, "^.*_") # por comuna -------------------------------------------------------------- casen_num_c <- casen_num %>% group_by(region, comuna) %>% summarise_all(mean, na.rm = TRUE) %>% mutate_all(replace_na, 0) %>% ungroup() table_list <- function(x) { # x <- sample(LETTERS[1:5], 20, 1:5, replace = TRUE) t <- table(x) data_frame( cat = names(t), val = as.numeric(prop.table(t)) ) %>% spread(cat, val) %>% list() } casen_fac_c <- casen_fac %>% group_by(region, comuna) %>% summarise_all(table_list) %>% unnest() casen_c <- left_join(casen_num_c, casen_fac_c) casen_c <- casen_num_c glimpse(casen_c) tsne_casen_c <- smallvis(casen_c, perplexity = 25,verbose = TRUE) umap_casen_c <- smallvis(casen_c, method = "umap", perplexity = 25, eta = 0.01) casen_c %>% bind_cols(as.data.frame(tsne_casen_c)) %>% ggplot(aes(V1, V2, color = region, label = comuna)) + geom_point() + ggrepel::geom_text_repel(size = 2) + facet_wrap(~region, scales = "free") casen_c %>% bind_cols(as.data.frame(umap_casen_c)) %>% ggplot(aes(V1, V2, color = region, label = comuna)) + geom_point() + ggrepel::geom_text_repel(size = 2, color = "black") + scale_color_viridis_d() + facet_wrap(~region)
# Plot #2 of the Electric Power Consumption assignment: global active power
# over time for 2007-02-01 and 2007-02-02, written to plot2.png.

# Read the full data set from the working directory; "?" marks missing values
hpower <- read.table("household_power_consumption.txt",
                     header = TRUE, sep = ";", na.strings = "?")

# Convert the Date column up front, then keep only the two target days
hpower$Date <- as.Date(hpower$Date, "%d/%m/%Y")
target_days <- as.Date(c("2007-02-01", "2007-02-02"))
hpower <- hpower[hpower$Date %in% target_days, ]

# Combine date and time into a single date-time column for the x axis
hpower$DateTime <- strptime(paste(hpower$Date, hpower$Time), "%Y-%m-%d %H:%M:%S")

# Render the line chart straight to a 480x480 PNG device
png(filename = "plot2.png", width = 480, height = 480)
plot(hpower$DateTime, hpower$Global_active_power,
     type = "l", ylab = "Global Active Power (kilowatts)", xlab = "")
dev.off()
/plot2.R
no_license
briangill/ExData_Plotting1
R
false
false
709
r
#Load data file "household_power_consumption.txt" stored in working directory hpower <- read.table("household_power_consumption.txt", header=TRUE, sep=";", na.strings="?") #subset to only 2007-02-01 and 2007-02-02 hpower <- hpower[as.Date(hpower$Date,"%d/%m/%Y") == "2007-02-01" | as.Date(hpower$Date,"%d/%m/%Y") == "2007-02-02",] #Convert Date and Time variables to Date/Time classes hpower$Date <- as.Date(hpower$Date,"%d/%m/%Y") hpower$DateTime <- strptime(paste(hpower$Date,hpower$Time),"%Y-%m-%d %H:%M:%S") #create plot #2 png(filename = "plot2.png", width = 480, height = 480) plot(hpower$DateTime,hpower$Global_active_power, type="l", ylab="Global Active Power (kilowatts)", xlab="") dev.off()
# Grid-based canopy metrics per scan-angle class from an ALS point-cloud
# catalog (lidR). For every rs-metre pixel, one flight line is picked per
# scan-angle class and the max point height of that line is reported.
library(lidR)
library(dplyr)
library(future)
library(e1071)

lasc <- readLAScatalog("C:/1_Work/2_Ciron/Data/ALS/norm/")
rs <- 30 #resolution of the grid
area_filt <- 810  # minimum convex-hull area for a flight line to count in a pixel

#processing parameters
plan(multisession, workers = 6L)
opt_chunk_buffer(lasc) <- 0
opt_chunk_size(lasc) <- 0
opt_stop_early(lasc) <- FALSE
plot(lasc, chunk_pattern = TRUE)
opt_select(lasc) <- "*"
opt <- list(raster_alignment = rs, automerge = TRUE)

#creating classes
# Scan-angle classes (degrees) and a representative median value per class
class_list <- list(0:10, 10:20, 20:30, 30:40, 40:50)
class_median <- list(0, 15, 25, 35, 45)
names(class_list) = letters[seq_along(class_list)]
breaks = c(min(class_list[[1]]), sapply(class_list, max))
med_data = data.frame(median = unlist(class_median), class = names(class_list))
med_data <- mutate(med_data, class=as.vector(class))

# Per-chunk worker for catalog_apply(): load the chunk (dropping points below
# 1 m), tag flight lines, rasterize mymets() on the rs grid, and crop the
# result back to the chunk extent.
met_calc = function(cluster, res)
{
  las = readLAS(cluster, filter = "-drop_z_below 1")
  if (is.empty(las)) return(NULL)
  las <- lasflightline(las, dt = 30)
  out <- grid_metrics(las, mymets(X, Y, Z, ScanAngleRank, flightlineID), rs)
  bbox <- raster::extent(cluster)
  out <- raster::crop(out, bbox)
  return(out)
}

# Convex-hull area (XY plane) of one flight line's points within a pixel.
# NOTE(review): Polygon() comes from the sp package, which is not library()'d
# above -- presumably attached via lidR/raster; confirm.
area_calc = function(dfr)
{
  #print(length(dfr$x))
  ch_pts <- chull(dfr$x,dfr$y)
  ch_pts <- c(ch_pts, ch_pts[1])  # repeat the first vertex to close the polygon
  dfr <- dfr[ch_pts,]
  dfr <- dfr %>% select(1:2)
  ch_poly <- Polygon(dfr, hole=F)
  return(ch_poly@area)
}

# Pixel-level metric: for each scan-angle class, pick the flight line whose
# mean |scan angle| is closest to the class median (among lines with hull
# area > area_filt), then return that line's max height, one value per class.
mymets = function(x, y, z, sc, flid)
{
  #z-Z co-ordinates
  #sc-scan angle
  #flid-flight line ID
  dframe <- as.data.frame(cbind(x, y, z, sc, flid))
  #print(colnames(dframe))
  dframe <- dframe %>%
    #drop all flight lines less than 1000 points
    group_by(flid) %>%
    filter(n()>1000) %>%
    ungroup()
  flist <- unique(dframe$flid) #get the unique flight lines
  meanlist <- c()
  fl <- data.frame()
  #For all the unique flight lines, the means of the scan angles of the points in a flight line are computed
  for (i in flist)
  {
    dframe1 <- dframe[dframe$flid == i, ]
    mean_val <- (abs(mean(dframe1$sc)))
    ch_ar <- area_calc(dframe1)
    fl <- rbind(fl, c(i, mean_val, ch_ar))
  }
  if(dim(fl)[1]!=0 && dim(fl)[2]!=0)
  {
    names(fl) <- c("flist", "meanlist", "pt_area")
    # Discard flight lines with too little coverage of this pixel
    fl <- fl %>% filter(pt_area>area_filt)
  }
  if (dim(fl)[1]>0)
  {
    #Each class will have multiple flight lines. From among the flight lines (in a class), the flight line whose mean angle
    #closest to the median of the class is picked as a representative
    fin_flist <- fl %>%
      # assign classes
      mutate(
        class = cut(meanlist, breaks = breaks, labels = names(class_list)),
        class = as.vector(class)
      ) %>%
      # get medians
      left_join(med_data, by = "class") %>%
      # within each class...
      group_by(class) %>%
      #filter out any rows with low area of coverage in a pixel
      #filter(pt_area>area_filt) %>%
      # keep the row with the smallest absolute difference to the median
      slice(which.min(abs(meanlist - median))) %>%
      # sort in original order
      arrange(flist)
    fin_flist <- as.data.frame(fin_flist)
    #Get the classes which are not present in fin_flist
    miss <- setdiff(class_median, fin_flist$median)
    #To ensure the final table fin_flist has 5 rows with each representing a class, the missing rows are replaced with flight line
    # no. "99" and class "x"
    # (filler rows get flist ids 91, 92, ... -- anything >= 90 is treated as
    # "no real flight line" in the metric loop below)
    count = 91
    for (i in miss)
    {
      fin_flist <- rbind(fin_flist, c(count, 99, 0, 'x', i))
      count = count + 1
    }
    fin_flist <- fin_flist %>% arrange(median)
    #print(fin_flist)
    #Computation of the metrics and compiling in a list. Each value in a list is the computed metric for a given class
    met_list <- c()
    for (i in 1:length(fin_flist$flist))
    {
      if (fin_flist$flist[i] < 90)
      {
        # Real flight line: metric is the max height of its points
        dframe2 <- dframe[dframe$flid == fin_flist$flist[i], ]
        met_list <- c(met_list, max(dframe2$z))
      }
      else
      {
        # Filler row: no flight line represents this class in the pixel
        met_list <- c(met_list, NA)
      }
    }
    mets = list(
      cl1 = met_list[1],
      cl2 = met_list[2],
      cl3 = met_list[3],
      cl4 = met_list[4],
      cl5 = met_list[5])
    return(mets)
  }
  # else
  # {
  #   mets = list(
  #     cl1 = -999,
  #     cl2 = -999,
  #     cl3 = -999,
  #     cl4 = -999,
  #     cl5 = -999)
  #
  #   return(mets)
  # }
}

# Run the per-chunk worker over the whole catalog
output_ku <- catalog_apply(lasc, met_calc, res = rs, .options = opt)
/src/1_main_w_ar_filter.R
no_license
drkrd/grid_based_analysis_of_impact_of_scan_angles
R
false
false
4,457
r
library(lidR) library(dplyr) library(future) library(e1071) lasc <- readLAScatalog("C:/1_Work/2_Ciron/Data/ALS/norm/") rs <- 30 #resolution of the grid area_filt <- 810 #processing parameters plan(multisession, workers = 6L) opt_chunk_buffer(lasc) <- 0 opt_chunk_size(lasc) <- 0 opt_stop_early(lasc) <- FALSE plot(lasc, chunk_pattern = TRUE) opt_select(lasc) <- "*" opt <- list(raster_alignment = rs, automerge = TRUE) #creating classes class_list <- list(0:10, 10:20, 20:30, 30:40, 40:50) class_median <- list(0, 15, 25, 35, 45) names(class_list) = letters[seq_along(class_list)] breaks = c(min(class_list[[1]]), sapply(class_list, max)) med_data = data.frame(median = unlist(class_median), class = names(class_list)) med_data <- mutate(med_data, class=as.vector(class)) met_calc = function(cluster, res) { las = readLAS(cluster, filter = "-drop_z_below 1") if (is.empty(las)) return(NULL) las <- lasflightline(las, dt = 30) out <- grid_metrics(las, mymets(X, Y, Z, ScanAngleRank, flightlineID), rs) bbox <- raster::extent(cluster) out <- raster::crop(out, bbox) return(out) } area_calc = function(dfr) { #print(length(dfr$x)) ch_pts <- chull(dfr$x,dfr$y) ch_pts <- c(ch_pts, ch_pts[1]) dfr <- dfr[ch_pts,] dfr <- dfr %>% select(1:2) ch_poly <- Polygon(dfr, hole=F) return(ch_poly@area) } mymets = function(x, y, z, sc, flid) { #z-Z co-ordinates #sc-scan angle #flid-flight line ID dframe <- as.data.frame(cbind(x, y, z, sc, flid)) #print(colnames(dframe)) dframe <- dframe %>% #drop all flight lines less than 1000 points group_by(flid) %>% filter(n()>1000) %>% ungroup() flist <- unique(dframe$flid) #get the unique flight lines meanlist <- c() fl <- data.frame() #For all the unique flight lines, the means of the scan angles of the points in a flight line are computed for (i in flist) { dframe1 <- dframe[dframe$flid == i, ] mean_val <- (abs(mean(dframe1$sc))) ch_ar <- area_calc(dframe1) fl <- rbind(fl, c(i, mean_val, ch_ar)) } if(dim(fl)[1]!=0 && dim(fl)[2]!=0) { names(fl) <- c("flist", 
"meanlist", "pt_area") fl <- fl %>% filter(pt_area>area_filt) } if (dim(fl)[1]>0) { #Each class will have multiple flight lines. From among the flight lines (in a class), the flight line whose mean angle #closest to the median of the class is picked as a representative fin_flist <- fl %>% # assign classes mutate( class = cut(meanlist, breaks = breaks, labels = names(class_list)), class = as.vector(class) ) %>% # get medians left_join(med_data, by = "class") %>% # within each class... group_by(class) %>% #filter out any rows with low area of coverage in a pixel #filter(pt_area>area_filt) %>% # keep the row with the smallest absolute difference to the median slice(which.min(abs(meanlist - median))) %>% # sort in original order arrange(flist) fin_flist <- as.data.frame(fin_flist) #Get the classes which are not present in fin_flist miss <- setdiff(class_median, fin_flist$median) #To ensure the final table fin_flist has 5 rows with each representing a class, the missing rows are replaced with flight line # no. "99" and class "x" count = 91 for (i in miss) { fin_flist <- rbind(fin_flist, c(count, 99, 0, 'x', i)) count = count + 1 } fin_flist <- fin_flist %>% arrange(median) #print(fin_flist) #Computation of the metrics and compiling in a list. Each value in a list is the computed metric for a given class met_list <- c() for (i in 1:length(fin_flist$flist)) { if (fin_flist$flist[i] < 90) { dframe2 <- dframe[dframe$flid == fin_flist$flist[i], ] met_list <- c(met_list, max(dframe2$z)) } else { met_list <- c(met_list, NA) } } mets = list( cl1 = met_list[1], cl2 = met_list[2], cl3 = met_list[3], cl4 = met_list[4], cl5 = met_list[5]) return(mets) } # else # { # mets = list( # cl1 = -999, # cl2 = -999, # cl3 = -999, # cl4 = -999, # cl5 = -999) # # return(mets) # } } output_ku <- catalog_apply(lasc, met_calc, res = rs, .options = opt)
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gscp_10c_build_adj_and_cooccurrence_matrices.R
\name{create_adj_matrix_with_spp_rows_vs_PU_cols}
\alias{create_adj_matrix_with_spp_rows_vs_PU_cols}
\title{Create adjacency matrix with species rows vs planning unit columns}
\usage{
create_adj_matrix_with_spp_rows_vs_PU_cols(
  num_spp,
  num_PUs,
  PU_spp_pair_indices,
  PU_costs,
  spp_col_name,
  PU_col_name,
  dependent_node_IDs,
  correct_solution_vector_is_known
)
}
\arguments{
\item{num_spp}{integer number of species in the problem}

\item{num_PUs}{integer number of planning units}

\item{PU_spp_pair_indices}{2 column data frame of PU IDs vs species IDs,
where each row identifies the ID of a given species that occurs on the
given planning unit}

\item{PU_costs}{numeric vector of planning unit costs}

\item{spp_col_name}{character string giving species column name in data frames}

\item{PU_col_name}{character string giving planning unit column name in data frames}

\item{dependent_node_IDs}{integer vector of IDs of planning units contained
in a constructed correct solution to a Xu problem}

\item{correct_solution_vector_is_known}{boolean flag indicating whether a
correct optimal solution vector is known for the problem (as opposed, for
example, to only knowing the correct cost)}
}
\value{
Returns bpm; integer matrix with one row for each species and one column
for each planning unit. Each matrix entry specifies whether that species
occupies that planning unit; 1 indicates the species does occupy the
planning unit and 0 indicates it does not.
}
\description{
Create numeric adjacency matrix with one row for each species and one
column for each planning unit and each matrix entry specifying whether that
species occupies that planning unit. A 1 indicates the species does occupy
the planning unit and 0 indicates it does not.
}
/man/create_adj_matrix_with_spp_rows_vs_PU_cols.Rd
no_license
langfob/bdpg
R
false
true
1,900
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/gscp_10c_build_adj_and_cooccurrence_matrices.R \name{create_adj_matrix_with_spp_rows_vs_PU_cols} \alias{create_adj_matrix_with_spp_rows_vs_PU_cols} \title{Create adjacency matrix with species rows vs planning unit columns} \usage{ create_adj_matrix_with_spp_rows_vs_PU_cols( num_spp, num_PUs, PU_spp_pair_indices, PU_costs, spp_col_name, PU_col_name, dependent_node_IDs, correct_solution_vector_is_known ) } \arguments{ \item{num_spp}{integer number of species in the problem} \item{num_PUs}{integer number of planning units} \item{PU_spp_pair_indices}{2 column data frame of PU IDs vs species IDs, where each row identifies a the ID of a given species that occurs on the given planning unit} \item{PU_costs}{numeric vector of planning unit costs} \item{spp_col_name}{character string giving species column name in data frames} \item{PU_col_name}{character string giving planning unit column name in data frames} \item{dependent_node_IDs}{integer vector of IDs of planning units contained in a constructed correct solution to a Xu problem} \item{correct_solution_vector_is_known}{boolean flag indicating whether a correct optimal solution vector is known for the problem (as opposed for example, to only knowing the correct cost)} } \value{ Returns bpm; integer matrix with one row for each species and one column for each planning unit. Each matrix entry specifies whether that species occupies that planning unit; 1 indicates the species does occupy the planning unit and 0 indicates it does not. } \description{ Create numeric adjacency matrix with one row for each species and one column for each planning unit and each matrix entry specifying whether that species occupies that planning unit. A 1 indicates the species does occupy the planning unit and 0 indicates it does not. }
##XXXXXXXXXXXXXX##
## Chi-sqr Test ##
##XXXXXXXXXXXXXX##

# Translate a p-value into a plain-language verdict at the given cutoff.
# Factored out because the same check was duplicated (with a typo:
# "These are note statistially different") after both tests below.
significance_verdict <- function(p_value, cutoff = 0.05) {
  if (p_value < cutoff) {
    "These are statistically different"
  } else {
    "These are not statistically different"
  }
}

# Purchase counts per arm: c(purchases, non-purchases)
arm_1 <- c(100, 900) # i.e. Intervention
arm_2 <- c(10, 990)  # i.e. Control

exp_df <- data.frame(rbind(arm_1, arm_2)) # assimilate data into simple 2x2 table
names(exp_df) <- c("purchase", "no purchase") # add column names (optional)
exp_df # verify data looks right

# Chi-sqr test function
chi_out <- chisq.test(exp_df)
str(chi_out) # check out the parameters of the output
chi_out$p.value # select one! p-value is the most important

# Is this statistically significant?
pval_cut <- 0.05
isSignificant <- significance_verdict(chi_out$p.value, pval_cut)
isSignificant

##XXXXXXXXXXXXXX##
##    t-test    ##
##XXXXXXXXXXXXXX##

# Per-user cart sizes ($) for each arm (use <- consistently for assignment)
arm_1 <- c(18, 21, 22, 19, 20, 22, 23, 24, 22, 21) # Intervention
arm_2 <- c(16, 17, 16, 18, 14, 12, 15, 14, 15, 17) # Control

# Check out what the distribution looks like
plot(density(arm_1)) # Looks about "normal"?
plot(density(arm_2)) # Looks about "normal"?

# Check out how averages and variation line up (does it look like there is overlap?)
boxplot(arm_1, arm_2, ylab = "Avg Cart Size ($)",
        names = c("arm_1", "arm_2"), main = "Avg Cart Size ($) By Arm")

# Perform the t-test | assume the variances are about equal and data approx normal
t_test_out <- t.test(arm_1, arm_2)
str(t_test_out) # check out the parameters of the output
t_test_out$p.value # select one! p-value is the most important

# Is this statistically significant?
pval_cut <- 0.05
isSignificant <- significance_verdict(t_test_out$p.value, pval_cut)
isSignificant

## Calculate the standard error of the mean for each arm
## (the original comment said "Standard Deviation", but sqrt(var/n) is the SEM)
std_1 <- sqrt(var(arm_1) / length(arm_1))
std_2 <- sqrt(var(arm_2) / length(arm_2))
/RCODE_Experiment_Evaluation.R
no_license
mshump/Experimentation_Overview
R
false
false
2,062
r
##XXXXXXXXXXXXXX## ## Chi-sqr Test ## ##XXXXXXXXXXXXXX## arm_1 <- c(100,900) # i.e. Intervention arm_2 <- c(10,990) # i.e. Control exp_df <- data.frame(rbind(arm_1,arm_2)) #assimilate data into simple 2x2 table names(exp_df) <- c("purchase", "no purchase") # add columns names (optional) exp_df # verify data looks right # Chi-sqr test function chi_out <- chisq.test(exp_df) str(chi_out) # check out the parameters of the output chi_out$p.value # select one! p-value is the most important # is this statistically signficant ?? pval_cut <- 0.05 isSignificant <- if(chi_out$p.value < pval_cut){"These are statistically different" }else{"These are note statistially different"} isSignificant ##XXXXXXXXXXXXXX## ## t-test ## ##XXXXXXXXXXXXXX## arm_1 = c(18,21,22,19,20,22,23,24,22,21) # Intervention arm_2 = c(16,17,16,18,14,12,15,14,15,17) # Control # Check out what the distribution looks like plot(density(arm_1)) # Looks about "normal"? plot(density(arm_2)) # Looks about "normal"? #Check out how averages and variation line up (does it look like there is overlap?) boxplot(arm_1,arm_2,ylab="Avg Cart Size ($)", names=c("arm_1","arm_2"), main="Avg Cart Size ($) By Arm") # Perform the t-test | lets assume the variations are about equal and they are approx normal t_test_out <- t.test(arm_1,arm_2) str(t_test_out) # check out the parameters of the output t_test_out$p.value # select one! p-value is the most important # is this statistically signficant ?? pval_cut <- 0.05 isSignificant <- if(t_test_out$p.value < pval_cut){"These are statistically different" }else{"These are note statistially different"} isSignificant ## Calculate Standard Deviation std_1 <- sqrt(var(arm_1)/length(arm_1)) std_2 <- sqrt(var(arm_2)/length(arm_2))
\name{ahull.grid} \alias{ahull.grid} \docType{data} \title{ahull grid} \description{Label the closest point on the alpha hull of the data.} \usage{"ahull.grid"}
/man/ahull.grid.Rd
no_license
tdhock/directlabels
R
false
false
166
rd
\name{ahull.grid} \alias{ahull.grid} \docType{data} \title{ahull grid} \description{Label the closest point on the alpha hull of the data.} \usage{"ahull.grid"}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/simple_math.R \name{percent_cutoff} \alias{percent_cutoff} \title{Find percentage of numbers that are above given thresholds.} \usage{ percent_cutoff(x, cutoffs = c(0.3, 0.5), digits = 2, below = F, inclusive = T) } \arguments{ \item{x}{(num vector) A vector of numbers.} \item{cutoffs}{(num vector) A vector of thresholds. Default=(.30, .50)} \item{digits}{(num scalar) The number of digits to round output to. Default=2.} \item{below}{(log scalar) Whether to count values below the cutoff (default false).} \item{inclusive}{(log scalar) Whether to include values at the cutoff (default true).} } \description{ Takes a numeric vector and a numeric vector of thresholds. Returns the percent of numbers in the first above each of the numbers in the second. } \examples{ percent_cutoff(iris$Sepal.Length, cutoffs = 4:8) percent_cutoff(iris$Sepal.Length, cutoffs = 4:8, below = T) #reverse cutoff percent_cutoff(c(1:3, NA, NaN, 4:6), cutoffs = 3) #ignores NA/NaN }
/man/percent_cutoff.Rd
permissive
Deleetdk/kirkegaard
R
false
true
1,044
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/simple_math.R \name{percent_cutoff} \alias{percent_cutoff} \title{Find percentage of numbers that are above given thresholds.} \usage{ percent_cutoff(x, cutoffs = c(0.3, 0.5), digits = 2, below = F, inclusive = T) } \arguments{ \item{x}{(num vector) A vector of numbers.} \item{cutoffs}{(num vector) A vector of thresholds. Default=(.30, .50)} \item{digits}{(num scalar) The number of digits to round output to. Default=2.} \item{below}{(log scalar) Whether to count values below the cutoff (default false).} \item{inclusive}{(log scalar) Whether to include values at the cutoff (default true).} } \description{ Takes a numeric vector and a numeric vector of thresholds. Returns the percent of numbers in the first above each of the numbers in the second. } \examples{ percent_cutoff(iris$Sepal.Length, cutoffs = 4:8) percent_cutoff(iris$Sepal.Length, cutoffs = 4:8, below = T) #reverse cutoff percent_cutoff(c(1:3, NA, NaN, 4:6), cutoffs = 3) #ignores NA/NaN }
# install libraries install.packages("nnet"); install.package("neuralnet"); install.packages("multicore"); install.packages("caret"); install.packages("nlme"); install.packages("R.utils"); library(multicore) library(nnet) library(neuralnet) library(caret) library(nlme) library(R.utils) # read data setwd("C:/Users/Paavni/My Academics/Quarter IV/CS 229/Project/Final_Data/") # setwd("~/CS229/Project/Final_Data/") sourceDirectory("Functions/") kg = read.csv("kishanganj_features.csv",header = FALSE, sep = ",") kg = kg[,-1] # mean for each variable meancity = colMeans(kg); # normalization kgn = normalization(kg) # dividing into test and train train = kg[c(1:1068),] test = kg[c(1069:1224),] trainn = kgn[c(1:1068),] testn = kgn[c(1069:1224),] ################################################################################################################################################ # OUR METHOD ################################################################################################################################################ # Window Size k = 3; s = 20; decay = 0.00001; n_f = nnet_sgd(trainn, maxit = 1000, s = 20, decay = 0.00001, future = FALSE); predtrain = de_normalization(as.matrix(n_f$p), as.matrix(kg[,11])) actualtrain = de_normalization(as.matrix(trainn[,11]), as.matrix(kg[,11])) we = weighted_error(predtrain, actualtrain) mae = mean(abs(predtrain-actualtrain)) rmse = sqrt(mean((predtrain-actualtrain)^2)) hist(abs(predtrain-actualtrain), xlab = "Absolute Error", col = "red") par(mfrow=c(2,1)) plot(actualtrain, type='l', col = "blue",xlab = "Time", ylab = "Actual Rainfall(mm)"); plot(predtrain, type='l', col = "red", xlab = "Time", ylab = "Predicted Rainfall(mm)") n_f_test = nnet_sgd(testn,maxit = 1000, s = 20, decay = 0.00001, future = FALSE); pred = de_normalization(as.matrix(n_f_test$p), as.matrix(kg[,11])) actual = de_normalization(as.matrix(testn[,11]), as.matrix(kg[,11])) wet = weighted_error(pred, actual) maet = mean(abs(pred-actual)) rmset = 
sqrt(mean((pred-actual)^2)) ################################################################################################################################################ # TUNING ################################################################################################################################################ nnet.wrapper.k=function(k){ nnet_sgd(trainn,k=k, window = TRUE) } res.k=mclapply(c(3,5,7,9),nnet.wrapper.k) we = rep(0, length(res.k)); mae = rep(0, length(res.k)) for(i in 1:length(res.k)) { pred = de_normalization(as.matrix(res.k[[i]]$p), as.matrix(kg[,11])) actual = de_normalization(as.matrix(trainn[,11]), as.matrix(kg[,11])) we[i] = weighted_error(pred, actual) mae[i] = mean(abs(pred-actual)) } nnet.wrapper.s=function(s){ n_f = nnet_sgd(trainn,s=s, future = FALSE) } res.s=mclapply(c(5,10,15,20,25),nnet.wrapper.s) we = rep(0, length(res.s)); mae = rep(0, length(res.s)) for(i in 1:length(res.s)) { pred = de_normalization(as.matrix(res.s[[i]]$p), as.matrix(kg[,11])) actual = de_normalization(as.matrix(trainn[,11]), as.matrix(kg[,11])) we[i] = weighted_error(pred, actual) mae[i] = mean(abs(pred-actual)) } nnet.wrapper.d=function(decay){ n_f = nnet_sgd(trainn,decay = decay, s= 20, future = FALSE) } res.d=mclapply(c(1,0.1,0.01,0.001,0.0001, 0.00001),nnet.wrapper.d) we = rep(0, length(res.d)); mae = rep(0, length(res.d)) for(i in 1:length(res.d)) { pred = de_normalization(as.matrix(res.d[[i]]$p), as.matrix(kg[,11])) actual = de_normalization(as.matrix(trainn[,11]), as.matrix(kg[,11])) we[i] = weighted_error(pred, actual) mae[i] = mean(abs(pred-actual)) } nnet.wrapper.i=function(maxit){ n_f = nnet_sgd(trainn,maxit = maxit, decay = 0.00001, s= 20, future = FALSE) } res.i=mclapply(c(100,500,1000,1500),nnet.wrapper.i) we = rep(0, length(res.i)); mae = rep(0, length(res.i)) for(i in 1:length(res.i)) { pred = de_normalization(as.matrix(res.i[[i]]$p), as.matrix(kg[,11])) actual = de_normalization(as.matrix(trainn[,11]), as.matrix(kg[,11])) we[i] = 
weighted_error(pred, actual) mae[i] = mean(abs(pred-actual)) } ################################################################################################################################################ # REGRESSION ################################################################################################################################################ kg_hist = read.csv("kishanganj_longfeatures.csv",header = FALSE, sep = ",") plot(kg_hist[c(1:60),1], kg_hist[c(1:60),24], type='o',ylab = "Rainfall (mm)", col= "blue", xlab = "Time", xaxt='n', pch = 18) axis(side=1, at=c(191206,191306,191406, 191506, 191606), labels=c("1912","1913","1914", "1915", "1916")) plot(kg_hist[c(1033:1092),1], kg_hist[c(1033:1092),24], type='o',ylab = "Rainfall (mm)", col= "blue", xlab = "Time", xaxt='n', pch = 18) axis(side=1, at=c(199806,199906,200006, 200106, 200206), labels=c("1998","1999","2000", "2001", "2002")) kg_hist = kg_hist[,-1] feat.train.hist = kg_hist[c(1:937),] feat.test.hist = kg_hist[c(938:1092),] lm.hist <- lm(V24 ~ ., data=feat.train.hist) # lm.hist <- gls(V24 ~ ., data=feat.train.hist, method='ML', correlation=corARMA(p,q)) p.hist.train.s = predict(lm.hist, feat.train.hist[,c(1:22)]); error.hist.train.s = mean(abs(p.hist.train.s-feat.train.hist[,23])) we.hist.train.s = weighted_error(as.matrix(p.hist.train.s), as.matrix(feat.train.hist[,23])) p.hist.test.s = predict(lm.hist, feat.test.hist[,c(1:22)]); error.hist.test.s = mean(abs(p.hist.test.s-feat.test.hist[,23])) we.hist.test.s = weighted_error(as.matrix(p.hist.test.s), as.matrix(feat.test.hist[,23])) ################################################################################################################################################ kg_feat = read.csv("kishanganj_features.csv",header = FALSE, sep = ",") kg_feat = kg_feat[,-1] feat.train = kg_feat[c(1:1068),] feat.test = kg_feat[c(1069:1224),] lm.feat <- lm(V12 ~ ., data=feat.train) p.feat.train = predict(lm.feat, feat.train[,c(1:10)]); 
error.feat.train = mean(abs(p.feat.train-feat.train[,11])) we.feat.train = weighted_error(as.matrix(p.feat.train), as.matrix(feat.train[,11])) p.feat.test = predict(lm.feat, feat.test[,c(1:10)]); error.feat.test = mean(abs(p.feat.test-feat.test[,11])) we.feat.test = weighted_error(as.matrix(p.feat.test), as.matrix(feat.test[,11])) rmse.feat.test = sqrt(mean((p.feat.train-feat.test[,11])^2)) ################################################################################################################################################ s.feat = svd(kg_feat[,-11]) s.feat$d s.feat$d[c((length(s.feat$d)-2):length(s.feat$d))] = 0 x.feat = s.feat$u %*% diag(s.feat$d) %*% t(s.feat$v) x.feat = data.frame(cbind(x.feat, kg_feat[,11])) x.feat = x.feat[,c(-8,-9,-10)] x.feat.train = x.feat[c(1:1068),] x.feat.test = x.feat[c(1069:1224),] lm.feat.s <- lm(X11 ~ ., data=x.feat.train) p.feat.train.s = predict(lm.feat.s, x.feat.train[,c(1:7)]); error.feat.train.s = mean(abs(p.feat.train.s-x.feat.train[,8])) we.feat.train.s = weighted_error(as.matrix(p.feat.train.s), as.matrix(x.feat.train[,8])) p.feat.test.s = predict(lm.feat.s, x.feat.test[,c(1:7)]); error.feat.test.s = mean(abs(p.feat.test.s-x.feat.test[,8])) we.feat.test.s = weighted_error(as.matrix(p.feat.test.s), as.matrix(x.feat.test[,8])) ################################################################################################################################################ # NEURAL NETWORK ################################################################################################################################################ train = kg[c(1:1068),] test = kg[c(1069:1224),] trainn = kgn[c(1:1068),] testn = kgn[c(1069:1224),] h = 15 t = 0.1 sm = 1e+5 r = 1 lr=0.001 af = "tanh" nn_f = neuralnet(V12~(V2+V3+V4+V5+V6+V7+V8+V9+V10+V11), trainn, hidden = h, threshold = t, stepmax = sm, rep = r, learningrate=lr,err.fct = "sse", act.fct = af, linear.output = TRUE) pred.train = compute(nn_f, trainn[,c(1:10)])$net.result 
pred.train = de_normalization(as.matrix(pred.train), as.matrix(kg[,11])) actual.train = de_normalization(as.matrix(trainn[,11]), as.matrix(kg[,11])) error.train = mean(abs(pred.train-actual.train)) we.train= weighted_error(as.matrix(pred.train), as.matrix(actual.train)) pred.test = compute(nn_f, testn[,c(1:10)])$net.result pred.test = de_normalization(as.matrix(pred.test), as.matrix(kg[,11])) actual.test = de_normalization(as.matrix(testn[,11]), as.matrix(kg[,11])) error.test = mean(abs(pred.test-actual.test)) we.test= weighted_error(as.matrix(pred.test), as.matrix(actual.test)) error.train we.train error.test we.test ################################################################################################################################################ # LOGISTIC REGRESSION ################################################################################################################################################ kg = read.csv("kishanganj_features.csv",header = FALSE, sep = ",") # kg = normalization(kg) flood = read.csv("floods.csv",header = FALSE, sep = ",") flood_class = cbind(kg[c(301:1224),-1],"flood" = flood[c(301:1224),-1]) # predprec = rbind(predtrain, pred) # flood_class = cbind(kg[c(301:1224),c(-1,-12)],"predicted" = predprec[c(301:1224),], "flood" = flood[c(301:1224),-1]) flood_class$flood <- factor(flood_class$flood, labels = c(0,1)) row.names(flood_class) = NULL train = flood_class[c(1:720),] test = flood_class[c(720:924),] row.names(test) = NULL row.names(train) = NULL logit <- glm(flood ~ ., data = train, family = "binomial") p = predict(logit, train[,c(1:11)], type="response") ptest = predict(logit, test[,c(1:11)], type="response") prob = sum(train$flood==1)/(dim(train)[1]) p = ifelse(p>prob, 1, 0) prec = sum(p==1 & train$flood==1)/((sum(p==1 & train$flood==1))+(sum(p==0 & train$flood==1))) rec = sum(p==1 & train$flood==1)/((sum(p==1 & train$flood==1))+(sum(p==1 & train$flood==0))) ptest = ifelse(ptest>prob, 1, 0) prect = sum(ptest==1 & 
test$flood==1)/((sum(ptest==1 & test$flood==1))+(sum(ptest==0 & test$flood==1))) rect = sum(ptest==1 & test$flood==1)/((sum(ptest==1 & test$flood==1))+(sum(ptest==1 & test$flood==0)))
/R-Code-229.r
no_license
Paavni/CS-229-Predict-Rainfall
R
false
false
10,236
r
# install libraries install.packages("nnet"); install.package("neuralnet"); install.packages("multicore"); install.packages("caret"); install.packages("nlme"); install.packages("R.utils"); library(multicore) library(nnet) library(neuralnet) library(caret) library(nlme) library(R.utils) # read data setwd("C:/Users/Paavni/My Academics/Quarter IV/CS 229/Project/Final_Data/") # setwd("~/CS229/Project/Final_Data/") sourceDirectory("Functions/") kg = read.csv("kishanganj_features.csv",header = FALSE, sep = ",") kg = kg[,-1] # mean for each variable meancity = colMeans(kg); # normalization kgn = normalization(kg) # dividing into test and train train = kg[c(1:1068),] test = kg[c(1069:1224),] trainn = kgn[c(1:1068),] testn = kgn[c(1069:1224),] ################################################################################################################################################ # OUR METHOD ################################################################################################################################################ # Window Size k = 3; s = 20; decay = 0.00001; n_f = nnet_sgd(trainn, maxit = 1000, s = 20, decay = 0.00001, future = FALSE); predtrain = de_normalization(as.matrix(n_f$p), as.matrix(kg[,11])) actualtrain = de_normalization(as.matrix(trainn[,11]), as.matrix(kg[,11])) we = weighted_error(predtrain, actualtrain) mae = mean(abs(predtrain-actualtrain)) rmse = sqrt(mean((predtrain-actualtrain)^2)) hist(abs(predtrain-actualtrain), xlab = "Absolute Error", col = "red") par(mfrow=c(2,1)) plot(actualtrain, type='l', col = "blue",xlab = "Time", ylab = "Actual Rainfall(mm)"); plot(predtrain, type='l', col = "red", xlab = "Time", ylab = "Predicted Rainfall(mm)") n_f_test = nnet_sgd(testn,maxit = 1000, s = 20, decay = 0.00001, future = FALSE); pred = de_normalization(as.matrix(n_f_test$p), as.matrix(kg[,11])) actual = de_normalization(as.matrix(testn[,11]), as.matrix(kg[,11])) wet = weighted_error(pred, actual) maet = mean(abs(pred-actual)) rmset = 
sqrt(mean((pred-actual)^2)) ################################################################################################################################################ # TUNING ################################################################################################################################################ nnet.wrapper.k=function(k){ nnet_sgd(trainn,k=k, window = TRUE) } res.k=mclapply(c(3,5,7,9),nnet.wrapper.k) we = rep(0, length(res.k)); mae = rep(0, length(res.k)) for(i in 1:length(res.k)) { pred = de_normalization(as.matrix(res.k[[i]]$p), as.matrix(kg[,11])) actual = de_normalization(as.matrix(trainn[,11]), as.matrix(kg[,11])) we[i] = weighted_error(pred, actual) mae[i] = mean(abs(pred-actual)) } nnet.wrapper.s=function(s){ n_f = nnet_sgd(trainn,s=s, future = FALSE) } res.s=mclapply(c(5,10,15,20,25),nnet.wrapper.s) we = rep(0, length(res.s)); mae = rep(0, length(res.s)) for(i in 1:length(res.s)) { pred = de_normalization(as.matrix(res.s[[i]]$p), as.matrix(kg[,11])) actual = de_normalization(as.matrix(trainn[,11]), as.matrix(kg[,11])) we[i] = weighted_error(pred, actual) mae[i] = mean(abs(pred-actual)) } nnet.wrapper.d=function(decay){ n_f = nnet_sgd(trainn,decay = decay, s= 20, future = FALSE) } res.d=mclapply(c(1,0.1,0.01,0.001,0.0001, 0.00001),nnet.wrapper.d) we = rep(0, length(res.d)); mae = rep(0, length(res.d)) for(i in 1:length(res.d)) { pred = de_normalization(as.matrix(res.d[[i]]$p), as.matrix(kg[,11])) actual = de_normalization(as.matrix(trainn[,11]), as.matrix(kg[,11])) we[i] = weighted_error(pred, actual) mae[i] = mean(abs(pred-actual)) } nnet.wrapper.i=function(maxit){ n_f = nnet_sgd(trainn,maxit = maxit, decay = 0.00001, s= 20, future = FALSE) } res.i=mclapply(c(100,500,1000,1500),nnet.wrapper.i) we = rep(0, length(res.i)); mae = rep(0, length(res.i)) for(i in 1:length(res.i)) { pred = de_normalization(as.matrix(res.i[[i]]$p), as.matrix(kg[,11])) actual = de_normalization(as.matrix(trainn[,11]), as.matrix(kg[,11])) we[i] = 
weighted_error(pred, actual) mae[i] = mean(abs(pred-actual)) } ################################################################################################################################################ # REGRESSION ################################################################################################################################################ kg_hist = read.csv("kishanganj_longfeatures.csv",header = FALSE, sep = ",") plot(kg_hist[c(1:60),1], kg_hist[c(1:60),24], type='o',ylab = "Rainfall (mm)", col= "blue", xlab = "Time", xaxt='n', pch = 18) axis(side=1, at=c(191206,191306,191406, 191506, 191606), labels=c("1912","1913","1914", "1915", "1916")) plot(kg_hist[c(1033:1092),1], kg_hist[c(1033:1092),24], type='o',ylab = "Rainfall (mm)", col= "blue", xlab = "Time", xaxt='n', pch = 18) axis(side=1, at=c(199806,199906,200006, 200106, 200206), labels=c("1998","1999","2000", "2001", "2002")) kg_hist = kg_hist[,-1] feat.train.hist = kg_hist[c(1:937),] feat.test.hist = kg_hist[c(938:1092),] lm.hist <- lm(V24 ~ ., data=feat.train.hist) # lm.hist <- gls(V24 ~ ., data=feat.train.hist, method='ML', correlation=corARMA(p,q)) p.hist.train.s = predict(lm.hist, feat.train.hist[,c(1:22)]); error.hist.train.s = mean(abs(p.hist.train.s-feat.train.hist[,23])) we.hist.train.s = weighted_error(as.matrix(p.hist.train.s), as.matrix(feat.train.hist[,23])) p.hist.test.s = predict(lm.hist, feat.test.hist[,c(1:22)]); error.hist.test.s = mean(abs(p.hist.test.s-feat.test.hist[,23])) we.hist.test.s = weighted_error(as.matrix(p.hist.test.s), as.matrix(feat.test.hist[,23])) ################################################################################################################################################ kg_feat = read.csv("kishanganj_features.csv",header = FALSE, sep = ",") kg_feat = kg_feat[,-1] feat.train = kg_feat[c(1:1068),] feat.test = kg_feat[c(1069:1224),] lm.feat <- lm(V12 ~ ., data=feat.train) p.feat.train = predict(lm.feat, feat.train[,c(1:10)]); 
error.feat.train = mean(abs(p.feat.train-feat.train[,11])) we.feat.train = weighted_error(as.matrix(p.feat.train), as.matrix(feat.train[,11])) p.feat.test = predict(lm.feat, feat.test[,c(1:10)]); error.feat.test = mean(abs(p.feat.test-feat.test[,11])) we.feat.test = weighted_error(as.matrix(p.feat.test), as.matrix(feat.test[,11])) rmse.feat.test = sqrt(mean((p.feat.train-feat.test[,11])^2)) ################################################################################################################################################ s.feat = svd(kg_feat[,-11]) s.feat$d s.feat$d[c((length(s.feat$d)-2):length(s.feat$d))] = 0 x.feat = s.feat$u %*% diag(s.feat$d) %*% t(s.feat$v) x.feat = data.frame(cbind(x.feat, kg_feat[,11])) x.feat = x.feat[,c(-8,-9,-10)] x.feat.train = x.feat[c(1:1068),] x.feat.test = x.feat[c(1069:1224),] lm.feat.s <- lm(X11 ~ ., data=x.feat.train) p.feat.train.s = predict(lm.feat.s, x.feat.train[,c(1:7)]); error.feat.train.s = mean(abs(p.feat.train.s-x.feat.train[,8])) we.feat.train.s = weighted_error(as.matrix(p.feat.train.s), as.matrix(x.feat.train[,8])) p.feat.test.s = predict(lm.feat.s, x.feat.test[,c(1:7)]); error.feat.test.s = mean(abs(p.feat.test.s-x.feat.test[,8])) we.feat.test.s = weighted_error(as.matrix(p.feat.test.s), as.matrix(x.feat.test[,8])) ################################################################################################################################################ # NEURAL NETWORK ################################################################################################################################################ train = kg[c(1:1068),] test = kg[c(1069:1224),] trainn = kgn[c(1:1068),] testn = kgn[c(1069:1224),] h = 15 t = 0.1 sm = 1e+5 r = 1 lr=0.001 af = "tanh" nn_f = neuralnet(V12~(V2+V3+V4+V5+V6+V7+V8+V9+V10+V11), trainn, hidden = h, threshold = t, stepmax = sm, rep = r, learningrate=lr,err.fct = "sse", act.fct = af, linear.output = TRUE) pred.train = compute(nn_f, trainn[,c(1:10)])$net.result 
pred.train = de_normalization(as.matrix(pred.train), as.matrix(kg[,11])) actual.train = de_normalization(as.matrix(trainn[,11]), as.matrix(kg[,11])) error.train = mean(abs(pred.train-actual.train)) we.train= weighted_error(as.matrix(pred.train), as.matrix(actual.train)) pred.test = compute(nn_f, testn[,c(1:10)])$net.result pred.test = de_normalization(as.matrix(pred.test), as.matrix(kg[,11])) actual.test = de_normalization(as.matrix(testn[,11]), as.matrix(kg[,11])) error.test = mean(abs(pred.test-actual.test)) we.test= weighted_error(as.matrix(pred.test), as.matrix(actual.test)) error.train we.train error.test we.test ################################################################################################################################################ # LOGISTIC REGRESSION ################################################################################################################################################ kg = read.csv("kishanganj_features.csv",header = FALSE, sep = ",") # kg = normalization(kg) flood = read.csv("floods.csv",header = FALSE, sep = ",") flood_class = cbind(kg[c(301:1224),-1],"flood" = flood[c(301:1224),-1]) # predprec = rbind(predtrain, pred) # flood_class = cbind(kg[c(301:1224),c(-1,-12)],"predicted" = predprec[c(301:1224),], "flood" = flood[c(301:1224),-1]) flood_class$flood <- factor(flood_class$flood, labels = c(0,1)) row.names(flood_class) = NULL train = flood_class[c(1:720),] test = flood_class[c(720:924),] row.names(test) = NULL row.names(train) = NULL logit <- glm(flood ~ ., data = train, family = "binomial") p = predict(logit, train[,c(1:11)], type="response") ptest = predict(logit, test[,c(1:11)], type="response") prob = sum(train$flood==1)/(dim(train)[1]) p = ifelse(p>prob, 1, 0) prec = sum(p==1 & train$flood==1)/((sum(p==1 & train$flood==1))+(sum(p==0 & train$flood==1))) rec = sum(p==1 & train$flood==1)/((sum(p==1 & train$flood==1))+(sum(p==1 & train$flood==0))) ptest = ifelse(ptest>prob, 1, 0) prect = sum(ptest==1 & 
test$flood==1)/((sum(ptest==1 & test$flood==1))+(sum(ptest==0 & test$flood==1))) rect = sum(ptest==1 & test$flood==1)/((sum(ptest==1 & test$flood==1))+(sum(ptest==1 & test$flood==0)))
library(tidyverse) devtools::load_all("augur") #load("cache/before_recode.RData") #library(tidyverse) cancers <- read_csv("cache/cancers_prerecode.csv.gz", progress = FALSE) #cancers[["dx_code_count"]] <- # ifelse(is.na(cancers[["dx_code_count"]]), 0, cancers[["dx_code_count"]]) seer_recode <- function(pedsf_df, key_var, df_var) { p_recodes <- pedsf_utils$pedsf_recodes %>% distinct(Code, .keep_all = TRUE) key_df <- p_recodes[p_recodes$Var == key_var, c("Code", "Meaning")] new_name <- paste0(df_var, "_v") names(key_df) <- c(df_var, new_name) to_return <- left_join(pedsf_df, key_df) #to_return <- to_return[,names(to_return) != df_var] #names(to_return)[new_name == names(to_return)] <- df_var to_return } cancers <- seer_recode(cancers, "payer_dx1", "payerdx") cancers$dod_flg <- factor(cancers$dod_flg, levels = c(0:5), #7), labels = c("Not dead by 12/14", "Dead for both", "Dead and off by 1-3mos", "Dead and off by 4-6mos", "Dead only in Medicare", "Dead only in SEER"))#, "Dead but months missing in Medicare or SEER")) key_vars_to_recode <- c( "radbrn", "csmetsdxliv_pub", "csmetsdxbr_pub", "csmetsdxb_pub", "csmetsdxlung_pub", "grade", "lat", "radsurg", "rad", "nosrg", "vasinv", "nhiade", "adjajc6t", "adjajc6n", "adjajc6m", "dajccm", "origin", "cur_ent", "dajccstg", "dxconf", "erstat", "histrec", "intprim", "linkflag", "m_sex", "marst", "mat_type", "med_stcd", "numdigit", "odthclass", "onco_rg1", "onco_rns1", "dajcct", "dajccn", "dajccstg", # "onco_time1", "origrecb", "oseqcon1", "ositage1", "other_tx1", "prstat", "race", "rsncd1", #"reg_id", "sex", "srvmflag", "sssurg", "stat_rec", "tumor1", "tumor2", "tumor3", "typefu", "vrfydth", "yobflg1", "her2rec", "brstsub", "adjajc6t", "adjajc6n", "adjajc6m", "cs04sch") df_vars_to_recode <- c( "rad_brn", "csmetsdxliv_pub", "csmetsdxbr_pub", "csmetsdxb_pub", "csmetsdxlung_pub", "grade", "lateral", "rad_surg", "radiatn", "no_surg", "vasinv", "nhiade", "t_value", "n_value", "m_value", "d_ajcc_m", "origin", "cur_ent", "dajcc7stg", 
"dx_conf", "erstatus", "histrec", "intprim", "linkflag", "m_sex", "mar_stat", "mat_type", "med_stcd", "numdigit", "o_dth_class", "oncotype_rg", "oncotype_rns", "d_ajcc_t", "d_ajcc_n", "d_ajcc_s", # "oncotype_time", "origrecb", "o_seqcon", "o_sitage", "othr_rx", "prstatus", "race", "rsncd1", #"reg_at_dx", "s_sex", "srv_time_mon_flag","ss_surg", "stat_rec", "tumor_1v", "tumor_2v", "tumor_3v", "typefup", "vrfydth", "yobflg1", "her2", "brst_sub", "adjtm_6value", "adjnm_6value", "adjm_6value", "cs0204schema") #edit pedsf_utils to suit needs------- unique_recodes <- pedsf_utils$pedsf_recodes %>% distinct(Var, Code, .keep_all = TRUE) to_lose <- unique_recodes$Var == "race" & grepl("\\(", unique_recodes$Meaning) unique_recodes <- unique_recodes[-which(to_lose),] unique_recodes$Code[unique_recodes$Var == "reg_id"] <- gsub("15", "", unique_recodes$Code[unique_recodes$Var == "reg_id"]) unique_recodes[unique_recodes %in% c("Blank", "blank")] <- "" unique_recodes$Meaning[unique_recodes$Code == "1= 0%-<5% poverty"] <- "0%-<5% poverty" unique_recodes$Code[unique_recodes$Code == "1= 0%-<5% poverty"] <- "1" unique_recodes$Meaning[unique_recodes$Meaning == "Reviewed and confirmed that"] <- "Reviewed and confirmed correct demo. 
characteristics" problem_value <- "Grade I; grade i; grade 1; well differentiated; differentiated, NOS" unique_recodes$Meaning[unique_recodes$Meaning == problem_value] <- "Grade I" problem_value <- "cell type not determined, not stated or not applicable" unique_recodes$Meaning[unique_recodes$Meaning == problem_value] <- "NA" problem_value <- "Unmarried or domestic partner (same sex or opposite sex or unregistered)" unique_recodes$Meaning[unique_recodes$Meaning == problem_value] <- "Unmarried" unique_recodes$Meaning[unique_recodes$Meaning %in% c("B-cell", "T-cell")] <- NA #technique #1 -------------------------------------- #for_recode <- function(vr, k_vr, k_df){ # k_df <- k_df[k_df$Var == k_vr, c("Code", "Meaning")] # factor(factor(vr, levels = k_df$Code, labels = k_df$Meaning)) #} #for_seer_recode <- pryr::partial(for_recode, k_df = unique_recodes) # #UNCOMMENT THESE TO RUN THE "for_recode" FUNCTIONS #cancers[,paste(df_vars_to_recode, "v", sep = "_")] <- #cancers[,df_vars_to_recode] <- # map2(cancers[,df_vars_to_recode], key_vars_to_recode, for_seer_recode) #---------------------------------------------------------- #technique #2 -------------------------------------- #The general idea, written as pipes (with an extra gsub) cancers <- pedsf_utils$pedsf_recodes %>% filter(Var == "reg_id") %>% mutate(reg_at_dx = gsub("^15", "", Code)) %>% select(reg_at_dx, reg_at_dx_v = Meaning) %>% right_join(cancers) keygen <- function(kdf, df_var, key_df_var) { kdf <- kdf[kdf[["Var"]] == key_df_var,] kdf <- kdf[!duplicated(kdf),c("Code", "Meaning")] names(kdf) <- c(df_var, paste0(df_var, "_v")) kdf } pedsf_keygen <- pryr::partial(keygen, kdf = unique_recodes) key_dfs <- map2(df_vars_to_recode, key_vars_to_recode, pedsf_keygen) library(zeallot) lj_coerce <- function(df1, df2){ join_by <- intersect(names(df1), names(df2)) if(length(join_by) != 1) { stop("Can't coerce more than one 'by' column") } c(df1[[join_by]], df2[[join_by]]) %<-% lapply(list(df1[[join_by]], df2[[join_by]]), 
as.character) left_join(df1, df2) } cancers <- reduce(key_dfs, lj_coerce, .init = cancers) #more specific recoding===================================== cancers$urbrur <- factor(as.numeric(cancers$urbrur), levels = c(1:5,9), labels = c("Big Metro", "Metro", "Urban", "Less Urban", "Rural", "Unknown")) %>% as.character() cancers$insrec_pub <- factor(cancers$insrec_pub, levels = 1:4, labels = c("Uninsured", "Medicaid", "Insured", "Insured Nonspecific")) %>% as.character cancers$beh03v <- factor(cancers$beh03v, levels = 2:3, labels = c("Carcinoid", "Malignant")) %>% as.character ssg_labels <- c("In situ", "Localized", "Regional, direct", "Regional, LN", "Regional, ext", "Regional, NOS", "Distant", "Unknown") cancers$d_ssg00 <- factor(cancers$d_ssg00, levels = c(0:5, 7, 9), labels = ssg_labels) rm(ssg_labels) cancers$eod10_pn[cancers$eod10_pn == 99] <- NA cancers$eod10_pn[cancers$eod10_pn == 98] <- NA cancers$eod10_pn[cancers$eod10_pn == 97] <- NA cancers$cs_mets[cancers$cs_mets == 99] <- NA cancers$cs_mets[cancers$cs_mets == 98] <- NA cancers$cs_size[as.numeric(cancers$cs_size) >= 988] <- NA #histo_url <- "https://seer.cancer.gov/icd-o-3/sitetype.icdo3.d20150918.xls" #download.file(histo_url, "~/brain-metastases/documentation/seer_histo.xls") histocodes <- readxl::read_xls("documentation/seer_histo.xls") names(histocodes) <- c("siterec", "sitedesc", "histo", "histodesc", "histobeh", "histobehdesc") split_stuff <- strsplit(histocodes$histobeh, "/") histocodes[,c("hist03v", "Behavior")] %<-% list(map_chr(split_stuff, 1), map_chr(split_stuff, 2)) histocodes$hist03v <- as.numeric(histocodes$hist03v) h_codes <- histocodes histocodes <- histocodes %>% select(hist03v, histodesc) %>% distinct(hist03v, .keep_all = TRUE) %>% filter(hist03v %in% unique(cancers$hist03v)) new_codes <- c("8983", "8213", "8325", "9970", "8711", "8392", "8825", "8406") %>% as.numeric() histocodes <- bind_rows(histocodes, data.frame(hist03v = new_codes, histodesc = c("Adenomyoepitheliomia with 
carcinoma", "Serrated adenocarcinoma", "Granular cell carcinoma", "Myelodysplastic neoplasm", "Glomangiosarcoma", "Skin carcinoma", "Myofibroblastic sarcoma", "Adenocarcinoma NOS"), stringsAsFactors = FALSE) ) #These warnings may indicate incompatibility with R3.4 #cancers$hist03v_v <- # suppressWarnings( # factor(cancers$hist03v, levels = histocodes$hist03v, # labels = histocodes$histodesc)) histo_key <- with(histocodes, setNames(hist03v, histodesc)) cancers$hist03v_v <- reduce2(histo_key, names(histo_key), .init = cancers$hist03v, function(hst_vec, hk, n_hk) { if(hk %in% unique(hst_vec)) { ifelse(hst_vec == hk, n_hk, hst_vec) } else { hst_vec } }) rm_unk <- function(x) { levels_to_collapse <- c("Unknown", "Not 1990+ Breast", "Not 2010+ Breast", "Borderline") ifelse(x %in% levels_to_collapse, "Other and unknown", x) } to_rm_unkn <- c("her2_v", "prstatus_v", "erstatus_v") cancers[,to_rm_unkn] <- lapply(cancers[,to_rm_unkn], rm_unk) cancers$d_ssg00[cancers$d_ssg00 == "Regional, NOS"] <- "Unknown" cancers$grade_v[cancers$grade_v == "N K cell (natural killer cell)"] <- NA cancers$mar_stat_v[cancers$mar_stat_v %in% c("Unknown", "Unmarried", "Separated")] <- "Unknown or other" #site_url <- "https://www.cms.gov/Medicare/Coding/ICD10/Downloads/2016-Code-Descriptions-in-Tabular-Order.zip" #download.file(site_url, "~/brain-metastases/documentation/icd/icd10.zip") #!/bin/bash #cd documentation #unzip icd10.zip #cd .. 
#cancers <- # left_join(cancers, # read_table("documentation/icd/icd10cm_codes_2016.txt", # col_names = c("icdot10v", "icd10"))) #cancers$site <- # gsub("(Malignant\\ neoplasm\\ of|Carcinoma|Melanoma|Malignant\\ Melanoma)(\\ in\\ situ)?(\\ of)?\\ ", # "", cancers$icd10) # #cancers <- left_join(cancers, site_conversions) #using ICD data from GNUHealth # http://health.gnu.org/ # file:///home/mustafa/Downloads/gnuhealth-3.2.9/health_icd10/data/diseases.xml library(XML) dx_xml <- xmlParse("documentation/diseases.xml", useInternalNodes = TRUE) dx_xml <- xmlToList(dx_xml)[[1]] dx_xml <- dx_xml[-length(dx_xml)] diseases <- data.frame(icdot10v = gsub("\\.", "", map_chr(dx_xml, function(x) x[[2]][[1]])), icd10 = map_chr(dx_xml, function(x) x[[1]][[1]]), stringsAsFactors = FALSE) cancers <- left_join(cancers, diseases) #cancers$site <- # gsub("(Malignant\\ neoplasm\\ of|Carcinoma|Melanoma|Malignant\\ Melanoma)(\\ in\\ situ)?(\\ of)?\\ ", # "", cancers$icd10) rep_pat <- "(Malignant\\ neoplasm\\:|Carcinoma\\ in\\ situ|Malignant\\ neoplasm\\ of\\ other\\ and\\ ill-defined\\ sites\\:)\\ (of)?" 
cancers$icd <- gsub(rep_pat, "", cancers$icd10) stems <- list(mis = c("(Melanoma", "in", "situ)"), malm = c("(Malignant", "melanoma)"), olo = c("(Overlapping", "lesion)"), cars = c("(Carcinoma", "in", "situ)"), cst = c("(Connective", "and", "soft", "tissue)"), mnp = c("(Malignant", "neoplasm)"), qdrnt = "(^.*quadrant)", lobe = "(^.*lobe)", inc = "(including.*$)", unsp = "(unspecified$)", oth = "(other", "and", "unspecified", "parts)" ) to_replace <- c(paste0(map(stems, function(x) paste0(tolower(x), collapse = "\\ ")), collapse = "|"), "\\,|of\\ |:\\ ", "skin", "\\ +") replacements <- c("", "", "", "\\ ") cancers$icd <- reduce2(to_replace, replacements, function(starting, fst, snd) { new_x <- trimws(tolower(starting), "both") gsub(pattern = fst, replacement = snd, x = new_x) }, .init = cancers$icd) cancers$icd[cancers$icd == ""] <- NA #cancers$icd <- ifelse(grepl("[lL]ower\\ [lL]imb", cancers$icd), "llt", cancers$icd) #cancers$icd <- ifelse(grepl("[tT]run[ck](al)?", cancers$icd), "llt", cancers$icd) #cancers$icd <- ifelse(grepl("[Ff]ace", cancers$icd), "ulf", cancers$icd) #cancers$icd <- ifelse(grepl("[uU]pper\\ [lL]imb", cancers$icd), "ulf", cancers$icd) to_reps <- c("[lL]ower\\ [lL]imb", "[tT]run[ck](al)?", "[Ff]ace", "[uU]pper\\ [lL]imb", "[sS]calp|[nN]eck|[eE]ar|[eE]ye|[lL]ip") replacements <- c("llt", "llt", "ulf", "ulf", "ulf") cancers <- reduce2(to_reps, replacements, function(df, to_rep, repmnt) { df[["icd"]] <- ifelse(grepl(to_rep, df[["icd"]]), repmnt, df[["icd"]]) df }, .init = cancers) skins <- which(cancers$which_cancer == "skin") cancers$icd_c[skins] <- ifelse((cancers$icd[skins] %in% c("llt", "ulf")), cancers$icd, "other") cancers$rac_recy_v <- as.character( factor(cancers$rac_recy, levels = c(1:4, 9), labels = c("White", "Black", "American Indian", "Asian/Pacific Islander", NA)) ) to_numeric <- c("age_dx", "cs_size", "eod10_pn", "cs_mets") cancers[,to_numeric] <- lapply(cancers[,to_numeric], as.numeric) rm(to_numeric) 
cancers$cs_size[as.numeric(cancers$cs_size) >= 988] <- NA cancers$age_cut <- cut(as.numeric(cancers$age_dx), c(65, 70, 75, 80, 85, 115), include.lowest = TRUE, labels = c("65 to 69", "70 to 74", "75 to 79", "80 to 84", "85+")) cancers$race_v <- cancers$rac_recy_v hispanic_origin_values <- c("Cuban", "Dominican Republic", "Mexican", "NHIA Surname Match Only", "Other specified Spanish/Hispanic Origin including Europe", "Puerto Rican", "South or Central American excluding Brazil", "Spanish/Hispanic/Latino, NOS") cancers[["race_v"]] <- ifelse(cancers[["nhiade_v"]] %in% hispanic_origin_values & cancers[["race_v"]] == "White", "White Hispanic", gsub("White", "White Non-Hispanic", cancers[["race_v"]])) cancers <- reduce2(list(c("American Indian", "Asian/Pacific Islander"), c("American Indian", "Black", "Asian/Pacific Islander")), c("breast", "skin"), function(df, oths, cncr){ df[["race_v"]] <- ifelse(df[["which_cancer"]] == cncr & df[["race_v"]] %in% oths, "Other", df[["race_v"]]) df}, .init = cancers) write_csv(cancers, "cache/cancers_postrecode.csv.gz")
/munge/recoding.R
permissive
mustafaascha/seerm-bevacizumab
R
false
false
14,805
r
library(tidyverse) devtools::load_all("augur") #load("cache/before_recode.RData") #library(tidyverse) cancers <- read_csv("cache/cancers_prerecode.csv.gz", progress = FALSE) #cancers[["dx_code_count"]] <- # ifelse(is.na(cancers[["dx_code_count"]]), 0, cancers[["dx_code_count"]]) seer_recode <- function(pedsf_df, key_var, df_var) { p_recodes <- pedsf_utils$pedsf_recodes %>% distinct(Code, .keep_all = TRUE) key_df <- p_recodes[p_recodes$Var == key_var, c("Code", "Meaning")] new_name <- paste0(df_var, "_v") names(key_df) <- c(df_var, new_name) to_return <- left_join(pedsf_df, key_df) #to_return <- to_return[,names(to_return) != df_var] #names(to_return)[new_name == names(to_return)] <- df_var to_return } cancers <- seer_recode(cancers, "payer_dx1", "payerdx") cancers$dod_flg <- factor(cancers$dod_flg, levels = c(0:5), #7), labels = c("Not dead by 12/14", "Dead for both", "Dead and off by 1-3mos", "Dead and off by 4-6mos", "Dead only in Medicare", "Dead only in SEER"))#, "Dead but months missing in Medicare or SEER")) key_vars_to_recode <- c( "radbrn", "csmetsdxliv_pub", "csmetsdxbr_pub", "csmetsdxb_pub", "csmetsdxlung_pub", "grade", "lat", "radsurg", "rad", "nosrg", "vasinv", "nhiade", "adjajc6t", "adjajc6n", "adjajc6m", "dajccm", "origin", "cur_ent", "dajccstg", "dxconf", "erstat", "histrec", "intprim", "linkflag", "m_sex", "marst", "mat_type", "med_stcd", "numdigit", "odthclass", "onco_rg1", "onco_rns1", "dajcct", "dajccn", "dajccstg", # "onco_time1", "origrecb", "oseqcon1", "ositage1", "other_tx1", "prstat", "race", "rsncd1", #"reg_id", "sex", "srvmflag", "sssurg", "stat_rec", "tumor1", "tumor2", "tumor3", "typefu", "vrfydth", "yobflg1", "her2rec", "brstsub", "adjajc6t", "adjajc6n", "adjajc6m", "cs04sch") df_vars_to_recode <- c( "rad_brn", "csmetsdxliv_pub", "csmetsdxbr_pub", "csmetsdxb_pub", "csmetsdxlung_pub", "grade", "lateral", "rad_surg", "radiatn", "no_surg", "vasinv", "nhiade", "t_value", "n_value", "m_value", "d_ajcc_m", "origin", "cur_ent", "dajcc7stg", 
"dx_conf", "erstatus", "histrec", "intprim", "linkflag", "m_sex", "mar_stat", "mat_type", "med_stcd", "numdigit", "o_dth_class", "oncotype_rg", "oncotype_rns", "d_ajcc_t", "d_ajcc_n", "d_ajcc_s", # "oncotype_time", "origrecb", "o_seqcon", "o_sitage", "othr_rx", "prstatus", "race", "rsncd1", #"reg_at_dx", "s_sex", "srv_time_mon_flag","ss_surg", "stat_rec", "tumor_1v", "tumor_2v", "tumor_3v", "typefup", "vrfydth", "yobflg1", "her2", "brst_sub", "adjtm_6value", "adjnm_6value", "adjm_6value", "cs0204schema") #edit pedsf_utils to suit needs------- unique_recodes <- pedsf_utils$pedsf_recodes %>% distinct(Var, Code, .keep_all = TRUE) to_lose <- unique_recodes$Var == "race" & grepl("\\(", unique_recodes$Meaning) unique_recodes <- unique_recodes[-which(to_lose),] unique_recodes$Code[unique_recodes$Var == "reg_id"] <- gsub("15", "", unique_recodes$Code[unique_recodes$Var == "reg_id"]) unique_recodes[unique_recodes %in% c("Blank", "blank")] <- "" unique_recodes$Meaning[unique_recodes$Code == "1= 0%-<5% poverty"] <- "0%-<5% poverty" unique_recodes$Code[unique_recodes$Code == "1= 0%-<5% poverty"] <- "1" unique_recodes$Meaning[unique_recodes$Meaning == "Reviewed and confirmed that"] <- "Reviewed and confirmed correct demo. 
characteristics" problem_value <- "Grade I; grade i; grade 1; well differentiated; differentiated, NOS" unique_recodes$Meaning[unique_recodes$Meaning == problem_value] <- "Grade I" problem_value <- "cell type not determined, not stated or not applicable" unique_recodes$Meaning[unique_recodes$Meaning == problem_value] <- "NA" problem_value <- "Unmarried or domestic partner (same sex or opposite sex or unregistered)" unique_recodes$Meaning[unique_recodes$Meaning == problem_value] <- "Unmarried" unique_recodes$Meaning[unique_recodes$Meaning %in% c("B-cell", "T-cell")] <- NA #technique #1 -------------------------------------- #for_recode <- function(vr, k_vr, k_df){ # k_df <- k_df[k_df$Var == k_vr, c("Code", "Meaning")] # factor(factor(vr, levels = k_df$Code, labels = k_df$Meaning)) #} #for_seer_recode <- pryr::partial(for_recode, k_df = unique_recodes) # #UNCOMMENT THESE TO RUN THE "for_recode" FUNCTIONS #cancers[,paste(df_vars_to_recode, "v", sep = "_")] <- #cancers[,df_vars_to_recode] <- # map2(cancers[,df_vars_to_recode], key_vars_to_recode, for_seer_recode) #---------------------------------------------------------- #technique #2 -------------------------------------- #The general idea, written as pipes (with an extra gsub) cancers <- pedsf_utils$pedsf_recodes %>% filter(Var == "reg_id") %>% mutate(reg_at_dx = gsub("^15", "", Code)) %>% select(reg_at_dx, reg_at_dx_v = Meaning) %>% right_join(cancers) keygen <- function(kdf, df_var, key_df_var) { kdf <- kdf[kdf[["Var"]] == key_df_var,] kdf <- kdf[!duplicated(kdf),c("Code", "Meaning")] names(kdf) <- c(df_var, paste0(df_var, "_v")) kdf } pedsf_keygen <- pryr::partial(keygen, kdf = unique_recodes) key_dfs <- map2(df_vars_to_recode, key_vars_to_recode, pedsf_keygen) library(zeallot) lj_coerce <- function(df1, df2){ join_by <- intersect(names(df1), names(df2)) if(length(join_by) != 1) { stop("Can't coerce more than one 'by' column") } c(df1[[join_by]], df2[[join_by]]) %<-% lapply(list(df1[[join_by]], df2[[join_by]]), 
as.character) left_join(df1, df2) } cancers <- reduce(key_dfs, lj_coerce, .init = cancers) #more specific recoding===================================== cancers$urbrur <- factor(as.numeric(cancers$urbrur), levels = c(1:5,9), labels = c("Big Metro", "Metro", "Urban", "Less Urban", "Rural", "Unknown")) %>% as.character() cancers$insrec_pub <- factor(cancers$insrec_pub, levels = 1:4, labels = c("Uninsured", "Medicaid", "Insured", "Insured Nonspecific")) %>% as.character cancers$beh03v <- factor(cancers$beh03v, levels = 2:3, labels = c("Carcinoid", "Malignant")) %>% as.character ssg_labels <- c("In situ", "Localized", "Regional, direct", "Regional, LN", "Regional, ext", "Regional, NOS", "Distant", "Unknown") cancers$d_ssg00 <- factor(cancers$d_ssg00, levels = c(0:5, 7, 9), labels = ssg_labels) rm(ssg_labels) cancers$eod10_pn[cancers$eod10_pn == 99] <- NA cancers$eod10_pn[cancers$eod10_pn == 98] <- NA cancers$eod10_pn[cancers$eod10_pn == 97] <- NA cancers$cs_mets[cancers$cs_mets == 99] <- NA cancers$cs_mets[cancers$cs_mets == 98] <- NA cancers$cs_size[as.numeric(cancers$cs_size) >= 988] <- NA #histo_url <- "https://seer.cancer.gov/icd-o-3/sitetype.icdo3.d20150918.xls" #download.file(histo_url, "~/brain-metastases/documentation/seer_histo.xls") histocodes <- readxl::read_xls("documentation/seer_histo.xls") names(histocodes) <- c("siterec", "sitedesc", "histo", "histodesc", "histobeh", "histobehdesc") split_stuff <- strsplit(histocodes$histobeh, "/") histocodes[,c("hist03v", "Behavior")] %<-% list(map_chr(split_stuff, 1), map_chr(split_stuff, 2)) histocodes$hist03v <- as.numeric(histocodes$hist03v) h_codes <- histocodes histocodes <- histocodes %>% select(hist03v, histodesc) %>% distinct(hist03v, .keep_all = TRUE) %>% filter(hist03v %in% unique(cancers$hist03v)) new_codes <- c("8983", "8213", "8325", "9970", "8711", "8392", "8825", "8406") %>% as.numeric() histocodes <- bind_rows(histocodes, data.frame(hist03v = new_codes, histodesc = c("Adenomyoepitheliomia with 
carcinoma", "Serrated adenocarcinoma", "Granular cell carcinoma", "Myelodysplastic neoplasm", "Glomangiosarcoma", "Skin carcinoma", "Myofibroblastic sarcoma", "Adenocarcinoma NOS"), stringsAsFactors = FALSE) ) #These warnings may indicate incompatibility with R3.4 #cancers$hist03v_v <- # suppressWarnings( # factor(cancers$hist03v, levels = histocodes$hist03v, # labels = histocodes$histodesc)) histo_key <- with(histocodes, setNames(hist03v, histodesc)) cancers$hist03v_v <- reduce2(histo_key, names(histo_key), .init = cancers$hist03v, function(hst_vec, hk, n_hk) { if(hk %in% unique(hst_vec)) { ifelse(hst_vec == hk, n_hk, hst_vec) } else { hst_vec } }) rm_unk <- function(x) { levels_to_collapse <- c("Unknown", "Not 1990+ Breast", "Not 2010+ Breast", "Borderline") ifelse(x %in% levels_to_collapse, "Other and unknown", x) } to_rm_unkn <- c("her2_v", "prstatus_v", "erstatus_v") cancers[,to_rm_unkn] <- lapply(cancers[,to_rm_unkn], rm_unk) cancers$d_ssg00[cancers$d_ssg00 == "Regional, NOS"] <- "Unknown" cancers$grade_v[cancers$grade_v == "N K cell (natural killer cell)"] <- NA cancers$mar_stat_v[cancers$mar_stat_v %in% c("Unknown", "Unmarried", "Separated")] <- "Unknown or other" #site_url <- "https://www.cms.gov/Medicare/Coding/ICD10/Downloads/2016-Code-Descriptions-in-Tabular-Order.zip" #download.file(site_url, "~/brain-metastases/documentation/icd/icd10.zip") #!/bin/bash #cd documentation #unzip icd10.zip #cd .. 
#cancers <- # left_join(cancers, # read_table("documentation/icd/icd10cm_codes_2016.txt", # col_names = c("icdot10v", "icd10"))) #cancers$site <- # gsub("(Malignant\\ neoplasm\\ of|Carcinoma|Melanoma|Malignant\\ Melanoma)(\\ in\\ situ)?(\\ of)?\\ ", # "", cancers$icd10) # #cancers <- left_join(cancers, site_conversions) #using ICD data from GNUHealth # http://health.gnu.org/ # file:///home/mustafa/Downloads/gnuhealth-3.2.9/health_icd10/data/diseases.xml library(XML) dx_xml <- xmlParse("documentation/diseases.xml", useInternalNodes = TRUE) dx_xml <- xmlToList(dx_xml)[[1]] dx_xml <- dx_xml[-length(dx_xml)] diseases <- data.frame(icdot10v = gsub("\\.", "", map_chr(dx_xml, function(x) x[[2]][[1]])), icd10 = map_chr(dx_xml, function(x) x[[1]][[1]]), stringsAsFactors = FALSE) cancers <- left_join(cancers, diseases) #cancers$site <- # gsub("(Malignant\\ neoplasm\\ of|Carcinoma|Melanoma|Malignant\\ Melanoma)(\\ in\\ situ)?(\\ of)?\\ ", # "", cancers$icd10) rep_pat <- "(Malignant\\ neoplasm\\:|Carcinoma\\ in\\ situ|Malignant\\ neoplasm\\ of\\ other\\ and\\ ill-defined\\ sites\\:)\\ (of)?" 
cancers$icd <- gsub(rep_pat, "", cancers$icd10) stems <- list(mis = c("(Melanoma", "in", "situ)"), malm = c("(Malignant", "melanoma)"), olo = c("(Overlapping", "lesion)"), cars = c("(Carcinoma", "in", "situ)"), cst = c("(Connective", "and", "soft", "tissue)"), mnp = c("(Malignant", "neoplasm)"), qdrnt = "(^.*quadrant)", lobe = "(^.*lobe)", inc = "(including.*$)", unsp = "(unspecified$)", oth = "(other", "and", "unspecified", "parts)" ) to_replace <- c(paste0(map(stems, function(x) paste0(tolower(x), collapse = "\\ ")), collapse = "|"), "\\,|of\\ |:\\ ", "skin", "\\ +") replacements <- c("", "", "", "\\ ") cancers$icd <- reduce2(to_replace, replacements, function(starting, fst, snd) { new_x <- trimws(tolower(starting), "both") gsub(pattern = fst, replacement = snd, x = new_x) }, .init = cancers$icd) cancers$icd[cancers$icd == ""] <- NA #cancers$icd <- ifelse(grepl("[lL]ower\\ [lL]imb", cancers$icd), "llt", cancers$icd) #cancers$icd <- ifelse(grepl("[tT]run[ck](al)?", cancers$icd), "llt", cancers$icd) #cancers$icd <- ifelse(grepl("[Ff]ace", cancers$icd), "ulf", cancers$icd) #cancers$icd <- ifelse(grepl("[uU]pper\\ [lL]imb", cancers$icd), "ulf", cancers$icd) to_reps <- c("[lL]ower\\ [lL]imb", "[tT]run[ck](al)?", "[Ff]ace", "[uU]pper\\ [lL]imb", "[sS]calp|[nN]eck|[eE]ar|[eE]ye|[lL]ip") replacements <- c("llt", "llt", "ulf", "ulf", "ulf") cancers <- reduce2(to_reps, replacements, function(df, to_rep, repmnt) { df[["icd"]] <- ifelse(grepl(to_rep, df[["icd"]]), repmnt, df[["icd"]]) df }, .init = cancers) skins <- which(cancers$which_cancer == "skin") cancers$icd_c[skins] <- ifelse((cancers$icd[skins] %in% c("llt", "ulf")), cancers$icd, "other") cancers$rac_recy_v <- as.character( factor(cancers$rac_recy, levels = c(1:4, 9), labels = c("White", "Black", "American Indian", "Asian/Pacific Islander", NA)) ) to_numeric <- c("age_dx", "cs_size", "eod10_pn", "cs_mets") cancers[,to_numeric] <- lapply(cancers[,to_numeric], as.numeric) rm(to_numeric) 
cancers$cs_size[as.numeric(cancers$cs_size) >= 988] <- NA cancers$age_cut <- cut(as.numeric(cancers$age_dx), c(65, 70, 75, 80, 85, 115), include.lowest = TRUE, labels = c("65 to 69", "70 to 74", "75 to 79", "80 to 84", "85+")) cancers$race_v <- cancers$rac_recy_v hispanic_origin_values <- c("Cuban", "Dominican Republic", "Mexican", "NHIA Surname Match Only", "Other specified Spanish/Hispanic Origin including Europe", "Puerto Rican", "South or Central American excluding Brazil", "Spanish/Hispanic/Latino, NOS") cancers[["race_v"]] <- ifelse(cancers[["nhiade_v"]] %in% hispanic_origin_values & cancers[["race_v"]] == "White", "White Hispanic", gsub("White", "White Non-Hispanic", cancers[["race_v"]])) cancers <- reduce2(list(c("American Indian", "Asian/Pacific Islander"), c("American Indian", "Black", "Asian/Pacific Islander")), c("breast", "skin"), function(df, oths, cncr){ df[["race_v"]] <- ifelse(df[["which_cancer"]] == cncr & df[["race_v"]] %in% oths, "Other", df[["race_v"]]) df}, .init = cancers) write_csv(cancers, "cache/cancers_postrecode.csv.gz")
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/line2alias.R \name{line2alias} \alias{line2alias} \title{Line 2 Alias} \usage{ line2alias(df, strain.col, is.rix = F) } \arguments{ \item{df}{Input data frame which contains a column with new CC line designations} \item{strain.col}{Character, name of column containing strains to be changed- not required if is.rix=T.} \item{is.rix}{Logical, is the data from inbred crosses (RIX)?} } \value{ Input data frame with CC line designation converted to old alias nomenclature } \description{ Convert from new CC0XX line designations to old CC nomenclature }
/man/line2alias.Rd
no_license
kenoll/cckit
R
false
true
632
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/line2alias.R \name{line2alias} \alias{line2alias} \title{Line 2 Alias} \usage{ line2alias(df, strain.col, is.rix = F) } \arguments{ \item{df}{Input data frame which contains a column with new CC line designations} \item{strain.col}{Character, name of column containing strains to be changed- not required if is.rix=T.} \item{is.rix}{Logical, is the data from inbred crosses (RIX)?} } \value{ Input data frame with CC line designation converted to old alias nomenclature } \description{ Convert from new CC0XX line designations to old CC nomenclature }
# 500, 500, 3, 0.25 -> Magic L1 = 3.150 # 500, 500, 3, 0.50 -> Magic L1 = 3.335 # 500, 500, 3, 0.75 -> Magic L1 = 3.511 # 500, 500, 3, 1.00 -> Magic L1 = 3.673 # 500, 500, 3, 1.25 -> Magic L1 = 3.839 # 500, 500, 3, 1.50 -> Magic L1 = 3.980 # 500, 500, 3, 2.00 -> Magic L1 = 4.132 # 500, 500, 3, 2.25 -> Magic L1 = 4.208 # 500, 500, 3, 2.50 -> Magic L1 = 4.264 L1.lasso = 1 * sqrt(2 * log(p) / n) pb <- progress_bar$new( format = " Simulating [:bar] :percent in :elapsed. ETA :eta", total = n.sims * n, clear = F, width = 60) set.seed(1) beta <- create.beta(n, p, s) * b x <- mvrnorm(n, rep(0, p), sigma.x) if(errors.type == "t") { errors.array <- array(rt(n * n.sims, 3) / sqrt(3), dim = c(n.sims, n)) } else { errors.array <- mvrnorm(n.sims, rep(0, n), sigma.errors) } t.all <- seq(0, 1, length.out = n) i.bw.all <- which(t.all < (1 - bw) & t.all > bw) n.bw.all <- length(i.bw.all) beta.lasso <- array(0, dim = c(p, n, n.sims)) for(sim in 1:n.sims){ if(n.sims == 1) { errors <- errors.array } else { errors <- errors.array[sim, ] } y <- diag(x %*% beta) + errors for(i in 1:n) { i.bw <- which(abs(t.all - t.all[i]) <= bw) n.bw <- length(i.bw) x.bw <- x[i.bw, ] y.bw <- y[i.bw] # Model fitting fit.lasso <- glmnet(x.bw, y.bw, lambda = L1.lasso, alpha = 1, intercept=F) beta.lasso[, i, sim] <- coef(fit.lasso)[-1] pb$tick() } } n.FN.lasso <- rep(NA, n.sims) n.FP.lasso <- rep(NA, n.sims) FWER.lasso <- rep(NA, n.sims) RMSE.lasso <- rep(NA, n.sims) for(sim in 1:n.sims) { n.FN.lasso[sim] <- sum(beta.lasso[1:s, i.bw.all, sim] == 0) n.FP.lasso[sim] <- sum(beta.lasso[-(1:s), i.bw.all, sim] != 0) FWER.lasso[sim] <- mean(colSums(beta.lasso[-(1:s), i.bw.all, sim]) != 0) RMSE.lasso[sim] <- sqrt(mean((beta.lasso[, i.bw.all, sim] - beta[, i.bw.all]) ^ 2)) } FPR.lasso <- mean(n.FP.lasso) / ((p - s) * n.bw.all) FNR.lasso <- mean(n.FN.lasso) / (s * n.bw.all) sd.FNR.lasso <- sd(n.FN.lasso) / sqrt(n.sims) / (s * n.bw.all * n.sims) sd.FPR.lasso <- sd(n.FP.lasso) / sqrt(n.sims) / ((p - s) * n.bw.all * 
n.sims) sd.FWER.lasso <- sd(FWER.lasso) / sqrt(n.sims) sd.RMSE.lasso <- sd(RMSE.lasso) / sqrt(n.sims) results.lasso <- data.frame(n = n, p = p, s = s, b = b, L1 = L1.lasso, n.FP = mean(n.FP.lasso), FPR = FPR.lasso, sd.FPR = sd.FPR.lasso, n.FN = mean(n.FN.lasso), FNR = FNR.lasso, sd.FNR = sd.FNR.lasso, FWER = mean(FWER.lasso), sd.FWER = sd.FWER.lasso, RMSE = mean(RMSE.lasso), sd.RMSE = sd.RMSE.lasso, error = errors.type) if(print == T) { if(L1.lasso < 1.1 * sqrt(2 * log(p) / n)) { write.table(results.lasso, file = "Lasso, univ.csv", append = T, quote = F, sep = ",", eol = "\n", na = "NA", dec = ".", row.names = F, col.names = F, qmethod = c("escape", "double"), fileEncoding = "") } else { write.table(results.lasso, file = "Lasso, magic.csv", append = T, quote = F, sep = ",", eol = "\n", na = "NA", dec = ".", row.names = F, col.names = F, qmethod = c("escape", "double"), fileEncoding = "") } }
/time-varying lasso.R
no_license
Kevindork/Time-Varying-High-Dimensional-Inference
R
false
false
3,294
r
# 500, 500, 3, 0.25 -> Magic L1 = 3.150 # 500, 500, 3, 0.50 -> Magic L1 = 3.335 # 500, 500, 3, 0.75 -> Magic L1 = 3.511 # 500, 500, 3, 1.00 -> Magic L1 = 3.673 # 500, 500, 3, 1.25 -> Magic L1 = 3.839 # 500, 500, 3, 1.50 -> Magic L1 = 3.980 # 500, 500, 3, 2.00 -> Magic L1 = 4.132 # 500, 500, 3, 2.25 -> Magic L1 = 4.208 # 500, 500, 3, 2.50 -> Magic L1 = 4.264 L1.lasso = 1 * sqrt(2 * log(p) / n) pb <- progress_bar$new( format = " Simulating [:bar] :percent in :elapsed. ETA :eta", total = n.sims * n, clear = F, width = 60) set.seed(1) beta <- create.beta(n, p, s) * b x <- mvrnorm(n, rep(0, p), sigma.x) if(errors.type == "t") { errors.array <- array(rt(n * n.sims, 3) / sqrt(3), dim = c(n.sims, n)) } else { errors.array <- mvrnorm(n.sims, rep(0, n), sigma.errors) } t.all <- seq(0, 1, length.out = n) i.bw.all <- which(t.all < (1 - bw) & t.all > bw) n.bw.all <- length(i.bw.all) beta.lasso <- array(0, dim = c(p, n, n.sims)) for(sim in 1:n.sims){ if(n.sims == 1) { errors <- errors.array } else { errors <- errors.array[sim, ] } y <- diag(x %*% beta) + errors for(i in 1:n) { i.bw <- which(abs(t.all - t.all[i]) <= bw) n.bw <- length(i.bw) x.bw <- x[i.bw, ] y.bw <- y[i.bw] # Model fitting fit.lasso <- glmnet(x.bw, y.bw, lambda = L1.lasso, alpha = 1, intercept=F) beta.lasso[, i, sim] <- coef(fit.lasso)[-1] pb$tick() } } n.FN.lasso <- rep(NA, n.sims) n.FP.lasso <- rep(NA, n.sims) FWER.lasso <- rep(NA, n.sims) RMSE.lasso <- rep(NA, n.sims) for(sim in 1:n.sims) { n.FN.lasso[sim] <- sum(beta.lasso[1:s, i.bw.all, sim] == 0) n.FP.lasso[sim] <- sum(beta.lasso[-(1:s), i.bw.all, sim] != 0) FWER.lasso[sim] <- mean(colSums(beta.lasso[-(1:s), i.bw.all, sim]) != 0) RMSE.lasso[sim] <- sqrt(mean((beta.lasso[, i.bw.all, sim] - beta[, i.bw.all]) ^ 2)) } FPR.lasso <- mean(n.FP.lasso) / ((p - s) * n.bw.all) FNR.lasso <- mean(n.FN.lasso) / (s * n.bw.all) sd.FNR.lasso <- sd(n.FN.lasso) / sqrt(n.sims) / (s * n.bw.all * n.sims) sd.FPR.lasso <- sd(n.FP.lasso) / sqrt(n.sims) / ((p - s) * n.bw.all * 
n.sims) sd.FWER.lasso <- sd(FWER.lasso) / sqrt(n.sims) sd.RMSE.lasso <- sd(RMSE.lasso) / sqrt(n.sims) results.lasso <- data.frame(n = n, p = p, s = s, b = b, L1 = L1.lasso, n.FP = mean(n.FP.lasso), FPR = FPR.lasso, sd.FPR = sd.FPR.lasso, n.FN = mean(n.FN.lasso), FNR = FNR.lasso, sd.FNR = sd.FNR.lasso, FWER = mean(FWER.lasso), sd.FWER = sd.FWER.lasso, RMSE = mean(RMSE.lasso), sd.RMSE = sd.RMSE.lasso, error = errors.type) if(print == T) { if(L1.lasso < 1.1 * sqrt(2 * log(p) / n)) { write.table(results.lasso, file = "Lasso, univ.csv", append = T, quote = F, sep = ",", eol = "\n", na = "NA", dec = ".", row.names = F, col.names = F, qmethod = c("escape", "double"), fileEncoding = "") } else { write.table(results.lasso, file = "Lasso, magic.csv", append = T, quote = F, sep = ",", eol = "\n", na = "NA", dec = ".", row.names = F, col.names = F, qmethod = c("escape", "double"), fileEncoding = "") } }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/co2_to_ca.R \name{co2_to_ca} \alias{co2_to_ca} \title{co2_to_ca} \usage{ co2_to_ca(co2, patm) } \arguments{ \item{co2}{annual atm. CO2 in ppm} \item{patm}{atm.pressure in Pa} } \value{ ca in Pa } \description{ Converts ambient CO2 from ppm to Pa This function is adapted from the rpmodel package, Stocker et al. 2019 } \examples{ } \references{ Stocker et al., 2019 <doi:10.5194/gmd-2019-200> <https://cran.r-project.org/web/packages/rpmodel/index.html> } \author{ Beni Stocker }
/Practicals/Practical_B/Photosynthesis.Rcheck/00_pkg_src/Photosynthesis/man/co2_to_ca.Rd
no_license
femeunier/VegMod_course
R
false
true
562
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/co2_to_ca.R \name{co2_to_ca} \alias{co2_to_ca} \title{co2_to_ca} \usage{ co2_to_ca(co2, patm) } \arguments{ \item{co2}{annual atm. CO2 in ppm} \item{patm}{atm.pressure in Pa} } \value{ ca in Pa } \description{ Converts ambient CO2 from ppm to Pa This function is adapted from the rpmodel package, Stocker et al. 2019 } \examples{ } \references{ Stocker et al., 2019 <doi:10.5194/gmd-2019-200> <https://cran.r-project.org/web/packages/rpmodel/index.html> } \author{ Beni Stocker }
context("mutate") test_that("mutate computed before summarise", { mf <- memdb_frame(x = c(1, 2, 3), y = c(9, 8, 7)) out <- mutate(mf, z = x + y) %>% summarise(sum_z = sum(z, na.rm = TRUE)) %>% collect() expect_equal(out$sum_z, 30) }) test_that("two mutates equivalent to one", { mf <- memdb_frame(x = c(1, 5, 9), y = c(3, 12, 11)) df1 <- mf %>% mutate(x2 = x * 2, y4 = y * 4) %>% collect() df2 <- mf %>% collect() %>% mutate(x2 = x * 2, y4 = y * 4) expect_equal_tbl(df1, df2) }) test_that("can refer to fresly created values", { out1 <- memdb_frame(x1 = 1) %>% mutate(x2 = x1 + 1, x3 = x2 + 1, x4 = x3 + 1) %>% collect() expect_equal(out1, tibble(x1 = 1, x2 = 2, x3 = 3, x4 = 4)) out2 <- memdb_frame(x = 1) %>% mutate(x = x + 1, x = x + 1, x = x + 1) %>% collect() expect_equal(out2, tibble(x = 4)) }) test_that("queries are not nested unnecessarily", { # Should only be one query deep sql <- memdb_frame(x = 1) %>% mutate(y = x + 1, a = y + 1, b = y + 1) %>% sql_build() expect_s3_class(sql$from, "select_query") expect_s3_class(sql$from$from, "ident") }) # SQL generation ----------------------------------------------------------- test_that("mutate calls windowed versions of sql functions", { dfs <- test_frame_windowed(x = 1:4, g = rep(c(1, 2), each = 2)) out <- map(dfs, . %>% group_by(g) %>% mutate(r = as.numeric(row_number(x)))) expect_equal(out$df$r, c(1, 2, 1, 2)) expect_equal_tbls(out) }) test_that("recycled aggregates generate window function", { dfs <- test_frame_windowed(x = 1:4, g = rep(c(1, 2), each = 2)) out <- map(dfs, . %>% group_by(g) %>% mutate(r = x > mean(x, na.rm = TRUE))) expect_equal(out$df$r, c(FALSE, TRUE, FALSE, TRUE)) expect_equal_tbls(out) }) test_that("cumulative aggregates generate window function", { dfs <- test_frame_windowed(x = 1:4, g = rep(c(1, 2), each = 2)) out <- map(dfs, . %>% group_by(g) %>% arrange(x) %>% mutate(r = as.numeric(cumsum(x))) ) expect_equal(out$df$r, c(1, 3, 3, 7)) expect_equal_tbls(out) })
/tests/testthat/test-mutate.r
permissive
refik/dbplyr
R
false
false
2,076
r
context("mutate") test_that("mutate computed before summarise", { mf <- memdb_frame(x = c(1, 2, 3), y = c(9, 8, 7)) out <- mutate(mf, z = x + y) %>% summarise(sum_z = sum(z, na.rm = TRUE)) %>% collect() expect_equal(out$sum_z, 30) }) test_that("two mutates equivalent to one", { mf <- memdb_frame(x = c(1, 5, 9), y = c(3, 12, 11)) df1 <- mf %>% mutate(x2 = x * 2, y4 = y * 4) %>% collect() df2 <- mf %>% collect() %>% mutate(x2 = x * 2, y4 = y * 4) expect_equal_tbl(df1, df2) }) test_that("can refer to fresly created values", { out1 <- memdb_frame(x1 = 1) %>% mutate(x2 = x1 + 1, x3 = x2 + 1, x4 = x3 + 1) %>% collect() expect_equal(out1, tibble(x1 = 1, x2 = 2, x3 = 3, x4 = 4)) out2 <- memdb_frame(x = 1) %>% mutate(x = x + 1, x = x + 1, x = x + 1) %>% collect() expect_equal(out2, tibble(x = 4)) }) test_that("queries are not nested unnecessarily", { # Should only be one query deep sql <- memdb_frame(x = 1) %>% mutate(y = x + 1, a = y + 1, b = y + 1) %>% sql_build() expect_s3_class(sql$from, "select_query") expect_s3_class(sql$from$from, "ident") }) # SQL generation ----------------------------------------------------------- test_that("mutate calls windowed versions of sql functions", { dfs <- test_frame_windowed(x = 1:4, g = rep(c(1, 2), each = 2)) out <- map(dfs, . %>% group_by(g) %>% mutate(r = as.numeric(row_number(x)))) expect_equal(out$df$r, c(1, 2, 1, 2)) expect_equal_tbls(out) }) test_that("recycled aggregates generate window function", { dfs <- test_frame_windowed(x = 1:4, g = rep(c(1, 2), each = 2)) out <- map(dfs, . %>% group_by(g) %>% mutate(r = x > mean(x, na.rm = TRUE))) expect_equal(out$df$r, c(FALSE, TRUE, FALSE, TRUE)) expect_equal_tbls(out) }) test_that("cumulative aggregates generate window function", { dfs <- test_frame_windowed(x = 1:4, g = rep(c(1, 2), each = 2)) out <- map(dfs, . %>% group_by(g) %>% arrange(x) %>% mutate(r = as.numeric(cumsum(x))) ) expect_equal(out$df$r, c(1, 3, 3, 7)) expect_equal_tbls(out) })
# This file was generated by Rcpp::compileAttributes # Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 bayesBLP_rcpp_loop <- function(IV, X, Z, share, J, T, v, R, sigmasqR, A, theta_hat, deltabar, Ad, nu0, s0_sq, VOmega, ssq, cand_cov, theta_bar_initial, r_initial, tau_sq_initial, Omega_initial, delta_initial, tol, keep, nprint) { .Call('bayesm_bayesBLP_rcpp_loop', PACKAGE = 'bayesm', IV, X, Z, share, J, T, v, R, sigmasqR, A, theta_hat, deltabar, Ad, nu0, s0_sq, VOmega, ssq, cand_cov, theta_bar_initial, r_initial, tau_sq_initial, Omega_initial, delta_initial, tol, keep, nprint) } breg <- function(y, X, betabar, A) { .Call('bayesm_breg', PACKAGE = 'bayesm', y, X, betabar, A) } cgetC <- function(e, k) { .Call('bayesm_cgetC', PACKAGE = 'bayesm', e, k) } clusterMix_rcpp_loop <- function(zdraw, cutoff, SILENT, nprint) { .Call('bayesm_clusterMix_rcpp_loop', PACKAGE = 'bayesm', zdraw, cutoff, SILENT, nprint) } ghkvec <- function(L, trunpt, above, r, HALTON = TRUE, pn = as.integer( c(0))) { .Call('bayesm_ghkvec', PACKAGE = 'bayesm', L, trunpt, above, r, HALTON, pn) } llmnl <- function(beta, y, X) { .Call('bayesm_llmnl', PACKAGE = 'bayesm', beta, y, X) } lndIChisq <- function(nu, ssq, X) { .Call('bayesm_lndIChisq', PACKAGE = 'bayesm', nu, ssq, X) } lndIWishart <- function(nu, V, IW) { .Call('bayesm_lndIWishart', PACKAGE = 'bayesm', nu, V, IW) } lndMvn <- function(x, mu, rooti) { .Call('bayesm_lndMvn', PACKAGE = 'bayesm', x, mu, rooti) } lndMvst <- function(x, nu, mu, rooti, NORMC = FALSE) { .Call('bayesm_lndMvst', PACKAGE = 'bayesm', x, nu, mu, rooti, NORMC) } rbprobitGibbs_rcpp_loop <- function(y, X, Abetabar, root, beta, sigma, a, b, R, keep, nprint) { .Call('bayesm_rbprobitGibbs_rcpp_loop', PACKAGE = 'bayesm', y, X, Abetabar, root, beta, sigma, a, b, R, keep, nprint) } rdirichlet <- function(alpha) { .Call('bayesm_rdirichlet', PACKAGE = 'bayesm', alpha) } rDPGibbs_rcpp_loop <- function(R, keep, nprint, y, lambda_hyper, SCALE, maxuniq, PrioralphaList, gridsize, 
BayesmConstantA, BayesmConstantnuInc, BayesmConstantDPalpha) { .Call('bayesm_rDPGibbs_rcpp_loop', PACKAGE = 'bayesm', R, keep, nprint, y, lambda_hyper, SCALE, maxuniq, PrioralphaList, gridsize, BayesmConstantA, BayesmConstantnuInc, BayesmConstantDPalpha) } rhierLinearMixture_rcpp_loop <- function(regdata, Z, deltabar, Ad, mubar, Amu, nu, V, nu_e, ssq, R, keep, nprint, drawdelta, olddelta, a, oldprob, ind, tau) { .Call('bayesm_rhierLinearMixture_rcpp_loop', PACKAGE = 'bayesm', regdata, Z, deltabar, Ad, mubar, Amu, nu, V, nu_e, ssq, R, keep, nprint, drawdelta, olddelta, a, oldprob, ind, tau) } rhierLinearModel_rcpp_loop <- function(regdata, Z, Deltabar, A, nu, V, nu_e, ssq, tau, Delta, Vbeta, R, keep, nprint) { .Call('bayesm_rhierLinearModel_rcpp_loop', PACKAGE = 'bayesm', regdata, Z, Deltabar, A, nu, V, nu_e, ssq, tau, Delta, Vbeta, R, keep, nprint) } rhierMnlDP_rcpp_loop <- function(R, keep, nprint, lgtdata, Z, deltabar, Ad, PrioralphaList, lambda_hyper, drawdelta, nvar, oldbetas, s, maxuniq, gridsize, BayesmConstantA, BayesmConstantnuInc, BayesmConstantDPalpha) { .Call('bayesm_rhierMnlDP_rcpp_loop', PACKAGE = 'bayesm', R, keep, nprint, lgtdata, Z, deltabar, Ad, PrioralphaList, lambda_hyper, drawdelta, nvar, oldbetas, s, maxuniq, gridsize, BayesmConstantA, BayesmConstantnuInc, BayesmConstantDPalpha) } rhierMnlRwMixture_rcpp_loop <- function(lgtdata, Z, deltabar, Ad, mubar, Amu, nu, V, s, R, keep, nprint, drawdelta, olddelta, a, oldprob, oldbetas, ind) { .Call('bayesm_rhierMnlRwMixture_rcpp_loop', PACKAGE = 'bayesm', lgtdata, Z, deltabar, Ad, mubar, Amu, nu, V, s, R, keep, nprint, drawdelta, olddelta, a, oldprob, oldbetas, ind) } rhierNegbinRw_rcpp_loop <- function(regdata, hessdata, Z, Beta, Delta, Deltabar, Adelta, nu, V, a, b, R, keep, sbeta, alphacroot, nprint, rootA, alpha, fixalpha) { .Call('bayesm_rhierNegbinRw_rcpp_loop', PACKAGE = 'bayesm', regdata, hessdata, Z, Beta, Delta, Deltabar, Adelta, nu, V, a, b, R, keep, sbeta, alphacroot, nprint, rootA, alpha, 
fixalpha) } rivDP_rcpp_loop <- function(R, keep, nprint, dimd, mbg, Abg, md, Ad, y, isgamma, z, x, w, delta, PrioralphaList, gridsize, SCALE, maxuniq, scalex, scaley, lambda_hyper, BayesmConstantA, BayesmConstantnu) { .Call('bayesm_rivDP_rcpp_loop', PACKAGE = 'bayesm', R, keep, nprint, dimd, mbg, Abg, md, Ad, y, isgamma, z, x, w, delta, PrioralphaList, gridsize, SCALE, maxuniq, scalex, scaley, lambda_hyper, BayesmConstantA, BayesmConstantnu) } rivGibbs_rcpp_loop <- function(y, x, z, w, mbg, Abg, md, Ad, V, nu, R, keep, nprint) { .Call('bayesm_rivGibbs_rcpp_loop', PACKAGE = 'bayesm', y, x, z, w, mbg, Abg, md, Ad, V, nu, R, keep, nprint) } rmixGibbs <- function(y, Bbar, A, nu, V, a, p, z) { .Call('bayesm_rmixGibbs', PACKAGE = 'bayesm', y, Bbar, A, nu, V, a, p, z) } rmixture <- function(n, pvec, comps) { .Call('bayesm_rmixture', PACKAGE = 'bayesm', n, pvec, comps) } rmnlIndepMetrop_rcpp_loop <- function(R, keep, nu, betastar, root, y, X, betabar, rootpi, rooti, oldlimp, oldlpost, nprint) { .Call('bayesm_rmnlIndepMetrop_rcpp_loop', PACKAGE = 'bayesm', R, keep, nu, betastar, root, y, X, betabar, rootpi, rooti, oldlimp, oldlpost, nprint) } rmnpGibbs_rcpp_loop <- function(R, keep, nprint, pm1, y, X, beta0, sigma0, V, nu, betabar, A) { .Call('bayesm_rmnpGibbs_rcpp_loop', PACKAGE = 'bayesm', R, keep, nprint, pm1, y, X, beta0, sigma0, V, nu, betabar, A) } rmultireg <- function(Y, X, Bbar, A, nu, V) { .Call('bayesm_rmultireg', PACKAGE = 'bayesm', Y, X, Bbar, A, nu, V) } rmvpGibbs_rcpp_loop <- function(R, keep, nprint, p, y, X, beta0, sigma0, V, nu, betabar, A) { .Call('bayesm_rmvpGibbs_rcpp_loop', PACKAGE = 'bayesm', R, keep, nprint, p, y, X, beta0, sigma0, V, nu, betabar, A) } rmvst <- function(nu, mu, root) { .Call('bayesm_rmvst', PACKAGE = 'bayesm', nu, mu, root) } rnegbinRw_rcpp_loop <- function(y, X, betabar, rootA, a, b, beta, alpha, fixalpha, betaroot, alphacroot, R, keep, nprint) { .Call('bayesm_rnegbinRw_rcpp_loop', PACKAGE = 'bayesm', y, X, betabar, rootA, a, b, 
beta, alpha, fixalpha, betaroot, alphacroot, R, keep, nprint) } rnmixGibbs_rcpp_loop <- function(y, Mubar, A, nu, V, a, p, z, R, keep, nprint) { .Call('bayesm_rnmixGibbs_rcpp_loop', PACKAGE = 'bayesm', y, Mubar, A, nu, V, a, p, z, R, keep, nprint) } rordprobitGibbs_rcpp_loop <- function(y, X, k, A, betabar, Ad, s, inc_root, dstarbar, betahat, R, keep, nprint) { .Call('bayesm_rordprobitGibbs_rcpp_loop', PACKAGE = 'bayesm', y, X, k, A, betabar, Ad, s, inc_root, dstarbar, betahat, R, keep, nprint) } rscaleUsage_rcpp_loop <- function(k, x, p, n, R, keep, ndghk, nprint, y, mu, Sigma, tau, sigma, Lambda, e, domu, doSigma, dosigma, dotau, doLambda, doe, nu, V, mubar, Am, gsigma, gl11, gl22, gl12, nuL, VL, ge) { .Call('bayesm_rscaleUsage_rcpp_loop', PACKAGE = 'bayesm', k, x, p, n, R, keep, ndghk, nprint, y, mu, Sigma, tau, sigma, Lambda, e, domu, doSigma, dosigma, dotau, doLambda, doe, nu, V, mubar, Am, gsigma, gl11, gl22, gl12, nuL, VL, ge) } rsurGibbs_rcpp_loop <- function(regdata, indreg, cumnk, nk, XspXs, Sigmainv, A, Abetabar, nu, V, nvar, E, Y, R, keep, nprint) { .Call('bayesm_rsurGibbs_rcpp_loop', PACKAGE = 'bayesm', regdata, indreg, cumnk, nk, XspXs, Sigmainv, A, Abetabar, nu, V, nvar, E, Y, R, keep, nprint) } rtrun <- function(mu, sigma, a, b) { .Call('bayesm_rtrun', PACKAGE = 'bayesm', mu, sigma, a, b) } runireg_rcpp_loop <- function(y, X, betabar, A, nu, ssq, R, keep, nprint) { .Call('bayesm_runireg_rcpp_loop', PACKAGE = 'bayesm', y, X, betabar, A, nu, ssq, R, keep, nprint) } runiregGibbs_rcpp_loop <- function(y, X, betabar, A, nu, ssq, sigmasq, R, keep, nprint) { .Call('bayesm_runiregGibbs_rcpp_loop', PACKAGE = 'bayesm', y, X, betabar, A, nu, ssq, sigmasq, R, keep, nprint) } rwishart <- function(nu, V) { .Call('bayesm_rwishart', PACKAGE = 'bayesm', nu, V) } callroot <- function(c1, c2, tol, iterlim) { .Call('bayesm_callroot', PACKAGE = 'bayesm', c1, c2, tol, iterlim) }
/pkgs/bayesm/R/rcppexports.r
no_license
vaguiar/EDAV_Project_2017
R
false
false
8,080
r
# This file was generated by Rcpp::compileAttributes
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
#
# NOTE(review): auto-generated R bindings for the 'bayesm' package.  Every
# function below is a thin wrapper that forwards its arguments unchanged to
# the identically named compiled C++ routine via .Call().  Do not edit by
# hand; regenerate with Rcpp::compileAttributes() instead.

bayesBLP_rcpp_loop <- function(IV, X, Z, share, J, T, v, R, sigmasqR, A, theta_hat, deltabar, Ad, nu0, s0_sq, VOmega, ssq, cand_cov, theta_bar_initial, r_initial, tau_sq_initial, Omega_initial, delta_initial, tol, keep, nprint) {
    .Call('bayesm_bayesBLP_rcpp_loop', PACKAGE = 'bayesm', IV, X, Z, share, J, T, v, R, sigmasqR, A, theta_hat, deltabar, Ad, nu0, s0_sq, VOmega, ssq, cand_cov, theta_bar_initial, r_initial, tau_sq_initial, Omega_initial, delta_initial, tol, keep, nprint)
}

breg <- function(y, X, betabar, A) {
    .Call('bayesm_breg', PACKAGE = 'bayesm', y, X, betabar, A)
}

cgetC <- function(e, k) {
    .Call('bayesm_cgetC', PACKAGE = 'bayesm', e, k)
}

clusterMix_rcpp_loop <- function(zdraw, cutoff, SILENT, nprint) {
    .Call('bayesm_clusterMix_rcpp_loop', PACKAGE = 'bayesm', zdraw, cutoff, SILENT, nprint)
}

ghkvec <- function(L, trunpt, above, r, HALTON = TRUE, pn = as.integer( c(0))) {
    .Call('bayesm_ghkvec', PACKAGE = 'bayesm', L, trunpt, above, r, HALTON, pn)
}

llmnl <- function(beta, y, X) {
    .Call('bayesm_llmnl', PACKAGE = 'bayesm', beta, y, X)
}

lndIChisq <- function(nu, ssq, X) {
    .Call('bayesm_lndIChisq', PACKAGE = 'bayesm', nu, ssq, X)
}

lndIWishart <- function(nu, V, IW) {
    .Call('bayesm_lndIWishart', PACKAGE = 'bayesm', nu, V, IW)
}

lndMvn <- function(x, mu, rooti) {
    .Call('bayesm_lndMvn', PACKAGE = 'bayesm', x, mu, rooti)
}

lndMvst <- function(x, nu, mu, rooti, NORMC = FALSE) {
    .Call('bayesm_lndMvst', PACKAGE = 'bayesm', x, nu, mu, rooti, NORMC)
}

rbprobitGibbs_rcpp_loop <- function(y, X, Abetabar, root, beta, sigma, a, b, R, keep, nprint) {
    .Call('bayesm_rbprobitGibbs_rcpp_loop', PACKAGE = 'bayesm', y, X, Abetabar, root, beta, sigma, a, b, R, keep, nprint)
}

rdirichlet <- function(alpha) {
    .Call('bayesm_rdirichlet', PACKAGE = 'bayesm', alpha)
}

rDPGibbs_rcpp_loop <- function(R, keep, nprint, y, lambda_hyper, SCALE, maxuniq, PrioralphaList, gridsize, BayesmConstantA, BayesmConstantnuInc, BayesmConstantDPalpha) {
    .Call('bayesm_rDPGibbs_rcpp_loop', PACKAGE = 'bayesm', R, keep, nprint, y, lambda_hyper, SCALE, maxuniq, PrioralphaList, gridsize, BayesmConstantA, BayesmConstantnuInc, BayesmConstantDPalpha)
}

rhierLinearMixture_rcpp_loop <- function(regdata, Z, deltabar, Ad, mubar, Amu, nu, V, nu_e, ssq, R, keep, nprint, drawdelta, olddelta, a, oldprob, ind, tau) {
    .Call('bayesm_rhierLinearMixture_rcpp_loop', PACKAGE = 'bayesm', regdata, Z, deltabar, Ad, mubar, Amu, nu, V, nu_e, ssq, R, keep, nprint, drawdelta, olddelta, a, oldprob, ind, tau)
}

rhierLinearModel_rcpp_loop <- function(regdata, Z, Deltabar, A, nu, V, nu_e, ssq, tau, Delta, Vbeta, R, keep, nprint) {
    .Call('bayesm_rhierLinearModel_rcpp_loop', PACKAGE = 'bayesm', regdata, Z, Deltabar, A, nu, V, nu_e, ssq, tau, Delta, Vbeta, R, keep, nprint)
}

rhierMnlDP_rcpp_loop <- function(R, keep, nprint, lgtdata, Z, deltabar, Ad, PrioralphaList, lambda_hyper, drawdelta, nvar, oldbetas, s, maxuniq, gridsize, BayesmConstantA, BayesmConstantnuInc, BayesmConstantDPalpha) {
    .Call('bayesm_rhierMnlDP_rcpp_loop', PACKAGE = 'bayesm', R, keep, nprint, lgtdata, Z, deltabar, Ad, PrioralphaList, lambda_hyper, drawdelta, nvar, oldbetas, s, maxuniq, gridsize, BayesmConstantA, BayesmConstantnuInc, BayesmConstantDPalpha)
}

rhierMnlRwMixture_rcpp_loop <- function(lgtdata, Z, deltabar, Ad, mubar, Amu, nu, V, s, R, keep, nprint, drawdelta, olddelta, a, oldprob, oldbetas, ind) {
    .Call('bayesm_rhierMnlRwMixture_rcpp_loop', PACKAGE = 'bayesm', lgtdata, Z, deltabar, Ad, mubar, Amu, nu, V, s, R, keep, nprint, drawdelta, olddelta, a, oldprob, oldbetas, ind)
}

rhierNegbinRw_rcpp_loop <- function(regdata, hessdata, Z, Beta, Delta, Deltabar, Adelta, nu, V, a, b, R, keep, sbeta, alphacroot, nprint, rootA, alpha, fixalpha) {
    .Call('bayesm_rhierNegbinRw_rcpp_loop', PACKAGE = 'bayesm', regdata, hessdata, Z, Beta, Delta, Deltabar, Adelta, nu, V, a, b, R, keep, sbeta, alphacroot, nprint, rootA, alpha, fixalpha)
}

rivDP_rcpp_loop <- function(R, keep, nprint, dimd, mbg, Abg, md, Ad, y, isgamma, z, x, w, delta, PrioralphaList, gridsize, SCALE, maxuniq, scalex, scaley, lambda_hyper, BayesmConstantA, BayesmConstantnu) {
    .Call('bayesm_rivDP_rcpp_loop', PACKAGE = 'bayesm', R, keep, nprint, dimd, mbg, Abg, md, Ad, y, isgamma, z, x, w, delta, PrioralphaList, gridsize, SCALE, maxuniq, scalex, scaley, lambda_hyper, BayesmConstantA, BayesmConstantnu)
}

rivGibbs_rcpp_loop <- function(y, x, z, w, mbg, Abg, md, Ad, V, nu, R, keep, nprint) {
    .Call('bayesm_rivGibbs_rcpp_loop', PACKAGE = 'bayesm', y, x, z, w, mbg, Abg, md, Ad, V, nu, R, keep, nprint)
}

rmixGibbs <- function(y, Bbar, A, nu, V, a, p, z) {
    .Call('bayesm_rmixGibbs', PACKAGE = 'bayesm', y, Bbar, A, nu, V, a, p, z)
}

rmixture <- function(n, pvec, comps) {
    .Call('bayesm_rmixture', PACKAGE = 'bayesm', n, pvec, comps)
}

rmnlIndepMetrop_rcpp_loop <- function(R, keep, nu, betastar, root, y, X, betabar, rootpi, rooti, oldlimp, oldlpost, nprint) {
    .Call('bayesm_rmnlIndepMetrop_rcpp_loop', PACKAGE = 'bayesm', R, keep, nu, betastar, root, y, X, betabar, rootpi, rooti, oldlimp, oldlpost, nprint)
}

rmnpGibbs_rcpp_loop <- function(R, keep, nprint, pm1, y, X, beta0, sigma0, V, nu, betabar, A) {
    .Call('bayesm_rmnpGibbs_rcpp_loop', PACKAGE = 'bayesm', R, keep, nprint, pm1, y, X, beta0, sigma0, V, nu, betabar, A)
}

rmultireg <- function(Y, X, Bbar, A, nu, V) {
    .Call('bayesm_rmultireg', PACKAGE = 'bayesm', Y, X, Bbar, A, nu, V)
}

rmvpGibbs_rcpp_loop <- function(R, keep, nprint, p, y, X, beta0, sigma0, V, nu, betabar, A) {
    .Call('bayesm_rmvpGibbs_rcpp_loop', PACKAGE = 'bayesm', R, keep, nprint, p, y, X, beta0, sigma0, V, nu, betabar, A)
}

rmvst <- function(nu, mu, root) {
    .Call('bayesm_rmvst', PACKAGE = 'bayesm', nu, mu, root)
}

rnegbinRw_rcpp_loop <- function(y, X, betabar, rootA, a, b, beta, alpha, fixalpha, betaroot, alphacroot, R, keep, nprint) {
    .Call('bayesm_rnegbinRw_rcpp_loop', PACKAGE = 'bayesm', y, X, betabar, rootA, a, b, beta, alpha, fixalpha, betaroot, alphacroot, R, keep, nprint)
}

rnmixGibbs_rcpp_loop <- function(y, Mubar, A, nu, V, a, p, z, R, keep, nprint) {
    .Call('bayesm_rnmixGibbs_rcpp_loop', PACKAGE = 'bayesm', y, Mubar, A, nu, V, a, p, z, R, keep, nprint)
}

rordprobitGibbs_rcpp_loop <- function(y, X, k, A, betabar, Ad, s, inc_root, dstarbar, betahat, R, keep, nprint) {
    .Call('bayesm_rordprobitGibbs_rcpp_loop', PACKAGE = 'bayesm', y, X, k, A, betabar, Ad, s, inc_root, dstarbar, betahat, R, keep, nprint)
}

rscaleUsage_rcpp_loop <- function(k, x, p, n, R, keep, ndghk, nprint, y, mu, Sigma, tau, sigma, Lambda, e, domu, doSigma, dosigma, dotau, doLambda, doe, nu, V, mubar, Am, gsigma, gl11, gl22, gl12, nuL, VL, ge) {
    .Call('bayesm_rscaleUsage_rcpp_loop', PACKAGE = 'bayesm', k, x, p, n, R, keep, ndghk, nprint, y, mu, Sigma, tau, sigma, Lambda, e, domu, doSigma, dosigma, dotau, doLambda, doe, nu, V, mubar, Am, gsigma, gl11, gl22, gl12, nuL, VL, ge)
}

rsurGibbs_rcpp_loop <- function(regdata, indreg, cumnk, nk, XspXs, Sigmainv, A, Abetabar, nu, V, nvar, E, Y, R, keep, nprint) {
    .Call('bayesm_rsurGibbs_rcpp_loop', PACKAGE = 'bayesm', regdata, indreg, cumnk, nk, XspXs, Sigmainv, A, Abetabar, nu, V, nvar, E, Y, R, keep, nprint)
}

rtrun <- function(mu, sigma, a, b) {
    .Call('bayesm_rtrun', PACKAGE = 'bayesm', mu, sigma, a, b)
}

runireg_rcpp_loop <- function(y, X, betabar, A, nu, ssq, R, keep, nprint) {
    .Call('bayesm_runireg_rcpp_loop', PACKAGE = 'bayesm', y, X, betabar, A, nu, ssq, R, keep, nprint)
}

runiregGibbs_rcpp_loop <- function(y, X, betabar, A, nu, ssq, sigmasq, R, keep, nprint) {
    .Call('bayesm_runiregGibbs_rcpp_loop', PACKAGE = 'bayesm', y, X, betabar, A, nu, ssq, sigmasq, R, keep, nprint)
}

rwishart <- function(nu, V) {
    .Call('bayesm_rwishart', PACKAGE = 'bayesm', nu, V)
}

callroot <- function(c1, c2, tol, iterlim) {
    .Call('bayesm_callroot', PACKAGE = 'bayesm', c1, c2, tol, iterlim)
}
#' prepare vi_c for compilation
#'
#' @description This function selects the tree-level data from vi_c (cardc) based on selected cluster/plot headers.
#'              Additionally, the function calculates basal area and tree per ha factor.
#'
#' @param compilationType character, either \code{PSP} or \code{nonPSP}. If it is \code{PSP}, it
#'                        is consistent with original PSP compiler, otherwise, it
#'                        is consistent with VRI compiler.
#' @param clusterplotHeader data.table, Cluster and plot-level attributes.
#' @param dataSourcePath character, Specifies the path that directs to the compilation_sa.
#'
#' @param walkThru logical, Indicates whether the walkthrough sampling protocol is used.
#'                 Tree weight is determined by walkthrough method. In walkthrough
#'                 method, a tree is identified as \code{NA} (no walkthrough applied),
#'                 \code{O} for out tree (not counted), and \code{W} for double counted tree.
#'
#'
#'
#'
#' @return A data table that contains tree-level information. A log file that describes the detailed process.
#'
#'
#' @importFrom data.table data.table ':=' set rbindlist setnames setkey
#' @importFrom dplyr '%>%'
#' @importFrom FAIBBase merge_dupUpdate PHFCalculator
#' @export
#' @docType methods
#' @rdname vicPrep
#'
#' @author Yong Luo
#'
#'
vicPrep<- function(compilationType, clusterplotHeader, dataSourcePath,
                   walkThru = TRUE){
  # Load the tree-level (card c) data and keep only trees that belong to
  # the selected cluster/plot combinations.
  vi_c <- readRDS(file.path(dataSourcePath, "vi_c.rds")) %>% data.table
  vi_c[, clusterPlot := paste(CLSTR_ID, PLOT, sep = "_")]
  # NOTE(review): clusterplotHeader is a data.table modified by reference
  # here (a clusterPlot column is added), which is visible to the caller.
  clusterplotHeader[, clusterPlot := paste(CLSTR_ID, PLOT, sep = "_")]
  vi_c <- vi_c[clusterPlot %in% unique(clusterplotHeader$clusterPlot), ]
  # Basal area from DBH (cm): pi * (DBH/200)^2.
  vi_c[DBH != 0, BA_TREE := pi * ((DBH/200)^2)]
  # remove get_vars function as SP0 remains same as that in vi_pc table
  vi_c[DBH == 0, ':='(SP0 = NA, BA_TREE = NA)]
  # Default tree weight; adjusted below when walkthrough sampling applies.
  vi_c[, TREE_WT := 1]
  if(compilationType == "nonPSP"){
    if(walkThru){
      vi_c[toupper(WALKTHRU_STATUS) == "O", TREE_WT := 0] # tree is out and is not used
      vi_c[toupper(WALKTHRU_STATUS) == "W", TREE_WT := 2] # tree is double counted under the walkthrough protocol
    }
  }
  # Attach the plot-level attributes needed for the per-hectare factor.
  vi_c <- FAIBBase::merge_dupUpdate(vi_c,
                                    unique(clusterplotHeader[,.(clusterPlot, SAMP_TYP, PLOT_WT,
                                                                BLOWUP_MAIN, BLOWUP_SUBPLOT,
                                                                FIZ, TYPE_CD,
                                                                SAMPLE_ESTABLISHMENT_TYPE,
                                                                DBH_LIMIT_TAG,
                                                                SAMPLE_BREAK_POINT)],
                                           by = "clusterPlot"),
                                    by = "clusterPlot",
                                    all.x = TRUE)
  if(compilationType == "nonPSP"){
    vi_c <- vi_c[DBH >= 4, ] # for nonPSP, only dbh bigger than or equal to 4 cm is valid
    vi_c[, PHF_TREE := FAIBBase::PHFCalculator(sampleType = SAMP_TYP, blowUp = BLOWUP_MAIN,
                                               treeWeight = TREE_WT, plotWeight = PLOT_WT,
                                               treeBasalArea = BA_TREE)]
    # for NFI (F), CMI and YSMI, the plots use a 100 m2 subplot for
    # trees with a dbh < 9, therefore should be extrapolated to 400 m2 (size of large tree plot)
    vi_c[TYPE_CD %in% c("F", "M", "Y", "L") & DBH < 9, PHF_TREE := PHF_TREE*4]
    # 2) FHYSM Only - Can we change the PHF_TREE from 25 to 100 for the 4-9cm DBH deciduous trees in the 5.64m subplot?
    # - The reason why is because there are non-standard diameter limits used in this project.
    #   All conifers >1m height in the 11.28m radius main plot are tagged (all conifers get a
    #   PHF_TREE = 25) while deciduous are tagged using standard YSM/CMI protocol. ie., trees
    #   4-9cm dbh in the 5.64m radius subplot (PHF_TREE = 100), and trees >9cm dbh in the
    #   11.28m radius main plot (PHF_TREE = 25).
    # - It looks like it only impacts one tree so far but these FHYSM samples will be remeasured and we may get more.
    deci_sp <- lookup_species()
    deci_sp <- unique(deci_sp[SP_TYPE == "D"]$SPECIES)
    vi_c[SAMPLE_ESTABLISHMENT_TYPE == "FHYSM" & DBH < 9 & SPECIES %in% deci_sp,
         PHF_TREE := PHF_TREE*4]
  } else {
    # PSP: trees at/above the sample break point use the main-plot blowup;
    # smaller trees fall back to the subplot blowup when one exists.
    vi_c[DBH >= SAMPLE_BREAK_POINT,
         PHF_TREE := FAIBBase::PHFCalculator(sampleType = SAMP_TYP, blowUp = BLOWUP_MAIN,
                                             treeWeight = TREE_WT, plotWeight = 1,
                                             treeBasalArea = BA_TREE)]
    vi_c[DBH < SAMPLE_BREAK_POINT & is.na(PHF_TREE) & !is.na(BLOWUP_SUBPLOT),
         PHF_TREE := FAIBBase::PHFCalculator(sampleType = SAMP_TYP, blowUp = BLOWUP_SUBPLOT,
                                             treeWeight = TREE_WT, plotWeight = 1,
                                             treeBasalArea = BA_TREE)]
  }
  vi_c[BROKEN_TOP_IND == "Y" & !is.na(TREE_LEN), HT_BTOP := TREE_LEN] ## as long as TREE_LEN is available, the break height is TREE_LEN
  vi_c[BROKEN_TOP_IND == "Y" & is.na(TREE_LEN) & !is.na(HEIGHT_TO_BREAK), HT_BTOP := HEIGHT_TO_BREAK] ## otherwise, the height_to_break is used as break height
  if(compilationType == "PSP"){
    # PSP output: no log-level columns; NO_LOGS is fixed at 1.
    vi_c <- vi_c[order(CLSTR_ID, PLOT, TREE_NO),
                 .(CLSTR_ID, PLOT, BEC_ZONE, BEC_SBZ, BEC_VAR, FIZ, TYPE_CD, TREE_NO,
                   SPECIES, SPECIES_ORG, LV_D, S_F, NO_LOGS = 1, TREE_WT, DBH, SP0,
                   BA_TREE, PHF_TREE, HEIGHT = TREE_LEN, BARK_PER, HT_PROJ, DIAM_BTP,
                   BROKEN_TOP_IND, HT_BTOP, MEASUREMENT_ANOMALY_CODE, TREE_PLANTED_IND,
                   TREE_CLASS_CODE, TAGGING_SECTOR_NO, SITE_SECTOR_NO, RESIDUAL)]
    return(vi_c)
  } else {
    # nonPSP output: also carries the LOG_G/LOG_L/LOG_S columns; the ninth
    # log slot is not present in vi_c and is filled with numeric NA.
    vi_c <- vi_c[order(CLSTR_ID, PLOT, TREE_NO),
                 .(CLSTR_ID, PLOT, BEC_ZONE, BEC_SBZ, BEC_VAR, FIZ, TYPE_CD, TREE_NO,
                   SPECIES, SPECIES_ORG, LV_D, S_F, NO_LOGS, TREE_WT, DBH, SP0,
                   BA_TREE, PHF_TREE, HEIGHT = TREE_LEN, BARK_PER, HT_PROJ, DIAM_BTP,
                   BROKEN_TOP_IND, HT_BTOP, MEASUREMENT_ANOMALY_CODE, TREE_PLANTED_IND,
                   TREE_CLASS_CODE, TAGGING_SECTOR_NO, SITE_SECTOR_NO, RESIDUAL,
                   LOG_G_1, LOG_G_2, LOG_G_3, LOG_G_4, LOG_G_5, LOG_G_6, LOG_G_7,
                   LOG_G_8, LOG_G_9 = as.numeric(NA),
                   LOG_L_1, LOG_L_2, LOG_L_3, LOG_L_4, LOG_L_5, LOG_L_6, LOG_L_7,
                   LOG_L_8, LOG_L_9 = as.numeric(NA),
                   LOG_S_1, LOG_S_2, LOG_S_3, LOG_S_4, LOG_S_5, LOG_S_6, LOG_S_7,
                   LOG_S_8, LOG_S_9 = as.numeric(NA))]
    return(vi_c)
  }
}
/R/vicPrep.R
permissive
bcgov/FAIBCompiler
R
false
false
8,302
r
#' prepare vi_c for compilation
#'
#' @description This function selects the tree-level data from vi_c (cardc) based on selected cluster/plot headers.
#'              Additionally, the function calculates basal area and tree per ha factor.
#'
#' @param compilationType character, either \code{PSP} or \code{nonPSP}. If it is \code{PSP}, it
#'                        is consistent with original PSP compiler, otherwise, it
#'                        is consistent with VRI compiler.
#' @param clusterplotHeader data.table, Cluster and plot-level attributes.
#' @param dataSourcePath character, Specifies the path that directs to the compilation_sa.
#'
#' @param walkThru logical, Indicates whether the walkthrough sampling protocol is used.
#'                 Tree weight is determined by walkthrough method. In walkthrough
#'                 method, a tree is identified as \code{NA} (no walkthrough applied),
#'                 \code{O} for out tree (not counted), and \code{W} for double counted tree.
#'
#'
#'
#'
#' @return A data table that contains tree-level information. A log file that describes the detailed process.
#'
#'
#' @importFrom data.table data.table ':=' set rbindlist setnames setkey
#' @importFrom dplyr '%>%'
#' @importFrom FAIBBase merge_dupUpdate PHFCalculator
#' @export
#' @docType methods
#' @rdname vicPrep
#'
#' @author Yong Luo
#'
#'
vicPrep<- function(compilationType, clusterplotHeader, dataSourcePath,
                   walkThru = TRUE){
  # Load the tree-level (card c) data and keep only trees that belong to
  # the selected cluster/plot combinations.
  vi_c <- readRDS(file.path(dataSourcePath, "vi_c.rds")) %>% data.table
  vi_c[, clusterPlot := paste(CLSTR_ID, PLOT, sep = "_")]
  # NOTE(review): clusterplotHeader is a data.table modified by reference
  # here (a clusterPlot column is added), which is visible to the caller.
  clusterplotHeader[, clusterPlot := paste(CLSTR_ID, PLOT, sep = "_")]
  vi_c <- vi_c[clusterPlot %in% unique(clusterplotHeader$clusterPlot), ]
  # Basal area from DBH (cm): pi * (DBH/200)^2.
  vi_c[DBH != 0, BA_TREE := pi * ((DBH/200)^2)]
  # remove get_vars function as SP0 remains same as that in vi_pc table
  vi_c[DBH == 0, ':='(SP0 = NA, BA_TREE = NA)]
  # Default tree weight; adjusted below when walkthrough sampling applies.
  vi_c[, TREE_WT := 1]
  if(compilationType == "nonPSP"){
    if(walkThru){
      vi_c[toupper(WALKTHRU_STATUS) == "O", TREE_WT := 0] # tree is out and is not used
      vi_c[toupper(WALKTHRU_STATUS) == "W", TREE_WT := 2] # tree is double counted under the walkthrough protocol
    }
  }
  # Attach the plot-level attributes needed for the per-hectare factor.
  vi_c <- FAIBBase::merge_dupUpdate(vi_c,
                                    unique(clusterplotHeader[,.(clusterPlot, SAMP_TYP, PLOT_WT,
                                                                BLOWUP_MAIN, BLOWUP_SUBPLOT,
                                                                FIZ, TYPE_CD,
                                                                SAMPLE_ESTABLISHMENT_TYPE,
                                                                DBH_LIMIT_TAG,
                                                                SAMPLE_BREAK_POINT)],
                                           by = "clusterPlot"),
                                    by = "clusterPlot",
                                    all.x = TRUE)
  if(compilationType == "nonPSP"){
    vi_c <- vi_c[DBH >= 4, ] # for nonPSP, only dbh bigger than or equal to 4 cm is valid
    vi_c[, PHF_TREE := FAIBBase::PHFCalculator(sampleType = SAMP_TYP, blowUp = BLOWUP_MAIN,
                                               treeWeight = TREE_WT, plotWeight = PLOT_WT,
                                               treeBasalArea = BA_TREE)]
    # for NFI (F), CMI and YSMI, the plots use a 100 m2 subplot for
    # trees with a dbh < 9, therefore should be extrapolated to 400 m2 (size of large tree plot)
    vi_c[TYPE_CD %in% c("F", "M", "Y", "L") & DBH < 9, PHF_TREE := PHF_TREE*4]
    # 2) FHYSM Only - Can we change the PHF_TREE from 25 to 100 for the 4-9cm DBH deciduous trees in the 5.64m subplot?
    # - The reason why is because there are non-standard diameter limits used in this project.
    #   All conifers >1m height in the 11.28m radius main plot are tagged (all conifers get a
    #   PHF_TREE = 25) while deciduous are tagged using standard YSM/CMI protocol. ie., trees
    #   4-9cm dbh in the 5.64m radius subplot (PHF_TREE = 100), and trees >9cm dbh in the
    #   11.28m radius main plot (PHF_TREE = 25).
    # - It looks like it only impacts one tree so far but these FHYSM samples will be remeasured and we may get more.
    deci_sp <- lookup_species()
    deci_sp <- unique(deci_sp[SP_TYPE == "D"]$SPECIES)
    vi_c[SAMPLE_ESTABLISHMENT_TYPE == "FHYSM" & DBH < 9 & SPECIES %in% deci_sp,
         PHF_TREE := PHF_TREE*4]
  } else {
    # PSP: trees at/above the sample break point use the main-plot blowup;
    # smaller trees fall back to the subplot blowup when one exists.
    vi_c[DBH >= SAMPLE_BREAK_POINT,
         PHF_TREE := FAIBBase::PHFCalculator(sampleType = SAMP_TYP, blowUp = BLOWUP_MAIN,
                                             treeWeight = TREE_WT, plotWeight = 1,
                                             treeBasalArea = BA_TREE)]
    vi_c[DBH < SAMPLE_BREAK_POINT & is.na(PHF_TREE) & !is.na(BLOWUP_SUBPLOT),
         PHF_TREE := FAIBBase::PHFCalculator(sampleType = SAMP_TYP, blowUp = BLOWUP_SUBPLOT,
                                             treeWeight = TREE_WT, plotWeight = 1,
                                             treeBasalArea = BA_TREE)]
  }
  vi_c[BROKEN_TOP_IND == "Y" & !is.na(TREE_LEN), HT_BTOP := TREE_LEN] ## as long as TREE_LEN is available, the break height is TREE_LEN
  vi_c[BROKEN_TOP_IND == "Y" & is.na(TREE_LEN) & !is.na(HEIGHT_TO_BREAK), HT_BTOP := HEIGHT_TO_BREAK] ## otherwise, the height_to_break is used as break height
  if(compilationType == "PSP"){
    # PSP output: no log-level columns; NO_LOGS is fixed at 1.
    vi_c <- vi_c[order(CLSTR_ID, PLOT, TREE_NO),
                 .(CLSTR_ID, PLOT, BEC_ZONE, BEC_SBZ, BEC_VAR, FIZ, TYPE_CD, TREE_NO,
                   SPECIES, SPECIES_ORG, LV_D, S_F, NO_LOGS = 1, TREE_WT, DBH, SP0,
                   BA_TREE, PHF_TREE, HEIGHT = TREE_LEN, BARK_PER, HT_PROJ, DIAM_BTP,
                   BROKEN_TOP_IND, HT_BTOP, MEASUREMENT_ANOMALY_CODE, TREE_PLANTED_IND,
                   TREE_CLASS_CODE, TAGGING_SECTOR_NO, SITE_SECTOR_NO, RESIDUAL)]
    return(vi_c)
  } else {
    # nonPSP output: also carries the LOG_G/LOG_L/LOG_S columns; the ninth
    # log slot is not present in vi_c and is filled with numeric NA.
    vi_c <- vi_c[order(CLSTR_ID, PLOT, TREE_NO),
                 .(CLSTR_ID, PLOT, BEC_ZONE, BEC_SBZ, BEC_VAR, FIZ, TYPE_CD, TREE_NO,
                   SPECIES, SPECIES_ORG, LV_D, S_F, NO_LOGS, TREE_WT, DBH, SP0,
                   BA_TREE, PHF_TREE, HEIGHT = TREE_LEN, BARK_PER, HT_PROJ, DIAM_BTP,
                   BROKEN_TOP_IND, HT_BTOP, MEASUREMENT_ANOMALY_CODE, TREE_PLANTED_IND,
                   TREE_CLASS_CODE, TAGGING_SECTOR_NO, SITE_SECTOR_NO, RESIDUAL,
                   LOG_G_1, LOG_G_2, LOG_G_3, LOG_G_4, LOG_G_5, LOG_G_6, LOG_G_7,
                   LOG_G_8, LOG_G_9 = as.numeric(NA),
                   LOG_L_1, LOG_L_2, LOG_L_3, LOG_L_4, LOG_L_5, LOG_L_6, LOG_L_7,
                   LOG_L_8, LOG_L_9 = as.numeric(NA),
                   LOG_S_1, LOG_S_2, LOG_S_3, LOG_S_4, LOG_S_5, LOG_S_6, LOG_S_7,
                   LOG_S_8, LOG_S_9 = as.numeric(NA))]
    return(vi_c)
  }
}
###################################################################################
################## Exploration of USCG Emissions Data in Louisiana ################
######################### Joan Meiners 2018 #######################################

### This script is for Current YEAR (2018) emissions data. See separate similar scripts for other years ####

# NOTE(review): exploratory, interactive script -- it hard-codes a local
# working directory and calls View(), so it is intended to be run by hand.
setwd("/Users/joanmeiners/Dropbox/NOLA.com/USCG-Emissions/")

#load libraries
library(dplyr)
library(tidyverse)

# load data
CY18_calls = read.csv("CY18_calls.csv", header = TRUE)
CY18_incident_details = read.csv("CY18_incident_details.csv", header = TRUE)
CY18_incident_commons = read.csv("CY18_incident_commons.csv", header = TRUE)
CY18_material_involved = read.csv("CY18_material_involved.csv", header = TRUE)

# join datasets by SEQNOS
# (no `by` is given, so full_join() matches on all shared column names --
# presumably SEQNOS is the common key; verify against the CSV headers.)
CY18 = full_join(CY18_calls, CY18_incident_commons)
CY18 = full_join(CY18, CY18_incident_details)
CY18 = full_join(CY18, CY18_material_involved)
#View(CY18)
dim(CY18)

# explore LA records
sort(table(CY18$RESPONSIBLE_STATE))
sort(table(CY18$LOCATION_STATE))
head(sort(table(CY18$BODY_OF_WATER), decreasing = TRUE), 20)

# how many total spills reported per state
SpillsCY18 = CY18 %>%
  group_by(LOCATION_STATE) %>%
  summarise( TOTAL_Spills = n())
#View(SpillsCY18)

# look at how many were evacuated per state
EvacuationsCY18 = CY18 %>%
  group_by(LOCATION_STATE) %>%
  filter(NUMBER_EVACUATED > 0) %>%
  summarise( EVAC_Spills = n(),
             TOTAL_EVACUATED = sum(NUMBER_EVACUATED, na.rm = TRUE))
#View(EvacuationsCY18)

# Number injured
InjuredCY18 = CY18 %>%
  group_by(LOCATION_STATE) %>%
  filter(NUMBER_INJURED > 0) %>%
  summarise( INJ_Spills = n(),
             TOTAL_INJURED = sum(NUMBER_INJURED, na.rm = TRUE))
#View(InjuredCY18)

# Fatalities by state
FatalitiesCY18 = CY18 %>%
  group_by(LOCATION_STATE) %>%
  filter(NUMBER_FATALITIES > 0) %>%
  summarise( FATAL_Spills = n(),
             TOTAL_FATALITIES = sum(NUMBER_FATALITIES, na.rm = TRUE))
#View(FatalitiesCY18)

# Road closure time by state
Road_closureCY18 = CY18 %>%
  group_by(LOCATION_STATE) %>%
  filter(ROAD_CLOSURE_TIME > 0) %>%
  summarise( ROADCLOSE_Spills = n(),
             TOTAL_ROAD_CLOSURE_TIME = sum(ROAD_CLOSURE_TIME, na.rm = TRUE))
#View(Road_closureCY18)

# Medium description by state
medium_descCY18 = filter(CY18, MEDIUM_DESC == "WATER") %>%
  group_by(LOCATION_STATE) %>%
  summarise( WATER_Spills = n())
View(medium_descCY18)

# sources of spills into MISSISSIPPI description by state
mississippiCY18 = filter(CY18, BODY_OF_WATER == "MISSISSIPPI RIVER") %>%
  group_by(LOCATION_STATE) %>%
  summarise( MISS_Spills = n())
#View(mississippiCY18)

# sources of spills into GULF OF MEXICO description by state
gulfCY18 = filter(CY18, BODY_OF_WATER == "GULF OF MEXICO") %>%
  group_by(LOCATION_STATE) %>%
  summarise( GULF_Spills = n())
#View(gulfCurrent)

# material spilled == OIL: CRUDE
library(stringr)
crudeCY18 = filter(CY18, str_detect(NAME_OF_MATERIAL, "CRUDE") & IF_REACHED_WATER == "YES") %>%
  group_by(LOCATION_STATE) %>%
  summarise( Crude = n())
View(crudeCY18)

# join all state columns
sumCY18 = full_join(SpillsCY18, EvacuationsCY18)
sumCY18 = full_join(sumCY18, InjuredCY18)
sumCY18 = full_join(sumCY18, FatalitiesCY18)
sumCY18 = full_join(sumCY18, Road_closureCY18)
sumCY18 = full_join(sumCY18, medium_descCY18)
sumCY18 = full_join(sumCY18, mississippiCY18)
sumCY18 = full_join(sumCY18, gulfCY18)
sumCY18 = full_join(sumCY18, crudeCY18)
sumCY18["Year"]="2018"
View(sumCY18)

# companies responsible for of spills in Louisiana
companiesCY18 = filter(CY18, LOCATION_STATE == "LA") %>%
  group_by(RESPONSIBLE_COMPANY) %>%
  summarise( CY18_Spills = n())
#View(companiesCY18)
/Emissions_script_CY18.R
no_license
beecycles/USCG-Emissions
R
false
false
3,693
r
###################################################################################
################## Exploration of USCG Emissions Data in Louisiana ################
######################### Joan Meiners 2018 #######################################

### This script is for Current YEAR (2018) emissions data. See separate similar scripts for other years ####

# NOTE(review): exploratory, interactive script -- it hard-codes a local
# working directory and calls View(), so it is intended to be run by hand.
setwd("/Users/joanmeiners/Dropbox/NOLA.com/USCG-Emissions/")

#load libraries
library(dplyr)
library(tidyverse)

# load data
CY18_calls = read.csv("CY18_calls.csv", header = TRUE)
CY18_incident_details = read.csv("CY18_incident_details.csv", header = TRUE)
CY18_incident_commons = read.csv("CY18_incident_commons.csv", header = TRUE)
CY18_material_involved = read.csv("CY18_material_involved.csv", header = TRUE)

# join datasets by SEQNOS
# (no `by` is given, so full_join() matches on all shared column names --
# presumably SEQNOS is the common key; verify against the CSV headers.)
CY18 = full_join(CY18_calls, CY18_incident_commons)
CY18 = full_join(CY18, CY18_incident_details)
CY18 = full_join(CY18, CY18_material_involved)
#View(CY18)
dim(CY18)

# explore LA records
sort(table(CY18$RESPONSIBLE_STATE))
sort(table(CY18$LOCATION_STATE))
head(sort(table(CY18$BODY_OF_WATER), decreasing = TRUE), 20)

# how many total spills reported per state
SpillsCY18 = CY18 %>%
  group_by(LOCATION_STATE) %>%
  summarise( TOTAL_Spills = n())
#View(SpillsCY18)

# look at how many were evacuated per state
EvacuationsCY18 = CY18 %>%
  group_by(LOCATION_STATE) %>%
  filter(NUMBER_EVACUATED > 0) %>%
  summarise( EVAC_Spills = n(),
             TOTAL_EVACUATED = sum(NUMBER_EVACUATED, na.rm = TRUE))
#View(EvacuationsCY18)

# Number injured
InjuredCY18 = CY18 %>%
  group_by(LOCATION_STATE) %>%
  filter(NUMBER_INJURED > 0) %>%
  summarise( INJ_Spills = n(),
             TOTAL_INJURED = sum(NUMBER_INJURED, na.rm = TRUE))
#View(InjuredCY18)

# Fatalities by state
FatalitiesCY18 = CY18 %>%
  group_by(LOCATION_STATE) %>%
  filter(NUMBER_FATALITIES > 0) %>%
  summarise( FATAL_Spills = n(),
             TOTAL_FATALITIES = sum(NUMBER_FATALITIES, na.rm = TRUE))
#View(FatalitiesCY18)

# Road closure time by state
Road_closureCY18 = CY18 %>%
  group_by(LOCATION_STATE) %>%
  filter(ROAD_CLOSURE_TIME > 0) %>%
  summarise( ROADCLOSE_Spills = n(),
             TOTAL_ROAD_CLOSURE_TIME = sum(ROAD_CLOSURE_TIME, na.rm = TRUE))
#View(Road_closureCY18)

# Medium description by state
medium_descCY18 = filter(CY18, MEDIUM_DESC == "WATER") %>%
  group_by(LOCATION_STATE) %>%
  summarise( WATER_Spills = n())
View(medium_descCY18)

# sources of spills into MISSISSIPPI description by state
mississippiCY18 = filter(CY18, BODY_OF_WATER == "MISSISSIPPI RIVER") %>%
  group_by(LOCATION_STATE) %>%
  summarise( MISS_Spills = n())
#View(mississippiCY18)

# sources of spills into GULF OF MEXICO description by state
gulfCY18 = filter(CY18, BODY_OF_WATER == "GULF OF MEXICO") %>%
  group_by(LOCATION_STATE) %>%
  summarise( GULF_Spills = n())
#View(gulfCurrent)

# material spilled == OIL: CRUDE
library(stringr)
crudeCY18 = filter(CY18, str_detect(NAME_OF_MATERIAL, "CRUDE") & IF_REACHED_WATER == "YES") %>%
  group_by(LOCATION_STATE) %>%
  summarise( Crude = n())
View(crudeCY18)

# join all state columns
sumCY18 = full_join(SpillsCY18, EvacuationsCY18)
sumCY18 = full_join(sumCY18, InjuredCY18)
sumCY18 = full_join(sumCY18, FatalitiesCY18)
sumCY18 = full_join(sumCY18, Road_closureCY18)
sumCY18 = full_join(sumCY18, medium_descCY18)
sumCY18 = full_join(sumCY18, mississippiCY18)
sumCY18 = full_join(sumCY18, gulfCY18)
sumCY18 = full_join(sumCY18, crudeCY18)
sumCY18["Year"]="2018"
View(sumCY18)

# companies responsible for of spills in Louisiana
companiesCY18 = filter(CY18, LOCATION_STATE == "LA") %>%
  group_by(RESPONSIBLE_COMPANY) %>%
  summarise( CY18_Spills = n())
#View(companiesCY18)
dt = read.table('FrecoveryDiversity.txt', header=T) dt$Group = ordered(dt$Group, levels=c('CK','E-liquid','Cigarette')) result = kruskal.test(shannon ~ Group, data = dt) kw_result = data.frame(unclass(result), check.names = FALSE, stringsAsFactors = FALSE) write.table(kw_result,file = "Frecovery_kruskal_shannon.txt", sep = "\t",quote = F,col.names=NA) pairwise_wilcox = pairwise.wilcox.test(dt$shannon, dt$Group, p.adjust.method = "BH") pairwise_wilcox = data.frame(unclass(pairwise_wilcox), check.names = FALSE, stringsAsFactors = FALSE) write.table(pairwise_wilcox,file = "Frecovery_pairwise_wilcox_shannon.txt", sep = "\t", quote = F,col.names=NA) dt = read.table('MrecoveryDiversity.txt', header=T) dt$Group = ordered(dt$Group, levels=c('CK','E-liquid','Cigarette')) result = kruskal.test(shannon ~ Group, data = dt) kw_result = data.frame(unclass(result), check.names = FALSE, stringsAsFactors = FALSE) write.table(kw_result,file = "Mrecovery_kruskal_shannon.txt", sep = "\t",quote = F,col.names=NA) pairwise_wilcox = pairwise.wilcox.test(dt$shannon, dt$Group, p.adjust.method = "BH") pairwise_wilcox = data.frame(unclass(pairwise_wilcox), check.names = FALSE, stringsAsFactors = FALSE) write.table(pairwise_wilcox,file = "Mrecovery_pairwise_wilcox_shannon.txt", sep = "\t", quote = F,col.names=NA) dt = read.table('FrecoveryDiversity.txt', header=T) dt$Group = ordered(dt$Group, levels=c('CK','E-liquid','Cigarette')) result = kruskal.test(number ~ Group, data = dt) kw_result = data.frame(unclass(result), check.names = FALSE, stringsAsFactors = FALSE) write.table(kw_result,file = "Frecovery_kruskal_number.txt", sep = "\t",quote = F,col.names=NA) pairwise_wilcox = pairwise.wilcox.test(dt$number, dt$Group, p.adjust.method = "BH") pairwise_wilcox = data.frame(unclass(pairwise_wilcox), check.names = FALSE, stringsAsFactors = FALSE) write.table(pairwise_wilcox,file = "Frecovery_pairwise_wilcox_number.txt", sep = "\t", quote = F,col.names=NA)
/rat/Mshannon_KW.R
no_license
Nonewood/Bioinformatics
R
false
false
1,961
r
dt = read.table('FrecoveryDiversity.txt', header=T) dt$Group = ordered(dt$Group, levels=c('CK','E-liquid','Cigarette')) result = kruskal.test(shannon ~ Group, data = dt) kw_result = data.frame(unclass(result), check.names = FALSE, stringsAsFactors = FALSE) write.table(kw_result,file = "Frecovery_kruskal_shannon.txt", sep = "\t",quote = F,col.names=NA) pairwise_wilcox = pairwise.wilcox.test(dt$shannon, dt$Group, p.adjust.method = "BH") pairwise_wilcox = data.frame(unclass(pairwise_wilcox), check.names = FALSE, stringsAsFactors = FALSE) write.table(pairwise_wilcox,file = "Frecovery_pairwise_wilcox_shannon.txt", sep = "\t", quote = F,col.names=NA) dt = read.table('MrecoveryDiversity.txt', header=T) dt$Group = ordered(dt$Group, levels=c('CK','E-liquid','Cigarette')) result = kruskal.test(shannon ~ Group, data = dt) kw_result = data.frame(unclass(result), check.names = FALSE, stringsAsFactors = FALSE) write.table(kw_result,file = "Mrecovery_kruskal_shannon.txt", sep = "\t",quote = F,col.names=NA) pairwise_wilcox = pairwise.wilcox.test(dt$shannon, dt$Group, p.adjust.method = "BH") pairwise_wilcox = data.frame(unclass(pairwise_wilcox), check.names = FALSE, stringsAsFactors = FALSE) write.table(pairwise_wilcox,file = "Mrecovery_pairwise_wilcox_shannon.txt", sep = "\t", quote = F,col.names=NA) dt = read.table('FrecoveryDiversity.txt', header=T) dt$Group = ordered(dt$Group, levels=c('CK','E-liquid','Cigarette')) result = kruskal.test(number ~ Group, data = dt) kw_result = data.frame(unclass(result), check.names = FALSE, stringsAsFactors = FALSE) write.table(kw_result,file = "Frecovery_kruskal_number.txt", sep = "\t",quote = F,col.names=NA) pairwise_wilcox = pairwise.wilcox.test(dt$number, dt$Group, p.adjust.method = "BH") pairwise_wilcox = data.frame(unclass(pairwise_wilcox), check.names = FALSE, stringsAsFactors = FALSE) write.table(pairwise_wilcox,file = "Frecovery_pairwise_wilcox_number.txt", sep = "\t", quote = F,col.names=NA)
# Logical operators 5 > 3 # greater than 5 >= 3 # greater than or equal 5 < 3 # less than 5 <= 3 # less than or equal 5 == 3 # equal 5 != 3 # not equal "B" == "A" # also for characters, depends on coding !(TRUE) # NOT TRUE | FALSE # OR TRUE & FALSE # AND
/Lecture/w2/code_examples/code_examples/ex_logical.R
no_license
tuananh8497/IDS_FOLDER
R
false
false
258
r
# Logical operators 5 > 3 # greater than 5 >= 3 # greater than or equal 5 < 3 # less than 5 <= 3 # less than or equal 5 == 3 # equal 5 != 3 # not equal "B" == "A" # also for characters, depends on coding !(TRUE) # NOT TRUE | FALSE # OR TRUE & FALSE # AND
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/oxy-db_basics.R
\name{openBaseDB}
\alias{openBaseDB}
\title{Create/open and prepare SQLite database}
\usage{
openBaseDB(outfolder, dbname)
}
\arguments{
\item{outfolder}{Which folder are you building your databases in?}

\item{dbname}{What is the name of the database? (exclude .db)}
}
\value{
Nothing, writes an SQLite database to outfolder
}
\description{
Create/open and prepare SQLite database
}
\seealso{
\code{\link[RSQLite]{SQLite}}

\code{\link[DBI]{dbExecute}}
}
/man/openBaseDB.Rd
no_license
cran/MetaDBparse
R
false
true
547
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/oxy-db_basics.R
\name{openBaseDB}
\alias{openBaseDB}
\title{Create/open and prepare SQLite database}
\usage{
openBaseDB(outfolder, dbname)
}
\arguments{
\item{outfolder}{Which folder are you building your databases in?}

\item{dbname}{What is the name of the database? (exclude .db)}
}
\value{
Nothing, writes an SQLite database to outfolder
}
\description{
Create/open and prepare SQLite database
}
\seealso{
\code{\link[RSQLite]{SQLite}}

\code{\link[DBI]{dbExecute}}
}
library(glmnet) mydata = read.table("../../../../TrainingSet/FullSet/ReliefF/oesophagus.csv",head=T,sep=",") x = as.matrix(mydata[,4:ncol(mydata)]) y = as.matrix(mydata[,1]) set.seed(123) glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.25,family="gaussian",standardize=FALSE) sink('./oesophagus_040.txt',append=TRUE) print(glm$glmnet.fit) sink()
/Model/EN/ReliefF/oesophagus/oesophagus_040.R
no_license
esbgkannan/QSMART
R
false
false
355
r
library(glmnet) mydata = read.table("../../../../TrainingSet/FullSet/ReliefF/oesophagus.csv",head=T,sep=",") x = as.matrix(mydata[,4:ncol(mydata)]) y = as.matrix(mydata[,1]) set.seed(123) glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.25,family="gaussian",standardize=FALSE) sink('./oesophagus_040.txt',append=TRUE) print(glm$glmnet.fit) sink()