content
large_stringlengths
0
6.46M
path
large_stringlengths
3
331
license_type
large_stringclasses
2 values
repo_name
large_stringlengths
5
125
language
large_stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
4
6.46M
extension
large_stringclasses
75 values
text
stringlengths
0
6.46M
\name{FreqSurv_HReg} \alias{FreqSurv_HReg} %- Also NEED an '\alias' for EACH other topic documented here. \title{ The function to fit parametric Weibull models for the frequentist analysis of univariate survival data. } \description{ Independent univariate right-censored survival data can be analyzed using hierarchical models. } \usage{ FreqSurv_HReg(Formula, data, na.action = "na.fail", subset=NULL) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{Formula}{ a \code{Formula} object of the form \eqn{y}+\eqn{\delta} ~ \eqn{x}. See Details and Examples below. } \item{data}{ a data.frame in which to interpret the variables named in \code{Formula}. } \item{na.action}{ how NAs are treated. See \code{model.frame}. } \item{subset}{ a specification of the rows to be used: defaults to all rows. See \code{model.frame}. } } \details{ See \code{\link{BayesSurv_HReg}} for a detailed description of the models. } \value{ \code{FreqSurv_HReg} returns an object of class \code{Freq_HReg}. \cr } \references{ Lee, K. H., Haneuse, S., Schrag, D., and Dominici, F. (2015), Bayesian semiparametric analysis of semicompeting risks data: investigating hospital readmission after a pancreatic cancer diagnosis, \emph{Journal of the Royal Statistical Society: Series C}, 64, 2, 253-273.\cr \cr Alvares, D., Haneuse, S., Lee, C., Lee, K. H. (2018+), SemiCompRisks: an R package for independent and cluster-correlated analyses of semi-competing risks data, \emph{submitted}, arXiv:1801.03567. \cr } \author{ Sebastien Haneuse and Kyu Ha Lee\cr Maintainer: Kyu Ha Lee <klee@hsph.harvard.edu> } %\note{ %further notes %} %% ~Make other sections like Warning with \section{Warning }{....} ~ \seealso{ \code{\link{print.Freq_HReg}}, \code{\link{summary.Freq_HReg}}, \code{\link{predict.Freq_HReg}}, \code{\link{BayesSurv_HReg}}. 
} \examples{ \dontrun{ # loading a data set data(survData) form <- Formula(time + event ~ cov1 + cov2) fit_WB <- FreqSurv_HReg(form, data=survData) fit_WB summ.fit_WB <- summary(fit_WB); names(summ.fit_WB) summ.fit_WB pred_WB <- predict(fit_WB, tseq=seq(from=0, to=30, by=5)) plot(pred_WB, plot.est="Haz") plot(pred_WB, plot.est="Surv") } } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{ frequentist framework } \keyword{ univariate analysis } \keyword{ hazard regression models }% __ONLY ONE__ keyword per line
/man/FreqSurv_HReg.Rd
no_license
csetraynor/SemiCompRisks
R
false
false
2,427
rd
\name{FreqSurv_HReg} \alias{FreqSurv_HReg} %- Also NEED an '\alias' for EACH other topic documented here. \title{ The function to fit parametric Weibull models for the frequentist analysis of univariate survival data. } \description{ Independent univariate right-censored survival data can be analyzed using hierarchical models. } \usage{ FreqSurv_HReg(Formula, data, na.action = "na.fail", subset=NULL) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{Formula}{ a \code{Formula} object of the form \eqn{y}+\eqn{\delta} ~ \eqn{x}. See Details and Examples below. } \item{data}{ a data.frame in which to interpret the variables named in \code{Formula}. } \item{na.action}{ how NAs are treated. See \code{model.frame}. } \item{subset}{ a specification of the rows to be used: defaults to all rows. See \code{model.frame}. } } \details{ See \code{\link{BayesSurv_HReg}} for a detailed description of the models. } \value{ \code{FreqSurv_HReg} returns an object of class \code{Freq_HReg}. \cr } \references{ Lee, K. H., Haneuse, S., Schrag, D., and Dominici, F. (2015), Bayesian semiparametric analysis of semicompeting risks data: investigating hospital readmission after a pancreatic cancer diagnosis, \emph{Journal of the Royal Statistical Society: Series C}, 64, 2, 253-273.\cr \cr Alvares, D., Haneuse, S., Lee, C., Lee, K. H. (2018+), SemiCompRisks: an R package for independent and cluster-correlated analyses of semi-competing risks data, \emph{submitted}, arXiv:1801.03567. \cr } \author{ Sebastien Haneuse and Kyu Ha Lee\cr Maintainer: Kyu Ha Lee <klee@hsph.harvard.edu> } %\note{ %further notes %} %% ~Make other sections like Warning with \section{Warning }{....} ~ \seealso{ \code{\link{print.Freq_HReg}}, \code{\link{summary.Freq_HReg}}, \code{\link{predict.Freq_HReg}}, \code{\link{BayesSurv_HReg}}. 
} \examples{ \dontrun{ # loading a data set data(survData) form <- Formula(time + event ~ cov1 + cov2) fit_WB <- FreqSurv_HReg(form, data=survData) fit_WB summ.fit_WB <- summary(fit_WB); names(summ.fit_WB) summ.fit_WB pred_WB <- predict(fit_WB, tseq=seq(from=0, to=30, by=5)) plot(pred_WB, plot.est="Haz") plot(pred_WB, plot.est="Surv") } } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{ frequentist framework } \keyword{ univariate analysis } \keyword{ hazard regression models }% __ONLY ONE__ keyword per line
# Extracted example script from the beadarrayMSV package documentation for
# findPolyploidClusters (k-means clustering of bead-array genotyping data).
# The example body below is kept commented out (## Not run / ##D), as in the
# package's generated example file; only the library() call executes.
library(beadarrayMSV)

### Name: findPolyploidClusters
### Title: K-means clustering
### Aliases: findPolyploidClusters

### ** Examples

## Not run: 
##D #Read pre-processed data directly into AlleleSetIllumina object
##D rPath <- system.file("extdata", package="beadarrayMSV")
##D normOpts <- setNormOptions()
##D dataFiles <- makeFilenames('testdata',normOpts,rPath)
##D beadFile <- paste(rPath,'beadData_testdata.txt',sep='/')
##D beadInfo <- read.table(beadFile,sep='\t',header=TRUE,as.is=TRUE)
##D BSRed <- createAlleleSetFromFiles(dataFiles[1:4],markers=1:10,beadInfo=beadInfo)
##D 
##D #Generate list of marker categories
##D gO <- setGenoOptions()
##D polyCent <- generatePolyCenters(ploidy=gO$ploidy)
##D print(polyCent)
##D 
##D #Estimate list of likely center points for an MSV-5 marker
##D ind <- 2
##D dev.new(); par(mfrow=c(3,1),mai=c(.5,.5,.5,.1))
##D polyCl <- findClusters(assayData(BSRed)$theta[ind,],
##D     breaks=seq(-0.25,1.25,0.04),plot=TRUE)
##D print(polyCl)
##D 
##D #Clustering using all samples
##D sclR <- median(assayData(BSRed)$intensity[ind,],na.rm=TRUE)*ind*gO$rPenalty
##D X <- matrix(cbind(assayData(BSRed)$theta[ind,],
##D     assayData(BSRed)$intensity[ind,]/sclR,
##D     assayData(BSRed)$SE[ind,]),ncol=3)
##D clObj <- findPolyploidClusters(X,centers=polyCl$clPeaks,plot=TRUE)
##D plot(X[,1],X[,2],col=clObj$cluster)
##D print(clObj)
## End(Not run)
/data/genthat_extracted_code/beadarrayMSV/examples/findPolyploidClusters.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
1,423
r
# Extracted example script from the beadarrayMSV package documentation for
# findPolyploidClusters (k-means clustering of bead-array genotyping data).
# The example body below is kept commented out (## Not run / ##D), as in the
# package's generated example file; only the library() call executes.
library(beadarrayMSV)

### Name: findPolyploidClusters
### Title: K-means clustering
### Aliases: findPolyploidClusters

### ** Examples

## Not run: 
##D #Read pre-processed data directly into AlleleSetIllumina object
##D rPath <- system.file("extdata", package="beadarrayMSV")
##D normOpts <- setNormOptions()
##D dataFiles <- makeFilenames('testdata',normOpts,rPath)
##D beadFile <- paste(rPath,'beadData_testdata.txt',sep='/')
##D beadInfo <- read.table(beadFile,sep='\t',header=TRUE,as.is=TRUE)
##D BSRed <- createAlleleSetFromFiles(dataFiles[1:4],markers=1:10,beadInfo=beadInfo)
##D 
##D #Generate list of marker categories
##D gO <- setGenoOptions()
##D polyCent <- generatePolyCenters(ploidy=gO$ploidy)
##D print(polyCent)
##D 
##D #Estimate list of likely center points for an MSV-5 marker
##D ind <- 2
##D dev.new(); par(mfrow=c(3,1),mai=c(.5,.5,.5,.1))
##D polyCl <- findClusters(assayData(BSRed)$theta[ind,],
##D     breaks=seq(-0.25,1.25,0.04),plot=TRUE)
##D print(polyCl)
##D 
##D #Clustering using all samples
##D sclR <- median(assayData(BSRed)$intensity[ind,],na.rm=TRUE)*ind*gO$rPenalty
##D X <- matrix(cbind(assayData(BSRed)$theta[ind,],
##D     assayData(BSRed)$intensity[ind,]/sclR,
##D     assayData(BSRed)$SE[ind,]),ncol=3)
##D clObj <- findPolyploidClusters(X,centers=polyCl$clPeaks,plot=TRUE)
##D plot(X[,1],X[,2],col=clObj$cluster)
##D print(clObj)
## End(Not run)
# calculate_rates.R
# Read the genetic map for chromosome 1 and append a recombination-rate
# column (cM/Mb) computed between each position and the next one.
AAmap <- read.table("AAmap.chr1.txt", header=TRUE)

# Difference in genetic position (cM) and physical position (Mb) between
# current and next row.
# FIX: the original computed `x[-1] - x`, subtracting vectors of unequal
# lengths (n-1 vs n); R silently recycles the shorter one, producing an
# incorrect wrapped-around last element. diff() gives the intended
# consecutive differences.
cM <- diff(AAmap$Genetic_Map.cM.)
Mb <- diff(AAmap$Physical_Position_Build36.hg18.) / 1e6

# The final row has no following position, so its rate is undefined (NA);
# pad with NA so the new column's length matches nrow(AAmap).
AAmap$recombinationrate <- c(cM / Mb, NA)
/SLiM_ratesfiles/calculate_rates.R
no_license
imanhamid/misc_scripts
R
false
false
351
r
# calculate_rates.R
# Read the genetic map for chromosome 1 and append a recombination-rate
# column (cM/Mb) computed between each position and the next one.
AAmap <- read.table("AAmap.chr1.txt", header=TRUE)

# Difference in genetic position (cM) and physical position (Mb) between
# current and next row.
# FIX: the original computed `x[-1] - x`, subtracting vectors of unequal
# lengths (n-1 vs n); R silently recycles the shorter one, producing an
# incorrect wrapped-around last element. diff() gives the intended
# consecutive differences.
cM <- diff(AAmap$Genetic_Map.cM.)
Mb <- diff(AAmap$Physical_Position_Build36.hg18.) / 1e6

# The final row has no following position, so its rate is undefined (NA);
# pad with NA so the new column's length matches nrow(AAmap).
AAmap$recombinationrate <- c(cM / Mb, NA)
################################################################################
################################################################################
###                 SIMPLE VERTICAL MULTIPLE-RESPONSE                        ###
###                            BAR CHART                                     ###
################################################################################
################################################################################
# This script plots a simple vertical bar chart for multiple categories or
# responses in an external file. Specifically, it works with survey data and
# it allows the user to compare a group of questions and their answers.
# It modifies the code created by Thomas Rahlf in his book
# "Data Visualization with R" of 2017, in order to make it easier and clearer
# to work with survey data from any source.
# Date of last modification: January 18th, 2019
################################################################################
###                   STEP 0. CLEAN THE ENVIRONMENT                          ###
################################################################################
# To avoid any conflicts with other packages, let's assure ourselves the-------
# R environment is clean from objects and from certain packages.
# 0.1 Clean environment---------------------------------------------------------
rm(list = ls())
# 0.2 Detach any possible conflicting packages----------------------------------
# (memisc masks/conflicts with these if they are attached.)
if ("memisc" %in% (.packages())) {
    detach("package:memisc", unload = TRUE)
}
if ("foreign" %in% (.packages())) {
    detach("package:foreign", unload = TRUE)
}
if ("Hmisc" %in% (.packages())) {
    detach("package:Hmisc", unload = TRUE)
}
if ("haven" %in% (.packages())) {
    detach("package:haven", unload = TRUE)
}
################################################################################
###                  STEP 1. IMPORT DATA FROM FILE                           ###
################################################################################
# 1.1 Load the memisc package. This package is the fastest and most------------
# space-efficient for working with SPSS data. It also allows you to keep the
# labels from imported data in order to use them in the chart labelling later.
library("memisc")
# 1.2 Unzip the downloaded survey file.-----------------------------------------
# We assume it was manually downloaded to the directory "./SAV_files/..."
# which you should create in your project directory or will set as your working
# directory.
zipFile <- "./SAV_files/EVS/datasets/EVS_80-08.zip"
exDir <- "./SAV_files/EVS/datasets/"
unzip(zipfile = zipFile, exdir = exDir, list = TRUE)
datasetName <- "ZA4800_v4-0-0.sav"
unzip(zipfile = zipFile, exdir = exDir, list = FALSE, files = datasetName)
# 1.3 Read the data into R and find the relevant variables for our analysis-----
# Specifically, we would like to know what each variable from our imported
# data set is measuring, in order to select the relevant variables.
data <- spss.system.file(paste(exDir, datasetName, sep = ""))
description(data)
codebook <- description(data)
# 1.4 Subset relevant variables for analysis.-----------------------------------
# Once we have already reviewed the variables and what they are
# measuring, we can subset the relevant survey questions into our data.
# These might include, for example, the year, country,
# thematic questions as well as socio demographic
# questions such as a person's sex or age (v240).
year <- labels(Table(data[["year"]])[1])
data <- subset(data, select = c(country, v1, v2, v3, v4, v5, v6))
# 1.5 Retrieve the country names.-----------------------------------------------
# To know which ones were surveyed in this wave/dataset.
countries <- names((table(data$country)))
countries # Display the countries available
# 1.6 Select a country and subset the data using the selected country.----------
# Once we have subsetted the data using the country we chose, we drop the
# country column from our dataset. To save space on the hard disk, we delete
# the unzipped file.
selectedCountry <- "Germany"
data <- subset(data, country == selectedCountry)
data$country <- NULL
file.remove(paste(exDir, datasetName, sep = ""))
selectedCountry <- "Alemania"
################################################################################
###                   STEP 2. FORMAT DATA FOR PLOT                           ###
################################################################################
# 2.1 Define the number of categories according to questions subsetted.---------
barLabels <- unlist(annotation(data), use.names = TRUE)
varLabels <- names(barLabels)
varLabels <- gsub(pattern = ".description", replacement = "", x = varLabels)
n_sections <- ncol(data)
# 2.2. Format string labels.----------------------------------------------------
# We break lines every 2 words in order to adapt the categories' labels to the
# plot, and we also capitalize each label.
words <- 2
for (i in 1:n_sections) {
    char_string <- substring(barLabels[i], 1:nchar(barLabels[i]),
                             1:nchar(barLabels[i]))
    index <- which(char_string == " ")
    breaks <- length(index) %/% words
    for (lines in 1:breaks) {
        position <- lines*words
        substr(barLabels[i], index[position], index[position]) <- "\n"
    }
}
barLabels <- Hmisc::capitalize(barLabels)
# 2.3 Calculate the values for each category.-----------------------------------
# First, we create an empty data frame with a variable for each kind of
# response we are interested in showing in the chart.
# 2.3.1 Look for the relevant categories ---------------------------------------
Table(data[[1]], include.missings = TRUE)
# 2.3.2 Aggregate the values for each category we've chosen.--------------------
fData <- data.frame(variables = varLabels,
                    `important` = vector(length = length(varLabels), mode = "numeric"),
                    `not important` = vector(length = length(varLabels), mode = "numeric"),
                    `don't know-no answer` = vector(length = length(varLabels), mode = "numeric"))
for (i in 1:n_sections) {
    x <- Table(data[[i]])["very important"] + Table(data[[i]])["quite important"]
    y <- Table(data[[i]])["not at all important"] + Table(data[[i]])["not important"]
    z <- Table(data[[i]], include.missings = TRUE)["*don't know"] +
        Table(data[[i]], include.missings = TRUE)["*no answer"]
    fData[i, 2] <- x
    fData[i, 3] <- y
    fData[i, 4] <- z
}
# Convert the counts to percentages of the total responses per question.
fData$total <- rowSums(fData[,2:4])
totalSurveys <- fData$total[1]
fData <- cbind(variables = fData$variables, (fData[,2:4]/fData$total)*100)
################################################################################
###                        STEP 3. PLOT CHART                                ###
################################################################################
# 3.1 Prepare plot file and area.-----------------------------------------------
inch <- 2.54
fileName <- paste("PDF_files/",selectedCountry, "_", year,"_barchart_v.pdf",
                  sep = "")
cairo_pdf(filename = fileName, width = 27.94/inch, height = 21.59/inch,
          bg = "grey98")
par(omi = c(2/inch, 2/inch, 2/inch, 2/inch),
    mai = c(2/inch, 2/inch, 2/inch, 2/inch),
    family = "Lato Light", las = 1, mgp = c(3, 3, 0))
xlength <- n_sections + 0.15
ylength <- 100
barwidth <- 0.85
valuespace <- 0.15/barwidth
# 3.2 Draw the barplot using formatted data-------------------------------------
values <- fData[nrow(fData):1, "important"]
#categories <- varLabels[length(varLabels):1]
categories <- barLabels[length(barLabels):1]
bp <- barplot(height = values, width = barwidth, space = valuespace,
              names.arg = FALSE, horiz = FALSE, border = NA,
              xlim = c(0, xlength), ylim = c(0, ylength), axes = FALSE)
# Highlight the tallest bar in magenta by over-plotting it.
myColor <- rgb(226, 0, 156, maxColorValue = 255)
values2 <- vector(mode = "numeric", length = n_sections)
values2[which.max(values)] <- max(values)
bp <- barplot(height = values2, width = barwidth, space = valuespace,
              names.arg = FALSE, horiz = FALSE, border = NA,
              xlim = c(0, xlength), ylim = c(0, ylength), col = myColor,
              axes = FALSE, add = TRUE)
for (i in 1:length(barLabels)) {
    # Bold font marks the tallest bar's label and value.
    if (i == which.max(values)) {
        myFont <- "Lato Bold"
    } else {
        myFont <- "Lato Light"
    }
    # NOTE(review): `lines` here is the leftover value from the inner loop in
    # step 2.2 (the last label's line-break count); relying on it for label
    # placement looks fragile — confirm this is intentional.
    text(labels = categories[i], x = bp[i], y = (-lines*3), xpd = TRUE,
         adj = 0.5, family = myFont, cex = 0.70)
    text(labels = format(round(values[i], 1), nsmall = 1), bp[i], 10,
         family = myFont, cex = 1.25,
         col = ifelse(i == which.max(values), "white", "black"))
}
# 3.3 To recognize different measures of the values, draw ----------------------
# a background with rectangles of two light but different colors.
color1 <- rgb(red = 191, green = 239, blue = 255, alpha = 80,
              maxColorValue = 255)
color2 <- rgb(red = 191, green = 239, blue = 255, alpha = 110,
              maxColorValue = 255)
rect(xleft = 0, ybottom = 0, xright = xlength, ytop = 20, col = color1,
     border = NA)
rect(xleft = 0, ybottom = 20, xright = xlength, ytop = 40, col = color2,
     border = NA)
rect(xleft = 0, ybottom = 40, xright = xlength, ytop = 60, col = color1,
     border = NA)
rect(xleft = 0, ybottom = 60, xright = xlength, ytop = 80, col = color2,
     border = NA)
rect(xleft = 0, ybottom = 80, xright = xlength, ytop = 100, col = color1,
     border = NA)
# 3.4 Draw a line to see the majority-------------------------------------------
arrows(x0 = 0, x1 = xlength, y0 = 50, y1 = 50, lwd = 1.5, length = 0,
       col = "skyblue3")
arrows(x0 = 0, x1 = -0.1, y0 = 50, y1 = 50, lwd = 2, length = 0)
arrows(x0 = xlength, y0 = 50, x1 = xlength + 0.1, y1 = 50, lwd = 2, length = 0)
text(labels = "Mayoría", x = xlength + (valuespace*1.5), y = 48, adj = 0.5,
     xpd = TRUE, cex = 0.65, font = 3)
text(labels = "50%", x = xlength + (valuespace*1.5), y = 52, adj = 0.5,
     xpd = TRUE, cex = 0.65, font = 3)
# 2.9 Titling and further labeling----------------------------------------------
chartTitle <- paste("\"Qué tan importante es...\", ", selectedCountry, year,
                    sep = "")
chartSubtitle <- "Muy importante o algo importante:"
scaleTag <- "Valores en porcentaje (%)"
sourceTag <- paste(format(totalSurveys, big.mark = ","),
                   " encuestas. Fuente: Estudio de Valores Europeo, ", year,
                   ".", sep = "")
Sys.setlocale("LC_ALL", "es_ES.UTF-8") # Spanish locale for date formatting
date <- Sys.Date()
date <- format(date, "%d de %B de %Y")
sourceTag2 <- paste("Base de datos: ", datasetName,
                    ". Consultada en www.gesis.org el ", date, ".", sep = "")
mtext(chartTitle, side = 3, line = 0, adj = 0, cex = 1.5,
      family = "Lato Black", outer = TRUE)
mtext(chartSubtitle, side = 3, line = -2, adj = 0, cex = 1.25, outer = TRUE)
text(labels = scaleTag, x = xlength, y = ylength*(1.01), adj = 1, xpd = TRUE,
     cex = 0.65, font = 3)
mtext(text = c(seq(0, 100, 20)), side = 2, line = 0, at = c(seq(0, 100, 20)))
mtext(sourceTag, side = 1, line = 0, adj = 1, cex = 0.85, outer = TRUE,
      font = 3 )
mtext(sourceTag2, side = 1, line = 1, adj = 1, cex = 0.85, outer = TRUE,
      font = 3 )
dev.off()
/Barcharts/Barchart Multiple Categories Vertical .R
no_license
sicabi/RGraphics
R
false
false
11,586
r
################################################################################
################################################################################
###                 SIMPLE VERTICAL MULTIPLE-RESPONSE                        ###
###                            BAR CHART                                     ###
################################################################################
################################################################################
# This script plots a simple vertical bar chart for multiple categories or
# responses in an external file. Specifically, it works with survey data and
# it allows the user to compare a group of questions and their answers.
# It modifies the code created by Thomas Rahlf in his book
# "Data Visualization with R" of 2017, in order to make it easier and clearer
# to work with survey data from any source.
# Date of last modification: January 18th, 2019
################################################################################
###                   STEP 0. CLEAN THE ENVIRONMENT                          ###
################################################################################
# To avoid any conflicts with other packages, let's assure ourselves the-------
# R environment is clean from objects and from certain packages.
# 0.1 Clean environment---------------------------------------------------------
rm(list = ls())
# 0.2 Detach any possible conflicting packages----------------------------------
# (memisc masks/conflicts with these if they are attached.)
if ("memisc" %in% (.packages())) {
    detach("package:memisc", unload = TRUE)
}
if ("foreign" %in% (.packages())) {
    detach("package:foreign", unload = TRUE)
}
if ("Hmisc" %in% (.packages())) {
    detach("package:Hmisc", unload = TRUE)
}
if ("haven" %in% (.packages())) {
    detach("package:haven", unload = TRUE)
}
################################################################################
###                  STEP 1. IMPORT DATA FROM FILE                           ###
################################################################################
# 1.1 Load the memisc package. This package is the fastest and most------------
# space-efficient for working with SPSS data. It also allows you to keep the
# labels from imported data in order to use them in the chart labelling later.
library("memisc")
# 1.2 Unzip the downloaded survey file.-----------------------------------------
# We assume it was manually downloaded to the directory "./SAV_files/..."
# which you should create in your project directory or will set as your working
# directory.
zipFile <- "./SAV_files/EVS/datasets/EVS_80-08.zip"
exDir <- "./SAV_files/EVS/datasets/"
unzip(zipfile = zipFile, exdir = exDir, list = TRUE)
datasetName <- "ZA4800_v4-0-0.sav"
unzip(zipfile = zipFile, exdir = exDir, list = FALSE, files = datasetName)
# 1.3 Read the data into R and find the relevant variables for our analysis-----
# Specifically, we would like to know what each variable from our imported
# data set is measuring, in order to select the relevant variables.
data <- spss.system.file(paste(exDir, datasetName, sep = ""))
description(data)
codebook <- description(data)
# 1.4 Subset relevant variables for analysis.-----------------------------------
# Once we have already reviewed the variables and what they are
# measuring, we can subset the relevant survey questions into our data.
# These might include, for example, the year, country,
# thematic questions as well as socio demographic
# questions such as a person's sex or age (v240).
year <- labels(Table(data[["year"]])[1])
data <- subset(data, select = c(country, v1, v2, v3, v4, v5, v6))
# 1.5 Retrieve the country names.-----------------------------------------------
# To know which ones were surveyed in this wave/dataset.
countries <- names((table(data$country)))
countries # Display the countries available
# 1.6 Select a country and subset the data using the selected country.----------
# Once we have subsetted the data using the country we chose, we drop the
# country column from our dataset. To save space on the hard disk, we delete
# the unzipped file.
selectedCountry <- "Germany"
data <- subset(data, country == selectedCountry)
data$country <- NULL
file.remove(paste(exDir, datasetName, sep = ""))
selectedCountry <- "Alemania"
################################################################################
###                   STEP 2. FORMAT DATA FOR PLOT                           ###
################################################################################
# 2.1 Define the number of categories according to questions subsetted.---------
barLabels <- unlist(annotation(data), use.names = TRUE)
varLabels <- names(barLabels)
varLabels <- gsub(pattern = ".description", replacement = "", x = varLabels)
n_sections <- ncol(data)
# 2.2. Format string labels.----------------------------------------------------
# We break lines every 2 words in order to adapt the categories' labels to the
# plot, and we also capitalize each label.
words <- 2
for (i in 1:n_sections) {
    char_string <- substring(barLabels[i], 1:nchar(barLabels[i]),
                             1:nchar(barLabels[i]))
    index <- which(char_string == " ")
    breaks <- length(index) %/% words
    for (lines in 1:breaks) {
        position <- lines*words
        substr(barLabels[i], index[position], index[position]) <- "\n"
    }
}
barLabels <- Hmisc::capitalize(barLabels)
# 2.3 Calculate the values for each category.-----------------------------------
# First, we create an empty data frame with a variable for each kind of
# response we are interested in showing in the chart.
# 2.3.1 Look for the relevant categories ---------------------------------------
Table(data[[1]], include.missings = TRUE)
# 2.3.2 Aggregate the values for each category we've chosen.--------------------
fData <- data.frame(variables = varLabels,
                    `important` = vector(length = length(varLabels), mode = "numeric"),
                    `not important` = vector(length = length(varLabels), mode = "numeric"),
                    `don't know-no answer` = vector(length = length(varLabels), mode = "numeric"))
for (i in 1:n_sections) {
    x <- Table(data[[i]])["very important"] + Table(data[[i]])["quite important"]
    y <- Table(data[[i]])["not at all important"] + Table(data[[i]])["not important"]
    z <- Table(data[[i]], include.missings = TRUE)["*don't know"] +
        Table(data[[i]], include.missings = TRUE)["*no answer"]
    fData[i, 2] <- x
    fData[i, 3] <- y
    fData[i, 4] <- z
}
# Convert the counts to percentages of the total responses per question.
fData$total <- rowSums(fData[,2:4])
totalSurveys <- fData$total[1]
fData <- cbind(variables = fData$variables, (fData[,2:4]/fData$total)*100)
################################################################################
###                        STEP 3. PLOT CHART                                ###
################################################################################
# 3.1 Prepare plot file and area.-----------------------------------------------
inch <- 2.54
fileName <- paste("PDF_files/",selectedCountry, "_", year,"_barchart_v.pdf",
                  sep = "")
cairo_pdf(filename = fileName, width = 27.94/inch, height = 21.59/inch,
          bg = "grey98")
par(omi = c(2/inch, 2/inch, 2/inch, 2/inch),
    mai = c(2/inch, 2/inch, 2/inch, 2/inch),
    family = "Lato Light", las = 1, mgp = c(3, 3, 0))
xlength <- n_sections + 0.15
ylength <- 100
barwidth <- 0.85
valuespace <- 0.15/barwidth
# 3.2 Draw the barplot using formatted data-------------------------------------
values <- fData[nrow(fData):1, "important"]
#categories <- varLabels[length(varLabels):1]
categories <- barLabels[length(barLabels):1]
bp <- barplot(height = values, width = barwidth, space = valuespace,
              names.arg = FALSE, horiz = FALSE, border = NA,
              xlim = c(0, xlength), ylim = c(0, ylength), axes = FALSE)
# Highlight the tallest bar in magenta by over-plotting it.
myColor <- rgb(226, 0, 156, maxColorValue = 255)
values2 <- vector(mode = "numeric", length = n_sections)
values2[which.max(values)] <- max(values)
bp <- barplot(height = values2, width = barwidth, space = valuespace,
              names.arg = FALSE, horiz = FALSE, border = NA,
              xlim = c(0, xlength), ylim = c(0, ylength), col = myColor,
              axes = FALSE, add = TRUE)
for (i in 1:length(barLabels)) {
    # Bold font marks the tallest bar's label and value.
    if (i == which.max(values)) {
        myFont <- "Lato Bold"
    } else {
        myFont <- "Lato Light"
    }
    # NOTE(review): `lines` here is the leftover value from the inner loop in
    # step 2.2 (the last label's line-break count); relying on it for label
    # placement looks fragile — confirm this is intentional.
    text(labels = categories[i], x = bp[i], y = (-lines*3), xpd = TRUE,
         adj = 0.5, family = myFont, cex = 0.70)
    text(labels = format(round(values[i], 1), nsmall = 1), bp[i], 10,
         family = myFont, cex = 1.25,
         col = ifelse(i == which.max(values), "white", "black"))
}
# 3.3 To recognize different measures of the values, draw ----------------------
# a background with rectangles of two light but different colors.
color1 <- rgb(red = 191, green = 239, blue = 255, alpha = 80,
              maxColorValue = 255)
color2 <- rgb(red = 191, green = 239, blue = 255, alpha = 110,
              maxColorValue = 255)
rect(xleft = 0, ybottom = 0, xright = xlength, ytop = 20, col = color1,
     border = NA)
rect(xleft = 0, ybottom = 20, xright = xlength, ytop = 40, col = color2,
     border = NA)
rect(xleft = 0, ybottom = 40, xright = xlength, ytop = 60, col = color1,
     border = NA)
rect(xleft = 0, ybottom = 60, xright = xlength, ytop = 80, col = color2,
     border = NA)
rect(xleft = 0, ybottom = 80, xright = xlength, ytop = 100, col = color1,
     border = NA)
# 3.4 Draw a line to see the majority-------------------------------------------
arrows(x0 = 0, x1 = xlength, y0 = 50, y1 = 50, lwd = 1.5, length = 0,
       col = "skyblue3")
arrows(x0 = 0, x1 = -0.1, y0 = 50, y1 = 50, lwd = 2, length = 0)
arrows(x0 = xlength, y0 = 50, x1 = xlength + 0.1, y1 = 50, lwd = 2, length = 0)
text(labels = "Mayoría", x = xlength + (valuespace*1.5), y = 48, adj = 0.5,
     xpd = TRUE, cex = 0.65, font = 3)
text(labels = "50%", x = xlength + (valuespace*1.5), y = 52, adj = 0.5,
     xpd = TRUE, cex = 0.65, font = 3)
# 2.9 Titling and further labeling----------------------------------------------
chartTitle <- paste("\"Qué tan importante es...\", ", selectedCountry, year,
                    sep = "")
chartSubtitle <- "Muy importante o algo importante:"
scaleTag <- "Valores en porcentaje (%)"
sourceTag <- paste(format(totalSurveys, big.mark = ","),
                   " encuestas. Fuente: Estudio de Valores Europeo, ", year,
                   ".", sep = "")
Sys.setlocale("LC_ALL", "es_ES.UTF-8") # Spanish locale for date formatting
date <- Sys.Date()
date <- format(date, "%d de %B de %Y")
sourceTag2 <- paste("Base de datos: ", datasetName,
                    ". Consultada en www.gesis.org el ", date, ".", sep = "")
mtext(chartTitle, side = 3, line = 0, adj = 0, cex = 1.5,
      family = "Lato Black", outer = TRUE)
mtext(chartSubtitle, side = 3, line = -2, adj = 0, cex = 1.25, outer = TRUE)
text(labels = scaleTag, x = xlength, y = ylength*(1.01), adj = 1, xpd = TRUE,
     cex = 0.65, font = 3)
mtext(text = c(seq(0, 100, 20)), side = 2, line = 0, at = c(seq(0, 100, 20)))
mtext(sourceTag, side = 1, line = 0, adj = 1, cex = 0.85, outer = TRUE,
      font = 3 )
mtext(sourceTag2, side = 1, line = 1, adj = 1, cex = 0.85, outer = TRUE,
      font = 3 )
dev.off()
# plot2.R -- line chart of Global Active Power for 2007-02-01 and 2007-02-02.

## 1. Load data
### 1.1 Path of the raw data file
data_file <- "./household_power_consumption.txt"

### 1.2 Read the raw measurements ("?" marks missing values)
power_raw <- read.table(data_file, header=TRUE, sep=";", na.strings = "?")

### 1.3 Keep only the two requested dates
power_feb <- power_raw[power_raw$Date %in% c("1/2/2007","2/2/2007"),]

### 1.4 Combine the Date and Time columns into date-time objects
timestamps <- strptime(paste(power_feb$Date, power_feb$Time, sep=" "),
                       "%d/%m/%Y %H:%M:%S")

## 2 Second plot (Plot2): global active power over time
plot(timestamps, power_feb$Global_active_power, type = "l",
     ylab = "Global Active Power (kilowatts)", xlab="")

## 3 Copy the screen device to a 480x480 PNG file
dev.copy(png, file = "plot2.png", height = 480, width = 480)
dev.off()
/plot2.R
no_license
Yage0820/ExData_Plotting1
R
false
false
688
r
# plot2.R -- line chart of Global Active Power for 2007-02-01 and 2007-02-02.

## 1. Load data
### 1.1 Path of the raw data file
data_file <- "./household_power_consumption.txt"

### 1.2 Read the raw measurements ("?" marks missing values)
power_raw <- read.table(data_file, header=TRUE, sep=";", na.strings = "?")

### 1.3 Keep only the two requested dates
power_feb <- power_raw[power_raw$Date %in% c("1/2/2007","2/2/2007"),]

### 1.4 Combine the Date and Time columns into date-time objects
timestamps <- strptime(paste(power_feb$Date, power_feb$Time, sep=" "),
                       "%d/%m/%Y %H:%M:%S")

## 2 Second plot (Plot2): global active power over time
plot(timestamps, power_feb$Global_active_power, type = "l",
     ylab = "Global Active Power (kilowatts)", xlab="")

## 3 Copy the screen device to a 480x480 PNG file
dev.copy(png, file = "plot2.png", height = 480, width = 480)
dev.off()
db_get_last_prediction <- function(wt_query, table_cast_results = "CAST_RESULTS", db_config){
    # Fetch the timestamp of the most recent prediction stored for the given
    # turbine (ld_id), fault and type. Returns list(error, data, msg) where
    # `data` is a POSIXlt in UTC, or NULL when no matching row exists.
    # NOTE: single quotes must be used to quote CHAR values for the database.
    caller_name <- match.call()[[1]]

    # Restrict by prediction type, except for "normality" results: that
    # results table has no `type` column.
    type_clause <- paste0(" AND `type` = '", tolower(wt_query$type), "'")
    if ("type" %in% names(wt_query) && wt_query$type == "normality") {
        type_clause <- ''
    }

    sql <- paste0("SELECT * FROM ", table_cast_results,
                  " WHERE ld_id = ", wt_query$ld_id, type_clause,
                  " AND fault = '", tolower(wt_query$fault),
                  "' ORDER BY date_time DESC LIMIT 1")
    result <- db_query(query = sql, db_config = db_config)

    if (result$error) {
        return(list(error = TRUE, data = NULL,
                    msg = paste0("\n\n", caller_name, ": On call queryTimeDB:",
                                 result$msg, "\n\n")))
    }
    if (nrow(result$data) > 0 && ("date_time" %in% names(result$data))) {
        # The database server is assumed to store date_time in UTC.
        return(list(error = FALSE,
                    data = as.POSIXlt(result$data$date_time, tz = "UTC"),
                    msg = "OK"))
    }
    list(error = FALSE, data = NULL, msg = "OK")
}
/functions_common/db_get_last_prediction.R
permissive
alecuba16/SOM_windturbine
R
false
false
1,112
r
db_get_last_prediction <- function(wt_query, table_cast_results = "CAST_RESULTS", db_config){ # OJO usar comillas simples para encerrar CHAR variables de la BDD. iam=match.call()[[1]] type_condition= paste0(" AND `type` = '",tolower(wt_query$type),"'"); if("type" %in% names(wt_query) && wt_query$type=="normality") type_condition=''; #Normality results table doesn't have type column query <- paste0("SELECT * FROM ",table_cast_results," WHERE ld_id = ", wt_query$ld_id,type_condition," AND fault = '", tolower(wt_query$fault),"' ORDER BY date_time DESC LIMIT 1") rs <- db_query(query=query,db_config=db_config) if(!rs$error){ if(nrow(rs$data)>0 && ("date_time" %in% names(rs$data))) return(list(error=FALSE,data=as.POSIXlt(rs$data$date_time, tz = "UTC"),msg="OK"))# It is supposed the server is in UTC timezone else return(list(error=FALSE,data=NULL,msg="OK")) }else{ return(list(error=TRUE, data=NULL, msg=paste0("\n\n",iam,": On call queryTimeDB:",rs$msg,"\n\n"))) } }
# Fit a linear model of axiom count vs. execution time for 10 SnomedCT
# samples on TrOWL, plot it with the fitted line, and write predictions
# for three time budgets (10/30/60 s) to a text file.

options( java.parameters = "-Xmx4g" )
getwd()
setwd("C:/ProgramExt/WS-R/SnomedCT_TrOWL")
getwd()

pdf("SnomedCT_10_Samples_TrOWL.pdf", width = 9)

## Read the sample measurements
samples <- read.csv("snomed_jan17.owl_10Samples_TrOWL.csv")

## Scatter plot of the samples (reasoner/ontology in the title)
plot(AxiomCount ~ msec,
     ylab = "Axiom Count of Sample",
     xlab = "Execution Time of a Sample (msec.)",
     data = samples,
     main = "10 Samples of SnomedCT on TrOWL")

## Fit a linear model and overlay the fitted regression line
model1 <- lm(AxiomCount ~ msec, data = samples)
# summary(model1)
abline(model1, lwd = 3, col = "orange")

## Higher-degree fits kept for reference (disabled):
# model2 <- lm(samples$AxiomCount ~ poly(samples$msec, degree = 2, raw = T))
# summary(model2)
# lines(smooth.spline(samples$msec, predict(model2)), col = "blue", lwd = 3)
# model3 <- lm(samples$AxiomCount ~ poly(samples$msec, degree = 3, raw = T))
# summary(model3)
# lines(smooth.spline(samples$msec, predict(model3)), col = "red", lwd = 2)

## Predicted axiom counts reachable within 10, 30 and 60 seconds
y_10sec <- predict(model1, newdata = data.frame(msec = c(10000)))
y_30sec <- predict(model1, newdata = data.frame(msec = c(30000)))
y_60sec <- predict(model1, newdata = data.frame(msec = c(60000)))

## Write the predictions together with the source CSV name
fileConn <- file("prediction4thresholds/SnomedCT_Predictions_From_10_Samples_For_TrOWL.txt")
writeLines(c(paste("CSVFile:", "snomed_jan17.owl"),
             paste("10sec:", y_10sec),
             paste("30sec:", y_30sec),
             paste("60sec:", y_60sec)),
           fileConn)
close(fileConn)

dev.off()
/S.5.2/TrOWL.model.generation/source.code/Reg_AxiomCount - TrOWL.r
no_license
IsaGuclu/ResourceAwareApproach
R
false
false
1,544
r
options( java.parameters = "-Xmx4g" ) getwd() setwd("C:/ProgramExt/WS-R/SnomedCT_TrOWL") getwd() pdf("SnomedCT_10_Samples_TrOWL.pdf", width=9) # ## read file myData <- read.csv("snomed_jan17.owl_10Samples_TrOWL.csv") ## make a plot plot(AxiomCount ~ msec, ylab="Axiom Count of Sample", xlab="Execution Time of a Sample (msec.)", data = myData, main="10 Samples of SnomedCT on TrOWL") ## use file names as title ## fit a linear model model1 <- lm(AxiomCount ~ msec, data = myData) # summary(model1) abline(model1, lwd=3, col="orange") ## overlay fitted regression line ## fit poly^2 model # model2 <- lm(myData$AxiomCount ~ poly(myData$msec, degree=2, raw=T)) # summary(model2) # lines(smooth.spline(myData$msec, predict(model2)), col="blue", lwd=3) ## fit poly^3 model # model3 <- lm(myData$AxiomCount ~ poly(myData$msec, degree=3, raw=T)) # summary(model3) # lines(smooth.spline(myData$msec, predict(model3)), col="red", lwd=2) # Test with 10 sec (10,000 msec) myData <- data.frame(msec=c(10000)) y_10sec = predict(model1, newdata = myData) # Test with 30 sec (30,000 msec) myData <- data.frame(msec=c(30000)) y_30sec = predict(model1, newdata = myData) # Test with 60 sec (60,000 msec) myData <- data.frame(msec=c(60000)) y_60sec = predict(model1, newdata = myData) fileConn<-file("prediction4thresholds/SnomedCT_Predictions_From_10_Samples_For_TrOWL.txt") writeLines(c(paste("CSVFile:","snomed_jan17.owl"),paste("10sec:", y_10sec),paste("30sec:", y_30sec),paste("60sec:", y_60sec)), fileConn) close(fileConn) dev.off()
# Monthly Australian wine sales: fit a log-linear trend plus monthly
# seasonal-dummy model to red wine sales and forecast 12 months ahead.

x <- read.table("/home/zabolotsky/abbakumov_r/R_занятие_06_регрессия/03_ts_graphs/wine_Austral.dat",
                header = TRUE, sep = "\t")
print(names(x))
print(head(x))
print(dim(x))

plot(x$sweet, type = "l")

par(mfrow = c(1, 2))
plot(x$red, type = "l")

## Work on the log scale to stabilise the variance
log_red <- log(x$red)
plot(log_red, type = "l")

## Linear and quadratic trend over the 168 observed months plus a
## 12-month forecast horizon
time_ <- 1:(168 + 12)
time_2 <- time_^2

## One 0/1 indicator column per calendar month, repeated over 15 years
## (same columns month_01 .. month_12 as spelling them out by hand)
month_dummies <- vapply(1:12,
                        function(m) rep(as.numeric(1:12 == m), 14 + 1),
                        numeric(168 + 12))
colnames(month_dummies) <- sprintf("month_%02d", 1:12)

## Extend the response with NAs so the fitted model covers the horizon
log_red[169:(168 + 12)] <- NA
log_red_2 <- data.frame(log_red, time_, time_2, month_dummies)

## March (month_03) is the omitted baseline month
res_01 <- lm(log_red ~ time_ + time_2 + month_01 + month_02 + month_04 +
               month_05 + month_06 + month_07 + month_08 + month_09 +
               month_10 + month_11 + month_12,
             log_red_2)
print(res_01)
print(summary(res_01))
print(res_01$fitted.values)

plot(log_red_2$log_red, type = "l", col = "green")
lines(res_01$fitted.values, col = "red")

log_predict <- predict.lm(res_01, log_red_2)
plot(log_red_2$log_red, type = "l", col = "green")
lines(log_predict, col = "red")

## Back-transform to the original sales scale and compare to the data
pred <- exp(log_predict)
plot(pred, type = "l", col = "green")
lines(x$red, type = "l", col = "red")
/lec7_3.R
no_license
bognev/abbakumovr
R
false
false
1,807
r
x = read.table("/home/zabolotsky/abbakumov_r/R_занятие_06_регрессия/03_ts_graphs/wine_Austral.dat", header=T, sep="\t") print(names(x)) print(head(x)) print(dim(x)) plot(x$sweet, type="l") par(mfrow=c(1,2)) plot(x$red, type="l") log_red = log(x$red) plot(log_red, type="l") time_ = 1:(168+12) time_2 = time_^2 month_01 = rep(c(1,0,0,0,0,0,0,0,0,0,0,0), (14 + 1)) month_02 = rep(c(0,1,0,0,0,0,0,0,0,0,0,0), (14 + 1)) month_03 = rep(c(0,0,1,0,0,0,0,0,0,0,0,0), (14 + 1)) month_04 = rep(c(0,0,0,1,0,0,0,0,0,0,0,0), (14 + 1)) month_05 = rep(c(0,0,0,0,1,0,0,0,0,0,0,0), (14 + 1)) month_06 = rep(c(0,0,0,0,0,1,0,0,0,0,0,0), (14 + 1)) month_07 = rep(c(0,0,0,0,0,0,1,0,0,0,0,0), (14 + 1)) month_08 = rep(c(0,0,0,0,0,0,0,1,0,0,0,0), (14 + 1)) month_09 = rep(c(0,0,0,0,0,0,0,0,1,0,0,0), (14 + 1)) month_10 = rep(c(0,0,0,0,0,0,0,0,0,1,0,0), (14 + 1)) month_11 = rep(c(0,0,0,0,0,0,0,0,0,0,1,0), (14 + 1)) month_12 = rep(c(0,0,0,0,0,0,0,0,0,0,0,1), (14 + 1)) log_red[169:(168+12)] = NA log_red_2 = data.frame(log_red, time_, time_2, month_01, month_02, month_03, month_04, month_05, month_06, month_07, month_08, month_09, month_10, month_11, month_12) res_01 = lm(log_red ~ time_ + time_2 + month_01 + month_02 + month_04 + month_05 + month_06 + month_07 + month_08 + month_09 + month_10 + month_11 + month_12, log_red_2) print(res_01) print(summary(res_01)) print(res_01$fitted.values) plot(log_red_2$log_red, type="l", col="green") lines(res_01$fitted.values, col="red") log_predict = predict.lm(res_01, log_red_2) plot(log_red_2$log_red, type="l", col="green") lines(log_predict, col="red") pred = exp(log_predict) plot(pred, type="l", col="green") lines(x$red, type="l", col="red")
# Histogram of per-protein t-test statistics (whole proteome).
# Fixes: require() replaced by library() so a missing package errors out
# instead of silently returning FALSE; dataset$ removed inside aes() in
# favour of the standard column reference.
library(data.table)
library(ggplot2)
library(grid)
library(gridExtra)
library(RColorBrewer)

# Shared plotting helpers
source("/home/diogo/workspace/tagc-rainet-RNA/src/fr/tagc/rainet/core/execution/analysis/RBPDomain/Rscripts/r_functions.R")

# Alternative inputs kept for reference:
#inputFile = "/home/diogo/Documents/RAINET_data/TAGC/rainetDatabase/results/ReadCatrapid/Ensembl82/mrna_vs_lncrna/protein_target_ratio_cutoff50_cutoff50_forR.tsv"
#inputFile = "/home/diogo/Documents/RAINET_data/TAGC/rainetDatabase/results/ReadCatrapid/Ensembl82/mrna_vs_lncrna/RBP_only/protein_target_ratio_rbp_only_cutoff50_cutoff50.tsv_forR"
inputFile = "/home/diogo/Documents/RAINET_data/TAGC/rainetDatabase/results/ReadCatrapid/Ensembl82/mrna_vs_lncrna/t_test/protein_target_ratio_ttest.out"

dataset <- fread(inputFile, stringsAsFactors = FALSE, header = TRUE, sep = "\t")

# Range of the statistic across proteins (auto-printed when sourced/run)
min(dataset$t_test_statistic)
max(dataset$t_test_statistic)

# Histogram of the t-test statistic distribution
plt1 <- ggplot(dataset, aes(x = t_test_statistic)) +
  geom_histogram() +
  xlab("t test statistic") +
  ylab("# proteins (whole proteome)") +
  theme_minimal()
plt1
/src/fr/tagc/rainet/core/execution/analysis/lncRNA_vs_mRNA/odds_ratio_distribution.R
no_license
diogomribeiro/RAINET
R
false
false
1,039
r
library(data.table) require(ggplot2) require(grid) require(gridExtra) library(RColorBrewer) source("/home/diogo/workspace/tagc-rainet-RNA/src/fr/tagc/rainet/core/execution/analysis/RBPDomain/Rscripts/r_functions.R") #inputFile = "/home/diogo/Documents/RAINET_data/TAGC/rainetDatabase/results/ReadCatrapid/Ensembl82/mrna_vs_lncrna/protein_target_ratio_cutoff50_cutoff50_forR.tsv" inputFile = "/home/diogo/Documents/RAINET_data/TAGC/rainetDatabase/results/ReadCatrapid/Ensembl82/mrna_vs_lncrna/t_test/protein_target_ratio_ttest.out" #inputFile = "/home/diogo/Documents/RAINET_data/TAGC/rainetDatabase/results/ReadCatrapid/Ensembl82/mrna_vs_lncrna/RBP_only/protein_target_ratio_rbp_only_cutoff50_cutoff50.tsv_forR" dataset <- fread(inputFile, stringsAsFactors = FALSE, header = TRUE, sep="\t") min(dataset$t_test_statistic) max(dataset$t_test_statistic) # plt1 <- ggplot( dataset, aes(x = dataset$t_test_statistic)) + geom_histogram() + xlab("t test statistic") + ylab("# proteins (whole proteome)") + theme_minimal() plt1
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utilities.R \name{order_by_slide_and_tissue_category} \alias{order_by_slide_and_tissue_category} \title{Order a data frame by slide ID and tissue category, putting the categories in the given order and the "Total" category in the proper place.} \usage{ order_by_slide_and_tissue_category(d, tissue_categories, .by = "Slide ID") } \arguments{ \item{d}{A data frame with \code{.by} and Tissue Category columns} \item{tissue_categories}{A vector of category names in the desired order} \item{.by}{First column to sort by} } \value{ The input, sorted } \description{ Order a data frame by slide ID and tissue category, putting the categories in the given order and the "Total" category in the proper place. }
/man/order_by_slide_and_tissue_category.Rd
permissive
iceberg22/phenoptrReports
R
false
true
787
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utilities.R \name{order_by_slide_and_tissue_category} \alias{order_by_slide_and_tissue_category} \title{Order a data frame by slide ID and tissue category, putting the categories in the given order and the "Total" category in the proper place.} \usage{ order_by_slide_and_tissue_category(d, tissue_categories, .by = "Slide ID") } \arguments{ \item{d}{A data frame with \code{.by} and Tissue Category columns} \item{tissue_categories}{A vector of category names in the desired order} \item{.by}{First column to sort by} } \value{ The input, sorted } \description{ Order a data frame by slide ID and tissue category, putting the categories in the given order and the "Total" category in the proper place. }
#' A local source.
#'
#' Mainly useful for testing: it makes local and remote tables addressable
#' with exactly the same syntax.
#'
#' Generally, \code{src_local} should not be called directly; use one of
#' the (currently three) constructors instead.
#'
#' @param tbl name of the function used to generate \code{tbl} objects
#' @param pkg,env Either the name of a package or an environment object in
#'   which to look for objects.
#' @keywords internal
#' @export
#' @examples
#' if (require("Lahman")) {
#' src_dt("Lahman")
#' src_df("Lahman")
#'
#' batting_df <- tbl(src_df("Lahman"), "Batting")
#' batting_dt <- tbl(src_dt("Lahman"), "Batting")
#' }
src_local <- function(tbl, pkg = NULL, env = NULL) {
  # Exactly one of pkg / env must be supplied.
  if (!xor(is.null(pkg), is.null(env))) {
    stop("Must supply exactly one of pkg and env", call. = FALSE)
  }

  if (is.null(env)) {
    # Resolve the attached package environment by its search-path name.
    env <- as.environment(paste0("package:", pkg))
    src_name <- paste0("<package: ", pkg, ">")
  } else {
    src_name <- capture.output(print(env))
  }

  structure(
    list(tbl_f = match.fun(tbl), name = src_name, env = env),
    class = c("src_local", "src")
  )
}

#' @rdname src_local
#' @export
src_df <- function(pkg = NULL, env = NULL) {
  src_local("tbl_df", pkg, env)
}

#' @rdname src_local
#' @export
src_dt <- function(pkg = NULL, env = NULL) {
  src_local("tbl_dt", pkg, env)
}

#' @export
src_tbls.src_local <- function(x, ...) {
  # Every data frame bound in the source environment counts as a table.
  candidates <- ls(envir = x$env)
  keep <- vapply(candidates,
                 function(nm) is.data.frame(get(nm, envir = x$env)),
                 logical(1))
  candidates[keep]
}

#' @export
tbl.src_local <- function(src, from, ...) {
  # Look the object up in the source environment and wrap it as a tbl.
  obj <- get(from, src$env)
  src$tbl_f(obj)
}

#' @export
copy_to.src_local <- function(dest, df, name = deparse(substitute(df)), ...) {
  # Bind the data frame into the source environment, then hand back a tbl.
  assign(name, envir = dest$env, df)
  tbl(dest, name)
}

#' @export
format.src_local <- function(x, ...) {
  tbl_line <- wrap("tbls: ", paste0(sort(src_tbls(x)), collapse = ", "))
  paste0("src: ", x$name, "\n", tbl_line)
}
/dplyr/R/src-local.r
no_license
radfordneal/R-package-mods
R
false
false
1,905
r
#' A local source. #' #' This is mainly useful for testing, since makes it possible to refer to #' local and remote tables using exactly the same syntax. #' #' Generally, \code{src_local} should not be called directly, but instead #' one of the (currently three) constructors should be used. #' #' @param tbl name of the function used to generate \code{tbl} objects #' @param pkg,env Either the name of a package or an environment object in #' which to look for objects. #' @keywords internal #' @export #' @examples #' if (require("Lahman")) { #' src_dt("Lahman") #' src_df("Lahman") #' #' batting_df <- tbl(src_df("Lahman"), "Batting") #' batting_dt <- tbl(src_dt("Lahman"), "Batting") #' } src_local <- function(tbl, pkg = NULL, env = NULL) { if (!xor(is.null(pkg), is.null(env))) { stop("Must supply exactly one of pkg and env", call. = FALSE) } if (is.null(env)) { env <- as.environment(paste0("package:", pkg)) name <- paste0("<package: ", pkg, ">") } else { name <- capture.output(print(env)) } structure( list(tbl_f = match.fun(tbl), name = name, env = env), class = c("src_local", "src") ) } #' @rdname src_local #' @export src_df <- function(pkg = NULL, env = NULL) { src_local("tbl_df", pkg, env) } #' @rdname src_local #' @export src_dt <- function(pkg = NULL, env = NULL) { src_local("tbl_dt", pkg, env) } #' @export src_tbls.src_local <- function(x, ...) { objs <- ls(envir = x$env) Filter(function(obj) is.data.frame(get(obj, envir = x$env)), objs) } #' @export tbl.src_local <- function(src, from, ...) { src$tbl_f(get(from, src$env)) } #' @export copy_to.src_local <- function(dest, df, name = deparse(substitute(df)), ...) { assign(name, envir = dest$env, df) tbl(dest, name) } #' @export format.src_local <- function(x, ...) { paste0("src: ", x$name, "\n", wrap("tbls: ", paste0(sort(src_tbls(x)), collapse = ", "))) }
source("global.R") ###Setting up the dashboard page dashboardPage( dashboardHeader( title = "OHI Global Data Explorer", titleWidth = 300), ### Dashboard Sidebar dashboardSidebar( sidebarMenu( menuItem("The OHI Story", tabName = "dashboard", icon = icon("globe", lib="glyphicon")), # menuItem("Livelihoods & Economies", tabName = "liveco"), # menuItem("Tourism & Recreation", tabName = "tr"), # menuItem("Biodiversity", tabName = "bio"), # menuItem("Sense of Place", tabName = "sop"), # menuItem("Artisanal Opportunities", tabName = "ao"), # menuItem("Food Provision", tabName = "fp"), menuItem("Mariculture", tabName = "mar") #, #menuItem("Fisheries", tabName = "fis") #, # menuItem("Coastal Protection", tabName = "cp"), # menuItem("Carbon Storage", tabName = "cs") ), # Footer tag, include hyperlink tags$a(href="https://iwensu0313.github.io/", tags$footer("\u00a9 Iwen Su, OHI Global Fellow", align = "right", style = " position:absolute; bottom:0; width:100%; height:50px; /* Height of the footer */ color: white; padding: 10px; z-index: 1000;") ), width = 200), ### Dashboard Body dashboardBody( #adding this tag to make header longer, from here:https://rstudio.github.io/shinydashboard/appearance.html#long-titles tags$head( tags$link(rel = "stylesheet", type = "text/css", href = "custom.css") ), ### Side Bar Tabs tabItems( ## The OHI Story ## tabItem(tabName = "dashboard", frontp() # content is in front_page.R ), ## FOR BUILDING OUT FUTURE GOALS ## # ## Livelihood and economies ## # # tabItem(tabName = "liveco", # # ## Livelihoods & Economies tab title ## # tab_title_ui(goal_text = "LIVELIHOODS & ECONOMIES", # definition = "", # goal_description = "") # ), # # # ## Tourism & recreation ## # # tabItem(tabName = "tr", # # ## Tourism & Recreation tab title ## # tab_title_ui(goal_text = "TOURISM & RECREATION", # definition = "", # goal_description = "") # # ), # # ## Biodiversity ## # # tabItem(tabName = "bio", # # ## Biodiversity tab title ## # tab_title_ui(goal_text = 
"BIODIVERSITY", # definition = "", # goal_description = "") # # ), # # ## Sense of Place ## # # tabItem(tabName = "sop", # # ## Sense of Place tab title ## # tab_title_ui(goal_text = "SENSE OF PLACE", # definition = "", # goal_description = "") # # ), # # ## Local Fishing & Resource Access Opportunities ## # # tabItem(tabName = "ao", # # ## Local Fishing & Resource Access Opportunities tab title ## # tab_title_ui(goal_text = "ARTISANAL FISHING OPPORTUNITY", # definition = "", # goal_description = "") # # ), # # ## Food Provision ## # # tabItem(tabName = "fp", # # ## Food Provision tab title ## # tab_title_ui(goal_text = "FOOD PROVISION", # definition = "", # goal_description = "") # # ), ## Mariculture ## tabItem(tabName = "mar", div(class = "master", # master settings, e.g. page width ## Mariculture Tab Title ## tab_title_ui(goal_text = "MARICULTURE", goal_description = "Global mariculture has been growing impressively since the 1980s, while wild-caught fishery production has remained relatively static.", definition = list("Mariculture measures the ability to reach the highest levels of seafood gained from farm-raised facilities without damaging the ocean’s ability to provide fish sustainably now and in the future. The status of each country is calculated by taking tonnes of seafood produced, weighting it for sustainability and dividing it by the country's coastal population to scale it across the global. Since OHI also defines higher mariculture statuses as those that are maximizing sustainable harvest from the oceans, we compare the production per coastal population to the highest global historic production capacity. 
The mariculture and fisheries status both contribute equally to measuring the OHI Food Provisions goal.")), ## Mariculture Baseline Metrics ## summary_stats_ui(id = "mar_baseline", number_boxes = 3), ## Mariculture Global Map map_ui(id = "mar_global_map", title_text = paste0("Global Map of Mariculture Production in ", data_yr), sub_title_text = "Start exploring! Select data to view on the map & click on EEZ regions to see country and values. It may take a few seconds to load. The data in the map categorizes countries into 4 quantiles with 75-100% being the top producing countries.", select_type = "radio", select_location = "above", select_choices = c("All Production" = "prodTonnesAll", "Production per Capita" = "prodPerCap"), select_label = "", source_text = list( p("Sources:"), p(tags$sup("1."), tags$a(href="http://www.fao.org/fishery/statistics/software/fishstatj/en", "Food and Agriculture Organization"), ", Global Aquaculture Production Quantity (March 2018)"), p(tags$sup("2."), tags$a(href="http://sedac.ciesin.columbia.edu/data/collection/gpw-v4/documentation","Center for International Earth Science Information Network"), ", Gridded Population of the World, V4 (2016).") ) ), ## Annual Mariculture Production ## card_ui(id = "mar_prod", title_text = "Tonnes of Species Harvested by Country", sub_title_text = "Start exploring! Select or type in a country of interest. Click on names of species you want to remove from the plot. 
Hover over the points to view tonnes and species harvested.", select_type = "search", select_location = "above", select_choices = unique(mar_harvest$country), select_label = NULL, source_text = list( p("Sources:"), p(tags$sup("1."), tags$a(href="http://www.fao.org/fishery/statistics/software/fishstatj/en", "Food and Agriculture Organization"), ", Global Aquaculture Production Quantity (March 2018)")) ) ) # end div-master ) # end MAR tabItem #, ## Fisheries ## #tabItem(tabName = "fis", ## Fisheries Tab Title ## # tab_title_ui(goal_text = "FISHERIES", # goal_description = "sustainably wild-caught seafood from fisheries", # definition = list("the sustainable harvest of seafood from wild-caught fisheries")), # # ## Annual Fisheries Production ## # card_ui(id = "fis_prod", # title_text = "Tonnes of Species Harvested by Country", # sub_title_text = "Start exploring! Select or type in a country of interest. Click on names of species you want to remove from the plot. Hover over the points to view tonnes and species harvested.", # select_type = "search", # select_location = "above", # select_choices = unique(fis_harvest$country), # select_label = NULL, # source_text = list( # p("Sources:"), # p(tags$sup("1."), tags$a(href="http://www.fao.org/fishery/statistics/software/fishstatj/en", "Food and Agriculture Organization"), ", Global Aquaculture Production Quantity (March 2018)")) # ) # ) # #, # ## Coastal Protection ## # # tabItem(tabName = "cp", # # ## Coastal Protection tab title ## # tab_title_ui(goal_text = "COASTAL PROTECTION", # definition = "the amount of protection provided by marine and coastal habitats serving as natural buffers against incoming waves", # goal_description = "storage of carbon and protection of our coasts from storm damage by living natural habitats") # # ), # # ## Carbon Storage ## # # tabItem(tabName = "cs", # # ## Carbon Storage tab title ## # tab_title_ui(goal_text = "CARBON STORAGE", # definition = "the condition of coastal habitats that store 
and sequester atmospheric carbon", # goal_description = "storage of carbon and protection of our coasts from storm damage by living natural habitats") # # ) ) ) )
/ui.R
no_license
camilavargasp/global-dashboard
R
false
false
9,741
r
source("global.R") ###Setting up the dashboard page dashboardPage( dashboardHeader( title = "OHI Global Data Explorer", titleWidth = 300), ### Dashboard Sidebar dashboardSidebar( sidebarMenu( menuItem("The OHI Story", tabName = "dashboard", icon = icon("globe", lib="glyphicon")), # menuItem("Livelihoods & Economies", tabName = "liveco"), # menuItem("Tourism & Recreation", tabName = "tr"), # menuItem("Biodiversity", tabName = "bio"), # menuItem("Sense of Place", tabName = "sop"), # menuItem("Artisanal Opportunities", tabName = "ao"), # menuItem("Food Provision", tabName = "fp"), menuItem("Mariculture", tabName = "mar") #, #menuItem("Fisheries", tabName = "fis") #, # menuItem("Coastal Protection", tabName = "cp"), # menuItem("Carbon Storage", tabName = "cs") ), # Footer tag, include hyperlink tags$a(href="https://iwensu0313.github.io/", tags$footer("\u00a9 Iwen Su, OHI Global Fellow", align = "right", style = " position:absolute; bottom:0; width:100%; height:50px; /* Height of the footer */ color: white; padding: 10px; z-index: 1000;") ), width = 200), ### Dashboard Body dashboardBody( #adding this tag to make header longer, from here:https://rstudio.github.io/shinydashboard/appearance.html#long-titles tags$head( tags$link(rel = "stylesheet", type = "text/css", href = "custom.css") ), ### Side Bar Tabs tabItems( ## The OHI Story ## tabItem(tabName = "dashboard", frontp() # content is in front_page.R ), ## FOR BUILDING OUT FUTURE GOALS ## # ## Livelihood and economies ## # # tabItem(tabName = "liveco", # # ## Livelihoods & Economies tab title ## # tab_title_ui(goal_text = "LIVELIHOODS & ECONOMIES", # definition = "", # goal_description = "") # ), # # # ## Tourism & recreation ## # # tabItem(tabName = "tr", # # ## Tourism & Recreation tab title ## # tab_title_ui(goal_text = "TOURISM & RECREATION", # definition = "", # goal_description = "") # # ), # # ## Biodiversity ## # # tabItem(tabName = "bio", # # ## Biodiversity tab title ## # tab_title_ui(goal_text = 
"BIODIVERSITY", # definition = "", # goal_description = "") # # ), # # ## Sense of Place ## # # tabItem(tabName = "sop", # # ## Sense of Place tab title ## # tab_title_ui(goal_text = "SENSE OF PLACE", # definition = "", # goal_description = "") # # ), # # ## Local Fishing & Resource Access Opportunities ## # # tabItem(tabName = "ao", # # ## Local Fishing & Resource Access Opportunities tab title ## # tab_title_ui(goal_text = "ARTISANAL FISHING OPPORTUNITY", # definition = "", # goal_description = "") # # ), # # ## Food Provision ## # # tabItem(tabName = "fp", # # ## Food Provision tab title ## # tab_title_ui(goal_text = "FOOD PROVISION", # definition = "", # goal_description = "") # # ), ## Mariculture ## tabItem(tabName = "mar", div(class = "master", # master settings, e.g. page width ## Mariculture Tab Title ## tab_title_ui(goal_text = "MARICULTURE", goal_description = "Global mariculture has been growing impressively since the 1980s, while wild-caught fishery production has remained relatively static.", definition = list("Mariculture measures the ability to reach the highest levels of seafood gained from farm-raised facilities without damaging the ocean’s ability to provide fish sustainably now and in the future. The status of each country is calculated by taking tonnes of seafood produced, weighting it for sustainability and dividing it by the country's coastal population to scale it across the global. Since OHI also defines higher mariculture statuses as those that are maximizing sustainable harvest from the oceans, we compare the production per coastal population to the highest global historic production capacity. 
The mariculture and fisheries status both contribute equally to measuring the OHI Food Provisions goal.")), ## Mariculture Baseline Metrics ## summary_stats_ui(id = "mar_baseline", number_boxes = 3), ## Mariculture Global Map map_ui(id = "mar_global_map", title_text = paste0("Global Map of Mariculture Production in ", data_yr), sub_title_text = "Start exploring! Select data to view on the map & click on EEZ regions to see country and values. It may take a few seconds to load. The data in the map categorizes countries into 4 quantiles with 75-100% being the top producing countries.", select_type = "radio", select_location = "above", select_choices = c("All Production" = "prodTonnesAll", "Production per Capita" = "prodPerCap"), select_label = "", source_text = list( p("Sources:"), p(tags$sup("1."), tags$a(href="http://www.fao.org/fishery/statistics/software/fishstatj/en", "Food and Agriculture Organization"), ", Global Aquaculture Production Quantity (March 2018)"), p(tags$sup("2."), tags$a(href="http://sedac.ciesin.columbia.edu/data/collection/gpw-v4/documentation","Center for International Earth Science Information Network"), ", Gridded Population of the World, V4 (2016).") ) ), ## Annual Mariculture Production ## card_ui(id = "mar_prod", title_text = "Tonnes of Species Harvested by Country", sub_title_text = "Start exploring! Select or type in a country of interest. Click on names of species you want to remove from the plot. 
Hover over the points to view tonnes and species harvested.", select_type = "search", select_location = "above", select_choices = unique(mar_harvest$country), select_label = NULL, source_text = list( p("Sources:"), p(tags$sup("1."), tags$a(href="http://www.fao.org/fishery/statistics/software/fishstatj/en", "Food and Agriculture Organization"), ", Global Aquaculture Production Quantity (March 2018)")) ) ) # end div-master ) # end MAR tabItem #, ## Fisheries ## #tabItem(tabName = "fis", ## Fisheries Tab Title ## # tab_title_ui(goal_text = "FISHERIES", # goal_description = "sustainably wild-caught seafood from fisheries", # definition = list("the sustainable harvest of seafood from wild-caught fisheries")), # # ## Annual Fisheries Production ## # card_ui(id = "fis_prod", # title_text = "Tonnes of Species Harvested by Country", # sub_title_text = "Start exploring! Select or type in a country of interest. Click on names of species you want to remove from the plot. Hover over the points to view tonnes and species harvested.", # select_type = "search", # select_location = "above", # select_choices = unique(fis_harvest$country), # select_label = NULL, # source_text = list( # p("Sources:"), # p(tags$sup("1."), tags$a(href="http://www.fao.org/fishery/statistics/software/fishstatj/en", "Food and Agriculture Organization"), ", Global Aquaculture Production Quantity (March 2018)")) # ) # ) # #, # ## Coastal Protection ## # # tabItem(tabName = "cp", # # ## Coastal Protection tab title ## # tab_title_ui(goal_text = "COASTAL PROTECTION", # definition = "the amount of protection provided by marine and coastal habitats serving as natural buffers against incoming waves", # goal_description = "storage of carbon and protection of our coasts from storm damage by living natural habitats") # # ), # # ## Carbon Storage ## # # tabItem(tabName = "cs", # # ## Carbon Storage tab title ## # tab_title_ui(goal_text = "CARBON STORAGE", # definition = "the condition of coastal habitats that store 
and sequester atmospheric carbon", # goal_description = "storage of carbon and protection of our coasts from storm damage by living natural habitats") # # ) ) ) )
# plot4.R — draw a 2x2 panel of household power measurements for
# 2007-02-01/02 and save it as a 480x480 plot4.png.
#
# Fixes: the file was previously read twice (an initial read.csv whose
# result was immediately overwritten), and the na.strings argument was
# only partially matched as `na =`.

# Read the power-consumption data once; "?" marks missing values.
data = read.table("data/household_power_consumption.txt", header = TRUE,
                  sep = ";", na.strings = "?",
                  colClasses = c("character", "character", rep("numeric", 7)))

# Combine Date and Time into a single POSIXlt timestamp
data$formatted_date <- strptime(paste(data$Date, data$Time, sep = "/"),
                                "%d/%m/%Y/%H:%M:%S")

## subset to specified dates (2007-02-01 and 2007-02-02)
dat <- subset(data, formatted_date >= "2007-02-01" & formatted_date < "2007-02-03")

png("plot4.png", width = 480, height = 480)
par(mfrow = c(2, 2))

# Top-left: global active power over time
plot(dat$formatted_date, dat$Global_active_power, type = "l",
     xlab = "", ylab = "Global Active Power")

# Top-right: voltage over time
plot(dat$formatted_date, dat$Voltage, type = "l",
     xlab = "datetime", ylab = "Voltage")

# Bottom-left: the three sub-metering series with a legend
plot(dat$formatted_date, dat$Sub_metering_1, type = "l",
     xlab = "", ylab = "Energy sub metering", col = "black")
lines(dat$formatted_date, dat$Sub_metering_2, col = "red")
lines(dat$formatted_date, dat$Sub_metering_3, col = "blue")
legend("topright", col = c("black", "red", "blue"),
       c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = 1, bty = "n")

# Bottom-right: global reactive power over time
plot(dat$formatted_date, dat$Global_reactive_power, type = "l",
     xlab = "datetime", ylab = "Global_reactive_power")

dev.off()
/plot4.R
no_license
misewerin/ExData_Plotting1
R
false
false
1,227
r
data <- read.csv("data/household_power_consumption.txt", sep = ";") data = read.table("data/household_power_consumption.txt", header = TRUE, sep = ";", na = "?", colClasses = c("character", "character", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric")) data$formatted_date <- strptime(paste(data$Date, data$Time, sep = "/"), "%d/%m/%Y/%H:%M:%S") ##subset to specified dates dat <- subset(data, formatted_date >= "2007-02-01" & formatted_date < "2007-02-03") png("plot4.png", width = 480, height = 480) par(mfrow = c(2,2)) plot(dat$formatted_date, dat$Global_active_power, type = "l", xlab = "", ylab = "Global Active Power") plot(dat$formatted_date, dat$Voltage, type = "l", xlab = "datetime", ylab = "Voltage") plot(dat$formatted_date, dat$Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering", col = "black") lines(dat$formatted_date, dat$Sub_metering_2, col = "red") lines(dat$formatted_date, dat$Sub_metering_3, col = "blue") legend("topright", col = c("black", "red", "blue"), c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty = 1, bty = "n") plot(dat$formatted_date, dat$Global_reactive_power, type = "l", xlab = "datetime", ylab = "Global_reactive_power") dev.off()
################################################################################
## R - NLP & Text Mining Project 4
## Analysis of 2014 time frame Elon Musk's Tweets.
################################################################################

################################################################################
## IMPORTING THE REQUIRED LIBRARIES
################################################################################
library(tm)
library(twitteR)
library(ggplot2)
library(syuzhet)
library(tidytext)
library(wordcloud)

## TASK 1 - IMPORTING THE TWEETS
year4.tweets.df <- read.csv("D:/001_Data/NLP/Elon Musk Tweets/2014.csv")
head(year4.tweets.df$tweet)
year4.tweets.df2 <- year4.tweets.df$tweet

## TASK 2 - CLEANING THE TWEETS
# NOTE: "http\\S+" also matches https URLs, so the original's separate
# "https:\\S+" and "https\\S+" substitutions were redundant and have been
# folded into a single pattern. It is applied again after stripping non-ASCII
# characters, in case that removal exposes a previously broken URL.
year4.tweets.df2 <- gsub('http\\S+\\s*', '', year4.tweets.df2)   ## Remove URLs
year4.tweets.df2 <- gsub('[^\x01-\x7F]', '', year4.tweets.df2)   ## Remove non-ASCII characters
year4.tweets.df2 <- gsub('http\\S+\\s*', '', year4.tweets.df2)   ## Remove URLs exposed above
year4.tweets.df2 <- gsub('\\b+RT', '', year4.tweets.df2)         ## Remove RT
year4.tweets.df2 <- gsub('#\\S+', '', year4.tweets.df2)          ## Remove hashtags
year4.tweets.df2 <- gsub('@\\S+', '', year4.tweets.df2)          ## Remove mentions
year4.tweets.df2 <- gsub('[[:cntrl:]]', '', year4.tweets.df2)    ## Remove control characters
year4.tweets.df2 <- gsub("\\d", '', year4.tweets.df2)            ## Remove digits
year4.tweets.df2 <- gsub('[[:punct:]]', '', year4.tweets.df2)    ## Remove punctuation
year4.tweets.df2 <- gsub("^[[:space:]]*", "", year4.tweets.df2)  ## Trim leading whitespace
year4.tweets.df2 <- gsub("[[:space:]]*$", "", year4.tweets.df2)  ## Trim trailing whitespace
year4.tweets.df2 <- gsub(' +', ' ', year4.tweets.df2)            ## Collapse repeated spaces

## TASK 3 - CALCULATING SENTIMENTS / EMOTIONS
year4.tweets.word.df <- as.vector(year4.tweets.df2)
year4.tweets.emotion.df <- get_nrc_sentiment(year4.tweets.word.df)
year4.tweets.emotion.df2 <- cbind(year4.tweets.df2, year4.tweets.emotion.df)
head(year4.tweets.word.df)
head(year4.tweets.emotion.df2)

## TASK 4 - FINDING THE MOST POSITIVE & NEGATIVE ELON MUSK'S TWEET
year4.tweets.sent.value <- get_sentiment(year4.tweets.word.df)
year4.tweets.positive.tweets <- year4.tweets.word.df[year4.tweets.sent.value > 0]
year4.tweets.negative.tweets <- year4.tweets.word.df[year4.tweets.sent.value < 0]
year4.tweets.neutral.tweets  <- year4.tweets.word.df[year4.tweets.sent.value == 0]

year4.tweets.most.positive <-
  year4.tweets.word.df[year4.tweets.sent.value == max(year4.tweets.sent.value)]
print(year4.tweets.most.positive)

# "==" against the minimum (the original's "<=" is equivalent but asymmetric
# with the most-positive case above).
year4.tweets.most.negative <-
  year4.tweets.word.df[year4.tweets.sent.value == min(year4.tweets.sent.value)]
print(year4.tweets.most.negative)

## TASK 5 - CREATING PIE CHART OF SENTIMENTS
year4.tweets.Positive <- length(year4.tweets.positive.tweets)
year4.tweets.Neutral  <- length(year4.tweets.neutral.tweets)
year4.tweets.Negative <- length(year4.tweets.negative.tweets)
year4.tweets.Sentiments <- c(year4.tweets.Positive,
                             year4.tweets.Neutral,
                             year4.tweets.Negative)
print(year4.tweets.Sentiments)

# BUG FIX: legend labels must follow the same order as the counts vector
# (Positive, Neutral, Negative); the original listed Negative before Neutral,
# mislabelling two slices of the pie.
year4.tweets.labels <- c("Positive", "Neutral", "Negative")
pie(year4.tweets.Sentiments, year4.tweets.Sentiments,
    main = "Sentiment Analysis On Elon Musk Tweets (Year = 2014)",
    col = rainbow(length(year4.tweets.Sentiments)))
legend('topright', year4.tweets.labels, cex = 0.8,
       fill = rainbow(length(year4.tweets.labels)))

## TASK 6 - TERM DOCUMENT MATRIX OF 2014 ELON MUSK TWEETS
# BUG FIX: the original passed stopwords("english") as an UNNAMED element of
# the control list, which tm ignores -- English stopwords were never removed.
# They now form a single named "stopwords" vector together with "tweets".
year4.tweets.tweet_corpus <- Corpus(VectorSource(year4.tweets.word.df))
year4.tweets.tdm <- TermDocumentMatrix(
  year4.tweets.tweet_corpus,
  control = list(removePunctuation = TRUE,
                 wordLengths = c(5, 15),
                 stopwords = c("tweets", stopwords("english")),
                 removeNumbers = TRUE,
                 tolower = TRUE))

## CALCULATING THE COUNTS OF DIFFERENT EMOTIONS FROM ELON MUSK TWEETS (Year = 2014)
head(year4.tweets.tdm)
year4.tweets.DF <- tidy(year4.tweets.tdm)
head(year4.tweets.DF)
head(year4.tweets.DF$term)
year4.tweets.DF.texts <- as.vector(year4.tweets.DF$term)

# SENTIMENT ANALYSIS ON ELON MUSK'S TWEETS (per-term NRC emotion totals)
year4.tweets.DF.texts.Sentiment <- get_nrc_sentiment(year4.tweets.DF.texts)
year4.tweets.positive     <- sum(year4.tweets.DF.texts.Sentiment$positive)
year4.tweets.anger        <- sum(year4.tweets.DF.texts.Sentiment$anger)
year4.tweets.anticipation <- sum(year4.tweets.DF.texts.Sentiment$anticipation)
year4.tweets.disgust      <- sum(year4.tweets.DF.texts.Sentiment$disgust)
year4.tweets.fear         <- sum(year4.tweets.DF.texts.Sentiment$fear)
year4.tweets.joy          <- sum(year4.tweets.DF.texts.Sentiment$joy)
year4.tweets.sadness      <- sum(year4.tweets.DF.texts.Sentiment$sadness)
year4.tweets.surprise     <- sum(year4.tweets.DF.texts.Sentiment$surprise)
year4.tweets.trust        <- sum(year4.tweets.DF.texts.Sentiment$trust)
year4.tweets.negative     <- sum(year4.tweets.DF.texts.Sentiment$negative)

# BAR CHART ON CALCULATED SENTIMENTS
# (The stray console-continuation "+" signs pasted into the original c() call
# have been removed; they acted as harmless unary-plus operators.)
year4.tweets.yAxis <- c(year4.tweets.positive,
                        year4.tweets.anger,
                        year4.tweets.anticipation,
                        year4.tweets.disgust,
                        year4.tweets.fear,
                        year4.tweets.joy,
                        year4.tweets.sadness,
                        year4.tweets.surprise,
                        year4.tweets.trust,
                        year4.tweets.negative)
year4.tweets.xAxis <- c("Positive", "Anger", "Anticipation", "Disgust", "Fear",
                        "Joy", "Sadness", "Surprise", "Trust", "Negative")
year4.tweets.colors <- c("green", "red", "blue", "orange", "red",
                         "green", "orange", "blue", "green", "red")
year4.tweets.yRange <- range(0, year4.tweets.yAxis)
barplot(year4.tweets.yAxis, names.arg = year4.tweets.xAxis,
        xlab = "Sentiment Analysis", ylab = "Score",
        main = "ELON MUSK'S TWEET (YEAR-2014) SENTIMENT ANALYSIS",
        col = year4.tweets.colors, border = "black",
        ylim = year4.tweets.yRange, xpd = FALSE, axisnames = TRUE,
        cex.axis = 0.8, cex.sub = 0.8, col.sub = "blue")

# Word frequencies for the word cloud
year4.tweets.tdm.matrix <- as.matrix(year4.tweets.tdm)
year4.tweets.word_freqs <- sort(rowSums(year4.tweets.tdm.matrix),
                                decreasing = TRUE)
year4.tweets.dm <- data.frame(word = names(year4.tweets.word_freqs),
                              freq = year4.tweets.word_freqs)

## TASK 7 - PREPARING THE WORD CLOUD TO EXTRACT INSIGHTS
wordcloud(year4.tweets.dm$word, year4.tweets.dm$freq,
          min.freq = 10, max.words = 100, random.order = FALSE,
          colors = brewer.pal(8, "Dark2"))

## THE END
################################################################################
################################################################################
/2014_Tweet_Analysis/2014-Elon Musk Tweet Analysis.R
permissive
drdataSpp/Spp-NLP-On-Elon-Musks-Tweets
R
false
false
7,092
r
################################################################################
## R - NLP & Text Mining Project 4
## Analysis of 2014 time frame Elon Musk's Tweets.
################################################################################

################################################################################
## IMPORTING THE REQUIRED LIBRARIES
################################################################################
library(tm)
library(twitteR)
library(ggplot2)
library(syuzhet)
library(tidytext)
library(wordcloud)

## TASK 1 - IMPORTING THE TWEETS
year4.tweets.df <- read.csv("D:/001_Data/NLP/Elon Musk Tweets/2014.csv")
head(year4.tweets.df$tweet)
year4.tweets.df2 <- year4.tweets.df$tweet

## TASK 2 - CLEANING THE TWEETS
# NOTE: "http\\S+" also matches https URLs, so the original's separate
# "https:\\S+" and "https\\S+" substitutions were redundant and have been
# folded into a single pattern. It is applied again after stripping non-ASCII
# characters, in case that removal exposes a previously broken URL.
year4.tweets.df2 <- gsub('http\\S+\\s*', '', year4.tweets.df2)   ## Remove URLs
year4.tweets.df2 <- gsub('[^\x01-\x7F]', '', year4.tweets.df2)   ## Remove non-ASCII characters
year4.tweets.df2 <- gsub('http\\S+\\s*', '', year4.tweets.df2)   ## Remove URLs exposed above
year4.tweets.df2 <- gsub('\\b+RT', '', year4.tweets.df2)         ## Remove RT
year4.tweets.df2 <- gsub('#\\S+', '', year4.tweets.df2)          ## Remove hashtags
year4.tweets.df2 <- gsub('@\\S+', '', year4.tweets.df2)          ## Remove mentions
year4.tweets.df2 <- gsub('[[:cntrl:]]', '', year4.tweets.df2)    ## Remove control characters
year4.tweets.df2 <- gsub("\\d", '', year4.tweets.df2)            ## Remove digits
year4.tweets.df2 <- gsub('[[:punct:]]', '', year4.tweets.df2)    ## Remove punctuation
year4.tweets.df2 <- gsub("^[[:space:]]*", "", year4.tweets.df2)  ## Trim leading whitespace
year4.tweets.df2 <- gsub("[[:space:]]*$", "", year4.tweets.df2)  ## Trim trailing whitespace
year4.tweets.df2 <- gsub(' +', ' ', year4.tweets.df2)            ## Collapse repeated spaces

## TASK 3 - CALCULATING SENTIMENTS / EMOTIONS
year4.tweets.word.df <- as.vector(year4.tweets.df2)
year4.tweets.emotion.df <- get_nrc_sentiment(year4.tweets.word.df)
year4.tweets.emotion.df2 <- cbind(year4.tweets.df2, year4.tweets.emotion.df)
head(year4.tweets.word.df)
head(year4.tweets.emotion.df2)

## TASK 4 - FINDING THE MOST POSITIVE & NEGATIVE ELON MUSK'S TWEET
year4.tweets.sent.value <- get_sentiment(year4.tweets.word.df)
year4.tweets.positive.tweets <- year4.tweets.word.df[year4.tweets.sent.value > 0]
year4.tweets.negative.tweets <- year4.tweets.word.df[year4.tweets.sent.value < 0]
year4.tweets.neutral.tweets  <- year4.tweets.word.df[year4.tweets.sent.value == 0]

year4.tweets.most.positive <-
  year4.tweets.word.df[year4.tweets.sent.value == max(year4.tweets.sent.value)]
print(year4.tweets.most.positive)

# "==" against the minimum (the original's "<=" is equivalent but asymmetric
# with the most-positive case above).
year4.tweets.most.negative <-
  year4.tweets.word.df[year4.tweets.sent.value == min(year4.tweets.sent.value)]
print(year4.tweets.most.negative)

## TASK 5 - CREATING PIE CHART OF SENTIMENTS
year4.tweets.Positive <- length(year4.tweets.positive.tweets)
year4.tweets.Neutral  <- length(year4.tweets.neutral.tweets)
year4.tweets.Negative <- length(year4.tweets.negative.tweets)
year4.tweets.Sentiments <- c(year4.tweets.Positive,
                             year4.tweets.Neutral,
                             year4.tweets.Negative)
print(year4.tweets.Sentiments)

# BUG FIX: legend labels must follow the same order as the counts vector
# (Positive, Neutral, Negative); the original listed Negative before Neutral,
# mislabelling two slices of the pie.
year4.tweets.labels <- c("Positive", "Neutral", "Negative")
pie(year4.tweets.Sentiments, year4.tweets.Sentiments,
    main = "Sentiment Analysis On Elon Musk Tweets (Year = 2014)",
    col = rainbow(length(year4.tweets.Sentiments)))
legend('topright', year4.tweets.labels, cex = 0.8,
       fill = rainbow(length(year4.tweets.labels)))

## TASK 6 - TERM DOCUMENT MATRIX OF 2014 ELON MUSK TWEETS
# BUG FIX: the original passed stopwords("english") as an UNNAMED element of
# the control list, which tm ignores -- English stopwords were never removed.
# They now form a single named "stopwords" vector together with "tweets".
year4.tweets.tweet_corpus <- Corpus(VectorSource(year4.tweets.word.df))
year4.tweets.tdm <- TermDocumentMatrix(
  year4.tweets.tweet_corpus,
  control = list(removePunctuation = TRUE,
                 wordLengths = c(5, 15),
                 stopwords = c("tweets", stopwords("english")),
                 removeNumbers = TRUE,
                 tolower = TRUE))

## CALCULATING THE COUNTS OF DIFFERENT EMOTIONS FROM ELON MUSK TWEETS (Year = 2014)
head(year4.tweets.tdm)
year4.tweets.DF <- tidy(year4.tweets.tdm)
head(year4.tweets.DF)
head(year4.tweets.DF$term)
year4.tweets.DF.texts <- as.vector(year4.tweets.DF$term)

# SENTIMENT ANALYSIS ON ELON MUSK'S TWEETS (per-term NRC emotion totals)
year4.tweets.DF.texts.Sentiment <- get_nrc_sentiment(year4.tweets.DF.texts)
year4.tweets.positive     <- sum(year4.tweets.DF.texts.Sentiment$positive)
year4.tweets.anger        <- sum(year4.tweets.DF.texts.Sentiment$anger)
year4.tweets.anticipation <- sum(year4.tweets.DF.texts.Sentiment$anticipation)
year4.tweets.disgust      <- sum(year4.tweets.DF.texts.Sentiment$disgust)
year4.tweets.fear         <- sum(year4.tweets.DF.texts.Sentiment$fear)
year4.tweets.joy          <- sum(year4.tweets.DF.texts.Sentiment$joy)
year4.tweets.sadness      <- sum(year4.tweets.DF.texts.Sentiment$sadness)
year4.tweets.surprise     <- sum(year4.tweets.DF.texts.Sentiment$surprise)
year4.tweets.trust        <- sum(year4.tweets.DF.texts.Sentiment$trust)
year4.tweets.negative     <- sum(year4.tweets.DF.texts.Sentiment$negative)

# BAR CHART ON CALCULATED SENTIMENTS
# (The stray console-continuation "+" signs pasted into the original c() call
# have been removed; they acted as harmless unary-plus operators.)
year4.tweets.yAxis <- c(year4.tweets.positive,
                        year4.tweets.anger,
                        year4.tweets.anticipation,
                        year4.tweets.disgust,
                        year4.tweets.fear,
                        year4.tweets.joy,
                        year4.tweets.sadness,
                        year4.tweets.surprise,
                        year4.tweets.trust,
                        year4.tweets.negative)
year4.tweets.xAxis <- c("Positive", "Anger", "Anticipation", "Disgust", "Fear",
                        "Joy", "Sadness", "Surprise", "Trust", "Negative")
year4.tweets.colors <- c("green", "red", "blue", "orange", "red",
                         "green", "orange", "blue", "green", "red")
year4.tweets.yRange <- range(0, year4.tweets.yAxis)
barplot(year4.tweets.yAxis, names.arg = year4.tweets.xAxis,
        xlab = "Sentiment Analysis", ylab = "Score",
        main = "ELON MUSK'S TWEET (YEAR-2014) SENTIMENT ANALYSIS",
        col = year4.tweets.colors, border = "black",
        ylim = year4.tweets.yRange, xpd = FALSE, axisnames = TRUE,
        cex.axis = 0.8, cex.sub = 0.8, col.sub = "blue")

# Word frequencies for the word cloud
year4.tweets.tdm.matrix <- as.matrix(year4.tweets.tdm)
year4.tweets.word_freqs <- sort(rowSums(year4.tweets.tdm.matrix),
                                decreasing = TRUE)
year4.tweets.dm <- data.frame(word = names(year4.tweets.word_freqs),
                              freq = year4.tweets.word_freqs)

## TASK 7 - PREPARING THE WORD CLOUD TO EXTRACT INSIGHTS
wordcloud(year4.tweets.dm$word, year4.tweets.dm$freq,
          min.freq = 10, max.words = 100, random.order = FALSE,
          colors = brewer.pal(8, "Dark2"))

## THE END
################################################################################
################################################################################
##' @include semiParametric.R survivalModels.R
NULL

##' Method to calculate model restricted means
##' @name calcModelRmst
##' @rdname calcModelRmst-methods
##' @param object (SurvivalModel) A survival model - note there cannot be
##' any covariates and armAsFactor must be FALSE
##' @param ... additional arguments for this generic
##' @return (data.frame or FlexTable)
##' @export
setGeneric("calcModelRmst", function(object, ...) {
  standardGeneric("calcModelRmst")
})

##' @name calcModelRmst
##' @aliases calcModelRmst,SurvivalModel-method
##' @rdname calcModelRmst-methods
##' @param model (character) The name of the model for which to calculate the restricted mean
##' @param times (numeric vector) times to calculate the restricted mean
##' @param class ('data.frame' or 'FlexTable' (default)) type of output required
##' @param digits (numeric default=3) if outputting a FlexTable then the number of digits
##' to round the entries to
##' @export
setMethod("calcModelRmst", "SurvivalModel",
function(object, model, times, class = c("data.frame", "FlexTable")[2],
         digits = 3, ...) {
  ## Validation ----
  if (object@armAsFactor) {
    stop("Cannot calculate restricted means if armAsFactor is TRUE")
  }
  if (length(object@covariates) != 0) {
    stop("Cannot calculate restricted means if covariates fitted in model")
  }
  if (any(!is.numeric(times) | times < 0)) {
    stop("Times must be numeric and non-negative")
  }
  # BUG FIX: the original error message was missing the closing quote
  # around 'FlexTable'.
  if (length(class) != 1 || !class %in% c("data.frame", "FlexTable")) {
    stop("Invalid class argument, should be 'data.frame' or 'FlexTable'")
  }
  if (length(digits) != 1 || !is.numeric(digits) || !digits > 0 ||
      is.infinite(digits) || is.na(digits)) {
    stop("Invalid digits argument")
  }
  if (length(model) != 1 || !model %in% names(object@models)) {
    stop("Invalid model argument must be one of ",
         paste(names(object@models), collapse = ", "))
  }

  ## For each arm, compute the restricted mean at every requested time ----
  rmsts <- lapply(object@models[[model]], function(oneModel) {
    # get the cdf function
    tempF <- oneModel$dfns$p

    # if spline need to add the knots argument for the dfns$p function to work
    args <- list()
    if (!is.null(oneModel$knots)) {
      args$knots <- oneModel$knots
    }

    # survival function S(x) = 1 - F(x), parameterised by the fitted estimates
    survFn <- function(x) {
      1 - do.call("tempF", c(args, list(q = x), oneModel$res[, "est"]))
    }

    # calculate restricted means (an optimization would be to not handle times
    # independently but calculate [0,t1], [t1,t2], ... and sum them as needed)
    vapply(times, function(time) {
      tryCatch(
        integrate(survFn, 0, time)$value,
        error = function(cond) NA
      )
    }, FUN.VALUE = numeric(1))
  })

  rmsts <- as.data.frame(do.call("rbind", rmsts))
  colnames(rmsts) <- NULL

  # if exactly two arms, add a between-arm difference row
  if (nrow(rmsts) == 2) {
    rmsts <- rbind(rmsts,
                   difference = as.numeric(rmsts[2, ]) - as.numeric(rmsts[1, ]))
  }

  # Add row of times
  rmsts <- rbind(time = times, rmsts)

  if (class == "data.frame") return(rmsts)

  ## Create FlexTable ----
  numRows <- nrow(rmsts)
  numCols <- 1 + ncol(rmsts)
  # BUG FIX: the original contained a doubled assignment
  # ("MyFTable <- MyFTable <- FlexTable(...)").
  MyFTable <- FlexTable(numrow = numRows, numcol = numCols,
                        body.par.props = parProperties(text.align = "right"),
                        header.text.props = textProperties(font.weight = "bold"),
                        body.cell.props = cellProperties(padding.right = 1))

  # Set borders: none everywhere, then heavy rules above the first two rows
  # and below the last row
  MyFTable[1:numRows, 1:numCols, side = 'bottom'] <- borderProperties(width = 0)
  MyFTable[1:numRows, 1:numCols, side = 'left']   <- borderProperties(width = 0)
  MyFTable[1:numRows, 1:numCols, side = 'top']    <- borderProperties(width = 0)
  MyFTable[1:numRows, 1:numCols, side = 'right']  <- borderProperties(width = 0)
  MyFTable[numRows, 1:numCols, side = 'bottom']   <- borderProperties(width = 3)
  MyFTable[1, 1:numCols, side = 'top']            <- borderProperties(width = 3)
  MyFTable[2, 1:numCols, side = 'top']            <- borderProperties(width = 3)

  # Add in data to table
  MyFTable[2:numRows, 2:numCols] <- round(rmsts[2:numRows, ], digits = digits)
  MyFTable[1, 2:numCols] <- times
  MyFTable[1:numRows, 1] <- rownames(rmsts)

  # Add header denoting which distribution
  hR <- FlexRow(paste(getDistributionDisplayNames(model), "\nrestricted means"),
                colspan = numCols,
                par.properties = parProperties(text.align = "center", padding = 1),
                cell.properties = cellProperties(border.width = 0),
                text.properties = textProperties(font.weight = "bold"))
  MyFTable <- addHeaderRow(MyFTable, hR)

  MyFTable
})

##' Method to calculate RMST on subset of data contained in a
##' SemiParametricModel object
##' @name calcRmst
##' @rdname calcRmst-methods
##' @param object (SemiParametricModel) The object which was created when
##' fitting the Cox model
##' @param ... additional parameters needed for specific instances of this
##' generic
##' @return (rmst object) contains list of RMST values, differences and call or
##' a FlexTable for output into a word document (depending on the class variable)
##' @export
setGeneric("calcRmst", function(object, ...) {
  standardGeneric("calcRmst")
})

##' @name calcRmst
##' @aliases calcRmst,SemiParametricModel-method
##' @rdname calcRmst-methods
##' @param class ('rmst' or 'FlexTable' (default)) type of output required
##' @param digits (numeric default=3) if outputting a FlexTable then the number of digits
##' to round the entries to
##' @param pval_digits (numeric default = same value as digits argument) decimal place rounding of p value
##' @export
setMethod("calcRmst", "SemiParametricModel",
function(object, class = c("rmst", "FlexTable")[2], digits = 3,
         pval_digits = digits, ...) {
  # BUG FIX: closing quote restored in the error message.
  if (length(class) != 1 || !class %in% c("rmst", "FlexTable")) {
    stop("Invalid class argument, should be 'rmst' or 'FlexTable'")
  }
  if (isSingleArm(object)) {
    stop("Cannot calculate rmst for a single arm trial")
  }

  # Create formula for Kaplan-Meier estimator
  formulaToFit <- survivalFormula(armAsFactor = !isSingleArm(object),
                                  covariates = character(0),
                                  timeCol = object@endPointDef[["timeCol"]],
                                  censorCol = object@endPointDef[["censorCol"]])

  # Call RMST
  result <- rmst(formula = formulaToFit,
                 data = object@survData@subject.data, ...)

  if (class == "rmst") {
    return(result)
  }

  # create FlexTable (note rmst function only works with two arms so the size
  # of the table is fixed)
  numRows <- 3
  numCols <- 6
  MyFTable <- FlexTable(numrow = numRows, numcol = numCols,
                        body.par.props = parProperties(text.align = "right"),
                        header.text.props = textProperties(font.weight = "bold"),
                        body.cell.props = cellProperties(padding.right = 1))

  # If the estimate is negative, reverse the signs and swap the lower/upper CI
  # bounds so the table shows the absolute difference
  if (result$diff[, 1] < 0) {
    storedDiff <- result$diff
    result$diff[, 1] <- -storedDiff[, 1]
    result$diff[, 3] <- -storedDiff[, 4]
    result$diff[, 4] <- -storedDiff[, 3]
  }

  # Add data
  MyFTable[3, 2:5] <- round(result$diff[1, 1:4], digits)

  # P value rounding: show "< 0.0...01" when below the display resolution
  pval <- result$diff[1, 5]
  p_digits <- as.numeric(pval_digits)
  MyFTable[3, 6] <- if (pval >= 10^(-p_digits)) {
    round(pval, p_digits)
  } else {
    # Equivalent output to the original's paste(..., sep = "", "1"),
    # but with the pieces in reading order.
    paste0(" < 0.", paste(rep(0, p_digits - 1), collapse = ""), "1")
  }

  MyFTable[1:2, 2:5] <- round(result$RMST[, 1:4], digits)

  # Add 1st column (the arm names)
  MyFTable[1:3, 1] <- c(as.character(getArmNames(object@survData)), "Difference")
  MyFTable[1:numRows, 1] <- parProperties(text.align = "left")
  MyFTable[1:numRows, 1] <- textProperties(font.weight = "bold")

  # Add header
  hR <- FlexRow(c("Arm", "RMST", "SE", "Lower CI", "Upper CI", "p-value"),
                par.properties = parProperties(text.align = "left"),
                cell.properties = cellProperties(padding.right = 1),
                text.properties = textProperties(font.weight = "bold"))
  MyFTable <- addHeaderRow(MyFTable, hR)

  MyFTable
})
/R/rmst.R
no_license
scientific-computing-solutions/sibyl
R
false
false
8,338
r
##' @include semiParametric.R survivalModels.R
NULL

##' Method to calculate model restricted means
##' @name calcModelRmst
##' @rdname calcModelRmst-methods
##' @param object (SurvivalModel) A survival model - note there cannot be
##' any covariates and armAsFactor must be FALSE
##' @param ... additional arguments for this generic
##' @return (data.frame or FlexTable)
##' @export
setGeneric("calcModelRmst", function(object, ...) {
  standardGeneric("calcModelRmst")
})

##' @name calcModelRmst
##' @aliases calcModelRmst,SurvivalModel-method
##' @rdname calcModelRmst-methods
##' @param model (character) The name of the model for which to calculate the restricted mean
##' @param times (numeric vector) times to calculate the restricted mean
##' @param class ('data.frame' or 'FlexTable' (default)) type of output required
##' @param digits (numeric default=3) if outputting a FlexTable then the number of digits
##' to round the entries to
##' @export
setMethod("calcModelRmst", "SurvivalModel",
function(object, model, times, class = c("data.frame", "FlexTable")[2],
         digits = 3, ...) {
  ## Validation ----
  if (object@armAsFactor) {
    stop("Cannot calculate restricted means if armAsFactor is TRUE")
  }
  if (length(object@covariates) != 0) {
    stop("Cannot calculate restricted means if covariates fitted in model")
  }
  if (any(!is.numeric(times) | times < 0)) {
    stop("Times must be numeric and non-negative")
  }
  # BUG FIX: the original error message was missing the closing quote
  # around 'FlexTable'.
  if (length(class) != 1 || !class %in% c("data.frame", "FlexTable")) {
    stop("Invalid class argument, should be 'data.frame' or 'FlexTable'")
  }
  if (length(digits) != 1 || !is.numeric(digits) || !digits > 0 ||
      is.infinite(digits) || is.na(digits)) {
    stop("Invalid digits argument")
  }
  if (length(model) != 1 || !model %in% names(object@models)) {
    stop("Invalid model argument must be one of ",
         paste(names(object@models), collapse = ", "))
  }

  ## For each arm, compute the restricted mean at every requested time ----
  rmsts <- lapply(object@models[[model]], function(oneModel) {
    # get the cdf function
    tempF <- oneModel$dfns$p

    # if spline need to add the knots argument for the dfns$p function to work
    args <- list()
    if (!is.null(oneModel$knots)) {
      args$knots <- oneModel$knots
    }

    # survival function S(x) = 1 - F(x), parameterised by the fitted estimates
    survFn <- function(x) {
      1 - do.call("tempF", c(args, list(q = x), oneModel$res[, "est"]))
    }

    # calculate restricted means (an optimization would be to not handle times
    # independently but calculate [0,t1], [t1,t2], ... and sum them as needed)
    vapply(times, function(time) {
      tryCatch(
        integrate(survFn, 0, time)$value,
        error = function(cond) NA
      )
    }, FUN.VALUE = numeric(1))
  })

  rmsts <- as.data.frame(do.call("rbind", rmsts))
  colnames(rmsts) <- NULL

  # if exactly two arms, add a between-arm difference row
  if (nrow(rmsts) == 2) {
    rmsts <- rbind(rmsts,
                   difference = as.numeric(rmsts[2, ]) - as.numeric(rmsts[1, ]))
  }

  # Add row of times
  rmsts <- rbind(time = times, rmsts)

  if (class == "data.frame") return(rmsts)

  ## Create FlexTable ----
  numRows <- nrow(rmsts)
  numCols <- 1 + ncol(rmsts)
  # BUG FIX: the original contained a doubled assignment
  # ("MyFTable <- MyFTable <- FlexTable(...)").
  MyFTable <- FlexTable(numrow = numRows, numcol = numCols,
                        body.par.props = parProperties(text.align = "right"),
                        header.text.props = textProperties(font.weight = "bold"),
                        body.cell.props = cellProperties(padding.right = 1))

  # Set borders: none everywhere, then heavy rules above the first two rows
  # and below the last row
  MyFTable[1:numRows, 1:numCols, side = 'bottom'] <- borderProperties(width = 0)
  MyFTable[1:numRows, 1:numCols, side = 'left']   <- borderProperties(width = 0)
  MyFTable[1:numRows, 1:numCols, side = 'top']    <- borderProperties(width = 0)
  MyFTable[1:numRows, 1:numCols, side = 'right']  <- borderProperties(width = 0)
  MyFTable[numRows, 1:numCols, side = 'bottom']   <- borderProperties(width = 3)
  MyFTable[1, 1:numCols, side = 'top']            <- borderProperties(width = 3)
  MyFTable[2, 1:numCols, side = 'top']            <- borderProperties(width = 3)

  # Add in data to table
  MyFTable[2:numRows, 2:numCols] <- round(rmsts[2:numRows, ], digits = digits)
  MyFTable[1, 2:numCols] <- times
  MyFTable[1:numRows, 1] <- rownames(rmsts)

  # Add header denoting which distribution
  hR <- FlexRow(paste(getDistributionDisplayNames(model), "\nrestricted means"),
                colspan = numCols,
                par.properties = parProperties(text.align = "center", padding = 1),
                cell.properties = cellProperties(border.width = 0),
                text.properties = textProperties(font.weight = "bold"))
  MyFTable <- addHeaderRow(MyFTable, hR)

  MyFTable
})

##' Method to calculate RMST on subset of data contained in a
##' SemiParametricModel object
##' @name calcRmst
##' @rdname calcRmst-methods
##' @param object (SemiParametricModel) The object which was created when
##' fitting the Cox model
##' @param ... additional parameters needed for specific instances of this
##' generic
##' @return (rmst object) contains list of RMST values, differences and call or
##' a FlexTable for output into a word document (depending on the class variable)
##' @export
setGeneric("calcRmst", function(object, ...) {
  standardGeneric("calcRmst")
})

##' @name calcRmst
##' @aliases calcRmst,SemiParametricModel-method
##' @rdname calcRmst-methods
##' @param class ('rmst' or 'FlexTable' (default)) type of output required
##' @param digits (numeric default=3) if outputting a FlexTable then the number of digits
##' to round the entries to
##' @param pval_digits (numeric default = same value as digits argument) decimal place rounding of p value
##' @export
setMethod("calcRmst", "SemiParametricModel",
function(object, class = c("rmst", "FlexTable")[2], digits = 3,
         pval_digits = digits, ...) {
  # BUG FIX: closing quote restored in the error message.
  if (length(class) != 1 || !class %in% c("rmst", "FlexTable")) {
    stop("Invalid class argument, should be 'rmst' or 'FlexTable'")
  }
  if (isSingleArm(object)) {
    stop("Cannot calculate rmst for a single arm trial")
  }

  # Create formula for Kaplan-Meier estimator
  formulaToFit <- survivalFormula(armAsFactor = !isSingleArm(object),
                                  covariates = character(0),
                                  timeCol = object@endPointDef[["timeCol"]],
                                  censorCol = object@endPointDef[["censorCol"]])

  # Call RMST
  result <- rmst(formula = formulaToFit,
                 data = object@survData@subject.data, ...)

  if (class == "rmst") {
    return(result)
  }

  # create FlexTable (note rmst function only works with two arms so the size
  # of the table is fixed)
  numRows <- 3
  numCols <- 6
  MyFTable <- FlexTable(numrow = numRows, numcol = numCols,
                        body.par.props = parProperties(text.align = "right"),
                        header.text.props = textProperties(font.weight = "bold"),
                        body.cell.props = cellProperties(padding.right = 1))

  # If the estimate is negative, reverse the signs and swap the lower/upper CI
  # bounds so the table shows the absolute difference
  if (result$diff[, 1] < 0) {
    storedDiff <- result$diff
    result$diff[, 1] <- -storedDiff[, 1]
    result$diff[, 3] <- -storedDiff[, 4]
    result$diff[, 4] <- -storedDiff[, 3]
  }

  # Add data
  MyFTable[3, 2:5] <- round(result$diff[1, 1:4], digits)

  # P value rounding: show "< 0.0...01" when below the display resolution
  pval <- result$diff[1, 5]
  p_digits <- as.numeric(pval_digits)
  MyFTable[3, 6] <- if (pval >= 10^(-p_digits)) {
    round(pval, p_digits)
  } else {
    # Equivalent output to the original's paste(..., sep = "", "1"),
    # but with the pieces in reading order.
    paste0(" < 0.", paste(rep(0, p_digits - 1), collapse = ""), "1")
  }

  MyFTable[1:2, 2:5] <- round(result$RMST[, 1:4], digits)

  # Add 1st column (the arm names)
  MyFTable[1:3, 1] <- c(as.character(getArmNames(object@survData)), "Difference")
  MyFTable[1:numRows, 1] <- parProperties(text.align = "left")
  MyFTable[1:numRows, 1] <- textProperties(font.weight = "bold")

  # Add header
  hR <- FlexRow(c("Arm", "RMST", "SE", "Lower CI", "Upper CI", "p-value"),
                par.properties = parProperties(text.align = "left"),
                cell.properties = cellProperties(padding.right = 1),
                text.properties = textProperties(font.weight = "bold"))
  MyFTable <- addHeaderRow(MyFTable, hR)

  MyFTable
})
## Compare frequencies of overlapping 3-label patterns ("bits") in the training
## labels against a shuffled baseline, to probe whether neighbouring rows are
## correlated.

train <- read.csv('~/kaggle/Whale/download/data/train.csv')

# Tabulate overlapping triples of consecutive label values.
#   w: vector of labels (coerced to character)
# Prints the table and returns it. (The original computed table(bits) twice --
# once to print and once to return -- and also left an unused top-level
# variable `w` bound to train$label.)
print_bits <- function(w) {
    n <- length(w)
    # Paste each label together with its two successors: (i, i+1, i+2).
    bits <- paste(as.character(w[1:(n - 2)]),
                  as.character(w[2:(n - 1)]),
                  as.character(w[3:n]),
                  sep = '')
    tab <- table(bits)
    print(tab)
    return(tab)
}

labels <- train$label

print("original order labels")
b1 <- print_bits(labels)

print("random order labels")
b2 <- print_bits(sample(labels, length(labels)))

# Ratio > 1 for a pattern means it occurs more often in the original order
# than chance would suggest.
print("original order / random order")
print(b1 / b2)
/Whale/analysis/next_previous.R
no_license
chrishefele/kaggle-sample-code
R
false
false
510
r
## Compare frequencies of overlapping 3-label patterns ("bits") in the training
## labels against a shuffled baseline, to probe whether neighbouring rows are
## correlated.

train <- read.csv('~/kaggle/Whale/download/data/train.csv')

# Tabulate overlapping triples of consecutive label values.
#   w: vector of labels (coerced to character)
# Prints the table and returns it. (The original computed table(bits) twice --
# once to print and once to return -- and also left an unused top-level
# variable `w` bound to train$label.)
print_bits <- function(w) {
    n <- length(w)
    # Paste each label together with its two successors: (i, i+1, i+2).
    bits <- paste(as.character(w[1:(n - 2)]),
                  as.character(w[2:(n - 1)]),
                  as.character(w[3:n]),
                  sep = '')
    tab <- table(bits)
    print(tab)
    return(tab)
}

labels <- train$label

print("original order labels")
b1 <- print_bits(labels)

print("random order labels")
b2 <- print_bits(sample(labels, length(labels)))

# Ratio > 1 for a pattern means it occurs more often in the original order
# than chance would suggest.
print("original order / random order")
print(b1 / b2)
## Read a whitespace-separated numeric vector from disk and show basic
## summaries (auto-printed at top level).
data <- scan("data/iotest1.txt")

sort(data)                       # ascending order
# Spell out TRUE rather than the reassignable shorthand T.
sort(data, decreasing = TRUE)    # descending order
sum(data)
mean(data)
/Rexam/test1.R
no_license
HWANG593/R_Programming
R
false
false
92
r
## Read a whitespace-separated numeric vector from disk and show basic
## summaries (auto-printed at top level).
data <- scan("data/iotest1.txt")

sort(data)                       # ascending order
# Spell out TRUE rather than the reassignable shorthand T.
sort(data, decreasing = TRUE)    # descending order
sum(data)
mean(data)
#R class final project, analysis stock rm(list = ls()) #install.packages("quantmod") library("quantmod") ############################data select: #find company, default: search data from yahoo #input stock name you want to search. Ex:GOOG, 2330.TW... StockName<-scan(what = "") yourStock<-getSymbols(StockName, auto.assign = F) #delete NA yourStock<-na.omit(yourStock) ############################Split-Apply-Combine #list head data #Open opening share price #Hight hightest price #Low lowest price #Close closing price #Volime sum of sale #Adjusted retroactive #each row mean one dy tail(yourStock) #count moving average(MA) #month MA yourStockma20<-runMean(yourStock[,4],n=20) #season MA yourStockma60<-runMean(yourStock[,4],n=60) ############################Split-Apply-Combine & visualize # count Dividend yield #count Dividend yield this year and compare to past 3 year #count Dividend yield this year and compare to past 3 year DY<-function(thisY, past){ #count this year dividend yield dy<-((thisY[2]) / (thisY[1]))*100 #compare to last three years if(dy>max(past)){ result<-"Higher than past three years" } else if(dy<min(past)){ result<-"lower than past three years" } else{ result<-"between past three years" } #count below the decimal point 0.xxx ans = round(dy, 3) output1<-data.frame(LastThreeYear=c(past[1],past[2],past[3]), ThisYear=c(ans, "",""), CompareTOlastThreeYear=c(result, "", "")) bar<-c("last 3" = past[3], "last 2" = past[2], "last 1" = past[1], "this year" = ans) barplot(bar, main = "past three year & this year Dividend yield comparison", xlab = "last three year & this year", ylab = "rate") return(output1) } DoDY<-function(){ #scan data #first parameter is buy price(Ex:30$/share) #second parameter is expected cash(Ex:1.5$/share) #scan():input your number in console, one data one enter #if input is over, enter again #input buy price & expected cash #use last day close price to count yield rate temp<-tail(yourStock[,4],1) thisYear<-c(temp[[1]]) #input expect cash 
ex:1.5$/share print("input expect cash ex:1.5$/share") temp<-scan() thisYear[2] = temp #input past three Dividend yield print("input past three Dividend yield") pastThreeYear<-scan() # print past three year & this year Dividend yield and compare DY(thisYear, pastThreeYear) } DoDY() ############################visualize #Plot data with assign range #assign what color you want. Ex: w/b print("assign what color you want. Ex: w(white)/b(black)") color<-scan(what = "") #range of data you want to see. Ex:"2019-12-01::2020-05-31" print("range of data you want to see. Ex:2019-12-01::2020-05-31") print("input range:") range<-scan(what = "") if( color == "b" ){ chartSeries(yourStock[range,], name = StockName) } else if (color == "w") { chartSeries(yourStock[range,], theme = "white", name = StockName) }else{print("wrong color")} #plot, maybe white theme is easier to see #add 20MA&60MA in chart addTA(yourStockma20,on=1,col="blue") addTA(yourStockma60,on=1,col="red") #rm(color,range) yourStock=as.matrix(to.weekly(yourStock)) profit=setNames(numeric(length(rownames(yourStock))), rownames(yourStock)) ############################Split-Apply-Combine & visualize ##設定profit向量紀錄每周損益 lastC=yourStock[1,4]  ##先記錄第一週的收盤價 for (m in rownames(yourStock)[-1]) {  ##開始以每週為單位跑迴圈 fee=ceiling(yourStock[m,1]*1000*(0.001425*2*0.5+0.003))   ##設定手續費與稅(假設手續費打5折) if(yourStock[m,1]<=lastC){profit[m]=(yourStock[m,4]-yourStock[m,1])*1000-fee}  ##開低買進的損益 lastC=yourStock[m,4]  ##紀錄本周收盤價,做為下週判斷開高開低的依據 } head(cbind(yourStock,profit),20) #lwd => line thick cumsum => sum of profit plot(cumsum(profit),type="l",col="red",lwd=2)
/final_project.R
no_license
ipwefpo/R_final-project
R
false
false
4,152
r
#R class final project, analysis stock rm(list = ls()) #install.packages("quantmod") library("quantmod") ############################data select: #find company, default: search data from yahoo #input stock name you want to search. Ex:GOOG, 2330.TW... StockName<-scan(what = "") yourStock<-getSymbols(StockName, auto.assign = F) #delete NA yourStock<-na.omit(yourStock) ############################Split-Apply-Combine #list head data #Open opening share price #Hight hightest price #Low lowest price #Close closing price #Volime sum of sale #Adjusted retroactive #each row mean one dy tail(yourStock) #count moving average(MA) #month MA yourStockma20<-runMean(yourStock[,4],n=20) #season MA yourStockma60<-runMean(yourStock[,4],n=60) ############################Split-Apply-Combine & visualize # count Dividend yield #count Dividend yield this year and compare to past 3 year #count Dividend yield this year and compare to past 3 year DY<-function(thisY, past){ #count this year dividend yield dy<-((thisY[2]) / (thisY[1]))*100 #compare to last three years if(dy>max(past)){ result<-"Higher than past three years" } else if(dy<min(past)){ result<-"lower than past three years" } else{ result<-"between past three years" } #count below the decimal point 0.xxx ans = round(dy, 3) output1<-data.frame(LastThreeYear=c(past[1],past[2],past[3]), ThisYear=c(ans, "",""), CompareTOlastThreeYear=c(result, "", "")) bar<-c("last 3" = past[3], "last 2" = past[2], "last 1" = past[1], "this year" = ans) barplot(bar, main = "past three year & this year Dividend yield comparison", xlab = "last three year & this year", ylab = "rate") return(output1) } DoDY<-function(){ #scan data #first parameter is buy price(Ex:30$/share) #second parameter is expected cash(Ex:1.5$/share) #scan():input your number in console, one data one enter #if input is over, enter again #input buy price & expected cash #use last day close price to count yield rate temp<-tail(yourStock[,4],1) thisYear<-c(temp[[1]]) #input expect cash 
ex:1.5$/share print("input expect cash ex:1.5$/share") temp<-scan() thisYear[2] = temp #input past three Dividend yield print("input past three Dividend yield") pastThreeYear<-scan() # print past three year & this year Dividend yield and compare DY(thisYear, pastThreeYear) } DoDY() ############################visualize #Plot data with assign range #assign what color you want. Ex: w/b print("assign what color you want. Ex: w(white)/b(black)") color<-scan(what = "") #range of data you want to see. Ex:"2019-12-01::2020-05-31" print("range of data you want to see. Ex:2019-12-01::2020-05-31") print("input range:") range<-scan(what = "") if( color == "b" ){ chartSeries(yourStock[range,], name = StockName) } else if (color == "w") { chartSeries(yourStock[range,], theme = "white", name = StockName) }else{print("wrong color")} #plot, maybe white theme is easier to see #add 20MA&60MA in chart addTA(yourStockma20,on=1,col="blue") addTA(yourStockma60,on=1,col="red") #rm(color,range) yourStock=as.matrix(to.weekly(yourStock)) profit=setNames(numeric(length(rownames(yourStock))), rownames(yourStock)) ############################Split-Apply-Combine & visualize ##設定profit向量紀錄每周損益 lastC=yourStock[1,4]  ##先記錄第一週的收盤價 for (m in rownames(yourStock)[-1]) {  ##開始以每週為單位跑迴圈 fee=ceiling(yourStock[m,1]*1000*(0.001425*2*0.5+0.003))   ##設定手續費與稅(假設手續費打5折) if(yourStock[m,1]<=lastC){profit[m]=(yourStock[m,4]-yourStock[m,1])*1000-fee}  ##開低買進的損益 lastC=yourStock[m,4]  ##紀錄本周收盤價,做為下週判斷開高開低的依據 } head(cbind(yourStock,profit),20) #lwd => line thick cumsum => sum of profit plot(cumsum(profit),type="l",col="red",lwd=2)
##-------------------------------------------- ## ## Class: PCE 350 Data Science Methods Class ## ##---- Test normaization ---- ## ## Read the tweet data set tweets = read.csv('Binary Classification_ Twitter sentiment analysis.csv', header = TRUE, stringsAsFactors = FALSE) colnames(tweets) <- c("sentiment", "tweets") # Set the column names tweets[, 'sentiment'] = ifelse(tweets$sentiment == 4, 1, 0) # set sentiment to {0,1} head(tweets) # Have a look at the data frame ## Create a tm text corpus from the tweets library(tm) ## tm package for text mining tweet.corpus <- Corpus(VectorSource(tweets['tweets'])) class(tweet.corpus) # What is the class of the corpus ## Normalize tweets text tweet.corpus <- tm_map(tweet.corpus, content_transformer(removeNumbers)) tweet.corpus <- tm_map(tweet.corpus, content_transformer(removePunctuation)) tweet.corpus <- tm_map(tweet.corpus, content_transformer(stripWhitespace)) tweet.corpus <- tm_map(tweet.corpus, content_transformer(tolower)) ## ----------------------------------------------- ## ----- Convert the corpus to a term document matrix to.tdm = function(corpus, sparse = 0.998){ require(tm) ## Compute a term-document matrix and then require(slam) # Sparse matrix package tdm <- TermDocumentMatrix(corpus, control = list(stopwords = FALSE)) tdm <- removeSparseTerms(tdm, sparse) tdm } tdm = to.tdm(tweet.corpus) # Create a term document matrix str(tdm) # Look at sparse tdm findFreqTerms(tdm, 2000) # Words that occur at least 2000 times ## Compute the word fequency from the tdm to.wf = function(tdm){ ## compute the word frequencies. require(slam) freq <- row_sums(tdm, na.rm = T) ## Sort the word frequency and build a dataframe ## including the cumulative frequecy of the words. 
freq <- sort(freq, decreasing = TRUE) word.freq <- data.frame(word = factor(names(freq), levels = names(freq)), frequency = freq) word.freq['Cumulative'] <- cumsum(word.freq['frequency'])/sum(word.freq$frequency) word.freq } wf = to.wf(tdm) head(wf, n = 10) ## Make a bar chart of the word frequency word.bar = function(wf, num = 50){ require(ggplot2) ggplot(wf[1:num,], aes(word, frequency)) + geom_bar(stat = 'identity') + ggtitle('Frequency of common words') + ylab('Frequency') + theme(axis.text.x = element_text(angle = 90, hjust = 1)) } word.bar(wf) ## Make cumulative distribution plots of the most frequent words word.cdf = function(wf, num = 50){ require(ggplot2) ggplot(wf[1:num,], aes(word, Cumulative)) + geom_bar(stat = 'identity') + ggtitle('Cumulative fraction of common words') + ylab('Cumulative frequency') + theme(axis.text.x = element_text(angle = 90, hjust = 1)) } word.cdf(wf) ## ---------------------------------------------------------- ## -----------Stop words ------------------------------------- ## ## Load stop words from a file and ensure they are stopWords = read.csv('stopwords.csv', header = TRUE, stringsAsFactors = FALSE) stopWords = unique(stopWords) # Ensure the list is unique stopWords[1:100,] # Look at the first 100 stop words ## Remove the stop words from the corpus tweet.corpus <- tm_map(tweet.corpus, removeWords, stopWords[, 'words']) ## View the results tdm = to.tdm(tweet.corpus) # Create a term document matrix findFreqTerms(tdm, 2000) # Words that occur at least 2000 times wf = to.wf(tdm) # Compute word fequency head(wf, n = 10) # Look at the most common words word.bar(wf) # Plot word frequency word.cdf(wf) # Plot cdf ## -------------------------------------------------- ## ------------ Stem the words ---------------------- ## ## Use the porter stemmer in Snowball package ## require(SnowballC) ## For Porter stemming words tweet.corpus <- tm_map(tweet.corpus, stemDocument) ## View the results tdm = to.tdm(tweet.corpus, sparse = 0.99) # 
Create a term document matrix findFreqTerms(tdm, 2000) # Words that occur at least 2000 times wf = to.wf(tdm) # Compute word fequency head(wf, n = 10) # Look at the most common words word.bar(wf) # Plot word frequency word.cdf(wf) # Plot cdf
/R/Lecture10/Lecture10/old/NormalizeText.R
no_license
StephenElston/DataScience410
R
false
false
4,123
r
##-------------------------------------------- ## ## Class: PCE 350 Data Science Methods Class ## ##---- Test normaization ---- ## ## Read the tweet data set tweets = read.csv('Binary Classification_ Twitter sentiment analysis.csv', header = TRUE, stringsAsFactors = FALSE) colnames(tweets) <- c("sentiment", "tweets") # Set the column names tweets[, 'sentiment'] = ifelse(tweets$sentiment == 4, 1, 0) # set sentiment to {0,1} head(tweets) # Have a look at the data frame ## Create a tm text corpus from the tweets library(tm) ## tm package for text mining tweet.corpus <- Corpus(VectorSource(tweets['tweets'])) class(tweet.corpus) # What is the class of the corpus ## Normalize tweets text tweet.corpus <- tm_map(tweet.corpus, content_transformer(removeNumbers)) tweet.corpus <- tm_map(tweet.corpus, content_transformer(removePunctuation)) tweet.corpus <- tm_map(tweet.corpus, content_transformer(stripWhitespace)) tweet.corpus <- tm_map(tweet.corpus, content_transformer(tolower)) ## ----------------------------------------------- ## ----- Convert the corpus to a term document matrix to.tdm = function(corpus, sparse = 0.998){ require(tm) ## Compute a term-document matrix and then require(slam) # Sparse matrix package tdm <- TermDocumentMatrix(corpus, control = list(stopwords = FALSE)) tdm <- removeSparseTerms(tdm, sparse) tdm } tdm = to.tdm(tweet.corpus) # Create a term document matrix str(tdm) # Look at sparse tdm findFreqTerms(tdm, 2000) # Words that occur at least 2000 times ## Compute the word fequency from the tdm to.wf = function(tdm){ ## compute the word frequencies. require(slam) freq <- row_sums(tdm, na.rm = T) ## Sort the word frequency and build a dataframe ## including the cumulative frequecy of the words. 
freq <- sort(freq, decreasing = TRUE) word.freq <- data.frame(word = factor(names(freq), levels = names(freq)), frequency = freq) word.freq['Cumulative'] <- cumsum(word.freq['frequency'])/sum(word.freq$frequency) word.freq } wf = to.wf(tdm) head(wf, n = 10) ## Make a bar chart of the word frequency word.bar = function(wf, num = 50){ require(ggplot2) ggplot(wf[1:num,], aes(word, frequency)) + geom_bar(stat = 'identity') + ggtitle('Frequency of common words') + ylab('Frequency') + theme(axis.text.x = element_text(angle = 90, hjust = 1)) } word.bar(wf) ## Make cumulative distribution plots of the most frequent words word.cdf = function(wf, num = 50){ require(ggplot2) ggplot(wf[1:num,], aes(word, Cumulative)) + geom_bar(stat = 'identity') + ggtitle('Cumulative fraction of common words') + ylab('Cumulative frequency') + theme(axis.text.x = element_text(angle = 90, hjust = 1)) } word.cdf(wf) ## ---------------------------------------------------------- ## -----------Stop words ------------------------------------- ## ## Load stop words from a file and ensure they are stopWords = read.csv('stopwords.csv', header = TRUE, stringsAsFactors = FALSE) stopWords = unique(stopWords) # Ensure the list is unique stopWords[1:100,] # Look at the first 100 stop words ## Remove the stop words from the corpus tweet.corpus <- tm_map(tweet.corpus, removeWords, stopWords[, 'words']) ## View the results tdm = to.tdm(tweet.corpus) # Create a term document matrix findFreqTerms(tdm, 2000) # Words that occur at least 2000 times wf = to.wf(tdm) # Compute word fequency head(wf, n = 10) # Look at the most common words word.bar(wf) # Plot word frequency word.cdf(wf) # Plot cdf ## -------------------------------------------------- ## ------------ Stem the words ---------------------- ## ## Use the porter stemmer in Snowball package ## require(SnowballC) ## For Porter stemming words tweet.corpus <- tm_map(tweet.corpus, stemDocument) ## View the results tdm = to.tdm(tweet.corpus, sparse = 0.99) # 
Create a term document matrix findFreqTerms(tdm, 2000) # Words that occur at least 2000 times wf = to.wf(tdm) # Compute word fequency head(wf, n = 10) # Look at the most common words word.bar(wf) # Plot word frequency word.cdf(wf) # Plot cdf
library(checkarg) ### Name: isNumberOrInfScalar ### Title: Wrapper for the checkarg function, using specific parameter ### settings. ### Aliases: isNumberOrInfScalar ### ** Examples isNumberOrInfScalar(2) # returns TRUE (argument is valid) isNumberOrInfScalar("X") # returns FALSE (argument is invalid) #isNumberOrInfScalar("X", stopIfNot = TRUE) # throws exception with message defined by message and argumentName parameters isNumberOrInfScalar(2, default = 1) # returns 2 (the argument, rather than the default, since it is not NULL) #isNumberOrInfScalar("X", default = 1) # throws exception with message defined by message and argumentName parameters isNumberOrInfScalar(NULL, default = 1) # returns 1 (the default, rather than the argument, since it is NULL)
/data/genthat_extracted_code/checkarg/examples/isNumberOrInfScalar.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
791
r
library(checkarg) ### Name: isNumberOrInfScalar ### Title: Wrapper for the checkarg function, using specific parameter ### settings. ### Aliases: isNumberOrInfScalar ### ** Examples isNumberOrInfScalar(2) # returns TRUE (argument is valid) isNumberOrInfScalar("X") # returns FALSE (argument is invalid) #isNumberOrInfScalar("X", stopIfNot = TRUE) # throws exception with message defined by message and argumentName parameters isNumberOrInfScalar(2, default = 1) # returns 2 (the argument, rather than the default, since it is not NULL) #isNumberOrInfScalar("X", default = 1) # throws exception with message defined by message and argumentName parameters isNumberOrInfScalar(NULL, default = 1) # returns 1 (the default, rather than the argument, since it is NULL)
[variance explained](http://varianceexplained.org/r/trump-tweets/) [student project](https://www.msi.co.jp/tmstudio/stu17contents/No4_muc17_TMS.pdf) [teaching notes](http://utstat.toronto.edu/~nathan/teaching/sta4002/Class4/trumptweets-students.html) ## geom_bar() vs geom_col() https://ggplot2.tidyverse.org/reference/geom_bar.html library(dplyr) library(purrr) library(twitteR) twitter_consumer_key <- '6GdTR9EYoAAG7vciCnUgcC4MX' twitter_consumer_secret <- '28ijnGOvIWbJtN67lKYuwBCUfy1zj6mOx5cY7DyfLHRyQ4QH4c' twitter_access_token <- "1014706107510501377-uJQ96SoRrmHr3mMeqq5AzMUs0ls6hN" twitter_access_token_secret <- "Zgmoq0q2tidFx14kkkqQidfnqqxsx0nk9sEo7mdvpPMxl" # You'd need to set global options with an authenticated app # "Using browser based authentication" setup_twitter_oauth(getOption("twitter_consumer_key"), getOption("twitter_consumer_secret"), getOption("twitter_access_token"), getOption("twitter_access_token_secret")) # "Using direct authentication" setup_twitter_oauth(twitter_consumer_key, twitter_consumer_secret, twitter_access_token, twitter_access_token_secret) # We can request only 3200 tweets at a time; it will return fewer # depending on the API trump_tweets <- userTimeline("realDonaldTrump", n = 3200) trump_tweets_df <- tibble::as_tibble(map_df(trump_tweets, as.data.frame)) # if you want to follow along without setting up Twitter authentication, # just use my dataset: load(url("http://varianceexplained.org/files/trump_tweets_df.rda")) library(tidyr) tweets <- trump_tweets_df %>% select(id, statusSource, text, created) %>% extract(statusSource, "source", "Twitter for (.*?)<") %>% filter(source %in% c("iPhone", "Android")) library(lubridate) library(scales) library(ggplot2) library(stringr) tweets %>% count(source, hour = hour(with_tz(created, "EST"))) %>% mutate(percent = n / sum(n)) %>% ggplot(aes(hour, percent, color = source)) + geom_line() + scale_y_continuous(labels = percent_format()) + labs(x = "Hour of day (EST)", y = "% of tweets", color = "") 
tweet_quote <- tweets %>% count(source, quote = ifelse(str_detect(text, '^"'), "with quote", "without quote")) ggplot(tweet_quote, aes(source, n, fill = quote)) + geom_bar(stat = 'identity', position = 'dodge') # By default, geom_bar uses stat="bin". This makes the height of each bar equal to the number of cases in each group, and it is incompatible with mapping values to the y aesthetic. If you want the heights of the bars to represent values in the data, use stat="identity" and map a value to the y aesthetic. tweet_picture_counts <- tweets %>% filter(!str_detect(text, '^"')) %>% count(source, picture = ifelse(str_detect(text, "t.co"), "Picture/link", "No picture/link")) ggplot(tweet_picture_counts, aes(source, n, fill = picture)) + geom_bar(stat = "identity", position = "dodge") + labs(x = "", y = "Number of tweets", fill = "") library(tidytext) reg <- "([^A-Za-z\\d#@']|'(?![A-Za-z\\d#@]))" tweet_words <- tweets %>% filter(!str_detect(text, '^"')) %>% mutate(text = str_replace_all(text, "https://t.co/[A-Za-z\\d]+|&amp;", "")) %>% unnest_tokens(word, text, token = "regex", pattern = reg) %>% filter(!word %in% stop_words$word, str_detect(word, "[a-z]")) tweet_words %>% count(word) %>% top_n(30) %>% ggplot(aes(reorder(word, n), n)) + geom_col() + coord_flip() tweet_words %>% count(word) %>% arrange(desc(n)) %>% top_n(30) %>% ggplot(aes(reorder(word, n), n)) + geom_col() + coord_flip() android_iphone_ratios <- tweet_words %>% count(word, source) %>% filter(sum(n) >= 5) %>% spread(source, n, fill = 0) %>% ungroup() %>% mutate_each(funs((. + 1) / sum(. + 1)), -word) %>% mutate(logratio = log2(Android / iPhone)) %>% arrange(desc(logratio)) android_iphone_ratios %>% top_n(30) %>% ggplot(aes(word, logratio)) + geom_col() + coord_flip()
/studyNotes/trumptweets.R
no_license
ThymeLy/100DaysofML
R
false
false
3,945
r
[variance explained](http://varianceexplained.org/r/trump-tweets/) [student project](https://www.msi.co.jp/tmstudio/stu17contents/No4_muc17_TMS.pdf) [teaching notes](http://utstat.toronto.edu/~nathan/teaching/sta4002/Class4/trumptweets-students.html) ## geom_bar() vs geom_col() https://ggplot2.tidyverse.org/reference/geom_bar.html library(dplyr) library(purrr) library(twitteR) twitter_consumer_key <- '6GdTR9EYoAAG7vciCnUgcC4MX' twitter_consumer_secret <- '28ijnGOvIWbJtN67lKYuwBCUfy1zj6mOx5cY7DyfLHRyQ4QH4c' twitter_access_token <- "1014706107510501377-uJQ96SoRrmHr3mMeqq5AzMUs0ls6hN" twitter_access_token_secret <- "Zgmoq0q2tidFx14kkkqQidfnqqxsx0nk9sEo7mdvpPMxl" # You'd need to set global options with an authenticated app # "Using browser based authentication" setup_twitter_oauth(getOption("twitter_consumer_key"), getOption("twitter_consumer_secret"), getOption("twitter_access_token"), getOption("twitter_access_token_secret")) # "Using direct authentication" setup_twitter_oauth(twitter_consumer_key, twitter_consumer_secret, twitter_access_token, twitter_access_token_secret) # We can request only 3200 tweets at a time; it will return fewer # depending on the API trump_tweets <- userTimeline("realDonaldTrump", n = 3200) trump_tweets_df <- tibble::as_tibble(map_df(trump_tweets, as.data.frame)) # if you want to follow along without setting up Twitter authentication, # just use my dataset: load(url("http://varianceexplained.org/files/trump_tweets_df.rda")) library(tidyr) tweets <- trump_tweets_df %>% select(id, statusSource, text, created) %>% extract(statusSource, "source", "Twitter for (.*?)<") %>% filter(source %in% c("iPhone", "Android")) library(lubridate) library(scales) library(ggplot2) library(stringr) tweets %>% count(source, hour = hour(with_tz(created, "EST"))) %>% mutate(percent = n / sum(n)) %>% ggplot(aes(hour, percent, color = source)) + geom_line() + scale_y_continuous(labels = percent_format()) + labs(x = "Hour of day (EST)", y = "% of tweets", color = "") 
tweet_quote <- tweets %>% count(source, quote = ifelse(str_detect(text, '^"'), "with quote", "without quote")) ggplot(tweet_quote, aes(source, n, fill = quote)) + geom_bar(stat = 'identity', position = 'dodge') # By default, geom_bar uses stat="bin". This makes the height of each bar equal to the number of cases in each group, and it is incompatible with mapping values to the y aesthetic. If you want the heights of the bars to represent values in the data, use stat="identity" and map a value to the y aesthetic. tweet_picture_counts <- tweets %>% filter(!str_detect(text, '^"')) %>% count(source, picture = ifelse(str_detect(text, "t.co"), "Picture/link", "No picture/link")) ggplot(tweet_picture_counts, aes(source, n, fill = picture)) + geom_bar(stat = "identity", position = "dodge") + labs(x = "", y = "Number of tweets", fill = "") library(tidytext) reg <- "([^A-Za-z\\d#@']|'(?![A-Za-z\\d#@]))" tweet_words <- tweets %>% filter(!str_detect(text, '^"')) %>% mutate(text = str_replace_all(text, "https://t.co/[A-Za-z\\d]+|&amp;", "")) %>% unnest_tokens(word, text, token = "regex", pattern = reg) %>% filter(!word %in% stop_words$word, str_detect(word, "[a-z]")) tweet_words %>% count(word) %>% top_n(30) %>% ggplot(aes(reorder(word, n), n)) + geom_col() + coord_flip() tweet_words %>% count(word) %>% arrange(desc(n)) %>% top_n(30) %>% ggplot(aes(reorder(word, n), n)) + geom_col() + coord_flip() android_iphone_ratios <- tweet_words %>% count(word, source) %>% filter(sum(n) >= 5) %>% spread(source, n, fill = 0) %>% ungroup() %>% mutate_each(funs((. + 1) / sum(. + 1)), -word) %>% mutate(logratio = log2(Android / iPhone)) %>% arrange(desc(logratio)) android_iphone_ratios %>% top_n(30) %>% ggplot(aes(word, logratio)) + geom_col() + coord_flip()
library(animint2) #Reading Happiness Index Data of 2015 HappinessData <- read.csv('./data/2015.csv') viz.plot.one <- ggplot()+ ggtitle("Happiness Score vs. Life Expectancy")+ geom_point(aes(x=Happiness.Score, y=Health..Life.Expectancy., color=Region), size=6, showSelected="Region", shape=4, data=HappinessData)+ geom_line(aes(x=Happiness.Score, y=Health..Life.Expectancy.), alpha=0.3, showSelected="Region", size=1, data=HappinessData)+ theme_light()+ xlab("Happiness Score")+ ylab("Life Expectancy")+ theme_animint(width=600, height=600) viz.plot.two <- ggplot()+ ggtitle("Freedom vs. Happiness Rank")+ geom_point(aes(x=Freedom, y=Happiness.Rank), showSelected="Region", shape=23, data=HappinessData)+ geom_bar(aes(x=Freedom, y=Happiness.Rank, fill=Region, color=Region), showSelected="Region", stat="identity", position="identity", data=HappinessData)+ theme_light()+ xlab("Freedom")+ ylab("Happiness Rank")+ theme_animint(width=600, height=600) (viz.publish <- animint( viz.plot.one, viz.plot.two, title="Happiness Dataset Viz", duration=list(Region=1500), time=list(variable="Region", ms=3000), selector.types=list(Region="multiple"), first=list(Region="Eastern Asia") )) animint2gist(viz.publish)
/Other Viz/AnimatedEasyTask.R
no_license
lazycipher/Tests-for-Animated-interactive-ggplots
R
false
false
1,435
r
library(animint2) #Reading Happiness Index Data of 2015 HappinessData <- read.csv('./data/2015.csv') viz.plot.one <- ggplot()+ ggtitle("Happiness Score vs. Life Expectancy")+ geom_point(aes(x=Happiness.Score, y=Health..Life.Expectancy., color=Region), size=6, showSelected="Region", shape=4, data=HappinessData)+ geom_line(aes(x=Happiness.Score, y=Health..Life.Expectancy.), alpha=0.3, showSelected="Region", size=1, data=HappinessData)+ theme_light()+ xlab("Happiness Score")+ ylab("Life Expectancy")+ theme_animint(width=600, height=600) viz.plot.two <- ggplot()+ ggtitle("Freedom vs. Happiness Rank")+ geom_point(aes(x=Freedom, y=Happiness.Rank), showSelected="Region", shape=23, data=HappinessData)+ geom_bar(aes(x=Freedom, y=Happiness.Rank, fill=Region, color=Region), showSelected="Region", stat="identity", position="identity", data=HappinessData)+ theme_light()+ xlab("Freedom")+ ylab("Happiness Rank")+ theme_animint(width=600, height=600) (viz.publish <- animint( viz.plot.one, viz.plot.two, title="Happiness Dataset Viz", duration=list(Region=1500), time=list(variable="Region", ms=3000), selector.types=list(Region="multiple"), first=list(Region="Eastern Asia") )) animint2gist(viz.publish)
fun_circle1 <- function() { fun_circle2() return(NULL) } fun_circle2 <- function() { fun_circle3() return(NULL) } fun_circle3 <- function() { fun_circle4() return(NULL) } fun_circle4 <- function() { fun_circle1() return(NULL) }
/data/r-testfile-2.R
no_license
ims-fhs/badhacker
R
false
false
247
r
fun_circle1 <- function() { fun_circle2() return(NULL) } fun_circle2 <- function() { fun_circle3() return(NULL) } fun_circle3 <- function() { fun_circle4() return(NULL) } fun_circle4 <- function() { fun_circle1() return(NULL) }
\name{KellyRatio} \alias{KellyRatio} \title{calculate Kelly criterion ratio (leverage or bet size) for a strategy} \usage{ KellyRatio(R, Rf = 0, method = "half") } \arguments{ \item{R}{a vector of returns to perform a mean over} \item{Rf}{risk free rate, in same period as your returns} \item{method}{method=half will use the half-Kelly, this is the default} } \description{ Kelly criterion ratio (leverage or bet size) for a strategy. } \details{ The Kelly Criterion was identified by Bell Labs scientist John Kelly, and applied to blackjack and stock strategy sizing by Ed Thorpe. The Kelly ratio can be simply stated as: \dQuote{bet size is the ratio of edge over odds.} Mathematically, you are maximizing log-utility. As such, the Kelly criterion is equal to the expected excess return of the strategy divided by the expected variance of the excess return, or \deqn{leverage=\frac{(\overline{R}_{s}-R_{f})}{StdDev(R)^{2}}}{leverage = (mean(R)-Rf=0)/StdDev(R)^2} As a performance metric, the Kelly Ratio is calculated retrospectively on a particular investment as a measure of the edge that investment has over the risk free rate. It may be use as a stack ranking method to compare investments in a manner similar to the various ratios related to the Sharpe ratio. } \examples{ data(managers) KellyRatio(managers[,1,drop=FALSE], Rf=.04/12) KellyRatio(managers[,1,drop=FALSE], Rf=managers[,10,drop=FALSE]) KellyRatio(managers[,1:6], Rf=managers[,10,drop=FALSE]) } \author{ Brian G. Peterson } \references{ Thorp, Edward O. (1997; revised 1998). The Kelly Criterion in Blackjack, Sports Betting, and the Stock Market. \url{http://www.bjmath.com/bjmath/thorp/paper.htm} \cr \url{http://en.wikipedia.org/wiki/Kelly_criterion} } \keyword{distribution} \keyword{models} \keyword{multivariate} \keyword{ts}
/man/KellyRatio.Rd
no_license
sanjivkv/PerformanceAnalytics
R
false
false
1,880
rd
\name{KellyRatio} \alias{KellyRatio} \title{calculate Kelly criterion ratio (leverage or bet size) for a strategy} \usage{ KellyRatio(R, Rf = 0, method = "half") } \arguments{ \item{R}{a vector of returns to perform a mean over} \item{Rf}{risk free rate, in same period as your returns} \item{method}{method=half will use the half-Kelly, this is the default} } \description{ Kelly criterion ratio (leverage or bet size) for a strategy. } \details{ The Kelly Criterion was identified by Bell Labs scientist John Kelly, and applied to blackjack and stock strategy sizing by Ed Thorpe. The Kelly ratio can be simply stated as: \dQuote{bet size is the ratio of edge over odds.} Mathematically, you are maximizing log-utility. As such, the Kelly criterion is equal to the expected excess return of the strategy divided by the expected variance of the excess return, or \deqn{leverage=\frac{(\overline{R}_{s}-R_{f})}{StdDev(R)^{2}}}{leverage = (mean(R)-Rf=0)/StdDev(R)^2} As a performance metric, the Kelly Ratio is calculated retrospectively on a particular investment as a measure of the edge that investment has over the risk free rate. It may be use as a stack ranking method to compare investments in a manner similar to the various ratios related to the Sharpe ratio. } \examples{ data(managers) KellyRatio(managers[,1,drop=FALSE], Rf=.04/12) KellyRatio(managers[,1,drop=FALSE], Rf=managers[,10,drop=FALSE]) KellyRatio(managers[,1:6], Rf=managers[,10,drop=FALSE]) } \author{ Brian G. Peterson } \references{ Thorp, Edward O. (1997; revised 1998). The Kelly Criterion in Blackjack, Sports Betting, and the Stock Market. \url{http://www.bjmath.com/bjmath/thorp/paper.htm} \cr \url{http://en.wikipedia.org/wiki/Kelly_criterion} } \keyword{distribution} \keyword{models} \keyword{multivariate} \keyword{ts}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/tool_functions.R \name{prune_model} \alias{prune_model} \title{Prune the model} \usage{ prune_model(ztrain, ztest, model, int, Time, vel, mle, condition = 0) } \arguments{ \item{ztrain}{training neural data} \item{ztest}{testing neural data} \item{int}{vector of integer on when the trial starts} \item{Time}{vector of time of the intervals} \item{vel}{is the reference velocity to use, need to be compatible with int and Time} \item{mle}{whether mle or kf} \item{condition}{whether to prune only on some equation} } \value{ a list with pruned model and current risk } \description{ This function prunes the current model }
/man/prune_model.Rd
no_license
fmatano/StateSpaceStepwise
R
false
true
708
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tool_functions.R
\name{prune_model}
\alias{prune_model}
\title{Prune the model}
\usage{
prune_model(ztrain, ztest, model, int, Time, vel, mle, condition = 0)
}
\arguments{
\item{ztrain}{training neural data}

\item{ztest}{testing neural data}

\item{model}{the current model to be pruned}

\item{int}{vector of integers giving when each trial starts}

\item{Time}{vector of time of the intervals}

\item{vel}{is the reference velocity to use, need to be compatible with int and Time}

\item{mle}{whether mle or kf}

\item{condition}{whether to prune only on some equation}
}
\value{
a list with pruned model and current risk
}
\description{
This function prunes the current model
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/isValidFunctions.R
\name{isValidTime}
\alias{isValidTime}
\title{Check Time String Format}
\usage{
isValidTime(date, format = "\%H:\%M:\%S")
}
\arguments{
\item{date}{A character string with the time to be validated.}

\item{format}{By default equals to "\%H:\%M:\%S"}
}
\description{
Return a boolean depending on whether the time string is in the right format
or not.
}
/man/isValidTime.Rd
permissive
fabarca/reutiles
R
false
true
386
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/isValidFunctions.R
\name{isValidTime}
\alias{isValidTime}
\title{Check Time String Format}
\usage{
isValidTime(date, format = "\%H:\%M:\%S")
}
\arguments{
\item{date}{A character string with the time to be validated.}

\item{format}{By default equals to "\%H:\%M:\%S"}
}
\description{
Return a boolean depending on whether the time string is in the right format
or not.
}
\name{rp.skewness} \alias{rp.skewness} \title{Skewness} \usage{ rp.skewness(...) } \arguments{ \item{...}{parameters to be passed to \code{rp.univar} function} } \value{ a numeric value with variable's skewness } \description{ Calculates skewness of given variable. See \code{\link{rp.univar}} for details. }
/man/rp.skewness.Rd
no_license
casunlight/rapport
R
false
false
322
rd
\name{rp.skewness} \alias{rp.skewness} \title{Skewness} \usage{ rp.skewness(...) } \arguments{ \item{...}{parameters to be passed to \code{rp.univar} function} } \value{ a numeric value with variable's skewness } \description{ Calculates skewness of given variable. See \code{\link{rp.univar}} for details. }
/01_Sensitivity/Morris.R
no_license
tdeswaef/AquaCrop-Identifiability
R
false
false
21,408
r
# Fit an "ABC random forest" classifier for Bayesian model choice from a
# formula and a reference table of simulations.
#
# Arguments:
#   formula  model index ~ summary statistics; the response must be a factor.
#   data     data.frame holding the reference table (response + sumstats).
#   group    optional list of character vectors; models listed together are
#            merged into a single group "g<k>"; models in no group are dropped.
#   lda      if TRUE, LDA axes are appended to the predictors before the RF.
#   ntree    number of trees grown by ranger().
#   sampsize number of simulations sampled per tree (capped at 1e5).
#   paral, ncores  parallelism controls forwarded to ranger().
#   ...      further arguments forwarded to ranger().
#
# Returns an object of class "abcrf": list(call, lda, formula, group,
# model.rf, model.lda, prior.err).
abcrf.formula <- function(formula, data, group=list(), lda=TRUE, ntree=500,
                          sampsize=min(1e5, nrow(data)), paral=FALSE,
                          ncores= if(paral) max(detectCores()-1,1) else 1, ...)
{
  ## ---- argument checks ----
  if (!inherits(formula, "formula")) stop("abcrf.formula is only for formula objects")
  if (!inherits(data, "data.frame")) stop("data needs to be a data.frame object")
  if (is.na(ncores)) {
    warning("Unable to automatically detect the number of CPU cores, \n1 CPU core will be used or please specify ncores.")
    ncores <- 1
  }
  if ((!is.logical(paral)) && (length(paral) != 1L)) stop("paral should be TRUE or FALSE")
  if (!is.list(group)) stop("group needs to be a list")

  ## ---- recover the model index (response) and summary statistics ----
  mf <- match.call(expand.dots = FALSE)
  m <- match(c("formula", "data"), names(mf))
  mf <- mf[c(1L, m)]
  mf[[1L]] <- as.name("model.frame")
  mf <- eval(mf, parent.frame())
  if (!is.factor(model.response(mf))) stop("response should be a factor containing the model indexes")
  if (nrow(data) == 0L || is.null(nrow(data))) stop("no simulation in the reference table (response, sumstat)")
  if ((!is.logical(lda)) && (length(lda) != 1L)) stop("lda should be TRUE or FALSE")

  ## ---- optional merging of models into groups ----
  if (length(group) != 0) {
    varn <- formula[[2]]  # name of the response column
    data[[as.character(varn)]] <- as.vector(data[[as.character(varn)]])
    allmod <- unique(data[[as.character(varn)]])
    for (k in seq_along(group))
      for (l in seq_along(group[[k]]))
        data[[as.character(varn)]][which(data[[as.character(varn)]] == group[[k]][l])] <- paste("g", k, sep = "")
    if (!setequal(allmod, unlist(group))) {
      # Drop simulations from models present in the data but in no group.
      # seq_along() avoids the 1:0 pitfall of the original 1:length(diffe):
      # when `group` mentions a model absent from the data, diffe is empty and
      # 1:0 would iterate with bogus indices.
      diffe <- setdiff(allmod, unlist(group))
      for (l in seq_along(diffe))
        data <- data[-which(data[[as.character(varn)]] == diffe[l]), ]
    }
    data[[as.character(varn)]] <- as.factor(data[[as.character(varn)]])
  }

  ## ---- optional LDA: append discriminant axes as extra predictors ----
  if (lda) {
    model.lda <- lda(formula, data)
    data <- cbind(data, as.matrix(predict(model.lda, data)$x))
  } else {
    model.lda <- NULL
  }

  m <- names(match.call(expand.dots = TRUE))
  # NOTE(review): shrinking the default sampsize only when the table has
  # <= 15 rows looks suspicious -- confirm the intended threshold.
  if ((!"sampsize" %in% m) && (nrow(data) <= 15)) sampsize <- as.integer(sampsize / 10)
  if (sampsize > nrow(data)) stop("sampsize too large")

  model.rf <- ranger(formula, data, num.trees = ntree,
                     sample.fraction = sampsize / nrow(data),
                     num.threads = ncores, keep.inbag = TRUE,
                     importance = 'impurity', ...)

  ## ---- per-class OOB error: (row total - correct) / row total ----
  class.error <- numeric(nrow(model.rf$confusion.matrix))  # preallocated
  for (i in seq_len(nrow(model.rf$confusion.matrix))) {
    rowTotal <- sum(model.rf$confusion.matrix[i, ])
    accurate <- diag(model.rf$confusion.matrix)[i]
    class.error[i] <- (rowTotal - accurate) / rowTotal
  }
  model.rf$confusion.matrix <- cbind(model.rf$confusion.matrix, class.error)
  colnames(model.rf$confusion.matrix) <- c(paste(model.rf$forest$levels), "class.error")
  # (bug fix: removed the stray no-op statement `model.rf$model.rf`)

  cl <- match.call()
  cl[[1]] <- as.name("abcrf")
  x <- list(call = cl, lda = lda, formula = formula, group = group,
            model.rf = model.rf, model.lda = model.lda,
            prior.err = model.rf$prediction.error)
  class(x) <- "abcrf"
  x
}

# Fallback for non-formula first arguments: echo the call and abort.
abcrf.default <- function(...) {
  cl <- match.call()
  cl[[1]] <- as.name("abcrf")
  cat("call:\n")
  print(cl)
  stop("the first argument should be a formula")
}

# S3 generic.
abcrf <- function(...) UseMethod("abcrf")

# Print method for "abcrf" objects: call, number of simulations, OOB prior
# error rate and the confusion matrix.
print.abcrf <- function(x, ...) {
  cat("\nCall:\n", deparse(x$call, width.cutoff = 500L), "\n")
  if (x$lda) cat("includes the axes of a preliminary LDA\n\n")
  # bug fix: num.samples is a scalar in a ranger fit, so the original
  # length(x$model.rf$num.samples) always printed 1
  cat("Number of simulations: ", x$model.rf$num.samples, "\n", sep = "")
  cat("Out-of-bag prior error rate: ", round(x$prior.err * 100, digits = 4), "%\n\n", sep = "")
  cat("Confusion matrix:\n")
  # spell the component name out instead of relying on $ partial matching
  # (the original `x$model.rf$confusion` only worked via partial matching)
  print(x$model.rf$confusion.matrix, ...)
}
/R/abcrf.R
no_license
JulienPascal/abcrf
R
false
false
3,840
r
# Fit an "ABC random forest" classifier for Bayesian model choice from a
# formula and a reference table of simulations.
#
# Arguments:
#   formula  model index ~ summary statistics; the response must be a factor.
#   data     data.frame holding the reference table (response + sumstats).
#   group    optional list of character vectors; models listed together are
#            merged into a single group "g<k>"; models in no group are dropped.
#   lda      if TRUE, LDA axes are appended to the predictors before the RF.
#   ntree    number of trees grown by ranger().
#   sampsize number of simulations sampled per tree (capped at 1e5).
#   paral, ncores  parallelism controls forwarded to ranger().
#   ...      further arguments forwarded to ranger().
#
# Returns an object of class "abcrf": list(call, lda, formula, group,
# model.rf, model.lda, prior.err).
abcrf.formula <- function(formula, data, group=list(), lda=TRUE, ntree=500,
                          sampsize=min(1e5, nrow(data)), paral=FALSE,
                          ncores= if(paral) max(detectCores()-1,1) else 1, ...)
{
  ## ---- argument checks ----
  if (!inherits(formula, "formula")) stop("abcrf.formula is only for formula objects")
  if (!inherits(data, "data.frame")) stop("data needs to be a data.frame object")
  if (is.na(ncores)) {
    warning("Unable to automatically detect the number of CPU cores, \n1 CPU core will be used or please specify ncores.")
    ncores <- 1
  }
  if ((!is.logical(paral)) && (length(paral) != 1L)) stop("paral should be TRUE or FALSE")
  if (!is.list(group)) stop("group needs to be a list")

  ## ---- recover the model index (response) and summary statistics ----
  mf <- match.call(expand.dots = FALSE)
  m <- match(c("formula", "data"), names(mf))
  mf <- mf[c(1L, m)]
  mf[[1L]] <- as.name("model.frame")
  mf <- eval(mf, parent.frame())
  if (!is.factor(model.response(mf))) stop("response should be a factor containing the model indexes")
  if (nrow(data) == 0L || is.null(nrow(data))) stop("no simulation in the reference table (response, sumstat)")
  if ((!is.logical(lda)) && (length(lda) != 1L)) stop("lda should be TRUE or FALSE")

  ## ---- optional merging of models into groups ----
  if (length(group) != 0) {
    varn <- formula[[2]]  # name of the response column
    data[[as.character(varn)]] <- as.vector(data[[as.character(varn)]])
    allmod <- unique(data[[as.character(varn)]])
    for (k in seq_along(group))
      for (l in seq_along(group[[k]]))
        data[[as.character(varn)]][which(data[[as.character(varn)]] == group[[k]][l])] <- paste("g", k, sep = "")
    if (!setequal(allmod, unlist(group))) {
      # Drop simulations from models present in the data but in no group.
      # seq_along() avoids the 1:0 pitfall of the original 1:length(diffe):
      # when `group` mentions a model absent from the data, diffe is empty and
      # 1:0 would iterate with bogus indices.
      diffe <- setdiff(allmod, unlist(group))
      for (l in seq_along(diffe))
        data <- data[-which(data[[as.character(varn)]] == diffe[l]), ]
    }
    data[[as.character(varn)]] <- as.factor(data[[as.character(varn)]])
  }

  ## ---- optional LDA: append discriminant axes as extra predictors ----
  if (lda) {
    model.lda <- lda(formula, data)
    data <- cbind(data, as.matrix(predict(model.lda, data)$x))
  } else {
    model.lda <- NULL
  }

  m <- names(match.call(expand.dots = TRUE))
  # NOTE(review): shrinking the default sampsize only when the table has
  # <= 15 rows looks suspicious -- confirm the intended threshold.
  if ((!"sampsize" %in% m) && (nrow(data) <= 15)) sampsize <- as.integer(sampsize / 10)
  if (sampsize > nrow(data)) stop("sampsize too large")

  model.rf <- ranger(formula, data, num.trees = ntree,
                     sample.fraction = sampsize / nrow(data),
                     num.threads = ncores, keep.inbag = TRUE,
                     importance = 'impurity', ...)

  ## ---- per-class OOB error: (row total - correct) / row total ----
  class.error <- numeric(nrow(model.rf$confusion.matrix))  # preallocated
  for (i in seq_len(nrow(model.rf$confusion.matrix))) {
    rowTotal <- sum(model.rf$confusion.matrix[i, ])
    accurate <- diag(model.rf$confusion.matrix)[i]
    class.error[i] <- (rowTotal - accurate) / rowTotal
  }
  model.rf$confusion.matrix <- cbind(model.rf$confusion.matrix, class.error)
  colnames(model.rf$confusion.matrix) <- c(paste(model.rf$forest$levels), "class.error")
  # (bug fix: removed the stray no-op statement `model.rf$model.rf`)

  cl <- match.call()
  cl[[1]] <- as.name("abcrf")
  x <- list(call = cl, lda = lda, formula = formula, group = group,
            model.rf = model.rf, model.lda = model.lda,
            prior.err = model.rf$prediction.error)
  class(x) <- "abcrf"
  x
}

# Fallback for non-formula first arguments: echo the call and abort.
abcrf.default <- function(...) {
  cl <- match.call()
  cl[[1]] <- as.name("abcrf")
  cat("call:\n")
  print(cl)
  stop("the first argument should be a formula")
}

# S3 generic.
abcrf <- function(...) UseMethod("abcrf")

# Print method for "abcrf" objects: call, number of simulations, OOB prior
# error rate and the confusion matrix.
print.abcrf <- function(x, ...) {
  cat("\nCall:\n", deparse(x$call, width.cutoff = 500L), "\n")
  if (x$lda) cat("includes the axes of a preliminary LDA\n\n")
  # bug fix: num.samples is a scalar in a ranger fit, so the original
  # length(x$model.rf$num.samples) always printed 1
  cat("Number of simulations: ", x$model.rf$num.samples, "\n", sep = "")
  cat("Out-of-bag prior error rate: ", round(x$prior.err * 100, digits = 4), "%\n\n", sep = "")
  cat("Confusion matrix:\n")
  # spell the component name out instead of relying on $ partial matching
  # (the original `x$model.rf$confusion` only worked via partial matching)
  print(x$model.rf$confusion.matrix, ...)
}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/blob.R \name{ps_blob_file} \alias{ps_blob_file} \title{Blob File} \usage{ ps_blob_file(file) } \arguments{ \item{file}{A string of the file name.} } \description{ Converts a file in the directory into a named blob. }
/man/ps_blob_file.Rd
permissive
poissonconsulting/poissqlite
R
false
true
295
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/blob.R \name{ps_blob_file} \alias{ps_blob_file} \title{Blob File} \usage{ ps_blob_file(file) } \arguments{ \item{file}{A string of the file name.} } \description{ Converts a file in the directory into a named blob. }
# Steepest-ascent fit of a two-component Gaussian mixture.
#
# Arguments:
#   X     vector with all sample values xi.
#   start 5-element vector c(p, mu1, mu2, sigma1, sigma2); only one p is kept
#         because p2 = 1 - p1. Note: sigma1/sigma2 enter the densities as
#         2*M[4] and 2*M[5] inside exp(), i.e. they are treated as variances.
#   step  fixed step length taken along the normalised gradient direction.
#
# Iterates M <- M + step * grad/||grad|| until ||grad|| <= .01 or 10000
# iterations (guards against oscillating between two points).
#
# Returns a one-row data.frame with columns p, Mu1, Mu2, sigma1, sigma2.
#
# Improvement over the original: the gradient/Q computation was duplicated
# verbatim before and inside the loop; it is now a single local helper, so
# the formulas exist in exactly one place. Behavior is unchanged.
steepest.ascent <- function(X, start, step=.0005){
  # Gradient of the log-likelihood at parameter vector M.
  # M[1]=p, M[2]=mu1, M[3]=mu2, M[4]=sigma1, M[5]=sigma2.
  # Q is the mixture density, the shared denominator of every component.
  # NOTE(review): the mu terms use *2*(X-mu) and the formulas omit a /sigma
  # factor relative to the textbook gradient -- preserved as-is since only
  # the (normalised) direction is used; confirm against the derivation.
  gradient <- function(M) {
    Q <- M[1]*((2*pi*M[4])^-.5)*exp(-(X-M[2])^2/(2*M[4])) +
      (1-M[1])*((2*pi*M[5])^-.5)*exp(-(X-M[3])^2/(2*M[5]))
    c(sum((((2*pi*M[4])^-.5)*exp(-(X-M[2])^2/(2*M[4])) - ((2*pi*M[5])^-.5)*exp(-(X-M[3])^2/(2*M[5])))/Q),  # p
      sum((M[1]*((2*pi*M[4])^-.5)*exp(-(X-M[2])^2/(2*M[4]))*2*(X-M[2]))/Q),  # mu1
      sum((1-M[1])*((2*pi*M[5])^-.5)*exp(-(X-M[3])^2/(2*M[5]))*2*(X-M[3])/Q),  # mu2
      sum((M[1]/(sqrt(2*pi)*Q*M[4]^4))*exp(-(X-M[2])^2/(2*M[4]))*(M[4]^2.5*(-X+M[2])-.5*M[4]^2.5+.5*M[4]^1.5*(X-M[2])^2)),  # sigma1
      sum(((1-M[1])/(sqrt(2*pi)*Q*M[5]^4))*exp(-(X-M[3])^2/(2*M[5]))*(M[5]^2.5*(-X+M[3])-.5*M[5]^2.5+.5*M[5]^1.5*(X-M[3])^2)))  # sigma2
  }

  M <- start
  grad <- gradient(M)
  norm.grad <- sqrt(sum(grad^2))
  slope <- grad/norm.grad

  # count stops the loop if it gets stuck oscillating between two values
  count <- 1
  while (norm.grad > .01 & count < 10000) {
    M <- M + slope*step
    grad <- gradient(M)
    norm.grad <- sqrt(sum(grad^2))
    slope <- grad/norm.grad
    count <- count + 1
  }
  return(data.frame(p=M[1], Mu1=M[2], Mu2=M[3], sigma1=M[4], sigma2=M[5]))
}
/Steepest Ascent.R
no_license
LannyFox/HW3
R
false
false
2,542
r
# Steepest-ascent fit of a two-component Gaussian mixture.
#
# Arguments:
#   X     vector with all sample values xi.
#   start 5-element vector c(p, mu1, mu2, sigma1, sigma2); only one p is kept
#         because p2 = 1 - p1. Note: sigma1/sigma2 enter the densities as
#         2*M[4] and 2*M[5] inside exp(), i.e. they are treated as variances.
#   step  fixed step length taken along the normalised gradient direction.
#
# Iterates M <- M + step * grad/||grad|| until ||grad|| <= .01 or 10000
# iterations (guards against oscillating between two points).
#
# Returns a one-row data.frame with columns p, Mu1, Mu2, sigma1, sigma2.
#
# Improvement over the original: the gradient/Q computation was duplicated
# verbatim before and inside the loop; it is now a single local helper, so
# the formulas exist in exactly one place. Behavior is unchanged.
steepest.ascent <- function(X, start, step=.0005){
  # Gradient of the log-likelihood at parameter vector M.
  # M[1]=p, M[2]=mu1, M[3]=mu2, M[4]=sigma1, M[5]=sigma2.
  # Q is the mixture density, the shared denominator of every component.
  # NOTE(review): the mu terms use *2*(X-mu) and the formulas omit a /sigma
  # factor relative to the textbook gradient -- preserved as-is since only
  # the (normalised) direction is used; confirm against the derivation.
  gradient <- function(M) {
    Q <- M[1]*((2*pi*M[4])^-.5)*exp(-(X-M[2])^2/(2*M[4])) +
      (1-M[1])*((2*pi*M[5])^-.5)*exp(-(X-M[3])^2/(2*M[5]))
    c(sum((((2*pi*M[4])^-.5)*exp(-(X-M[2])^2/(2*M[4])) - ((2*pi*M[5])^-.5)*exp(-(X-M[3])^2/(2*M[5])))/Q),  # p
      sum((M[1]*((2*pi*M[4])^-.5)*exp(-(X-M[2])^2/(2*M[4]))*2*(X-M[2]))/Q),  # mu1
      sum((1-M[1])*((2*pi*M[5])^-.5)*exp(-(X-M[3])^2/(2*M[5]))*2*(X-M[3])/Q),  # mu2
      sum((M[1]/(sqrt(2*pi)*Q*M[4]^4))*exp(-(X-M[2])^2/(2*M[4]))*(M[4]^2.5*(-X+M[2])-.5*M[4]^2.5+.5*M[4]^1.5*(X-M[2])^2)),  # sigma1
      sum(((1-M[1])/(sqrt(2*pi)*Q*M[5]^4))*exp(-(X-M[3])^2/(2*M[5]))*(M[5]^2.5*(-X+M[3])-.5*M[5]^2.5+.5*M[5]^1.5*(X-M[3])^2)))  # sigma2
  }

  M <- start
  grad <- gradient(M)
  norm.grad <- sqrt(sum(grad^2))
  slope <- grad/norm.grad

  # count stops the loop if it gets stuck oscillating between two values
  count <- 1
  while (norm.grad > .01 & count < 10000) {
    M <- M + slope*step
    grad <- gradient(M)
    norm.grad <- sqrt(sum(grad^2))
    slope <- grad/norm.grad
    count <- count + 1
  }
  return(data.frame(p=M[1], Mu1=M[2], Mu2=M[3], sigma1=M[4], sigma2=M[5]))
}
\name{mvdv-class} \Rdversion{1.1} \docType{class} \alias{mvdv-class} \alias{dmvdv} \alias{pmvdv} \alias{rmvdv} \title{Multivariate Distributions Constructed from Vines} \description{ Density, distribution function, and random generator for a multivariate distribution via vines. The class \code{"mvdv"} is designed after the class \code{"mvdc"} class from the copula package. } \section{Objects from the Class \code{"mvdv"}}{ Objects can be created by calling the functions \code{\link{estimateMvdv}}, \code{\link{generateMvdv}} and \code{\link{vector2mvdv}}. } \usage{ dmvdv(mvdv, x) pmvdv(mvdv, x) rmvdv(mvdv, n) } \arguments{ \item{mvdv}{Object of class \code{"mvdv"}.} \item{x}{A vector of the vine dimension or a matrix with number of columns being the vine dimension, giving the coordinates of the points where the density or distribution function needs to be evaluated.} \item{n}{Number of observations to be generated.} } \value{ \code{dmvdv} gives the density, \code{pmvdv} gives the cumulative distribution function, and \code{rmvdv} generates random variates. } \section{\code{"mvdv"} Slots}{ \describe{ \item{\code{vine}:}{Object of class \code{"Vine"}.} \item{\code{margins}:}{Character vector that determines the marginal distributions of the multivariate distribution. Each component determines the density, distribution, and quantile function names. For example, \code{"norm"} can be used to specify marginal distribution, because \code{"dnorm"}, \code{"pnorm"}, and \code{"qnorm"} are all available. Supported values for each component are: \code{"norm"}, \code{"kernel"}, \code{"gamma"} and \code{"userdist"}. The value \code{"userdist"} supports user-defined distribution, i.e., the user can freely specify \code{"duserdist"}, \code{"puserdist"}, and \code{"quserdist"}.} \item{\code{marginParams}:}{Object of class \code{"list"}. 
Each component of this list is a list with named components which are used to specify the parameters of the marginal distributions.} } } \seealso{ \code{\link{mvdc}}, \code{\link{estimateMvdv}}, \code{\link{generateMvdv}}, \code{\link{mvdv2vector}}, \code{\link{vector2mvdv}}. } \keyword{classes}
/man/mvdv-class.Rd
no_license
DianaCarrera/VinecopulaedasExtra
R
false
false
2,264
rd
\name{mvdv-class} \Rdversion{1.1} \docType{class} \alias{mvdv-class} \alias{dmvdv} \alias{pmvdv} \alias{rmvdv} \title{Multivariate Distributions Constructed from Vines} \description{ Density, distribution function, and random generator for a multivariate distribution via vines. The class \code{"mvdv"} is designed after the class \code{"mvdc"} class from the copula package. } \section{Objects from the Class \code{"mvdv"}}{ Objects can be created by calling the functions \code{\link{estimateMvdv}}, \code{\link{generateMvdv}} and \code{\link{vector2mvdv}}. } \usage{ dmvdv(mvdv, x) pmvdv(mvdv, x) rmvdv(mvdv, n) } \arguments{ \item{mvdv}{Object of class \code{"mvdv"}.} \item{x}{A vector of the vine dimension or a matrix with number of columns being the vine dimension, giving the coordinates of the points where the density or distribution function needs to be evaluated.} \item{n}{Number of observations to be generated.} } \value{ \code{dmvdv} gives the density, \code{pmvdv} gives the cumulative distribution function, and \code{rmvdv} generates random variates. } \section{\code{"mvdv"} Slots}{ \describe{ \item{\code{vine}:}{Object of class \code{"Vine"}.} \item{\code{margins}:}{Character vector that determines the marginal distributions of the multivariate distribution. Each component determines the density, distribution, and quantile function names. For example, \code{"norm"} can be used to specify marginal distribution, because \code{"dnorm"}, \code{"pnorm"}, and \code{"qnorm"} are all available. Supported values for each component are: \code{"norm"}, \code{"kernel"}, \code{"gamma"} and \code{"userdist"}. The value \code{"userdist"} supports user-defined distribution, i.e., the user can freely specify \code{"duserdist"}, \code{"puserdist"}, and \code{"quserdist"}.} \item{\code{marginParams}:}{Object of class \code{"list"}. 
Each component of this list is a list with named components which are used to specify the parameters of the marginal distributions.} } } \seealso{ \code{\link{mvdc}}, \code{\link{estimateMvdv}}, \code{\link{generateMvdv}}, \code{\link{mvdv2vector}}, \code{\link{vector2mvdv}}. } \keyword{classes}
### Iván Eduardo Sedeño Jiménez
### R code for ISLR chapter 2.
### NOTE(review): working script -- relies on setwd(), on ISLR-master/College.csv
### being present, and fix() requires an interactive session.

## Ex.3 -- sketch bias/variance/error curves against model flexibility
setwd("/Users/invitado/Documents/Temas compu")
bias        <- function(x) 350 * exp(-0.65 * x)
variance    <- function(x) x^3 / 3
train_error <- function(x) 450 / (exp(0.7 * x - 1.5) + 1) + 15
test_error  <- function(x) variance(x) + bias(x) + bayes_error(x) + rnorm(1, mean = 100, sd = 20)
bayes_error <- function(x) rnorm(1, mean = 100, sd = 11) + 0 * x  # flat irreducible error + noise
png("ch2_ex3.png")
curve(bias, xlim = c(0, 10), ylim = c(0, 600), xlab = "flexibility", ylab = "", col = 1)  # bias
curve(variance,    add = TRUE, col = 2)  # variance
curve(train_error, add = TRUE, col = 3)  # train error
curve(bayes_error, add = TRUE, col = 4)  # irreducible error
curve(test_error,  add = TRUE, col = 5)  # test error
legend(1, 600,
       legend = c("bias", "variance", "train error", "bayes error", "test error"),
       col = 1:5, lwd = 1)
dev.off()

## Ex.7 -- Euclidean distance of each observation from the origin
# Observations
o1 <- c(0, 3, 0)
o2 <- c(2, 0, 0)
o3 <- c(0, 1, 3)
o4 <- c(0, 1, 2)
o5 <- c(-1, 0, 1)
o6 <- c(1, 1, 1)
# Origin
orig <- c(0, 0, 0)
# All observations together (TRUE spelled out instead of T)
obs <- matrix(c(o1, o2, o3, o4, o5, o6), ncol = 3, byrow = TRUE)
# Computing distances. NOTE: this masks stats::dist for the session.
dist <- function(x) { sqrt(sum((x - orig)^2)) }
by(obs, 1:nrow(obs), dist, simplify = FALSE)

## EX.8
# a)
getwd()
setwd("./ISLR-master/")
college <- read.csv("College.csv")
# b) use the first column as row names, then drop it
rownames(college) <- college[, 1]
fix(college)
college <- college[, -1]
fix(college)
# c.1)
summary(college)
# c.2)
png("ch02_ex08_c_2.png")
pairs(college[, 1:10])
dev.off()
# c.3)
png("ch02_ex08_c_3.png")
plot(college$Outstate ~ college$Private)
dev.off()
# c.4) derive an Elite indicator from Top10perc
Elite <- rep("No", nrow(college))
Elite[college$Top10perc > 50] <- "Yes"
Elite <- as.factor(Elite)
college <- data.frame(college, Elite)
summary(college)
png("ch02_ex08_c_4.png")
plot(college$Outstate ~ college$Elite)
dev.off()
# c.5) histograms
# BUG FIX: the original used `College$...` (the ISLR dataset, not attached at
# this point) instead of the `college` data frame loaded above.
png("ch02_ex08_c_5.png")
par(mfrow = c(2, 2))
hist(college$Apps,      breaks = 30, main = "Apps")
hist(college$Accept,    breaks = 25, main = "Accept")
hist(college$Enroll,    breaks = 20, main = "Enroll")  # title fixed (was "Expend")
hist(college$Top10perc, breaks = 15, main = "Top10perc")
dev.off()
# c.6)
summary(college)
plot(college)
plot(college[, -c(1:2, 8:12, 15:19)])
png("ch02_ex08_c_6.png")
par(mfrow = c(2, 2))
plot(college$PhD, college$Terminal)
plot(college$Enroll, college$F.Undergrad)
plot(college$Accept, college$Enroll)
plot(college$Top10perc, college$Top25perc)
dev.off()

## EX.9
library(ISLR)
data(Auto)
# We make sure there are no NAs
Auto <- na.omit(Auto)
# a)
summary(Auto)
str(Auto)
# b) range of every quantitative predictor
lapply(Auto[, 1:7], range)
# c)
lapply(Auto[, 1:7], mean)
lapply(Auto[, 1:7], sd)
# d) same, with observations 10..85 removed
lapply(Auto[-(10:85), 1:7], range)
lapply(Auto[-(10:85), 1:7], mean)
lapply(Auto[-(10:85), 1:7], sd)
# e)
png("ch02_ex09_e.png")
pairs(Auto[, 1:7])
dev.off()

## EX.10
# a)
library(MASS)
Boston
# b)
names(Boston)
png("ch02_ex10_b.png")
pairs(Boston)
dev.off()
# c) every predictor against per-capita crime rate
library(ggplot2)
library(reshape2)
scatter_crim <- ggplot(melt(Boston, id = "crim"), aes(x = value, y = crim)) +
  geom_point() +
  facet_wrap(~variable, scales = 'free_x')
png("ch02_ex10_c.png")
scatter_crim
dev.off()
# d)
png("ch02_ex10_d.png")
ggplot(melt(t(Boston)), aes(x = Var2, y = value)) +
  geom_point() +
  facet_wrap(~Var1, scales = 'free')
dev.off()
# e)
table(Boston$chas)
median(Boston$ptratio)
# g) suburb(s) with the lowest medv, compared with overall quantiles
min_medv <- Boston$medv == min(Boston$medv)
Boston[min_medv, ]
sapply(Boston, quantile)
# h)
nrow(Boston[Boston$rm > 7, ])
nrow(Boston[Boston$rm > 8, ])
rbind(sapply(Boston[Boston$rm > 8, ], mean),
      sapply(Boston, quantile))
/Tarea_01.R
no_license
Iseez/ISLR_curso
R
false
false
3,289
r
### Iván Eduardo Sedeño Jiménez
### R code for ISLR chapter 2.
### NOTE(review): working script -- relies on setwd(), on ISLR-master/College.csv
### being present, and fix() requires an interactive session.

## Ex.3 -- sketch bias/variance/error curves against model flexibility
setwd("/Users/invitado/Documents/Temas compu")
bias        <- function(x) 350 * exp(-0.65 * x)
variance    <- function(x) x^3 / 3
train_error <- function(x) 450 / (exp(0.7 * x - 1.5) + 1) + 15
test_error  <- function(x) variance(x) + bias(x) + bayes_error(x) + rnorm(1, mean = 100, sd = 20)
bayes_error <- function(x) rnorm(1, mean = 100, sd = 11) + 0 * x  # flat irreducible error + noise
png("ch2_ex3.png")
curve(bias, xlim = c(0, 10), ylim = c(0, 600), xlab = "flexibility", ylab = "", col = 1)  # bias
curve(variance,    add = TRUE, col = 2)  # variance
curve(train_error, add = TRUE, col = 3)  # train error
curve(bayes_error, add = TRUE, col = 4)  # irreducible error
curve(test_error,  add = TRUE, col = 5)  # test error
legend(1, 600,
       legend = c("bias", "variance", "train error", "bayes error", "test error"),
       col = 1:5, lwd = 1)
dev.off()

## Ex.7 -- Euclidean distance of each observation from the origin
# Observations
o1 <- c(0, 3, 0)
o2 <- c(2, 0, 0)
o3 <- c(0, 1, 3)
o4 <- c(0, 1, 2)
o5 <- c(-1, 0, 1)
o6 <- c(1, 1, 1)
# Origin
orig <- c(0, 0, 0)
# All observations together (TRUE spelled out instead of T)
obs <- matrix(c(o1, o2, o3, o4, o5, o6), ncol = 3, byrow = TRUE)
# Computing distances. NOTE: this masks stats::dist for the session.
dist <- function(x) { sqrt(sum((x - orig)^2)) }
by(obs, 1:nrow(obs), dist, simplify = FALSE)

## EX.8
# a)
getwd()
setwd("./ISLR-master/")
college <- read.csv("College.csv")
# b) use the first column as row names, then drop it
rownames(college) <- college[, 1]
fix(college)
college <- college[, -1]
fix(college)
# c.1)
summary(college)
# c.2)
png("ch02_ex08_c_2.png")
pairs(college[, 1:10])
dev.off()
# c.3)
png("ch02_ex08_c_3.png")
plot(college$Outstate ~ college$Private)
dev.off()
# c.4) derive an Elite indicator from Top10perc
Elite <- rep("No", nrow(college))
Elite[college$Top10perc > 50] <- "Yes"
Elite <- as.factor(Elite)
college <- data.frame(college, Elite)
summary(college)
png("ch02_ex08_c_4.png")
plot(college$Outstate ~ college$Elite)
dev.off()
# c.5) histograms
# BUG FIX: the original used `College$...` (the ISLR dataset, not attached at
# this point) instead of the `college` data frame loaded above.
png("ch02_ex08_c_5.png")
par(mfrow = c(2, 2))
hist(college$Apps,      breaks = 30, main = "Apps")
hist(college$Accept,    breaks = 25, main = "Accept")
hist(college$Enroll,    breaks = 20, main = "Enroll")  # title fixed (was "Expend")
hist(college$Top10perc, breaks = 15, main = "Top10perc")
dev.off()
# c.6)
summary(college)
plot(college)
plot(college[, -c(1:2, 8:12, 15:19)])
png("ch02_ex08_c_6.png")
par(mfrow = c(2, 2))
plot(college$PhD, college$Terminal)
plot(college$Enroll, college$F.Undergrad)
plot(college$Accept, college$Enroll)
plot(college$Top10perc, college$Top25perc)
dev.off()

## EX.9
library(ISLR)
data(Auto)
# We make sure there are no NAs
Auto <- na.omit(Auto)
# a)
summary(Auto)
str(Auto)
# b) range of every quantitative predictor
lapply(Auto[, 1:7], range)
# c)
lapply(Auto[, 1:7], mean)
lapply(Auto[, 1:7], sd)
# d) same, with observations 10..85 removed
lapply(Auto[-(10:85), 1:7], range)
lapply(Auto[-(10:85), 1:7], mean)
lapply(Auto[-(10:85), 1:7], sd)
# e)
png("ch02_ex09_e.png")
pairs(Auto[, 1:7])
dev.off()

## EX.10
# a)
library(MASS)
Boston
# b)
names(Boston)
png("ch02_ex10_b.png")
pairs(Boston)
dev.off()
# c) every predictor against per-capita crime rate
library(ggplot2)
library(reshape2)
scatter_crim <- ggplot(melt(Boston, id = "crim"), aes(x = value, y = crim)) +
  geom_point() +
  facet_wrap(~variable, scales = 'free_x')
png("ch02_ex10_c.png")
scatter_crim
dev.off()
# d)
png("ch02_ex10_d.png")
ggplot(melt(t(Boston)), aes(x = Var2, y = value)) +
  geom_point() +
  facet_wrap(~Var1, scales = 'free')
dev.off()
# e)
table(Boston$chas)
median(Boston$ptratio)
# g) suburb(s) with the lowest medv, compared with overall quantiles
min_medv <- Boston$medv == min(Boston$medv)
Boston[min_medv, ]
sapply(Boston, quantile)
# h)
nrow(Boston[Boston$rm > 7, ])
nrow(Boston[Boston$rm > 8, ])
rbind(sapply(Boston[Boston$rm > 8, ], mean),
      sapply(Boston, quantile))
# Helper utilities for cmdstanr fits run with latent-dynamics (diagnostic)
# CSV output enabled.
# NOTE(review): these rely on readr (read_csv, cols), dplyr/magrittr (%>%,
# select, pull) and rstan (constrain_pars, stan_rdump) being attached by the
# package -- confirm against the package NAMESPACE.

# Read the unconstrained draws from every latent-dynamics CSV of `fit` into a
# 3-d array indexed [iteration, chain, parameter] (one CSV file per chain).
getUnconstrainedSamples = function(fit) {
  usamples_list = lapply(fit$latent_dynamics_files(), function(file) {
    # drop the sampler diagnostic columns and the p_ / g_ columns, keeping
    # only the unconstrained parameter draws
    read_csv(file, comment = "#", col_types = cols(.default = col_double())) %>%
      select(-lp__, -accept_stat__, -stepsize__, -treedepth__,
             -n_leapfrog__, -divergent__, -energy__,
             -starts_with("p_"), -starts_with("g_")) %>%
      as.matrix()
  })
  # dimensions: iterations x chains x parameters
  usamples = array(0, dim = c(nrow(usamples_list[[1]]),
                              length(usamples_list),
                              ncol(usamples_list[[1]])))
  for(i in 1:length(usamples_list)) {
    usamples[, i,] = usamples_list[[i]]
  }
  return(usamples)
}

# Return, per chain, a tibble containing only the sampler diagnostic columns.
getExtras = function(fit) {
  lapply(fit$latent_dynamics_files(), function(file) {
    read_csv(file, comment = "#", col_types = cols(.default = col_double())) %>%
      select(lp__, accept_stat__, stepsize__, treedepth__,
             n_leapfrog__, divergent__, energy__)
  })
}

# Constrain one unconstrained draw `ldraw` using `stan_fit`, dump the result
# to a temporary .dat file usable as a Stan init file, and return its path.
getInitFile = function(stan_fit, ldraw) {
  init = constrain_pars(stan_fit, ldraw %>% as.matrix)
  init_file = tempfile("init", fileext = ".dat")
  stan_rdump(names(init), init_file, env = list2env(init))
  return(init_file)
}

# Final adapted step size of each chain: the last stepsize__ value per file.
getStepsizes = function(fit) {
  sapply(fit$latent_dynamics_files(), function(file) {
    read_csv(file, comment = "#", col_types = cols(.default = col_double())) %>%
      tail(1) %>%
      pull(stepsize__)
  })
}
/R/helpers.R
no_license
yizhang-yiz/campfire
R
false
false
1,362
r
# Helper utilities for cmdstanr fits run with latent-dynamics (diagnostic)
# CSV output enabled.
# NOTE(review): these rely on readr (read_csv, cols), dplyr/magrittr (%>%,
# select, pull) and rstan (constrain_pars, stan_rdump) being attached by the
# package -- confirm against the package NAMESPACE.

# Read the unconstrained draws from every latent-dynamics CSV of `fit` into a
# 3-d array indexed [iteration, chain, parameter] (one CSV file per chain).
getUnconstrainedSamples = function(fit) {
  usamples_list = lapply(fit$latent_dynamics_files(), function(file) {
    # drop the sampler diagnostic columns and the p_ / g_ columns, keeping
    # only the unconstrained parameter draws
    read_csv(file, comment = "#", col_types = cols(.default = col_double())) %>%
      select(-lp__, -accept_stat__, -stepsize__, -treedepth__,
             -n_leapfrog__, -divergent__, -energy__,
             -starts_with("p_"), -starts_with("g_")) %>%
      as.matrix()
  })
  # dimensions: iterations x chains x parameters
  usamples = array(0, dim = c(nrow(usamples_list[[1]]),
                              length(usamples_list),
                              ncol(usamples_list[[1]])))
  for(i in 1:length(usamples_list)) {
    usamples[, i,] = usamples_list[[i]]
  }
  return(usamples)
}

# Return, per chain, a tibble containing only the sampler diagnostic columns.
getExtras = function(fit) {
  lapply(fit$latent_dynamics_files(), function(file) {
    read_csv(file, comment = "#", col_types = cols(.default = col_double())) %>%
      select(lp__, accept_stat__, stepsize__, treedepth__,
             n_leapfrog__, divergent__, energy__)
  })
}

# Constrain one unconstrained draw `ldraw` using `stan_fit`, dump the result
# to a temporary .dat file usable as a Stan init file, and return its path.
getInitFile = function(stan_fit, ldraw) {
  init = constrain_pars(stan_fit, ldraw %>% as.matrix)
  init_file = tempfile("init", fileext = ".dat")
  stan_rdump(names(init), init_file, env = list2env(init))
  return(init_file)
}

# Final adapted step size of each chain: the last stepsize__ value per file.
getStepsizes = function(fit) {
  sapply(fit$latent_dynamics_files(), function(file) {
    read_csv(file, comment = "#", col_types = cols(.default = col_double())) %>%
      tail(1) %>%
      pull(stepsize__)
  })
}
library(doParallel)

# Evaluate `expr` and return `expected` when the only error raised is the
# Windows-specific "fork clusters are not supported on Windows" one; any
# other error message is captured into `err` and fails the checkTrue below.
.fork_not_windows <- function(expected, expr) {
  err <- NULL
  obs <- tryCatch(expr, error=function(e) {
    # remember unexpected error messages; tolerated fork-on-Windows errors
    # fall through and the handler substitutes the expected value
    if (!all(grepl("fork clusters are not supported on Windows", conditionMessage(e))))
      err <<- conditionMessage(e)
    expected
  })
  checkTrue(is.null(err))        # RUnit: no unexpected error occurred
  checkIdentical(expected, obs)  # RUnit: observed result matches expected
}

# RUnit test: bpvectorize(sqrt) must reproduce sqrt(x) under every backend
# (serial, multicore, FORK/PSOCK snow clusters, and the foreach bridge).
test_bpvectorize_Params <- function() {
  params <- list(serial=SerialParam(),
                 mc=MulticoreParam(2),
                 snow0=SnowParam(2, "FORK"),
                 snow1=SnowParam(2, "PSOCK"),
                 dopar=DoparParam())
  # register a doParallel backend so DoparParam() has workers available
  # NOTE(review): `dop` is never used afterwards and the backend is not
  # deregistered -- consider registerDoSEQ() on exit
  dop <- registerDoParallel(cores=2)
  x <- 1:10
  expected <- sqrt(x)
  for (ptype in names(params)) {
    psqrt <- bpvectorize(sqrt, BPPARAM=params[[ptype]])
    .fork_not_windows(expected, psqrt(x))
  }
}
/inst/unitTests/test_bpvectorize.R
no_license
vjcitn/BiocParallel
R
false
false
859
r
library(doParallel)

# Evaluate `expr` and return `expected` when the only error raised is the
# Windows-specific "fork clusters are not supported on Windows" one; any
# other error message is captured into `err` and fails the checkTrue below.
.fork_not_windows <- function(expected, expr) {
  err <- NULL
  obs <- tryCatch(expr, error=function(e) {
    # remember unexpected error messages; tolerated fork-on-Windows errors
    # fall through and the handler substitutes the expected value
    if (!all(grepl("fork clusters are not supported on Windows", conditionMessage(e))))
      err <<- conditionMessage(e)
    expected
  })
  checkTrue(is.null(err))        # RUnit: no unexpected error occurred
  checkIdentical(expected, obs)  # RUnit: observed result matches expected
}

# RUnit test: bpvectorize(sqrt) must reproduce sqrt(x) under every backend
# (serial, multicore, FORK/PSOCK snow clusters, and the foreach bridge).
test_bpvectorize_Params <- function() {
  params <- list(serial=SerialParam(),
                 mc=MulticoreParam(2),
                 snow0=SnowParam(2, "FORK"),
                 snow1=SnowParam(2, "PSOCK"),
                 dopar=DoparParam())
  # register a doParallel backend so DoparParam() has workers available
  # NOTE(review): `dop` is never used afterwards and the backend is not
  # deregistered -- consider registerDoSEQ() on exit
  dop <- registerDoParallel(cores=2)
  x <- 1:10
  expected <- sqrt(x)
  for (ptype in names(params)) {
    psqrt <- bpvectorize(sqrt, BPPARAM=params[[ptype]])
    .fork_not_windows(expected, psqrt(x))
  }
}
#' Split river segments at runoff unit (polygon) boundaries.
#'
#' Splits an 'sf' linestring object at the boundaries of
#' runoff units (polygons).
#'
#' @param zoneID Name of the column in \code{HS} with unique IDs.
#' @inheritParams compute_HSweights
#'
#' @return Returns an 'sf' linestring object which has been split at
#'   the polygon (grid) boundaries with attributes (columns):
#'   \itemize{
#'     \item \emph{ID}. Unique ID of the split river segments.
#'     \item \emph{riverID}. ID of the original river segment prior
#'       to splitting.
#'     \item \emph{zoneID}. ID of the runoff unit split river segment
#'       is contained in.
#'     \item Other columns inherited from \code{river}.
#' }
#'
#' @examples
#' \dontrun{
#' library(raster)
#' library(hydrostreamer)
#'
#' # load data
#' data(river)
#' data(basin)
#' runoff <- brick(system.file("extdata", "runoff.tif",
#'                             package = "hydrostreamer"))
#'
#' # create HS
#' grid <- raster_to_HS(grid, aoi=basin)
#'
#' splitriver <- split_river_with_grid(river, grid,
#'                                     riverID="ID")
#' }
#'
split_river_with_grid <- function(river, HS, riverID = "riverID",
                                  zoneID = "zoneID") {
  # silence R CMD check NOTEs about non-standard-evaluation column names
  ID <- NULL
  geometry <- NULL

  # keep only the zone ID column of HS, renamed to "zoneID"
  grid <- dplyr::select(HS, zoneID = !!zoneID)

  # intersecting with the polygons splits each linestring at polygon
  # boundaries and attaches the zoneID of the containing polygon
  river <- suppressMessages(suppressWarnings(sf::st_intersection(river, grid)))
  river$line_length <- sf::st_length(river)
  river$line_length_corr <- river$line_length

  ### handle river segments at the boundaries: a segment lying exactly on a
  ### runoff-unit boundary appears on both sides, so halve its contribution
  gridint <- suppressMessages(
    suppressWarnings(
      sf::st_intersection(river,
                          sf::st_geometry(
                            sf::st_cast(
                              sf::st_cast(grid, "MULTILINESTRING"),
                              "LINESTRING")))))
  # keep only the intersections that are themselves line segments
  ind <- sf::st_is(gridint, "LINESTRING")
  if(sum(ind) != 0) {
    gridint <- gridint[ind,]
    # NOTE(review): `!!riverID` unquotes the *string* passed in the riverID
    # argument (a constant), not the column it names, so distinct() here is
    # effectively by geometry only; `!!rlang::sym(riverID)` or
    # `.data[[riverID]]` was probably intended -- confirm before changing.
    gridint <- dplyr::distinct(gridint, !!riverID, geometry)
    # NOTE(review): the code below hard-codes a column literally named
    # "riverID" (gridint$riverID, river$riverID) regardless of the riverID
    # argument -- confirm callers always use the default column name.
    ids <- unique(gridint$riverID)
    for(i in ids) {
      ind_riv <- river$riverID == i
      ind_grid <- gridint$riverID == i
      l <- sf::st_length(gridint[ind_grid,])
      # subtract half of the on-boundary length from the affected segment(s)
      river$line_length_corr[ind_riv] <- river$line_length[ind_riv] - l/2
    }
  }

  # add unique IDs and order columns as ID, riverID, zoneID, <rest>
  river$ID <- 1:NROW(river)
  river <- river %>%
    dplyr::select(ID, riverID, zoneID, dplyr::everything())
  river <- tibble::as_tibble(river) %>% sf::st_as_sf()
  return(river)
}
/R/split_river_with_grid.R
permissive
tanxuezhi/hydrostreamer
R
false
false
2,810
r
#' Split river segments at runoff unit (polygon) boundaries.
#'
#' Splits an 'sf' linestring object at the boundaries of
#' runoff units (polygons).
#'
#' @param zoneID Name of the column in \code{HS} with unique IDs.
#' @inheritParams compute_HSweights
#'
#' @return Returns an 'sf' linestring object which has been split at
#'   the polygon (grid) boundaries with attributes (columns):
#'   \itemize{
#'     \item \emph{ID}. Unique ID of the split river segments.
#'     \item \emph{riverID}. ID of the original river segment prior
#'       to splitting.
#'     \item \emph{zoneID}. ID of the runoff unit split river segment
#'       is contained in.
#'     \item Other columns inherited from \code{river}.
#' }
#'
#' @examples
#' \dontrun{
#' library(raster)
#' library(hydrostreamer)
#'
#' # load data
#' data(river)
#' data(basin)
#' runoff <- brick(system.file("extdata", "runoff.tif",
#'                             package = "hydrostreamer"))
#'
#' # create HS
#' grid <- raster_to_HS(grid, aoi=basin)
#'
#' splitriver <- split_river_with_grid(river, grid,
#'                                     riverID="ID")
#' }
#'
split_river_with_grid <- function(river, HS, riverID = "riverID",
                                  zoneID = "zoneID") {
  # silence R CMD check NOTEs about non-standard-evaluation column names
  ID <- NULL
  geometry <- NULL

  # keep only the zone ID column of HS, renamed to "zoneID"
  grid <- dplyr::select(HS, zoneID = !!zoneID)

  # intersecting with the polygons splits each linestring at polygon
  # boundaries and attaches the zoneID of the containing polygon
  river <- suppressMessages(suppressWarnings(sf::st_intersection(river, grid)))
  river$line_length <- sf::st_length(river)
  river$line_length_corr <- river$line_length

  ### handle river segments at the boundaries: a segment lying exactly on a
  ### runoff-unit boundary appears on both sides, so halve its contribution
  gridint <- suppressMessages(
    suppressWarnings(
      sf::st_intersection(river,
                          sf::st_geometry(
                            sf::st_cast(
                              sf::st_cast(grid, "MULTILINESTRING"),
                              "LINESTRING")))))
  # keep only the intersections that are themselves line segments
  ind <- sf::st_is(gridint, "LINESTRING")
  if(sum(ind) != 0) {
    gridint <- gridint[ind,]
    # NOTE(review): `!!riverID` unquotes the *string* passed in the riverID
    # argument (a constant), not the column it names, so distinct() here is
    # effectively by geometry only; `!!rlang::sym(riverID)` or
    # `.data[[riverID]]` was probably intended -- confirm before changing.
    gridint <- dplyr::distinct(gridint, !!riverID, geometry)
    # NOTE(review): the code below hard-codes a column literally named
    # "riverID" (gridint$riverID, river$riverID) regardless of the riverID
    # argument -- confirm callers always use the default column name.
    ids <- unique(gridint$riverID)
    for(i in ids) {
      ind_riv <- river$riverID == i
      ind_grid <- gridint$riverID == i
      l <- sf::st_length(gridint[ind_grid,])
      # subtract half of the on-boundary length from the affected segment(s)
      river$line_length_corr[ind_riv] <- river$line_length[ind_riv] - l/2
    }
  }

  # add unique IDs and order columns as ID, riverID, zoneID, <rest>
  river$ID <- 1:NROW(river)
  river <- river %>%
    dplyr::select(ID, riverID, zoneID, dplyr::everything())
  river <- tibble::as_tibble(river) %>% sf::st_as_sf()
  return(river)
}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/heatmap.R \name{merge_heatmap_opts} \alias{merge_heatmap_opts} \title{Merge default options for a heatmap} \usage{ merge_heatmap_opts(opts = list()) } \arguments{ \item{opts}{[list] A partially specified list used to customize appearance in ggplot theme(). Options that are already specified will not be changed, those that are not will be filled in with defaults.} } \value{ opts [list] A version of opts with unspecified options filled in with defaults. } \description{ Merge default options for a heatmap }
/man/merge_heatmap_opts.Rd
no_license
krisrs1128/ggscaffold
R
false
true
592
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/heatmap.R \name{merge_heatmap_opts} \alias{merge_heatmap_opts} \title{Merge default options for a heatmap} \usage{ merge_heatmap_opts(opts = list()) } \arguments{ \item{opts}{[list] A partially specified list used to customize appearance in ggplot theme(). Options that are already specified will not be changed, those that are not will be filled in with defaults.} } \value{ opts [list] A version of opts with unspecified options filled in with defaults. } \description{ Merge default options for a heatmap }
library(ggvis)

# UI definition: a three-tab navbar -- (1) data upload plus dataset preview,
# (2) "basic" plots rendered with ggplot2, (3) "advanced" plots rendered with
# ggvis. The server populates the selectInput choices and renders all outputs.
shinyUI(
  navbarPage(p(h5(strong("Navigate Bar for Plots"), style = "color:Green")),

    # ---- Tab 1: data input, description, preview / summary / structure ----
    tabPanel(p(h3(strong("Input Data"), style = "color:green")),
      titlePanel(p(h2("Basic Visualisation Tool", style = "color:orange"))),
      br(),
      sidebarPanel(style = "color:black", "Input Data",
        wellPanel(
          fileInput('data', 'Choose CSV File',
                    accept = c('text/csv',
                               'text/comma-separated-values,text/plain',
                               '.csv')),
          numericInput("obs", "Number of observations to view:", 0),
          checkboxInput('header', 'Header', TRUE),
          # typo fix: "Delimeter" -> "Delimiter"
          radioButtons('sep', 'Delimiter',
                       c(Comma = ',', Semicolon = ';', Tab = '\t'), 'Comma'),
          radioButtons('quote', 'Quote',
                       c(None = '', 'Double Quote' = '"', 'Single Quote' = "'"),
                       'None')
        ),
        width = 4
      ),
      mainPanel(
        tabsetPanel(position = "above",
          tabPanel(h4("Description"),
            h3("Welcome to R Visuals", style = "color:blue"),
            p("R has been a great Tool for Visualisations with numerous packages like ggplot2, ggvis, plotify etc."),
            p("This tool intends to cover Basic Plots using",
              strong("ggplot2", style = "color:blue"), "and",
              strong("ggvis", style = "color:blue"), "packages"),
            br(),
            p(strong("Tool Features - Input Data", style = "color:blue")),
            p("1. Supports any kind of csv/text datasets"),
            p("2. Provides Initial Observations based on Users Input to understand the Structure of Dataset"),
            p("3. Provides Summary of Dataset"),
            p("4. Provides Structure of Dataset - Very helpful to select Options for Making Plots"),
            p("5. Navigate Bar to switch for Plots"),
            br(),
            p(strong("Tool Features - Basic Plots using", em("ggplot Package"), style = "color:blue")),
            p("1. Scatter Plots - For Continuous X and Y Variables "),
            p("2. Bar Plots - For Categorical Variables (X) with Option for Fill (Y)"),
            p("3. Histograms - For Continuous Variables (only X)"),
            # typo fix: "Continous" -> "Continuous"
            p("4. Box Plots - For Categorical Variable (X) and Continuous Variable (Y)"),
            br(),
            p(strong("Tool Features - Advanced Plots using", em("ggvis Package"), style = "color:blue")),
            p("1. Scatter Plots - For Continuous X and Y Variables with additional Fill Variable"),
            p("2. Smooth Lines - Smoothing on Scatter Plots to observe Trend Patterns"),
            br(),
            p("Hope you have fun Using the Tool!!!"),
            br(), br(), br(),
            # typo fix: "clasrifications" -> "clarifications"
            p(em("For any clarifications/ suggestions feel free to contact me at",
                 em("krish7189@yahoo.co.in", style = "color:blue"),
                 em("or at my blog"),
                 em("rcodeeasy.blogspot.sg", style = "color:blue")))
          ),
          tabPanel(h4("DataSet"),
            h4(textOutput("headname")),
            tableOutput("head"),
            h4("Summary of the Data Set"),
            tableOutput("Summary"),
            h4("Structure of Data Set"),
            verbatimTextOutput("str")
          )))),

    # ---- Tab 2: basic plots (ggplot2) ----
    tabPanel(p(h3(strong("Basic Plots"), style = "color:green")),
      titlePanel(p(h2("GGPlots", style = "color:orange"))),
      sidebarPanel("Variable Selection",
        wellPanel(
          selectInput("xvar", "Choose X-axis Variable:", ""),
          selectInput("yvar", "Choose Y-axis Variable:", "")
        )),
      mainPanel(
        tabsetPanel(position = "above",
          "Scatter Plots",
          tabPanel(h4("Scatter Plots"),
            h3("Scatter Plots for Continuous X and Y Variables"),
            h5("Choose both X and Y Variable for a Scatter Plot"),
            plotOutput("ScatterPlot")),
          "Bar Plots",
          tabPanel(h4("Bar Plot"),
            h3("Bar Plots for Categorical Variables"),
            # typo fix: "genertaing" -> "generating"
            h5("Choose X variable for generating Bar Plot and Y Variable as a Fill"),
            plotOutput("barplot")),
          "Histograms",
          tabPanel(h4("Histogram"),
            # typo fix: "Continuos" -> "Continuous"
            h3("Histograms for Continuous Data"),
            h5("Choose only x-Variable"),
            plotOutput("hist")),
          "Box Plots",
          tabPanel(h4("Box Plots"),
            h3("Box plots"),
            h5("Choose X (Categorical Variable) and Y (Continuous Variable)"),
            plotOutput("beanplot"))
        ))),

    # ---- Tab 3: advanced plots (ggvis) ----
    tabPanel(p(h3(strong("Advanced Plots"), style = "color:green")),
      titlePanel(p(h2("GGVIS Plots", style = "color:orange"))),
      sidebarPanel("Variable Selection",
        wellPanel(
          selectInput("xvar1", "Choose X-axis Variable:", ""),
          selectInput("yvar1", "Choose Y-axis Variable:", ""),
          selectInput("fill1", "Choose fill Variable:", "")
        )),
      mainPanel(
        tabsetPanel(position = "above",
          "Scatter Plots",
          tabPanel(h4("Scatter Plots"),
            h3("Scatter Plots for Continuous X and Y Variables along with fill"),
            h5("Choose both X and Y Variable along with fill for enhanced visualisation "),
            ggvisOutput("plot_scatter"),
            uiOutput("plot_scatter_ui")),
          "Scatter Plots with Smooth Lines",
          tabPanel(h4("Smooth Lines"),
            h3("Scatter Plots for Continuous X and Y Variables with Smooth line"),
            h5("Choose both X and Y Variable"),
            ggvisOutput("plot_smooth"),
            uiOutput("plot_smooth_ui"))
        )))
  ))
/shiny_cluster_8sep/graficas/Carpeta sin título 2/ui.R
no_license
isabelmillan/shiny_B
R
false
false
10,467
r
library(ggvis) shinyUI( navbarPage(p(h5(strong("Navigate Bar for Plots"), style = "color:Green")), tabPanel(p(h3(strong("Input Data"),style = "color:green")), titlePanel(p(h2("Basic Visualisation Tool",style = "color:orange"))), br(), sidebarPanel(style="color:black","Input Data", wellPanel(fileInput('data', 'Choose CSV File', accept=c('text/csv','text/comma-separated-values,text/plain','.csv')), numericInput("obs", "Number of observations to view:",0), checkboxInput('header', 'Header', TRUE), radioButtons('sep', 'Delimeter', c(Comma=',', Semicolon=';', Tab='\t'), 'Comma'), radioButtons('quote', 'Quote', c(None='', 'Double Quote'='"', 'Single Quote'="'"), 'None') ), width=4 ), mainPanel( tabsetPanel(position ="above", tabPanel(h4("Description"), h3("Welcome to R Visuals",style = "color:blue"), p("R has been a great Tool for Visualisations with numerous packages like ggplot2, ggvis, plotify etc."), p("This tool intends to cover Basic Plots using",strong("ggplot2",style = "color:blue"), "and", strong("ggvis",style = "color:blue"), "packages"), br(), p(strong("Tool Features - Input Data",style = "color:blue")), p("1. Supports any kind of csv/text datasets"), p("2. Provides Initial Observations based on Users Input to understand the Structure of Dataset"), p("3. Provides Summary of Dataset"), p("4. Provides Structure of Dataset - Very helpful to select Options for Making Plots"), p("5. Navigate Bar to switch for Plots"), br(), p(strong("Tool Features - Basic Plots using",em("ggplot Package"),style = "color:blue")), p("1. Scatter Plots - For Continuous X and Y Variables "), p("2. Bar Plots - For Categorical Variables (X) with Option for Fill (Y)"), p("3. Histograms - For Continuous Variables (only X)"), p("4. Box Plots - For Categorical Variable (X) and Continous Variable (Y)"), br(), p(strong("Tool Features - Advanced Plots using", em("ggvis Package"),style = "color:blue")), p("1. Scatter Plots - For Continuous X and Y Variables with additional Fill Variable"), p("2. 
Smooth Lines - Smoothing on Scatter Plots to observe Trend Patterns"), br(), p("Hope you have fun Using the Tool!!!"), br(), br(), br(), p(em("For any clasrifications/ suggestions feel free to contact me at", em("krish7189@yahoo.co.in",style = "color:blue"),em("or at my blog"),em("rcodeeasy.blogspot.sg",style = "color:blue"))) ), tabPanel(h4("DataSet"),h4(textOutput("headname")), tableOutput("head"), h4("Summary of the Data Set"), tableOutput("Summary"), h4("Structure of Data Set"), verbatimTextOutput("str") )))), tabPanel(p(h3(strong("Basic Plots"),style = "color:green")), titlePanel(p(h2("GGPlots",style = "color:orange"))), sidebarPanel("Variable Selection", wellPanel( selectInput("xvar","Choose X-axis Variable:",""), selectInput("yvar","Choose Y-axis Variable:","") )), mainPanel( tabsetPanel(position ="above", "Scatter Plots", tabPanel(h4("Scatter Plots"), h3("Scatter Plots for Continuous X and Y Variables"), h5("Choose both X and Y Variable for a Scatter Plot"), plotOutput("ScatterPlot")), "Bar Plots", tabPanel(h4("Bar Plot"), h3("Bar Plots for Categorical Variables"), h5("Choose X variable for genertaing Bar Plot and Y Variable as a Fill"), plotOutput("barplot")), "Histograms", tabPanel(h4("Histogram"), h3("Histograms for Continuos Data"), h5("Choose only x-Variable"),plotOutput("hist")), "Box Plots", tabPanel(h4("Box Plots"), h3("Box plots"), h5("Choose X (Categorical Variable) and Y (Continuous Variable)"),plotOutput("beanplot")) ))), tabPanel(p(h3(strong("Advanced Plots"),style = "color:green")), titlePanel(p(h2("GGVIS Plots",style = "color:orange"))), sidebarPanel("Variable Selection", wellPanel( selectInput("xvar1","Choose X-axis Variable:",""), selectInput("yvar1","Choose Y-axis Variable:",""), selectInput("fill1","Choose fill Variable:","") )), mainPanel( tabsetPanel(position ="above", "Scatter Plots", tabPanel(h4("Scatter Plots"), h3("Scatter Plots for Continuous X and Y Variables along with fill"), h5("Choose both X and Y Variable along with fill for 
enhanced visualisation "), ggvisOutput("plot_scatter"), uiOutput("plot_scatter_ui")), "Scatter Plots with Smooth Lines", tabPanel(h4("Smooth Lines"), h3("Scatter Plots for Continuous X and Y Variables with Smooth line"), h5("Choose both X and Y Variable"), ggvisOutput("plot_smooth"), uiOutput("plot_smooth_ui")) ))) ))
# Regression analysis of university COVID-19 test positivity rates against
# campus policies and characteristics. Reads CollegeData.csv (which carries a
# custom three-line header), produces exploratory plots, then fits two linear
# models (with and without TestingStrategy), saving model summaries and
# coefficient confidence-interval plots under ./output/.
library(ggplot2)
library(data.table)
library(usmap)

# First three lines are metadata: full column names, short column names, and
# the column classes -- parse them separately from the data rows.
metadata <- readLines('./CollegeData.csv', n = 3)
metadata <- strsplit(metadata, ',')
fullColumnNames <- metadata[[1]]
columnNames <- metadata[[2]]
columnClasses <- metadata[[3]]

# Custom class for date formatting: lets read.csv coerce 'Date' columns
# directly from m/d/Y strings via setAs dispatch.
setClass('myDate')
setAs('character', 'myDate', function(from) as.Date(from, format = '%m/%d/%Y'))
columnClasses[columnClasses == 'Date'] <- 'myDate'

# Read data, skipping the three metadata lines and applying parsed classes
collegeData <- read.csv('./CollegeData.csv', header = FALSE, skip = 3,
                        colClasses = columnClasses)
colnames(collegeData) <- columnNames
collegeData <- as.data.table(collegeData)

# Map a letter grade to an ordinal score (A+ = 12 ... F = 0); any
# unrecognized value falls through to NA.
gradeToNumber <- function(grade) {
  return(
    switch(grade,
           'A+' = 12,
           'A' = 11,
           'A-' = 10,
           'B+' = 9,
           'B' = 8,
           'B-' = 7,
           'C+' = 6,
           'C' = 5,
           'C-' = 4,
           'D+' = 3,
           'D' = 2,
           'D-' = 1,
           'F' = 0,
           NA
    ))
}

# Transform data as needed: drop rows flagged to ignore or lacking the
# response, and convert PartyScene letter grades to numeric scores.
collegeData <- collegeData[Ignore != 'yes' & !is.na(AveragePositivityRate)]
collegeData <- collegeData[, PartyScene := sapply(PartyScene, gradeToNumber)]

# Visualization of the data we have: positivity rate vs. enrollment,
# colored by testing strategy
ggplot(data = collegeData,
       aes(x = Enrollment, y = AveragePositivityRate,
           color = TestingStrategy)) +
  geom_point() +
  labs(title = 'University Enrollment Scatter Plot') +
  theme(plot.title = element_text(hjust = 0.5))
ggsave('./output/EnrollmentScatterPlot.png')

# Map universities represented: count of sampled universities per state
collegesPerState <- collegeData[, list(count = length(State)),
                                by = list(state = State)]
plot_usmap(
  data = collegesPerState,
  values = 'count',
  include = factor(collegesPerState$state),
  color = 'transparent'
) +
  scale_fill_continuous(
    low = 'orange',
    high = 'red',
    name = 'Number of Universities',
    label = scales::comma
  ) +
  labs(title = paste('States Represented (', nrow(collegeData), ' Total)',
                     sep = '')) +
  theme(plot.title = element_text(hjust = 0.5, size = 22))
ggsave('./output/UniversitiesRepresented.png')

# Write the model summary to ./output/Regression_<fileSuffix>.txt and save a
# plot of approximate 95% confidence intervals for each coefficient.
outputModel <- function(model, fileSuffix) {
  # boxplot(model[['residuals']],main='Boxplot: Residuals',ylab='residual value')

  # Save the model summary (sink redirects print output to the file)
  sink(file = paste('./output/Regression_', fileSuffix, '.txt', sep = ''))
  print(summary(model))
  sink()

  # Get coefficient data from our model
  coefficients <- coef(summary(model))
  coefficients <- as.data.frame(coefficients)
  colnames(coefficients) <- c('Estimate', 'StdError', 't', 'p')

  # Remove the Intercept row
  rowDrops <- c('(Intercept)')
  coefficients <- coefficients[!(rownames(coefficients) %in% rowDrops), ]

  # Visualization for coefficient confidence interval
  # (Estimate +/- 2 standard errors approximates a 95% CI)
  ggplot(data = coefficients,
         aes(x = rownames(coefficients),
             ymin = Estimate - 2 * StdError,
             ymax = Estimate + 2 * StdError,
             color = p)) +
    geom_hline(yintercept = 0) +
    geom_errorbar() +
    coord_flip() +
    labs(title = 'Regression Coefficient 95% Confidence Intervals',
         x = 'Coefficient',
         y = '95% Confidence Interval'
    ) +
    theme(plot.title = element_text(hjust = 0.5))
  ggsave(paste('./output/ConfidenceIntervals_', fileSuffix, '.png', sep = ''))
}

# Run regression: full model including TestingStrategy
# (commented-out terms were considered but excluded from the final fit)
model <- lm(
  AveragePositivityRate ~
    # + Funding
    + Enrollment
    # + Region
    + StartDate
    + TestingStrategy
    # + TestOnContactTraced
    + HasAsymptomaticTesting
    + PreArrivalTesting
    + UniversityAssistedQuarantine
    + PreArrivalQuarantine
    + DailyHealthChecks
    + HasInPersonClasses
    # + EventPersonLimit
    # + EventApproval
    # + LibraryOpen
    # + FansAtSportingEvents
    + PartyScene
  , data = collegeData)
outputModel(model, 'WithTestingStrategy')

# Same model with TestingStrategy dropped, for comparison
model <- lm(
  AveragePositivityRate ~
    # + Funding
    + Enrollment
    # + Region
    + StartDate
    # + TestingStrategy
    # + TestOnContactTraced
    + HasAsymptomaticTesting
    + PreArrivalTesting
    + UniversityAssistedQuarantine
    + PreArrivalQuarantine
    + DailyHealthChecks
    + HasInPersonClasses
    # + EventPersonLimit
    # + EventApproval
    # + LibraryOpen
    # + FansAtSportingEvents
    + PartyScene
  , data = collegeData)
outputModel(model, 'WithoutTestingStrategy')
/Regression.R
no_license
jackson-nestelroad/global-health-project-regression
R
false
false
4,271
r
library(ggplot2) library(data.table) library(usmap) # First three lines are metadata metadata <- readLines('./CollegeData.csv', n = 3) metadata <- strsplit(metadata, ',') fullColumnNames <- metadata[[1]] columnNames <- metadata[[2]] columnClasses <- metadata[[3]] # Custom class for date formatting setClass('myDate') setAs('character','myDate', function(from) as.Date(from, format = '%m/%d/%Y')) columnClasses[columnClasses == 'Date'] <- 'myDate' # Read data collegeData <- read.csv('./CollegeData.csv', header = FALSE, skip = 3, colClasses = columnClasses) colnames(collegeData) <- columnNames collegeData <- as.data.table(collegeData) gradeToNumber <- function(grade) { return( switch(grade, 'A+' = 12, 'A' = 11, 'A-' = 10, 'B+' = 9, 'B' = 8, 'B-' = 7, 'C+' = 6, 'C' = 5, 'C-' = 4, 'D+' = 3, 'D' = 2, 'D-' = 1, 'F' = 0, NA )) } # Transform data as needed collegeData <- collegeData[Ignore != 'yes' & !is.na(AveragePositivityRate)] collegeData <- collegeData[, PartyScene := sapply(PartyScene, gradeToNumber)] # Visualization of the data we have ggplot(data = collegeData, aes(x = Enrollment, y = AveragePositivityRate, color = TestingStrategy)) + geom_point() + labs(title = 'University Enrollment Scatter Plot') + theme(plot.title = element_text(hjust = 0.5)) ggsave('./output/EnrollmentScatterPlot.png') # Map universities represented collegesPerState <- collegeData[, list(count = length(State)), by = list(state = State)] plot_usmap( data = collegesPerState, values = 'count', include = factor(collegesPerState$state), color = 'transparent' ) + scale_fill_continuous( low = 'orange', high = 'red', name = 'Number of Universities', label = scales::comma ) + labs(title = paste('States Represented (', nrow(collegeData), ' Total)', sep = '')) + theme(plot.title = element_text(hjust = 0.5, size = 22)) ggsave('./output/UniversitiesRepresented.png') outputModel <- function(model, fileSuffix) { # boxplot(model[['residuals']],main='Boxplot: Residuals',ylab='residual value') # Save the model 
summary sink(file = paste('./output/Regression_', fileSuffix, '.txt', sep = '')) print(summary(model)) sink() # Get coefficient data from our model coefficients <- coef(summary(model)) coefficients <- as.data.frame(coefficients) colnames(coefficients) <- c('Estimate', 'StdError', 't', 'p') # Remove the Intercept row rowDrops <- c('(Intercept)') coefficients <- coefficients[!(rownames(coefficients) %in% rowDrops), ] # Visualization for coefficient confidence interval ggplot(data = coefficients, aes(x = rownames(coefficients), ymin = Estimate - 2 * StdError, ymax = Estimate + 2 * StdError, color = p)) + geom_hline(yintercept = 0) + geom_errorbar() + coord_flip() + labs(title = 'Regression Coefficient 95% Confidence Intervals', x = 'Coefficient', y = '95% Confidence Interval' ) + theme(plot.title = element_text(hjust = 0.5)) ggsave(paste('./output/ConfidenceIntervals_', fileSuffix, '.png', sep = '')) } # Run regression model <- lm( AveragePositivityRate ~ # + Funding + Enrollment # + Region + StartDate + TestingStrategy # + TestOnContactTraced + HasAsymptomaticTesting + PreArrivalTesting + UniversityAssistedQuarantine + PreArrivalQuarantine + DailyHealthChecks + HasInPersonClasses # + EventPersonLimit # + EventApproval # + LibraryOpen # + FansAtSportingEvents + PartyScene , data = collegeData) outputModel(model, 'WithTestingStrategy') model <- lm( AveragePositivityRate ~ # + Funding + Enrollment # + Region + StartDate # + TestingStrategy # + TestOnContactTraced + HasAsymptomaticTesting + PreArrivalTesting + UniversityAssistedQuarantine + PreArrivalQuarantine + DailyHealthChecks + HasInPersonClasses # + EventPersonLimit # + EventApproval # + LibraryOpen # + FansAtSportingEvents + PartyScene , data = collegeData) outputModel(model, 'WithoutTestingStrategy')
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/preprocessing_filtering_reduction.R
\name{create_scExp}
\alias{create_scExp}
\title{Wrapper to create the single cell experiment from count matrix and feature dataframe}
\usage{
create_scExp(
  datamatrix,
  annot,
  remove_zero_cells = TRUE,
  remove_zero_features = TRUE,
  remove_non_canonical = TRUE,
  remove_chr_M = TRUE,
  mainExpName = "main",
  verbose = TRUE
)
}
\arguments{
\item{datamatrix}{A matrix or sparseMatrix of raw counts. Features x Cells
(rows x columns).}

\item{annot}{A data.frame containing information on cells. Should have the
same number of rows as the number of columns in datamatrix.}

\item{remove_zero_cells}{remove cells with zero counts ? (TRUE)}

\item{remove_zero_features}{remove features with zero counts ? (TRUE)}

\item{remove_non_canonical}{remove non-canonical chromosomes ? (TRUE)}

\item{remove_chr_M}{remove chromosome M ? (TRUE)}

\item{mainExpName}{Name of the mainExpName e.g. 'bins', 'peaks'... ("default")}

\item{verbose}{(TRUE)}
}
\value{
Returns a SingleCellExperiment object.
}
\description{
Create the single cell experiment from (sparse) datamatrix and feature
dataframe containing feature names and location. Also optionally removes
zero count Features, zero count Cells, non-canonical chromosomes, and
chromosome M. Calculates QC Metrics (scran).
}
\examples{
raw <- create_scDataset_raw()
scExp = create_scExp(raw$mat, raw$annot)
scExp
}
/man/create_scExp.Rd
no_license
vallotlab/ChromSCape
R
false
true
1,476
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/preprocessing_filtering_reduction.R \name{create_scExp} \alias{create_scExp} \title{Wrapper to create the single cell experiment from count matrix and feature dataframe} \usage{ create_scExp( datamatrix, annot, remove_zero_cells = TRUE, remove_zero_features = TRUE, remove_non_canonical = TRUE, remove_chr_M = TRUE, mainExpName = "main", verbose = TRUE ) } \arguments{ \item{datamatrix}{A matrix or sparseMatrix of raw counts. Features x Cells (rows x columns).} \item{annot}{A data.frame containing informations on cells. Should have the same number of rows as the number of columns in datamatrix.} \item{remove_zero_cells}{remove cells with zero counts ? (TRUE)} \item{remove_zero_features}{remove cells with zero counts ? (TRUE)} \item{remove_non_canonical}{remove non canonical chromosomes ?(TRUE)} \item{remove_chr_M}{remove chromosomes M ? (TRUE)} \item{mainExpName}{Name of the mainExpName e.g. 'bins', 'peaks'... ("default")} \item{verbose}{(TRUE)} } \value{ Returns a SingleCellExperiment object. } \description{ Create the single cell experiment from (sparse) datamatrix and feature dataframe containing feature names and location. Also optionally removes zero count Features, zero count Cells, non canconical chromosomes, and chromosome M. Calculates QC Metrics (scran). } \examples{ raw <- create_scDataset_raw() scExp = create_scExp(raw$mat, raw$annot) scExp }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/emmGrid-methods.R \name{regrid} \alias{regrid} \title{Reconstruct a reference grid with a new transformation or posterior sample} \usage{ regrid(object, transform = c("response", "mu", "unlink", "none", "pass", links), inv.link.lbl = "response", predict.type, bias.adjust = get_emm_option("back.bias.adj"), sigma, N.sim, sim = mvtnorm::rmvnorm, ...) } \arguments{ \item{object}{An object of class \code{emmGrid}} \item{transform}{Character, list, or logical value. If \code{"response"}, \code{"mu"}, or \code{TRUE}, the inverse transformation is applied to the estimates in the grid (but if there is both a link function and a response transformation, \code{"mu"} back-transforms only the link part); if \code{"none"} or \code{FALSE}, \code{object} is re-gridded so that its \code{bhat} slot contains \code{predict(object)} and its \code{linfct} slot is the identity. Any internal transformation information is preserved. If \code{transform = "pass"}, the object is not re-gridded in any way (this may be useful in conjunction with \code{N.sim}). If \code{transform} is a character value in \code{links} (which is the set of valid arguments for the \code{\link{make.link}} function, excepting \code{"identity"}), or if \code{transform} is a list of the same form as returned by \code{make.links} or \code{\link{make.tran}}, the results are formulated as if the response had been transformed with that link function.} \item{inv.link.lbl}{Character value. This applies only when \code{transform} is in \code{links}, and is used to label the predictions if subsequently summarized with \code{type = "response"}.} \item{predict.type}{Character value. If provided, the returned object is updated with the given type to use by default by \code{summary.emmGrid} (see \code{\link{update.emmGrid}}). 
This may be useful if, for example, when one specifies \code{transform = "log"} but desires summaries to be produced by default on the response scale.} \item{bias.adjust}{Logical value for whether to adjust for bias in back-transforming (\code{transform = "response"}). This requires a value of \code{sigma} to exist in the object or be specified.} \item{sigma}{Error SD assumed for bias correction (when \code{transform = "response"} and a transformation is in effect). If not specified, \code{object@misc$sigma} is used, and an error is thrown if it is not found.} \item{N.sim}{Integer value. If specified and \code{object} is based on a frequentist model (i.e., does not have a posterior sample), then a fake posterior sample is generated using the function \code{sim}.} \item{sim}{A function of three arguments (no names are assumed). If \code{N.sim} is supplied with a frequentist model, this function is called with respective arguments \code{N.sim}, \code{object@bhat}, and \code{object@V}. The default is the multivariate normal distribution.} \item{...}{Ignored.} } \value{ An \code{emmGrid} object with the requested changes } \description{ The typical use of this function is to cause EMMs to be computed on a different scale, e.g., the back-transformed scale rather than the linear-predictor scale. In other words, if you want back-transformed results, do you want to average and then back-transform, or back-transform and then average? } \details{ The \code{regrid} function reparameterizes an existing \code{ref.grid} so that its \code{linfct} slot is the identity matrix and its \code{bhat} slot consists of the estimates at the grid points. If \code{transform} is \code{TRUE}, the inverse transform is applied to the estimates. Outwardly, when \code{transform = "response"}, the result of \code{\link{summary.emmGrid}} after applying \code{regrid} is identical to the summary of the original object using \samp{type="response"}. 
But subsequent EMMs or contrasts will be conducted on the new scale -- which
is the reason this function exists.

This function may also be used to convert a reference grid for a frequentist
model to one for a Bayesian model. To do so, specify a value for
\code{N.sim} and a posterior sample is simulated using the function
\code{sim}. The grid may be further processed in accordance with the other
arguments; or if \code{transform = "pass"}, it is simply returned with the
only change being the addition of the posterior sample.
}
\note{
Another way to use \code{regrid} is to supply a \code{transform}
argument to \code{\link{ref_grid}} (either directly or indirectly via
\code{\link{emmeans}}). This is often a simpler approach if the
reference grid has not already been constructed.
}
\section{Degrees of freedom}{
In cases where the degrees of freedom depended on the linear function being
estimated (e.g., Satterthwaite method), the d.f. from the reference grid are
saved, and a kind of \dQuote{containment} method is substituted in the
returned object, whereby the calculated d.f. for a new linear function will
be the minimum d.f. among those having nonzero coefficients. This is kind of
an \emph{ad hoc} method, and it can over-estimate the degrees of freedom in
some cases. An annotation is displayed below any subsequent summary results
stating that the degrees-of-freedom method is inherited from the previous
method at the time of re-gridding.
} \examples{ pigs.lm <- lm(log(conc) ~ source + factor(percent), data = pigs) rg <- ref_grid(pigs.lm) # This will yield EMMs as GEOMETRIC means of concentrations: (emm1 <- emmeans(rg, "source", type = "response")) pairs(emm1) ## We obtain RATIOS # This will yield EMMs as ARITHMETIC means of concentrations: (emm2 <- emmeans(regrid(rg, transform = "response"), "source")) pairs(emm2) ## We obtain DIFFERENCES # Same result, useful if we hadn't already created 'rg' # emm2 <- emmeans(pigs.lm, "source", transform = "response") # Simulate a posterior sample set.seed(2.71828) rgb <- regrid(rg, N.sim = 200, transform = "pass") emmeans(rgb, "source", type = "response") ## similar to emm1 }
/man/regrid.Rd
no_license
karthy257/emmeans
R
false
true
6,005
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/emmGrid-methods.R \name{regrid} \alias{regrid} \title{Reconstruct a reference grid with a new transformation or posterior sample} \usage{ regrid(object, transform = c("response", "mu", "unlink", "none", "pass", links), inv.link.lbl = "response", predict.type, bias.adjust = get_emm_option("back.bias.adj"), sigma, N.sim, sim = mvtnorm::rmvnorm, ...) } \arguments{ \item{object}{An object of class \code{emmGrid}} \item{transform}{Character, list, or logical value. If \code{"response"}, \code{"mu"}, or \code{TRUE}, the inverse transformation is applied to the estimates in the grid (but if there is both a link function and a response transformation, \code{"mu"} back-transforms only the link part); if \code{"none"} or \code{FALSE}, \code{object} is re-gridded so that its \code{bhat} slot contains \code{predict(object)} and its \code{linfct} slot is the identity. Any internal transformation information is preserved. If \code{transform = "pass"}, the object is not re-gridded in any way (this may be useful in conjunction with \code{N.sim}). If \code{transform} is a character value in \code{links} (which is the set of valid arguments for the \code{\link{make.link}} function, excepting \code{"identity"}), or if \code{transform} is a list of the same form as returned by \code{make.links} or \code{\link{make.tran}}, the results are formulated as if the response had been transformed with that link function.} \item{inv.link.lbl}{Character value. This applies only when \code{transform} is in \code{links}, and is used to label the predictions if subsequently summarized with \code{type = "response"}.} \item{predict.type}{Character value. If provided, the returned object is updated with the given type to use by default by \code{summary.emmGrid} (see \code{\link{update.emmGrid}}). 
This may be useful if, for example, when one specifies \code{transform = "log"} but desires summaries to be produced by default on the response scale.} \item{bias.adjust}{Logical value for whether to adjust for bias in back-transforming (\code{transform = "response"}). This requires a value of \code{sigma} to exist in the object or be specified.} \item{sigma}{Error SD assumed for bias correction (when \code{transform = "response"} and a transformation is in effect). If not specified, \code{object@misc$sigma} is used, and an error is thrown if it is not found.} \item{N.sim}{Integer value. If specified and \code{object} is based on a frequentist model (i.e., does not have a posterior sample), then a fake posterior sample is generated using the function \code{sim}.} \item{sim}{A function of three arguments (no names are assumed). If \code{N.sim} is supplied with a frequentist model, this function is called with respective arguments \code{N.sim}, \code{object@bhat}, and \code{object@V}. The default is the multivariate normal distribution.} \item{...}{Ignored.} } \value{ An \code{emmGrid} object with the requested changes } \description{ The typical use of this function is to cause EMMs to be computed on a different scale, e.g., the back-transformed scale rather than the linear-predictor scale. In other words, if you want back-transformed results, do you want to average and then back-transform, or back-transform and then average? } \details{ The \code{regrid} function reparameterizes an existing \code{ref.grid} so that its \code{linfct} slot is the identity matrix and its \code{bhat} slot consists of the estimates at the grid points. If \code{transform} is \code{TRUE}, the inverse transform is applied to the estimates. Outwardly, when \code{transform = "response"}, the result of \code{\link{summary.emmGrid}} after applying \code{regrid} is identical to the summary of the original object using \samp{type="response"}. 
But subsequent EMMs or contrasts will be conducted on the new scale -- which is the reason this function exists. This function may also be used to convert a reference grid for a frequentist model to one for a Bayesian model. To do so, specify a value for \code{N.sim} and a posterior sample is simulated using the function \code{sim}. . The grid may be further processed in accordance with the other arguments; or if \code{transform = "pass"}, it is simply returned with the only change being the addition of the posterior sample. } \note{ Another way to use \code{regrid} is to supply a \code{transform} argument to \code{\link{ref_grid}} (either directly of indirectly via \code{\link{emmeans}}). This is often a simpler approach if the reference grid has not already been constructed. } \section{Degrees of freedom}{ In cases where the degrees of freedom depended on the linear function being estimated (e.g., Satterthwaite method), the d.f. from the reference grid are saved, and a kind of \dQuote{containment} method is substituted in the returned object, whereby the calculated d.f. for a new linear function will be the minimum d.f. among those having nonzero coefficients. This is kind of an \emph{ad hoc} method, and it can over-estimate the degrees of freedom in some cases. An annotation is displayed below any subsequent summary results stating that the degrees-of-freedom method is inherited from the previous method at the time of re-gridding. 
} \examples{ pigs.lm <- lm(log(conc) ~ source + factor(percent), data = pigs) rg <- ref_grid(pigs.lm) # This will yield EMMs as GEOMETRIC means of concentrations: (emm1 <- emmeans(rg, "source", type = "response")) pairs(emm1) ## We obtain RATIOS # This will yield EMMs as ARITHMETIC means of concentrations: (emm2 <- emmeans(regrid(rg, transform = "response"), "source")) pairs(emm2) ## We obtain DIFFERENCES # Same result, useful if we hadn't already created 'rg' # emm2 <- emmeans(pigs.lm, "source", transform = "response") # Simulate a posterior sample set.seed(2.71828) rgb <- regrid(rg, N.sim = 200, transform = "pass") emmeans(rgb, "source", type = "response") ## similar to emm1 }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Alin.R \name{compute.score} \alias{compute.score} \title{Compute score} \usage{ compute.score(als2, Sim) } \description{ Compute score } \keyword{internal}
/man/compute.score.Rd
no_license
KariAntonio/AligNet
R
false
true
235
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Alin.R \name{compute.score} \alias{compute.score} \title{Compute score} \usage{ compute.score(als2, Sim) } \description{ Compute score } \keyword{internal}
## PCR and PLS regression lab (ISLR, 'Hitters' data).
## Fits principal components regression and partial least squares with
## cross-validation, then compares test-set MSE for both methods.

library(ISLR)
library(pls)

## Drop players with missing Salary so the model matrix is complete.
Hitters <- na.omit(Hitters)

## ---- Principal components regression (full data, CV) ----
set.seed(2)
pcr.fit <- pcr(Salary ~ ., data = Hitters, scale = TRUE, validation = "CV")
summary(pcr.fit)

## Cross-validated MSE as a function of the number of components.
validationplot(pcr.fit, val.type = "MSEP")

## ---- Train/test split ----
x <- model.matrix(Salary ~ ., Hitters)[, -1]
y <- Hitters$Salary
train <- sample(1:nrow(x), nrow(x) / 2)
test <- (-train)
y.test <- y[test]

## PCR on the training half; evaluate with 7 components on the test half.
set.seed(1)
pcr.fit <- pcr(Salary ~ ., data = Hitters, subset = train, scale = TRUE, validation = "CV")
validationplot(pcr.fit, val.type = "MSEP")
pcr.pred <- predict(pcr.fit, x[test, ], ncomp = 7)
mean((pcr.pred - y.test)^2)

## Refit on the full data with the chosen number of components.
pcr.fit <- pcr(y ~ x, scale = TRUE, ncomp = 7)
summary(pcr.fit)

## ---- Partial least squares ----
set.seed(1)
pls.fit <- plsr(Salary ~ ., data = Hitters, subset = train, scale = TRUE, validation = "CV")
summary(pls.fit)
validationplot(pls.fit, val.type = "MSEP")
pls.pred <- predict(pls.fit, x[test, ], ncomp = 2)
mean((pls.pred - y.test)^2)

## Refit on the full data with 2 PLS components.
pls.fit <- plsr(Salary ~ ., data = Hitters, scale = TRUE, ncomp = 2)
summary(pls.fit)
/Tutorial7.R
no_license
ruibarrigana/Rtutorials
R
false
false
1,061
r
## ISLR lab: principal components regression (PCR) and partial least
## squares (PLS) on the 'Hitters' salary data, with cross-validation to
## choose the number of components and a test-set MSE comparison.
# # # # # # # # # # # # # # # # PCR and PLS regression # # # # # # # # # # # # # # # #
library(ISLR)
library(pls)
# Omit missing values
Hitters = na.omit(Hitters)
# PCR
set.seed(2)
pcr.fit = pcr(Salary~.,data=Hitters,scale=TRUE,validation="CV")
summary(pcr.fit)
# Plot results
# (cross-validated MSE by number of principal components)
validationplot(pcr.fit,val.type = "MSEP")
# Separate train and test
x = model.matrix(Salary~.,Hitters)[,-1]
y = Hitters$Salary
train = sample(1:nrow(x),nrow(x)/2)
test = (-train)
y.test = y[test]
set.seed(1)
# PCR on the training half only
pcr.fit = pcr(Salary~.,data=Hitters,subset=train,scale = TRUE,validation= "CV")
validationplot(pcr.fit,val.type="MSEP")
# Test-set MSE using 7 principal components
pcr.pred = predict(pcr.fit,x[test,],ncomp=7)
mean((pcr.pred-y.test)^2)
# Refit on the full data with the chosen 7 components
pcr.fit = pcr(y~x,scale=TRUE,ncomp=7)
summary(pcr.fit)
# Partial least squares
set.seed(1)
pls.fit = plsr(Salary~.,data=Hitters,subset=train,scale=TRUE,validation="CV")
summary(pls.fit)
validationplot(pls.fit,val.type="MSEP")
# Test-set MSE using 2 PLS components
pls.pred = predict(pls.fit,x[test,],ncomp=2)
mean((pls.pred-y.test)^2)
# Refit on the full data with 2 components
pls.fit = plsr(Salary~.,data=Hitters,scale=TRUE,ncomp=2)
summary(pls.fit)
#' @title Extract i.i.d. decomposition from a Cox model
#' @description Compute the influence function for each observation used to estimate the model
#' @name iid
#'
#' @param object The fitted Cox regression model object either
#' obtained with \code{coxph} (survival package) or \code{cph}
#' (rms package).
#' @param newdata Optional new data at which to do i.i.d. decomposition
#' @param tauHazard the vector of times at which the i.i.d decomposition of the baseline hazard will be computed.
#' Can also be a list with one vector of times per strata.
#' @param keep.times Logical. If \code{TRUE} add the evaluation times to the output.
#' @param store.iid the method used to compute the influence function and the standard error.
#' Can be \code{"full"}, \code{"approx"} or \code{"minimal"}. See the details section.
#' @details If there is no event in a strata, the influence function for the baseline hazard is set to 0.
#'
#' \code{store.iid} equal to \code{"full"} exports the influence function for the coefficients
#' and the baseline hazard at each event time.
#' \code{store.iid} equal to \code{"approx"} does the same except that the terms that do not contribute
#' to the variance are ignored (i.e. set to 0).
#' \code{store.iid} equal to \code{"minimal"} exports the influence function for the coefficients. For the
#' baseline hazard it only computes the quantities necessary to compute the influence function in order to save memory.
#'
#' @return A list containing:
#' \itemize{
#' \item{IFbeta}{Influence function for the regression coefficient.}
#' \item{IFhazard}{Time differential of the influence function of the hazard.}
#' \item{IFcumhazard}{Influence function of the cumulative hazard.}
#' \item{calcIFhazard}{Elements used to compute the influence function at a given time.}
#' \item{time}{Times at which the influence function has been evaluated.}
#' \item{etime1.min}{Time of first event (i.e. jump) in each strata.}
#' \item{etime.max}{Last observation time (i.e. jump or censoring) in each strata.}
#' \item{indexObs}{Index of the observation in the original dataset.}
#' }
#'
#' @examples
#' library(survival)
#' library(data.table)
#' set.seed(10)
#' d <- sampleData(100, outcome = "survival")[,.(eventtime,event,X1,X6)]
#' setkey(d, eventtime)
#'
#' m.cox <- coxph(Surv(eventtime, event) ~ X1+X6, data = d, y = TRUE, x = TRUE)
#' system.time(IF.cox <- iidCox(m.cox))
#' system.time(IF.cox_approx <- iidCox(m.cox, store.iid = "approx"))
#'
#' IF.cox <- iidCox(m.cox, tauHazard = sort(unique(c(7,d$eventtime))))
#'
#' @rdname iid
#' @export
iidCox <- function(object, newdata = NULL, tauHazard = NULL,
                   keep.times = TRUE, store.iid = "full"){

    #### extract elements from the fitted Cox model ####
    infoVar <- CoxVariableName(object)
    iInfo <- CoxVarCov(object)
    object.design <- CoxDesign(object)

    object.status <- object.design[,"status"]
    object.time <- object.design[,"stop"]
    object.strata <- CoxStrata(object, data = NULL, stratavars = infoVar$stratavars)
    object.levelStrata <- levels(object.strata)
    object.eXb <- exp(CoxLP(object, data = NULL, center = FALSE))
    object.LPdata <- as.matrix(object.design[,infoVar$lpvars,drop = FALSE])

    nStrata <- length(levels(object.strata))

    #### Extract new observations ####
    if(!is.null(newdata)){
        if("data.frame" %in% class(newdata) == FALSE){
            stop("class of \'newdata\' must inherit from data.frame \n")
        }

        ## evaluate the survival response (time, status) of the Cox formula on newdata
        tempo <- with(newdata, eval(CoxFormula(object)[[2]]))
        new.status <- tempo[,2]
        new.time <- tempo[,1]
        new.strata <- CoxStrata(object, data = newdata,
                                sterms = infoVar$sterms, stratavars = infoVar$stratavars,
                                levels = object.levelStrata, stratalevels = infoVar$stratalevels)
        new.eXb <- exp(CoxLP(object, data = newdata, center = FALSE))
        new.LPdata <- model.matrix(object, newdata)

    }else{
        new.status <- object.status
        new.time <- object.time
        new.strata <- object.strata
        new.eXb <- object.eXb
        new.LPdata <- object.LPdata
    }

    #### tests ####
    ## times at which the influence function is evaluated
    if(is.list(tauHazard) && length(tauHazard)!=nStrata){
        stop("argument \"tauHazard\" must be a list with ",nStrata," elements \n",
             "each element being the vector of times for each strata \n")
    }
    if(store.iid %in% c("full","approx","minimal") == FALSE){
        stop("store.iid can only be \"full\", or \"approx\" or \"minimal\"\n")
    }

    #### Compute quantities of interest ####
    p <- NCOL(object.LPdata)

    ## baseline hazard
    lambda0 <- predictCox(object, type = "hazard", centered = FALSE, keep.strata = TRUE)
    etime1.min <- rep(NA, nStrata)

    ## per-strata containers for the reordered data, E terms and jump times
    object.index_strata <- list()
    object.order_strata <- list()
    object.eXb_strata <- list()
    object.LPdata_strata <- list()
    object.status_strata <- list()
    object.time_strata <- list()

    new.index_strata <- list()
    new.order_strata <- list()
    new.eXb_strata <- list()
    new.LPdata_strata <- list()
    new.status_strata <- list()
    new.time_strata <- list()

    Ecpp <- list()
    new.indexJump <- list()
    new.order <- NULL

    for(iStrata in 1:nStrata){

        ## reorder the training data by event time within the strata
        object.index_strata[[iStrata]] <- which(object.strata == object.levelStrata[iStrata])
        object.order_strata[[iStrata]] <- order(object.time[object.index_strata[[iStrata]]])

        object.eXb_strata[[iStrata]] <- object.eXb[object.index_strata[[iStrata]][object.order_strata[[iStrata]]]]
        object.LPdata_strata[[iStrata]] <- object.LPdata[object.index_strata[[iStrata]][object.order_strata[[iStrata]]],,drop = FALSE]
        object.status_strata[[iStrata]] <- object.status[object.index_strata[[iStrata]][object.order_strata[[iStrata]]]]
        object.time_strata[[iStrata]] <- object.time[object.index_strata[[iStrata]][object.order_strata[[iStrata]]]]

        ## reorder the new data by event time within the strata
        if(!is.null(newdata)){
            new.index_strata[[iStrata]] <- which(new.strata == object.levelStrata[iStrata])
            new.order_strata[[iStrata]] <- order(new.time[new.index_strata[[iStrata]]])

            new.eXb_strata[[iStrata]] <- new.eXb[new.index_strata[[iStrata]][new.order_strata[[iStrata]]]]
            new.LPdata_strata[[iStrata]] <- new.LPdata[new.index_strata[[iStrata]][new.order_strata[[iStrata]]],,drop = FALSE]
            new.status_strata[[iStrata]] <- new.status[new.index_strata[[iStrata]][new.order_strata[[iStrata]]]]
            new.time_strata[[iStrata]] <- new.time[new.index_strata[[iStrata]][new.order_strata[[iStrata]]]]
        }else{
            new.index_strata[[iStrata]] <- object.index_strata[[iStrata]]
            new.order_strata[[iStrata]] <- object.order_strata[[iStrata]]

            new.eXb_strata[[iStrata]] <- object.eXb_strata[[iStrata]]
            new.LPdata_strata[[iStrata]] <- object.LPdata_strata[[iStrata]]
            new.status_strata[[iStrata]] <- object.status_strata[[iStrata]]
            new.time_strata[[iStrata]] <- object.time_strata[[iStrata]]
        }

        ## E: at-risk sums (S0) and weighted covariate averages at the unique event times
        Ecpp[[iStrata]] <- calcE_cpp(status = object.status_strata[[iStrata]],
                                     eventtime = object.time_strata[[iStrata]],
                                     eXb = object.eXb_strata[[iStrata]],
                                     X = object.LPdata_strata[[iStrata]],
                                     p = p, add0 = TRUE)

        new.indexJump[[iStrata]] <- prodlim::sindex(Ecpp[[iStrata]]$Utime1, new.time) - 1
        # if event/censoring is before the first event in the training dataset
        # then sindex returns 0, thus indexJump is -1;
        # the following lines convert -1 to 0
        if(any(new.indexJump[[iStrata]]<0)){
            new.indexJump[[iStrata]][new.indexJump[[iStrata]]<0] <- 0
        }

        ## store order (concatenated over strata)
        ## NOTE: written as length(new.order) > 0; the original
        ## length(new.order>0) was behaviorally equivalent (length of the
        ## comparison's logical vector) but misleading.
        if(length(new.order) > 0){
            new.order <- c(new.order, new.index_strata[[iStrata]][new.order_strata[[iStrata]]])
        }else{
            new.order <- new.index_strata[[iStrata]][new.order_strata[[iStrata]]]
        }
    }

    #### Computation of the influence function ####
    IFbeta <- NULL
    IFcumhazard <- NULL
    IFhazard <- NULL
    calcIFhazard <- list(delta_iS0 = NULL,
                         Elambda0 = NULL,
                         cumElambda0 = NULL,
                         lambda0_iS0 = NULL,
                         cumLambda0_iS0 = NULL,
                         time1 = NULL)
    ls.Utime1 <- NULL

    #### influence function of the regression coefficients (beta)
    for(iStrata in 1:nStrata){

        new.indexJump_strata <- new.indexJump[[iStrata]][new.index_strata[[iStrata]][new.order_strata[[iStrata]]]]

        ## IF
        if(p>0){
            if(store.iid != "approx"){
                IFbeta_tempo <- IFbeta_cpp(newT = new.time_strata[[iStrata]],
                                           neweXb = new.eXb_strata[[iStrata]],
                                           newX = new.LPdata_strata[[iStrata]],
                                           newStatus = new.status_strata[[iStrata]],
                                           newIndexJump = new.indexJump_strata,
                                           S01 = Ecpp[[iStrata]]$S0,
                                           E1 = Ecpp[[iStrata]]$E,
                                           time1 = Ecpp[[iStrata]]$Utime1,
                                           iInfo = iInfo, p = p)
            }else{
                IFbeta_tempo <- IFbetaApprox_cpp(newX = new.LPdata_strata[[iStrata]],
                                                 newStatus = new.status_strata[[iStrata]],
                                                 newIndexJump = new.indexJump_strata,
                                                 E1 = Ecpp[[iStrata]]$E,
                                                 iInfo = iInfo, p = p)
            }
        }else{
            ## no covariates: single NA column so downstream NROW() still works
            IFbeta_tempo <- matrix(NA, ncol = 1, nrow = length(new.index_strata[[iStrata]]))
        }

        ## output
        IFbeta <- rbind(IFbeta, IFbeta_tempo)
    }

    ## set original order
    IFbeta <- IFbeta[order(new.order),,drop=FALSE]

    #### influence function of the baseline hazard (lambda)
    for(iStrata in 1:nStrata){

        ## hazard
        if(nStrata==1){
            # select only the time,lambda corresponding to the events and not censored observations
            timeStrata <- lambda0$time[lambda0$time %in% Ecpp[[1]]$Utime1]
            lambda0Strata <- lambda0$hazard[lambda0$time %in% Ecpp[[1]]$Utime1]
        }else{
            # same within the strata
            index.strata <- which(lambda0$strata == object.levelStrata[iStrata])
            index.keep <- index.strata[lambda0$time[index.strata] %in% Ecpp[[iStrata]]$Utime1]
            timeStrata <- lambda0$time[index.keep]
            lambda0Strata <- lambda0$hazard[index.keep]
        }
        etime1.min[iStrata] <- timeStrata[1]

        ## tauHazard: evaluation times for this strata
        if(is.null(tauHazard)){
            tauHazard_strata <- object.time_strata[[iStrata]][object.status_strata[[iStrata]] == 1]
        }else if(is.list(tauHazard)){
            ## BUG FIX: use the element of the *current* strata; this
            ## previously read tauHazard[[nStrata]], i.e. every strata was
            ## evaluated at the last strata's times.
            tauHazard_strata <- tauHazard[[iStrata]]
        }else{
            tauHazard_strata <- tauHazard
        }

        ## E (drop the trailing row added by add0 = TRUE)
        nUtime1_strata <- length(Ecpp[[iStrata]]$Utime1)
        if(p>0){
            Etempo <- Ecpp[[iStrata]]$E[-NROW(Ecpp[[iStrata]]$E),,drop = FALSE]
        }else{
            Etempo <- matrix(0, ncol = 1, nrow = nUtime1_strata-1)
        }

        ## IF
        if(any(new.status_strata[[iStrata]]>0)){
            IFlambda_res <- IFlambda0_cpp(tau = tauHazard_strata,
                                          IFbeta = IFbeta,
                                          newT = new.time, neweXb = new.eXb,
                                          newStatus = new.status,
                                          newIndexJump = new.indexJump[[iStrata]],
                                          newStrata = as.numeric(new.strata),
                                          S01 = Ecpp[[iStrata]]$S0,
                                          E1 = Etempo,
                                          time1 = timeStrata,
                                          lastTime1 = Ecpp[[iStrata]]$Utime1[nUtime1_strata], # here lastTime1 will not correspond to timeStrata[length(timeStrata)] when there are censored observations
                                          lambda0 = lambda0Strata,
                                          p = p, strata = iStrata,
                                          exact = (store.iid!="approx"),
                                          minimalExport = (store.iid=="minimal")
                                          )
        }else{
            ## no event in this strata: the influence function is set to 0
            if(length(tauHazard_strata)==0){tauHazard_strata <- max(object.time_strata[[iStrata]])}
            IFlambda_res <- list(hazard = matrix(0, ncol = length(tauHazard_strata), nrow = NROW(IFbeta)),
                                 cumhazard = matrix(0, ncol = length(tauHazard_strata), nrow = NROW(IFbeta))
                                 )
            ## NOTE(review): this condition can never be TRUE after the
            ## assignment above; kept verbatim for fidelity with the
            ## original code -- confirm the intended order with the authors.
            if(length(tauHazard_strata)==0){tauHazard_strata <- NA}
        }

        # output
        ls.Utime1 <- c(ls.Utime1, list(tauHazard_strata))
        if(store.iid=="minimal"){
            calcIFhazard$delta_iS0 <- c(calcIFhazard$delta_iS0, list(IFlambda_res$delta_iS0))
            calcIFhazard$Elambda0 <- c(calcIFhazard$Elambda0, list(IFlambda_res$Elambda0))
            calcIFhazard$cumElambda0 <- c(calcIFhazard$cumElambda0, list(IFlambda_res$cumElambda0))
            calcIFhazard$lambda0_iS0 <- c(calcIFhazard$lambda0_iS0, list(IFlambda_res$lambda0_iS0))
            calcIFhazard$cumLambda0_iS0 <- c(calcIFhazard$cumLambda0_iS0, list(IFlambda_res$cumLambda0_iS0))
            calcIFhazard$time1 <- c(calcIFhazard$time1, list(timeStrata)) # event time by strata
        }else{
            if(keep.times){
                colnames(IFlambda_res$hazard) <- tauHazard_strata
                colnames(IFlambda_res$cumhazard) <- tauHazard_strata
            }
            IFhazard <- c(IFhazard, list(IFlambda_res$hazard))
            IFcumhazard <- c(IFcumhazard, list(IFlambda_res$cumhazard))
        }
    }

    #### export ####
    return(list(IFbeta = IFbeta,
                IFhazard = IFhazard,
                IFcumhazard = IFcumhazard,
                calcIFhazard = calcIFhazard,
                time = ls.Utime1, # time at which the IF is assessed
                etime1.min = etime1.min,
                etime.max = lambda0$lastEventTime,
                indexObs = new.order,
                store.iid = store.iid
                ))
}
/R/iidCox.R
no_license
PablitoCho/riskRegression
R
false
false
14,526
r
#' @title Extract i.i.d. decomposition from a Cox model
#' @description Compute the influence function for each observation used to estimate the model
#' @name iid
#'
#' @param object The fitted Cox regression model object either
#' obtained with \code{coxph} (survival package) or \code{cph}
#' (rms package).
#' @param newdata Optional new data at which to do i.i.d. decomposition
#' @param tauHazard the vector of times at which the i.i.d decomposition of the baseline hazard will be computed.
#' Can also be a list with one vector of times per strata.
#' @param keep.times Logical. If \code{TRUE} add the evaluation times to the output.
#' @param store.iid the method used to compute the influence function and the standard error.
#' Can be \code{"full"}, \code{"approx"} or \code{"minimal"}. See the details section.
#' @details If there is no event in a strata, the influence function for the baseline hazard is set to 0.
#'
#' \code{store.iid} equal to \code{"full"} exports the influence function for the coefficients
#' and the baseline hazard at each event time.
#' \code{store.iid} equal to \code{"approx"} does the same except that the terms that do not contribute
#' to the variance are ignored (i.e. set to 0).
#' \code{store.iid} equal to \code{"minimal"} exports the influence function for the coefficients. For the
#' baseline hazard it only computes the quantities necessary to compute the influence function in order to save memory.
#'
#' @return A list containing:
#' \itemize{
#' \item{IFbeta}{Influence function for the regression coefficient.}
#' \item{IFhazard}{Time differential of the influence function of the hazard.}
#' \item{IFcumhazard}{Influence function of the cumulative hazard.}
#' \item{calcIFhazard}{Elements used to compute the influence function at a given time.}
#' \item{time}{Times at which the influence function has been evaluated.}
#' \item{etime1.min}{Time of first event (i.e. jump) in each strata.}
#' \item{etime.max}{Last observation time (i.e. jump or censoring) in each strata.}
#' \item{indexObs}{Index of the observation in the original dataset.}
#' }
#'
#' @examples
#' library(survival)
#' library(data.table)
#' set.seed(10)
#' d <- sampleData(100, outcome = "survival")[,.(eventtime,event,X1,X6)]
#' setkey(d, eventtime)
#'
#' m.cox <- coxph(Surv(eventtime, event) ~ X1+X6, data = d, y = TRUE, x = TRUE)
#' system.time(IF.cox <- iidCox(m.cox))
#' system.time(IF.cox_approx <- iidCox(m.cox, store.iid = "approx"))
#'
#' IF.cox <- iidCox(m.cox, tauHazard = sort(unique(c(7,d$eventtime))))
#'
#' @rdname iid
#' @export
iidCox <- function(object, newdata = NULL, tauHazard = NULL,
                   keep.times = TRUE, store.iid = "full"){

    #### extract elements from the fitted Cox model ####
    infoVar <- CoxVariableName(object)
    iInfo <- CoxVarCov(object)
    object.design <- CoxDesign(object)

    object.status <- object.design[,"status"]
    object.time <- object.design[,"stop"]
    object.strata <- CoxStrata(object, data = NULL, stratavars = infoVar$stratavars)
    object.levelStrata <- levels(object.strata)
    object.eXb <- exp(CoxLP(object, data = NULL, center = FALSE))
    object.LPdata <- as.matrix(object.design[,infoVar$lpvars,drop = FALSE])

    nStrata <- length(levels(object.strata))

    #### Extract new observations ####
    if(!is.null(newdata)){
        if("data.frame" %in% class(newdata) == FALSE){
            stop("class of \'newdata\' must inherit from data.frame \n")
        }

        ## evaluate the survival response (time, status) of the Cox formula on newdata
        tempo <- with(newdata, eval(CoxFormula(object)[[2]]))
        new.status <- tempo[,2]
        new.time <- tempo[,1]
        new.strata <- CoxStrata(object, data = newdata,
                                sterms = infoVar$sterms, stratavars = infoVar$stratavars,
                                levels = object.levelStrata, stratalevels = infoVar$stratalevels)
        new.eXb <- exp(CoxLP(object, data = newdata, center = FALSE))
        new.LPdata <- model.matrix(object, newdata)

    }else{
        new.status <- object.status
        new.time <- object.time
        new.strata <- object.strata
        new.eXb <- object.eXb
        new.LPdata <- object.LPdata
    }

    #### tests ####
    ## times at which the influence function is evaluated
    if(is.list(tauHazard) && length(tauHazard)!=nStrata){
        stop("argument \"tauHazard\" must be a list with ",nStrata," elements \n",
             "each element being the vector of times for each strata \n")
    }
    if(store.iid %in% c("full","approx","minimal") == FALSE){
        stop("store.iid can only be \"full\", or \"approx\" or \"minimal\"\n")
    }

    #### Compute quantities of interest ####
    p <- NCOL(object.LPdata)

    ## baseline hazard
    lambda0 <- predictCox(object, type = "hazard", centered = FALSE, keep.strata = TRUE)
    etime1.min <- rep(NA, nStrata)

    ## per-strata containers for the reordered data, E terms and jump times
    object.index_strata <- list()
    object.order_strata <- list()
    object.eXb_strata <- list()
    object.LPdata_strata <- list()
    object.status_strata <- list()
    object.time_strata <- list()

    new.index_strata <- list()
    new.order_strata <- list()
    new.eXb_strata <- list()
    new.LPdata_strata <- list()
    new.status_strata <- list()
    new.time_strata <- list()

    Ecpp <- list()
    new.indexJump <- list()
    new.order <- NULL

    for(iStrata in 1:nStrata){

        ## reorder the training data by event time within the strata
        object.index_strata[[iStrata]] <- which(object.strata == object.levelStrata[iStrata])
        object.order_strata[[iStrata]] <- order(object.time[object.index_strata[[iStrata]]])

        object.eXb_strata[[iStrata]] <- object.eXb[object.index_strata[[iStrata]][object.order_strata[[iStrata]]]]
        object.LPdata_strata[[iStrata]] <- object.LPdata[object.index_strata[[iStrata]][object.order_strata[[iStrata]]],,drop = FALSE]
        object.status_strata[[iStrata]] <- object.status[object.index_strata[[iStrata]][object.order_strata[[iStrata]]]]
        object.time_strata[[iStrata]] <- object.time[object.index_strata[[iStrata]][object.order_strata[[iStrata]]]]

        ## reorder the new data by event time within the strata
        if(!is.null(newdata)){
            new.index_strata[[iStrata]] <- which(new.strata == object.levelStrata[iStrata])
            new.order_strata[[iStrata]] <- order(new.time[new.index_strata[[iStrata]]])

            new.eXb_strata[[iStrata]] <- new.eXb[new.index_strata[[iStrata]][new.order_strata[[iStrata]]]]
            new.LPdata_strata[[iStrata]] <- new.LPdata[new.index_strata[[iStrata]][new.order_strata[[iStrata]]],,drop = FALSE]
            new.status_strata[[iStrata]] <- new.status[new.index_strata[[iStrata]][new.order_strata[[iStrata]]]]
            new.time_strata[[iStrata]] <- new.time[new.index_strata[[iStrata]][new.order_strata[[iStrata]]]]
        }else{
            new.index_strata[[iStrata]] <- object.index_strata[[iStrata]]
            new.order_strata[[iStrata]] <- object.order_strata[[iStrata]]

            new.eXb_strata[[iStrata]] <- object.eXb_strata[[iStrata]]
            new.LPdata_strata[[iStrata]] <- object.LPdata_strata[[iStrata]]
            new.status_strata[[iStrata]] <- object.status_strata[[iStrata]]
            new.time_strata[[iStrata]] <- object.time_strata[[iStrata]]
        }

        ## E: at-risk sums (S0) and weighted covariate averages at the unique event times
        Ecpp[[iStrata]] <- calcE_cpp(status = object.status_strata[[iStrata]],
                                     eventtime = object.time_strata[[iStrata]],
                                     eXb = object.eXb_strata[[iStrata]],
                                     X = object.LPdata_strata[[iStrata]],
                                     p = p, add0 = TRUE)

        new.indexJump[[iStrata]] <- prodlim::sindex(Ecpp[[iStrata]]$Utime1, new.time) - 1
        # if event/censoring is before the first event in the training dataset
        # then sindex returns 0, thus indexJump is -1;
        # the following lines convert -1 to 0
        if(any(new.indexJump[[iStrata]]<0)){
            new.indexJump[[iStrata]][new.indexJump[[iStrata]]<0] <- 0
        }

        ## store order (concatenated over strata)
        ## NOTE: written as length(new.order) > 0; the original
        ## length(new.order>0) was behaviorally equivalent (length of the
        ## comparison's logical vector) but misleading.
        if(length(new.order) > 0){
            new.order <- c(new.order, new.index_strata[[iStrata]][new.order_strata[[iStrata]]])
        }else{
            new.order <- new.index_strata[[iStrata]][new.order_strata[[iStrata]]]
        }
    }

    #### Computation of the influence function ####
    IFbeta <- NULL
    IFcumhazard <- NULL
    IFhazard <- NULL
    calcIFhazard <- list(delta_iS0 = NULL,
                         Elambda0 = NULL,
                         cumElambda0 = NULL,
                         lambda0_iS0 = NULL,
                         cumLambda0_iS0 = NULL,
                         time1 = NULL)
    ls.Utime1 <- NULL

    #### influence function of the regression coefficients (beta)
    for(iStrata in 1:nStrata){

        new.indexJump_strata <- new.indexJump[[iStrata]][new.index_strata[[iStrata]][new.order_strata[[iStrata]]]]

        ## IF
        if(p>0){
            if(store.iid != "approx"){
                IFbeta_tempo <- IFbeta_cpp(newT = new.time_strata[[iStrata]],
                                           neweXb = new.eXb_strata[[iStrata]],
                                           newX = new.LPdata_strata[[iStrata]],
                                           newStatus = new.status_strata[[iStrata]],
                                           newIndexJump = new.indexJump_strata,
                                           S01 = Ecpp[[iStrata]]$S0,
                                           E1 = Ecpp[[iStrata]]$E,
                                           time1 = Ecpp[[iStrata]]$Utime1,
                                           iInfo = iInfo, p = p)
            }else{
                IFbeta_tempo <- IFbetaApprox_cpp(newX = new.LPdata_strata[[iStrata]],
                                                 newStatus = new.status_strata[[iStrata]],
                                                 newIndexJump = new.indexJump_strata,
                                                 E1 = Ecpp[[iStrata]]$E,
                                                 iInfo = iInfo, p = p)
            }
        }else{
            ## no covariates: single NA column so downstream NROW() still works
            IFbeta_tempo <- matrix(NA, ncol = 1, nrow = length(new.index_strata[[iStrata]]))
        }

        ## output
        IFbeta <- rbind(IFbeta, IFbeta_tempo)
    }

    ## set original order
    IFbeta <- IFbeta[order(new.order),,drop=FALSE]

    #### influence function of the baseline hazard (lambda)
    for(iStrata in 1:nStrata){

        ## hazard
        if(nStrata==1){
            # select only the time,lambda corresponding to the events and not censored observations
            timeStrata <- lambda0$time[lambda0$time %in% Ecpp[[1]]$Utime1]
            lambda0Strata <- lambda0$hazard[lambda0$time %in% Ecpp[[1]]$Utime1]
        }else{
            # same within the strata
            index.strata <- which(lambda0$strata == object.levelStrata[iStrata])
            index.keep <- index.strata[lambda0$time[index.strata] %in% Ecpp[[iStrata]]$Utime1]
            timeStrata <- lambda0$time[index.keep]
            lambda0Strata <- lambda0$hazard[index.keep]
        }
        etime1.min[iStrata] <- timeStrata[1]

        ## tauHazard: evaluation times for this strata
        if(is.null(tauHazard)){
            tauHazard_strata <- object.time_strata[[iStrata]][object.status_strata[[iStrata]] == 1]
        }else if(is.list(tauHazard)){
            ## BUG FIX: use the element of the *current* strata; this
            ## previously read tauHazard[[nStrata]], i.e. every strata was
            ## evaluated at the last strata's times.
            tauHazard_strata <- tauHazard[[iStrata]]
        }else{
            tauHazard_strata <- tauHazard
        }

        ## E (drop the trailing row added by add0 = TRUE)
        nUtime1_strata <- length(Ecpp[[iStrata]]$Utime1)
        if(p>0){
            Etempo <- Ecpp[[iStrata]]$E[-NROW(Ecpp[[iStrata]]$E),,drop = FALSE]
        }else{
            Etempo <- matrix(0, ncol = 1, nrow = nUtime1_strata-1)
        }

        ## IF
        if(any(new.status_strata[[iStrata]]>0)){
            IFlambda_res <- IFlambda0_cpp(tau = tauHazard_strata,
                                          IFbeta = IFbeta,
                                          newT = new.time, neweXb = new.eXb,
                                          newStatus = new.status,
                                          newIndexJump = new.indexJump[[iStrata]],
                                          newStrata = as.numeric(new.strata),
                                          S01 = Ecpp[[iStrata]]$S0,
                                          E1 = Etempo,
                                          time1 = timeStrata,
                                          lastTime1 = Ecpp[[iStrata]]$Utime1[nUtime1_strata], # here lastTime1 will not correspond to timeStrata[length(timeStrata)] when there are censored observations
                                          lambda0 = lambda0Strata,
                                          p = p, strata = iStrata,
                                          exact = (store.iid!="approx"),
                                          minimalExport = (store.iid=="minimal")
                                          )
        }else{
            ## no event in this strata: the influence function is set to 0
            if(length(tauHazard_strata)==0){tauHazard_strata <- max(object.time_strata[[iStrata]])}
            IFlambda_res <- list(hazard = matrix(0, ncol = length(tauHazard_strata), nrow = NROW(IFbeta)),
                                 cumhazard = matrix(0, ncol = length(tauHazard_strata), nrow = NROW(IFbeta))
                                 )
            ## NOTE(review): this condition can never be TRUE after the
            ## assignment above; kept verbatim for fidelity with the
            ## original code -- confirm the intended order with the authors.
            if(length(tauHazard_strata)==0){tauHazard_strata <- NA}
        }

        # output
        ls.Utime1 <- c(ls.Utime1, list(tauHazard_strata))
        if(store.iid=="minimal"){
            calcIFhazard$delta_iS0 <- c(calcIFhazard$delta_iS0, list(IFlambda_res$delta_iS0))
            calcIFhazard$Elambda0 <- c(calcIFhazard$Elambda0, list(IFlambda_res$Elambda0))
            calcIFhazard$cumElambda0 <- c(calcIFhazard$cumElambda0, list(IFlambda_res$cumElambda0))
            calcIFhazard$lambda0_iS0 <- c(calcIFhazard$lambda0_iS0, list(IFlambda_res$lambda0_iS0))
            calcIFhazard$cumLambda0_iS0 <- c(calcIFhazard$cumLambda0_iS0, list(IFlambda_res$cumLambda0_iS0))
            calcIFhazard$time1 <- c(calcIFhazard$time1, list(timeStrata)) # event time by strata
        }else{
            if(keep.times){
                colnames(IFlambda_res$hazard) <- tauHazard_strata
                colnames(IFlambda_res$cumhazard) <- tauHazard_strata
            }
            IFhazard <- c(IFhazard, list(IFlambda_res$hazard))
            IFcumhazard <- c(IFcumhazard, list(IFlambda_res$cumhazard))
        }
    }

    #### export ####
    return(list(IFbeta = IFbeta,
                IFhazard = IFhazard,
                IFcumhazard = IFcumhazard,
                calcIFhazard = calcIFhazard,
                time = ls.Utime1, # time at which the IF is assessed
                etime1.min = etime1.min,
                etime.max = lambda0$lastEventTime,
                indexObs = new.order,
                store.iid = store.iid
                ))
}
## Contaminated read simulation
##
## Simulates FASTQ reads of the same lengths as an input file; a fraction
## `rate` of reads are "contaminated" by appending a random-length adapter
## prefix at the 3' end, with sequencing errors drawn according to the
## Illumina-encoded (offset-64) quality string.

NUCLEOTIDES <- c('A', 'T', 'C', 'G')
#adapters <- list("solexa_adapter_1"="AGATCGGAAGAGCTCGTATGCCGTCTTCTGCTTG")
adapter <- "AGATCGGAAGAGCTCGTATGCCGTCTTCTGCTTG"
#adapter <- "TTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT"

set.seed(0)

## Given a sequence, add errors based on an Illumina-encoded quality line.
## Each base is replaced by a different random nucleotide with probability
## 10^-((Q - 64)/10), i.e. the Illumina 1.3+/1.5 phred encoding.
addErrors <-
  function(seq, quality) {
    # Note: I have checked this against
    # http://seqanswers.com/forums/showthread.php?t=1523 and it appears
    # correct - VB
    stopifnot(nchar(seq) == nchar(quality))
    probs.wrong <- sapply(charToRaw(quality),
                          function(x) 1/(10^((as.integer(x) - 64)/10)))
    paste(mapply(function(base, p.wrong) {
      if (rbinom(1, 1, p.wrong))
        return(sample(setdiff(NUCLEOTIDES, base), 1))
      else
        return(base)
    }, unlist(strsplit(seq, '')), probs.wrong), collapse="")
  }

## Generate a random sequence of the given total length. When is.contam is
## TRUE, a uniformly random-length prefix of `adapter` (>= min.contam bases)
## replaces the tail of the read, sequencing errors are applied using
## `quality`, and a list(contam.n = adapter length, seq = sequence) is
## returned. Otherwise a plain random sequence string is returned (no
## errors applied, quality unused).
generateRandomSeq <-
  function(length, adapter=NULL, quality=NULL, is.contam=FALSE, min.contam=3) {
    if (!is.contam)
      paste(sample(NUCLEOTIDES, length, replace=TRUE), collapse="")
    else {
      ll <- sample(min.contam:nchar(adapter), 1)
      contam <- substr(adapter, 1, ll)
      pre.error.seq <- paste(paste(sample(NUCLEOTIDES, length-nchar(contam),
                                          replace=TRUE), collapse=""),
                             contam, sep="")
      return(list(contam.n=ll, seq=addErrors(pre.error.seq, quality)))
    }
  }

## Read a FASTQ file from `con`, replace each read with a simulated read of
## the same length (contaminated with probability `rate` using `adapters`),
## tag each header with its contamination status (and the number of adapter
## bases when contaminated), and write the result to `outfile`.
contaminateFASTQEntry <- function(con, outfile, rate, adapters, min.contam=3) {
  blocks.processed <- 0
  reads <- readLines(con)
  outlist <- vector('character', length(reads))
  while (blocks.processed*4 < length(reads)) {
    quality <- reads[4*blocks.processed+4]
    seq <- reads[4*blocks.processed+2]
    header <- reads[4*blocks.processed+1]
    if (runif(1) <= rate) {
      # contaminate
      # BUG FIX: this previously used the global `adapter` and silently
      # ignored the `adapters` argument.
      tmp <- generateRandomSeq(nchar(seq), adapter=adapters, quality=quality,
                               is.contam=TRUE, min.contam=min.contam)
      seq <- tmp$seq
      ll <- tmp$contam.n
      header <- sprintf("%s-contaminated-%d", header, ll)
    } else {
      header <- sprintf("%s-uncontaminated", header)
      seq <- generateRandomSeq(nchar(seq), is.contam=FALSE,
                               min.contam=min.contam)
    }
    # output results to vector
    outlist[4*blocks.processed+1] <- header
    outlist[4*blocks.processed+2] <- seq
    outlist[4*blocks.processed+3] <- sprintf("+%s", substr(header, 2, nchar(header)))
    outlist[4*blocks.processed+4] <- quality
    blocks.processed <- blocks.processed + 1
    if (blocks.processed %% 100 == 0)
      message(sprintf("%d blocks processed.", blocks.processed))
  }
  # BUG FIX: pass the path directly; writeLines() then opens and closes the
  # connection itself, instead of leaving the file(outfile) connection
  # object dangling.
  writeLines(outlist, con=outfile)
}
/testing/read-sim.R
permissive
hjanime/scythe
R
false
false
2,623
r
## Contaminated read simulation
##
## Simulates FASTQ reads of the same lengths as an input file; a fraction
## `rate` of reads are "contaminated" by appending a random-length adapter
## prefix at the 3' end, with sequencing errors drawn according to the
## Illumina-encoded (offset-64) quality string.

NUCLEOTIDES <- c('A', 'T', 'C', 'G')
#adapters <- list("solexa_adapter_1"="AGATCGGAAGAGCTCGTATGCCGTCTTCTGCTTG")
adapter <- "AGATCGGAAGAGCTCGTATGCCGTCTTCTGCTTG"
#adapter <- "TTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT"

set.seed(0)

## Given a sequence, add errors based on an Illumina-encoded quality line.
## Each base is replaced by a different random nucleotide with probability
## 10^-((Q - 64)/10), i.e. the Illumina 1.3+/1.5 phred encoding.
addErrors <-
  function(seq, quality) {
    # Note: I have checked this against
    # http://seqanswers.com/forums/showthread.php?t=1523 and it appears
    # correct - VB
    stopifnot(nchar(seq) == nchar(quality))
    probs.wrong <- sapply(charToRaw(quality),
                          function(x) 1/(10^((as.integer(x) - 64)/10)))
    paste(mapply(function(base, p.wrong) {
      if (rbinom(1, 1, p.wrong))
        return(sample(setdiff(NUCLEOTIDES, base), 1))
      else
        return(base)
    }, unlist(strsplit(seq, '')), probs.wrong), collapse="")
  }

## Generate a random sequence of the given total length. When is.contam is
## TRUE, a uniformly random-length prefix of `adapter` (>= min.contam bases)
## replaces the tail of the read, sequencing errors are applied using
## `quality`, and a list(contam.n = adapter length, seq = sequence) is
## returned. Otherwise a plain random sequence string is returned (no
## errors applied, quality unused).
generateRandomSeq <-
  function(length, adapter=NULL, quality=NULL, is.contam=FALSE, min.contam=3) {
    if (!is.contam)
      paste(sample(NUCLEOTIDES, length, replace=TRUE), collapse="")
    else {
      ll <- sample(min.contam:nchar(adapter), 1)
      contam <- substr(adapter, 1, ll)
      pre.error.seq <- paste(paste(sample(NUCLEOTIDES, length-nchar(contam),
                                          replace=TRUE), collapse=""),
                             contam, sep="")
      return(list(contam.n=ll, seq=addErrors(pre.error.seq, quality)))
    }
  }

## Read a FASTQ file from `con`, replace each read with a simulated read of
## the same length (contaminated with probability `rate` using `adapters`),
## tag each header with its contamination status (and the number of adapter
## bases when contaminated), and write the result to `outfile`.
contaminateFASTQEntry <- function(con, outfile, rate, adapters, min.contam=3) {
  blocks.processed <- 0
  reads <- readLines(con)
  outlist <- vector('character', length(reads))
  while (blocks.processed*4 < length(reads)) {
    quality <- reads[4*blocks.processed+4]
    seq <- reads[4*blocks.processed+2]
    header <- reads[4*blocks.processed+1]
    if (runif(1) <= rate) {
      # contaminate
      # BUG FIX: this previously used the global `adapter` and silently
      # ignored the `adapters` argument.
      tmp <- generateRandomSeq(nchar(seq), adapter=adapters, quality=quality,
                               is.contam=TRUE, min.contam=min.contam)
      seq <- tmp$seq
      ll <- tmp$contam.n
      header <- sprintf("%s-contaminated-%d", header, ll)
    } else {
      header <- sprintf("%s-uncontaminated", header)
      seq <- generateRandomSeq(nchar(seq), is.contam=FALSE,
                               min.contam=min.contam)
    }
    # output results to vector
    outlist[4*blocks.processed+1] <- header
    outlist[4*blocks.processed+2] <- seq
    outlist[4*blocks.processed+3] <- sprintf("+%s", substr(header, 2, nchar(header)))
    outlist[4*blocks.processed+4] <- quality
    blocks.processed <- blocks.processed + 1
    if (blocks.processed %% 100 == 0)
      message(sprintf("%d blocks processed.", blocks.processed))
  }
  # BUG FIX: pass the path directly; writeLines() then opens and closes the
  # connection itself, instead of leaving the file(outfile) connection
  # object dangling.
  writeLines(outlist, con=outfile)
}
## Shiny application: interactive histogram of the Old Faithful data.
## Run it with the 'Run App' button in RStudio, or shiny::runApp().
## More on building Shiny apps: http://shiny.rstudio.com/

library(shiny)

## UI: a slider controlling the number of histogram bins, plus the plot.
ui <- fluidPage(
  titlePanel("Old Faithful Geyser Data Star Wars 06022021 GA"),
  sidebarLayout(
    sidebarPanel(
      sliderInput("bins",
                  "Number of bins:",
                  min = 1,
                  max = 50,
                  value = 30)
    ),
    mainPanel(
      plotOutput("distPlot")
    )
  )
)

## Server: re-draw the histogram whenever the bin count changes.
server <- function(input, output) {
  output$distPlot <- renderPlot({
    waiting <- faithful[, 2]  # column 2 of the built-in faithful data
    breaks <- seq(min(waiting), max(waiting), length.out = input$bins + 1)
    hist(waiting, breaks = breaks, col = 'darkgray', border = 'white')
  })
}

## Launch the application.
shinyApp(ui = ui, server = server)
/app.R
no_license
nielsenmarkus11/healthcare-prod
R
false
false
1,263
r
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
#    http://shiny.rstudio.com/
#

library(shiny)

# UI definition: a slider selecting the histogram bin count, plus the
# rendered plot.
ui <- fluidPage(

  # Application title
  titlePanel("Old Faithful Geyser Data Star Wars 06022021 GA"),

  # Sidebar holding the bin-count slider
  sidebarLayout(
    sidebarPanel(
      sliderInput(
        inputId = "bins",
        label   = "Number of bins:",
        min     = 1,
        max     = 50,
        value   = 30
      )
    ),

    # Main panel showing the histogram of eruption waiting times
    mainPanel(
      plotOutput("distPlot")
    )
  )
)

# Server logic: re-render the histogram whenever the slider changes.
server <- function(input, output) {

  output$distPlot <- renderPlot({
    # Waiting times between eruptions (second column of `faithful`)
    waiting <- faithful[, 2]
    # input$bins intervals require input$bins + 1 breakpoints
    breakpoints <- seq(min(waiting), max(waiting),
                       length.out = input$bins + 1)

    # Draw the histogram with the chosen number of bins
    hist(waiting, breaks = breakpoints,
         col = 'darkgray', border = 'white')
  })
}

# Launch the application
shinyApp(ui = ui, server = server)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/score_lnk.r \name{score_lnk} \alias{score_lnk} \title{Score Linkage Variables} \usage{ score_lnk(df) } \arguments{ \item{df}{site MSD file} } \description{ Score Linkage Variables }
/man/score_lnk.Rd
permissive
USAID-OHA-SI/asSIMSble
R
false
true
260
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/score_lnk.r \name{score_lnk} \alias{score_lnk} \title{Score Linkage Variables} \usage{ score_lnk(df) } \arguments{ \item{df}{site MSD file} } \description{ Score Linkage Variables }
## HyperG R Package
##
## Copyright (c) 2021 David J. Marchette <dmarchette@gmail.com>
##
## Permission is hereby granted, free of charge, to any person obtaining a copy
## of this software and associated documentation files (the "Software"), to deal
## in the Software without restriction, including without limitation the rights
## to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
## copies of the Software, and to permit persons to whom the Software is
## furnished to do so, subject to the following conditions:
##
## The above copyright notice and this permission notice shall be included in all
## copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
## OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
## SOFTWARE.
##

# S3 summary method for hypergraph objects: prints the order (vertex
# count), size (hyper-edge count), the distribution of hyper-edge orders,
# connectivity, and three structural flags (empty hyper-edges, loops,
# isolated vertices).
summary.hypergraph <- function(object,...)
{
  cat("Hypergraph on", horder(object), "vertices with", hsize(object),
      "hyperedges\n\n")
  cat("Hyper-edge orders:\n")
  print(edge_orders(object))
  cat("The hypergraph",
      ifelse(hypergraph.is.connected(object), "is", "is not"),
      "connected.\n")
  # Helper emitting the indented "does / does not <property>" lines,
  # reproducing the original cat() spacing exactly.
  flag.line <- function(flag, text) {
    cat("\t", ifelse(flag, "does", "does not"), text)
  }
  flag.line(has.empty.hyperedges(object), "have empty hyper-edges\n")
  flag.line(has.loops(object),            "have loops\n")
  flag.line(has.isolates(object),         "have isolated vertices\n")
}
/R/summary.hypergraph.R
no_license
cran/HyperG
R
false
false
1,729
r
## HyperG R Package
##
## Copyright (c) 2021 David J. Marchette <dmarchette@gmail.com>
##
## Permission is hereby granted, free of charge, to any person obtaining a copy
## of this software and associated documentation files (the "Software"), to deal
## in the Software without restriction, including without limitation the rights
## to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
## copies of the Software, and to permit persons to whom the Software is
## furnished to do so, subject to the following conditions:
##
## The above copyright notice and this permission notice shall be included in all
## copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
## OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
## SOFTWARE.
##

# S3 summary method for hypergraph objects: prints the order (vertex
# count), size (hyper-edge count), the distribution of hyper-edge orders,
# connectivity, and three structural flags (empty hyper-edges, loops,
# isolated vertices).
summary.hypergraph <- function(object,...)
{
  cat("Hypergraph on", horder(object), "vertices with", hsize(object),
      "hyperedges\n\n")
  cat("Hyper-edge orders:\n")
  print(edge_orders(object))
  cat("The hypergraph",
      ifelse(hypergraph.is.connected(object), "is", "is not"),
      "connected.\n")
  # Helper emitting the indented "does / does not <property>" lines,
  # reproducing the original cat() spacing exactly.
  flag.line <- function(flag, text) {
    cat("\t", ifelse(flag, "does", "does not"), text)
  }
  flag.line(has.empty.hyperedges(object), "have empty hyper-edges\n")
  flag.line(has.loops(object),            "have loops\n")
  flag.line(has.isolates(object),         "have isolated vertices\n")
}
#----To clear the console-----
# NOTE(review): dev.off() errors when no graphics device is open, and
# rm(list=ls()) wipes the caller's workspace -- both are interactive-session
# conveniences that should be removed from reusable code.
dev.off()       # Clear the graph window
cat('\014')     # Clear the console
rm(list=ls())   # Clear all user objects from the environment!!!

######-------- Part 1---------------------------
# Part 1: NYC 311 complaints -- top-10 complaint types city-wide and per
# borough, shown as horizontal bar charts on a single page.

#Import data (interactive file picker)
fname <- file.choose()
fname
df <- read.csv(fname, header=T, stringsAsFactors = F) ##reading the csv file

library(tidyverse)

# Columns 2, 6, 25 -- assumed to be Created.Date, Complaint.Type and
# Borough of the 311 service-request CSV; TODO confirm against the file.
all_complaints <- df %>% select(2, 6, 25) #Select "Complaint Type" and "Borough" columns
all_complaints$Created.Date <- as.POSIXct(strptime(all_complaints$Created.Date,
                                                   "%m/%d/%Y %I:%M:%S %p"))
all_complaints$Year <- format(all_complaints$Created.Date, "%Y")
# Collapse every "Noise ..." subtype into a single "Noise" category
all_complaints$Complaint.Type[grepl("^Noise.*", all_complaints$Complaint.Type)] <- "Noise"
# Count complaints per (borough, complaint type)
all_complaints <- all_complaints %>%
  group_by(Borough,Complaint.Type) %>%
  summarise(Count = n()) ##complains by borough,complain type
# City-wide totals per complaint type, descending
all_complaints_NY <- all_complaints %>%
  group_by(Complaint.Type) %>%
  summarise(Count = sum(Count)) %>%
  arrange(desc(Count))
top10_complaints_NY <- top_n(all_complaints_NY, 10, Count) #top 10 complains in NY

# capwords: title-case each space-separated word of a string; with
# strict = TRUE, also lower-case the rest of every word.
capwords <- function(s, strict = FALSE) {
  cap <- function(s) paste(toupper(substring(s, 1, 1)),
                           {s <- substring(s, 2); if(strict) tolower(s) else s},
                           sep = "", collapse = " " )
  sapply(strsplit(s, split = " "), cap, USE.NAMES = !is.null(names(s)))
}

# figure1: horizontal bar chart of complaint counts, ordered ascending so
# the largest bar appears on top after coord_flip().
figure1 <- function(df){
  df <- df[order(df$Count),]
  order <- df$Complaint.Type
  order <- sapply(order, capwords, T)
  df$Complaint.Type <- order
  # Fix the factor levels so ggplot keeps the sorted order
  df$Complaint.Type <- factor(df$Complaint.Type, levels = order)
  return(ggplot(df) +
           geom_bar(aes(x=Complaint.Type , y = Count ),
                    fill = "Orange", stat = "identity") +
           theme_minimal() +
           xlab("") +
           ylab("Number of Complaints") +
           coord_flip() )
}

# subplot 1
p_top10_complaints_NY <- figure1(top10_complaints_NY) +
  ggtitle("New York City \nTop 10 Complaints")

# Top 10 complaint types within each borough
top10_complaints_borough <- top_n(group_by(all_complaints, Borough), 10, Count)

top10_complaints_Man <- filter(top10_complaints_borough, Borough == "MANHATTAN")
# subplot 2
p_top10_complaints_Man <- figure1(top10_complaints_Man) +
  ggtitle("Manhattan \nTop 10 Complaints")

top10_complaints_Qns <- filter(top10_complaints_borough, Borough == "QUEENS")
# subplot 3
p_top10_complaints_Qns <- figure1(top10_complaints_Qns) +
  ggtitle("Queens \nTop 10 Complaints")

top10_complaints_Bn <- filter(top10_complaints_borough, Borough == "BROOKLYN")
# subplot 4
p_top10_complaints_Bn <- figure1(top10_complaints_Bn) +
  ggtitle("Brooklyn \nTop 10 Complaints")

top10_complaints_Brx <- filter(top10_complaints_borough, Borough == "BRONX")
# subplot 5
p_top10_complaints_Brx <- figure1(top10_complaints_Brx) +
  ggtitle("Bronx \nTop 10 Complaints")

top10_complaints_SI <- filter(top10_complaints_borough, Borough == "STATEN ISLAND")
# Shorten an over-long label for display; assumes row 4 is the
# missed-collection complaint -- TODO confirm, this is position-dependent.
top10_complaints_SI$Complaint.Type[4] <- "Missed Collection"
# subplot 6
p_top10_complaints_SI <- figure1(top10_complaints_SI) +
  ggtitle("Staten Island \nTop 10 Complaints")

#To plot multiple plots on a single page (classic "R cookbook" multiplot)
multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) {
  library(grid)

  # Make a list from the ... arguments and plotlist
  plots <- c(list(...), plotlist)

  numPlots = length(plots)

  # If layout is NULL, then use 'cols' to determine layout
  if (is.null(layout)) {
    layout <- matrix(seq(1, cols * ceiling(numPlots/cols)),
                     ncol = cols, nrow = ceiling(numPlots/cols))
  }

  if (numPlots==1) {
    print(plots[[1]])
  } else {
    # Set up the page
    grid.newpage()
    pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))

    # Make each plot, in the correct location
    for (i in 1:numPlots) {
      # Get the i,j matrix positions of the regions that contain this subplot
      matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
      print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
                                      layout.pos.col = matchidx$col))
    }
  }
}

multiplot(p_top10_complaints_NY, p_top10_complaints_Man,
          p_top10_complaints_Bn, p_top10_complaints_Qns,
          p_top10_complaints_Brx, p_top10_complaints_SI, cols=2)

####--------------------------------------------------------------------------
# Word cloud of complaint types.
library(wordcloud)
# NOTE(review): the first assignment to `a` is dead -- it is immediately
# overwritten by the raw df$Complaint.Type on the next line.
a <- as.matrix(all_complaints$Complaint.Type)
a <- as.matrix(df$Complaint.Type)
wordcloud(a,max.words=30, min.freq=10, colors=brewer.pal(8, 'Dark2'))

####----------------------------------Part 2---------------
# Part 2: mean response time per city, slowest vs fastest cities.
library(zipcode)
library(tidyverse)
library(maps)
# NOTE(review): install.packages() should never run inside an analysis
# script -- move installation out and keep only library(ggplot2).
install.packages("ggplot2")
library(ggplot2) #library

fname1 <- file.choose()
fname1
df1 <- read.csv(fname1, header=T, stringsAsFactors = F) ### reading file

resptime <- df1 %>% select(2,3,4,9,17) # taking the required columns
# Columns 17 and 4 -- assumed to be ResponseTime and City; TODO confirm.
df <- df1[c(17,4)]
# Drop rows with an empty city name
df<-df[!(df$City==""),]
length(unique(df$City))
df
df$City <-tolower(df$City)

# Mean response time per city
x <- aggregate(x = df$ResponseTime, by = list(df$City), FUN = mean) #mean
names(x)[1] <- "City"
names(x)[2] <- "AvgResTime"
x

# Slowest 5 cities (largest average response time)
top <- x[order(-x$AvgResTime),]
top
top <- top_n(top,5)
top

fastest <-x[order(x$AvgResTime),]
# NOTE(review): top_n() selects the LARGEST values, so `fastest` actually
# holds the 5 slowest cities here -- presumably top_n(fastest, -5) was
# intended; verify against the expected output.
fastest <- top_n(fastest,5)
fastest

barplot(height=top$AvgResTime, main="City vs Response time", xlab="City",
        ylab="Response Time", names.arg=top$City, border="blue",
        col = "skyblue") #bar plot
barplot(height=fastest$AvgResTime, main="City vs Response time", xlab="City",
        ylab="Response Time", names.arg=fastest$City,
        border="green",col = "skyblue")

ggplot(top, aes(x=City, y=AvgResTime)) + geom_point()+ ylim(300,425)
ggplot(top, aes(x=City, y=AvgResTime)) +
  geom_bar(stat = "identity", color="blue", fill="red")
ggplot(fastest, aes(x=City, y=AvgResTime)) + geom_point()+ ylim(300,425)
ggplot(fastest, aes(x=City, y=AvgResTime)) +
  geom_bar(stat = "identity", color="blue", fill="red")
###--------------------------------------------------------------------------------------------------------------------------------
/Rcode_311_Calls.R
no_license
iamlalitmohan/Info-Visualization
R
false
false
6,374
r
#----To clear the console-----
# NOTE(review): dev.off() errors when no graphics device is open, and
# rm(list=ls()) wipes the caller's workspace -- both are interactive-session
# conveniences that should be removed from reusable code.
dev.off()       # Clear the graph window
cat('\014')     # Clear the console
rm(list=ls())   # Clear all user objects from the environment!!!

######-------- Part 1---------------------------
# Part 1: NYC 311 complaints -- top-10 complaint types city-wide and per
# borough, shown as horizontal bar charts on a single page.

#Import data (interactive file picker)
fname <- file.choose()
fname
df <- read.csv(fname, header=T, stringsAsFactors = F) ##reading the csv file

library(tidyverse)

# Columns 2, 6, 25 -- assumed to be Created.Date, Complaint.Type and
# Borough of the 311 service-request CSV; TODO confirm against the file.
all_complaints <- df %>% select(2, 6, 25) #Select "Complaint Type" and "Borough" columns
all_complaints$Created.Date <- as.POSIXct(strptime(all_complaints$Created.Date,
                                                   "%m/%d/%Y %I:%M:%S %p"))
all_complaints$Year <- format(all_complaints$Created.Date, "%Y")
# Collapse every "Noise ..." subtype into a single "Noise" category
all_complaints$Complaint.Type[grepl("^Noise.*", all_complaints$Complaint.Type)] <- "Noise"
# Count complaints per (borough, complaint type)
all_complaints <- all_complaints %>%
  group_by(Borough,Complaint.Type) %>%
  summarise(Count = n()) ##complains by borough,complain type
# City-wide totals per complaint type, descending
all_complaints_NY <- all_complaints %>%
  group_by(Complaint.Type) %>%
  summarise(Count = sum(Count)) %>%
  arrange(desc(Count))
top10_complaints_NY <- top_n(all_complaints_NY, 10, Count) #top 10 complains in NY

# capwords: title-case each space-separated word of a string; with
# strict = TRUE, also lower-case the rest of every word.
capwords <- function(s, strict = FALSE) {
  cap <- function(s) paste(toupper(substring(s, 1, 1)),
                           {s <- substring(s, 2); if(strict) tolower(s) else s},
                           sep = "", collapse = " " )
  sapply(strsplit(s, split = " "), cap, USE.NAMES = !is.null(names(s)))
}

# figure1: horizontal bar chart of complaint counts, ordered ascending so
# the largest bar appears on top after coord_flip().
figure1 <- function(df){
  df <- df[order(df$Count),]
  order <- df$Complaint.Type
  order <- sapply(order, capwords, T)
  df$Complaint.Type <- order
  # Fix the factor levels so ggplot keeps the sorted order
  df$Complaint.Type <- factor(df$Complaint.Type, levels = order)
  return(ggplot(df) +
           geom_bar(aes(x=Complaint.Type , y = Count ),
                    fill = "Orange", stat = "identity") +
           theme_minimal() +
           xlab("") +
           ylab("Number of Complaints") +
           coord_flip() )
}

# subplot 1
p_top10_complaints_NY <- figure1(top10_complaints_NY) +
  ggtitle("New York City \nTop 10 Complaints")

# Top 10 complaint types within each borough
top10_complaints_borough <- top_n(group_by(all_complaints, Borough), 10, Count)

top10_complaints_Man <- filter(top10_complaints_borough, Borough == "MANHATTAN")
# subplot 2
p_top10_complaints_Man <- figure1(top10_complaints_Man) +
  ggtitle("Manhattan \nTop 10 Complaints")

top10_complaints_Qns <- filter(top10_complaints_borough, Borough == "QUEENS")
# subplot 3
p_top10_complaints_Qns <- figure1(top10_complaints_Qns) +
  ggtitle("Queens \nTop 10 Complaints")

top10_complaints_Bn <- filter(top10_complaints_borough, Borough == "BROOKLYN")
# subplot 4
p_top10_complaints_Bn <- figure1(top10_complaints_Bn) +
  ggtitle("Brooklyn \nTop 10 Complaints")

top10_complaints_Brx <- filter(top10_complaints_borough, Borough == "BRONX")
# subplot 5
p_top10_complaints_Brx <- figure1(top10_complaints_Brx) +
  ggtitle("Bronx \nTop 10 Complaints")

top10_complaints_SI <- filter(top10_complaints_borough, Borough == "STATEN ISLAND")
# Shorten an over-long label for display; assumes row 4 is the
# missed-collection complaint -- TODO confirm, this is position-dependent.
top10_complaints_SI$Complaint.Type[4] <- "Missed Collection"
# subplot 6
p_top10_complaints_SI <- figure1(top10_complaints_SI) +
  ggtitle("Staten Island \nTop 10 Complaints")

#To plot multiple plots on a single page (classic "R cookbook" multiplot)
multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) {
  library(grid)

  # Make a list from the ... arguments and plotlist
  plots <- c(list(...), plotlist)

  numPlots = length(plots)

  # If layout is NULL, then use 'cols' to determine layout
  if (is.null(layout)) {
    layout <- matrix(seq(1, cols * ceiling(numPlots/cols)),
                     ncol = cols, nrow = ceiling(numPlots/cols))
  }

  if (numPlots==1) {
    print(plots[[1]])
  } else {
    # Set up the page
    grid.newpage()
    pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))

    # Make each plot, in the correct location
    for (i in 1:numPlots) {
      # Get the i,j matrix positions of the regions that contain this subplot
      matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
      print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
                                      layout.pos.col = matchidx$col))
    }
  }
}

multiplot(p_top10_complaints_NY, p_top10_complaints_Man,
          p_top10_complaints_Bn, p_top10_complaints_Qns,
          p_top10_complaints_Brx, p_top10_complaints_SI, cols=2)

####--------------------------------------------------------------------------
# Word cloud of complaint types.
library(wordcloud)
# NOTE(review): the first assignment to `a` is dead -- it is immediately
# overwritten by the raw df$Complaint.Type on the next line.
a <- as.matrix(all_complaints$Complaint.Type)
a <- as.matrix(df$Complaint.Type)
wordcloud(a,max.words=30, min.freq=10, colors=brewer.pal(8, 'Dark2'))

####----------------------------------Part 2---------------
# Part 2: mean response time per city, slowest vs fastest cities.
library(zipcode)
library(tidyverse)
library(maps)
# NOTE(review): install.packages() should never run inside an analysis
# script -- move installation out and keep only library(ggplot2).
install.packages("ggplot2")
library(ggplot2) #library

fname1 <- file.choose()
fname1
df1 <- read.csv(fname1, header=T, stringsAsFactors = F) ### reading file

resptime <- df1 %>% select(2,3,4,9,17) # taking the required columns
# Columns 17 and 4 -- assumed to be ResponseTime and City; TODO confirm.
df <- df1[c(17,4)]
# Drop rows with an empty city name
df<-df[!(df$City==""),]
length(unique(df$City))
df
df$City <-tolower(df$City)

# Mean response time per city
x <- aggregate(x = df$ResponseTime, by = list(df$City), FUN = mean) #mean
names(x)[1] <- "City"
names(x)[2] <- "AvgResTime"
x

# Slowest 5 cities (largest average response time)
top <- x[order(-x$AvgResTime),]
top
top <- top_n(top,5)
top

fastest <-x[order(x$AvgResTime),]
# NOTE(review): top_n() selects the LARGEST values, so `fastest` actually
# holds the 5 slowest cities here -- presumably top_n(fastest, -5) was
# intended; verify against the expected output.
fastest <- top_n(fastest,5)
fastest

barplot(height=top$AvgResTime, main="City vs Response time", xlab="City",
        ylab="Response Time", names.arg=top$City, border="blue",
        col = "skyblue") #bar plot
barplot(height=fastest$AvgResTime, main="City vs Response time", xlab="City",
        ylab="Response Time", names.arg=fastest$City,
        border="green",col = "skyblue")

ggplot(top, aes(x=City, y=AvgResTime)) + geom_point()+ ylim(300,425)
ggplot(top, aes(x=City, y=AvgResTime)) +
  geom_bar(stat = "identity", color="blue", fill="red")
ggplot(fastest, aes(x=City, y=AvgResTime)) + geom_point()+ ylim(300,425)
ggplot(fastest, aes(x=City, y=AvgResTime)) +
  geom_bar(stat = "identity", color="blue", fill="red")
###--------------------------------------------------------------------------------------------------------------------------------
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/internals.R
\name{merge.snpR.stats}
\alias{merge.snpR.stats}
\title{Merge newly calculated stats into a snpRdata object.}
\usage{
\method{merge}{snpR.stats}(x, stats, type = "stats")
}
\arguments{
\item{x}{snpRdata object}

\item{stats}{data.frame/table/list. New data to be added to the existing snpRdata object, x.}

\item{type}{character, default "stats". The type of statistic to merge, see list in description.}
}
\description{
Internal function to quickly and accurately merge newly calculated statistics into a snpRdata object. This should never be called externally. Takes and returns the same snpRdata object, with new data.
}
\details{
Mostly relies on the smart.merge subfunction, which must be edited with care and testing. LD merging is independent. Type options: stats, pairwise, window.stats, pairwise.window.stats, and LD, corresponding to slots of the snpRdata S4 object class. For examples, see functions that use merge.snpR.stats, such as calc_pi or calc_pairwise_ld.
}
/man/merge.snpR.stats.Rd
no_license
ldutoit/snpR
R
false
true
1,069
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/internals.R
\name{merge.snpR.stats}
\alias{merge.snpR.stats}
\title{Merge newly calculated stats into a snpRdata object.}
\usage{
\method{merge}{snpR.stats}(x, stats, type = "stats")
}
\arguments{
\item{x}{snpRdata object}

\item{stats}{data.frame/table/list. New data to be added to the existing snpRdata object, x.}

\item{type}{character, default "stats". The type of statistic to merge, see list in description.}
}
\description{
Internal function to quickly and accurately merge newly calculated statistics into a snpRdata object. This should never be called externally. Takes and returns the same snpRdata object, with new data.
}
\details{
Mostly relies on the smart.merge subfunction, which must be edited with care and testing. LD merging is independent. Type options: stats, pairwise, window.stats, pairwise.window.stats, and LD, corresponding to slots of the snpRdata S4 object class. For examples, see functions that use merge.snpR.stats, such as calc_pi or calc_pairwise_ld.
}
############################### Simulation - MCMC kernels (E-step) ############################# mcmc<-function(model,data,control=list()) { # E-step - simulate unknown parameters # Input: kiter, Uargs, structural.model, mean.phi (unchanged) # Output: varList, DYF, phiM (changed) kiter <- 1 Gamma.laplace <- NULL saemixObject<-new(Class="SaemixObject",data=data,model=model,options=control) saemix.options<-saemixObject["options"] saemix.model<-saemixObject["model"] saemix.data<-saemixObject["data"] saemix.data@ocov<-saemix.data@ocov[saemix.data@data[,"mdv"]==0,,drop=FALSE] saemix.data@data<-saemix.data@data[saemix.data@data[,"mdv"]==0,] saemix.data@ntot.obs<-dim(saemix.data@data)[1] # Initialising random generator OLDRAND<-TRUE set.seed(saemix.options$seed) #intitialisation # xinit<-initialiseMainAlgo(saemix.data,saemix.model,saemix.options) xinit<-initialiseMainAlgo(saemix.data,saemix.model,saemix.options) saemix.model<-xinit$saemix.model Dargs<-xinit$Dargs Uargs<-xinit$Uargs varList<-xinit$varList phiM<-xinit$phiM mean.phi<-xinit$mean.phi DYF<-xinit$DYF opt<-xinit$opt betas<-betas.ini<-xinit$betas fixed.psi<-xinit$fixedpsi.ini var.eta<-varList$diag.omega structural.model<-saemix.model["model"] # Function to perform MCMC simulation nb.etas<-length(varList$ind.eta) domega<-cutoff(mydiag(varList$omega[varList$ind.eta,varList$ind.eta]),.Machine$double.eps) omega.eta<-varList$omega[varList$ind.eta,varList$ind.eta,drop=FALSE] omega.eta<-omega.eta-mydiag(mydiag(varList$omega[varList$ind.eta,varList$ind.eta]))+mydiag(domega) chol.omega<-try(chol(omega.eta)) somega<-solve(omega.eta) # "/" dans Matlab = division matricielle, selon la doc "roughly" B*INV(A) (et *= produit matriciel...) 
VK<-rep(c(1:nb.etas),2) mean.phiM<-do.call(rbind,rep(list(mean.phi),Uargs$nchains)) phiM[,varList$ind0.eta]<-mean.phiM[,varList$ind0.eta] saemix.options<-saemixObject["options"] map_range <- saemix.options$map.range if(Dargs$type=="structural"){ U.y<-compute.LLy_c(phiM,varList$pres,Uargs,Dargs,DYF) } else{ U.y <- compute.LLy_d(phiM,Uargs,Dargs,DYF) } etaM<-phiM[,varList$ind.eta]-mean.phiM[,varList$ind.eta,drop=FALSE] phiMc<-phiM psiM <- transphi(phiM,Dargs$transform.par) eta_map <- etaM eta_listref <- list(as.data.frame(matrix(nrow = saemix.options$L_mcmc,ncol = ncol(phiM)))) for (i in 1:(nrow(phiM))) { eta_listref[[i]] <- as.data.frame(matrix(nrow = saemix.options$L_mcmc,ncol = ncol(phiM))) eta_listref[[i]][,ncol(eta_listref[[i]])] <- i } eta_list <- list(as.data.frame(matrix(nrow = saemix.options$L_mcmc,ncol = ncol(phiM)))) for (i in 1:(nrow(phiM))) { eta_list[[i]] <- as.data.frame(matrix(nrow = saemix.options$L_mcmc,ncol = ncol(phiM))) eta_list[[i]][,ncol(eta_list[[i]])] <- i } if(opt$nbiter.mcmc[1]>0) { for (m in 1:saemix.options$L_mcmc) { if(m%%100==0){ print(m) } for (i in 1:(nrow(phiM))) { eta_listref[[i]][m,] <- etaM[i,] } for (i in 1:(nrow(phiM))) { eta_list[[i]][m,] <- etaM[i,] } for(u in 1:opt$nbiter.mcmc[1]) { # 1er noyau etaMc<-matrix(rnorm(Dargs$NM*nb.etas),ncol=nb.etas)%*%chol.omega phiMc[,varList$ind.eta]<-mean.phiM[,varList$ind.eta]+etaMc if(Dargs$type=="structural"){ Uc.y<-compute.LLy_c(phiMc,varList$pres,Uargs,Dargs,DYF) } else { Uc.y<-compute.LLy_d(phiMc,Uargs,Dargs,DYF) } deltau<-Uc.y-U.y ind<-which(deltau<(-1)*log(runif(Dargs$NM))) etaM[ind,]<-etaMc[ind,] U.y[ind]<-Uc.y[ind] } U.eta<-0.5*rowSums(etaM*(etaM%*%somega)) # Second stage if(opt$nbiter.mcmc[2]>0) { nt2<-nbc2<-matrix(data=0,nrow=nb.etas,ncol=1) nrs2<-1 for (u in 1:opt$nbiter.mcmc[2]) { for(vk2 in 1:nb.etas) { etaMc<-etaM # cat('vk2=',vk2,' nrs2=',nrs2,"\n") etaMc[,vk2]<-etaM[,vk2]+matrix(rnorm(Dargs$NM*nrs2), ncol=nrs2)%*%mydiag(varList$domega2[vk2,nrs2],nrow=1) # 2e noyau ? 
ou 1er noyau+permutation? phiMc[,varList$ind.eta]<-mean.phiM[,varList$ind.eta]+etaMc psiMc<-transphi(phiMc,Dargs$transform.par) if(Dargs$type=="structural"){ Uc.y<-compute.LLy_c(phiMc,varList$pres,Uargs,Dargs,DYF) } else { Uc.y<-compute.LLy_d(phiMc,Uargs,Dargs,DYF) } Uc.eta<-0.5*rowSums(etaMc*(etaMc%*%somega)) deltu<-Uc.y-U.y+Uc.eta-U.eta ind<-which(deltu<(-1)*log(runif(Dargs$NM))) etaM[ind,]<-etaMc[ind,] U.y[ind]<-Uc.y[ind] # Warning: Uc.y, Uc.eta = vecteurs U.eta[ind]<-Uc.eta[ind] nbc2[vk2]<-nbc2[vk2]+length(ind) nt2[vk2]<-nt2[vk2]+Dargs$NM } } varList$domega2[,nrs2]<-varList$domega2[,nrs2]*(1+opt$stepsize.rw* (nbc2/nt2-opt$proba.mcmc)) } if(opt$nbiter.mcmc[3]>0) { nt2<-nbc2<-matrix(data=0,nrow=nb.etas,ncol=1) nrs2<-kiter%%(nb.etas-1)+2 if(is.nan(nrs2)) nrs2<-1 # to deal with case nb.etas=1 for (u in 1:opt$nbiter.mcmc[3]) { if(nrs2<nb.etas) { vk<-c(0,sample(c(1:(nb.etas-1)),nrs2-1)) nb.iter2<-nb.etas } else { vk<-0:(nb.etas-1) # if(nb.etas==1) vk<-c(0) nb.iter2<-1 } for(k2 in 1:nb.iter2) { vk2<-VK[k2+vk] etaMc<-etaM etaMc[,vk2]<-etaM[,vk2]+matrix(rnorm(Dargs$NM*nrs2), ncol=nrs2)%*%mydiag(varList$domega2[vk2,nrs2]) phiMc[,varList$ind.eta]<-mean.phiM[,varList$ind.eta]+etaMc psiMc<-transphi(phiMc,Dargs$transform.par) if(Dargs$type=="structural"){ Uc.y<-compute.LLy_c(phiMc,varList$pres,Uargs,Dargs,DYF) } else { Uc.y<-compute.LLy_d(phiMc,Uargs,Dargs,DYF) } Uc.eta<-0.5*rowSums(etaMc*(etaMc%*%somega)) deltu<-Uc.y-U.y+Uc.eta-U.eta ind<-which(deltu<(-log(runif(Dargs$NM)))) etaM[ind,]<-etaMc[ind,] U.y[ind]<-Uc.y[ind] # Warning: Uc.y, Uc.eta = vecteurs U.eta[ind]<-Uc.eta[ind] nbc2[vk2]<-nbc2[vk2]+length(ind) nt2[vk2]<-nt2[vk2]+Dargs$NM } } varList$domega2[,nrs2]<-varList$domega2[,nrs2]*(1+opt$stepsize.rw* (nbc2/nt2-opt$proba.mcmc)) } } } U.eta<-0.5*rowSums(etaM*(etaM%*%somega)) if(opt$nbiter.mcmc[4]>0) { etaMc<-etaM propc <- U.eta prop <- U.eta saemix.options<-saemixObject["options"] saemix.model<-saemixObject["model"] saemix.data<-saemixObject["data"] saemix.options$map <- 
TRUE saemixObject["results"]["omega"] <- omega.eta saemixObject["results"]["mean.phi"] <- mean.phi saemixObject["results"]["phi"] <- phiM i1.omega2<-varList$ind.eta iomega.phi1<-solve(saemixObject["results"]["omega"][i1.omega2,i1.omega2]) id<-saemixObject["data"]["data"][,saemixObject["data"]["name.group"]] xind<-saemixObject["data"]["data"][,saemixObject["data"]["name.predictors"], drop=FALSE] yobs<-saemixObject["data"]["data"][,saemixObject["data"]["name.response"]] id.list<-unique(id) phi.map<-saemixObject["results"]["mean.phi"] if(Dargs$type=="structural"){ for(i in 1:saemixObject["data"]["N"]) { isuj<-id.list[i] xi<-xind[id==isuj,,drop=FALSE] yi<-yobs[id==isuj] idi<-rep(1,length(yi)) mean.phi1<-mean.phiM[i,i1.omega2] phii<-saemixObject["results"]["phi"][i,] phi1<-phii[i1.omega2] phi1.opti<-optim(par=phi1, fn=conditional.distribution_c, phii=phii,idi=idi, xi=xi,yi=yi,mphi=mean.phi1,idx=i1.omega2,iomega=iomega.phi1, trpar=saemixObject["model"]["transform.par"], model=saemixObject["model"]["model"], pres=varList$pres, err=saemixObject["model"]["error.model"]) phi.map[i,i1.omega2]<-phi1.opti$par } #rep the map nchains time phi.map <- phi.map[rep(seq_len(nrow(phi.map)),Uargs$nchains ), ] map.psi<-transphi(phi.map,saemixObject["model"]["transform.par"]) map.psi<-data.frame(id=id.list,map.psi) map.phi<-data.frame(id=id.list,phi.map) psi_map <- as.matrix(map.psi[,-c(1)]) # browser() phi_map <- as.matrix(map.phi[,-c(1)]) eta_map <- phi_map - mean.phiM fpred1<-structural.model(psi_map, Dargs$IdM, Dargs$XM) gradf <- matrix(0L, nrow = length(fpred1), ncol = nb.etas) for (j in 1:nb.etas) { psi_map2 <- psi_map psi_map2[,j] <- psi_map[,j]+psi_map[,j]/1000 fpred1<-structural.model(psi_map, Dargs$IdM, Dargs$XM) fpred2<-structural.model(psi_map2, Dargs$IdM, Dargs$XM) for (i in 1:(Dargs$NM)){ r = which(Dargs$IdM==i) gradf[r,j] <- (fpred2[r] - fpred1[r])/(psi_map[i,j]/1000) } } gradh <- list(omega.eta,omega.eta) for (i in 1:Dargs$NM){ gradh[[i]] <- gradh[[1]] } for (j in 
1:nb.etas) { phi_map2 <- phi_map phi_map2[,j] <- phi_map[,j]+phi_map[,j]/1000 psi_map2 <- transphi(phi_map2,saemixObject["model"]["transform.par"]) for (i in 1:(Dargs$NM)){ gradh[[i]][,j] <- (psi_map2[i,] - psi_map[i,])/(phi_map[i,]/1000) } } #calculation of the covariance matrix of the proposal Gamma <- chol.Gamma <- inv.chol.Gamma <- inv.Gamma <- list(omega.eta,omega.eta) for (i in 1:(Dargs$NM)){ r = which(Dargs$IdM==i) temp <- gradf[r,]%*%gradh[[i]] Gamma[[i]] <- solve(t(temp)%*%temp/(varList$pres[1])^2+solve(omega.eta)) chol.Gamma[[i]] <- chol(Gamma[[i]]) inv.chol.Gamma[[i]] <- solve(chol.Gamma[[i]]) inv.Gamma[[i]] <- solve(Gamma[[i]]) } Gamma.laplace <- Gamma } else { for(i in 1:saemixObject["data"]["N"]) { isuj<-id.list[i] xi<-xind[id==isuj,,drop=FALSE] # if(is.null(dim(xi))) xi<-matrix(xi,ncol=1) yi<-yobs[id==isuj] idi<-rep(1,length(yi)) mean.phi1<-mean.phiM[i,i1.omega2] phii<-saemixObject["results"]["phi"][i,] phi1<-phii[i1.omega2] phi1.opti<-optim(par=phi1, fn=conditional.distribution_d, phii=phii,idi=idi,xi=xi,yi=yi,mphi=mean.phi1,idx=i1.omega2,iomega=iomega.phi1, trpar=saemixObject["model"]["transform.par"], model=saemixObject["model"]["model"]) phi.map[i,i1.omega2]<-phi1.opti$par } #rep the map nchains time phi.map <- phi.map[rep(seq_len(nrow(phi.map)),Uargs$nchains ), ] map.psi<-transphi(phi.map,saemixObject["model"]["transform.par"]) map.psi<-data.frame(id=id.list,map.psi) map.phi<-data.frame(id=id.list,phi.map) psi_map <- as.matrix(map.psi[,-c(1)]) phi_map <- as.matrix(map.phi[,-c(1)]) eta_map <- phi_map[,varList$ind.eta] - mean.phiM[,varList$ind.eta] #gradient at the map estimation gradp <- matrix(0L, nrow = Dargs$NM, ncol = nb.etas) for (j in 1:nb.etas) { phi_map2 <- phi_map phi_map2[,j] <- phi_map[,j]+phi_map[,j]/100; psi_map2 <- transphi(phi_map2,saemixObject["model"]["transform.par"]) fpred1<-structural.model(psi_map, Dargs$IdM, Dargs$XM) DYF[Uargs$ind.ioM]<- fpred1 l1<-colSums(DYF) fpred2<-structural.model(psi_map2, Dargs$IdM, Dargs$XM) 
DYF[Uargs$ind.ioM]<- fpred2 l2<-colSums(DYF) for (i in 1:(Dargs$NM)){ gradp[i,j] <- (l2[i] - l1[i])/(phi_map[i,j]/100) } } #calculation of the covariance matrix of the proposal fpred<-structural.model(psi_map, Dargs$IdM, Dargs$XM) DYF[Uargs$ind.ioM]<- fpred denom <- colSums(DYF) Gamma <- chol.Gamma <- inv.chol.Gamma <- inv.Gamma <- list(omega.eta,omega.eta) z <- matrix(0L, nrow = length(fpred), ncol = 1) for (i in 1:(Dargs$NM)){ Gamma[[i]] <- solve(gradp[i,]%*%t(gradp[i,])/denom[i]^2+solve(omega.eta)) chol.Gamma[[i]] <- chol(Gamma[[i]]) inv.Gamma[[i]] <- solve(Gamma[[i]]) inv.chol.Gamma[[i]] <- solve(chol.Gamma[[i]]) } } etaM <- eta_map phiM<-etaM+mean.phiM psiM<-transphi(phiM,Dargs$transform.par) U.eta<-0.5*rowSums(etaM*(etaM%*%somega)) if(Dargs$type=="structural"){ U.y<-compute.LLy_c(phiM,varList$pres,Uargs,Dargs,DYF) } else{ U.y <- compute.LLy_d(phiM,Uargs,Dargs,DYF) } df <- 3 for (m in 1:saemix.options$L_mcmc) { if(m%%100==0){ print(m) } for (i in 1:(nrow(phiM))) { eta_list[[i]][m,] <- etaM[i,] # eta_list[[i]][m,] <- psiM[i,] } for (u in 1:opt$nbiter.mcmc[4]) { #generate candidate eta for (i in 1:(Dargs$NM)){ Mi <- rnorm(nb.etas)%*%chol.Gamma[[i]] etaMc[i,varList$ind.eta]<- eta_map[i,varList$ind.eta] + Mi # etaMc[i,varList$ind.eta]<- eta_map[i,varList$ind.eta] + rt(nb.etas,df)%*%chol.Gamma[[i]] } phiMc[,varList$ind.eta]<-mean.phiM[,varList$ind.eta]+etaMc[,varList$ind.eta] psiMc<-transphi(phiMc,Dargs$transform.par) if(Dargs$type=="structural"){ Uc.y<-compute.LLy_c(phiMc,varList$pres,Uargs,Dargs,DYF) } else{ Uc.y<-compute.LLy_d(phiMc,Uargs,Dargs,DYF) } Uc.eta<-0.5*rowSums(etaMc[,varList$ind.eta]*(etaMc[,varList$ind.eta]%*%somega)) for (i in 1:(Dargs$NM)){ # propc[i] <- 0.5*rowSums((etaMc[i,varList$ind.eta]-eta_map[i,varList$ind.eta])*(etaMc[i,varList$ind.eta]-eta_map[i,varList$ind.eta])%*%inv.Gamma[[i]]) # prop[i] <- 0.5*rowSums((etaM[i,varList$ind.eta]-eta_map[i,varList$ind.eta])*(etaM[i,varList$ind.eta]-eta_map[i,varList$ind.eta])%*%inv.Gamma[[i]]) propc[i] <- 
-sum(log(dt((etaMc[i,varList$ind.eta]-eta_map[i,varList$ind.eta])%*%inv.chol.Gamma[[i]],df,log=FALSE))) prop[i] <- -sum(log(dt((etaM[i,varList$ind.eta]-eta_map[i,varList$ind.eta])%*%inv.chol.Gamma[[i]],df,log=FALSE))) } deltu<-Uc.y-U.y+Uc.eta-U.eta + prop - propc ind<-which(deltu<(-1)*log(runif(Dargs$NM))) # print(length(ind)/Dargs$NM) etaM[ind,varList$ind.eta]<-etaMc[ind,varList$ind.eta] phiM[,varList$ind.eta]<-mean.phiM[,varList$ind.eta]+etaM[,varList$ind.eta] psiM<-transphi(phiM,Dargs$transform.par) # psiM[ind,varList$ind.eta]<-psiMc[ind,varList$ind.eta] U.y[ind]<-Uc.y[ind] # Warning: Uc.y, Uc.eta = vecteurs U.eta[ind]<-Uc.eta[ind] } } } #MALA if(opt$nbiter.mcmc[5]>0) { etaMc<-etaM propc <- U.eta prop <- U.eta saemix.options<-saemixObject["options"] saemix.model<-saemixObject["model"] saemix.data<-saemixObject["data"] saemix.options$map <- TRUE saemixObject["results"]["omega"] <- omega.eta saemixObject["results"]["mean.phi"] <- mean.phi saemixObject["results"]["phi"] <- phiM i1.omega2<-varList$ind.eta iomega.phi1<-solve(saemixObject["results"]["omega"][i1.omega2,i1.omega2]) id<-saemixObject["data"]["data"][,saemixObject["data"]["name.group"]] xind<-saemixObject["data"]["data"][,saemixObject["data"]["name.predictors"], drop=FALSE] yobs<-saemixObject["data"]["data"][,saemixObject["data"]["name.response"]] id.list<-unique(id) phi.map<-saemixObject["results"]["mean.phi"] if(Dargs$type=="structural"){ for(i in 1:saemixObject["data"]["N"]) { isuj<-id.list[i] xi<-xind[id==isuj,,drop=FALSE] yi<-yobs[id==isuj] idi<-rep(1,length(yi)) mean.phi1<-mean.phiM[i,i1.omega2] phii<-saemixObject["results"]["phi"][i,] phi1<-phii[i1.omega2] phi1.opti<-optim(par=phi1, fn=conditional.distribution_c, phii=phii,idi=idi,xi=xi,yi=yi,mphi=mean.phi1,idx=i1.omega2,iomega=iomega.phi1, trpar=saemixObject["model"]["transform.par"], model=saemixObject["model"]["model"], pres=varList$pres, err=saemixObject["model"]["error.model"]) phi.map[i,i1.omega2]<-phi1.opti$par } #rep the map nchains time 
phi.map <- phi.map[rep(seq_len(nrow(phi.map)),Uargs$nchains ), ] map.psi<-transphi(phi.map,saemixObject["model"]["transform.par"]) map.psi<-data.frame(id=id.list,map.psi) map.phi<-data.frame(id=id.list,phi.map) psi_map <- as.matrix(map.psi[,-c(1)]) phi_map <- as.matrix(map.phi[,-c(1)]) eta_map <- phi_map - mean.phiM } else { for(i in 1:saemixObject["data"]["N"]) { isuj<-id.list[i] xi<-xind[id==isuj,,drop=FALSE] # if(is.null(dim(xi))) xi<-matrix(xi,ncol=1) yi<-yobs[id==isuj] idi<-rep(1,length(yi)) mean.phi1<-mean.phiM[i,i1.omega2] phii<-saemixObject["results"]["phi"][i,] phi1<-phii[i1.omega2] phi1.opti<-optim(par=phi1, fn=conditional.distribution_d, phii=phii,idi=idi,xi=xi,yi=yi,mphi=mean.phi1,idx=i1.omega2,iomega=iomega.phi1, trpar=saemixObject["model"]["transform.par"], model=saemixObject["model"]["model"]) # phi1.opti<-optim(par=phi1, fn=conditional.distribution, phii=phii,idi=idi,xi=xi,yi=yi,mphi=mean.phi1,idx=i1.omega2,iomega=iomega.phi1, trpar=saemixObject["model"]["transform.par"], model=saemixObject["model"]["model"], pres=saemixObject["results"]["respar"], err=saemixObject["model"]["error.model"],control = list(maxit = 2)) phi.map[i,i1.omega2]<-phi1.opti$par } #rep the map nchains time phi.map <- phi.map[rep(seq_len(nrow(phi.map)),Uargs$nchains ), ] map.psi<-transphi(phi.map,saemixObject["model"]["transform.par"]) map.psi<-data.frame(id=id.list,map.psi) map.phi<-data.frame(id=id.list,phi.map) psi_map <- as.matrix(map.psi[,-c(1)]) phi_map <- as.matrix(map.phi[,-c(1)]) eta_map <- phi_map[,varList$ind.eta] - mean.phiM[,varList$ind.eta] } indiv <- control$indiv.index etaM <- eta_map phiM<-etaM+mean.phiM if(Dargs$type=="structural"){ U.y<-compute.LLy_c(phiM,varList$pres,Uargs,Dargs,DYF) } else{ U.y<-compute.LLy_d(phiM,Uargs,Dargs,DYF) } U.eta<-0.5*rowSums(etaM*(etaM%*%somega)) count <- 0 for (m in 1:saemix.options$L_mcmc) { if(m%%100==0){ print(m) } for (i in 1:(nrow(phiM))) { eta_list[[i]][m,] <- etaM[i,] } nt2<-nbc2<-matrix(data=0,nrow=nb.etas,ncol=1) nrs2<-1 
adap <- rep(1, Dargs$NM) sigma <- saemix.options$sigma.val gamma <- saemix.options$gamma.val l<-c() for (u in 1:opt$nbiter.mcmc[5]) { etaMc<-etaM propc <- matrix(nrow = Dargs$NM,ncol = nb.etas) prop <- matrix(nrow = Dargs$NM,ncol = nb.etas) gradU <- matrix(nrow = Dargs$NM,ncol = nb.etas) gradUc <- matrix(nrow = Dargs$NM,ncol = nb.etas) #Gradient in current eta for (kj in 1:(nb.etas)){ etaM2 <- etaM phiM2 <- phiM etaM2[,kj] <- etaM[,kj] + etaM[,kj]/100 phiM2 <- mean.phiM[,varList$ind.eta]+etaM2 if(Dargs$type=="structural"){ U2.y<-compute.LLy_c(phiM2,varList$pres,Uargs,Dargs,DYF) } else{ U2.y<-compute.LLy_d(phiM2,Uargs,Dargs,DYF) } U2.eta<-0.5*rowSums(etaM2*(etaM2%*%somega)) for (i in 1:Dargs$NM){ gradU[i,kj] <- -(U2.y[i]-U.y[i]+U2.eta[i]-U.eta[i])/(etaM[i,kj]/100) } } # if (u>1){ # adap <- adap - gamma*(deltu + log(0.57)) # } Z <- matrix(rnorm(Dargs$NM*nb.etas), ncol=nb.etas) for (i in 1:Dargs$NM){ etaMc[i,] <- etaM[i,] + sigma*adap[i]*gradU[i,] + sqrt(2*sigma*adap[i])*Z[i,] } phiMc[,varList$ind.eta]<-mean.phiM[,varList$ind.eta]+etaMc if(Dargs$type=="structural"){ Uc.y<-compute.LLy_c(phiMc,varList$pres,Uargs,Dargs,DYF) } else{ Uc.y<-compute.LLy_d(phiMc,Uargs,Dargs,DYF) } Uc.eta<-0.5*rowSums(etaMc*(etaMc%*%somega)) #Gradient in candidate eta for (kj in 1:(nb.etas)){ etaM2 <- etaMc phiM2 <- phiMc etaM2[,kj] <- etaMc[,kj] + etaMc[,kj]/100 phiM2 <- mean.phiM[,varList$ind.eta]+etaM2 if(Dargs$type=="structural"){ U2.y<-compute.LLy_c(phiM2,varList$pres,Uargs,Dargs,DYF) } else{ U2.y<-compute.LLy_d(phiM2,Uargs,Dargs,DYF) } U2.eta<-0.5*rowSums(etaM2*(etaM2%*%somega)) for (i in 1:Dargs$NM){ gradUc[i,kj] <- -(U2.y[i]-Uc.y[i]+U2.eta[i]-Uc.eta[i])/(etaMc[i,kj]/100) } } for (i in 1:(Dargs$NM)){ propc[i,] <- ((etaMc[i,]-etaM[i,] - sigma*adap[i]*gradU[i,])/sqrt(2*sigma*adap[i]))^2 prop[i,] <- ((etaM[i,]-etaMc[i,] - sigma*adap[i]*gradUc[i,])/sqrt(2*sigma*adap[i]))^2 } P<-0.5*rowSums(prop) Pc<-0.5*rowSums(propc) deltu<-Uc.y-U.y+Uc.eta-U.eta + P - Pc 
ind<-which(deltu<(-1)*log(runif(Dargs$NM))) if (length(which(ind==indiv))>0){ count <- count +1 } # print(which(ind==indiv)) # print(length(ind)/Dargs$NM) # print(ind) etaM[ind,]<-etaMc[ind,] U.y[ind]<-Uc.y[ind] # Warning: Uc.y, Uc.eta = vecteurs U.eta[ind]<-Uc.eta[ind] nbc2<-nbc2+length(ind) nt2<-nt2+Dargs$NM } } } #NUTS with rstan if(opt$nbiter.mcmc[6]>0) { # etaMc<-etaM # propc <- U.eta # prop <- U.eta # saemix.options<-saemixObject["options"] # saemix.model<-saemixObject["model"] # saemix.data<-saemixObject["data"] # saemix.options$map <- TRUE # saemixObject["results"]["omega"] <- omega.eta # saemixObject["results"]["mean.phi"] <- mean.phi # saemixObject["results"]["phi"] <- phiM # i1.omega2<-varList$ind.eta # iomega.phi1<-solve(saemixObject["results"]["omega"][i1.omega2,i1.omega2]) # id<-saemixObject["data"]["data"][,saemixObject["data"]["name.group"]] # xind<-saemixObject["data"]["data"][,saemixObject["data"]["name.predictors"], drop=FALSE] # yobs<-saemixObject["data"]["data"][,saemixObject["data"]["name.response"]] # id.list<-unique(id) # phi.map<-saemixObject["results"]["mean.phi"] # if(Dargs$type=="structural"){ # for(i in 1:saemixObject["data"]["N"]) { # isuj<-id.list[i] # xi<-xind[id==isuj,,drop=FALSE] # yi<-yobs[id==isuj] # idi<-rep(1,length(yi)) # mean.phi1<-mean.phiM[i,i1.omega2] # phii<-saemixObject["results"]["phi"][i,] # phi1<-phii[i1.omega2] # phi1.opti<-optim(par=phi1, fn=conditional.distribution_c, phii=phii,idi=idi,xi=xi,yi=yi,mphi=mean.phi1,idx=i1.omega2,iomega=iomega.phi1, trpar=saemixObject["model"]["transform.par"], model=saemixObject["model"]["model"], pres=varList$pres, err=saemixObject["model"]["error.model"]) # phi.map[i,i1.omega2]<-phi1.opti$par # } # #rep the map nchains time # phi.map <- phi.map[rep(seq_len(nrow(phi.map)),Uargs$nchains ), ] # map.psi<-transphi(phi.map,saemixObject["model"]["transform.par"]) # map.psi<-data.frame(id=id.list,map.psi) # map.phi<-data.frame(id=id.list,phi.map) # psi_map <- as.matrix(map.psi[,-c(1)]) # 
phi_map <- as.matrix(map.phi[,-c(1)]) # eta_map <- phi_map - mean.phiM # } else { # for(i in 1:saemixObject["data"]["N"]) { # isuj<-id.list[i] # xi<-xind[id==isuj,,drop=FALSE] # # if(is.null(dim(xi))) xi<-matrix(xi,ncol=1) # yi<-yobs[id==isuj] # idi<-rep(1,length(yi)) # mean.phi1<-mean.phiM[i,i1.omega2] # phii<-saemixObject["results"]["phi"][i,] # phi1<-phii[i1.omega2] # phi1.opti<-optim(par=phi1, fn=conditional.distribution_d, phii=phii,idi=idi,xi=xi,yi=yi,mphi=mean.phi1,idx=i1.omega2,iomega=iomega.phi1, trpar=saemixObject["model"]["transform.par"], model=saemixObject["model"]["model"]) # # phi1.opti<-optim(par=phi1, fn=conditional.distribution, phii=phii,idi=idi,xi=xi,yi=yi,mphi=mean.phi1,idx=i1.omega2,iomega=iomega.phi1, trpar=saemixObject["model"]["transform.par"], model=saemixObject["model"]["model"], pres=saemixObject["results"]["respar"], err=saemixObject["model"]["error.model"],control = list(maxit = 2)) # phi.map[i,i1.omega2]<-phi1.opti$par # } # #rep the map nchains time # phi.map <- phi.map[rep(seq_len(nrow(phi.map)),Uargs$nchains ), ] # map.psi<-transphi(phi.map,saemixObject["model"]["transform.par"]) # map.psi<-data.frame(id=id.list,map.psi) # map.phi<-data.frame(id=id.list,phi.map) # psi_map <- as.matrix(map.psi[,-c(1)]) # phi_map <- as.matrix(map.phi[,-c(1)]) # eta_map <- phi_map[,varList$ind.eta] - mean.phiM[,varList$ind.eta] # } # etaM <- eta_map # phiM<-etaM+mean.phiM # psiM<-transphi(phiM,Dargs$transform.par) ## using Rstan package ###Linear # indiv <- control$indiv.index # obs <- Dargs$yM[Dargs$IdM==indiv] # age <- Dargs$XM[Dargs$IdM==indiv,] # stan.model <- control$modelstan # # stan_data <- list(N = length(obs),height = obs # # ,age = age, # # beta1_pop=mean.phiM[indiv,1],beta2_pop=mean.phiM[indiv,2], # # omega_beta1=sqrt(omega.eta[1,1]),omega_beta2=sqrt(omega.eta[2,2]), # # pres=sqrt(varList$pres[1])) # stan_data <- list(N = length(obs),height = obs # ,age = age, # 
beta1_pop=mean.phiM[indiv,1],beta2_pop=mean.phiM[indiv,2],beta3_pop=mean.phiM[indiv,3], # omega_beta1=sqrt(omega.eta[1,1]),omega_beta2=sqrt(omega.eta[2,2]),omega_beta3=sqrt(omega.eta[3,3]), # pres=sqrt(varList$pres[1])) # warmup <- 1000 # fit <- sampling(stan.model, data = stan_data, iter = 6*L_mcmc+warmup,init = phiM[indiv,], # warmup = warmup,chains = 1,algorithm = "NUTS") #can try "HMC", "Fixed_param" # fit_samples = extract(fit) # psiMstan <- fit_samples$beta[seq(1,6*L_mcmc,6),] # phiMstan<-transpsi(psiMstan,Dargs$transform.par) # etaMstan <- phiMstan - matrix(rep(mean.phiM[1,],each=nrow(phiMstan)),nrow=nrow(phiMstan)) # eta_list[[indiv]] <- etaMstan if(Dargs$type=="structural"){ # ###WARFA indiv <- control$indiv.index obs <- Dargs$yM[Dargs$IdM==indiv] dose <- unique(Dargs$XM[Dargs$IdM==indiv,1]) time <- Dargs$XM[Dargs$IdM==indiv,2] mean.psiM <- transphi(mean.phiM,Dargs$transform.par) stan.model <- control$modelstan stan_data <- list(N = length(obs),concentration = obs ,time = time, dose = dose, beta1_pop=mean.phiM[indiv,1],beta2_pop=mean.phiM[indiv,2],beta3_pop=mean.phiM[indiv,3], omega_beta1=sqrt(omega.eta[1,1]),omega_beta2=sqrt(omega.eta[2,2]),omega_beta3=sqrt(omega.eta[3,3]), pres=sqrt(varList$pres[1])) warmup <- 1000 fit <- sampling(stan.model, data = stan_data, iter = 6*saemix.options$L_mcmc+warmup,warmup = warmup, chains = 1,algorithm = "NUTS", init = psiM[indiv,]) #can try "HMC", "Fixed_param" # browser() fit_samples = extract(fit) psiMstan <- fit_samples$beta[seq(1,6*saemix.options$L_mcmc,6),] phiMstan<-transpsi(psiMstan,Dargs$transform.par) etaMstan <- phiMstan - matrix(rep(mean.phiM[1,],each=nrow(phiMstan)),nrow=nrow(phiMstan)) colMeans(etaMstan) eta_map[indiv,] eta_list[[indiv]] <- as.data.frame(etaMstan) } else { ##RTTE indiv <- control$indiv.index stan.model <- control$modelstan T <- Dargs$XM[Dargs$IdM==indiv,1] T_c <- 20 event_times <- T[!(T %in% c(0, T_c))] cens_times <- T[T == T_c] N_e <- length(event_times) N_c <- length(cens_times) stan_data 
<- list(N_e = N_e, N_c = N_c ,event_times = event_times, cens_times = cens_times, beta_pop=mean.phiM[indiv,2],lambda_pop=mean.phiM[indiv,1], omega_beta=sqrt(omega.eta[2,2]),omega_lambda=sqrt(omega.eta[1,1])) warmup <- 1000 # browser() fit <- sampling(stan.model, data = stan_data, iter = 6*saemix.options$L_mcmc+warmup,warmup = warmup, chains = 1,algorithm = "NUTS") fit_samples = extract(fit) psiMstan <- fit_samples$param[seq(1,6*saemix.options$L_mcmc,6),] phiMstan<-transpsi(psiMstan,Dargs$transform.par) etaMstan <- phiMstan - matrix(rep(mean.phiM[1,],each=nrow(phiMstan)),nrow=nrow(phiMstan)) eta_list[[indiv]] <- etaMstan } } #Using ADVI outputs for Independent sampler (mu and gamma) if(opt$nbiter.mcmc[7]>0) { #Initialization etaMc<-etaM propc <- U.eta prop <- U.eta saemix.options<-saemixObject["options"] saemix.model<-saemixObject["model"] saemix.data<-saemixObject["data"] saemix.options$map <- TRUE saemixObject["results"]["omega"] <- omega.eta saemixObject["results"]["mean.phi"] <- mean.phi saemixObject["results"]["phi"] <- phiM i1.omega2<-varList$ind.eta iomega.phi1<-solve(saemixObject["results"]["omega"][i1.omega2,i1.omega2]) id<-saemixObject["data"]["data"][,saemixObject["data"]["name.group"]] xind<-saemixObject["data"]["data"][,saemixObject["data"]["name.predictors"], drop=FALSE] yobs<-saemixObject["data"]["data"][,saemixObject["data"]["name.response"]] id.list<-unique(id) phi.map<-saemixObject["results"]["mean.phi"] etaM <- mean.phiM mu.vi <- mean.phiM Gamma.vi <- chol.Gamma.vi <- inv.Gamma.vi <- list(omega.eta,omega.eta) for (i in 1:(Dargs$NM)){ Gamma.vi[[i]] <- control$Gamma[[i]] chol.Gamma.vi[[i]] <- chol(Gamma.vi[[i]]) inv.Gamma.vi[[i]] <- solve(Gamma.vi[[i]]) } etaM <- control$mu mu.vi<- control$mu phiM<-etaM+mean.phiM U.eta<-0.5*rowSums(etaM*(etaM%*%somega)) if(Dargs$type=="structural"){ U.y<-compute.LLy_c(phiM,varList$pres,Uargs,Dargs,DYF) } else{ U.y <- compute.LLy_d(phiM,Uargs,Dargs,DYF) } propc <- U.eta prop <- U.eta 
nt2<-nbc2<-matrix(data=0,nrow=nb.etas,ncol=1) nrs2<-1 for (m in 1:saemix.options$L_mcmc) { if(m%%100==0){ print(m) } for (i in 1:(nrow(phiM))) { eta_list[[i]][m,] <- etaM[i,] } for (u in 1:opt$nbiter.mcmc[7]) { for (i in 1:(Dargs$NM)){ Mi <- rnorm(nb.etas)%*%chol.Gamma.vi[[i]] etaMc[i,varList$ind.eta]<- mu.vi[i,varList$ind.eta] + Mi } phiMc[,varList$ind.eta]<-mean.phiM[,varList$ind.eta]+etaMc[,varList$ind.eta] if(Dargs$type=="structural"){ Uc.y<-compute.LLy_c(phiMc,varList$pres,Uargs,Dargs,DYF) } else{ Uc.y<-compute.LLy_d(phiMc,Uargs,Dargs,DYF) } Uc.eta<-0.5*rowSums(etaMc[,varList$ind.eta]*(etaMc[,varList$ind.eta]%*%somega)) for (i in 1:(Dargs$NM)){ propc[i] <- 0.5*rowSums((etaMc[i,varList$ind.eta]-mu.vi[i,varList$ind.eta])*(etaMc[i,varList$ind.eta]-mu.vi[i,varList$ind.eta])%*%inv.Gamma.vi[[i]]) prop[i] <- 0.5*rowSums((etaM[i,varList$ind.eta]-mu.vi[i,varList$ind.eta])*(etaM[i,varList$ind.eta]-mu.vi[i,varList$ind.eta])%*%inv.Gamma.vi[[i]]) } deltu<-Uc.y-U.y+Uc.eta-U.eta + prop - propc ind<-which(deltu<(-1)*log(runif(Dargs$NM))) # print(length(ind)/Dargs$NM) etaM[ind,varList$ind.eta]<-etaMc[ind,varList$ind.eta] U.y[ind]<-Uc.y[ind] # Warning: Uc.y, Uc.eta = vecteurs U.eta[ind]<-Uc.eta[ind] } } } phiM[,varList$ind.eta]<-mean.phiM[,varList$ind.eta]+etaM[,varList$ind.eta] return(list(eta=eta_list, eta_ref=eta_listref, Gamma=Gamma.laplace, map = eta_map)) }
/fsaem/code/R/mcmc_final.R
no_license
BelhalK/PapersCode
R
false
false
29,015
r
############################### Simulation - MCMC kernels (E-step) ############################# mcmc<-function(model,data,control=list()) { # E-step - simulate unknown parameters # Input: kiter, Uargs, structural.model, mean.phi (unchanged) # Output: varList, DYF, phiM (changed) kiter <- 1 Gamma.laplace <- NULL saemixObject<-new(Class="SaemixObject",data=data,model=model,options=control) saemix.options<-saemixObject["options"] saemix.model<-saemixObject["model"] saemix.data<-saemixObject["data"] saemix.data@ocov<-saemix.data@ocov[saemix.data@data[,"mdv"]==0,,drop=FALSE] saemix.data@data<-saemix.data@data[saemix.data@data[,"mdv"]==0,] saemix.data@ntot.obs<-dim(saemix.data@data)[1] # Initialising random generator OLDRAND<-TRUE set.seed(saemix.options$seed) #intitialisation # xinit<-initialiseMainAlgo(saemix.data,saemix.model,saemix.options) xinit<-initialiseMainAlgo(saemix.data,saemix.model,saemix.options) saemix.model<-xinit$saemix.model Dargs<-xinit$Dargs Uargs<-xinit$Uargs varList<-xinit$varList phiM<-xinit$phiM mean.phi<-xinit$mean.phi DYF<-xinit$DYF opt<-xinit$opt betas<-betas.ini<-xinit$betas fixed.psi<-xinit$fixedpsi.ini var.eta<-varList$diag.omega structural.model<-saemix.model["model"] # Function to perform MCMC simulation nb.etas<-length(varList$ind.eta) domega<-cutoff(mydiag(varList$omega[varList$ind.eta,varList$ind.eta]),.Machine$double.eps) omega.eta<-varList$omega[varList$ind.eta,varList$ind.eta,drop=FALSE] omega.eta<-omega.eta-mydiag(mydiag(varList$omega[varList$ind.eta,varList$ind.eta]))+mydiag(domega) chol.omega<-try(chol(omega.eta)) somega<-solve(omega.eta) # "/" dans Matlab = division matricielle, selon la doc "roughly" B*INV(A) (et *= produit matriciel...) 
VK<-rep(c(1:nb.etas),2) mean.phiM<-do.call(rbind,rep(list(mean.phi),Uargs$nchains)) phiM[,varList$ind0.eta]<-mean.phiM[,varList$ind0.eta] saemix.options<-saemixObject["options"] map_range <- saemix.options$map.range if(Dargs$type=="structural"){ U.y<-compute.LLy_c(phiM,varList$pres,Uargs,Dargs,DYF) } else{ U.y <- compute.LLy_d(phiM,Uargs,Dargs,DYF) } etaM<-phiM[,varList$ind.eta]-mean.phiM[,varList$ind.eta,drop=FALSE] phiMc<-phiM psiM <- transphi(phiM,Dargs$transform.par) eta_map <- etaM eta_listref <- list(as.data.frame(matrix(nrow = saemix.options$L_mcmc,ncol = ncol(phiM)))) for (i in 1:(nrow(phiM))) { eta_listref[[i]] <- as.data.frame(matrix(nrow = saemix.options$L_mcmc,ncol = ncol(phiM))) eta_listref[[i]][,ncol(eta_listref[[i]])] <- i } eta_list <- list(as.data.frame(matrix(nrow = saemix.options$L_mcmc,ncol = ncol(phiM)))) for (i in 1:(nrow(phiM))) { eta_list[[i]] <- as.data.frame(matrix(nrow = saemix.options$L_mcmc,ncol = ncol(phiM))) eta_list[[i]][,ncol(eta_list[[i]])] <- i } if(opt$nbiter.mcmc[1]>0) { for (m in 1:saemix.options$L_mcmc) { if(m%%100==0){ print(m) } for (i in 1:(nrow(phiM))) { eta_listref[[i]][m,] <- etaM[i,] } for (i in 1:(nrow(phiM))) { eta_list[[i]][m,] <- etaM[i,] } for(u in 1:opt$nbiter.mcmc[1]) { # 1er noyau etaMc<-matrix(rnorm(Dargs$NM*nb.etas),ncol=nb.etas)%*%chol.omega phiMc[,varList$ind.eta]<-mean.phiM[,varList$ind.eta]+etaMc if(Dargs$type=="structural"){ Uc.y<-compute.LLy_c(phiMc,varList$pres,Uargs,Dargs,DYF) } else { Uc.y<-compute.LLy_d(phiMc,Uargs,Dargs,DYF) } deltau<-Uc.y-U.y ind<-which(deltau<(-1)*log(runif(Dargs$NM))) etaM[ind,]<-etaMc[ind,] U.y[ind]<-Uc.y[ind] } U.eta<-0.5*rowSums(etaM*(etaM%*%somega)) # Second stage if(opt$nbiter.mcmc[2]>0) { nt2<-nbc2<-matrix(data=0,nrow=nb.etas,ncol=1) nrs2<-1 for (u in 1:opt$nbiter.mcmc[2]) { for(vk2 in 1:nb.etas) { etaMc<-etaM # cat('vk2=',vk2,' nrs2=',nrs2,"\n") etaMc[,vk2]<-etaM[,vk2]+matrix(rnorm(Dargs$NM*nrs2), ncol=nrs2)%*%mydiag(varList$domega2[vk2,nrs2],nrow=1) # 2e noyau ? 
ou 1er noyau+permutation? phiMc[,varList$ind.eta]<-mean.phiM[,varList$ind.eta]+etaMc psiMc<-transphi(phiMc,Dargs$transform.par) if(Dargs$type=="structural"){ Uc.y<-compute.LLy_c(phiMc,varList$pres,Uargs,Dargs,DYF) } else { Uc.y<-compute.LLy_d(phiMc,Uargs,Dargs,DYF) } Uc.eta<-0.5*rowSums(etaMc*(etaMc%*%somega)) deltu<-Uc.y-U.y+Uc.eta-U.eta ind<-which(deltu<(-1)*log(runif(Dargs$NM))) etaM[ind,]<-etaMc[ind,] U.y[ind]<-Uc.y[ind] # Warning: Uc.y, Uc.eta = vecteurs U.eta[ind]<-Uc.eta[ind] nbc2[vk2]<-nbc2[vk2]+length(ind) nt2[vk2]<-nt2[vk2]+Dargs$NM } } varList$domega2[,nrs2]<-varList$domega2[,nrs2]*(1+opt$stepsize.rw* (nbc2/nt2-opt$proba.mcmc)) } if(opt$nbiter.mcmc[3]>0) { nt2<-nbc2<-matrix(data=0,nrow=nb.etas,ncol=1) nrs2<-kiter%%(nb.etas-1)+2 if(is.nan(nrs2)) nrs2<-1 # to deal with case nb.etas=1 for (u in 1:opt$nbiter.mcmc[3]) { if(nrs2<nb.etas) { vk<-c(0,sample(c(1:(nb.etas-1)),nrs2-1)) nb.iter2<-nb.etas } else { vk<-0:(nb.etas-1) # if(nb.etas==1) vk<-c(0) nb.iter2<-1 } for(k2 in 1:nb.iter2) { vk2<-VK[k2+vk] etaMc<-etaM etaMc[,vk2]<-etaM[,vk2]+matrix(rnorm(Dargs$NM*nrs2), ncol=nrs2)%*%mydiag(varList$domega2[vk2,nrs2]) phiMc[,varList$ind.eta]<-mean.phiM[,varList$ind.eta]+etaMc psiMc<-transphi(phiMc,Dargs$transform.par) if(Dargs$type=="structural"){ Uc.y<-compute.LLy_c(phiMc,varList$pres,Uargs,Dargs,DYF) } else { Uc.y<-compute.LLy_d(phiMc,Uargs,Dargs,DYF) } Uc.eta<-0.5*rowSums(etaMc*(etaMc%*%somega)) deltu<-Uc.y-U.y+Uc.eta-U.eta ind<-which(deltu<(-log(runif(Dargs$NM)))) etaM[ind,]<-etaMc[ind,] U.y[ind]<-Uc.y[ind] # Warning: Uc.y, Uc.eta = vecteurs U.eta[ind]<-Uc.eta[ind] nbc2[vk2]<-nbc2[vk2]+length(ind) nt2[vk2]<-nt2[vk2]+Dargs$NM } } varList$domega2[,nrs2]<-varList$domega2[,nrs2]*(1+opt$stepsize.rw* (nbc2/nt2-opt$proba.mcmc)) } } } U.eta<-0.5*rowSums(etaM*(etaM%*%somega)) if(opt$nbiter.mcmc[4]>0) { etaMc<-etaM propc <- U.eta prop <- U.eta saemix.options<-saemixObject["options"] saemix.model<-saemixObject["model"] saemix.data<-saemixObject["data"] saemix.options$map <- 
TRUE saemixObject["results"]["omega"] <- omega.eta saemixObject["results"]["mean.phi"] <- mean.phi saemixObject["results"]["phi"] <- phiM i1.omega2<-varList$ind.eta iomega.phi1<-solve(saemixObject["results"]["omega"][i1.omega2,i1.omega2]) id<-saemixObject["data"]["data"][,saemixObject["data"]["name.group"]] xind<-saemixObject["data"]["data"][,saemixObject["data"]["name.predictors"], drop=FALSE] yobs<-saemixObject["data"]["data"][,saemixObject["data"]["name.response"]] id.list<-unique(id) phi.map<-saemixObject["results"]["mean.phi"] if(Dargs$type=="structural"){ for(i in 1:saemixObject["data"]["N"]) { isuj<-id.list[i] xi<-xind[id==isuj,,drop=FALSE] yi<-yobs[id==isuj] idi<-rep(1,length(yi)) mean.phi1<-mean.phiM[i,i1.omega2] phii<-saemixObject["results"]["phi"][i,] phi1<-phii[i1.omega2] phi1.opti<-optim(par=phi1, fn=conditional.distribution_c, phii=phii,idi=idi, xi=xi,yi=yi,mphi=mean.phi1,idx=i1.omega2,iomega=iomega.phi1, trpar=saemixObject["model"]["transform.par"], model=saemixObject["model"]["model"], pres=varList$pres, err=saemixObject["model"]["error.model"]) phi.map[i,i1.omega2]<-phi1.opti$par } #rep the map nchains time phi.map <- phi.map[rep(seq_len(nrow(phi.map)),Uargs$nchains ), ] map.psi<-transphi(phi.map,saemixObject["model"]["transform.par"]) map.psi<-data.frame(id=id.list,map.psi) map.phi<-data.frame(id=id.list,phi.map) psi_map <- as.matrix(map.psi[,-c(1)]) # browser() phi_map <- as.matrix(map.phi[,-c(1)]) eta_map <- phi_map - mean.phiM fpred1<-structural.model(psi_map, Dargs$IdM, Dargs$XM) gradf <- matrix(0L, nrow = length(fpred1), ncol = nb.etas) for (j in 1:nb.etas) { psi_map2 <- psi_map psi_map2[,j] <- psi_map[,j]+psi_map[,j]/1000 fpred1<-structural.model(psi_map, Dargs$IdM, Dargs$XM) fpred2<-structural.model(psi_map2, Dargs$IdM, Dargs$XM) for (i in 1:(Dargs$NM)){ r = which(Dargs$IdM==i) gradf[r,j] <- (fpred2[r] - fpred1[r])/(psi_map[i,j]/1000) } } gradh <- list(omega.eta,omega.eta) for (i in 1:Dargs$NM){ gradh[[i]] <- gradh[[1]] } for (j in 
1:nb.etas) { phi_map2 <- phi_map phi_map2[,j] <- phi_map[,j]+phi_map[,j]/1000 psi_map2 <- transphi(phi_map2,saemixObject["model"]["transform.par"]) for (i in 1:(Dargs$NM)){ gradh[[i]][,j] <- (psi_map2[i,] - psi_map[i,])/(phi_map[i,]/1000) } } #calculation of the covariance matrix of the proposal Gamma <- chol.Gamma <- inv.chol.Gamma <- inv.Gamma <- list(omega.eta,omega.eta) for (i in 1:(Dargs$NM)){ r = which(Dargs$IdM==i) temp <- gradf[r,]%*%gradh[[i]] Gamma[[i]] <- solve(t(temp)%*%temp/(varList$pres[1])^2+solve(omega.eta)) chol.Gamma[[i]] <- chol(Gamma[[i]]) inv.chol.Gamma[[i]] <- solve(chol.Gamma[[i]]) inv.Gamma[[i]] <- solve(Gamma[[i]]) } Gamma.laplace <- Gamma } else { for(i in 1:saemixObject["data"]["N"]) { isuj<-id.list[i] xi<-xind[id==isuj,,drop=FALSE] # if(is.null(dim(xi))) xi<-matrix(xi,ncol=1) yi<-yobs[id==isuj] idi<-rep(1,length(yi)) mean.phi1<-mean.phiM[i,i1.omega2] phii<-saemixObject["results"]["phi"][i,] phi1<-phii[i1.omega2] phi1.opti<-optim(par=phi1, fn=conditional.distribution_d, phii=phii,idi=idi,xi=xi,yi=yi,mphi=mean.phi1,idx=i1.omega2,iomega=iomega.phi1, trpar=saemixObject["model"]["transform.par"], model=saemixObject["model"]["model"]) phi.map[i,i1.omega2]<-phi1.opti$par } #rep the map nchains time phi.map <- phi.map[rep(seq_len(nrow(phi.map)),Uargs$nchains ), ] map.psi<-transphi(phi.map,saemixObject["model"]["transform.par"]) map.psi<-data.frame(id=id.list,map.psi) map.phi<-data.frame(id=id.list,phi.map) psi_map <- as.matrix(map.psi[,-c(1)]) phi_map <- as.matrix(map.phi[,-c(1)]) eta_map <- phi_map[,varList$ind.eta] - mean.phiM[,varList$ind.eta] #gradient at the map estimation gradp <- matrix(0L, nrow = Dargs$NM, ncol = nb.etas) for (j in 1:nb.etas) { phi_map2 <- phi_map phi_map2[,j] <- phi_map[,j]+phi_map[,j]/100; psi_map2 <- transphi(phi_map2,saemixObject["model"]["transform.par"]) fpred1<-structural.model(psi_map, Dargs$IdM, Dargs$XM) DYF[Uargs$ind.ioM]<- fpred1 l1<-colSums(DYF) fpred2<-structural.model(psi_map2, Dargs$IdM, Dargs$XM) 
DYF[Uargs$ind.ioM]<- fpred2 l2<-colSums(DYF) for (i in 1:(Dargs$NM)){ gradp[i,j] <- (l2[i] - l1[i])/(phi_map[i,j]/100) } } #calculation of the covariance matrix of the proposal fpred<-structural.model(psi_map, Dargs$IdM, Dargs$XM) DYF[Uargs$ind.ioM]<- fpred denom <- colSums(DYF) Gamma <- chol.Gamma <- inv.chol.Gamma <- inv.Gamma <- list(omega.eta,omega.eta) z <- matrix(0L, nrow = length(fpred), ncol = 1) for (i in 1:(Dargs$NM)){ Gamma[[i]] <- solve(gradp[i,]%*%t(gradp[i,])/denom[i]^2+solve(omega.eta)) chol.Gamma[[i]] <- chol(Gamma[[i]]) inv.Gamma[[i]] <- solve(Gamma[[i]]) inv.chol.Gamma[[i]] <- solve(chol.Gamma[[i]]) } } etaM <- eta_map phiM<-etaM+mean.phiM psiM<-transphi(phiM,Dargs$transform.par) U.eta<-0.5*rowSums(etaM*(etaM%*%somega)) if(Dargs$type=="structural"){ U.y<-compute.LLy_c(phiM,varList$pres,Uargs,Dargs,DYF) } else{ U.y <- compute.LLy_d(phiM,Uargs,Dargs,DYF) } df <- 3 for (m in 1:saemix.options$L_mcmc) { if(m%%100==0){ print(m) } for (i in 1:(nrow(phiM))) { eta_list[[i]][m,] <- etaM[i,] # eta_list[[i]][m,] <- psiM[i,] } for (u in 1:opt$nbiter.mcmc[4]) { #generate candidate eta for (i in 1:(Dargs$NM)){ Mi <- rnorm(nb.etas)%*%chol.Gamma[[i]] etaMc[i,varList$ind.eta]<- eta_map[i,varList$ind.eta] + Mi # etaMc[i,varList$ind.eta]<- eta_map[i,varList$ind.eta] + rt(nb.etas,df)%*%chol.Gamma[[i]] } phiMc[,varList$ind.eta]<-mean.phiM[,varList$ind.eta]+etaMc[,varList$ind.eta] psiMc<-transphi(phiMc,Dargs$transform.par) if(Dargs$type=="structural"){ Uc.y<-compute.LLy_c(phiMc,varList$pres,Uargs,Dargs,DYF) } else{ Uc.y<-compute.LLy_d(phiMc,Uargs,Dargs,DYF) } Uc.eta<-0.5*rowSums(etaMc[,varList$ind.eta]*(etaMc[,varList$ind.eta]%*%somega)) for (i in 1:(Dargs$NM)){ # propc[i] <- 0.5*rowSums((etaMc[i,varList$ind.eta]-eta_map[i,varList$ind.eta])*(etaMc[i,varList$ind.eta]-eta_map[i,varList$ind.eta])%*%inv.Gamma[[i]]) # prop[i] <- 0.5*rowSums((etaM[i,varList$ind.eta]-eta_map[i,varList$ind.eta])*(etaM[i,varList$ind.eta]-eta_map[i,varList$ind.eta])%*%inv.Gamma[[i]]) propc[i] <- 
-sum(log(dt((etaMc[i,varList$ind.eta]-eta_map[i,varList$ind.eta])%*%inv.chol.Gamma[[i]],df,log=FALSE))) prop[i] <- -sum(log(dt((etaM[i,varList$ind.eta]-eta_map[i,varList$ind.eta])%*%inv.chol.Gamma[[i]],df,log=FALSE))) } deltu<-Uc.y-U.y+Uc.eta-U.eta + prop - propc ind<-which(deltu<(-1)*log(runif(Dargs$NM))) # print(length(ind)/Dargs$NM) etaM[ind,varList$ind.eta]<-etaMc[ind,varList$ind.eta] phiM[,varList$ind.eta]<-mean.phiM[,varList$ind.eta]+etaM[,varList$ind.eta] psiM<-transphi(phiM,Dargs$transform.par) # psiM[ind,varList$ind.eta]<-psiMc[ind,varList$ind.eta] U.y[ind]<-Uc.y[ind] # Warning: Uc.y, Uc.eta = vecteurs U.eta[ind]<-Uc.eta[ind] } } } #MALA if(opt$nbiter.mcmc[5]>0) { etaMc<-etaM propc <- U.eta prop <- U.eta saemix.options<-saemixObject["options"] saemix.model<-saemixObject["model"] saemix.data<-saemixObject["data"] saemix.options$map <- TRUE saemixObject["results"]["omega"] <- omega.eta saemixObject["results"]["mean.phi"] <- mean.phi saemixObject["results"]["phi"] <- phiM i1.omega2<-varList$ind.eta iomega.phi1<-solve(saemixObject["results"]["omega"][i1.omega2,i1.omega2]) id<-saemixObject["data"]["data"][,saemixObject["data"]["name.group"]] xind<-saemixObject["data"]["data"][,saemixObject["data"]["name.predictors"], drop=FALSE] yobs<-saemixObject["data"]["data"][,saemixObject["data"]["name.response"]] id.list<-unique(id) phi.map<-saemixObject["results"]["mean.phi"] if(Dargs$type=="structural"){ for(i in 1:saemixObject["data"]["N"]) { isuj<-id.list[i] xi<-xind[id==isuj,,drop=FALSE] yi<-yobs[id==isuj] idi<-rep(1,length(yi)) mean.phi1<-mean.phiM[i,i1.omega2] phii<-saemixObject["results"]["phi"][i,] phi1<-phii[i1.omega2] phi1.opti<-optim(par=phi1, fn=conditional.distribution_c, phii=phii,idi=idi,xi=xi,yi=yi,mphi=mean.phi1,idx=i1.omega2,iomega=iomega.phi1, trpar=saemixObject["model"]["transform.par"], model=saemixObject["model"]["model"], pres=varList$pres, err=saemixObject["model"]["error.model"]) phi.map[i,i1.omega2]<-phi1.opti$par } #rep the map nchains time 
phi.map <- phi.map[rep(seq_len(nrow(phi.map)),Uargs$nchains ), ] map.psi<-transphi(phi.map,saemixObject["model"]["transform.par"]) map.psi<-data.frame(id=id.list,map.psi) map.phi<-data.frame(id=id.list,phi.map) psi_map <- as.matrix(map.psi[,-c(1)]) phi_map <- as.matrix(map.phi[,-c(1)]) eta_map <- phi_map - mean.phiM } else { for(i in 1:saemixObject["data"]["N"]) { isuj<-id.list[i] xi<-xind[id==isuj,,drop=FALSE] # if(is.null(dim(xi))) xi<-matrix(xi,ncol=1) yi<-yobs[id==isuj] idi<-rep(1,length(yi)) mean.phi1<-mean.phiM[i,i1.omega2] phii<-saemixObject["results"]["phi"][i,] phi1<-phii[i1.omega2] phi1.opti<-optim(par=phi1, fn=conditional.distribution_d, phii=phii,idi=idi,xi=xi,yi=yi,mphi=mean.phi1,idx=i1.omega2,iomega=iomega.phi1, trpar=saemixObject["model"]["transform.par"], model=saemixObject["model"]["model"]) # phi1.opti<-optim(par=phi1, fn=conditional.distribution, phii=phii,idi=idi,xi=xi,yi=yi,mphi=mean.phi1,idx=i1.omega2,iomega=iomega.phi1, trpar=saemixObject["model"]["transform.par"], model=saemixObject["model"]["model"], pres=saemixObject["results"]["respar"], err=saemixObject["model"]["error.model"],control = list(maxit = 2)) phi.map[i,i1.omega2]<-phi1.opti$par } #rep the map nchains time phi.map <- phi.map[rep(seq_len(nrow(phi.map)),Uargs$nchains ), ] map.psi<-transphi(phi.map,saemixObject["model"]["transform.par"]) map.psi<-data.frame(id=id.list,map.psi) map.phi<-data.frame(id=id.list,phi.map) psi_map <- as.matrix(map.psi[,-c(1)]) phi_map <- as.matrix(map.phi[,-c(1)]) eta_map <- phi_map[,varList$ind.eta] - mean.phiM[,varList$ind.eta] } indiv <- control$indiv.index etaM <- eta_map phiM<-etaM+mean.phiM if(Dargs$type=="structural"){ U.y<-compute.LLy_c(phiM,varList$pres,Uargs,Dargs,DYF) } else{ U.y<-compute.LLy_d(phiM,Uargs,Dargs,DYF) } U.eta<-0.5*rowSums(etaM*(etaM%*%somega)) count <- 0 for (m in 1:saemix.options$L_mcmc) { if(m%%100==0){ print(m) } for (i in 1:(nrow(phiM))) { eta_list[[i]][m,] <- etaM[i,] } nt2<-nbc2<-matrix(data=0,nrow=nb.etas,ncol=1) nrs2<-1 
adap <- rep(1, Dargs$NM) sigma <- saemix.options$sigma.val gamma <- saemix.options$gamma.val l<-c() for (u in 1:opt$nbiter.mcmc[5]) { etaMc<-etaM propc <- matrix(nrow = Dargs$NM,ncol = nb.etas) prop <- matrix(nrow = Dargs$NM,ncol = nb.etas) gradU <- matrix(nrow = Dargs$NM,ncol = nb.etas) gradUc <- matrix(nrow = Dargs$NM,ncol = nb.etas) #Gradient in current eta for (kj in 1:(nb.etas)){ etaM2 <- etaM phiM2 <- phiM etaM2[,kj] <- etaM[,kj] + etaM[,kj]/100 phiM2 <- mean.phiM[,varList$ind.eta]+etaM2 if(Dargs$type=="structural"){ U2.y<-compute.LLy_c(phiM2,varList$pres,Uargs,Dargs,DYF) } else{ U2.y<-compute.LLy_d(phiM2,Uargs,Dargs,DYF) } U2.eta<-0.5*rowSums(etaM2*(etaM2%*%somega)) for (i in 1:Dargs$NM){ gradU[i,kj] <- -(U2.y[i]-U.y[i]+U2.eta[i]-U.eta[i])/(etaM[i,kj]/100) } } # if (u>1){ # adap <- adap - gamma*(deltu + log(0.57)) # } Z <- matrix(rnorm(Dargs$NM*nb.etas), ncol=nb.etas) for (i in 1:Dargs$NM){ etaMc[i,] <- etaM[i,] + sigma*adap[i]*gradU[i,] + sqrt(2*sigma*adap[i])*Z[i,] } phiMc[,varList$ind.eta]<-mean.phiM[,varList$ind.eta]+etaMc if(Dargs$type=="structural"){ Uc.y<-compute.LLy_c(phiMc,varList$pres,Uargs,Dargs,DYF) } else{ Uc.y<-compute.LLy_d(phiMc,Uargs,Dargs,DYF) } Uc.eta<-0.5*rowSums(etaMc*(etaMc%*%somega)) #Gradient in candidate eta for (kj in 1:(nb.etas)){ etaM2 <- etaMc phiM2 <- phiMc etaM2[,kj] <- etaMc[,kj] + etaMc[,kj]/100 phiM2 <- mean.phiM[,varList$ind.eta]+etaM2 if(Dargs$type=="structural"){ U2.y<-compute.LLy_c(phiM2,varList$pres,Uargs,Dargs,DYF) } else{ U2.y<-compute.LLy_d(phiM2,Uargs,Dargs,DYF) } U2.eta<-0.5*rowSums(etaM2*(etaM2%*%somega)) for (i in 1:Dargs$NM){ gradUc[i,kj] <- -(U2.y[i]-Uc.y[i]+U2.eta[i]-Uc.eta[i])/(etaMc[i,kj]/100) } } for (i in 1:(Dargs$NM)){ propc[i,] <- ((etaMc[i,]-etaM[i,] - sigma*adap[i]*gradU[i,])/sqrt(2*sigma*adap[i]))^2 prop[i,] <- ((etaM[i,]-etaMc[i,] - sigma*adap[i]*gradUc[i,])/sqrt(2*sigma*adap[i]))^2 } P<-0.5*rowSums(prop) Pc<-0.5*rowSums(propc) deltu<-Uc.y-U.y+Uc.eta-U.eta + P - Pc 
ind<-which(deltu<(-1)*log(runif(Dargs$NM))) if (length(which(ind==indiv))>0){ count <- count +1 } # print(which(ind==indiv)) # print(length(ind)/Dargs$NM) # print(ind) etaM[ind,]<-etaMc[ind,] U.y[ind]<-Uc.y[ind] # Warning: Uc.y, Uc.eta = vecteurs U.eta[ind]<-Uc.eta[ind] nbc2<-nbc2+length(ind) nt2<-nt2+Dargs$NM } } } #NUTS with rstan if(opt$nbiter.mcmc[6]>0) { # etaMc<-etaM # propc <- U.eta # prop <- U.eta # saemix.options<-saemixObject["options"] # saemix.model<-saemixObject["model"] # saemix.data<-saemixObject["data"] # saemix.options$map <- TRUE # saemixObject["results"]["omega"] <- omega.eta # saemixObject["results"]["mean.phi"] <- mean.phi # saemixObject["results"]["phi"] <- phiM # i1.omega2<-varList$ind.eta # iomega.phi1<-solve(saemixObject["results"]["omega"][i1.omega2,i1.omega2]) # id<-saemixObject["data"]["data"][,saemixObject["data"]["name.group"]] # xind<-saemixObject["data"]["data"][,saemixObject["data"]["name.predictors"], drop=FALSE] # yobs<-saemixObject["data"]["data"][,saemixObject["data"]["name.response"]] # id.list<-unique(id) # phi.map<-saemixObject["results"]["mean.phi"] # if(Dargs$type=="structural"){ # for(i in 1:saemixObject["data"]["N"]) { # isuj<-id.list[i] # xi<-xind[id==isuj,,drop=FALSE] # yi<-yobs[id==isuj] # idi<-rep(1,length(yi)) # mean.phi1<-mean.phiM[i,i1.omega2] # phii<-saemixObject["results"]["phi"][i,] # phi1<-phii[i1.omega2] # phi1.opti<-optim(par=phi1, fn=conditional.distribution_c, phii=phii,idi=idi,xi=xi,yi=yi,mphi=mean.phi1,idx=i1.omega2,iomega=iomega.phi1, trpar=saemixObject["model"]["transform.par"], model=saemixObject["model"]["model"], pres=varList$pres, err=saemixObject["model"]["error.model"]) # phi.map[i,i1.omega2]<-phi1.opti$par # } # #rep the map nchains time # phi.map <- phi.map[rep(seq_len(nrow(phi.map)),Uargs$nchains ), ] # map.psi<-transphi(phi.map,saemixObject["model"]["transform.par"]) # map.psi<-data.frame(id=id.list,map.psi) # map.phi<-data.frame(id=id.list,phi.map) # psi_map <- as.matrix(map.psi[,-c(1)]) # 
phi_map <- as.matrix(map.phi[,-c(1)]) # eta_map <- phi_map - mean.phiM # } else { # for(i in 1:saemixObject["data"]["N"]) { # isuj<-id.list[i] # xi<-xind[id==isuj,,drop=FALSE] # # if(is.null(dim(xi))) xi<-matrix(xi,ncol=1) # yi<-yobs[id==isuj] # idi<-rep(1,length(yi)) # mean.phi1<-mean.phiM[i,i1.omega2] # phii<-saemixObject["results"]["phi"][i,] # phi1<-phii[i1.omega2] # phi1.opti<-optim(par=phi1, fn=conditional.distribution_d, phii=phii,idi=idi,xi=xi,yi=yi,mphi=mean.phi1,idx=i1.omega2,iomega=iomega.phi1, trpar=saemixObject["model"]["transform.par"], model=saemixObject["model"]["model"]) # # phi1.opti<-optim(par=phi1, fn=conditional.distribution, phii=phii,idi=idi,xi=xi,yi=yi,mphi=mean.phi1,idx=i1.omega2,iomega=iomega.phi1, trpar=saemixObject["model"]["transform.par"], model=saemixObject["model"]["model"], pres=saemixObject["results"]["respar"], err=saemixObject["model"]["error.model"],control = list(maxit = 2)) # phi.map[i,i1.omega2]<-phi1.opti$par # } # #rep the map nchains time # phi.map <- phi.map[rep(seq_len(nrow(phi.map)),Uargs$nchains ), ] # map.psi<-transphi(phi.map,saemixObject["model"]["transform.par"]) # map.psi<-data.frame(id=id.list,map.psi) # map.phi<-data.frame(id=id.list,phi.map) # psi_map <- as.matrix(map.psi[,-c(1)]) # phi_map <- as.matrix(map.phi[,-c(1)]) # eta_map <- phi_map[,varList$ind.eta] - mean.phiM[,varList$ind.eta] # } # etaM <- eta_map # phiM<-etaM+mean.phiM # psiM<-transphi(phiM,Dargs$transform.par) ## using Rstan package ###Linear # indiv <- control$indiv.index # obs <- Dargs$yM[Dargs$IdM==indiv] # age <- Dargs$XM[Dargs$IdM==indiv,] # stan.model <- control$modelstan # # stan_data <- list(N = length(obs),height = obs # # ,age = age, # # beta1_pop=mean.phiM[indiv,1],beta2_pop=mean.phiM[indiv,2], # # omega_beta1=sqrt(omega.eta[1,1]),omega_beta2=sqrt(omega.eta[2,2]), # # pres=sqrt(varList$pres[1])) # stan_data <- list(N = length(obs),height = obs # ,age = age, # 
beta1_pop=mean.phiM[indiv,1],beta2_pop=mean.phiM[indiv,2],beta3_pop=mean.phiM[indiv,3], # omega_beta1=sqrt(omega.eta[1,1]),omega_beta2=sqrt(omega.eta[2,2]),omega_beta3=sqrt(omega.eta[3,3]), # pres=sqrt(varList$pres[1])) # warmup <- 1000 # fit <- sampling(stan.model, data = stan_data, iter = 6*L_mcmc+warmup,init = phiM[indiv,], # warmup = warmup,chains = 1,algorithm = "NUTS") #can try "HMC", "Fixed_param" # fit_samples = extract(fit) # psiMstan <- fit_samples$beta[seq(1,6*L_mcmc,6),] # phiMstan<-transpsi(psiMstan,Dargs$transform.par) # etaMstan <- phiMstan - matrix(rep(mean.phiM[1,],each=nrow(phiMstan)),nrow=nrow(phiMstan)) # eta_list[[indiv]] <- etaMstan if(Dargs$type=="structural"){ # ###WARFA indiv <- control$indiv.index obs <- Dargs$yM[Dargs$IdM==indiv] dose <- unique(Dargs$XM[Dargs$IdM==indiv,1]) time <- Dargs$XM[Dargs$IdM==indiv,2] mean.psiM <- transphi(mean.phiM,Dargs$transform.par) stan.model <- control$modelstan stan_data <- list(N = length(obs),concentration = obs ,time = time, dose = dose, beta1_pop=mean.phiM[indiv,1],beta2_pop=mean.phiM[indiv,2],beta3_pop=mean.phiM[indiv,3], omega_beta1=sqrt(omega.eta[1,1]),omega_beta2=sqrt(omega.eta[2,2]),omega_beta3=sqrt(omega.eta[3,3]), pres=sqrt(varList$pres[1])) warmup <- 1000 fit <- sampling(stan.model, data = stan_data, iter = 6*saemix.options$L_mcmc+warmup,warmup = warmup, chains = 1,algorithm = "NUTS", init = psiM[indiv,]) #can try "HMC", "Fixed_param" # browser() fit_samples = extract(fit) psiMstan <- fit_samples$beta[seq(1,6*saemix.options$L_mcmc,6),] phiMstan<-transpsi(psiMstan,Dargs$transform.par) etaMstan <- phiMstan - matrix(rep(mean.phiM[1,],each=nrow(phiMstan)),nrow=nrow(phiMstan)) colMeans(etaMstan) eta_map[indiv,] eta_list[[indiv]] <- as.data.frame(etaMstan) } else { ##RTTE indiv <- control$indiv.index stan.model <- control$modelstan T <- Dargs$XM[Dargs$IdM==indiv,1] T_c <- 20 event_times <- T[!(T %in% c(0, T_c))] cens_times <- T[T == T_c] N_e <- length(event_times) N_c <- length(cens_times) stan_data 
<- list(N_e = N_e, N_c = N_c ,event_times = event_times, cens_times = cens_times, beta_pop=mean.phiM[indiv,2],lambda_pop=mean.phiM[indiv,1], omega_beta=sqrt(omega.eta[2,2]),omega_lambda=sqrt(omega.eta[1,1])) warmup <- 1000 # browser() fit <- sampling(stan.model, data = stan_data, iter = 6*saemix.options$L_mcmc+warmup,warmup = warmup, chains = 1,algorithm = "NUTS") fit_samples = extract(fit) psiMstan <- fit_samples$param[seq(1,6*saemix.options$L_mcmc,6),] phiMstan<-transpsi(psiMstan,Dargs$transform.par) etaMstan <- phiMstan - matrix(rep(mean.phiM[1,],each=nrow(phiMstan)),nrow=nrow(phiMstan)) eta_list[[indiv]] <- etaMstan } } #Using ADVI outputs for Independent sampler (mu and gamma) if(opt$nbiter.mcmc[7]>0) { #Initialization etaMc<-etaM propc <- U.eta prop <- U.eta saemix.options<-saemixObject["options"] saemix.model<-saemixObject["model"] saemix.data<-saemixObject["data"] saemix.options$map <- TRUE saemixObject["results"]["omega"] <- omega.eta saemixObject["results"]["mean.phi"] <- mean.phi saemixObject["results"]["phi"] <- phiM i1.omega2<-varList$ind.eta iomega.phi1<-solve(saemixObject["results"]["omega"][i1.omega2,i1.omega2]) id<-saemixObject["data"]["data"][,saemixObject["data"]["name.group"]] xind<-saemixObject["data"]["data"][,saemixObject["data"]["name.predictors"], drop=FALSE] yobs<-saemixObject["data"]["data"][,saemixObject["data"]["name.response"]] id.list<-unique(id) phi.map<-saemixObject["results"]["mean.phi"] etaM <- mean.phiM mu.vi <- mean.phiM Gamma.vi <- chol.Gamma.vi <- inv.Gamma.vi <- list(omega.eta,omega.eta) for (i in 1:(Dargs$NM)){ Gamma.vi[[i]] <- control$Gamma[[i]] chol.Gamma.vi[[i]] <- chol(Gamma.vi[[i]]) inv.Gamma.vi[[i]] <- solve(Gamma.vi[[i]]) } etaM <- control$mu mu.vi<- control$mu phiM<-etaM+mean.phiM U.eta<-0.5*rowSums(etaM*(etaM%*%somega)) if(Dargs$type=="structural"){ U.y<-compute.LLy_c(phiM,varList$pres,Uargs,Dargs,DYF) } else{ U.y <- compute.LLy_d(phiM,Uargs,Dargs,DYF) } propc <- U.eta prop <- U.eta 
nt2<-nbc2<-matrix(data=0,nrow=nb.etas,ncol=1) nrs2<-1 for (m in 1:saemix.options$L_mcmc) { if(m%%100==0){ print(m) } for (i in 1:(nrow(phiM))) { eta_list[[i]][m,] <- etaM[i,] } for (u in 1:opt$nbiter.mcmc[7]) { for (i in 1:(Dargs$NM)){ Mi <- rnorm(nb.etas)%*%chol.Gamma.vi[[i]] etaMc[i,varList$ind.eta]<- mu.vi[i,varList$ind.eta] + Mi } phiMc[,varList$ind.eta]<-mean.phiM[,varList$ind.eta]+etaMc[,varList$ind.eta] if(Dargs$type=="structural"){ Uc.y<-compute.LLy_c(phiMc,varList$pres,Uargs,Dargs,DYF) } else{ Uc.y<-compute.LLy_d(phiMc,Uargs,Dargs,DYF) } Uc.eta<-0.5*rowSums(etaMc[,varList$ind.eta]*(etaMc[,varList$ind.eta]%*%somega)) for (i in 1:(Dargs$NM)){ propc[i] <- 0.5*rowSums((etaMc[i,varList$ind.eta]-mu.vi[i,varList$ind.eta])*(etaMc[i,varList$ind.eta]-mu.vi[i,varList$ind.eta])%*%inv.Gamma.vi[[i]]) prop[i] <- 0.5*rowSums((etaM[i,varList$ind.eta]-mu.vi[i,varList$ind.eta])*(etaM[i,varList$ind.eta]-mu.vi[i,varList$ind.eta])%*%inv.Gamma.vi[[i]]) } deltu<-Uc.y-U.y+Uc.eta-U.eta + prop - propc ind<-which(deltu<(-1)*log(runif(Dargs$NM))) # print(length(ind)/Dargs$NM) etaM[ind,varList$ind.eta]<-etaMc[ind,varList$ind.eta] U.y[ind]<-Uc.y[ind] # Warning: Uc.y, Uc.eta = vecteurs U.eta[ind]<-Uc.eta[ind] } } } phiM[,varList$ind.eta]<-mean.phiM[,varList$ind.eta]+etaM[,varList$ind.eta] return(list(eta=eta_list, eta_ref=eta_listref, Gamma=Gamma.laplace, map = eta_map)) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/add_labels_to_reduced_matrix.R \name{add_labels_to_reduced_matrix} \alias{add_labels_to_reduced_matrix} \title{Augment a reduced matrix with label information and transition counts from original dataset} \usage{ add_labels_to_reduced_matrix(reduced_matrix, previous_matrix, ind_class = "sic07") } \arguments{ \item{reduced_matrix}{A matrix with 3 columns from industry transitions} \item{previous_matrix}{A data frame with the raw counts transition daya} \item{ind_class}{The label for the industry classification} } \value{ An updated matrix. } \description{ Augment a reduced matrix with label information and transition counts from original dataset } \examples{ add_labels_to_reduced_matrix(reduced_matrix,old_matrix,'sic07') }
/R/rkf/man/add_labels_to_reduced_matrix.Rd
no_license
alan-turing-institute/KnowledgeFlows
R
false
true
815
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/add_labels_to_reduced_matrix.R \name{add_labels_to_reduced_matrix} \alias{add_labels_to_reduced_matrix} \title{Augment a reduced matrix with label information and transition counts from original dataset} \usage{ add_labels_to_reduced_matrix(reduced_matrix, previous_matrix, ind_class = "sic07") } \arguments{ \item{reduced_matrix}{A matrix with 3 columns from industry transitions} \item{previous_matrix}{A data frame with the raw counts transition daya} \item{ind_class}{The label for the industry classification} } \value{ An updated matrix. } \description{ Augment a reduced matrix with label information and transition counts from original dataset } \examples{ add_labels_to_reduced_matrix(reduced_matrix,old_matrix,'sic07') }
library("aroma.affymetrix") verbose <- Arguments$getVerbose(-8, timestamp=TRUE) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # Setup data set # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - dataSet <- "HapMap,CEU,testset" chipType <- "Mapping50K_Hind240" cdf <- AffymetrixCdfFile$byChipType(chipType) csR <- AffymetrixCelSet$byName(dataSet, cdf=cdf) print(csR) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # SNPRMA # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ces <- justSNPRMA(csR, normalizeToHapmap=TRUE, returnESet=FALSE, verbose=verbose) print(ces) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # CRLMM # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - recalibrate <- TRUE crlmm <- CrlmmModel(ces, tags="*,oligo", recalibrate=recalibrate) print(crlmm) units <- fit(crlmm, ram="oligo", verbose=verbose) str(units) callSet <- getCallSet(crlmm) print(callSet) confSet <- getConfidenceScoreSet(crlmm) print(confSet) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # Plot along genome # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - chr <- 2 chrTag <- sprintf("Chr%02d", chr) ugp <- getAromaUgpFile(cdf) units <- getUnitsOnChromosome(ugp, chromosome=chr) pos <- getPositions(ugp, units=units)/1e6 toPNG(getFullName(ces), tags=c(chrTag), { layout(matrix(seq_along(ces), ncol=2, byrow=TRUE)) par(mar=c(3.5,4,1.5,1), mgp=c(1.8,0.5,0), pch=".") for (ii in seq_along(ces)) { ce <- ces[[ii]] gc <- callSet[[ii]] data <- extractTotalAndFracB(ce, units=units, drop=TRUE) calls <- extractGenotypes(gc, units=units, drop=TRUE) col <- c(AA=1, AB=2, BB=3)[calls] plot(pos, data[,"freqB"], col=col, cex=4, ylim=c(0,1)) stext(side=3, pos=0, getName(ce)) stext(side=3, pos=1, chrTag) } # for (ii ...) })
/inst/testScripts/system/chipTypes/Mapping50K_Hind240/12.justSNPRMA,CRLMM.R
no_license
HenrikBengtsson/aroma.affymetrix
R
false
false
1,894
r
library("aroma.affymetrix") verbose <- Arguments$getVerbose(-8, timestamp=TRUE) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # Setup data set # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - dataSet <- "HapMap,CEU,testset" chipType <- "Mapping50K_Hind240" cdf <- AffymetrixCdfFile$byChipType(chipType) csR <- AffymetrixCelSet$byName(dataSet, cdf=cdf) print(csR) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # SNPRMA # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ces <- justSNPRMA(csR, normalizeToHapmap=TRUE, returnESet=FALSE, verbose=verbose) print(ces) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # CRLMM # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - recalibrate <- TRUE crlmm <- CrlmmModel(ces, tags="*,oligo", recalibrate=recalibrate) print(crlmm) units <- fit(crlmm, ram="oligo", verbose=verbose) str(units) callSet <- getCallSet(crlmm) print(callSet) confSet <- getConfidenceScoreSet(crlmm) print(confSet) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # Plot along genome # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - chr <- 2 chrTag <- sprintf("Chr%02d", chr) ugp <- getAromaUgpFile(cdf) units <- getUnitsOnChromosome(ugp, chromosome=chr) pos <- getPositions(ugp, units=units)/1e6 toPNG(getFullName(ces), tags=c(chrTag), { layout(matrix(seq_along(ces), ncol=2, byrow=TRUE)) par(mar=c(3.5,4,1.5,1), mgp=c(1.8,0.5,0), pch=".") for (ii in seq_along(ces)) { ce <- ces[[ii]] gc <- callSet[[ii]] data <- extractTotalAndFracB(ce, units=units, drop=TRUE) calls <- extractGenotypes(gc, units=units, drop=TRUE) col <- c(AA=1, AB=2, BB=3)[calls] plot(pos, data[,"freqB"], col=col, cex=4, ylim=c(0,1)) stext(side=3, pos=0, getName(ce)) stext(side=3, pos=1, chrTag) } # for (ii ...) })
/RawGeno/man/Treecon.inv.Rd
no_license
arrigon/RawGeno
R
false
false
898
rd
# This script uses the context likelihood of relatedness (CLR) network inference method to predict interactions between TF genes and gene modules. ''' CLR network inference: Faith JJ, Hayete B, Thaden JT, Mogno I, Wierzbowski J, Cottarel G, et al. Large-scale mapping and validation of Escherichia coli transcriptional regulation from a compendium of expression profiles. PLoS biology. 2007;5(1):e8. minet package: Meyer PE, Lafitte F, Bontempi G. minet: AR/Bioconductor package for inferring large transcriptional networks using mutual information. BMC bioinformatics. 2008;9(1):461. ''' setwd("/path/to/analysis/directory/") # Import and format data - 5 gene clusters and 131 TFs expdat <- read.csv("cluster_TF_expression.csv", row.names = 1) expdat <- as.matrix(expdat) # Run minet with CLR inference library(minet) set.seed(345) mim <- build.mim(t(expdat), estimator = "pearson") net <- minet::clr(mim) net_df <- data.frame(row=rownames(net)[row(net)], col=colnames(net)[col(net)], corr=c(net)) # Convert correlation matrix to list net_df<-net_df[!(net_df$corr==0),] # Logic index to remove zero correlation net_df0 <- net_df[grep("module*", net_df$col), ] net_df0 <- net_df0[grep("Glyma*", net_df0$row), ] net_df0 <- net_df0[order(-net_df0$corr),] # sort by highest weight write.csv(net_df0, file = "clr_network.csv")
/step5_clr_inference.R
no_license
zhuqingquan5510/rsv3-network
R
false
false
1,333
r
# This script uses the context likelihood of relatedness (CLR) network inference method to predict interactions between TF genes and gene modules. ''' CLR network inference: Faith JJ, Hayete B, Thaden JT, Mogno I, Wierzbowski J, Cottarel G, et al. Large-scale mapping and validation of Escherichia coli transcriptional regulation from a compendium of expression profiles. PLoS biology. 2007;5(1):e8. minet package: Meyer PE, Lafitte F, Bontempi G. minet: AR/Bioconductor package for inferring large transcriptional networks using mutual information. BMC bioinformatics. 2008;9(1):461. ''' setwd("/path/to/analysis/directory/") # Import and format data - 5 gene clusters and 131 TFs expdat <- read.csv("cluster_TF_expression.csv", row.names = 1) expdat <- as.matrix(expdat) # Run minet with CLR inference library(minet) set.seed(345) mim <- build.mim(t(expdat), estimator = "pearson") net <- minet::clr(mim) net_df <- data.frame(row=rownames(net)[row(net)], col=colnames(net)[col(net)], corr=c(net)) # Convert correlation matrix to list net_df<-net_df[!(net_df$corr==0),] # Logic index to remove zero correlation net_df0 <- net_df[grep("module*", net_df$col), ] net_df0 <- net_df0[grep("Glyma*", net_df0$row), ] net_df0 <- net_df0[order(-net_df0$corr),] # sort by highest weight write.csv(net_df0, file = "clr_network.csv")
#=========================================================================== # BASIC TEXAS SETUP #=========================================================================== root = getwd() while(basename(root) != "texas") { root = dirname(root) } library(tidyverse) library(lubridate) library(readxl) source(file.path(root, "code", "paths.R")) source(file.path(root, "code", "texas_constants.R")) options(scipen = 99999) #============================================================================== # read in royalty data #============================================================================== # From PIA on negative payments: The “negative payment” isn’t a payment at all. # It’s actually the result of the company overpaying in the past # (something that happens quite often, in fact, and sometimes they only # catch it after a long time), so it’s an adjustment. If you look at the # records, you’ll see that even when it happens several times to the same # company, they usually end up with a positive amount. There are exceptions, # such as if they overpaid, but now the lease has expired, and the last # recorded transaction was a refund, then it shows as negative. 
glo_royalties <- file.path(raw_payments, "royalties_2019.01.02.xlsx") %>% read_excel() %>% gather(PaymentDate, PaymentAmount, `200501`:`201810`) %>% rename(Lease_Number = MFN, Lease_Status = Status, Production_Type = Product, TotalPaymentAmount = `Total Of SumOfcurPaymentAmount`, Effective_Date = `Effective Date`, Status_Date = `Status Date`)%>% mutate(PaymentDate = ymd(paste0(PaymentDate, "01"))) # find when payments start and end # filter out entries outside of the start and end payment dates glo_royalties <- glo_royalties %>% group_by(Lease_Number, Production_Type) %>% arrange(Lease_Number, PaymentDate) %>% mutate(StartPaymentDate = PaymentDate[which(!is.na(PaymentAmount))[1]]) %>% arrange(Lease_Number, desc(PaymentDate)) %>% mutate(EndPaymentDate = PaymentDate[which(!is.na(PaymentAmount))[1]]) %>% filter(PaymentDate >= StartPaymentDate, PaymentDate <= EndPaymentDate) %>% arrange(Lease_Number, Production_Type, PaymentDate) %>% mutate(PaymentAmount = ifelse(is.na(PaymentAmount), 0, PaymentAmount)) # IMPORTANT: since we believe the GLO royalty payment file contains just the # payments made to the state of texas, we need to *double* the RAL royalty # revenue numbers, so that we are including the (presumably) unmeasured # surface owner share load(file.path(gen, "leases_state.Rda")) leases <- leases_state %>% as.data.frame %>% select(Lease_Number, Type, Effective_Date, Oil_Royalty, Gas_Royalty) %>% as_tibble # note: there are now some dupes at the Lease_Number, PaymentDate, # Production_Type level, driven by lease status, so we drop lease status from # this file and deduplicate here glo_royalties <- glo_royalties %>% inner_join(select(leases, Lease_Number, Type)) %>% mutate(PaymentAmount = if_else(Type == "RAL", PaymentAmount * 2, PaymentAmount)) %>% select(-Type) %>% ungroup %>% select(-Lease_Status, -Status_Date, -Effective_Date) %>% unique #============================================================================== # turn royalty stream into inferred production 
stream using royalty rate # information and contemporaneous price information #============================================================================== load(file.path(gen, "prices.Rda")) prices <- prices %>% rename(Gas_Price = gas, Oil_Price = oil) # idea: gas revenue in month t = gas_royalty * price_gas * gas_production, so # gas_production = gas_revenue / (gas_price * gas_royalty) # right now dropping implied production that occurs before a lease's # effective date # note: at some point we should decide what dates we actually want to use, but # at present (Jan 2019) the royalty data goes through October 2018, and the # price data goes through September 2018. To make things simple and # conservative, we cut things off at June 2018 prices <- prices %>% filter(Date <= LastProductionDate) glo_royalties <- glo_royalties %>% filter(PaymentDate <= LastProductionDate) inferred_output <- glo_royalties %>% ungroup %>% select(Lease_Number, PaymentDate, Production_Type, PaymentAmount) %>% spread(Production_Type, PaymentAmount) %>% replace_na(list(Gas = 0, Oil = 0)) %>% rename(Oil_Revenue = Oil, Gas_Revenue = Gas) %>% left_join(prices, by = c("PaymentDate" = "Date")) %>% inner_join(leases, by = "Lease_Number") %>% mutate(Oil_Production = Oil_Revenue / (Oil_Price * Oil_Royalty), Gas_Production = Gas_Revenue / (Gas_Price * Gas_Royalty)) %>% select(Lease_Number, Type, Effective_Date, PaymentDate, Oil_Production, Gas_Production) %>% gather(Production_Type, Production, -Lease_Number, -Type, -Effective_Date, -PaymentDate) %>% group_by(Lease_Number) %>% mutate(HasEarlyProduction = max(PaymentDate < Effective_Date)) %>% filter(PaymentDate >= Effective_Date) first_date_oil <- inferred_output %>% ungroup %>% filter(Production > 0, Production_Type == "Oil_Production") %>% arrange(Lease_Number, PaymentDate) %>% group_by(Lease_Number) %>% summarize(FirstOilDate = min(PaymentDate)) first_date_gas <- inferred_output %>% ungroup %>% filter(Production > 0, Production_Type == 
"Gas_Production") %>% arrange(Lease_Number, PaymentDate) %>% group_by(Lease_Number) %>% summarize(FirstGasDate = min(PaymentDate)) inferred_output <- inferred_output %>% left_join(first_date_oil) %>% left_join(first_date_gas) inferred_oil24 <- inferred_output %>% filter(!is.na(FirstOilDate), Production_Type == "Oil_Production", PaymentDate >= FirstOilDate) %>% arrange(Lease_Number, PaymentDate) %>% group_by(Lease_Number) %>% mutate(n_oil = n()) %>% filter(row_number() <= 24) %>% mutate(oil24 = sum(Production)) %>% filter(row_number() == 1) %>% ungroup %>% select(Lease_Number, n_oil, oil24) inferred_gas24 <- inferred_output %>% filter(!is.na(FirstGasDate), Production_Type == "Gas_Production", PaymentDate >= FirstGasDate) %>% arrange(Lease_Number, PaymentDate) %>% group_by(Lease_Number) %>% mutate(n_gas = n()) %>% filter(row_number() <= 24) %>% mutate(gas24 = sum(Production)) %>% filter(row_number() == 1) %>% ungroup %>% select(Lease_Number, n_gas, gas24) total_inferred_output <- inferred_output %>% mutate(delay = round(as.numeric(PaymentDate - Effective_Date) / 30), discount_factor = 1 / ((1 + modiscount) ^ delay), Discounted_Production = Production * discount_factor) %>% group_by(Lease_Number, HasEarlyProduction, Production_Type) %>% summarize(TotalProduction = sum(Production), TotalDiscountedProduction = sum(Discounted_Production)) %>% ungroup inferred_output_oil <- total_inferred_output %>% filter(Production_Type == "Oil_Production") %>% rename(OilProduction = TotalProduction, DiscountedOilProduction = TotalDiscountedProduction) %>% select(-Production_Type) inferred_output_gas <- total_inferred_output %>% filter(Production_Type == "Gas_Production") %>% rename(GasProduction = TotalProduction, DiscountedGasProduction = TotalDiscountedProduction) %>% select(-Production_Type) glo_output <- full_join(inferred_output_oil, inferred_output_gas) # go back and compute discounted revenue in the same way total_royalties <- glo_royalties %>% ungroup %>% select(Lease_Number, 
PaymentDate, Production_Type, PaymentAmount) %>% spread(Production_Type, PaymentAmount) %>% replace_na(list(Gas = 0, Oil = 0)) %>% rename(Oil_Revenue = Oil, Gas_Revenue = Gas) %>% inner_join(leases, by = "Lease_Number") %>% select(Lease_Number, Type, Effective_Date, PaymentDate, Oil_Revenue, Gas_Revenue) %>% gather(Production_Type, Revenue, -Lease_Number, -Type, -Effective_Date, -PaymentDate) %>% group_by(Lease_Number) %>% mutate(HasEarlyProduction = max(PaymentDate < Effective_Date)) %>% filter(PaymentDate >= Effective_Date) %>% mutate(delay = round(as.numeric(PaymentDate - Effective_Date) / 30), discount_factor = 1 / ((1 + modiscount) ^ delay), Discounted_Revenue = Revenue * discount_factor) %>% group_by(Lease_Number, HasEarlyProduction, Production_Type) %>% summarize(TotalRevenue = sum(Revenue), TotalDiscountedRevenue = sum(Discounted_Revenue)) %>% ungroup total_royalties_oil <- total_royalties %>% filter(Production_Type == "Oil_Revenue") %>% rename(OilRevenue = TotalRevenue, DiscountedOilRevenue = TotalDiscountedRevenue) %>% select(-Production_Type) total_royalties_gas <- total_royalties %>% filter(Production_Type == "Gas_Revenue") %>% rename(GasRevenue = TotalRevenue, DiscountedGasRevenue = TotalDiscountedRevenue) %>% select(-Production_Type) # combine things and zero out the 3 wells with negative values glo_output <- full_join(full_join(inferred_output_oil, inferred_output_gas), full_join(total_royalties_oil, total_royalties_gas)) %>% left_join(inferred_oil24) %>% left_join(inferred_gas24) %>% replace_na(list(n_oil = 0, n_gas = 0, oil24 = 0, gas24 = 0)) %>% mutate_at(vars(contains("Revenue"), contains("OilProduction"), contains("GasProduction")), funs(pmax(., 0))) save(glo_output, file = file.path(gen, "glo_output.Rda"))
/code/Data_Cleaning/glo_output.R
no_license
vmolchan/public_cs_texas
R
false
false
9,583
r
#=========================================================================== # BASIC TEXAS SETUP #=========================================================================== root = getwd() while(basename(root) != "texas") { root = dirname(root) } library(tidyverse) library(lubridate) library(readxl) source(file.path(root, "code", "paths.R")) source(file.path(root, "code", "texas_constants.R")) options(scipen = 99999) #============================================================================== # read in royalty data #============================================================================== # From PIA on negative payments: The “negative payment” isn’t a payment at all. # It’s actually the result of the company overpaying in the past # (something that happens quite often, in fact, and sometimes they only # catch it after a long time), so it’s an adjustment. If you look at the # records, you’ll see that even when it happens several times to the same # company, they usually end up with a positive amount. There are exceptions, # such as if they overpaid, but now the lease has expired, and the last # recorded transaction was a refund, then it shows as negative. 
glo_royalties <- file.path(raw_payments, "royalties_2019.01.02.xlsx") %>% read_excel() %>% gather(PaymentDate, PaymentAmount, `200501`:`201810`) %>% rename(Lease_Number = MFN, Lease_Status = Status, Production_Type = Product, TotalPaymentAmount = `Total Of SumOfcurPaymentAmount`, Effective_Date = `Effective Date`, Status_Date = `Status Date`)%>% mutate(PaymentDate = ymd(paste0(PaymentDate, "01"))) # find when payments start and end # filter out entries outside of the start and end payment dates glo_royalties <- glo_royalties %>% group_by(Lease_Number, Production_Type) %>% arrange(Lease_Number, PaymentDate) %>% mutate(StartPaymentDate = PaymentDate[which(!is.na(PaymentAmount))[1]]) %>% arrange(Lease_Number, desc(PaymentDate)) %>% mutate(EndPaymentDate = PaymentDate[which(!is.na(PaymentAmount))[1]]) %>% filter(PaymentDate >= StartPaymentDate, PaymentDate <= EndPaymentDate) %>% arrange(Lease_Number, Production_Type, PaymentDate) %>% mutate(PaymentAmount = ifelse(is.na(PaymentAmount), 0, PaymentAmount)) # IMPORTANT: since we believe the GLO royalty payment file contains just the # payments made to the state of texas, we need to *double* the RAL royalty # revenue numbers, so that we are including the (presumably) unmeasured # surface owner share load(file.path(gen, "leases_state.Rda")) leases <- leases_state %>% as.data.frame %>% select(Lease_Number, Type, Effective_Date, Oil_Royalty, Gas_Royalty) %>% as_tibble # note: there are now some dupes at the Lease_Number, PaymentDate, # Production_Type level, driven by lease status, so we drop lease status from # this file and deduplicate here glo_royalties <- glo_royalties %>% inner_join(select(leases, Lease_Number, Type)) %>% mutate(PaymentAmount = if_else(Type == "RAL", PaymentAmount * 2, PaymentAmount)) %>% select(-Type) %>% ungroup %>% select(-Lease_Status, -Status_Date, -Effective_Date) %>% unique #============================================================================== # turn royalty stream into inferred production 
stream using royalty rate # information and contemporaneous price information #============================================================================== load(file.path(gen, "prices.Rda")) prices <- prices %>% rename(Gas_Price = gas, Oil_Price = oil) # idea: gas revenue in month t = gas_royalty * price_gas * gas_production, so # gas_production = gas_revenue / (gas_price * gas_royalty) # right now dropping implied production that occurs before a lease's # effective date # note: at some point we should decide what dates we actually want to use, but # at present (Jan 2019) the royalty data goes through October 2018, and the # price data goes through September 2018. To make things simple and # conservative, we cut things off at June 2018 prices <- prices %>% filter(Date <= LastProductionDate) glo_royalties <- glo_royalties %>% filter(PaymentDate <= LastProductionDate) inferred_output <- glo_royalties %>% ungroup %>% select(Lease_Number, PaymentDate, Production_Type, PaymentAmount) %>% spread(Production_Type, PaymentAmount) %>% replace_na(list(Gas = 0, Oil = 0)) %>% rename(Oil_Revenue = Oil, Gas_Revenue = Gas) %>% left_join(prices, by = c("PaymentDate" = "Date")) %>% inner_join(leases, by = "Lease_Number") %>% mutate(Oil_Production = Oil_Revenue / (Oil_Price * Oil_Royalty), Gas_Production = Gas_Revenue / (Gas_Price * Gas_Royalty)) %>% select(Lease_Number, Type, Effective_Date, PaymentDate, Oil_Production, Gas_Production) %>% gather(Production_Type, Production, -Lease_Number, -Type, -Effective_Date, -PaymentDate) %>% group_by(Lease_Number) %>% mutate(HasEarlyProduction = max(PaymentDate < Effective_Date)) %>% filter(PaymentDate >= Effective_Date) first_date_oil <- inferred_output %>% ungroup %>% filter(Production > 0, Production_Type == "Oil_Production") %>% arrange(Lease_Number, PaymentDate) %>% group_by(Lease_Number) %>% summarize(FirstOilDate = min(PaymentDate)) first_date_gas <- inferred_output %>% ungroup %>% filter(Production > 0, Production_Type == 
"Gas_Production") %>% arrange(Lease_Number, PaymentDate) %>% group_by(Lease_Number) %>% summarize(FirstGasDate = min(PaymentDate)) inferred_output <- inferred_output %>% left_join(first_date_oil) %>% left_join(first_date_gas) inferred_oil24 <- inferred_output %>% filter(!is.na(FirstOilDate), Production_Type == "Oil_Production", PaymentDate >= FirstOilDate) %>% arrange(Lease_Number, PaymentDate) %>% group_by(Lease_Number) %>% mutate(n_oil = n()) %>% filter(row_number() <= 24) %>% mutate(oil24 = sum(Production)) %>% filter(row_number() == 1) %>% ungroup %>% select(Lease_Number, n_oil, oil24) inferred_gas24 <- inferred_output %>% filter(!is.na(FirstGasDate), Production_Type == "Gas_Production", PaymentDate >= FirstGasDate) %>% arrange(Lease_Number, PaymentDate) %>% group_by(Lease_Number) %>% mutate(n_gas = n()) %>% filter(row_number() <= 24) %>% mutate(gas24 = sum(Production)) %>% filter(row_number() == 1) %>% ungroup %>% select(Lease_Number, n_gas, gas24) total_inferred_output <- inferred_output %>% mutate(delay = round(as.numeric(PaymentDate - Effective_Date) / 30), discount_factor = 1 / ((1 + modiscount) ^ delay), Discounted_Production = Production * discount_factor) %>% group_by(Lease_Number, HasEarlyProduction, Production_Type) %>% summarize(TotalProduction = sum(Production), TotalDiscountedProduction = sum(Discounted_Production)) %>% ungroup inferred_output_oil <- total_inferred_output %>% filter(Production_Type == "Oil_Production") %>% rename(OilProduction = TotalProduction, DiscountedOilProduction = TotalDiscountedProduction) %>% select(-Production_Type) inferred_output_gas <- total_inferred_output %>% filter(Production_Type == "Gas_Production") %>% rename(GasProduction = TotalProduction, DiscountedGasProduction = TotalDiscountedProduction) %>% select(-Production_Type) glo_output <- full_join(inferred_output_oil, inferred_output_gas) # go back and compute discounted revenue in the same way total_royalties <- glo_royalties %>% ungroup %>% select(Lease_Number, 
PaymentDate, Production_Type, PaymentAmount) %>% spread(Production_Type, PaymentAmount) %>% replace_na(list(Gas = 0, Oil = 0)) %>% rename(Oil_Revenue = Oil, Gas_Revenue = Gas) %>% inner_join(leases, by = "Lease_Number") %>% select(Lease_Number, Type, Effective_Date, PaymentDate, Oil_Revenue, Gas_Revenue) %>% gather(Production_Type, Revenue, -Lease_Number, -Type, -Effective_Date, -PaymentDate) %>% group_by(Lease_Number) %>% mutate(HasEarlyProduction = max(PaymentDate < Effective_Date)) %>% filter(PaymentDate >= Effective_Date) %>% mutate(delay = round(as.numeric(PaymentDate - Effective_Date) / 30), discount_factor = 1 / ((1 + modiscount) ^ delay), Discounted_Revenue = Revenue * discount_factor) %>% group_by(Lease_Number, HasEarlyProduction, Production_Type) %>% summarize(TotalRevenue = sum(Revenue), TotalDiscountedRevenue = sum(Discounted_Revenue)) %>% ungroup total_royalties_oil <- total_royalties %>% filter(Production_Type == "Oil_Revenue") %>% rename(OilRevenue = TotalRevenue, DiscountedOilRevenue = TotalDiscountedRevenue) %>% select(-Production_Type) total_royalties_gas <- total_royalties %>% filter(Production_Type == "Gas_Revenue") %>% rename(GasRevenue = TotalRevenue, DiscountedGasRevenue = TotalDiscountedRevenue) %>% select(-Production_Type) # combine things and zero out the 3 wells with negative values glo_output <- full_join(full_join(inferred_output_oil, inferred_output_gas), full_join(total_royalties_oil, total_royalties_gas)) %>% left_join(inferred_oil24) %>% left_join(inferred_gas24) %>% replace_na(list(n_oil = 0, n_gas = 0, oil24 = 0, gas24 = 0)) %>% mutate_at(vars(contains("Revenue"), contains("OilProduction"), contains("GasProduction")), funs(pmax(., 0))) save(glo_output, file = file.path(gen, "glo_output.Rda"))
testlist <- list(x = structure(c(0, 1.39234637988959e+188, 0, 8.64639514365796e-275, 1.98730118526674e-168, 5.28313590379074e-312, 2.00689190195556e-314, 4.62271829730836e-178, 3.41641322345409e-312, 0, 0, 3.45972424407988e-168, 3.81867046820952e-152, 6.80705808407373e-145, 5.68910722257131e-304, 8.37633803288811e-165, 4.15521272617805e-149, 4.41367626449033e-274, 1.02491746460854e-164, 444271.454154523, 8.49986853871755e-242, 3.07175433988878e-319, 2.19286062667273e-138, 1.5414686408377e-139, 5.36676071891531e-137, 5.22628019552399e-299, 5.4334696901187e-312, 2.0282964101554e-130, 1.19687218712775e-309, 4.82364851552269e-246 ), .Dim = c(3L, 10L))) result <- do.call(bravo:::colSumSq_matrix,testlist) str(result)
/bravo/inst/testfiles/colSumSq_matrix/libFuzzer_colSumSq_matrix/colSumSq_matrix_valgrind_files/1609959812-test.R
no_license
akhikolla/updated-only-Issues
R
false
false
728
r
testlist <- list(x = structure(c(0, 1.39234637988959e+188, 0, 8.64639514365796e-275, 1.98730118526674e-168, 5.28313590379074e-312, 2.00689190195556e-314, 4.62271829730836e-178, 3.41641322345409e-312, 0, 0, 3.45972424407988e-168, 3.81867046820952e-152, 6.80705808407373e-145, 5.68910722257131e-304, 8.37633803288811e-165, 4.15521272617805e-149, 4.41367626449033e-274, 1.02491746460854e-164, 444271.454154523, 8.49986853871755e-242, 3.07175433988878e-319, 2.19286062667273e-138, 1.5414686408377e-139, 5.36676071891531e-137, 5.22628019552399e-299, 5.4334696901187e-312, 2.0282964101554e-130, 1.19687218712775e-309, 4.82364851552269e-246 ), .Dim = c(3L, 10L))) result <- do.call(bravo:::colSumSq_matrix,testlist) str(result)
library( dplyr, warn.conflicts = FALSE ) library( ggplot2 , warn.conflicts = FALSE ) source("../scripts/plotting/mytheme.R") argv <- commandArgs( trailingOnly = TRUE ) datafile <- argv[1] outplot <- argv[2] mactvalues <- as.numeric( unlist( strsplit( argv[3], " " ) ) ) nc <- as.numeric( argv[4] ) # Load data and create the variable for generating the facet labels: load( datafile ) data <- data %>% filter( mact %in% mactvalues ) #data <- sample_n( data, nrow(data)/100 ) data$mact2 <- factor(data$mact, labels = paste0("max[act] : ", levels(factor(data$mact)) ) ) # Remove all observations with a connectivity below 100 from the instantaneous step data. # These can cause artefacts from cell breaking. remove.rows <- data$conn < 1 print( paste0( "WARNING --- Removing ", sum(remove.rows), " rows from data with broken cells..." ) ) data <- data[ !remove.rows, ] # Find the maximum density (which will be the max of the y axis later on; need this # to get proper positioning of the lambda act labels) ymax <- 0 for( m in unique(data$mact) ){ dtmp <- data %>% filter( mact == m ) %>% filter( lact == min(lact) ) %>% as.data.frame() h <- hist(dtmp$v, plot = FALSE ) if( max(h$density)*1.1 > ymax ){ ymax <- max(h$density)*1.1 } } # For each max act value, generate positions for the corresponding range of lambda act values. 
# used later for plotting by geom_text data2 <- data.frame() annotationdata <- data.frame() for( m in unique( data$mact ) ){ dtmp <- data %>% filter( mact == m ) %>% as.data.frame() dtmp$lact2 <- ( dtmp$lact - min(dtmp$lact) ) / ( max( dtmp$lact ) - min(dtmp$lact) ) adtmp <- data.frame( mact = m, mact2 = unique(dtmp$mact2), lact = unique( dtmp$lact ), lact2 = unique( dtmp$lact2), v = 0.92*max(dtmp$v) ) adtmp$pos <- seq( 0.85*ymax, 0.05*ymax, length.out = length( unique( dtmp$lact ) ) ) data2 <- rbind( data2, dtmp) annotationdata <- rbind( annotationdata, adtmp ) } annotationdata2 <- annotationdata %>% group_by( mact, mact2 ) %>% summarise( v = mean(v), pos = 0.95*ymax ) # Create the plot p <- ggplot( data2, aes( x = v, y = ..density.., color = lact2, group = lact ) ) + geom_freqpoly(bins=50, show.legend=FALSE) + facet_wrap( ~mact2, scales="free_x", ncol = nc, labeller=label_parsed )+ scale_color_gradientn( colors=c("red4","red","darkorange1","orange")) + labs( x = "instantaneous speed (pixels/MCS)", y = "density" ) + scale_y_continuous( limits = c(0,ymax), expand=c(0,0) ) + scale_x_continuous( expand = c(0,0) ) + # sizes in geom_text are in mm = 14/5 pt, so multiply textsize in pt with 5/14 to get # proper size in geom_text. geom_text( data = annotationdata, aes( y = pos, label = lact ), size=0.87*mytheme$text$size*(5/14), show.legend = FALSE ) + geom_text(data = annotationdata2, aes( y = pos, x = v, group = mact), color = "black", size=mytheme$text$size*(5/14), label = "lambda[act]", parse = TRUE ) + mytheme + theme( legend.position = "right", axis.line.x = element_blank(), panel.spacing.y = unit(-1, "mm") ) npanels <- length( unique( data$mact ) ) npanelrows <- ceiling( npanels/nc ) pheight <- 3.5*npanelrows+0.5 pw <- 4.5*nc+1 if( pw > 18 ){pw <- 18} ggsave( outplot, width = pw, height = pheight, units="cm")
/figures/scripts/plotting/plot-instantaneous-speed-MactLact-facet-selection.R
permissive
ingewortel/2020-ucsp
R
false
false
3,363
r
library( dplyr, warn.conflicts = FALSE ) library( ggplot2 , warn.conflicts = FALSE ) source("../scripts/plotting/mytheme.R") argv <- commandArgs( trailingOnly = TRUE ) datafile <- argv[1] outplot <- argv[2] mactvalues <- as.numeric( unlist( strsplit( argv[3], " " ) ) ) nc <- as.numeric( argv[4] ) # Load data and create the variable for generating the facet labels: load( datafile ) data <- data %>% filter( mact %in% mactvalues ) #data <- sample_n( data, nrow(data)/100 ) data$mact2 <- factor(data$mact, labels = paste0("max[act] : ", levels(factor(data$mact)) ) ) # Remove all observations with a connectivity below 100 from the instantaneous step data. # These can cause artefacts from cell breaking. remove.rows <- data$conn < 1 print( paste0( "WARNING --- Removing ", sum(remove.rows), " rows from data with broken cells..." ) ) data <- data[ !remove.rows, ] # Find the maximum density (which will be the max of the y axis later on; need this # to get proper positioning of the lambda act labels) ymax <- 0 for( m in unique(data$mact) ){ dtmp <- data %>% filter( mact == m ) %>% filter( lact == min(lact) ) %>% as.data.frame() h <- hist(dtmp$v, plot = FALSE ) if( max(h$density)*1.1 > ymax ){ ymax <- max(h$density)*1.1 } } # For each max act value, generate positions for the corresponding range of lambda act values. 
# used later for plotting by geom_text data2 <- data.frame() annotationdata <- data.frame() for( m in unique( data$mact ) ){ dtmp <- data %>% filter( mact == m ) %>% as.data.frame() dtmp$lact2 <- ( dtmp$lact - min(dtmp$lact) ) / ( max( dtmp$lact ) - min(dtmp$lact) ) adtmp <- data.frame( mact = m, mact2 = unique(dtmp$mact2), lact = unique( dtmp$lact ), lact2 = unique( dtmp$lact2), v = 0.92*max(dtmp$v) ) adtmp$pos <- seq( 0.85*ymax, 0.05*ymax, length.out = length( unique( dtmp$lact ) ) ) data2 <- rbind( data2, dtmp) annotationdata <- rbind( annotationdata, adtmp ) } annotationdata2 <- annotationdata %>% group_by( mact, mact2 ) %>% summarise( v = mean(v), pos = 0.95*ymax ) # Create the plot p <- ggplot( data2, aes( x = v, y = ..density.., color = lact2, group = lact ) ) + geom_freqpoly(bins=50, show.legend=FALSE) + facet_wrap( ~mact2, scales="free_x", ncol = nc, labeller=label_parsed )+ scale_color_gradientn( colors=c("red4","red","darkorange1","orange")) + labs( x = "instantaneous speed (pixels/MCS)", y = "density" ) + scale_y_continuous( limits = c(0,ymax), expand=c(0,0) ) + scale_x_continuous( expand = c(0,0) ) + # sizes in geom_text are in mm = 14/5 pt, so multiply textsize in pt with 5/14 to get # proper size in geom_text. geom_text( data = annotationdata, aes( y = pos, label = lact ), size=0.87*mytheme$text$size*(5/14), show.legend = FALSE ) + geom_text(data = annotationdata2, aes( y = pos, x = v, group = mact), color = "black", size=mytheme$text$size*(5/14), label = "lambda[act]", parse = TRUE ) + mytheme + theme( legend.position = "right", axis.line.x = element_blank(), panel.spacing.y = unit(-1, "mm") ) npanels <- length( unique( data$mact ) ) npanelrows <- ceiling( npanels/nc ) pheight <- 3.5*npanelrows+0.5 pw <- 4.5*nc+1 if( pw > 18 ){pw <- 18} ggsave( outplot, width = pw, height = pheight, units="cm")
\name{RGB2HSL} \alias{RGB2HSL} \title{Convert RGB coordinates to HSL} \description{\code{RGB2HSL} Converts RGB coordinates to HSL. } \usage{RGB2HSL(RGBmatrix) } \arguments{ \item{RGBmatrix}{ RGB coordinates} } \value{ HSL coordinates } \source{ Logicol S.r.l., 2014 EasyRGB color search engine \url{http://www.easyrgb.com/} } \references{ Logicol S.r.l., 2014 EasyRGB color search engine \url{http://www.easyrgb.com/} } \author{Jose Gama} \examples{ RGB<-c(124,63,78) RGB2HSL(RGB) } \keyword{datasets}
/man/RGB2HSL.Rd
no_license
playwar/colorscience
R
false
false
507
rd
\name{RGB2HSL} \alias{RGB2HSL} \title{Convert RGB coordinates to HSL} \description{\code{RGB2HSL} Converts RGB coordinates to HSL. } \usage{RGB2HSL(RGBmatrix) } \arguments{ \item{RGBmatrix}{ RGB coordinates} } \value{ HSL coordinates } \source{ Logicol S.r.l., 2014 EasyRGB color search engine \url{http://www.easyrgb.com/} } \references{ Logicol S.r.l., 2014 EasyRGB color search engine \url{http://www.easyrgb.com/} } \author{Jose Gama} \examples{ RGB<-c(124,63,78) RGB2HSL(RGB) } \keyword{datasets}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/convertOne.r \name{convertOne} \alias{convertOne} \title{Convert an Excel spreadsheet to match the CoVaxON schema} \arguments{ \item{file}{Character scalar. Names the path to an Excel file to convert.} } \value{ Void } \description{ \code{convertOne} reads an Excel file and converts the data to match the schema required for upload into CoVaxON. The converted file is saved as CSV and given the same name as the input Excel file. }
/man/convertOne.Rd
permissive
DurhamRegionHARP/verto2covax
R
false
true
511
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/convertOne.r \name{convertOne} \alias{convertOne} \title{Convert an Excel spreadsheet to match the CoVaxON schema} \arguments{ \item{file}{Character scalar. Names the path to an Excel file to convert.} } \value{ Void } \description{ \code{convertOne} reads an Excel file and converts the data to match the schema required for upload into CoVaxON. The converted file is saved as CSV and given the same name as the input Excel file. }
# rm(list=ls()) library(jagsUI) library(dplyr) library(tidyverse) library(lubridate) library(PKPDmisc) ################################################################# ########## BUGS CODE ############################################ ################################################################# # Define the model in the BUGS language and write a text file sink("flathead_ppcp.txt") cat(" model { # Likelihood: # Level-1 of the model for (i in 1:80){ y[i] ~ dnorm(mu[i], tau) mu[i] <- beta0 + site[i] + month[i] } # Priors beta0 ~ dnorm(0, 0.0001) sigma.e ~ dunif(0, 1) sigma.site ~ dunif(0,1) sigma.month ~ dunif(0,1) # Derived quantities tau <- pow(sigma.e,-2) tau.site <- pow(sigma.site,-2) tau.month <- pow(sigma.month,-2) } # end model ",fill = TRUE) sink() # Read in data for PPCPs dat <- read.csv("../cleaned_data/ppcp.csv", header = TRUE) dat_numeric <- dat %>% filter(!(SITE %in% c("FI2", "DU2", "HO1"))) %>% filter(SITE %in% c("BD", "FLBS", "SA", "SJ", "BO", "LK", "WF", "WS", "DA", "FI")) %>% mutate(time_point = ifelse(MONTH == "MAY", "MAY_1", NA), time_point = ifelse(MONTH == "JUNE" & between(TIME, 1, 6), "MAY_1", time_point), time_point = ifelse(MONTH == "JUNE" & between(TIME, 7, 25), "JUNE_1", time_point), time_point = ifelse(MONTH == "JUNE" & between(TIME, 26, 30), "JUNE_2", time_point), time_point = ifelse(MONTH == "JULY" & between(TIME, 1, 11), "JUNE_2", time_point), time_point = ifelse(MONTH == "JULY" & between(TIME, 12, 23), "JULY_1", time_point), time_point = ifelse(MONTH == "JULY" & between(TIME, 24, 31), "JULY_2", time_point), time_point = ifelse(MONTH == "AUGUST" & between(TIME, 1, 8), "JULY_2", time_point), time_point = ifelse(MONTH == "AUGUST" & between(TIME, 9, 21), "AUGUST_1", time_point), time_point = ifelse(MONTH == "AUGUST" & between(TIME, 22, 31), "AUGUST_2", time_point), time_point = ifelse(MONTH == "SEPTEMBER" & between(TIME, 1, 5), "AUGUST_2", time_point), time_point = ifelse(MONTH == "SEPTEMBER" & between(TIME, 6, 23), "SEPTEMBER_1", 
time_point), time_point = ifelse(MONTH == "SEPTEMBER" & between(TIME, 24, 30), "SEPTEMBER_2", time_point), time_point = ifelse(MONTH == "OCTOBER", "SEPTEMBER_2", time_point), time_point = as.factor(time_point)) %>% group_by(SITE, time_point) %>% summarize(y_raw = sum(CONCENTRATION, na.rm = TRUE)) %>% ungroup() %>% mutate(y = log10(y_raw+1)) %>% #filter(y <= 2) %>% #mutate(y = scale(y_raw, scale = TRUE, center = TRUE)) %>% mutate(site = as.numeric(SITE), month =as.numeric(time_point)) %>% select(-time_point, -SITE) %>% #strip_attributes(c("scaled:center", "scaled:scale")) %>% mutate(y = as.vector(y)) %>% select(-y_raw) %>% dplyr::select(y, site, month) %>% as.list() #### Playing with variance ratios ESS <- dat dat_numeric$time_point <- factor(dat_numeric$time_point, levels = c("MAY_1", "JUNE_1", "JUNE_2", "JULY_1", "JULY_2", "AUGUST_1", "AUGUST_2", "SEPTEMBER_1", "SEPTEMBER_2")) attr(dat_numeric$y, "attr") <- NULL dat_numeric <- merTools::stripAttributes(dat_numeric) # Read in dat for fatty acids # dat <- read.csv("../cleaned_data/fatty_acids.csv", header = TRUE) # # SAFA <- c("C12.0", "C14.0", "C15.0", "C16.0", "C17.0", "C18.0", "C20.0", "C22.0", "iso.C15.0", "C24.0", "C26.0", "C28.0") # MUFA <- c( "C15.1", "C14.1n5", "C15.1w7", "C16.1w5", "C16.1w6", "C16.1w7", "C16.1w7c", "C16.1w8", "C16.1w9", "C17.1n7", "C18.1w7", "C18.1w7c", # "C18.1w9", "C18.1w9c", "C20.1w7", "C20.1w9", "C22.1w7", "C22.1w9", "C22.1w9c") # LUFA <- c("C16.2", "C16.2w4", "C16.2w6", "C16.2w7", "C16.3w3", "C16.3w4", "C16.3w6", "C18.2w6", "C18.2w6t", "C18.2w6c", # "C18.3w3", "C18.3w6", "C20.2w6", "C20.3w3", "C20.3w6", "C22.2w6", "C22.3w3") # HUFA <- c("C16.4w1", "C16.4w3", "C18.4w3", "C18.4w4", "C18.5w3", "C20.4w2", "C20.4w3", "C20.4w6", "C20.5w3", "C22.4w3", # "C22.4w6", "C22.5w3", "C22.5w6", "C22.6w3") # SCUFA_LUFA <- c("C16.2", "C16.2w4", "C16.2w6", "C16.2w7", "C16.3w3", "C16.3w4", "C16.3w6", "C18.2w6", "C18.2w6c", # "C18.2w6t", "C18.3w3", "C18.3w6") # LCUFA_LUFA <- c("C20.2w6", "C20.3w3", 
"C20.3w6", "C22.2w6", "C22.3w3") # SCUFA_HUFA <- c("C16.4w1", "C16.4w3", "C18.4w3", "C18.4w4", "C18.5w3") # LCUFA_HUFA <- c( "C20.4w2", "C20.4w3", "C20.4w6", "C20.5w3", "C22.4w3", # "C22.4w6", "C22.5w3", "C22.5w6", "C22.6w3") # SCUFA <- c("C16.2w4", "C16.2w6", "C16.2w7", "C16.3w3", "C16.3w4", "C16.3w6", "C18.2w6", "C18.2w6t", "C18.2w6t", # "C18.3w3", "C18.3w6", "C16.4w1", "C16.4w3", "C18.4w3", "C18.4w4", "C18.5w3") # LCUFA <- c( "C20.4w2", "C20.4w3", "C20.4w6", "C20.5w3", "C22.4w3", # "C22.4w6", "C22.5w3", "C22.5w6", "C22.6w3", "C20.2w6", "C20.3w3", "C20.3w6", "C22.2w6", "C22.3w3") # # C18PUFA <- c("C18.2w6", "C18.2w6t", "C18.3w3", "C18.3w6", "C18.4w3", "C18.4w4", "C18.5w3") # C20PUFA <- c("C20.4w3", "C20.4w6", "C20.5w3", "C20.2w6", "C20.3w3", "C20.3w6") # # dat_numeric <- dat %>% # filter(!(LOC %in% c("FI2", "DU2", "HO1"))) %>% # rename("month" = "MONTH") %>% # gather(fatty_acid, conc, C12.0:C28.0) %>% # filter(fatty_acid != "C19.0", # !(fatty_acid %in% MUFA)) %>% # mutate(fatty_acid_group = ifelse(fatty_acid %in% SAFA, "SAFA", NA), # fatty_acid_group = ifelse(fatty_acid %in% c(LUFA, HUFA), "PUFA", fatty_acid_group)) %>% # group_by(LOC, month, fatty_acid_group) %>% # summarize(total_conc = sum(conc)) %>% # ungroup() %>% # spread(fatty_acid_group, total_conc) %>% # mutate(y_raw = PUFA/SAFA) %>% # mutate(site = as.numeric(LOC), # month =as.numeric(month), # y = as.numeric(y_raw)) %>% # select(y, site, month) %>% # as.list() str(dat_numeric) attach(dat_numeric) # Initial values inits <- function (){ list (beta0 = rnorm(1), sigma.e=runif(1), sigma.site=runif(1),sigma.month=runif(1), sigma.site.month=runif(1)) } # Parameters monitored parameters <- c("beta0","sigma.e","sigma.site", "sigma.month", "sigma.site.month", "local.b","global.b","a") # MCMC settings ni <- 10000 nt <- 1 nb <- 5000 nc <- 2 start.time = Sys.time() # Set timer # Call JAGS from R out <- jags(data = dat_numeric, inits, parameters, "flathead_ppcp.txt", n.chains = nc, n.thin = nt, n.iter = ni, n.burnin 
= nb, parallel=T) end.time = Sys.time() elapsed.time = round(difftime(end.time, start.time, units='mins'), dig = 2) cat('Posterior computed in ', elapsed.time, ' minutes\n\n', sep='') # Calculate computation time # Summarize posteriors print(out, dig = 3) ### Sometimes you have many, many parameters to examine: # Find which parameters, if any, have Rhat > 1.1 which(out$summary[, c("Rhat")] > 1.1) # Or see what max Rhat value is max(out$summary[, c("Rhat")]) # outExp <- out$summary # write.csv(outExp, "TP_ModelSummary.csv", row.names = T) mcmcOut <- out$sims.list saveRDS(mcmcOut, file="../cleaned_data/fatty_acid_mcmc_out.rds") library(ggmcmc) library(gridExtra) library(ggthemes) library(coda) out.mcmc <- as.mcmc(out) S <- ggs(out.mcmc$samples) # ggs_traceplot(S) ggmcmc(S, file = "../figures_tables/fatty_acid_mcmc_output.pdf") # For select parameters # hist(out$sims.list$tau2) # sum1 <- out$summary # write.csv(sum1,"summary_exp.csv", row.names = T) # traceplot(out,parameters = "a") # traceplot(out,parameters = "lambda1") # traceplot(out,parameters = "sigma.year") # # dat$lake <- as.numeric(as.factor(as.numeric(dat$lagoslakeid))) # dat$year <- as.numeric(as.factor(as.numeric(dat$sampleyear))) # dat$huc <- as.numeric(as.factor(as.numeric(as.factor(dat$hu4_zoneid)))) # write.csv(dat,"tp_dat_diagnostics.csv",row.names = FALSE)
/R_scripts/03_bayesian_model.R
no_license
mishafredmeyer/flathead_sewage
R
false
false
8,204
r
# rm(list=ls()) library(jagsUI) library(dplyr) library(tidyverse) library(lubridate) library(PKPDmisc) ################################################################# ########## BUGS CODE ############################################ ################################################################# # Define the model in the BUGS language and write a text file sink("flathead_ppcp.txt") cat(" model { # Likelihood: # Level-1 of the model for (i in 1:80){ y[i] ~ dnorm(mu[i], tau) mu[i] <- beta0 + site[i] + month[i] } # Priors beta0 ~ dnorm(0, 0.0001) sigma.e ~ dunif(0, 1) sigma.site ~ dunif(0,1) sigma.month ~ dunif(0,1) # Derived quantities tau <- pow(sigma.e,-2) tau.site <- pow(sigma.site,-2) tau.month <- pow(sigma.month,-2) } # end model ",fill = TRUE) sink() # Read in data for PPCPs dat <- read.csv("../cleaned_data/ppcp.csv", header = TRUE) dat_numeric <- dat %>% filter(!(SITE %in% c("FI2", "DU2", "HO1"))) %>% filter(SITE %in% c("BD", "FLBS", "SA", "SJ", "BO", "LK", "WF", "WS", "DA", "FI")) %>% mutate(time_point = ifelse(MONTH == "MAY", "MAY_1", NA), time_point = ifelse(MONTH == "JUNE" & between(TIME, 1, 6), "MAY_1", time_point), time_point = ifelse(MONTH == "JUNE" & between(TIME, 7, 25), "JUNE_1", time_point), time_point = ifelse(MONTH == "JUNE" & between(TIME, 26, 30), "JUNE_2", time_point), time_point = ifelse(MONTH == "JULY" & between(TIME, 1, 11), "JUNE_2", time_point), time_point = ifelse(MONTH == "JULY" & between(TIME, 12, 23), "JULY_1", time_point), time_point = ifelse(MONTH == "JULY" & between(TIME, 24, 31), "JULY_2", time_point), time_point = ifelse(MONTH == "AUGUST" & between(TIME, 1, 8), "JULY_2", time_point), time_point = ifelse(MONTH == "AUGUST" & between(TIME, 9, 21), "AUGUST_1", time_point), time_point = ifelse(MONTH == "AUGUST" & between(TIME, 22, 31), "AUGUST_2", time_point), time_point = ifelse(MONTH == "SEPTEMBER" & between(TIME, 1, 5), "AUGUST_2", time_point), time_point = ifelse(MONTH == "SEPTEMBER" & between(TIME, 6, 23), "SEPTEMBER_1", 
time_point), time_point = ifelse(MONTH == "SEPTEMBER" & between(TIME, 24, 30), "SEPTEMBER_2", time_point), time_point = ifelse(MONTH == "OCTOBER", "SEPTEMBER_2", time_point), time_point = as.factor(time_point)) %>% group_by(SITE, time_point) %>% summarize(y_raw = sum(CONCENTRATION, na.rm = TRUE)) %>% ungroup() %>% mutate(y = log10(y_raw+1)) %>% #filter(y <= 2) %>% #mutate(y = scale(y_raw, scale = TRUE, center = TRUE)) %>% mutate(site = as.numeric(SITE), month =as.numeric(time_point)) %>% select(-time_point, -SITE) %>% #strip_attributes(c("scaled:center", "scaled:scale")) %>% mutate(y = as.vector(y)) %>% select(-y_raw) %>% dplyr::select(y, site, month) %>% as.list() #### Playing with variance ratios ESS <- dat dat_numeric$time_point <- factor(dat_numeric$time_point, levels = c("MAY_1", "JUNE_1", "JUNE_2", "JULY_1", "JULY_2", "AUGUST_1", "AUGUST_2", "SEPTEMBER_1", "SEPTEMBER_2")) attr(dat_numeric$y, "attr") <- NULL dat_numeric <- merTools::stripAttributes(dat_numeric) # Read in dat for fatty acids # dat <- read.csv("../cleaned_data/fatty_acids.csv", header = TRUE) # # SAFA <- c("C12.0", "C14.0", "C15.0", "C16.0", "C17.0", "C18.0", "C20.0", "C22.0", "iso.C15.0", "C24.0", "C26.0", "C28.0") # MUFA <- c( "C15.1", "C14.1n5", "C15.1w7", "C16.1w5", "C16.1w6", "C16.1w7", "C16.1w7c", "C16.1w8", "C16.1w9", "C17.1n7", "C18.1w7", "C18.1w7c", # "C18.1w9", "C18.1w9c", "C20.1w7", "C20.1w9", "C22.1w7", "C22.1w9", "C22.1w9c") # LUFA <- c("C16.2", "C16.2w4", "C16.2w6", "C16.2w7", "C16.3w3", "C16.3w4", "C16.3w6", "C18.2w6", "C18.2w6t", "C18.2w6c", # "C18.3w3", "C18.3w6", "C20.2w6", "C20.3w3", "C20.3w6", "C22.2w6", "C22.3w3") # HUFA <- c("C16.4w1", "C16.4w3", "C18.4w3", "C18.4w4", "C18.5w3", "C20.4w2", "C20.4w3", "C20.4w6", "C20.5w3", "C22.4w3", # "C22.4w6", "C22.5w3", "C22.5w6", "C22.6w3") # SCUFA_LUFA <- c("C16.2", "C16.2w4", "C16.2w6", "C16.2w7", "C16.3w3", "C16.3w4", "C16.3w6", "C18.2w6", "C18.2w6c", # "C18.2w6t", "C18.3w3", "C18.3w6") # LCUFA_LUFA <- c("C20.2w6", "C20.3w3", 
"C20.3w6", "C22.2w6", "C22.3w3") # SCUFA_HUFA <- c("C16.4w1", "C16.4w3", "C18.4w3", "C18.4w4", "C18.5w3") # LCUFA_HUFA <- c( "C20.4w2", "C20.4w3", "C20.4w6", "C20.5w3", "C22.4w3", # "C22.4w6", "C22.5w3", "C22.5w6", "C22.6w3") # SCUFA <- c("C16.2w4", "C16.2w6", "C16.2w7", "C16.3w3", "C16.3w4", "C16.3w6", "C18.2w6", "C18.2w6t", "C18.2w6t", # "C18.3w3", "C18.3w6", "C16.4w1", "C16.4w3", "C18.4w3", "C18.4w4", "C18.5w3") # LCUFA <- c( "C20.4w2", "C20.4w3", "C20.4w6", "C20.5w3", "C22.4w3", # "C22.4w6", "C22.5w3", "C22.5w6", "C22.6w3", "C20.2w6", "C20.3w3", "C20.3w6", "C22.2w6", "C22.3w3") # # C18PUFA <- c("C18.2w6", "C18.2w6t", "C18.3w3", "C18.3w6", "C18.4w3", "C18.4w4", "C18.5w3") # C20PUFA <- c("C20.4w3", "C20.4w6", "C20.5w3", "C20.2w6", "C20.3w3", "C20.3w6") # # dat_numeric <- dat %>% # filter(!(LOC %in% c("FI2", "DU2", "HO1"))) %>% # rename("month" = "MONTH") %>% # gather(fatty_acid, conc, C12.0:C28.0) %>% # filter(fatty_acid != "C19.0", # !(fatty_acid %in% MUFA)) %>% # mutate(fatty_acid_group = ifelse(fatty_acid %in% SAFA, "SAFA", NA), # fatty_acid_group = ifelse(fatty_acid %in% c(LUFA, HUFA), "PUFA", fatty_acid_group)) %>% # group_by(LOC, month, fatty_acid_group) %>% # summarize(total_conc = sum(conc)) %>% # ungroup() %>% # spread(fatty_acid_group, total_conc) %>% # mutate(y_raw = PUFA/SAFA) %>% # mutate(site = as.numeric(LOC), # month =as.numeric(month), # y = as.numeric(y_raw)) %>% # select(y, site, month) %>% # as.list() str(dat_numeric) attach(dat_numeric) # Initial values inits <- function (){ list (beta0 = rnorm(1), sigma.e=runif(1), sigma.site=runif(1),sigma.month=runif(1), sigma.site.month=runif(1)) } # Parameters monitored parameters <- c("beta0","sigma.e","sigma.site", "sigma.month", "sigma.site.month", "local.b","global.b","a") # MCMC settings ni <- 10000 nt <- 1 nb <- 5000 nc <- 2 start.time = Sys.time() # Set timer # Call JAGS from R out <- jags(data = dat_numeric, inits, parameters, "flathead_ppcp.txt", n.chains = nc, n.thin = nt, n.iter = ni, n.burnin 
= nb, parallel=T) end.time = Sys.time() elapsed.time = round(difftime(end.time, start.time, units='mins'), dig = 2) cat('Posterior computed in ', elapsed.time, ' minutes\n\n', sep='') # Calculate computation time # Summarize posteriors print(out, dig = 3) ### Sometimes you have many, many parameters to examine: # Find which parameters, if any, have Rhat > 1.1 which(out$summary[, c("Rhat")] > 1.1) # Or see what max Rhat value is max(out$summary[, c("Rhat")]) # outExp <- out$summary # write.csv(outExp, "TP_ModelSummary.csv", row.names = T) mcmcOut <- out$sims.list saveRDS(mcmcOut, file="../cleaned_data/fatty_acid_mcmc_out.rds") library(ggmcmc) library(gridExtra) library(ggthemes) library(coda) out.mcmc <- as.mcmc(out) S <- ggs(out.mcmc$samples) # ggs_traceplot(S) ggmcmc(S, file = "../figures_tables/fatty_acid_mcmc_output.pdf") # For select parameters # hist(out$sims.list$tau2) # sum1 <- out$summary # write.csv(sum1,"summary_exp.csv", row.names = T) # traceplot(out,parameters = "a") # traceplot(out,parameters = "lambda1") # traceplot(out,parameters = "sigma.year") # # dat$lake <- as.numeric(as.factor(as.numeric(dat$lagoslakeid))) # dat$year <- as.numeric(as.factor(as.numeric(dat$sampleyear))) # dat$huc <- as.numeric(as.factor(as.numeric(as.factor(dat$hu4_zoneid)))) # write.csv(dat,"tp_dat_diagnostics.csv",row.names = FALSE)
##*********************************************************************** ## methods to compute the spacings for diffrent objects. ## ## First sort in descending order, then diff and possibly weight. ## ## Author: Yves Deville <deville.yves@alpestat.com> ## ##*********************************************************************** spacings <- function(object, ...) { UseMethod("spacings") } spacings.numeric <- function(object, wExp = TRUE, ...) { if(is.null(object)) return(NULL) if (length(object) > 1L) { sp <- -diff(sort(object, decreasing = TRUE)) if (wExp) sp <- sp * (1:length(sp)) } else sp <- numeric(0) sp } ##======================================================================= ## to be used with a data.frame with a "block" variable ## such as MAXdata of OTSdata ##======================================================================= spacings.data.frame <- function(object, varName, wExp = TRUE, ...) { ff <- function(x) { if (length(x) > 1) return(-diff(sort(x, decreasing = TRUE))) else return(numeric(0)) } ## sp <- tapply(X = object[ , varName], INDEX = object[ , "block"], FUN = ff) ## weight the spacings if (wExp) sp <- lapply(sp, function(x) { x * (1L:length(x)) }) sigmaHat <- mean(unlist(sp)) ## attr(sp, "weights") sp } ##======================================================================= ## the data.frame in a "Rendata" object can be choosed with 'type' ##======================================================================= spacings.Rendata <- function(object, type = c("MAX", "OTS", "OT"), wExp = TRUE, ...) { type <- match.arg(type) varName <- object$info$varName if (type != "OT") { res <- spacings(object[[sprintf("%sdata", type)]], varName = varName, wExp = wExp) } else { res <- spacings.numeric(object = object[["OTdata"]][ , varName], wExp = wExp) } res } if (FALSE) { rd <- Garonne res <- spacings(object = rd$MAXdata, varName = rd$info$varName) res1 <- spacings(rd, type = "OT") res2 <- spacings(rd, type = "MAX") }
/R/spacings.R
no_license
cran/Renext
R
false
false
2,142
r
##*********************************************************************** ## methods to compute the spacings for diffrent objects. ## ## First sort in descending order, then diff and possibly weight. ## ## Author: Yves Deville <deville.yves@alpestat.com> ## ##*********************************************************************** spacings <- function(object, ...) { UseMethod("spacings") } spacings.numeric <- function(object, wExp = TRUE, ...) { if(is.null(object)) return(NULL) if (length(object) > 1L) { sp <- -diff(sort(object, decreasing = TRUE)) if (wExp) sp <- sp * (1:length(sp)) } else sp <- numeric(0) sp } ##======================================================================= ## to be used with a data.frame with a "block" variable ## such as MAXdata of OTSdata ##======================================================================= spacings.data.frame <- function(object, varName, wExp = TRUE, ...) { ff <- function(x) { if (length(x) > 1) return(-diff(sort(x, decreasing = TRUE))) else return(numeric(0)) } ## sp <- tapply(X = object[ , varName], INDEX = object[ , "block"], FUN = ff) ## weight the spacings if (wExp) sp <- lapply(sp, function(x) { x * (1L:length(x)) }) sigmaHat <- mean(unlist(sp)) ## attr(sp, "weights") sp } ##======================================================================= ## the data.frame in a "Rendata" object can be choosed with 'type' ##======================================================================= spacings.Rendata <- function(object, type = c("MAX", "OTS", "OT"), wExp = TRUE, ...) { type <- match.arg(type) varName <- object$info$varName if (type != "OT") { res <- spacings(object[[sprintf("%sdata", type)]], varName = varName, wExp = wExp) } else { res <- spacings.numeric(object = object[["OTdata"]][ , varName], wExp = wExp) } res } if (FALSE) { rd <- Garonne res <- spacings(object = rd$MAXdata, varName = rd$info$varName) res1 <- spacings(rd, type = "OT") res2 <- spacings(rd, type = "MAX") }
utils::globalVariables(c("station_values", "date_start", "date_end", "keyword", "network", "date_range", "station", "show", "show_date", "word", "snippet", ".x")) sfj <- purrr::safely(jsonlite::fromJSON) s_head <- purrr::safely(httr::HEAD)
/R/aaa.r
no_license
hrbrmstr/newsflash
R
false
false
266
r
utils::globalVariables(c("station_values", "date_start", "date_end", "keyword", "network", "date_range", "station", "show", "show_date", "word", "snippet", ".x")) sfj <- purrr::safely(jsonlite::fromJSON) s_head <- purrr::safely(httr::HEAD)
#.libPaths("/data/Rlibs") options(mrgsolve.soloc = "build") knitr::opts_chunk$set( comment = '.', fig.height = 5, fig.width = 9, fig.align = "center", message = FALSE, warning = FALSE ) ggplot2::theme_set(ggplot2::theme_bw())
/src/global.R
no_license
kylebaron/mpss-mrgsolve
R
false
false
241
r
#.libPaths("/data/Rlibs") options(mrgsolve.soloc = "build") knitr::opts_chunk$set( comment = '.', fig.height = 5, fig.width = 9, fig.align = "center", message = FALSE, warning = FALSE ) ggplot2::theme_set(ggplot2::theme_bw())
# Simple console calculator, rewritten into valid R.
#
# The original file was an R/Python hybrid that did not parse:
#  * readline() was called with multiple arguments,
#  * Python syntax was used ('or', '.as.integer', 'len(...)', 'print x',
#    colon-terminated if/else),
#  * sin/cos/tan called themselves instead of the base functions
#    (infinite recursion),
#  * number_check2() returned 'ValueError' on BOTH ifelse branches, and
#    number_check1() tested is.na() on the raw argument rather than on
#    the converted number,
#  * str_split() was used without loading stringr (base strsplit is
#    used here instead) and split on "" (every character) rather than
#    on whitespace, contradicting its own comment.

# Shield for input type: returns "ValueError" when either argument
# cannot be interpreted as a number, otherwise TRUE (invisibly).
number_check2 <- function(x, y) {
  n <- suppressWarnings(as.numeric(c(x, y)))
  if (any(is.na(n))) "ValueError" else invisible(TRUE)
}
# test
print(number_check2(2, 'h'))

# Single-value variant of the input shield.
number_check1 <- function(x) {
  n <- suppressWarnings(as.numeric(x))
  if (is.na(n)) "ValueError" else invisible(TRUE)
}
# test
print(number_check1('h'))

# x raised to the power y; negative exponents are rejected with 'nan'
# (mirrors the original author's intent).
exponential <- function(x, y) {
  if (identical(number_check2(x, y), "ValueError")) return("ValueError")
  if (y < 0) {
    print('nan')
    return(NaN)
  }
  x^y
}
print(exponential(5, 6))
print(exponential(0, 6))

add <- function(x, y) {
  if (identical(number_check2(x, y), "ValueError")) return("ValueError")
  x + y
}
print(add(1, 6))

subtract <- function(x, y) {
  if (identical(number_check2(x, y), "ValueError")) return("ValueError")
  x - y
}
print(subtract(6, 1))

multiply <- function(x, y) {
  if (identical(number_check2(x, y), "ValueError")) return("ValueError")
  x * y
}
print(multiply(6, 6))

# Division; division by zero is rejected with 'nan'.
divide <- function(x, y) {
  if (identical(number_check2(x, y), "ValueError")) return("ValueError")
  if (y == 0) {
    print('nan')
    return(NaN)
  }
  x / y
}
print(divide(6, 3))

# Unary wrappers; base:: is required because these definitions mask the
# base functions of the same name (the original recursed forever here).
sin <- function(x) {
  if (identical(number_check1(x), "ValueError")) return("ValueError")
  base::sin(x)
}
cos <- function(x) {
  if (identical(number_check1(x), "ValueError")) return("ValueError")
  base::cos(x)
}
tan <- function(x) {
  if (identical(number_check1(x), "ValueError")) return("ValueError")
  base::tan(x)
}
sq <- function(x) {
  if (identical(number_check1(x), "ValueError")) return("ValueError")
  sqrt(x)
}

# Read up to two whitespace-separated numbers from the console and
# return them as a numeric vector.
get_numbers <- function() {
  numb_input <- readline("Enter up to two numbers to calculate: ")
  as.numeric(strsplit(trimws(numb_input), "\\s+")[[1]])
}

# Menu for two-number operations.  (A function has a fixed number of
# parameters, so one- and two-number menus are separate functions.)
operation_2numbers <- function(x, y) {
  cat("Which operation would you like to use?:\n",
      "[1] Enter 1 for addition.\n",
      "[2] Enter 2 for subtraction.\n",
      "[3] Enter 3 for multiplication.\n",
      "[4] Enter 4 for division.\n",
      "[5] Enter 5 for exponential.\n", sep = "")
  op <- suppressWarnings(as.integer(readline()))
  if (is.na(op) || op < 1 || op > 5) {
    print('Please enter number in range 1 to 5.')
  } else if (op == 1) {
    print(add(x, y))
  } else if (op == 2) {
    print(subtract(x, y))
  } else if (op == 3) {
    print(multiply(x, y))
  } else if (op == 4) {
    print(divide(x, y))
  } else {
    exponential(x, y)
  }
}

# Menu for one-number operations.
operation_1number <- function(x) {
  cat("Which operation would you like to use?:\n",
      "[1] Enter 1 for sin.\n",
      "[2] Enter 2 for cos.\n",
      "[3] Enter 3 for tan.\n",
      "[4] Enter 4 for sqrt.\n", sep = "")
  op <- suppressWarnings(as.integer(readline()))
  if (is.na(op) || op < 1 || op > 4) {
    print('Please enter number in range 1 to 4.')
  } else if (op == 1) {
    print(sin(x))
  } else if (op == 2) {
    print(cos(x))
  } else if (op == 3) {
    print(tan(x))
  } else {
    sq(x)
  }
}

# Interactive driver: one number -> unary menu, two -> binary menu.
# Guarded so that sourcing the file non-interactively does not block.
if (interactive()) {
  numbers <- get_numbers()
  if (length(numbers) == 1) {
    print(operation_1number(numbers[1]))
  } else {
    print(operation_2numbers(numbers[1], numbers[2]))
  }
}
/CA5/CA 5.R
no_license
LAWLORAE/al_pbd
R
false
false
2,865
r
# Simple console calculator, rewritten into valid R.
#
# The original file was an R/Python hybrid that did not parse:
#  * readline() was called with multiple arguments,
#  * Python syntax was used ('or', '.as.integer', 'len(...)', 'print x',
#    colon-terminated if/else),
#  * sin/cos/tan called themselves instead of the base functions
#    (infinite recursion),
#  * number_check2() returned 'ValueError' on BOTH ifelse branches, and
#    number_check1() tested is.na() on the raw argument rather than on
#    the converted number,
#  * str_split() was used without loading stringr (base strsplit is
#    used here instead) and split on "" (every character) rather than
#    on whitespace, contradicting its own comment.

# Shield for input type: returns "ValueError" when either argument
# cannot be interpreted as a number, otherwise TRUE (invisibly).
number_check2 <- function(x, y) {
  n <- suppressWarnings(as.numeric(c(x, y)))
  if (any(is.na(n))) "ValueError" else invisible(TRUE)
}
# test
print(number_check2(2, 'h'))

# Single-value variant of the input shield.
number_check1 <- function(x) {
  n <- suppressWarnings(as.numeric(x))
  if (is.na(n)) "ValueError" else invisible(TRUE)
}
# test
print(number_check1('h'))

# x raised to the power y; negative exponents are rejected with 'nan'
# (mirrors the original author's intent).
exponential <- function(x, y) {
  if (identical(number_check2(x, y), "ValueError")) return("ValueError")
  if (y < 0) {
    print('nan')
    return(NaN)
  }
  x^y
}
print(exponential(5, 6))
print(exponential(0, 6))

add <- function(x, y) {
  if (identical(number_check2(x, y), "ValueError")) return("ValueError")
  x + y
}
print(add(1, 6))

subtract <- function(x, y) {
  if (identical(number_check2(x, y), "ValueError")) return("ValueError")
  x - y
}
print(subtract(6, 1))

multiply <- function(x, y) {
  if (identical(number_check2(x, y), "ValueError")) return("ValueError")
  x * y
}
print(multiply(6, 6))

# Division; division by zero is rejected with 'nan'.
divide <- function(x, y) {
  if (identical(number_check2(x, y), "ValueError")) return("ValueError")
  if (y == 0) {
    print('nan')
    return(NaN)
  }
  x / y
}
print(divide(6, 3))

# Unary wrappers; base:: is required because these definitions mask the
# base functions of the same name (the original recursed forever here).
sin <- function(x) {
  if (identical(number_check1(x), "ValueError")) return("ValueError")
  base::sin(x)
}
cos <- function(x) {
  if (identical(number_check1(x), "ValueError")) return("ValueError")
  base::cos(x)
}
tan <- function(x) {
  if (identical(number_check1(x), "ValueError")) return("ValueError")
  base::tan(x)
}
sq <- function(x) {
  if (identical(number_check1(x), "ValueError")) return("ValueError")
  sqrt(x)
}

# Read up to two whitespace-separated numbers from the console and
# return them as a numeric vector.
get_numbers <- function() {
  numb_input <- readline("Enter up to two numbers to calculate: ")
  as.numeric(strsplit(trimws(numb_input), "\\s+")[[1]])
}

# Menu for two-number operations.  (A function has a fixed number of
# parameters, so one- and two-number menus are separate functions.)
operation_2numbers <- function(x, y) {
  cat("Which operation would you like to use?:\n",
      "[1] Enter 1 for addition.\n",
      "[2] Enter 2 for subtraction.\n",
      "[3] Enter 3 for multiplication.\n",
      "[4] Enter 4 for division.\n",
      "[5] Enter 5 for exponential.\n", sep = "")
  op <- suppressWarnings(as.integer(readline()))
  if (is.na(op) || op < 1 || op > 5) {
    print('Please enter number in range 1 to 5.')
  } else if (op == 1) {
    print(add(x, y))
  } else if (op == 2) {
    print(subtract(x, y))
  } else if (op == 3) {
    print(multiply(x, y))
  } else if (op == 4) {
    print(divide(x, y))
  } else {
    exponential(x, y)
  }
}

# Menu for one-number operations.
operation_1number <- function(x) {
  cat("Which operation would you like to use?:\n",
      "[1] Enter 1 for sin.\n",
      "[2] Enter 2 for cos.\n",
      "[3] Enter 3 for tan.\n",
      "[4] Enter 4 for sqrt.\n", sep = "")
  op <- suppressWarnings(as.integer(readline()))
  if (is.na(op) || op < 1 || op > 4) {
    print('Please enter number in range 1 to 4.')
  } else if (op == 1) {
    print(sin(x))
  } else if (op == 2) {
    print(cos(x))
  } else if (op == 3) {
    print(tan(x))
  } else {
    sq(x)
  }
}

# Interactive driver: one number -> unary menu, two -> binary menu.
# Guarded so that sourcing the file non-interactively does not block.
if (interactive()) {
  numbers <- get_numbers()
  if (length(numbers) == 1) {
    print(operation_1number(numbers[1]))
  } else {
    print(operation_2numbers(numbers[1], numbers[2]))
  }
}
# --------------------------------------------------------------------------------
# Survey of packages for interactive (dynamic) visualization in R.
# --------------------------------------------------------------------------------
# This lesson walks through several packages one after another.

#### 1. plotly ####
# plotly's stated mission (from https://plot.ly/):
# "Plotly is helping leading organizations close the gap between Data
# Science teams and the rest of the organization. With Plotly, your teams
# can easily design, develop, and operationalize data science initiatives
# that deliver real results."
# That mission matches the philosophy of this visualization course: the
# key point is how to *deliver* a data scientist's results to people who
# are not comfortable with data.
# plotly's main supported languages: Python, R, JavaScript.
#   - Front-end developers familiar with JavaScript: learn the JS API.
#   - Application developers familiar with Python: learn the Python API.
#   - Report writers / presenters: learn the R API.

# Basic chart example: build a ggplot2 object, then make it interactive
# with ggplotly().  In the author's experience it is better to master the
# ggplot2 grammar and wrap with ggplotly() than to learn the native
# plotly grammar from scratch.
library(ggplot2)
library(plotly)

p <- ggplot(mpg, aes(x=displ, y=hwy, color=class)) +
  geom_point(size=3) +
  labs(x = "Engine displacement",
       y = "Highway Mileage",
       color = "Car Class") +
  theme_bw()
ggplotly(p)

# Model-based plots can be rendered through plotly as well.
# NOTE(review): visreg() is used below but the 'visreg' package is never
# loaded in this script; add library(visreg) before running this chunk.
data(CPS85, package = "mosaicData")
cps85_glm <- glm(married ~ sex + age + race + sector,
                 family="binomial",
                 data=CPS85)

p <- visreg(cps85_glm, "age",
            by = "sex",
            gg = TRUE,
            scale="response") +
  labs(y = "Prob(Married)",
       x = "Age",
       title = "Relationship of age and marital status",
       subtitle = "controlling for race and job sector",
       caption = "source: Current Population Survey 1985") +
  theme_minimal()
ggplotly(p)

# Without ggplot2 you must learn plotly's own grammar, which is not
# recommended for beginners.  Example below:
library(dplyr)
plot_ly(diamonds, x = ~cut) %>%
  add_histogram()
# The shape resembles ggplot2 but the grammar differs slightly.  For
# focus, concentrate on ggplot2 and prefer ggplotly() where possible.

# ----- (1) Time-series plots -----
library(plotly)
data(economics, package = "ggplot2")
economics %>%
  arrange(psavert) %>%
  plot_ly(x = ~date, y = ~psavert) %>%
  add_lines()

# Time series by group.
library(dplyr)
top5 <- txhousing %>%
  group_by(city) %>%
  summarise(m = mean(sales, na.rm = TRUE)) %>%
  arrange(desc(m)) %>%
  top_n(5)
tx5 <- semi_join(txhousing, top5, by = "city")

p <- ggplot(tx5, aes(x = date, y = median, colour = city)) +
  geom_line() +
  theme_minimal()
ggplotly(p)

# ----- (2) Error bars -----
data <- ToothGrowth %>%
  group_by(supp, dose) %>%
  dplyr::summarise(n = n(),
                   mean = mean(len),
                   sd = sd(len),
                   se = sd/sqrt(n)) %>%
  dplyr::mutate(dose = as.factor(dose)) %>%
  ungroup()
glimpse(data)

# ggplot2 approach
p <- ggplot(data, aes(x = dose, y = mean, group = supp, colour = supp)) +
  geom_point(size = 2) +
  geom_line(size = 1) +
  geom_errorbar(aes(ymin = mean - se, ymax = mean + se), width = .1) +
  theme_minimal()
ggplotly(p)

# plotly approach
plot_ly(data = data[which(data$supp == 'OJ'),],
        x = ~dose,
        y = ~mean,
        type = 'scatter',
        mode = 'lines+markers',
        name = 'OJ',
        error_y = ~list(array = sd, color = '#000000')) %>%
  add_trace(data = data[which(data$supp == 'VC'),], name = 'VC')
# Decide for yourself which style is more convenient.

# ---- (3) Financial plot (funnel chart) -----
library(plotly)
funnel_data <- data.frame(
  percent = c(39, 27.4, 20.6, 11, 2),
  path = c("웹사이트 방문", "다운로드", "잠재고객", "장바구니", "실제구매"),
  stringsAsFactors = FALSE
)
plot_ly() %>%
  add_trace(
    type = "funnel",
    y = funnel_data$path,
    x = funnel_data$percent) %>%
  layout(yaxis = list(categoryarray = funnel_data$path))
# This chart would be very hard to build with ggplot2.  It is possible,
# but not recommended:
# https://beta.rstudioconnect.com/content/5294/funnel_plot.nb.html
# The instructor's advice for good visualization: do not depend on a
# single package; find and apply new packages well.  Information matters
# most, so make a habit of writing things down somewhere.

#### 2. Leaflet ####
# The leaflet package specialises in map visualization.  Like plotly it
# began as an open-source JavaScript library and was extended to R.
# Recommended for anyone doing spatial visualization.

# Like plotly it has its own grammar, so there is an initial learning
# curve; it is largely unrelated to ggplot2.  However, it can be embedded
# in the shiny apps covered next week, so it is worth learning.
# Note: from shiny-app visualization onwards, the work is closer to
# platform-based development -- compare coolly against your existing
# visualization solutions before adopting it in-house.

# Draw a simple map.
library(leaflet)
leaflet() %>%
  addTiles() %>%
  addMarkers(lng=127.027696, lat=37.498124, popup="강남역입니다.")

# DataFrames
library(htmltools)

# TODO: switch this data set to COVID-based data.
df <- read.csv(textConnection(
"Name,Lat,Long
Samurai Noodle,47.597131,-122.327298
Kukai Ramen,47.6154,-122.327157
Tsukushinbo,47.59987,-122.326726"))

leaflet(df) %>%
  addTiles() %>%
  addMarkers(~Long, ~Lat, label = ~htmlEscape(Name))
# For details on each option see
# https://rstudio.github.io/leaflet/popups.html
/python/수업자료/머신러닝/R_edu-master/R_NCS_2020/1_day/source/1_6_Interactive_graph.R
no_license
denim5409/connected
R
false
false
7,032
r
# --------------------------------------------------------------------------------
# Survey of packages for interactive (dynamic) visualization in R.
# --------------------------------------------------------------------------------
# This lesson walks through several packages one after another.

#### 1. plotly ####
# plotly's stated mission (from https://plot.ly/):
# "Plotly is helping leading organizations close the gap between Data
# Science teams and the rest of the organization. With Plotly, your teams
# can easily design, develop, and operationalize data science initiatives
# that deliver real results."
# That mission matches the philosophy of this visualization course: the
# key point is how to *deliver* a data scientist's results to people who
# are not comfortable with data.
# plotly's main supported languages: Python, R, JavaScript.
#   - Front-end developers familiar with JavaScript: learn the JS API.
#   - Application developers familiar with Python: learn the Python API.
#   - Report writers / presenters: learn the R API.

# Basic chart example: build a ggplot2 object, then make it interactive
# with ggplotly().  In the author's experience it is better to master the
# ggplot2 grammar and wrap with ggplotly() than to learn the native
# plotly grammar from scratch.
library(ggplot2)
library(plotly)

p <- ggplot(mpg, aes(x=displ, y=hwy, color=class)) +
  geom_point(size=3) +
  labs(x = "Engine displacement",
       y = "Highway Mileage",
       color = "Car Class") +
  theme_bw()
ggplotly(p)

# Model-based plots can be rendered through plotly as well.
# NOTE(review): visreg() is used below but the 'visreg' package is never
# loaded in this script; add library(visreg) before running this chunk.
data(CPS85, package = "mosaicData")
cps85_glm <- glm(married ~ sex + age + race + sector,
                 family="binomial",
                 data=CPS85)

p <- visreg(cps85_glm, "age",
            by = "sex",
            gg = TRUE,
            scale="response") +
  labs(y = "Prob(Married)",
       x = "Age",
       title = "Relationship of age and marital status",
       subtitle = "controlling for race and job sector",
       caption = "source: Current Population Survey 1985") +
  theme_minimal()
ggplotly(p)

# Without ggplot2 you must learn plotly's own grammar, which is not
# recommended for beginners.  Example below:
library(dplyr)
plot_ly(diamonds, x = ~cut) %>%
  add_histogram()
# The shape resembles ggplot2 but the grammar differs slightly.  For
# focus, concentrate on ggplot2 and prefer ggplotly() where possible.

# ----- (1) Time-series plots -----
library(plotly)
data(economics, package = "ggplot2")
economics %>%
  arrange(psavert) %>%
  plot_ly(x = ~date, y = ~psavert) %>%
  add_lines()

# Time series by group.
library(dplyr)
top5 <- txhousing %>%
  group_by(city) %>%
  summarise(m = mean(sales, na.rm = TRUE)) %>%
  arrange(desc(m)) %>%
  top_n(5)
tx5 <- semi_join(txhousing, top5, by = "city")

p <- ggplot(tx5, aes(x = date, y = median, colour = city)) +
  geom_line() +
  theme_minimal()
ggplotly(p)

# ----- (2) Error bars -----
data <- ToothGrowth %>%
  group_by(supp, dose) %>%
  dplyr::summarise(n = n(),
                   mean = mean(len),
                   sd = sd(len),
                   se = sd/sqrt(n)) %>%
  dplyr::mutate(dose = as.factor(dose)) %>%
  ungroup()
glimpse(data)

# ggplot2 approach
p <- ggplot(data, aes(x = dose, y = mean, group = supp, colour = supp)) +
  geom_point(size = 2) +
  geom_line(size = 1) +
  geom_errorbar(aes(ymin = mean - se, ymax = mean + se), width = .1) +
  theme_minimal()
ggplotly(p)

# plotly approach
plot_ly(data = data[which(data$supp == 'OJ'),],
        x = ~dose,
        y = ~mean,
        type = 'scatter',
        mode = 'lines+markers',
        name = 'OJ',
        error_y = ~list(array = sd, color = '#000000')) %>%
  add_trace(data = data[which(data$supp == 'VC'),], name = 'VC')
# Decide for yourself which style is more convenient.

# ---- (3) Financial plot (funnel chart) -----
library(plotly)
funnel_data <- data.frame(
  percent = c(39, 27.4, 20.6, 11, 2),
  path = c("웹사이트 방문", "다운로드", "잠재고객", "장바구니", "실제구매"),
  stringsAsFactors = FALSE
)
plot_ly() %>%
  add_trace(
    type = "funnel",
    y = funnel_data$path,
    x = funnel_data$percent) %>%
  layout(yaxis = list(categoryarray = funnel_data$path))
# This chart would be very hard to build with ggplot2.  It is possible,
# but not recommended:
# https://beta.rstudioconnect.com/content/5294/funnel_plot.nb.html
# The instructor's advice for good visualization: do not depend on a
# single package; find and apply new packages well.  Information matters
# most, so make a habit of writing things down somewhere.

#### 2. Leaflet ####
# The leaflet package specialises in map visualization.  Like plotly it
# began as an open-source JavaScript library and was extended to R.
# Recommended for anyone doing spatial visualization.

# Like plotly it has its own grammar, so there is an initial learning
# curve; it is largely unrelated to ggplot2.  However, it can be embedded
# in the shiny apps covered next week, so it is worth learning.
# Note: from shiny-app visualization onwards, the work is closer to
# platform-based development -- compare coolly against your existing
# visualization solutions before adopting it in-house.

# Draw a simple map.
library(leaflet)
leaflet() %>%
  addTiles() %>%
  addMarkers(lng=127.027696, lat=37.498124, popup="강남역입니다.")

# DataFrames
library(htmltools)

# TODO: switch this data set to COVID-based data.
df <- read.csv(textConnection(
"Name,Lat,Long
Samurai Noodle,47.597131,-122.327298
Kukai Ramen,47.6154,-122.327157
Tsukushinbo,47.59987,-122.326726"))

leaflet(df) %>%
  addTiles() %>%
  addMarkers(~Long, ~Lat, label = ~htmlEscape(Name))
# For details on each option see
# https://rstudio.github.io/leaflet/popups.html
# R code file that constructs PLOT 2 for Project 1.  The file includes
# the code for reading the data so that the plot can be fully reproduced.
# It also creates the PNG file.

file.png = "plot2.png"
filename = "household_power_consumption.txt"

# Check if the data is in the current working directory.
# If not, download and unzip the file.
if(!file.exists(filename)){
  cat("Download and unzip data file for Project 1 in current working directory:\n",
      getwd(), "\n please wait ...\n")
  temp <- tempfile()
  fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
  download.file(fileUrl, temp)
  unzip(temp)
  unlink(temp)
  dateDownloaded <- date()
  cat("download and unzipping done.\n")
}

# Check if the sqldf package is installed and load it.
if (!"sqldf" %in% installed.packages()) install.packages("sqldf")
library(sqldf)

# Load the file with read.csv.sql() and read only rows dated 1/2/2007 and
# 2/2/2007.  No NA values occur in this subset, so nothing to handle. :)
dataf <- read.csv.sql(filename,
                      sql = "select * from file where Date in ('1/2/2007', '2/2/2007')",
                      header = TRUE, sep = ";")
closeAllConnections()

# Add a new column 'datetime' combining the Date and Time columns.
dataf$datetime <- paste(dataf$Date, dataf$Time)
dataf$datetime <- strptime(dataf$datetime, "%d/%m/%Y %H:%M:%S")

# Save the current LC_TIME locale of the R process.  The author's locale
# is "German_Austria.1252", where the x-axis weekday labels would read
# "Do"/"Fr"/"Sa" ("Donnerstag"...) instead of "Thu"/"Fri"/"Sat".
mylocation <- Sys.getlocale("LC_TIME")

# Switch LC_TIME to the "C" locale (the C-language default, reflecting
# North-American usage) so labels appear in English as Thu/Fri/Sat.
Sys.setlocale("LC_TIME", "C")

# Open the PNG device; create 'plot2.png' in the working directory.
# cairographics' PNG backend never uses a palette and normally creates a
# larger 32-bit ARGB file - this may work better for specialist uses
# with semi-transparent colours.
png(file = file.png, width = 480, height = 480, type = "cairo-png")

# Ready for PLOT 2.
par(mfrow = c(1,1))
with(dataf, plot(datetime ,Global_active_power, xlab = "",
                 ylab = "Global Active Power (kilowatts)" ,type="l"))

# Close the PNG file device and write the file.
dev.off()

# Switch back to the original LC_TIME locale for date/time processing.
Sys.setlocale("LC_TIME", mylocation)

cat(file.png, "created in working directory.\n")
/plot2.R
no_license
gitranz/ExData_Plotting1
R
false
false
2,645
r
# R code file that constructs PLOT 2 for Project 1.  The file includes
# the code for reading the data so that the plot can be fully reproduced.
# It also creates the PNG file.

file.png = "plot2.png"
filename = "household_power_consumption.txt"

# Check if the data is in the current working directory.
# If not, download and unzip the file.
if(!file.exists(filename)){
  cat("Download and unzip data file for Project 1 in current working directory:\n",
      getwd(), "\n please wait ...\n")
  temp <- tempfile()
  fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
  download.file(fileUrl, temp)
  unzip(temp)
  unlink(temp)
  dateDownloaded <- date()
  cat("download and unzipping done.\n")
}

# Check if the sqldf package is installed and load it.
if (!"sqldf" %in% installed.packages()) install.packages("sqldf")
library(sqldf)

# Load the file with read.csv.sql() and read only rows dated 1/2/2007 and
# 2/2/2007.  No NA values occur in this subset, so nothing to handle. :)
dataf <- read.csv.sql(filename,
                      sql = "select * from file where Date in ('1/2/2007', '2/2/2007')",
                      header = TRUE, sep = ";")
closeAllConnections()

# Add a new column 'datetime' combining the Date and Time columns.
dataf$datetime <- paste(dataf$Date, dataf$Time)
dataf$datetime <- strptime(dataf$datetime, "%d/%m/%Y %H:%M:%S")

# Save the current LC_TIME locale of the R process.  The author's locale
# is "German_Austria.1252", where the x-axis weekday labels would read
# "Do"/"Fr"/"Sa" ("Donnerstag"...) instead of "Thu"/"Fri"/"Sat".
mylocation <- Sys.getlocale("LC_TIME")

# Switch LC_TIME to the "C" locale (the C-language default, reflecting
# North-American usage) so labels appear in English as Thu/Fri/Sat.
Sys.setlocale("LC_TIME", "C")

# Open the PNG device; create 'plot2.png' in the working directory.
# cairographics' PNG backend never uses a palette and normally creates a
# larger 32-bit ARGB file - this may work better for specialist uses
# with semi-transparent colours.
png(file = file.png, width = 480, height = 480, type = "cairo-png")

# Ready for PLOT 2.
par(mfrow = c(1,1))
with(dataf, plot(datetime ,Global_active_power, xlab = "",
                 ylab = "Global Active Power (kilowatts)" ,type="l"))

# Close the PNG file device and write the file.
dev.off()

# Switch back to the original LC_TIME locale for date/time processing.
Sys.setlocale("LC_TIME", mylocation)

cat(file.png, "created in working directory.\n")
get_coeffs <- function(taxon_list, survey, date, save = FALSE){
  # Look up length-weight relationship coefficients (a, b) for each taxon
  # in 'taxon_list' from FishBase via the 'rfishbase' package.
  #
  # For every taxon the search cascades from the most specific applicable
  # rank downwards: species -> genus -> family.  At each step only
  # total-length ("TL") records with non-missing a and b are kept, and
  # the retained coefficients are averaged.
  #
  # Args:
  #   taxon_list: data.frame with (at least) columns 'taxa', 'rank',
  #               'family' and 'genus', as produced upstream.
  #   survey, date: strings used only to build the output file name.
  #   save: if TRUE, write the result to
  #         "length_weight/length.weight_<survey>_<date>.csv".
  #
  # Returns: 'taxon_list' with added columns a, b, level_inferred (the
  #          taxonomic level actually used: "spe"/"gen"/"fam"), Type and
  #          source (FishBase release tag).
  library(data.table)
  library(dplyr)
  library(readr)
  library(rfishbase)
  library(stringr)

  s_time <- Sys.time()

  ### Prepare the working table: normalise rank labels, split out the
  ### species epithet, and drop rows without a family.
  datalw <- taxon_list %>%
    dplyr::rename(level = rank) %>%
    mutate(species = str_split(taxa, " ", simplify = TRUE, n = 2)[, 2],
           level = case_when(level == "Family" ~ "family",
                             level == "Genus" ~ "genus",
                             level == "Species" ~ "species",
                             level == "Subspecies" ~ "species")) %>%
    select(family, genus, species, level) %>%
    filter(!is.na(family))
  datalw$a <- datalw$b <- datalw$taxo <- datalw$source <- datalw$Type <- NA

  # Helper: write the averaged coefficients of the retained records 'lw'
  # into row i of 'datalw' and return the updated table (removes the
  # copy-pasted assignment blocks of the original).
  fill_row <- function(datalw, i, lw, level) {
    datalw$a[i] <- mean(lw$a)
    datalw$b[i] <- mean(lw$b)
    datalw$taxo[i] <- level
    datalw$Type[i] <- paste(unique(lw$Type), collapse = "-")
    datalw$source[i] <- paste("fb", available_releases()[1], sep = "-")
    datalw
  }

  ### Get relationships, cascading to broader levels when no record is
  ### found at the current one.
  for(i in seq_len(nrow(datalw))){

    if(datalw$level[i] == 'species'){
      # Species-level lookup.
      level <- 'spe'
      lw.spe <- length_weight(paste(datalw$genus[i], datalw$species[i],
                                    sep = " ")) %>%
        filter(!is.na(a), !is.na(b), Type == "TL")

      # Fall back to all species of the genus.
      if(nrow(lw.spe) == 0 | ncol(lw.spe) < 3){
        lw.spe <- length_weight(species_list(Genus = datalw$genus[i])) %>%
          filter(!is.na(a), !is.na(b), Type == "TL")
        level <- 'gen'
      }

      # Fall back to all species of the family.
      if(nrow(lw.spe) == 0 | ncol(lw.spe) < 3){
        lw.spe <- length_weight(species_list(Family = datalw$family[i])) %>%
          filter(!is.na(a), !is.na(b), Type == "TL")
        level <- 'fam'
      }

      if(nrow(lw.spe) > 0) datalw <- fill_row(datalw, i, lw.spe, level)
    }

    if(datalw$level[i] == 'genus'){
      # Genus-level lookup.
      # NOTE(review): this branch additionally filters a < 1, unlike the
      # species/family branches -- preserved from the original; confirm
      # whether the asymmetry is intentional.
      level <- 'gen'
      lw.spe <- length_weight(species_list(Genus = datalw$genus[i])) %>%
        filter(!is.na(a), a < 1, !is.na(b), Type == "TL")

      # Fall back to all species of the family.
      if(nrow(lw.spe) == 0 | ncol(lw.spe) < 3){
        lw.spe <- length_weight(species_list(Family = datalw$family[i])) %>%
          filter(!is.na(a), !is.na(b), Type == "TL")
        level <- 'fam'
      }

      if(nrow(lw.spe) > 0) datalw <- fill_row(datalw, i, lw.spe, level)
    }

    if(datalw$level[i] == 'family'){
      # Family-level lookup (no broader fallback available).
      level <- 'fam'
      lw.spe <- length_weight(species_list(Family = datalw$family[i])) %>%
        filter(!is.na(a), !is.na(b), Type == "TL")

      if(nrow(lw.spe) > 0) datalw <- fill_row(datalw, i, lw.spe, level)
    }
  }

  # Report completeness.  (Fixed: in the original the "still missing"
  # branch was a bare string whose value was discarded, so the message
  # was never shown.)
  if(!any(is.na(datalw$a))){
    print("All length-weight relationship coefficients found!")
  } else {
    print("Some coefficients are still missing!")
  }

  # Re-attach the original taxon list alongside the inferred columns.
  datalw <- datalw %>%
    select(-family, -genus, -species, -level) %>%
    rename(level_inferred = taxo)
  datalw <- cbind(taxon_list, datalw)

  if(save == TRUE){
    write.csv(datalw,
              file = paste0("length_weight/length.weight_",
                            survey, "_", date, ".csv"))
  }

  e_time <- Sys.time()
  # Fixed: the original printed s_time - e_time, a negative duration.
  print(e_time - s_time)
  return(datalw)
}
/functions/get_length_weight_coeffs_rfishbase.R
no_license
AquaAuma/FishGlob_data
R
false
false
4,284
r
get_coeffs <- function(taxon_list, survey, date, save = FALSE){
  # Look up length-weight relationship coefficients (a, b) for each taxon
  # in 'taxon_list' from FishBase via the 'rfishbase' package.
  #
  # For every taxon the search cascades from the most specific applicable
  # rank downwards: species -> genus -> family.  At each step only
  # total-length ("TL") records with non-missing a and b are kept, and
  # the retained coefficients are averaged.
  #
  # Args:
  #   taxon_list: data.frame with (at least) columns 'taxa', 'rank',
  #               'family' and 'genus', as produced upstream.
  #   survey, date: strings used only to build the output file name.
  #   save: if TRUE, write the result to
  #         "length_weight/length.weight_<survey>_<date>.csv".
  #
  # Returns: 'taxon_list' with added columns a, b, level_inferred (the
  #          taxonomic level actually used: "spe"/"gen"/"fam"), Type and
  #          source (FishBase release tag).
  library(data.table)
  library(dplyr)
  library(readr)
  library(rfishbase)
  library(stringr)

  s_time <- Sys.time()

  ### Prepare the working table: normalise rank labels, split out the
  ### species epithet, and drop rows without a family.
  datalw <- taxon_list %>%
    dplyr::rename(level = rank) %>%
    mutate(species = str_split(taxa, " ", simplify = TRUE, n = 2)[, 2],
           level = case_when(level == "Family" ~ "family",
                             level == "Genus" ~ "genus",
                             level == "Species" ~ "species",
                             level == "Subspecies" ~ "species")) %>%
    select(family, genus, species, level) %>%
    filter(!is.na(family))
  datalw$a <- datalw$b <- datalw$taxo <- datalw$source <- datalw$Type <- NA

  # Helper: write the averaged coefficients of the retained records 'lw'
  # into row i of 'datalw' and return the updated table (removes the
  # copy-pasted assignment blocks of the original).
  fill_row <- function(datalw, i, lw, level) {
    datalw$a[i] <- mean(lw$a)
    datalw$b[i] <- mean(lw$b)
    datalw$taxo[i] <- level
    datalw$Type[i] <- paste(unique(lw$Type), collapse = "-")
    datalw$source[i] <- paste("fb", available_releases()[1], sep = "-")
    datalw
  }

  ### Get relationships, cascading to broader levels when no record is
  ### found at the current one.
  for(i in seq_len(nrow(datalw))){

    if(datalw$level[i] == 'species'){
      # Species-level lookup.
      level <- 'spe'
      lw.spe <- length_weight(paste(datalw$genus[i], datalw$species[i],
                                    sep = " ")) %>%
        filter(!is.na(a), !is.na(b), Type == "TL")

      # Fall back to all species of the genus.
      if(nrow(lw.spe) == 0 | ncol(lw.spe) < 3){
        lw.spe <- length_weight(species_list(Genus = datalw$genus[i])) %>%
          filter(!is.na(a), !is.na(b), Type == "TL")
        level <- 'gen'
      }

      # Fall back to all species of the family.
      if(nrow(lw.spe) == 0 | ncol(lw.spe) < 3){
        lw.spe <- length_weight(species_list(Family = datalw$family[i])) %>%
          filter(!is.na(a), !is.na(b), Type == "TL")
        level <- 'fam'
      }

      if(nrow(lw.spe) > 0) datalw <- fill_row(datalw, i, lw.spe, level)
    }

    if(datalw$level[i] == 'genus'){
      # Genus-level lookup.
      # NOTE(review): this branch additionally filters a < 1, unlike the
      # species/family branches -- preserved from the original; confirm
      # whether the asymmetry is intentional.
      level <- 'gen'
      lw.spe <- length_weight(species_list(Genus = datalw$genus[i])) %>%
        filter(!is.na(a), a < 1, !is.na(b), Type == "TL")

      # Fall back to all species of the family.
      if(nrow(lw.spe) == 0 | ncol(lw.spe) < 3){
        lw.spe <- length_weight(species_list(Family = datalw$family[i])) %>%
          filter(!is.na(a), !is.na(b), Type == "TL")
        level <- 'fam'
      }

      if(nrow(lw.spe) > 0) datalw <- fill_row(datalw, i, lw.spe, level)
    }

    if(datalw$level[i] == 'family'){
      # Family-level lookup (no broader fallback available).
      level <- 'fam'
      lw.spe <- length_weight(species_list(Family = datalw$family[i])) %>%
        filter(!is.na(a), !is.na(b), Type == "TL")

      if(nrow(lw.spe) > 0) datalw <- fill_row(datalw, i, lw.spe, level)
    }
  }

  # Report completeness.  (Fixed: in the original the "still missing"
  # branch was a bare string whose value was discarded, so the message
  # was never shown.)
  if(!any(is.na(datalw$a))){
    print("All length-weight relationship coefficients found!")
  } else {
    print("Some coefficients are still missing!")
  }

  # Re-attach the original taxon list alongside the inferred columns.
  datalw <- datalw %>%
    select(-family, -genus, -species, -level) %>%
    rename(level_inferred = taxo)
  datalw <- cbind(taxon_list, datalw)

  if(save == TRUE){
    write.csv(datalw,
              file = paste0("length_weight/length.weight_",
                            survey, "_", date, ".csv"))
  }

  e_time <- Sys.time()
  # Fixed: the original printed s_time - e_time, a negative duration.
  print(e_time - s_time)
  return(datalw)
}
##################################################################################################################
#                                      2015 earthquake in Nepal                                                  #
##################################################################################################################

## Load all required packages.
pacman::p_load(rvest, ggmap,leaflet, lubridate, googleVis, dplyr)

## Data source: the seismonepal.gov.np website.
url <- "www.seismonepal.gov.np/"

# There are five pages of data; the scraping loop below is kept for
# reference but disabled in favour of the pre-scraped CSV.
# for( i in 1:5){
#   i = as.numeric(i)
#   url <- paste("http://www.seismonepal.gov.np/index.php?action=earthquakes&show=recent&page=", i, sep="")
#   if (i==1){
#     table <- url %>%
#       read_html() %>%
#       html_nodes(xpath='//*[@class="block2-content"]/table[1]') %>%
#       html_table(header = TRUE) %>%
#       as.data.frame()
#   }else{
#     temp <- url %>%
#       read_html() %>%
#       html_nodes(xpath='//*[@class="block2-content"]/table[1]') %>%
#       html_table(header = TRUE) %>%
#       as.data.frame()
#
#     table = rbind(table, temp)
#   }
# }

# Alternatively, download the pre-scraped data from GitHub.
table = read.csv("https://raw.githubusercontent.com/BkrmDahal/Nepal_earthquake/master/2015_earthquake_nepal.csv",
                 stringsAsFactors = F)

## Feature engineering and type conversion.
table$Date_full = paste(table$Date, table$Local.Time)
table$Date_full = mdy_hm(table$Date_full)
table$hour = hour(table$Date_full)
table$month = month(table$Date_full)
table$year = year(table$Date_full)
table$Latitude = as.double(table$Latitude)
table$Longitude = as.double(table$Longitude)
table$Magnitude.ML. = as.numeric(table$Magnitude.ML.)
# Seismic energy derived from local magnitude (Gutenberg-Richter-style
# relation), and energy relative to the largest (7.6 M) event.
table$Energy = 10^((1.5 * table$Magnitude.ML.) + 4.8)*10
table$Energy_relative_to_7.6 = table$Energy/max(table$Energy)
# Magnitude bucket used for colouring the charts.
table$Rating = ifelse(table$Magnitude.ML.>7, "M greater than 7",
                      ifelse(table$Magnitude.ML.>6, "M greater btn 6-7",
                             ifelse(table$Magnitude.ML.>5, "M greater btn 5-6",
                                    "M greater below 5")))
table = table[complete.cases(table), ]

# ## ggplot/ggmap version (disabled; requires downloading a basemap).
# #IRkernel::set_plot_options(width=800, height=800, units='px')
# map <- get_map(location = "Nepal", zoom = 7, source="osm", color = 'bw')
#
# (ggmap(map, extent = "device") +
#    geom_point(data = table, aes(y = Latitude, x = Longitude, color = factor(Rating), size = Energy), alpha = I(0.6))
#    + scale_size_continuous(range = c(1,50), guide = 'none'))

## Interactive Google charts (googleVis).
# Geo chart of epicentres of magnitude >= 4.5.
table$LatLong = paste(table$Latitude ,table$Longitude, sep=":")
table$Tip = paste("Epicentre:", table$Epicentre,sep="")
table = table[order(table$Magnitude.ML., decreasing = T), ]
geomarker = table[which(table$Magnitude.ML.>=4.5),
                  c("LatLong",'Energy_relative_to_7.6', "Magnitude.ML.", "Tip")]
# NOTE(review): the chart title reads "1015 April" and "aftersock" --
# presumably typos for "2015 April" / "aftershock" in the original; left
# unchanged here because they are user-facing strings.
GeoMarker <- gvisGeoChart(geomarker, "LatLong",
                          hovervar = "Tip",
                          sizevar='Energy_relative_to_7.6',
                          colorvar="Magnitude.ML.",
                          options=list(height=350, width=600,
                                       region="NP",
                                       title="Location of 1015 April earthquake and aftersock greater than 5M epicentre",
                                       chartid="Geomap",
                                       displayMode='markers',
                                       tableOptions="bgcolor=\"#AABBCC\"",
                                       colorAxis="{values:[4.5,6,8], colors:[\'grey', \'orange\', \'red']}"))
plot(GeoMarker)

# Calendar chart: number of aftershocks per day.
calender <- summarise(group_by(table, Date), No_of_earthquake = length(Date))
calender$Date <- mdy(calender$Date)
Cal <- gvisCalendar(calender,
                    datevar="Date",
                    numvar="No_of_earthquake",
                    options=list(
                      title="Daily no of aftersock after april 2015 earthquake",
                      calendar="{cellSize:10, yearLabel:{fontSize:20, color:'#444444'}, focusedCellColor:{stroke:'red'}}",
                      width=600, height=200),
                    chartid="Calendar")

### Bubble chart: time of day vs magnitude.
# Convert "HH:MM" into a decimal hour-of-day.
table$time = sapply(strsplit(table$Local.Time, ":"), function(x) {
  x <- as.numeric(x)
  x[1]+x[2]/60
})
bubble = table[, c("Date_full", "time","Magnitude.ML.",
                   "Energy_relative_to_7.6","Energy", "Rating", "Date", "Epicentre")]
bubble <- gvisBubbleChart(bubble, idvar="Date", xvar="time",
                          yvar="Magnitude.ML.",
                          colorvar="Rating", sizevar="Energy",
                          options = list(width=600, height=350,
                                         title="Time of Earthquake",
                                         chartArea="{left:50,top:50,width:\"75%\",height:\"75%\"}",
                                         hAxis= "{title:'Time (hrs)'}",
                                         chartid = "bubble chart"))
plot(bubble)

# Bar graph: per-epicentre counts of quakes above/below magnitude 5.
table$greater_5 = ifelse(table$Magnitude.ML.>=5,1,0)
table$less_5 = ifelse(table$Magnitude.ML.>=5,0,1)
# Normalise epicentre spellings before grouping.
table$Epicentre = tolower(table$Epicentre)
table$Epicentre = ifelse(table$Epicentre=="sindhupalchok" | table$Epicentre=="sindupalchok",
                         'sindhupalchowk',
                         ifelse(table$Epicentre=="kabre" | table$Epicentre=="kavrepalanchok" | table$Epicentre=="kavrepalanchowk",
                                "kavre", table$Epicentre))
pie = summarise(group_by(table, Epicentre),
                No_of_quakes_greater_than_5 = sum(greater_5),
                No_of_quakes_less_than_5 = sum(less_5) )
pie = pie[which(pie$No_of_quakes_less_than_5>5), ]
Pie <- gvisBarChart(pie, options = list(width=600, height=400,
                                        title="No of quakes below 5 and above 5"))
plot(Pie)

## Merge all four charts into one page and write it to an HTML file.
Combine <- gvisMerge(gvisMerge(GeoMarker, Pie), gvisMerge(bubble, Cal),
                     horizontal=FALSE,
                     tableOptions="bgcolor=\"#AABBCC\"",
                     "Earthquake in Nepal")
plot(Combine)
print(Combine, file="Earthquake in Nepal.html")

################################################Bikram Dahal####################################
/Earthquake_in_Nepal_Vis.R
no_license
BkrmDahal/Nepal_earthquake
R
false
false
6,537
r
##################################################################################################################
# 2015 earthquake in Nepal                                                                                       #
#                                                                                                                #
# Builds interactive googleVis visualisations (geo chart, calendar, bubble                                       #
# chart, bar chart) of the 2015 Nepal earthquake and its aftershocks, from                                       #
# data originally scraped from seismonepal.gov.np.                                                               #
#                                                                                                                #
# Fixes vs. the original script: "1015" -> "2015" and "aftersock" ->                                             #
# "aftershock" in the user-facing chart titles, and chart ids no longer                                          #
# contain spaces (chart ids become HTML element ids, which must not contain                                      #
# whitespace).                                                                                                   #
##################################################################################################################

## Load all required packages.
pacman::p_load(rvest, ggmap, leaflet, lubridate, googleVis, dplyr)

## Download data from the seismonepal.gov.np website.
# `url` is only used by the commented-out scraper below.
url <- "www.seismonepal.gov.np/"

# As there are five pages of data:
# for( i in 1:5){
#   i = as.numeric(i)
#   url <- paste("http://www.seismonepal.gov.np/index.php?action=earthquakes&show=recent&page=", i, sep="")
#   if (i==1){
#     table <- url %>%
#       read_html() %>%
#       html_nodes(xpath='//*[@class="block2-content"]/table[1]') %>%
#       html_table(header = TRUE) %>%
#       as.data.frame()
#   }else{
#     temp <- url %>%
#       read_html() %>%
#       html_nodes(xpath='//*[@class="block2-content"]/table[1]') %>%
#       html_table(header = TRUE) %>%
#       as.data.frame()
#     table = rbind(table, temp)
#   }
# }

# Or you can download the pre-scraped copy from GitHub.
table <- read.csv("https://raw.githubusercontent.com/BkrmDahal/Nepal_earthquake/master/2015_earthquake_nepal.csv",
                  stringsAsFactors = FALSE)

## Feature engineering and type conversion.
table$Date_full <- paste(table$Date, table$Local.Time)
table$Date_full <- mdy_hm(table$Date_full)
table$hour <- hour(table$Date_full)
table$month <- month(table$Date_full)
table$year <- year(table$Date_full)
table$Latitude <- as.double(table$Latitude)
table$Longitude <- as.double(table$Longitude)
table$Magnitude.ML. <- as.numeric(table$Magnitude.ML.)
# Gutenberg-Richter style energy estimate: 10^(1.5*M + 4.8), scaled by 10
# (presumably a unit conversion -- TODO confirm intended units).
table$Energy <- 10^((1.5 * table$Magnitude.ML.) + 4.8) * 10
table$Energy_relative_to_7.6 <- table$Energy / max(table$Energy)
# Magnitude band used for colouring the charts.
table$Rating <- ifelse(table$Magnitude.ML. > 7, "M greater than 7",
                       ifelse(table$Magnitude.ML. > 6, "M greater btn 6-7",
                              ifelse(table$Magnitude.ML. > 5, "M greater btn 5-6",
                                     "M greater below 5")))
# Drop rows with any missing value.
table <- table[complete.cases(table), ]

# ##let make ggplot plot
# #IRkernel::set_plot_options(width=800, height=800, units='px')
# #lets download nepal map
# map <- get_map(location = "Nepal", zoom = 7, source="osm", color = 'bw')
#
# (ggmap(map, extent = "device") +
#   geom_point(data = table, aes(y = Latitude, x = Longitude, color = factor(Rating), size = Energy), alpha = I(0.6))
#   + scale_size_continuous(range = c(1,50), guide = 'none'))

## Interactive google charts.

# Geo chart: marker per event, sized by relative energy, coloured by magnitude.
table$LatLong <- paste(table$Latitude, table$Longitude, sep = ":")
table$Tip <- paste("Epicentre:", table$Epicentre, sep = "")
table <- table[order(table$Magnitude.ML., decreasing = TRUE), ]
geomarker <- table[which(table$Magnitude.ML. >= 4.5),
                   c("LatLong", "Energy_relative_to_7.6", "Magnitude.ML.", "Tip")]
GeoMarker <- gvisGeoChart(geomarker, "LatLong",
                          hovervar = "Tip",
                          sizevar = "Energy_relative_to_7.6",
                          colorvar = "Magnitude.ML.",
                          options = list(height = 350, width = 600,
                                         region = "NP",
                                         title = "Location of 2015 April earthquake and aftershocks greater than 5M epicentre",
                                         chartid = "Geomap",
                                         displayMode = 'markers',
                                         tableOptions = "bgcolor=\"#AABBCC\"",
                                         colorAxis = "{values:[4.5,6,8], colors:['grey', 'orange', 'red']}"))
plot(GeoMarker)

# Calendar chart: number of recorded events per day.
calender <- summarise(group_by(table, Date), No_of_earthquake = length(Date))
calender$Date <- mdy(calender$Date)
Cal <- gvisCalendar(calender,
                    datevar = "Date",
                    numvar = "No_of_earthquake",
                    options = list(
                      title = "Daily no of aftershocks after April 2015 earthquake",
                      calendar = "{cellSize:10, yearLabel:{fontSize:20, color:'#444444'}, focusedCellColor:{stroke:'red'}}",
                      width = 600, height = 200),
                    chartid = "Calendar")

# Bubble chart: time of day (decimal hours) vs. magnitude.
table$time <- sapply(strsplit(table$Local.Time, ":"), function(x) {
  x <- as.numeric(x)
  x[1] + x[2] / 60
})
bubble <- table[, c("Date_full", "time", "Magnitude.ML.", "Energy_relative_to_7.6",
                    "Energy", "Rating", "Date", "Epicentre")]
bubble <- gvisBubbleChart(bubble, idvar = "Date", xvar = "time",
                          yvar = "Magnitude.ML.",
                          colorvar = "Rating", sizevar = "Energy",
                          options = list(width = 600, height = 350,
                                         title = "Time of Earthquake",
                                         chartArea = "{left:50,top:50,width:\"75%\",height:\"75%\"}",
                                         hAxis = "{title:'Time (hrs)'}",
                                         # chart ids become HTML element ids and must not contain spaces
                                         chartid = "BubbleChart"))
plot(bubble)

# Bar graph: events above/below magnitude 5 per (consolidated) epicentre.
table$greater_5 <- ifelse(table$Magnitude.ML. >= 5, 1, 0)
table$less_5 <- ifelse(table$Magnitude.ML. >= 5, 0, 1)
# Normalise epicentre spellings before grouping.
table$Epicentre <- tolower(table$Epicentre)
table$Epicentre <- ifelse(table$Epicentre == "sindhupalchok" | table$Epicentre == "sindupalchok",
                          'sindhupalchowk',
                          ifelse(table$Epicentre == "kabre" | table$Epicentre == "kavrepalanchok" | table$Epicentre == "kavrepalanchowk",
                                 "kavre", table$Epicentre))
pie <- summarise(group_by(table, Epicentre),
                 No_of_quakes_greater_than_5 = sum(greater_5),
                 No_of_quakes_less_than_5 = sum(less_5))
# Keep only epicentres with more than five sub-5 events.
pie <- pie[which(pie$No_of_quakes_less_than_5 > 5), ]
Pie <- gvisBarChart(pie, options = list(width = 600, height = 400,
                                        title = "No of quakes below 5 and above 5"))
plot(Pie)

## Merge all four charts into one page and write it to disk.
Combine <- gvisMerge(gvisMerge(GeoMarker, Pie), gvisMerge(bubble, Cal),
                     horizontal = FALSE,
                     tableOptions = "bgcolor=\"#AABBCC\"",
                     # positional argument is the chart id (HTML element id: no spaces)
                     "EarthquakeInNepal")
plot(Combine)
print(Combine, file = "Earthquake in Nepal.html")
################################################Bikram Dahal####################################
#########################################################
### Problem #1: 3 different ways to make 5 x 5 matrix.###
#########################################################
### Method #1:
### Fill column-wise from the vector 1:25.
method1<- matrix(1:25, 5, 5)
method1

### Method #2: bind five column vectors together.
method2_a <- 1:5
method2_b <- 6:10
method2_c <- 11:15
method2_d <- 16:20
method2_e <- 21:25
method2 <- as.matrix(cbind(method2_a,method2_b, method2_c, method2_d, method2_e))
colnames(method2) <- NULL
method2

### Method #3: bind the same five vectors as rows.
method3 <- as.matrix(rbind(method2_a, method2_b,method2_c, method2_d, method2_e))
rownames(method3) <- NULL
method3

##############################################################################
### Problem 2: Find the sum of all numbers below 1000 that can be divisible ##
### by 3 or 5(Hint: Conditionals)
#############################################################################
### NOTE(review): 1:1000 includes 1000 itself (divisible by 5); a strict
### "below 1000" would use 1:999 -- TODO confirm which was intended.
a <- 1:1000
sum_div <- a[a%%3 ==0 | a%%5 == 0]
sum_div_total = sum(sum_div)
sum_div_total
### [1] 234168

#################################################################################
### Problem 3: Find the sum of the even valued terms of the Fibonacci sequence
### that do not exceed 4,000,000.(Hint: refer to page 110 in our week 1 slides)
#################################################################################
###NOTE: Running this through 4,000,000 causes my computer to freeze,
### so I'm running through 400.
### NOTE(review): 3:400 generates the first 400 Fibonacci TERMS (far larger
### than 4,000,000), not terms up to 4,000,000; the values overflow double
### precision well before term 400.
fib <- numeric()
fib[1] <- fib[2] <- 1
for (i in 3:400){
  fib[i] <- fib[i-2] + fib[i-1]
}
fib_even <- fib[fib%%2 == 0]
fib_even_sum <- sum(fib_even)
fib_even_sum

############################################
### Problem 4: Prove magic square is true.##
############################################
my_mat <- matrix(c(8, 3, 4, 1, 5, 9, 6, 7, 2), ncol = 3)
my_mat
### Checking rows
apply(my_mat, 1, sum)
### [1] 15 15 15
### Checking columns
apply(my_mat, 2, sum)
### [1] 15 15 15
### Checking diagonal
(my_mat[1,1] + my_mat[2,2] + my_mat[3,3])
### [1] 15
### Checking counter diagonal
(my_mat[3,1] + my_mat[2,2] + my_mat[1,3])
### [1] 15

########################################
### Problem 5: Fix the following code:
########################################
### NOTE(review): the block below is the INTENTIONALLY broken exercise code;
### `if(x= 1)` is a syntax error and is kept as given in the assignment.
x <- 1
if(x= 1){
  cat("x is 1")
}else {
  cat("x is not 1")
}
### Fix: add second equal sign in x = 1 statement
x <- 1
if(x== 1){
  cat("x is 1")
}else {
  cat("x is not 1")
}

#########################################################################
### Problem 6: Write function that calculates median absolute deviation:
#########################################################################
a <- rnorm(10)
### [1] -1.16144545  0.45966130 -2.06521499 -0.83999965 -0.28824412 -0.10920736  1.42384990
### [8]  1.78795599  0.03400712  0.22620641
### NOTE(review): `for(i in length(a))` iterates exactly once (i = 10), and
### the expression is not the median absolute deviation
### (MAD = median(|a - median(a)|); compare stats::mad) -- answer looks wrong.
for(i in length(a)){
  # MAD=median(a[i]- median(a)|)
  MAD <- abs(median(a[i] - median(a)))
}
MAD
### [1] 0.2638065

####################################################################
### Problem 7: What is the largest prime number for 600,851,475,143.
####################################################################
### I cannot figure this out.

#########################################################################
### Problem 8: Find the largest palindrome created from 2 3-digit factors
#########################################################################
### Returns TRUE when the decimal representation of x reads the same
### forwards and backwards.
pal_function <- function(x){
  x <- as.character(x)
  forward <- unlist(strsplit(x, split = ""))
  backward <- rev(forward)
  palindrome <- all(forward == backward)
  return(palindrome)
}
### Outer product of all 3-digit numbers gives every 3-digit x 3-digit product.
a <- matrix(999:100, nrow = 1)
a <- t(a)%*%a
### NOTE(review): `x` is undefined here -- this was presumably meant to be
### `a` (the product matrix built just above); TODO confirm.
pal_test <- unique(sort(as.vector(x), decreasing = TRUE))
### NOTE(review): `max_pal` is assigned but never used.
max_pal <- 000000
i <- 1
n <- length(pal_test)
### Walk the candidates from largest to smallest; the first palindrome found
### is the answer.
while (i <= n) {
  if (pal_function(pal_test[i])) {
    result <- pal_test[i]
    print(result)
    break
  }
  i <- i + 1
}
# [1] 906609

########################################################################
###Problem 9:
### Explain what vectorizing a function does and when it's appropriate.
########################################################################
### An un-vectorized function needs you to initialize the vectors and then
### create loops to run over each element of the vector. A vectorized function will
### perform the operation over every element of a vector all at once.

#############################################################################
### Write a vectorized function that will examine an input array of numbers
### and return a logical array of whether each number is a palindrome.
#############################################################################
### NOTE(review): for inputs of length > 1 the `unlist` below flattens the
### characters of ALL elements into one string, so this is only correct for
### scalar input -- TODO confirm it is not truly vectorized.
palindrome <- function(x){
  y <- strsplit(as.character(x), "")
  rev_y <- paste(rev(unlist(y)), collapse= "")
  x == rev_y
}
palindrome(122221)
### [1] TRUE
palindrome(123456)
### [1] FALSE
palindrome ("pop")
### [1] TRUE

############################################################################
### Problem 10: come up with a question from the tips dataset, and show code
### and solution.
############################################################################
library(reshape2)
head(tips)
###Do people tip more for dinner on Sunday than Saturday?
### Cross-tabulate mean tip by meal time and day of week.
dcast(tips,time~day, value.var = "tip", fun = mean, na.rm = TRUE)
###     time      Fri      Sat      Sun     Thur
### 1 Dinner 2.940000 2.993103 3.255132 3.000000
### 2  Lunch 2.382857      NaN      NaN 2.767705
### Answer: on average, people tip approximatel $0.25 for dinner on Sunday
### than they do on Saturday.
/Homework_2.R
no_license
poplock100/NYC_Data_Science
R
false
false
5,579
r
######################################################### ### Problem #1: 3 different ways to make 5 x 5 matrix.### ######################################################### ### Method #1: method1<- matrix(1:25, 5, 5) method1 method2_a <- 1:5 method2_b <- 6:10 method2_c <- 11:15 method2_d <- 16:20 method2_e <- 21:25 method2 <- as.matrix(cbind(method2_a,method2_b, method2_c, method2_d, method2_e)) colnames(method2) <- NULL method2 method3 <- as.matrix(rbind(method2_a, method2_b,method2_c, method2_d, method2_e)) rownames(method3) <- NULL method3 ############################################################################## ### Problem 2: Find the sum of all numbers below 1000 that can be divisible ## ### by 3 or 5(Hint: Conditionals) ############################################################################# a <- 1:1000 sum_div <- a[a%%3 ==0 | a%%5 == 0] sum_div_total = sum(sum_div) sum_div_total ### [1] 234168 ################################################################################# ### Problem 3: Find the sum of the even valued terms of the Fibonacci sequence ### that do not exceed 4,000,000.(Hint: refer to page 110 in our week 1 slides) ################################################################################# ###NOTE: Running this through 4,000,000 causes my computer to freeze, ### so I'm running through 400. 
fib <- numeric() fib[1] <- fib[2] <- 1 for (i in 3:400){ fib[i] <- fib[i-2] + fib[i-1] } fib_even <- fib[fib%%2 == 0] fib_even_sum <- sum(fib_even) fib_even_sum ############################################ ### Problem 4: Prove magic square is true.## ############################################ my_mat <- matrix(c(8, 3, 4, 1, 5, 9, 6, 7, 2), ncol = 3) my_mat ### Checking rows apply(my_mat, 1, sum) ### [1] 15 15 15 ### Checking columns apply(my_mat, 2, sum) ### [1] 15 15 15 ### Checking diagonal (my_mat[1,1] + my_mat[2,2] + my_mat[3,3]) ### [1] 15 ### Checking counter diagonal (my_mat[3,1] + my_mat[2,2] + my_mat[1,3]) ### [1] 15 ######################################## ### Problem 5: Fix the following code: ######################################## x <- 1 if(x= 1){ cat("x is 1") }else { cat("x is not 1") } ### Fix: add second equal sign in x = 1 statement x <- 1 if(x== 1){ cat("x is 1") }else { cat("x is not 1") } ######################################################################### ### Problem 6: Write function that calculates median absolute deviation: ######################################################################### a <- rnorm(10) ### [1] -1.16144545 0.45966130 -2.06521499 -0.83999965 -0.28824412 -0.10920736 1.42384990 ### [8] 1.78795599 0.03400712 0.22620641 for(i in length(a)){ # MAD=median(a[i]- median(a)|) MAD <- abs(median(a[i] - median(a))) } MAD ### [1] 0.2638065 #################################################################### ### Problem 7: What is the largest prime number for 600,851,475,143. #################################################################### ### I cannot figure this out. 
######################################################################### ### Problem 8: Find the largest palindrome created from 2 3-digit factors ######################################################################### pal_function <- function(x){ x <- as.character(x) forward <- unlist(strsplit(x, split = "")) backward <- rev(forward) palindrome <- all(forward == backward) return(palindrome) } a <- matrix(999:100, nrow = 1) a <- t(a)%*%a pal_test <- unique(sort(as.vector(x), decreasing = TRUE)) max_pal <- 000000 i <- 1 n <- length(pal_test) while (i <= n) { if (pal_function(pal_test[i])) { result <- pal_test[i] print(result) break } i <- i + 1 } # [1] 906609 ######################################################################## ###Problem 9: ### Explain what vectorizing a function does and when it's appropriate. ######################################################################## ### An un-vectorized function needs you to initialize the vectors and then ### create loops to run over each element of the vector. A vectorized function will ### perform the operation over every element of a vector all at once. ############################################################################# ### Write a vectorized function that will examine an input array of numbers ### and return a logical array of whether each number is a palindrome. ############################################################################# palindrome <- function(x){ y <- strsplit(as.character(x), "") rev_y <- paste(rev(unlist(y)), collapse= "") x == rev_y } palindrome(122221) ### [1] TRUE palindrome(123456) ### [1] FALSE palindrome ("pop") ### [1] TRUE ############################################################################ ### Problem 10: come up with a question from the tips dataset, and show code ### and solution. ############################################################################ library(reshape2) head(tips) ###Do people tip more for dinner on Sunday than Saturday? 
dcast(tips,time~day, value.var = "tip", fun = mean, na.rm = TRUE) ### time Fri Sat Sun Thur ### 1 Dinner 2.940000 2.993103 3.255132 3.000000 ### 2 Lunch 2.382857 NaN NaN 2.767705 ### Answer: on average, people tip approximatel $0.25 for dinner on Sunday ### than they do on Saturday.
# Data Frame Operations
# A walkthrough of base-R data frame basics: creation, import/export,
# inspection, subsetting, and missing-data handling. Intended to be run
# interactively, line by line.

# 1. Creating data frames
empty <- data.frame() # It creates empty data frame
empty
c1 <- 1:10 # integer vector 1..10
c1
letters # built-in lowercase alphabet
c2 <- letters
c2 <- letters[1:10] # first ten letters, same length as c1
c2
c1
c2
df <- data.frame(col.name.1 = c1, col.name.2 = c2)
df

# 2. Importing and Exporting data
write.csv(df,file = "saved_df.csv")
read.csv('saved_df.csv')

# 3. Getting information about dataframe
df
nrow(df)
ncol(df)
colnames(df)
rownames(df)
summary(df) # statistcal summary of the data frame
str(df) # returns the structure of the data frame

# 4. Referencing Cells
df[[6,2]] # Call single cell value
df[[5,'col.name.2']] # Call single cell by row index and column name
df[[2,'col.name.1']] <- 99 # assign a new value to a single cell
df

# 5. Referencing Rows
df[1,]

# 6. Referencing Columns
mtcars # Built-in Data frame in R
str(mtcars) # returns structure
colnames(mtcars) # column names
head(mtcars) # first 6 rows
tail(mtcars) # last 6 rows
dim(mtcars) # get number of rows and columns

# 4 different ways to get a vector of columns
mtcars[,'drat']
mtcars$drat
mtcars[,5]
mtcars[['drat']] # returns vector
mtcars['drat'] # returns data frame
mtcars[c('mpg','drat')] # Should pass a vector to get multiple columns
head(mtcars[c('mpg','drat')])
head(mtcars,3) # get first 3 rows from the data frame
sapply(mtcars,mean,na.rm=TRUE) # get mean value

# Adding new row
df2 <- data.frame(col.name.1=2000,col.name.2 = 'new')
df2
dfnew <- rbind(df,df2) # append df2's row(s) below df
dfnew
df$newcol <- 2*df$col.name.1 # adding a new (derived) column
df

# Setting column names
colnames(df)
colnames(df) <- c("One","Two",'Three') # rename all the column name at once
df
colnames(df)[1] <- "New One" # rename a single column
df

# Selecting multiple rows
df[2:7,2]
df[-2,] # Select all rows but not 2nd

# Selecting rows with conditions
mtcars[mtcars$mpg >30,]
mtcars[mtcars$mpg > 30 & mtcars$hp >65,]
mtcars[ (mtcars$mpg >25) & (mtcars$cyl == 4), c('mpg','hp','cyl','gear')]
subset(mtcars, mpg >30 & hp > 65) # same filter via subset()

# Selecting multiple columns
mtcars[,c(1,2,3)]
mtcars[,c('mpg','hp','cyl')]
mtcars[,c('mpg','hp','cyl')][1:7,]

# Dealing with missing data
is.na(mtcars) # element-wise NA check
any(is.na(df))
any(is.na(mtcars))
any(is.na(mtcars$mpg))
# Impute NAs with the column mean.
# NOTE(review): mean() here has no na.rm = TRUE, so if mpg actually contained
# NAs the mean itself would be NA; mtcars ships with no missing values, so
# this works only as a demonstration.
mtcars$mpg[is.na(mtcars$mpg)] <- mean(mtcars$mpg)
/Data Frame Operations.R
no_license
Lakshhmi/R-Course
R
false
false
2,174
r
# Data Frame Operations # 1. Creating data frames empty <- data.frame() # It creates empty data frame empty c1 <- 1:10 c1 letters c2 <- letters c2 <- letters[1:10] c2 c1 c2 df <- data.frame(col.name.1 = c1, col.name.2 = c2) df # 2. Importing and Exporting data write.csv(df,file = "saved_df.csv") read.csv('saved_df.csv') # 3. Getting information about dataframe df nrow(df) ncol(df) colnames(df) rownames(df) summary(df) # statistcal summary of the data frame str(df) # returns the structure of the data frame # 4. Referencing Cells df[[6,2]] # Call single cell value df[[5,'col.name.2']] # Call single cell and assign a value df[[2,'col.name.1']] <- 99 df # 5. Referencing Rows df[1,] # 6. Referencing Columns mtcars # Built-in Data frame in R str(mtcars) # returns structure colnames(mtcars) # column names head(mtcars) # first 6 rows tail(mtcars) # last 6 rows dim(mtcars) # get number of rows and columns # 4 different ways to get a vector of columns mtcars[,'drat'] mtcars$drat mtcars[,5] mtcars[['drat']] # returns vector mtcars['drat'] # returns data frame mtcars[c('mpg','drat')] # Should pass a vector to get multiple columns head(mtcars[c('mpg','drat')]) head(mtcars,3) # get first 3 rows from the data frame sapply(mtcars,mean,na.rm=TRUE) # get mean value # Adding new row df2 <- data.frame(col.name.1=2000,col.name.2 = 'new') df2 dfnew <- rbind(df,df2) dfnew df$newcol <- 2*df$col.name.1 df # Setting column names colnames(df) colnames(df) <- c("One","Two",'Three') # rename all the column name at once df colnames(df)[1] <- "New One" df # Selecting multiple rows df[2:7,2] df[-2,] # Select all rows but not 2nd # Selecting rows with conditions mtcars[mtcars$mpg >30,] mtcars[mtcars$mpg > 30 & mtcars$hp >65,] mtcars[ (mtcars$mpg >25) & (mtcars$cyl == 4), c('mpg','hp','cyl','gear')] subset(mtcars, mpg >30 & hp > 65) # Selecting multiple columns mtcars[,c(1,2,3)] mtcars[,c('mpg','hp','cyl')] mtcars[,c('mpg','hp','cyl')][1:7,] # Dealing with missing data is.na(mtcars) any(is.na(df)) 
any(is.na(mtcars)) any(is.na(mtcars$mpg)) mtcars$mpg[is.na(mtcars$mpg)] <- mean(mtcars$mpg)
# Extract the honorific title (e.g. "Mr", "Miss") from a Titanic passenger
# name formatted as "Surname, Title. Given names".
#
# Bug fix: the original split on the pattern ". " without fixed = TRUE, so
# the "." acted as a regex wildcard matching ANY character followed by a
# space (e.g. "the Countess. ..." split at "e " yielding "th"). Both splits
# are now matched literally.
title.extraction.function <- function(name.original) {
  # Part after the surname: "Title. Given names"
  name.split <- strsplit(name.original, ", ", fixed = TRUE)[[1]][2]
  # The title is everything before the first literal ". ".
  title <- strsplit(name.split, ". ", fixed = TRUE)[[1]][1]
  return (title)
}

# Look up the pre-computed mean age for a given title in the aggregate table
# (expected columns: Title, MeanAge).
fetch.mean.age.for.title.function <- function(title,aggregate.age.per.title) {
  return (aggregate.age.per.title$MeanAge[aggregate.age.per.title$Title==title])
}

# Add an is.child factor column: "CHILD" for Age <= 12, otherwise "ADULT".
generate.new.feature.is.child <- function(titanic.data) {
  titanic.data$is.child<-'ADULT'
  titanic.data$is.child[titanic.data$Age<=12]<- 'CHILD'
  titanic.data$is.child <- as.factor(titanic.data$is.child)
  return(titanic.data)
}

# Add a binned.fare factor column with bins <10, 10-20, 20-30, 30+
# (lower bound inclusive, upper bound exclusive).
generate.new.feature.binned.fare <- function(titanic.data) {
  titanic.data$binned.fare <- '30+'
  titanic.data$binned.fare[titanic.data$Fare<30 & titanic.data$Fare>=20] <- '20-30'
  titanic.data$binned.fare[titanic.data$Fare<20 & titanic.data$Fare>=10] <- '10-20'
  titanic.data$binned.fare[titanic.data$Fare<10] <- '<10'
  titanic.data$binned.fare <- as.factor(titanic.data$binned.fare)
  return(titanic.data)
}

# Add family.size = Parch + SibSp (parents/children + siblings/spouses aboard).
generate.new.feature.family.size <- function(titanic.data) {
  titanic.data$family.size <- titanic.data$Parch+titanic.data$SibSp
  return(titanic.data)
}

# Basic type cleanup: factor-ise Survived/Pclass/Embarked, keep Name as
# character, and impute missing Embarked with 'S' (the most common port).
clean.features.one <- function(titanic.data) {
  # Convert appropriate columns as factors
  titanic.data$Survived <- as.factor(titanic.data$Survived )
  titanic.data$Pclass <- as.factor(titanic.data$Pclass )
  titanic.data$Name<-as.character(x =titanic.data$Name)
  titanic.data$Embarked[which(is.na(titanic.data$Embarked))] <- 'S'
  titanic.data$Embarked <- as.factor(titanic.data$Embarked)
  return(titanic.data)
}

# Drop columns not used by the downstream model.
filter.features.one <- function(titanic.data) {
  #Remove Unwanted Columns
  titanic.data$Ticket<-NULL
  titanic.data$Fare<-NULL
  titanic.data$Cabin<-NULL
  titanic.data$PassengerId<-NULL
  titanic.data$Embarked<-NULL
  return(titanic.data)
}
/titanic_helper.R
no_license
aman-290698/Titanic_Analysis_R
R
false
false
1,816
r
# --- Title extraction --------------------------------------------------------

# Pull the honorific title out of a "Surname, Title. Given names" string:
# take the part after the surname, then everything before the first ". ".
title.extraction.function <- function(name.original) {
  after.surname <- strsplit(name.original, ", ")[[1]][2]
  strsplit(after.surname, ". ")[[1]][1]
}

# Look up the pre-computed mean age for `title` in the aggregate table
# (expected columns: Title, MeanAge).
fetch.mean.age.for.title.function <- function(title,aggregate.age.per.title) {
  row.matches <- aggregate.age.per.title$Title == title
  aggregate.age.per.title$MeanAge[row.matches]
}

# --- Feature generation ------------------------------------------------------

# is.child: "CHILD" when Age <= 12, otherwise "ADULT"; stored as a factor.
generate.new.feature.is.child <- function(titanic.data) {
  titanic.data$is.child <- 'ADULT'
  young <- titanic.data$Age <= 12
  titanic.data$is.child[young] <- 'CHILD'
  titanic.data$is.child <- factor(titanic.data$is.child)
  titanic.data
}

# binned.fare: fare band as a factor -- <10, 10-20, 20-30, 30+ (lower bound
# inclusive, upper bound exclusive).
generate.new.feature.binned.fare <- function(titanic.data) {
  fare <- titanic.data$Fare
  titanic.data$binned.fare <- '30+'
  titanic.data$binned.fare[fare >= 20 & fare < 30] <- '20-30'
  titanic.data$binned.fare[fare >= 10 & fare < 20] <- '10-20'
  titanic.data$binned.fare[fare < 10] <- '<10'
  titanic.data$binned.fare <- factor(titanic.data$binned.fare)
  titanic.data
}

# family.size: total relatives aboard (siblings/spouses + parents/children).
generate.new.feature.family.size <- function(titanic.data) {
  titanic.data$family.size <- titanic.data$SibSp + titanic.data$Parch
  titanic.data
}

# --- Cleaning and filtering --------------------------------------------------

# Factor-ise Survived/Pclass/Embarked, keep Name as character, and impute
# missing Embarked with 'S'.
clean.features.one <- function(titanic.data) {
  titanic.data$Survived <- factor(titanic.data$Survived)
  titanic.data$Pclass <- factor(titanic.data$Pclass)
  titanic.data$Name <- as.character(titanic.data$Name)
  missing.port <- which(is.na(titanic.data$Embarked))
  titanic.data$Embarked[missing.port] <- 'S'
  titanic.data$Embarked <- factor(titanic.data$Embarked)
  titanic.data
}

# Drop the columns that the downstream model does not use.
filter.features.one <- function(titanic.data) {
  for (unwanted in c("Ticket", "Fare", "Cabin", "PassengerId", "Embarked")) {
    titanic.data[[unwanted]] <- NULL
  }
  titanic.data
}
# Copyright 2020 Observational Health Data Sciences and Informatics
#
# This file is part of CensoringSensAnalysis
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

#' Run the censoring sensitivity analysis
#'
#' Entry point for the package: optionally (re)creates the study cohorts in
#' the database, then fetches the new cohorts, combines them with previously
#' computed data, computes new effect estimates and calibrates the results.
#'
#' @param connectionDetails    DatabaseConnector connection details object.
#' @param cdmDatabaseSchema    Schema holding the OMOP CDM data.
#' @param oracleTempSchema     Schema for Oracle temp tables.
#' @param cohortDatabaseSchema Schema where the cohort table lives.
#' @param cohortTable          Name of the cohort table.
#' @param indicationFolder     Folder with the original study output.
#'   NOTE(review): the default `indicationFolder = indicationFolder` is
#'   self-referential and will error if the argument is not supplied --
#'   presumably callers always pass it explicitly; TODO confirm.
#' @param sensAnalysisFolder   Output folder for this sensitivity analysis
#'   (created if missing). Same self-referential-default caveat as above.
#' @param targetId             Cohort ID of the target exposure.
#' @param comparatorId         Cohort ID of the comparator exposure.
#' @param createCohorts        Create the cohorts in the database first?
#' @param computeNewEstimates  Compute (and calibrate) the new estimates?
#'
#' @export
execute <- function(connectionDetails,
                    cdmDatabaseSchema,
                    oracleTempSchema,
                    cohortDatabaseSchema,
                    cohortTable,
                    indicationFolder = indicationFolder,
                    sensAnalysisFolder = sensAnalysisFolder,
                    targetId,
                    comparatorId,
                    createCohorts = TRUE,
                    computeNewEstimates = TRUE) {
  # Make sure the output folder exists before any step writes to it.
  if (!file.exists(sensAnalysisFolder)) {
    dir.create(sensAnalysisFolder)
  }
  if (createCohorts) {
    ParallelLogger::logInfo("Creating cohorts")
    connection <- DatabaseConnector::connect(connectionDetails)
    # .createCohorts is a package-internal helper (defined elsewhere in the
    # package) that instantiates the study cohorts in cohortTable.
    .createCohorts(connection = connection,
                   cdmDatabaseSchema = cdmDatabaseSchema,
                   oracleTempSchema = oracleTempSchema,
                   cohortDatabaseSchema = cohortDatabaseSchema,
                   cohortTable = cohortTable,
                   outputFolder = sensAnalysisFolder)
    DatabaseConnector::disconnect(connection)
  }
  if (computeNewEstimates) {
    ParallelLogger::logInfo("Computing new estimates")
    # NOTE(review): newTargetId and newComparatorId are not defined in this
    # function (the parameters are targetId/comparatorId); they are presumably
    # package-level constants -- TODO confirm, otherwise this errors at runtime.
    fetchNewCohorts(connectionDetails,
                    cohortDatabaseSchema,
                    cohortTable,
                    newTargetId,
                    newComparatorId,
                    sensAnalysisFolder)
    combineOldAndNewData(targetId, comparatorId, newTargetId, newComparatorId, indicationFolder, sensAnalysisFolder)
    # NOTE(review): the call below resolves to the package FUNCTION
    # computeNewEstimates (R skips non-function bindings in call position),
    # but it shadows the logical argument of the same name -- confusing.
    computeNewEstimates(sensAnalysisFolder)
    calibrateResults(indicationFolder, sensAnalysisFolder)
  }
}
/extras/CensoringSensAnalysis/R/Main.R
permissive
sebastiaan101/Legend
R
false
false
2,391
r
# Copyright 2020 Observational Health Data Sciences and Informatics # # This file is part of CensoringSensAnalysis # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #' @export execute <- function(connectionDetails, cdmDatabaseSchema, oracleTempSchema, cohortDatabaseSchema, cohortTable, indicationFolder = indicationFolder, sensAnalysisFolder = sensAnalysisFolder, targetId, comparatorId, createCohorts = TRUE, computeNewEstimates = TRUE) { if (!file.exists(sensAnalysisFolder)) { dir.create(sensAnalysisFolder) } if (createCohorts) { ParallelLogger::logInfo("Creating cohorts") connection <- DatabaseConnector::connect(connectionDetails) .createCohorts(connection = connection, cdmDatabaseSchema = cdmDatabaseSchema, oracleTempSchema = oracleTempSchema, cohortDatabaseSchema = cohortDatabaseSchema, cohortTable = cohortTable, outputFolder = sensAnalysisFolder) DatabaseConnector::disconnect(connection) } if (computeNewEstimates) { ParallelLogger::logInfo("Computing new estimates") fetchNewCohorts(connectionDetails, cohortDatabaseSchema, cohortTable, newTargetId, newComparatorId, sensAnalysisFolder) combineOldAndNewData(targetId, comparatorId, newTargetId, newComparatorId, indicationFolder, sensAnalysisFolder) computeNewEstimates(sensAnalysisFolder) calibrateResults(indicationFolder, sensAnalysisFolder) } }
# generate_anthro_1850.R
#
# Generate final figures for the emissions_gridding paper.
#
# Read instructions in gridding_paper_figures.R, then run this file.
#
# Matt Nicholson
# 12/17/19

# All helper functions used below (get_nc_filenames, load_emissions_grid,
# calculate_em_cuts, plot_em_grids) and constants (HISTORICAL_EMS_DIR,
# EM_LIST, NON_CO2, HIST_YEAR_LIST) are defined in the sourced script.
source('code/gridding-paper-figures/gridding_paper_figures.R')

# CH4 NetCDF inputs: the 1850-1960 supplemental file and the 1970-2014 file.
CH4_1850 <- file.path(HISTORICAL_EMS_DIR, 'CH4-em-anthro_input4MIPs_emissions_CMIP_CEDS-2017-05-18-supplemental-data_gn_185001-196012.nc')
CH4_1980 <- file.path(HISTORICAL_EMS_DIR, 'CH4-em-anthro_input4MIPs_emissions_CMIP_CEDS-2017-05-18_gn_197001-201412.nc')
# Fail fast if either CH4 input is missing.
stopifnot(all(file.exists(c(CH4_1850, CH4_1980))))

# Get breaks for consistent scale across all years ------------------------
#
# In order to have all plots show emissions with the same scale, we need to
# provide the same scale breaks for each year we are plotting. To do this, we
# first load all of the grids that need the same scale, then call the function
# `calculate_em_cuts` to get the scale breaks for each species.
#
# Note that the grids are cached when loaded here, so that plotting does not
# have to re-load them.

# Historical gridding file names
print("Gathering historical gridded file names")
nc_files_hist_1850 <- get_nc_filenames(EM_LIST, 'anthro', '1850', HISTORICAL_EMS_DIR,
                                       nc_files = c(CH4=CH4_1850))
nc_files_hist_1950 <- get_nc_filenames(EM_LIST, 'anthro', '1950', HISTORICAL_EMS_DIR,
                                       nc_files = c(CH4=CH4_1980))

# Special arguments for loading each type of file
hist_1850_args <- list(years = 1850,
                       nc_years = list(CH4 = seq(1850, 1960, 10), default = 1850))
# NOTE(review): this loads year 1980 from files named "_1950" above -- the
# 1950/1980 naming is inconsistent; presumably intentional (1980 data comes
# from the 1970-2014 file), TODO confirm.
hist_1980_args <- list(years = 1980, nc_years = HIST_YEAR_LIST)

# Create lists of emissions grids, where each element is an array with year in
# the third dimension
em_grids_hist_1850 <- Map(load_emissions_grid, nc_files_hist_1850, EM_LIST,
                          MoreArgs = hist_1850_args)
em_grids_hist_1950 <- Map(load_emissions_grid, nc_files_hist_1950, EM_LIST,
                          MoreArgs = hist_1980_args)

# Combine emissions for each species across all years and types
em_grids_all <- Map(c, em_grids_hist_1850[NON_CO2], em_grids_hist_1950[NON_CO2])

# Future open burning emissions do not include CO2, so add it separately
em_grids_all['CO2'] <- Map(c, em_grids_hist_1950['CO2'], em_grids_hist_1850['CO2'])

# Calculate the breaks
breaks <- lapply(em_grids_all, calculate_em_cuts, get_breaks = TRUE)

# Create plots ------------------------------------------------------------

# 1850 anthro figure
print("Plotting 1850 anthro figure")
plot_em_grids(EM_LIST, 'anthro', 1850, '1850',
              indir = HISTORICAL_EMS_DIR,
              outdir = 'output/gridding-paper-figures',
              nc_years = list(CH4=seq(1850, 1960, 10), default = 1850),
              nc_files = c(CH4=CH4_1850),
              breaks = breaks)
/CEDS/generate_anthro_1850.R
permissive
mnichol3/my_jgcri
R
false
false
2,699
r
# generate_anthro_1850.R # # Generate final figures for the emissions_gridding paper. # # Read instructions in gridding_paper_figures.R, then run this file. # # Matt Nicholson # 12/17/19 source('code/gridding-paper-figures/gridding_paper_figures.R') CH4_1850 <- file.path(HISTORICAL_EMS_DIR, 'CH4-em-anthro_input4MIPs_emissions_CMIP_CEDS-2017-05-18-supplemental-data_gn_185001-196012.nc') CH4_1980 <- file.path(HISTORICAL_EMS_DIR, 'CH4-em-anthro_input4MIPs_emissions_CMIP_CEDS-2017-05-18_gn_197001-201412.nc') stopifnot(all(file.exists(c(CH4_1850, CH4_1980)))) # Get breaks for consistent scale across all years ------------------------ # # In order to have all plots show emissions with the same scale, we need to # provide the same scale breaks for each year we are plotting. To do this, we # first load all of the grids that need the same scale, then call the function # `calculate_em_cuts` to get the scale breaks for each species. # # Note that the grids are cached when loaded here, so that plotting does not # have to re-load them. 
# Historical gridding file names print("Gathering historical gridded file names") nc_files_hist_1850 <- get_nc_filenames(EM_LIST, 'anthro', '1850', HISTORICAL_EMS_DIR, nc_files = c(CH4=CH4_1850)) nc_files_hist_1950 <- get_nc_filenames(EM_LIST, 'anthro', '1950', HISTORICAL_EMS_DIR, nc_files = c(CH4=CH4_1980)) # Special arguments for loading each type of file hist_1850_args <- list(years = 1850, nc_years = list(CH4 = seq(1850, 1960, 10), default = 1850)) hist_1980_args <- list(years = 1980, nc_years = HIST_YEAR_LIST) # Create lists of emissions grids, where each element is an array with year in # the third dimension em_grids_hist_1850 <- Map(load_emissions_grid, nc_files_hist_1850, EM_LIST, MoreArgs = hist_1850_args) em_grids_hist_1950 <- Map(load_emissions_grid, nc_files_hist_1950, EM_LIST, MoreArgs = hist_1980_args) # Combine emissions for each species across all years and types em_grids_all <- Map(c, em_grids_hist_1850[NON_CO2], em_grids_hist_1950[NON_CO2]) # Future open burning emissions do not include CO2, so add it separately em_grids_all['CO2'] <- Map(c, em_grids_hist_1950['CO2'], em_grids_hist_1850['CO2']) # Calculate the breaks breaks <- lapply(em_grids_all, calculate_em_cuts, get_breaks = TRUE) # Create plots ------------------------------------------------------------ # 1850 anthro figure print("Plotting 1850 anthro figure") plot_em_grids(EM_LIST, 'anthro', 1850, '1850', indir = HISTORICAL_EMS_DIR, outdir = 'output/gridding-paper-figures', nc_years = list(CH4=seq(1850, 1960, 10), default = 1850), nc_files = c(CH4=CH4_1850), breaks = breaks)
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sheets_objects.R
\name{TextToColumnsRequest}
\alias{TextToColumnsRequest}
\title{TextToColumnsRequest Object}
\usage{
TextToColumnsRequest(delimiter = NULL, source = NULL, delimiterType = NULL)
}
\arguments{
\item{delimiter}{The delimiter to use}

\item{source}{The source data range}

\item{delimiterType}{The delimiter type to use}
}
\value{
TextToColumnsRequest object
}
\description{
TextToColumnsRequest Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}

Splits a column of text into multiple columns, based on a delimiter in each cell.
}
/man/TextToColumnsRequest.Rd
no_license
key-Mustang/googleSheetsR
R
false
true
659
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sheets_objects.R
\name{TextToColumnsRequest}
\alias{TextToColumnsRequest}
\title{TextToColumnsRequest Object}
\usage{
TextToColumnsRequest(delimiter = NULL, source = NULL, delimiterType = NULL)
}
\arguments{
\item{delimiter}{The delimiter to use}

\item{source}{The source data range}

\item{delimiterType}{The delimiter type to use}
}
\value{
TextToColumnsRequest object
}
\description{
TextToColumnsRequest Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}

Splits a column of text into multiple columns, based on a delimiter in each cell.
}
library("ISLR") names(Smarket) #Nos tira los nombres de las columnas View(Smarket) data(Smarket) dim(Smarket) #Nos dice la cantidad de filas y columnas summary(Smarket) #Hace un resumen de estadisticas descriptivas cor(Smarket[,-9]) #Armamos una matrix de correlacion entre cada columna omitiendo la variable cualitativa hist(Smarket$Lag1, col = "red") #Trazamos un histogramas de retornos diarios shapiro.test(Smarket$Lag1) #Hay una evidencia fuerte contra la normalidad de los residuos plot(Smarket$Volume) #Ploteamos la evolucion del volumen para el indice S&P ''' Vamos a correr una regresion logistica para predecir el valor de direction, en funcion de lag1 a lag5, y Volume ''' exists(is.null(Smarket)) glm.fit = glm(Smarket$Direction ~ Smarket$Lag1+Smarket$Lag2+Smarket$Lag3+Smarket$Lag4+Smarket$Lag5+Smarket$Volume, family = binomial) summary(glm.fit) ''' No hay muchas evidencias para decir que los retornos de dias anteriores puedan explicar de alguna manera que el valor del indice cierre al alza o la baja ''' ''' Ahora vamos a tratar de predecir las probabilidades con las variables de entrenamiento ''' probs = predict(glm.fit, type = "response") probs #Estas probabilidades indican la probabilidad de que el indice se encuentre al alza contrasts(Smarket$Direction) ''' Ahora vamos a transformar las probabilidades, para que nos diga directamente si el indice sera al alza cuando la probabilidad predicha supere al 0.5, de lo contrario seria a la baja ''' probs[probs > 0.5] = "Up" probs[probs <= 0.5] = "Down" probs table(probs, Smarket$Direction) #Entonces creamos una matrix de confusion para saber la precision del modelo ''' Aunque esto no nos dice nada acerca de la verdadera precision del modelo puesto que solo estamos utilizando las observaciones de entrenamiento ''' ''' Vamos a utilizar como observaciones de entrenamientos, las observaciones entre 2001 y 2004 ''' vector = (Smarket$Year < 2005) ''' Creamos las observaciones para el test ''' x_test = Smarket[!train,] 
y_test = Smarket$Direction[!train] glm.fit = glm(Direction ~ Lag1+Lag2+Lag3+Lag4+Lag5+Volume, data = Smarket, family = binomial, subset = train) glm.predict = predict(glm.fit, x_test, type = "response") #Generamos las predicciones glm.predict[glm.predict > 0.5] = "Up" glm.predict[glm.predict <= 0.5] = "Down" table(glm.predict, y_test) #Ahora creamos la matrix de confusion ''' Esto quiere decir que no podemos predecir el comportamiento del indice a partir del volumen y los retornos pasados ''' ''' Pero podemos mejorar el modelo a partir de reducir el numero de predictores. Esto lo tomamos en cuenta por los p valores arrojados cuando entrenamos el primer modelo con todos los predictores''' glm.fit = glm(Direction ~ Lag1+Lag2, data = Smarket, family = binomial) glm.predict = predict(glm.fit, x_test, type = "response") glm.predict[glm.predict > 0.5] = "Up" glm.predict[glm.predict <= 0.5] = "Down" table(glm.predict, y_test) ''' Podemos ver en la nueva matrix de confusion como mejoro la precision del modelo '''
/Regresion logistica.R
no_license
DaroMiceliPy/AnalisisCuantitativoR
R
false
false
3,101
r
library("ISLR") names(Smarket) #Nos tira los nombres de las columnas View(Smarket) data(Smarket) dim(Smarket) #Nos dice la cantidad de filas y columnas summary(Smarket) #Hace un resumen de estadisticas descriptivas cor(Smarket[,-9]) #Armamos una matrix de correlacion entre cada columna omitiendo la variable cualitativa hist(Smarket$Lag1, col = "red") #Trazamos un histogramas de retornos diarios shapiro.test(Smarket$Lag1) #Hay una evidencia fuerte contra la normalidad de los residuos plot(Smarket$Volume) #Ploteamos la evolucion del volumen para el indice S&P ''' Vamos a correr una regresion logistica para predecir el valor de direction, en funcion de lag1 a lag5, y Volume ''' exists(is.null(Smarket)) glm.fit = glm(Smarket$Direction ~ Smarket$Lag1+Smarket$Lag2+Smarket$Lag3+Smarket$Lag4+Smarket$Lag5+Smarket$Volume, family = binomial) summary(glm.fit) ''' No hay muchas evidencias para decir que los retornos de dias anteriores puedan explicar de alguna manera que el valor del indice cierre al alza o la baja ''' ''' Ahora vamos a tratar de predecir las probabilidades con las variables de entrenamiento ''' probs = predict(glm.fit, type = "response") probs #Estas probabilidades indican la probabilidad de que el indice se encuentre al alza contrasts(Smarket$Direction) ''' Ahora vamos a transformar las probabilidades, para que nos diga directamente si el indice sera al alza cuando la probabilidad predicha supere al 0.5, de lo contrario seria a la baja ''' probs[probs > 0.5] = "Up" probs[probs <= 0.5] = "Down" probs table(probs, Smarket$Direction) #Entonces creamos una matrix de confusion para saber la precision del modelo ''' Aunque esto no nos dice nada acerca de la verdadera precision del modelo puesto que solo estamos utilizando las observaciones de entrenamiento ''' ''' Vamos a utilizar como observaciones de entrenamientos, las observaciones entre 2001 y 2004 ''' vector = (Smarket$Year < 2005) ''' Creamos las observaciones para el test ''' x_test = Smarket[!train,] 
y_test = Smarket$Direction[!train] glm.fit = glm(Direction ~ Lag1+Lag2+Lag3+Lag4+Lag5+Volume, data = Smarket, family = binomial, subset = train) glm.predict = predict(glm.fit, x_test, type = "response") #Generamos las predicciones glm.predict[glm.predict > 0.5] = "Up" glm.predict[glm.predict <= 0.5] = "Down" table(glm.predict, y_test) #Ahora creamos la matrix de confusion ''' Esto quiere decir que no podemos predecir el comportamiento del indice a partir del volumen y los retornos pasados ''' ''' Pero podemos mejorar el modelo a partir de reducir el numero de predictores. Esto lo tomamos en cuenta por los p valores arrojados cuando entrenamos el primer modelo con todos los predictores''' glm.fit = glm(Direction ~ Lag1+Lag2, data = Smarket, family = binomial) glm.predict = predict(glm.fit, x_test, type = "response") glm.predict[glm.predict > 0.5] = "Up" glm.predict[glm.predict <= 0.5] = "Down" table(glm.predict, y_test) ''' Podemos ver en la nueva matrix de confusion como mejoro la precision del modelo '''
# This is a version with suggested updates by T Therneau # All updates are stolen from survexp in the survival package, with comments. # Most changes are used, some further corrections were required. rformulate <- function (formula, data = parent.frame(), ratetable, na.action, rmap, int, centered, cause) { call <- match.call() m <- match.call(expand.dots = FALSE) # keep the parts of the call that we want, toss others m <- m[c(1, match(c("formula", "data", "cause"), names(m), nomatch=0))] m[[1L]] <- quote(stats::model.frame) # per CRAN, the formal way to set it Terms <- if (missing(data)) terms(formula, specials= c("strata","ratetable")) else terms(formula, specials=c("strata", "ratetable"), data = data) Term2 <- Terms #sorting out the ratetable argument - matching demographic variables rate <- attr(Terms, "specials")$ratetable if (length(rate) > 1) stop("Can have only 1 ratetable() call in a formula") #matching demographic variables via rmap if (!missing(rmap)) { # use this by preference if (length(rate) >0) stop("cannot have both ratetable() in the formula and a rmap argument") rcall <- rmap if (!is.call(rcall) || rcall[[1]] != as.name('list')) stop ("Invalid rcall argument") } #done with rmap else if (length(rate) >0) { #sorting out ratetable stemp <- untangle.specials(Terms, 'ratetable') rcall <- as.call(parse(text=stemp$var)[[1]]) # as a call object rcall[[1]] <- as.name('list') # make it a call to list Term2 <- Term2[-stemp$terms] # remove from the formula } else rcall <- NULL # A ratetable, but no rcall or ratetable() # Check that there are no illegal names in rcall, then expand it # to include all the names in the ratetable if (is.ratetable(ratetable)) { israte <- TRUE dimid <- names(dimnames(ratetable)) if (is.null(dimid)) dimid <- attr(ratetable, "dimid") # older style else attr(ratetable, "dimid") <- dimid #put all tables into the old style temp <- match(names(rcall)[-1], dimid) # 2,3,... 
are the argument names if (any(is.na(temp))) stop("Variable not found in the ratetable:", (names(rcall))[is.na(temp)]) if (any(!(dimid %in% names(rcall)))) { to.add <- dimid[!(dimid %in% names(rcall))] temp1 <- paste(text=paste(to.add, to.add, sep='='), collapse=',') if (is.null(rcall)) rcall <- parse(text=paste("list(", temp1, ")"))[[1]] else { temp2 <- deparse(rcall) rcall <- parse(text=paste("c(", temp2, ",list(", temp1, "))"))[[1]] } } } else stop("invalid ratetable") # Create a temporary formula, used only in the call to model.frame, # that has extra variables newvar <- all.vars(rcall) if (length(newvar) > 0) { tform <- paste(paste(deparse(Term2), collapse=""), paste(newvar, collapse='+'), sep='+') m$formula <- as.formula(tform, environment(Terms)) } m <- eval(m, parent.frame()) n <- nrow(m) if (n==0) stop("data set has 0 rows") Y <- model.extract(m, "response") offset <- model.offset(m) if (length(offset)==0) offset <- rep(0., n) if (!is.Surv(Y)) stop("Response must be a survival object") Y.surv <- Y if (attr(Y, "type") == "right") { type <- attr(Y, "type") status <- Y[, 2] Y <- Y[, 1] start <- rep(0, n) ncol0 <- 2 } else if (attr(Y, "type") == "counting") { type <- attr(Y, "type") status <- Y[, 3] start <- Y[, 1] Y <- Y[, 2] ncol0 <- 3 } else stop("Illegal response value") if (any(c(Y, start) < 0)) stop("Negative follow up time") if(max(Y)<30) warning("The event times must be expressed in days! 
(Your max time in the data is less than 30 days) \n") # rdata contains the variables matching the ratetable rdata <- data.frame(eval(rcall, m), stringsAsFactors=TRUE) rtemp <- match.ratetable(rdata, ratetable) #this function puts the dates in R and in cutpoints in rtabledate R <- rtemp$R cutpoints <- rtemp$cutpoints if(is.null(attr(ratetable, "factor"))) attr(ratetable, "factor") <- (attr(ratetable, "type") ==1) attr(ratetable, "dimid") <- dimid rtorig <- attributes(ratetable) nrt <- length(rtorig$dimid) #checking if the ratetable variables are given in days wh.age <- which(dimid=="age") wh.year <- which(dimid=="year") if(length(wh.age)>0){ if (max(R[,wh.age])<150 & median(diff(cutpoints[[wh.age]]))>12) warning("Age in the ratetable part of the formula must be expressed in days! \n (Your max age is less than 150 days) \n") } # TMT -- note the new class if(length(wh.year)>0){ if(min(R[,wh.year])>1850 & max(R[,wh.year])<2020& inherits(cutpoints[[wh.year]], "rtdate")) warning("The calendar year must be one of the date classes (Date, date, POSIXt)\n (Your variable seems to be expressed in years) \n") } #checking if one of the continuous variables is fixed: if(nrt!=ncol(R)){ nonex <- which(is.na(match(rtorig$dimid,attributes(ratetable)$dimid))) for(it in nonex){ if(rtorig$type[it]!=1)warning(paste("Variable ",rtorig$dimid[it]," is held fixed even though it changes in time in the population tables. 
\n (You may wish to set a value for each individual and not just one value for all)",sep="")) } } #NEW in 2.05 (strata) # Now create the X matrix and strata strats <- attr(Term2, "specials")$strata if (length(strats)) { temp_str <- untangle.specials(Term2,"strata",1) if (length(temp_str$vars) == 1) strata.keep <- m[[temp_str$vars]] else strata.keep <- strata(m[,temp_str$vars],shortlabel=TRUE,sep=",") Term2 <- Term2[-temp_str$terms] } else strata.keep <- factor(rep(1,n)) # zgoraj ze definirano n = nrow(m) if (!missing(cause)) strata.keep <- factor(rep(1,n)) attr(Term2, "intercept") <- 1 # ignore a "-1" in the formula X <- model.matrix(Term2, m)[,-1, drop=FALSE] mm <- ncol(X) if (mm > 0 && !missing(centered) && centered) { mvalue <- colMeans(X) X <- X - rep(mvalue, each=nrow(X)) } else mvalue <- double(mm) cause <- model.extract(m, "cause") if(is.null(cause)) cause <- rep(2,nrow(m)) #NEW: ce cause manjka #status[cause==0] <- 0 keep <- Y > start if (!missing(int)) { int <- max(int) status[Y > int * 365.241] <- 0 Y <- pmin(Y, int * 365.241) keep <- keep & (start < int * 365.241) } if (any(start > Y) | any(Y < 0)) stop("Negative follow-up times") if (!all(keep)) { X <- X[keep, , drop = FALSE] Y <- Y[keep] start <- start[keep] status <- status[keep] R <- R[keep, ,drop=FALSE] strata.keep <- strata.keep[keep] # dodano za strato #NEW in 2.05 offset <- offset[keep] Y.surv <- Y.surv[keep, , drop = FALSE] cause <- cause[keep] n <- sum(keep) rdata <- rdata[keep,] } # I do not want to preserve variable class here - so paste R onto here, give it names temp <- R names(temp) <- paste0("X", 1:ncol(temp)) # with the right names #if variable class needs to be preserved, use this instead # variable class. 
So paste on rdata, but with the right order and names #temp <- rdata[,match(dimid, names(rdata))] # in the right order #names(temp) <- paste0("X", 1:ncol(temp)) # with the right names data <- data.frame(start = start, Y = Y, stat = status, temp) if (mm != 0) data <- cbind(data, X) # we pass the altered cutpoints forward, keep them in the date format (could be changed eventually to get rid of the date package dependence) attr(ratetable, "cutpoints") <- lapply(cutpoints, function(x) { if(inherits(x, 'rtabledate')) class(x) <- 'date' x}) out <- list(data = data, R = R, status = status, start = start, Y = Y, X = as.data.frame(X), m = mm, n = n, type = type, Y.surv = Y.surv, Terms = Terms, ratetable = ratetable, offset = offset, formula=formula, cause = cause, mvalue=mvalue, strata.keep=strata.keep) # dodano za strato #NEW in 2.05 na.action <- attr(m, "na.action") if (length(na.action)) out$na.action <- na.action out }
/R/rformulate.r
no_license
cran/relsurv
R
false
false
8,526
r
# This is a version with suggested updates by T Therneau # All updates are stolen from survexp in the survival package, with comments. # Most changes are used, some further corrections were required. rformulate <- function (formula, data = parent.frame(), ratetable, na.action, rmap, int, centered, cause) { call <- match.call() m <- match.call(expand.dots = FALSE) # keep the parts of the call that we want, toss others m <- m[c(1, match(c("formula", "data", "cause"), names(m), nomatch=0))] m[[1L]] <- quote(stats::model.frame) # per CRAN, the formal way to set it Terms <- if (missing(data)) terms(formula, specials= c("strata","ratetable")) else terms(formula, specials=c("strata", "ratetable"), data = data) Term2 <- Terms #sorting out the ratetable argument - matching demographic variables rate <- attr(Terms, "specials")$ratetable if (length(rate) > 1) stop("Can have only 1 ratetable() call in a formula") #matching demographic variables via rmap if (!missing(rmap)) { # use this by preference if (length(rate) >0) stop("cannot have both ratetable() in the formula and a rmap argument") rcall <- rmap if (!is.call(rcall) || rcall[[1]] != as.name('list')) stop ("Invalid rcall argument") } #done with rmap else if (length(rate) >0) { #sorting out ratetable stemp <- untangle.specials(Terms, 'ratetable') rcall <- as.call(parse(text=stemp$var)[[1]]) # as a call object rcall[[1]] <- as.name('list') # make it a call to list Term2 <- Term2[-stemp$terms] # remove from the formula } else rcall <- NULL # A ratetable, but no rcall or ratetable() # Check that there are no illegal names in rcall, then expand it # to include all the names in the ratetable if (is.ratetable(ratetable)) { israte <- TRUE dimid <- names(dimnames(ratetable)) if (is.null(dimid)) dimid <- attr(ratetable, "dimid") # older style else attr(ratetable, "dimid") <- dimid #put all tables into the old style temp <- match(names(rcall)[-1], dimid) # 2,3,... 
are the argument names if (any(is.na(temp))) stop("Variable not found in the ratetable:", (names(rcall))[is.na(temp)]) if (any(!(dimid %in% names(rcall)))) { to.add <- dimid[!(dimid %in% names(rcall))] temp1 <- paste(text=paste(to.add, to.add, sep='='), collapse=',') if (is.null(rcall)) rcall <- parse(text=paste("list(", temp1, ")"))[[1]] else { temp2 <- deparse(rcall) rcall <- parse(text=paste("c(", temp2, ",list(", temp1, "))"))[[1]] } } } else stop("invalid ratetable") # Create a temporary formula, used only in the call to model.frame, # that has extra variables newvar <- all.vars(rcall) if (length(newvar) > 0) { tform <- paste(paste(deparse(Term2), collapse=""), paste(newvar, collapse='+'), sep='+') m$formula <- as.formula(tform, environment(Terms)) } m <- eval(m, parent.frame()) n <- nrow(m) if (n==0) stop("data set has 0 rows") Y <- model.extract(m, "response") offset <- model.offset(m) if (length(offset)==0) offset <- rep(0., n) if (!is.Surv(Y)) stop("Response must be a survival object") Y.surv <- Y if (attr(Y, "type") == "right") { type <- attr(Y, "type") status <- Y[, 2] Y <- Y[, 1] start <- rep(0, n) ncol0 <- 2 } else if (attr(Y, "type") == "counting") { type <- attr(Y, "type") status <- Y[, 3] start <- Y[, 1] Y <- Y[, 2] ncol0 <- 3 } else stop("Illegal response value") if (any(c(Y, start) < 0)) stop("Negative follow up time") if(max(Y)<30) warning("The event times must be expressed in days! 
(Your max time in the data is less than 30 days) \n") # rdata contains the variables matching the ratetable rdata <- data.frame(eval(rcall, m), stringsAsFactors=TRUE) rtemp <- match.ratetable(rdata, ratetable) #this function puts the dates in R and in cutpoints in rtabledate R <- rtemp$R cutpoints <- rtemp$cutpoints if(is.null(attr(ratetable, "factor"))) attr(ratetable, "factor") <- (attr(ratetable, "type") ==1) attr(ratetable, "dimid") <- dimid rtorig <- attributes(ratetable) nrt <- length(rtorig$dimid) #checking if the ratetable variables are given in days wh.age <- which(dimid=="age") wh.year <- which(dimid=="year") if(length(wh.age)>0){ if (max(R[,wh.age])<150 & median(diff(cutpoints[[wh.age]]))>12) warning("Age in the ratetable part of the formula must be expressed in days! \n (Your max age is less than 150 days) \n") } # TMT -- note the new class if(length(wh.year)>0){ if(min(R[,wh.year])>1850 & max(R[,wh.year])<2020& inherits(cutpoints[[wh.year]], "rtdate")) warning("The calendar year must be one of the date classes (Date, date, POSIXt)\n (Your variable seems to be expressed in years) \n") } #checking if one of the continuous variables is fixed: if(nrt!=ncol(R)){ nonex <- which(is.na(match(rtorig$dimid,attributes(ratetable)$dimid))) for(it in nonex){ if(rtorig$type[it]!=1)warning(paste("Variable ",rtorig$dimid[it]," is held fixed even though it changes in time in the population tables. 
\n (You may wish to set a value for each individual and not just one value for all)",sep="")) } } #NEW in 2.05 (strata) # Now create the X matrix and strata strats <- attr(Term2, "specials")$strata if (length(strats)) { temp_str <- untangle.specials(Term2,"strata",1) if (length(temp_str$vars) == 1) strata.keep <- m[[temp_str$vars]] else strata.keep <- strata(m[,temp_str$vars],shortlabel=TRUE,sep=",") Term2 <- Term2[-temp_str$terms] } else strata.keep <- factor(rep(1,n)) # zgoraj ze definirano n = nrow(m) if (!missing(cause)) strata.keep <- factor(rep(1,n)) attr(Term2, "intercept") <- 1 # ignore a "-1" in the formula X <- model.matrix(Term2, m)[,-1, drop=FALSE] mm <- ncol(X) if (mm > 0 && !missing(centered) && centered) { mvalue <- colMeans(X) X <- X - rep(mvalue, each=nrow(X)) } else mvalue <- double(mm) cause <- model.extract(m, "cause") if(is.null(cause)) cause <- rep(2,nrow(m)) #NEW: ce cause manjka #status[cause==0] <- 0 keep <- Y > start if (!missing(int)) { int <- max(int) status[Y > int * 365.241] <- 0 Y <- pmin(Y, int * 365.241) keep <- keep & (start < int * 365.241) } if (any(start > Y) | any(Y < 0)) stop("Negative follow-up times") if (!all(keep)) { X <- X[keep, , drop = FALSE] Y <- Y[keep] start <- start[keep] status <- status[keep] R <- R[keep, ,drop=FALSE] strata.keep <- strata.keep[keep] # dodano za strato #NEW in 2.05 offset <- offset[keep] Y.surv <- Y.surv[keep, , drop = FALSE] cause <- cause[keep] n <- sum(keep) rdata <- rdata[keep,] } # I do not want to preserve variable class here - so paste R onto here, give it names temp <- R names(temp) <- paste0("X", 1:ncol(temp)) # with the right names #if variable class needs to be preserved, use this instead # variable class. 
So paste on rdata, but with the right order and names #temp <- rdata[,match(dimid, names(rdata))] # in the right order #names(temp) <- paste0("X", 1:ncol(temp)) # with the right names data <- data.frame(start = start, Y = Y, stat = status, temp) if (mm != 0) data <- cbind(data, X) # we pass the altered cutpoints forward, keep them in the date format (could be changed eventually to get rid of the date package dependence) attr(ratetable, "cutpoints") <- lapply(cutpoints, function(x) { if(inherits(x, 'rtabledate')) class(x) <- 'date' x}) out <- list(data = data, R = R, status = status, start = start, Y = Y, X = as.data.frame(X), m = mm, n = n, type = type, Y.surv = Y.surv, Terms = Terms, ratetable = ratetable, offset = offset, formula=formula, cause = cause, mvalue=mvalue, strata.keep=strata.keep) # dodano za strato #NEW in 2.05 na.action <- attr(m, "na.action") if (length(na.action)) out$na.action <- na.action out }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parse-utils.R \name{is_association_id} \alias{is_association_id} \title{Is a string a GWAS Catalog association accession ID?} \usage{ is_association_id(str, convert_NA_to_FALSE = TRUE) } \arguments{ \item{str}{A character vector of strings.} \item{convert_NA_to_FALSE}{Whether to treat \code{NA} as \code{NA} (\code{convert_NA_to_FALSE = FALSE}) or whether to return \code{FALSE} when an \code{NA} is found (\code{convert_NA_to_FALSE = TRUE}).} } \value{ A logical vector. } \description{ Find which strings are valid GWAS Catalog association IDs (returns \code{TRUE}). Association IDs are tested against the following regular expression: \code{^\\\\d+$}. } \keyword{internal}
/man/is_association_id.Rd
permissive
ramiromagno/gwasrapidd
R
false
true
756
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parse-utils.R \name{is_association_id} \alias{is_association_id} \title{Is a string a GWAS Catalog association accession ID?} \usage{ is_association_id(str, convert_NA_to_FALSE = TRUE) } \arguments{ \item{str}{A character vector of strings.} \item{convert_NA_to_FALSE}{Whether to treat \code{NA} as \code{NA} (\code{convert_NA_to_FALSE = FALSE}) or whether to return \code{FALSE} when an \code{NA} is found (\code{convert_NA_to_FALSE = TRUE}).} } \value{ A logical vector. } \description{ Find which strings are valid GWAS Catalog association IDs (returns \code{TRUE}). Association IDs are tested against the following regular expression: \code{^\\\\d+$}. } \keyword{internal}
library(igraph) library(Matrix) library(R.matlab) x<-data.frame(c(0,1,0,0,1,0),c(1,0,1,0,1,0),c(0,1,0,1,0,0),c(0,0,1,0,1,1),c(1,1,0,0,1,0),c(0,0,0,1,0,0)) ##### c=0 for(i in 1:nrow(x)){ for(j in 1:ncol(x)){ print(x[i,j]) c=c+1 } } print(c) ### grap<-matrix(nrow=5,ncol=5) grap<-matrix(nrow=5,ncol=5) for(i in 1:nrow(x)){ for(j in 1:ncol(x)){ grap[i,j]=x[i,j] c=c+1 } } #### #population initialisation popin<-function(grap,n){ m<-matrix(nrow=n,ncol=ncol(grap)) for(i in 1:n){ m[i,]=ranpop(grap) } m } ranpop<-function(grap1) { x<-c(1:ncol(grap1)) for(i in 1:ncol(grap1)){ y=sample(1:ncol(grap1),ncol(grap1),replace=F) for(j in y){ if(grap1[i,j]==1){ x[i]=j break } } } x } ####### comunity<-function(m){ c<-matrix(nrow=nrow(m),ncol=ncol(m)) for(i in 1:nrow(m)){ k<-com(m[i,],ncol(m)) for(j in 1:ncol(m)){ c[i,j]<-k[j] } } c } ###### com<-function(a,n){ x<-c(1:n) y<-c(1:n) ## y=a for(p in 1:n){ y[p]=a[p] } d<-c(1:n) for(z in 1:n){ d[z]<-0 } l=0 for(i in 1:n){ if(i==1){ d[i]=1 l=1 } else{ if(i==2){ if(x[1]==y[2]||y[1]==x[2]){ d[2]=d[1] } } else{ for(j in 1:(i-1) ){ if(x[j]==y[i]||y[j]==x[i]){ d[i]=d[j] break } }} if(d[i]==0){ l=l+1 d[i]=l } } } d } ################ comm<-function(a,n){ x<-c(1:n) y<-c(1:n) for(p in 1:n){ y[p]<-a[p] } d<-c(1:n) for(z in 1:n){ d[z]<-0 } print(d[3]) l=1 for(i in 1:n){ k<-0 b<-0 if(d[i]==0){ print(d[i]) while(b!=x[i]&&b!=y[i]){ print(d[i]) if(k==0){ b<-y[i] d[i]<-l d[b]<-l b<-y[b] k<-k+1 } else if(d[b]==0){ d[b]<-l b<-y[b]} } l<-l+1 } } d } ############ com2<-function(a,n){ x<-c(1:n) y<-c(1:n) for(p in 1:n){ y[p]<-a[p] } d<-c(1:n) for(z in 1:n){ d[z]<-0 } b=0 l=1 for(i in 1:n){ if(i==1){ d[i]=l b=y[i] d[b]=l b=y[b] } else if(i!=1&&d[i]==0){ if(i!=n){ for(j in (i+1):n){ if(x[i]==y[j]&&d[y[j]]!=0){ d[i]=d[y[j]] break } else if(y[i]==x[j]&&d[x[j]]!=0){ d[i]=d[x[j]] break } } } } if(i==1||d[i]==0){ while(b!=y[i]||b!=x[i]){ d[b]=d[i] b=y[b] } } if(d[i]==0){ l=l+1 d[i]=l } } d } #############################################3 community_no<-function(a,n){ 
visited<-c(1:n) com<-c(1:n) for(i in 1:n){ visited[i]<-0 } visited[1]<-1 v<-which(q %in% 1) k<-1 com[1]<-1 for(i in v){ com[i]<-1 } m<-matrix(nrow=n,ncol=n) for(i in 1:n){ if(visited[i]==0){ v<-which(q %in% i) } } } #########################################################3 com_no<-function(chrom,col){ m<-matrix(nrow=col,ncol=col) for( i in 1:col){ for(j in 1:col){ m[i,j]<-0 } } for( i in 1:col){ m[i,chrom[i]]<-1 m[chrom[i],i]<-1 } #print(m) com<-c(1:col) visited<-c(1:col) for(i in 1:col){ com[i]<-0 visited[i]<-0 } k<-1 for(i in 1:col){ if(visited[i]==0){ visited[i]<-k com[i]<-k for(j in 1:col){ if(m[i,j]==1){ if(com[i]==0){ com[i]<-k } if(com[j]==0){ com[j]<-k } for(l in 1:col){ if(m[l,j]==1){ if(com[l]==0){ com[l]<-k } if(com[j]==0){ com[j]<-k } } } visited[j]<-1 } } k<-k+1 } } com } ####3#### function to get S matrices (takes input the grap and community array) ########## ##### functtion to append new elements in list #########3 ###########################################3 get_smat<-function(grap,com){ m<-max(com) counts<-table(com) s_mat<-list() for(i in 1:m){ c<-counts[names(counts)==i] mat<-matrix(nrow=c,ncol=c) node<-list() k=1 for(j in 1:length(com)){ if(com[j]==i){ node[k]=j k=k+1 } } for(l in 1:c){ for(m in 1:c){ if(grap[ node[[l]],node[[m]] ]==1){ mat[l,m]<-1 } else{ mat[l,m]<-0 } } } s_mat[[i]]<-mat } s_mat } ################################################### ########### comunity score #################### comunity_score<-function(grap,com,r){ s_mat<-get_smat(grap,com) qvalue<-c(1:length(s_mat)) for( i in 1:length(s_mat)){ s_i<- s_mat[[i]] row_means<-c(1:nrow(s_i)) I<-nrow(s_i ) for( j in 1:I){ row_means[j]<-sum(s_i[j,]) row_means<-(row_means)/I } vs<-sum(s_i) #print("VS") #print(vs) for( j in 1:I){ row_means[j]<-row_means[j]^r } ms<-sum(row_means) #print("MS") #print(ms) ms<-row_means/I qvalue[i]<-ms*vs } sum(qvalue) } ############################### cs<-function(g,com,r){ r<-nrow(com) cscore<-c(1:r) for(i in 1:r ){ 
cscore[i]<-comunity_score(g,com[i,],r) } cscore } ################################################### #####modularity mvalue<-function(g){ l=0 ##for(i in 1:nrow(g)){ ### for(j in 1:ncol(g)){ ## l=l+g[i,j] ## } ##} l=sum(g)/2 l } kvalue<-function(g){ l<-c(1:nrow(g)) for(i in 1:nrow(g)){ l[i]<-0 } for(i in 1:n){ for(j in 1:n ){ l[i]=l[i]+g[i,j] } } l } delta<-function(a,i,j){ l=0 if(a[i]==a[j]) l=1 l } qvalue<-function(g,m,k,c){ l=0 for(i in 1:nrow(g)){ for(j in 1:nrow(g)){ l=l+((g[i,j]-((k[i]*k[j])/(2*m)))*delta(c,i,j)) } } l/(2*m) } modularity<-function(g,c){ #n<-nrow(g) #q<-c(1:nrow(c)) m<-mvalue(g) k<-kvalue(g) #for(i in 1:nrow(c)){ q<-qvalue(g,m,k,c) #} q } ############################################################# ####### selection ############# totalfit<-function(q,n){ l=0 for(i in 1:n){ l=l+q[i] } l } cumprob<-function(q,t,n){ z<-c(1:n) p<-c(1:n) for(i in 1:n){ p[i]<-(q[i]/t) } l=0 for(i in 1:n){ z[i]=l+p[i] l=z[i] } z } selection<-function(c,q){ n<-nrow(c) tfit<-totalfit(q,n) s<-cumprob(q,tfit,n) r<-runif(1,0,1) l=n for(i in 1:n){ if(s[i]>=r){ l=i break} } l } ################################################ ########## mutation ################ mutation<-function(g,m){ n<-ncol(g) r<-sample(1:n,1) k<-sample(1:n,n,replace = F) for(i in 1:n){ if(m[i]!=r&&g[r,k[i]]==1){ m[i]=k[i] break } } m } ############################################### #################### multipoint mutation #################### multipmutation<-function(chromo,nocol,grap){ newchromo<-chromo randchoice=sample(1:nocol,1,replace=F) #print("no of points to be mutetd") #print(randchoice) randindex=sample(1:nocol,randchoice,replace=F) #print("points that are muteted") #print(randindex) for(i in randindex){ randnodes=sample(1:nocol,nocol,replace=F) breakcount=0 for(j in randnodes ){ if(grap[i,j]==1&&newchromo[i]!=j){ newchromo[i]=j breakcount=1 break } } } newchromo } ########### crossover ##################### crossover<-function(a,b,n){ f<-c(1:n) d<-sample(0:1,n,replace=T) s=0 for(i in d){ 
if(i==1){ s=s+1 f[s]=a[s] } else{ s=s+1 f[s]=b[s] } } f } #############################################################33 ############## elitism ######################### elitism<-function(q){ k=max(q) l=which(q %in% k) l[1] } ########################################################################## ############# assingnment function ##################### ############## ga function #################### ga<-function(g,cr,mr,er,r,pop,itr){ m<-popin(g,pop) # print(m) c<-comunity(m) q<-cs(g,c,r) col<-ncol(m) #print(c) newm=matrix(nrow=pop,ncol=ncol(m)) for(i in 1:itr){ s=0 while(s<pop){ re<-runif(1,0,1) rm<-runif(1,0,1) rc<-runif(1,0,1) ##elitism#### if(re<er&&s<pop){ choice=runif(1,0,1) if(choice<0.5){ h<-elitism(q) s<-(s+1) newm[s,]=m[h,] } else{ m1<-max(q) f<-which(q %in% m1) m2<-min(q) l<-which(q %in% m2) m[l[1],]<-m[f[1],] } } ##### mutation ###### if(rm<=mr&&s<pop){ ##### multip for choosing single or mutipoint mutation############ multip<-sample(0:1,1) if(multip==0){ t<-selection(c,q) #print("gene 1") #print(m[t,]) mutate<-mutation(g, m[t,]) #print("offspring") #print(mutate) s<-(s+1) for(i in 1:ncol(m)){ newm[s,i]<-mutate[i] } } else{ t<-selection(c,q) mutate<-multipmutation( m[t,],ncol(g),g) #print("gene 1") #print(m[t,]) #print("offspring") #print(mutate) s<-(s+1) for(i in 1:ncol(m)){ newm[s,i]<-mutate[i] } } } #######crossover######### if(rc<=cr&&s<pop){ a<-0 b<-0 while(a==b){ a<-selection(c,q) b<-selection(c,q) } crosschild<-crossover(m[a,],m[b,],ncol(m)) s<-(s+1) for(i in 1:ncol(m)){ newm[s,i]<-crosschild[i] } } } c1<-comunity(newm) c<-c1 m<-newm q1<-cs(g,c,r) q<-q1 #print(m) #print("newgen") } o=max(q) v<-which(q %in% o) print(max(c[v[1],])) #print(c[v[1],]) #print(c(1:62)) #print(m[v[1],]) #print(length(m[v[1],])) mod<-modularity(g,c[v[1],]) print(mod) o }
/Assignment3/1/GAnet/GAnet.R
no_license
ckant96/Datamining
R
false
false
10,087
r
library(igraph) library(Matrix) library(R.matlab) x<-data.frame(c(0,1,0,0,1,0),c(1,0,1,0,1,0),c(0,1,0,1,0,0),c(0,0,1,0,1,1),c(1,1,0,0,1,0),c(0,0,0,1,0,0)) ##### c=0 for(i in 1:nrow(x)){ for(j in 1:ncol(x)){ print(x[i,j]) c=c+1 } } print(c) ### grap<-matrix(nrow=5,ncol=5) grap<-matrix(nrow=5,ncol=5) for(i in 1:nrow(x)){ for(j in 1:ncol(x)){ grap[i,j]=x[i,j] c=c+1 } } #### #population initialisation popin<-function(grap,n){ m<-matrix(nrow=n,ncol=ncol(grap)) for(i in 1:n){ m[i,]=ranpop(grap) } m } ranpop<-function(grap1) { x<-c(1:ncol(grap1)) for(i in 1:ncol(grap1)){ y=sample(1:ncol(grap1),ncol(grap1),replace=F) for(j in y){ if(grap1[i,j]==1){ x[i]=j break } } } x } ####### comunity<-function(m){ c<-matrix(nrow=nrow(m),ncol=ncol(m)) for(i in 1:nrow(m)){ k<-com(m[i,],ncol(m)) for(j in 1:ncol(m)){ c[i,j]<-k[j] } } c } ###### com<-function(a,n){ x<-c(1:n) y<-c(1:n) ## y=a for(p in 1:n){ y[p]=a[p] } d<-c(1:n) for(z in 1:n){ d[z]<-0 } l=0 for(i in 1:n){ if(i==1){ d[i]=1 l=1 } else{ if(i==2){ if(x[1]==y[2]||y[1]==x[2]){ d[2]=d[1] } } else{ for(j in 1:(i-1) ){ if(x[j]==y[i]||y[j]==x[i]){ d[i]=d[j] break } }} if(d[i]==0){ l=l+1 d[i]=l } } } d } ################ comm<-function(a,n){ x<-c(1:n) y<-c(1:n) for(p in 1:n){ y[p]<-a[p] } d<-c(1:n) for(z in 1:n){ d[z]<-0 } print(d[3]) l=1 for(i in 1:n){ k<-0 b<-0 if(d[i]==0){ print(d[i]) while(b!=x[i]&&b!=y[i]){ print(d[i]) if(k==0){ b<-y[i] d[i]<-l d[b]<-l b<-y[b] k<-k+1 } else if(d[b]==0){ d[b]<-l b<-y[b]} } l<-l+1 } } d } ############ com2<-function(a,n){ x<-c(1:n) y<-c(1:n) for(p in 1:n){ y[p]<-a[p] } d<-c(1:n) for(z in 1:n){ d[z]<-0 } b=0 l=1 for(i in 1:n){ if(i==1){ d[i]=l b=y[i] d[b]=l b=y[b] } else if(i!=1&&d[i]==0){ if(i!=n){ for(j in (i+1):n){ if(x[i]==y[j]&&d[y[j]]!=0){ d[i]=d[y[j]] break } else if(y[i]==x[j]&&d[x[j]]!=0){ d[i]=d[x[j]] break } } } } if(i==1||d[i]==0){ while(b!=y[i]||b!=x[i]){ d[b]=d[i] b=y[b] } } if(d[i]==0){ l=l+1 d[i]=l } } d } #############################################3 community_no<-function(a,n){ 
visited<-c(1:n) com<-c(1:n) for(i in 1:n){ visited[i]<-0 } visited[1]<-1 v<-which(q %in% 1) k<-1 com[1]<-1 for(i in v){ com[i]<-1 } m<-matrix(nrow=n,ncol=n) for(i in 1:n){ if(visited[i]==0){ v<-which(q %in% i) } } } #########################################################3 com_no<-function(chrom,col){ m<-matrix(nrow=col,ncol=col) for( i in 1:col){ for(j in 1:col){ m[i,j]<-0 } } for( i in 1:col){ m[i,chrom[i]]<-1 m[chrom[i],i]<-1 } #print(m) com<-c(1:col) visited<-c(1:col) for(i in 1:col){ com[i]<-0 visited[i]<-0 } k<-1 for(i in 1:col){ if(visited[i]==0){ visited[i]<-k com[i]<-k for(j in 1:col){ if(m[i,j]==1){ if(com[i]==0){ com[i]<-k } if(com[j]==0){ com[j]<-k } for(l in 1:col){ if(m[l,j]==1){ if(com[l]==0){ com[l]<-k } if(com[j]==0){ com[j]<-k } } } visited[j]<-1 } } k<-k+1 } } com } ####3#### function to get S matrices (takes input the grap and community array) ########## ##### functtion to append new elements in list #########3 ###########################################3 get_smat<-function(grap,com){ m<-max(com) counts<-table(com) s_mat<-list() for(i in 1:m){ c<-counts[names(counts)==i] mat<-matrix(nrow=c,ncol=c) node<-list() k=1 for(j in 1:length(com)){ if(com[j]==i){ node[k]=j k=k+1 } } for(l in 1:c){ for(m in 1:c){ if(grap[ node[[l]],node[[m]] ]==1){ mat[l,m]<-1 } else{ mat[l,m]<-0 } } } s_mat[[i]]<-mat } s_mat } ################################################### ########### comunity score #################### comunity_score<-function(grap,com,r){ s_mat<-get_smat(grap,com) qvalue<-c(1:length(s_mat)) for( i in 1:length(s_mat)){ s_i<- s_mat[[i]] row_means<-c(1:nrow(s_i)) I<-nrow(s_i ) for( j in 1:I){ row_means[j]<-sum(s_i[j,]) row_means<-(row_means)/I } vs<-sum(s_i) #print("VS") #print(vs) for( j in 1:I){ row_means[j]<-row_means[j]^r } ms<-sum(row_means) #print("MS") #print(ms) ms<-row_means/I qvalue[i]<-ms*vs } sum(qvalue) } ############################### cs<-function(g,com,r){ r<-nrow(com) cscore<-c(1:r) for(i in 1:r ){ 
cscore[i]<-comunity_score(g,com[i,],r) } cscore } ################################################### #####modularity mvalue<-function(g){ l=0 ##for(i in 1:nrow(g)){ ### for(j in 1:ncol(g)){ ## l=l+g[i,j] ## } ##} l=sum(g)/2 l } kvalue<-function(g){ l<-c(1:nrow(g)) for(i in 1:nrow(g)){ l[i]<-0 } for(i in 1:n){ for(j in 1:n ){ l[i]=l[i]+g[i,j] } } l } delta<-function(a,i,j){ l=0 if(a[i]==a[j]) l=1 l } qvalue<-function(g,m,k,c){ l=0 for(i in 1:nrow(g)){ for(j in 1:nrow(g)){ l=l+((g[i,j]-((k[i]*k[j])/(2*m)))*delta(c,i,j)) } } l/(2*m) } modularity<-function(g,c){ #n<-nrow(g) #q<-c(1:nrow(c)) m<-mvalue(g) k<-kvalue(g) #for(i in 1:nrow(c)){ q<-qvalue(g,m,k,c) #} q } ############################################################# ####### selection ############# totalfit<-function(q,n){ l=0 for(i in 1:n){ l=l+q[i] } l } cumprob<-function(q,t,n){ z<-c(1:n) p<-c(1:n) for(i in 1:n){ p[i]<-(q[i]/t) } l=0 for(i in 1:n){ z[i]=l+p[i] l=z[i] } z } selection<-function(c,q){ n<-nrow(c) tfit<-totalfit(q,n) s<-cumprob(q,tfit,n) r<-runif(1,0,1) l=n for(i in 1:n){ if(s[i]>=r){ l=i break} } l } ################################################ ########## mutation ################ mutation<-function(g,m){ n<-ncol(g) r<-sample(1:n,1) k<-sample(1:n,n,replace = F) for(i in 1:n){ if(m[i]!=r&&g[r,k[i]]==1){ m[i]=k[i] break } } m } ############################################### #################### multipoint mutation #################### multipmutation<-function(chromo,nocol,grap){ newchromo<-chromo randchoice=sample(1:nocol,1,replace=F) #print("no of points to be mutetd") #print(randchoice) randindex=sample(1:nocol,randchoice,replace=F) #print("points that are muteted") #print(randindex) for(i in randindex){ randnodes=sample(1:nocol,nocol,replace=F) breakcount=0 for(j in randnodes ){ if(grap[i,j]==1&&newchromo[i]!=j){ newchromo[i]=j breakcount=1 break } } } newchromo } ########### crossover ##################### crossover<-function(a,b,n){ f<-c(1:n) d<-sample(0:1,n,replace=T) s=0 for(i in d){ 
if(i==1){ s=s+1 f[s]=a[s] } else{ s=s+1 f[s]=b[s] } } f } #############################################################33 ############## elitism ######################### elitism<-function(q){ k=max(q) l=which(q %in% k) l[1] } ########################################################################## ############# assingnment function ##################### ############## ga function #################### ga<-function(g,cr,mr,er,r,pop,itr){ m<-popin(g,pop) # print(m) c<-comunity(m) q<-cs(g,c,r) col<-ncol(m) #print(c) newm=matrix(nrow=pop,ncol=ncol(m)) for(i in 1:itr){ s=0 while(s<pop){ re<-runif(1,0,1) rm<-runif(1,0,1) rc<-runif(1,0,1) ##elitism#### if(re<er&&s<pop){ choice=runif(1,0,1) if(choice<0.5){ h<-elitism(q) s<-(s+1) newm[s,]=m[h,] } else{ m1<-max(q) f<-which(q %in% m1) m2<-min(q) l<-which(q %in% m2) m[l[1],]<-m[f[1],] } } ##### mutation ###### if(rm<=mr&&s<pop){ ##### multip for choosing single or mutipoint mutation############ multip<-sample(0:1,1) if(multip==0){ t<-selection(c,q) #print("gene 1") #print(m[t,]) mutate<-mutation(g, m[t,]) #print("offspring") #print(mutate) s<-(s+1) for(i in 1:ncol(m)){ newm[s,i]<-mutate[i] } } else{ t<-selection(c,q) mutate<-multipmutation( m[t,],ncol(g),g) #print("gene 1") #print(m[t,]) #print("offspring") #print(mutate) s<-(s+1) for(i in 1:ncol(m)){ newm[s,i]<-mutate[i] } } } #######crossover######### if(rc<=cr&&s<pop){ a<-0 b<-0 while(a==b){ a<-selection(c,q) b<-selection(c,q) } crosschild<-crossover(m[a,],m[b,],ncol(m)) s<-(s+1) for(i in 1:ncol(m)){ newm[s,i]<-crosschild[i] } } } c1<-comunity(newm) c<-c1 m<-newm q1<-cs(g,c,r) q<-q1 #print(m) #print("newgen") } o=max(q) v<-which(q %in% o) print(max(c[v[1],])) #print(c[v[1],]) #print(c(1:62)) #print(m[v[1],]) #print(length(m[v[1],])) mod<-modularity(g,c[v[1],]) print(mod) o }
/Retos/Reto 1/intentoItatira.R
no_license
escobartc/AnalisisNumerico-2130
R
false
false
5,217
r
library(ERP) ### Name: ERP-package ### Title: Significance Analysis of Event-Related Potentials Data: ### Significance Analysis of Event-Related Potentials Data ### Aliases: ERP-package ERP ### ** Examples ## Not run: ##D ##D data(impulsivity) ##D ##D # Paired t-tests for the comparison of the ERP curves in the two conditions, ##D # within experimental group High, at channel CPZ ##D ##D erpdta.highCPZ = impulsivity[(impulsivity$Group=="High")&(impulsivity$Channel=="CPZ"),5:505] ##D # ERP curves for subjects in group 'High' ##D covariates.highCPZ = impulsivity[(impulsivity$Group=="High")&(impulsivity$Channel=="CPZ"),1:4] ##D covariates.highCPZ = droplevels(covariates.highCPZ) ##D # Experimental covariates for subjects in group High ##D ##D design = model.matrix(~C(Subject,sum)+Condition,data=covariates.highCPZ) ##D # Design matrix to compare ERP curves in the two conditions ##D design0 = model.matrix(~C(Subject,sum),data=covariates.highCPZ) ##D # Design matrix for the null model (no condition effect) ##D ##D tests = erpfatest(erpdta.highCPZ,design,design0,nbf=NULL,wantplot=TRUE,significance="none") ##D # with significance="none", just to choose a number of factors ##D Ftest = erpFtest(erpdta.highCPZ,design,design0,nbf=6) ##D Ftest$pval ##D tests = erpfatest(erpdta.highCPZ,design,design0,nbf=6) ##D # with nbf=6 (approximate conservative recommendation based on the variance inflation plot) ##D ##D time_pt = seq(0,1000,2) # Sequence of time points (1 time point every 2ms in [0,1000]) ##D nbs = 20 # Number of B-splines for the plot of the effect curve ##D effect=which(colnames(design)=="ConditionSuccess") ##D erpplot(erpdta.highCPZ,design=design,frames=time_pt,effect=effect,xlab="Time (ms)", ##D ylab=expression(Effect~curve~(mu~V)),bty="l",ylim=c(-3,3),nbs=nbs, ##D cex.axis=1.25,cex.lab=1.25,interval="simultaneous") ##D # with interval="simultaneous", both the pointwise and the simultaneous confidence bands ##D # are plotted ##D 
points(time_pt[tests$significant],rep(0,length(tests$significant)),pch=16,col="blue") ##D # Identifies significant time points by blue dots ##D title("Success-Failure effect curve with 95 percent C.I.",cex.main=1.25) ##D mtext(paste("12 subjects - Group High - ",nbs," B-splines",sep=""),cex=1.25) ## End(Not run)
/data/genthat_extracted_code/ERP/examples/ERP-package.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
2,359
r
library(ERP) ### Name: ERP-package ### Title: Significance Analysis of Event-Related Potentials Data: ### Significance Analysis of Event-Related Potentials Data ### Aliases: ERP-package ERP ### ** Examples ## Not run: ##D ##D data(impulsivity) ##D ##D # Paired t-tests for the comparison of the ERP curves in the two conditions, ##D # within experimental group High, at channel CPZ ##D ##D erpdta.highCPZ = impulsivity[(impulsivity$Group=="High")&(impulsivity$Channel=="CPZ"),5:505] ##D # ERP curves for subjects in group 'High' ##D covariates.highCPZ = impulsivity[(impulsivity$Group=="High")&(impulsivity$Channel=="CPZ"),1:4] ##D covariates.highCPZ = droplevels(covariates.highCPZ) ##D # Experimental covariates for subjects in group High ##D ##D design = model.matrix(~C(Subject,sum)+Condition,data=covariates.highCPZ) ##D # Design matrix to compare ERP curves in the two conditions ##D design0 = model.matrix(~C(Subject,sum),data=covariates.highCPZ) ##D # Design matrix for the null model (no condition effect) ##D ##D tests = erpfatest(erpdta.highCPZ,design,design0,nbf=NULL,wantplot=TRUE,significance="none") ##D # with significance="none", just to choose a number of factors ##D Ftest = erpFtest(erpdta.highCPZ,design,design0,nbf=6) ##D Ftest$pval ##D tests = erpfatest(erpdta.highCPZ,design,design0,nbf=6) ##D # with nbf=6 (approximate conservative recommendation based on the variance inflation plot) ##D ##D time_pt = seq(0,1000,2) # Sequence of time points (1 time point every 2ms in [0,1000]) ##D nbs = 20 # Number of B-splines for the plot of the effect curve ##D effect=which(colnames(design)=="ConditionSuccess") ##D erpplot(erpdta.highCPZ,design=design,frames=time_pt,effect=effect,xlab="Time (ms)", ##D ylab=expression(Effect~curve~(mu~V)),bty="l",ylim=c(-3,3),nbs=nbs, ##D cex.axis=1.25,cex.lab=1.25,interval="simultaneous") ##D # with interval="simultaneous", both the pointwise and the simultaneous confidence bands ##D # are plotted ##D 
points(time_pt[tests$significant],rep(0,length(tests$significant)),pch=16,col="blue") ##D # Identifies significant time points by blue dots ##D title("Success-Failure effect curve with 95 percent C.I.",cex.main=1.25) ##D mtext(paste("12 subjects - Group High - ",nbs," B-splines",sep=""),cex=1.25) ## End(Not run)
#!/usr/bin/env Rscript # # Wrapper for running CIBERSORT.R by command line. # CIBERSORT.R uses 3 parallel threads. # Note: writes CIBERSORT-Results.txt file to working directory library(optparse) # Command line arguments opt_list = list( make_option(c("-l", "--library"), type="character", default=".", help="Folder containing CIBERSORT.R source code. Access is distributed through cibersort.stanford.edu"), make_option(c("-b", "--basis"), type="character", default=NULL, help=".tsv file containing gene expression matrix of basis cell types."), make_option(c("-o", "--output"), type="character", default="cibersort_output", help="Output folder.") ) opt_parser = OptionParser( option_list=opt_list, usage="%prog [options] expr_file") opt = parse_args(opt_parser, positional_arguments=1) expr_file = opt$args[1] # Load CIBERSORT source code source(file.path(opt$options$library, "CIBERSORT.R")) message("Running CIBERSORT on: ", expr_file) # Run CIBERSORT results = CIBERSORT( opt$options$basis, # path to basis expression file expr_file # expression file ) # Write output to file dir.create(opt$options$output) # create output folder write.table(results, file.path(opt$options$output, paste0( basename(expr_file), ".tsv")), sep="\t", quote=FALSE)
/src/runCibersort.r
no_license
skoplev/hpc-cibersort
R
false
false
1,296
r
#!/usr/bin/env Rscript # # Wrapper for running CIBERSORT.R by command line. # CIBERSORT.R uses 3 parallel threads. # Note: writes CIBERSORT-Results.txt file to working directory library(optparse) # Command line arguments opt_list = list( make_option(c("-l", "--library"), type="character", default=".", help="Folder containing CIBERSORT.R source code. Access is distributed through cibersort.stanford.edu"), make_option(c("-b", "--basis"), type="character", default=NULL, help=".tsv file containing gene expression matrix of basis cell types."), make_option(c("-o", "--output"), type="character", default="cibersort_output", help="Output folder.") ) opt_parser = OptionParser( option_list=opt_list, usage="%prog [options] expr_file") opt = parse_args(opt_parser, positional_arguments=1) expr_file = opt$args[1] # Load CIBERSORT source code source(file.path(opt$options$library, "CIBERSORT.R")) message("Running CIBERSORT on: ", expr_file) # Run CIBERSORT results = CIBERSORT( opt$options$basis, # path to basis expression file expr_file # expression file ) # Write output to file dir.create(opt$options$output) # create output folder write.table(results, file.path(opt$options$output, paste0( basename(expr_file), ".tsv")), sep="\t", quote=FALSE)
#Set Working Directory and Read Data getwd() #windows setwd("C:/Users/user hp/Documents/GitHub/DataMining-master/DataMining-master/Datasets") getwd() stats <- read.csv("Test-Data.csv") stats #Split the Data 1960 & 2013 stats[stats$Year == "1960",] df1960 <- stats[c(1:187),] df1960 df2013 <- stats[c(188:374),] df2013 #Create dataframes for the split data 1960 & 2013 mydf1960 <- data.frame(df1960) mydf1960 mydf2013 <- data.frame(df2013) mydf2013 #Add new column for Life Expectancy Data Country_Code <- c("ABW","AFG","AGO","ALB","ARE","ARG","ARM","ATG","AUS","AUT","AZE","BDI","BEL","BEN","BFA","BGD","BGR","BHR","BHS","BIH","BLR","BLZ","BOL","BRA","BRB","BRN","BTN","BWA","CAF","CAN","CHE","CHL","CHN","CIV","CMR","COG","COL","COM","CPV","CRI","CUB","CYP","CZE","DEU","DJI","DNK","DOM","DZA","ECU","EGY","ERI","ESP","EST","ETH","FIN","FJI","FRA","FSM","GAB","GBR","GEO","GHA","GIN","GMB","GNB","GNQ","GRC","GRD","GTM","GUM","GUY","HKG","HND","HRV","HTI","HUN","IDN","IND","IRL","IRN","IRQ","ISL","ITA","JAM","JOR","JPN","KAZ","KEN","KGZ","KHM","KIR","KOR","KWT","LAO","LBN","LBR","LBY","LCA","LKA","LSO","LTU","LUX","LVA","MAC","MAR","MDA","MDG","MDV","MEX","MKD","MLI","MLT","MMR","MNE","MNG","MOZ","MRT","MUS","MWI","MYS","NAM","NCL","NER","NGA","NIC","NLD","NOR","NPL","NZL","OMN","PAK","PAN","PER","PHL","PNG","POL","PRI","PRT","PRY","PYF","QAT","ROU","RUS","RWA","SAU","SDN","SEN","SGP","SLB","SLE","SLV","SOM","SSD","STP","SUR","SVK","SVN","SWE","SWZ","SYR","TCD","TGO","THA","TJK","TKM","TLS","TON","TTO","TUN","TUR","TZA","UGA","UKR","URY","USA","UZB","VCT","VEN","VIR","VNM","VUT","WSM","YEM","ZAF","COD","ZMB","ZWE") Life_Expectancy_At_Birth_1960 <- 
c(65.5693658536586,32.328512195122,32.9848292682927,62.2543658536585,52.2432195121951,65.2155365853659,65.8634634146342,61.7827317073171,70.8170731707317,68.5856097560976,60.836243902439,41.2360487804878,69.7019512195122,37.2782682926829,34.4779024390244,45.8293170731707,69.2475609756098,52.0893658536585,62.7290487804878,60.2762195121951,67.7080975609756,59.9613658536585,42.1183170731707,54.2054634146342,60.7380487804878,62.5003658536585,32.3593658536585,50.5477317073171,36.4826341463415,71.1331707317073,71.3134146341463,57.4582926829268,43.4658048780488,36.8724146341463,41.523756097561,48.5816341463415,56.716756097561,41.4424390243903,48.8564146341463,60.5761951219512,63.9046585365854,69.5939268292683,70.3487804878049,69.3129512195122,44.0212682926829,72.1765853658537,51.8452682926829,46.1351219512195,53.215,48.0137073170732,37.3629024390244,69.1092682926829,67.9059756097561,38.4057073170732,68.819756097561,55.9584878048781,69.8682926829268,57.5865853658537,39.5701219512195,71.1268292682927,63.4318536585366,45.8314634146342,34.8863902439024,32.0422195121951,37.8404390243902,36.7330487804878,68.1639024390244,59.8159268292683,45.5316341463415,61.2263414634146,60.2787317073171,66.9997073170732,46.2883170731707,64.6086585365854,42.1000975609756,68.0031707317073,48.6403170731707,41.1719512195122,69.691756097561,44.945512195122,48.0306829268293,73.4286585365854,69.1239024390244,64.1918292682927,52.6852682926829,67.6660975609756,58.3675853658537,46.3624146341463,56.1280731707317,41.2320243902439,49.2159756097561,53.0013170731707,60.3479512195122,43.2044634146342,63.2801219512195,34.7831707317073,42.6411951219512,57.303756097561,59.7471463414634,46.5107073170732,69.8473170731707,68.4463902439024,69.7868292682927,64.6609268292683,48.4466341463415,61.8127804878049,39.9746829268293,37.2686341463415,57.0656341463415,60.6228048780488,28.2116097560976,67.6017804878049,42.7363902439024,63.7056097560976,48.3688048780488,35.0037073170732,43.4830975609756,58.7452195121951,37.7736341
463415,59.4753414634146,46.8803902439024,58.6390243902439,35.5150487804878,37.1829512195122,46.9988292682927,73.3926829268293,73.549756097561,35.1708292682927,71.2365853658537,42.6670731707317,45.2904634146342,60.8817073170732,47.6915853658537,57.8119268292683,38.462243902439,67.6804878048781,68.7196097560976,62.8089268292683,63.7937073170732,56.3570487804878,61.2060731707317,65.6424390243903,66.0552926829268,42.2492926829268,45.6662682926829,48.1876341463415,38.206,65.6598292682927,49.3817073170732,30.3315365853659,49.9479268292683,36.9658780487805,31.6767073170732,50.4513658536585,59.6801219512195,69.9759268292683,68.9780487804878,73.0056097560976,44.2337804878049,52.768243902439,38.0161219512195,40.2728292682927,54.6993170731707,56.1535365853659,54.4586829268293,33.7271219512195,61.3645365853659,62.6575853658537,42.009756097561,45.3844146341463,43.6538780487805,43.9835609756098,68.2995365853659,67.8963902439025,69.7707317073171,58.8855365853659,57.7238780487805,59.2851219512195,63.7302195121951,59.0670243902439,46.4874878048781,49.969512195122,34.3638048780488,49.0362926829268,41.0180487804878,45.1098048780488,51.5424634146342) Life_Expectancy_At_Birth_2013 <- 
c(75.3286585365854,60.0282682926829,51.8661707317073,77.537243902439,77.1956341463415,75.9860975609756,74.5613658536585,75.7786585365854,82.1975609756098,80.890243902439,70.6931463414634,56.2516097560976,80.3853658536585,59.3120243902439,58.2406341463415,71.245243902439,74.4658536585366,76.5459512195122,75.0735365853659,76.2769268292683,72.4707317073171,69.9820487804878,67.9134390243903,74.1224390243903,75.3339512195122,78.5466585365854,69.1029268292683,64.3608048780488,49.8798780487805,81.4011219512195,82.7487804878049,81.1979268292683,75.3530243902439,51.2084634146342,55.0418048780488,61.6663902439024,73.8097317073171,62.9321707317073,72.9723658536585,79.2252195121951,79.2563902439025,79.9497804878049,78.2780487804878,81.0439024390244,61.6864634146342,80.3024390243903,73.3199024390244,74.5689512195122,75.648512195122,70.9257804878049,63.1778780487805,82.4268292682927,76.4243902439025,63.4421951219512,80.8317073170732,69.9179268292683,81.9682926829268,68.9733902439024,63.8435853658537,80.9560975609756,74.079512195122,61.1420731707317,58.216487804878,59.9992682926829,54.8384146341464,57.2908292682927,80.6341463414634,73.1935609756098,71.4863902439024,78.872512195122,66.3100243902439,83.8317073170732,72.9428536585366,77.1268292682927,62.4011463414634,75.2682926829268,68.7046097560976,67.6604146341463,81.0439024390244,75.1259756097561,69.4716829268293,83.1170731707317,82.290243902439,73.4689268292683,73.9014146341463,83.3319512195122,70.45,60.9537804878049,70.2024390243902,67.7720487804878,65.7665853658537,81.459756097561,74.462756097561,65.687243902439,80.1288780487805,60.5203902439024,71.6576829268293,74.9127073170732,74.2402926829268,49.3314634146342,74.1634146341464,81.7975609756098,73.9804878048781,80.3391463414634,73.7090487804878,68.811512195122,64.6739024390244,76.6026097560976,76.5326585365854,75.1870487804878,57.5351951219512,80.7463414634146,65.6540975609756,74.7583658536585,69.0618048780488,54.641512195122,62.8027073170732,74.46,61.466,74.567512195122,64.3
438780487805,77.1219512195122,60.8281463414634,52.4421463414634,74.514756097561,81.1048780487805,81.4512195121951,69.222,81.4073170731707,76.8410487804878,65.9636829268293,77.4192195121951,74.2838536585366,68.1315609756097,62.4491707317073,76.8487804878049,78.7111951219512,80.3731707317073,72.7991707317073,76.3340731707317,78.4184878048781,74.4634146341463,71.0731707317073,63.3948292682927,74.1776341463415,63.1670487804878,65.878756097561,82.3463414634146,67.7189268292683,50.3631219512195,72.4981463414634,55.0230243902439,55.2209024390244,66.259512195122,70.99,76.2609756097561,80.2780487804878,81.7048780487805,48.9379268292683,74.7157804878049,51.1914878048781,59.1323658536585,74.2469268292683,69.4001707317073,65.4565609756098,67.5223658536585,72.6403414634147,70.3052926829268,73.6463414634147,75.1759512195122,64.2918292682927,57.7676829268293,71.159512195122,76.8361951219512,78.8414634146341,68.2275853658537,72.8108780487805,74.0744146341464,79.6243902439024,75.756487804878,71.669243902439,73.2503902439024,63.583512195122,56.7365853658537,58.2719268292683,59.2373658536585,55.633) Life60 <- data.frame(Code= Country_Code, Life.Expectancy= Life_Expectancy_At_Birth_1960) Life13 <- data.frame(Code= Country_Code, Life.Expectancy= Life_Expectancy_At_Birth_2013) #Merging Dataframes DF60 <- merge(mydf1960, Life60, by.x = "Country.Code", by.y = "Code") DF13 <- merge(mydf2013, Life13, by.x = "Country.Code", by.y = "Code") head(DF60) head(DF13) rm(df1960, df2013, Life13, Life60, mydf1960, mydf2013, Country_Code, Life_Expectancy_At_Birth_1960, Life_Expectancy_At_Birth_2013) #Visualizing Data 1960 library(ggplot2) qplot(data = DF60, x = Fertility.Rate, y = Life.Expectancy, color = Country.Name, size=I(3), shape=I(19), alpha =I(.4), main = "Life Expectancy vs Fertility Rate") qplot(data = DF60, x = Fertility.Rate, y = Life.Expectancy, color = Region, size=I(3), shape=I(19), alpha =I(.4), main = "Life Expectancy vs Fertility Rate by Region (1960)") #Visualizing Data 2013 
qplot(data = DF13, x = Fertility.Rate, y = Life.Expectancy, color = Country.Name, size=I(3), shape=I(19), alpha =I(.4), main = "Life Expectancy vs Fertility Rate") qplot(data = DF13, x = Fertility.Rate, y = Life.Expectancy, color = Region, size=I(3), shape=I(19), alpha =I(.4), main = "Life Expectancy vs Fertility Rate by Region (2013)")
/Unit_1/Exams/Exam.R
no_license
Angi-Reynoso/Mineria_de_Datos
R
false
false
9,242
r
#Set Working Directory and Read Data getwd() #windows setwd("C:/Users/user hp/Documents/GitHub/DataMining-master/DataMining-master/Datasets") getwd() stats <- read.csv("Test-Data.csv") stats #Split the Data 1960 & 2013 stats[stats$Year == "1960",] df1960 <- stats[c(1:187),] df1960 df2013 <- stats[c(188:374),] df2013 #Create dataframes for the split data 1960 & 2013 mydf1960 <- data.frame(df1960) mydf1960 mydf2013 <- data.frame(df2013) mydf2013 #Add new column for Life Expectancy Data Country_Code <- c("ABW","AFG","AGO","ALB","ARE","ARG","ARM","ATG","AUS","AUT","AZE","BDI","BEL","BEN","BFA","BGD","BGR","BHR","BHS","BIH","BLR","BLZ","BOL","BRA","BRB","BRN","BTN","BWA","CAF","CAN","CHE","CHL","CHN","CIV","CMR","COG","COL","COM","CPV","CRI","CUB","CYP","CZE","DEU","DJI","DNK","DOM","DZA","ECU","EGY","ERI","ESP","EST","ETH","FIN","FJI","FRA","FSM","GAB","GBR","GEO","GHA","GIN","GMB","GNB","GNQ","GRC","GRD","GTM","GUM","GUY","HKG","HND","HRV","HTI","HUN","IDN","IND","IRL","IRN","IRQ","ISL","ITA","JAM","JOR","JPN","KAZ","KEN","KGZ","KHM","KIR","KOR","KWT","LAO","LBN","LBR","LBY","LCA","LKA","LSO","LTU","LUX","LVA","MAC","MAR","MDA","MDG","MDV","MEX","MKD","MLI","MLT","MMR","MNE","MNG","MOZ","MRT","MUS","MWI","MYS","NAM","NCL","NER","NGA","NIC","NLD","NOR","NPL","NZL","OMN","PAK","PAN","PER","PHL","PNG","POL","PRI","PRT","PRY","PYF","QAT","ROU","RUS","RWA","SAU","SDN","SEN","SGP","SLB","SLE","SLV","SOM","SSD","STP","SUR","SVK","SVN","SWE","SWZ","SYR","TCD","TGO","THA","TJK","TKM","TLS","TON","TTO","TUN","TUR","TZA","UGA","UKR","URY","USA","UZB","VCT","VEN","VIR","VNM","VUT","WSM","YEM","ZAF","COD","ZMB","ZWE") Life_Expectancy_At_Birth_1960 <- 
c(65.5693658536586,32.328512195122,32.9848292682927,62.2543658536585,52.2432195121951,65.2155365853659,65.8634634146342,61.7827317073171,70.8170731707317,68.5856097560976,60.836243902439,41.2360487804878,69.7019512195122,37.2782682926829,34.4779024390244,45.8293170731707,69.2475609756098,52.0893658536585,62.7290487804878,60.2762195121951,67.7080975609756,59.9613658536585,42.1183170731707,54.2054634146342,60.7380487804878,62.5003658536585,32.3593658536585,50.5477317073171,36.4826341463415,71.1331707317073,71.3134146341463,57.4582926829268,43.4658048780488,36.8724146341463,41.523756097561,48.5816341463415,56.716756097561,41.4424390243903,48.8564146341463,60.5761951219512,63.9046585365854,69.5939268292683,70.3487804878049,69.3129512195122,44.0212682926829,72.1765853658537,51.8452682926829,46.1351219512195,53.215,48.0137073170732,37.3629024390244,69.1092682926829,67.9059756097561,38.4057073170732,68.819756097561,55.9584878048781,69.8682926829268,57.5865853658537,39.5701219512195,71.1268292682927,63.4318536585366,45.8314634146342,34.8863902439024,32.0422195121951,37.8404390243902,36.7330487804878,68.1639024390244,59.8159268292683,45.5316341463415,61.2263414634146,60.2787317073171,66.9997073170732,46.2883170731707,64.6086585365854,42.1000975609756,68.0031707317073,48.6403170731707,41.1719512195122,69.691756097561,44.945512195122,48.0306829268293,73.4286585365854,69.1239024390244,64.1918292682927,52.6852682926829,67.6660975609756,58.3675853658537,46.3624146341463,56.1280731707317,41.2320243902439,49.2159756097561,53.0013170731707,60.3479512195122,43.2044634146342,63.2801219512195,34.7831707317073,42.6411951219512,57.303756097561,59.7471463414634,46.5107073170732,69.8473170731707,68.4463902439024,69.7868292682927,64.6609268292683,48.4466341463415,61.8127804878049,39.9746829268293,37.2686341463415,57.0656341463415,60.6228048780488,28.2116097560976,67.6017804878049,42.7363902439024,63.7056097560976,48.3688048780488,35.0037073170732,43.4830975609756,58.7452195121951,37.7736341
463415,59.4753414634146,46.8803902439024,58.6390243902439,35.5150487804878,37.1829512195122,46.9988292682927,73.3926829268293,73.549756097561,35.1708292682927,71.2365853658537,42.6670731707317,45.2904634146342,60.8817073170732,47.6915853658537,57.8119268292683,38.462243902439,67.6804878048781,68.7196097560976,62.8089268292683,63.7937073170732,56.3570487804878,61.2060731707317,65.6424390243903,66.0552926829268,42.2492926829268,45.6662682926829,48.1876341463415,38.206,65.6598292682927,49.3817073170732,30.3315365853659,49.9479268292683,36.9658780487805,31.6767073170732,50.4513658536585,59.6801219512195,69.9759268292683,68.9780487804878,73.0056097560976,44.2337804878049,52.768243902439,38.0161219512195,40.2728292682927,54.6993170731707,56.1535365853659,54.4586829268293,33.7271219512195,61.3645365853659,62.6575853658537,42.009756097561,45.3844146341463,43.6538780487805,43.9835609756098,68.2995365853659,67.8963902439025,69.7707317073171,58.8855365853659,57.7238780487805,59.2851219512195,63.7302195121951,59.0670243902439,46.4874878048781,49.969512195122,34.3638048780488,49.0362926829268,41.0180487804878,45.1098048780488,51.5424634146342) Life_Expectancy_At_Birth_2013 <- 
c(75.3286585365854,60.0282682926829,51.8661707317073,77.537243902439,77.1956341463415,75.9860975609756,74.5613658536585,75.7786585365854,82.1975609756098,80.890243902439,70.6931463414634,56.2516097560976,80.3853658536585,59.3120243902439,58.2406341463415,71.245243902439,74.4658536585366,76.5459512195122,75.0735365853659,76.2769268292683,72.4707317073171,69.9820487804878,67.9134390243903,74.1224390243903,75.3339512195122,78.5466585365854,69.1029268292683,64.3608048780488,49.8798780487805,81.4011219512195,82.7487804878049,81.1979268292683,75.3530243902439,51.2084634146342,55.0418048780488,61.6663902439024,73.8097317073171,62.9321707317073,72.9723658536585,79.2252195121951,79.2563902439025,79.9497804878049,78.2780487804878,81.0439024390244,61.6864634146342,80.3024390243903,73.3199024390244,74.5689512195122,75.648512195122,70.9257804878049,63.1778780487805,82.4268292682927,76.4243902439025,63.4421951219512,80.8317073170732,69.9179268292683,81.9682926829268,68.9733902439024,63.8435853658537,80.9560975609756,74.079512195122,61.1420731707317,58.216487804878,59.9992682926829,54.8384146341464,57.2908292682927,80.6341463414634,73.1935609756098,71.4863902439024,78.872512195122,66.3100243902439,83.8317073170732,72.9428536585366,77.1268292682927,62.4011463414634,75.2682926829268,68.7046097560976,67.6604146341463,81.0439024390244,75.1259756097561,69.4716829268293,83.1170731707317,82.290243902439,73.4689268292683,73.9014146341463,83.3319512195122,70.45,60.9537804878049,70.2024390243902,67.7720487804878,65.7665853658537,81.459756097561,74.462756097561,65.687243902439,80.1288780487805,60.5203902439024,71.6576829268293,74.9127073170732,74.2402926829268,49.3314634146342,74.1634146341464,81.7975609756098,73.9804878048781,80.3391463414634,73.7090487804878,68.811512195122,64.6739024390244,76.6026097560976,76.5326585365854,75.1870487804878,57.5351951219512,80.7463414634146,65.6540975609756,74.7583658536585,69.0618048780488,54.641512195122,62.8027073170732,74.46,61.466,74.567512195122,64.3
438780487805,77.1219512195122,60.8281463414634,52.4421463414634,74.514756097561,81.1048780487805,81.4512195121951,69.222,81.4073170731707,76.8410487804878,65.9636829268293,77.4192195121951,74.2838536585366,68.1315609756097,62.4491707317073,76.8487804878049,78.7111951219512,80.3731707317073,72.7991707317073,76.3340731707317,78.4184878048781,74.4634146341463,71.0731707317073,63.3948292682927,74.1776341463415,63.1670487804878,65.878756097561,82.3463414634146,67.7189268292683,50.3631219512195,72.4981463414634,55.0230243902439,55.2209024390244,66.259512195122,70.99,76.2609756097561,80.2780487804878,81.7048780487805,48.9379268292683,74.7157804878049,51.1914878048781,59.1323658536585,74.2469268292683,69.4001707317073,65.4565609756098,67.5223658536585,72.6403414634147,70.3052926829268,73.6463414634147,75.1759512195122,64.2918292682927,57.7676829268293,71.159512195122,76.8361951219512,78.8414634146341,68.2275853658537,72.8108780487805,74.0744146341464,79.6243902439024,75.756487804878,71.669243902439,73.2503902439024,63.583512195122,56.7365853658537,58.2719268292683,59.2373658536585,55.633) Life60 <- data.frame(Code= Country_Code, Life.Expectancy= Life_Expectancy_At_Birth_1960) Life13 <- data.frame(Code= Country_Code, Life.Expectancy= Life_Expectancy_At_Birth_2013) #Merging Dataframes DF60 <- merge(mydf1960, Life60, by.x = "Country.Code", by.y = "Code") DF13 <- merge(mydf2013, Life13, by.x = "Country.Code", by.y = "Code") head(DF60) head(DF13) rm(df1960, df2013, Life13, Life60, mydf1960, mydf2013, Country_Code, Life_Expectancy_At_Birth_1960, Life_Expectancy_At_Birth_2013) #Visualizing Data 1960 library(ggplot2) qplot(data = DF60, x = Fertility.Rate, y = Life.Expectancy, color = Country.Name, size=I(3), shape=I(19), alpha =I(.4), main = "Life Expectancy vs Fertility Rate") qplot(data = DF60, x = Fertility.Rate, y = Life.Expectancy, color = Region, size=I(3), shape=I(19), alpha =I(.4), main = "Life Expectancy vs Fertility Rate by Region (1960)") #Visualizing Data 2013 
qplot(data = DF13, x = Fertility.Rate, y = Life.Expectancy, color = Country.Name, size=I(3), shape=I(19), alpha =I(.4), main = "Life Expectancy vs Fertility Rate") qplot(data = DF13, x = Fertility.Rate, y = Life.Expectancy, color = Region, size=I(3), shape=I(19), alpha =I(.4), main = "Life Expectancy vs Fertility Rate by Region (2013)")
#' Convert `dataspice` metadata to EML #' #' Performs an (imperfect) conversion of `dataspice` metadata to EML. It's #' very likely you will get validation errors and need to fix them afterwards #' but `spice_to_eml` is a good way to a richer metadata schema (EML) when #' you're already using `dataspice` but need a richer metadata schema. #' #' @param spice (list) Your `dataspice` metadata. Uses #' `data/metadata/dataspice.json` by default. #' #' @return (emld) The crosswalked `emld` object #' @export #' #' @examples #' # Load an example dataspice JSON that comes installed with the package #' spice <- system.file( #' "examples", "annual-escapement.json", #' package = "dataspice" #' ) #' #' # And crosswalk it to EML #' spice_to_eml(spice) #' #' # We can also create dataspice metadata from scratch and crosswalk it to EML #' myspice <- list( #' name = "My example spice", #' creator = "Me", #' contact = "Me" #' ) #' spice_to_eml(myspice) spice_to_eml <- function(spice = file.path( "data", "metadata", "dataspice.json" )) { if (is.character(spice)) { if (!file.exists(spice)) { stop("Could not find dataspice JSON file at the path '", spice, "'") } doc <- jsonlite::read_json(spice) } else if (is.list(spice)) { doc <- spice } else { stop("spice must be either a path or a list object") } out <- list( dataset = list( title = crosswalk(doc, "name"), creator = crosswalk(doc, "creator"), abstract = crosswalk(doc, "description"), pubDate = crosswalk(doc, "datePublished"), coverage = list( temporalCoverage = crosswalk(doc, "temporalCoverage"), geographicCoverage = crosswalk(doc, "spatialCoverage") ), contact = crosswalk(doc, "creator"), dataTable = c( crosswalk(doc, "distribution") ) ) ) # Warn about variableMeasured not being crosswalked if ("variableMeasured" %in% names(doc)) { warning("variableMeasured not crosswalked to EML because we don't have ", "enough information. Use `crosswalk_variables` to create the ", "start of an EML attributes table. 
See ?crosswalk_variables for ", "help.", call. = FALSE ) message( "You might want to run EML::eml_validate on the result at ", "this point and fix what validations errors are produced.", " You will commonly need to set `packageId`, `system`, ", "and provide `attributeList` elements for each `dataTable`." ) } EML::as_emld(out) }
/R/spice_to_eml.R
permissive
drakileshr/dataspice
R
false
false
2,614
r
#' Convert `dataspice` metadata to EML #' #' Performs an (imperfect) conversion of `dataspice` metadata to EML. It's #' very likely you will get validation errors and need to fix them afterwards #' but `spice_to_eml` is a good way to a richer metadata schema (EML) when #' you're already using `dataspice` but need a richer metadata schema. #' #' @param spice (list) Your `dataspice` metadata. Uses #' `data/metadata/dataspice.json` by default. #' #' @return (emld) The crosswalked `emld` object #' @export #' #' @examples #' # Load an example dataspice JSON that comes installed with the package #' spice <- system.file( #' "examples", "annual-escapement.json", #' package = "dataspice" #' ) #' #' # And crosswalk it to EML #' spice_to_eml(spice) #' #' # We can also create dataspice metadata from scratch and crosswalk it to EML #' myspice <- list( #' name = "My example spice", #' creator = "Me", #' contact = "Me" #' ) #' spice_to_eml(myspice) spice_to_eml <- function(spice = file.path( "data", "metadata", "dataspice.json" )) { if (is.character(spice)) { if (!file.exists(spice)) { stop("Could not find dataspice JSON file at the path '", spice, "'") } doc <- jsonlite::read_json(spice) } else if (is.list(spice)) { doc <- spice } else { stop("spice must be either a path or a list object") } out <- list( dataset = list( title = crosswalk(doc, "name"), creator = crosswalk(doc, "creator"), abstract = crosswalk(doc, "description"), pubDate = crosswalk(doc, "datePublished"), coverage = list( temporalCoverage = crosswalk(doc, "temporalCoverage"), geographicCoverage = crosswalk(doc, "spatialCoverage") ), contact = crosswalk(doc, "creator"), dataTable = c( crosswalk(doc, "distribution") ) ) ) # Warn about variableMeasured not being crosswalked if ("variableMeasured" %in% names(doc)) { warning("variableMeasured not crosswalked to EML because we don't have ", "enough information. Use `crosswalk_variables` to create the ", "start of an EML attributes table. 
See ?crosswalk_variables for ", "help.", call. = FALSE ) message( "You might want to run EML::eml_validate on the result at ", "this point and fix what validations errors are produced.", " You will commonly need to set `packageId`, `system`, ", "and provide `attributeList` elements for each `dataTable`." ) } EML::as_emld(out) }
testlist <- list(doy = numeric(0), latitude = c(-6.82852703442279e-229, -6.82852703442279e-229, -6.82852703442279e-229, -6.82852703442279e-229, -6.82852703442279e-229, -6.82852703442279e-229, -6.82852703442279e-229, 2.77448001761892e+180, 2.77448001762435e+180, 2.77448001762435e+180, 2.77448001762435e+180, 2.77448001762435e+180, 8.01045874493453e-317, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), temp = NA_real_) result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist) str(result)
/meteor/inst/testfiles/ET0_ThornthwaiteWilmott/libFuzzer_ET0_ThornthwaiteWilmott/ET0_ThornthwaiteWilmott_valgrind_files/1612735860-test.R
no_license
akhikolla/updatedatatype-list3
R
false
false
613
r
testlist <- list(doy = numeric(0), latitude = c(-6.82852703442279e-229, -6.82852703442279e-229, -6.82852703442279e-229, -6.82852703442279e-229, -6.82852703442279e-229, -6.82852703442279e-229, -6.82852703442279e-229, 2.77448001761892e+180, 2.77448001762435e+180, 2.77448001762435e+180, 2.77448001762435e+180, 2.77448001762435e+180, 8.01045874493453e-317, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), temp = NA_real_) result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist) str(result)
\name{translate.humar} \alias{translate.humar} %- Also NEED an '\alias' for EACH other topic documented here. \title{ %% ~~function to do ... ~~ translate.humar } \description{ %% ~~ A concise (1-5 lines) description of what the function does. ~~ This package translates the column names in the humar data set from Icelandic to English } \usage{ translate.humar() } %- maybe also 'usage' for other objects documented here. \details{ %% ~~ If necessary, more details than the description above ~~ } \value{ %% ~Describe the value returned %% If it is a LIST, use %% \item{comp1 }{Description of 'comp1'} %% \item{comp2 }{Description of 'comp2'} %% ... Returns a data set with English for column names. } \references{ %% ~put references to the literature/web site here ~ } \author{ %% ~~who you are~~ Paul Frater <pnf1@hi.is> } \note{ %% ~~further notes~~ } %% ~Make other sections like Warning with \section{Warning }{....} ~ \seealso{ %% ~~objects to See Also as \code{\link{help}}, ~~~ } \examples{ nephrops.trawls <- translate.humar() names(nephrops.trawls) } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS") \keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
/man/translate.humar.Rd
no_license
pfrater/LogbooksTranslate
R
false
false
1,267
rd
\name{translate.humar} \alias{translate.humar} %- Also NEED an '\alias' for EACH other topic documented here. \title{ %% ~~function to do ... ~~ translate.humar } \description{ %% ~~ A concise (1-5 lines) description of what the function does. ~~ This package translates the column names in the humar data set from Icelandic to English } \usage{ translate.humar() } %- maybe also 'usage' for other objects documented here. \details{ %% ~~ If necessary, more details than the description above ~~ } \value{ %% ~Describe the value returned %% If it is a LIST, use %% \item{comp1 }{Description of 'comp1'} %% \item{comp2 }{Description of 'comp2'} %% ... Returns a data set with English for column names. } \references{ %% ~put references to the literature/web site here ~ } \author{ %% ~~who you are~~ Paul Frater <pnf1@hi.is> } \note{ %% ~~further notes~~ } %% ~Make other sections like Warning with \section{Warning }{....} ~ \seealso{ %% ~~objects to See Also as \code{\link{help}}, ~~~ } \examples{ nephrops.trawls <- translate.humar() names(nephrops.trawls) } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS") \keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
/DomacaNaloga3/arhiv/naloga2Andraz.r
no_license
tinarazic/machine_learning
R
false
false
8,562
r
setwd("/home/user/education/rprogs") library(tidyverse) mpg ?mpg ggplot(data=mpg) + geom_point(mapping = aes(x=displ, y = hwy)) ggplot(data=mpg) nrow(mtcars) ncol(mtcars) ggplot(data = mpg) + geom_point(mapping = aes(x = hwy, y = cyl)) ggplot(data = mpg) + geom_point(mapping = aes(x = class, y = drv)) ggplot(data = mpg) + geom_point(mapping = aes(x = displ, y = hwy, color = class))#color, size, alpha, shape ggplot(data = mpg) + geom_point(mapping = aes(x = displ, y = hwy), colour = "red") ?mpg glimpse(mpg)#produces types of each variables ggplot(data = mpg) + geom_point(mapping = aes(x = cty, y = hwy, color = displ<5)) #facets x11() ggplot(data = mpg) + geom_point(mapping = aes(x=displ, y = hwy))+ facet_wrap(~class) ggplot(data = mpg) + geom_point(mapping = aes(x=displ, y = hwy))+ facet_grid(drv~cyl) ggplot(data = mpg)+ geom_point(mapping = aes(x=drv, y = cyl)) ggplot(data = mpg) + geom_point(mapping = aes(x=displ, y = hwy))+ facet_wrap(~year) ggplot(data = mpg) + geom_point(mapping = aes(x=displ, y = hwy))+ facet_grid(.~class) ggplot(data = mpg) + geom_point(mapping = aes(x=displ, y = hwy))+ facet_grid(.~drv) ?facet_wrap ggplot(data = mpg) + geom_smooth(mapping = aes(x = displ, y = hwy,color=drv)) ggplot(data = mpg, mapping = aes(x=displ, y = hwy)) + geom_point(mapping = aes(color = class))+ geom_smooth(data = filter(mpg, class == "subcompact"),se = FALSE) ?filter ggplot(data = mpg) + geom_area(mapping = aes(x = displ, y = hwy))#boxplot,histogram,area,line #ex6 #1 ggplot(data = mpg, mapping = aes(x = displ, y = hwy)) + geom_point() + geom_smooth(se = FALSE) #2 ggplot(data = mpg, mapping = aes(x = displ, y = hwy)) + geom_point() + geom_smooth(mapping = aes(group = drv), se = FALSE) #3 ggplot(data = mpg, mapping = aes(x = displ, y = hwy, group = drv, color = drv)) + geom_point() + geom_smooth( se = FALSE) #4 ggplot(data = mpg, mapping = aes(x = displ, y = hwy)) + geom_point(mapping = aes(color = drv)) + geom_smooth(se = FALSE) #5 ggplot(data = mpg, mapping = aes(x = 
displ, y = hwy)) + geom_point(mapping = aes(color = drv)) + geom_smooth(mapping = aes(linetype = drv),se = FALSE) #6 ggplot(data = mpg, mapping = aes(x = displ, y = hwy)) + geom_point(mapping = aes(color = drv)) #statistical transformation ?diamonds diamonds ggplot(data = diamonds)+ geom_bar(mapping = aes(x = cut)) ?geom_bar x11() ggplot(data = diamonds)+ stat_summary( mapping = aes(x=cut, y = depth), fun.ymin = min, fun.ymax = max, fun.y = median ) ?stat_summary #1 x11() ggplot(data = diamonds)+ geom_pointrange( mapping = aes(x=cut, y = depth), stat = "summary", fun.ymin = min, fun.ymax = max, fun.y = median ) #2
/ggplot.R
no_license
ArtHouse5/r_progs
R
false
false
2,815
r
setwd("/home/user/education/rprogs") library(tidyverse) mpg ?mpg ggplot(data=mpg) + geom_point(mapping = aes(x=displ, y = hwy)) ggplot(data=mpg) nrow(mtcars) ncol(mtcars) ggplot(data = mpg) + geom_point(mapping = aes(x = hwy, y = cyl)) ggplot(data = mpg) + geom_point(mapping = aes(x = class, y = drv)) ggplot(data = mpg) + geom_point(mapping = aes(x = displ, y = hwy, color = class))#color, size, alpha, shape ggplot(data = mpg) + geom_point(mapping = aes(x = displ, y = hwy), colour = "red") ?mpg glimpse(mpg)#produces types of each variables ggplot(data = mpg) + geom_point(mapping = aes(x = cty, y = hwy, color = displ<5)) #facets x11() ggplot(data = mpg) + geom_point(mapping = aes(x=displ, y = hwy))+ facet_wrap(~class) ggplot(data = mpg) + geom_point(mapping = aes(x=displ, y = hwy))+ facet_grid(drv~cyl) ggplot(data = mpg)+ geom_point(mapping = aes(x=drv, y = cyl)) ggplot(data = mpg) + geom_point(mapping = aes(x=displ, y = hwy))+ facet_wrap(~year) ggplot(data = mpg) + geom_point(mapping = aes(x=displ, y = hwy))+ facet_grid(.~class) ggplot(data = mpg) + geom_point(mapping = aes(x=displ, y = hwy))+ facet_grid(.~drv) ?facet_wrap ggplot(data = mpg) + geom_smooth(mapping = aes(x = displ, y = hwy,color=drv)) ggplot(data = mpg, mapping = aes(x=displ, y = hwy)) + geom_point(mapping = aes(color = class))+ geom_smooth(data = filter(mpg, class == "subcompact"),se = FALSE) ?filter ggplot(data = mpg) + geom_area(mapping = aes(x = displ, y = hwy))#boxplot,histogram,area,line #ex6 #1 ggplot(data = mpg, mapping = aes(x = displ, y = hwy)) + geom_point() + geom_smooth(se = FALSE) #2 ggplot(data = mpg, mapping = aes(x = displ, y = hwy)) + geom_point() + geom_smooth(mapping = aes(group = drv), se = FALSE) #3 ggplot(data = mpg, mapping = aes(x = displ, y = hwy, group = drv, color = drv)) + geom_point() + geom_smooth( se = FALSE) #4 ggplot(data = mpg, mapping = aes(x = displ, y = hwy)) + geom_point(mapping = aes(color = drv)) + geom_smooth(se = FALSE) #5 ggplot(data = mpg, mapping = aes(x = 
displ, y = hwy)) + geom_point(mapping = aes(color = drv)) + geom_smooth(mapping = aes(linetype = drv),se = FALSE) #6 ggplot(data = mpg, mapping = aes(x = displ, y = hwy)) + geom_point(mapping = aes(color = drv)) #statistical transformation ?diamonds diamonds ggplot(data = diamonds)+ geom_bar(mapping = aes(x = cut)) ?geom_bar x11() ggplot(data = diamonds)+ stat_summary( mapping = aes(x=cut, y = depth), fun.ymin = min, fun.ymax = max, fun.y = median ) ?stat_summary #1 x11() ggplot(data = diamonds)+ geom_pointrange( mapping = aes(x=cut, y = depth), stat = "summary", fun.ymin = min, fun.ymax = max, fun.y = median ) #2
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/stat_summed_mean_difference.R \name{stat_summed_mean_difference} \alias{stat_summed_mean_difference} \title{Mean difference test statistic.} \usage{ stat_summed_mean_difference(data) } \arguments{ \item{data}{A data frame which contains columns `trmt` and `measurement`.} } \description{ Mean difference test statistic. }
/man/stat_summed_mean_difference.Rd
no_license
jlkravitz/texttest
R
false
true
400
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/stat_summed_mean_difference.R \name{stat_summed_mean_difference} \alias{stat_summed_mean_difference} \title{Mean difference test statistic.} \usage{ stat_summed_mean_difference(data) } \arguments{ \item{data}{A data frame which contains columns `trmt` and `measurement`.} } \description{ Mean difference test statistic. }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/CovariateGraph.R \name{CovariateGraph} \alias{CovariateGraph} \title{CovariateGraph} \usage{ Value <- CovariateGraph(X, nbin) } \arguments{ \item{X}{Data of primary variate X[, 1] and covariate X[, 2] (double(n, 2))} \item{nbin}{(optional) No. of bins: range of covariate is subdivided by nbin bins and covariate is rounded to centre of bin (double(1))} } \value{ list of length nbin, containing for each bin and each value x of the primary variate coinciding with a covariate value in this bin, the number of times that the primary variate is larger than or equal to x } \description{ Conditional probability of a covariate belonging to some bin, given that the primary variate exceeds a threshold } \details{ If no nbin is specified, then the covariate is assumed to be discrete, and the bins are its range of values For the i-th bin, the graph of its conditional probability p2 as a function of the probability p1 of exceedance of the value of the primary variate is given by p2 <- (1:length(l))/l, p1 <- l/n with n <- dim(X)[1] and l <- Value[[i]] } \author{ Cees de Valk \email{ceesfdevalk@gmail.com} }
/man/CovariateGraph.Rd
no_license
ceesfdevalk/EVTools
R
false
true
1,254
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/CovariateGraph.R \name{CovariateGraph} \alias{CovariateGraph} \title{CovariateGraph} \usage{ Value <- CovariateGraph(X, nbin) } \arguments{ \item{X}{Data of primary variate X[, 1] and covariate X[, 2] (double(n, 2))} \item{nbin}{(optional) No. of bins: range of covariate is subdivided by nbin bins and covariate is rounded to centre of bin (double(1))} } \value{ list of length nbin, containing for each bin and each value x of the primary variate coinciding with a covariate value in this bin, the number of times that the primary variate is larger than or equal to x } \description{ Conditional probability of a covariate belonging to some bin, given that the primary variate exceeds a threshold } \details{ If no nbin is specified, then the covariate is assumed to be discrete, and the bins are its range of values For the i-th bin, the graph of its conditional probability p2 as a function of the probability p1 of exceedance of the value of the primary variate is given by p2 <- (1:length(l))/l, p1 <- l/n with n <- dim(X)[1] and l <- Value[[i]] } \author{ Cees de Valk \email{ceesfdevalk@gmail.com} }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/IO-methods.R \name{import_biom} \alias{import_biom} \title{Import phyloseq data from biom-format file} \usage{ import_biom(BIOMfilename, treefilename=NULL, refseqfilename=NULL, refseqFunction=readDNAStringSet, refseqArgs=NULL, parseFunction=parse_taxonomy_default, parallel=FALSE, version=1.0, ...) } \arguments{ \item{BIOMfilename}{(Required). A character string indicating the file location of the BIOM formatted file. This is a JSON formatted file, specific to biological datasets, as described in \url{http://www.qiime.org/svn_documentation/documentation/biom_format.html}{the biom-format home page}. In principle, this file should include you OTU abundance data (OTU table), your taxonomic classification data (taxonomy table), as well as your sample data, for instance what might be in your ``sample map'' in QIIME. A phylogenetic tree is not yet supported by biom-format, and so is a separate argument here. If, for some reason, your biom-format file is missing one of these mentioned data types but you have it in a separate file, you can first import the data that is in the biom file using this function, \code{import_biom}, and then ``merge'' the remaining data after you have imported with other tools using the relatively general-purpose data merging function called \code{\link{merge_phyloseq}}.} \item{treefilename}{(Optional). Default value is \code{NULL}. A file representing a phylogenetic tree or a \code{\link{phylo}} object. Files can be NEXUS or Newick format. See \code{\link{read_tree}} for more details. Also, if using a recent release of the GreenGenes database tree, try the \code{\link{read_tree_greengenes}} function -- this should solve some issues specific to importing that tree. If provided, the tree should have the same OTUs/tip-labels as the OTUs in the other files. Any taxa or samples missing in one of the files is removed from all. 
As an example from the QIIME pipeline, this tree would be a tree of the representative 16S rRNA sequences from each OTU cluster, with the number of leaves/tips equal to the number of taxa/species/OTUs, or the complete reference database tree that contains the OTU identifiers of every OTU in your abundance table. Note that this argument can be a tree object (\code{\link[ape]{phylo}}-class) for cases where the tree has been --- or needs to be --- imported separately, as in the case of the GreenGenes tree mentioned earlier (code{\link{read_tree_greengenes}}).} \item{refseqfilename}{(Optional). Default \code{NULL}. The file path of the biological sequence file that contains at a minimum a sequence for each OTU in the dataset. Alternatively, you may provide an already-imported \code{\link[Biostrings]{XStringSet}} object that satisfies this condition. In either case, the \code{\link{names}} of each OTU need to match exactly the \code{\link{taxa_names}} of the other components of your data. If this is not the case, for example if the data file is a FASTA format but contains additional information after the OTU name in each sequence header, then some additional parsing is necessary, which you can either perform separately before calling this function, or describe explicitly in a custom function provided in the (next) argument, \code{refseqFunction}. Note that the \code{\link[Biostrings]{XStringSet}} class can represent any arbitrary sequence, including user-defined subclasses, but is most-often used to represent RNA, DNA, or amino acid sequences. The only constraint is that this special list of sequences has exactly one named element for each OTU in the dataset.} \item{refseqFunction}{(Optional). Default is \code{\link[Biostrings]{readDNAStringSet}}, which expects to read a fasta-formatted DNA sequence file. If your reference sequences for each OTU are amino acid, RNA, or something else, then you will need to specify a different function here. 
This is the function used to read the file connection provided as the the previous argument, \code{refseqfilename}. This argument is ignored if \code{refseqfilename} is already a \code{\link[Biostrings]{XStringSet}} class.} \item{refseqArgs}{(Optional). Default \code{NULL}. Additional arguments to \code{refseqFunction}. See \code{\link[Biostrings]{XStringSet-io}} for details about additional arguments to the standard read functions in the Biostrings package.} \item{parseFunction}{(Optional). A function. It must be a function that takes as its first argument a character vector of taxonomic rank labels for a single OTU and parses and names each element (an optionally removes unwanted elements). Further details and examples of acceptable functions are provided in the documentation for \code{\link{parse_taxonomy_default}}. There are many variations on taxonomic nomenclature, and naming conventions used to store that information in various taxonomic databases and phylogenetic assignment algorithms. A popular database, \url{http://greengenes.lbl.gov/cgi-bin/nph-index.cgi}{greengenes}, has its own custom parsing function provided in the phyloseq package, \code{\link{parse_taxonomy_greengenes}}, and more can be contributed or posted as code snippets as needed. They can be custom-defined by a user immediately prior to the the call to \code{\link{import_biom}}, and this is a suggested first step to take when trouble-shooting taxonomy-related errors during file import.} \item{parallel}{(Optional). Logical. Wrapper option for \code{.parallel} parameter in \code{plyr-package} functions. If \code{TRUE}, apply parsing functions in parallel, using parallel backend provided by \code{\link{foreach}} and its supporting backend packages. One caveat, plyr-parallelization currently works most-cleanly with \code{multicore}-like backends (Mac OS X, Unix?), and may throw warnings for SNOW-like backends. 
See the example below for code invoking multicore-style backend within the \code{doParallel} package. Finally, for many datasets a parallel import should not be necessary because a serial import will be just as fast and the import is often only performed one time; after which the data should be saved as an RData file using the \code{\link{save}} function.} \item{version}{(Optional). Numeric. The expected version number of the file. As the BIOM format evolves, version-specific importers may be available by adjusting the version value. Default is \code{1.0}. Not yet implemented. Parsing of the biom-format is done mostly by the biom package now available in CRAN.} \item{...}{Additional parameters passed on to \code{\link{read_tree}}.} } \value{ A \code{\link{phyloseq-class}} object. } \description{ New versions of QIIME produce a more-comprehensive and formally-defined JSON file format, called biom file format: } \details{ ``The biom file format (canonically pronounced `biome') is designed to be a general-use format for representing counts of observations in one or more biological samples. 
BIOM is a recognized standard for the Earth Microbiome Project and is a Genomics Standards Consortium candidate project.'' \url{http://biom-format.org/} } \examples{ # An included example of a rich dense biom file rich_dense_biom <- system.file("extdata", "rich_dense_otu_table.biom", package="phyloseq") import_biom(rich_dense_biom, parseFunction=parse_taxonomy_greengenes) # An included example of a sparse dense biom file rich_sparse_biom <- system.file("extdata", "rich_sparse_otu_table.biom", package="phyloseq") import_biom(rich_sparse_biom, parseFunction=parse_taxonomy_greengenes) # # # Example code for importing large file with parallel backend # library("doParallel") # registerDoParallel(cores=6) # import_biom("my/file/path/file.biom", parseFunction=parse_taxonomy_greengenes, parallel=TRUE) } \references{ \href{http://www.qiime.org/svn_documentation/documentation/biom_format.html}{biom-format} } \seealso{ \code{\link{import}} \code{\link{import_qiime}} \code{\link{read_tree}} \code{\link{read_tree_greengenes}} \code{\link[biom]{biom-package}} \code{\link[biom]{read_biom}} \code{\link[biom]{biom_data}} \code{\link[biom]{sample_metadata}} \code{\link[biom]{observation_metadata}} \code{\link[Biostrings]{XStringSet-io}} }
/man/import_biom.Rd
no_license
antagomir/phyloseq
R
false
true
8,232
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/IO-methods.R \name{import_biom} \alias{import_biom} \title{Import phyloseq data from biom-format file} \usage{ import_biom(BIOMfilename, treefilename=NULL, refseqfilename=NULL, refseqFunction=readDNAStringSet, refseqArgs=NULL, parseFunction=parse_taxonomy_default, parallel=FALSE, version=1.0, ...) } \arguments{ \item{BIOMfilename}{(Required). A character string indicating the file location of the BIOM formatted file. This is a JSON formatted file, specific to biological datasets, as described in \url{http://www.qiime.org/svn_documentation/documentation/biom_format.html}{the biom-format home page}. In principle, this file should include you OTU abundance data (OTU table), your taxonomic classification data (taxonomy table), as well as your sample data, for instance what might be in your ``sample map'' in QIIME. A phylogenetic tree is not yet supported by biom-format, and so is a separate argument here. If, for some reason, your biom-format file is missing one of these mentioned data types but you have it in a separate file, you can first import the data that is in the biom file using this function, \code{import_biom}, and then ``merge'' the remaining data after you have imported with other tools using the relatively general-purpose data merging function called \code{\link{merge_phyloseq}}.} \item{treefilename}{(Optional). Default value is \code{NULL}. A file representing a phylogenetic tree or a \code{\link{phylo}} object. Files can be NEXUS or Newick format. See \code{\link{read_tree}} for more details. Also, if using a recent release of the GreenGenes database tree, try the \code{\link{read_tree_greengenes}} function -- this should solve some issues specific to importing that tree. If provided, the tree should have the same OTUs/tip-labels as the OTUs in the other files. Any taxa or samples missing in one of the files is removed from all. 
As an example from the QIIME pipeline, this tree would be a tree of the representative 16S rRNA sequences from each OTU cluster, with the number of leaves/tips equal to the number of taxa/species/OTUs, or the complete reference database tree that contains the OTU identifiers of every OTU in your abundance table. Note that this argument can be a tree object (\code{\link[ape]{phylo}}-class) for cases where the tree has been --- or needs to be --- imported separately, as in the case of the GreenGenes tree mentioned earlier (code{\link{read_tree_greengenes}}).} \item{refseqfilename}{(Optional). Default \code{NULL}. The file path of the biological sequence file that contains at a minimum a sequence for each OTU in the dataset. Alternatively, you may provide an already-imported \code{\link[Biostrings]{XStringSet}} object that satisfies this condition. In either case, the \code{\link{names}} of each OTU need to match exactly the \code{\link{taxa_names}} of the other components of your data. If this is not the case, for example if the data file is a FASTA format but contains additional information after the OTU name in each sequence header, then some additional parsing is necessary, which you can either perform separately before calling this function, or describe explicitly in a custom function provided in the (next) argument, \code{refseqFunction}. Note that the \code{\link[Biostrings]{XStringSet}} class can represent any arbitrary sequence, including user-defined subclasses, but is most-often used to represent RNA, DNA, or amino acid sequences. The only constraint is that this special list of sequences has exactly one named element for each OTU in the dataset.} \item{refseqFunction}{(Optional). Default is \code{\link[Biostrings]{readDNAStringSet}}, which expects to read a fasta-formatted DNA sequence file. If your reference sequences for each OTU are amino acid, RNA, or something else, then you will need to specify a different function here. 
This is the function used to read the file connection provided as the the previous argument, \code{refseqfilename}. This argument is ignored if \code{refseqfilename} is already a \code{\link[Biostrings]{XStringSet}} class.} \item{refseqArgs}{(Optional). Default \code{NULL}. Additional arguments to \code{refseqFunction}. See \code{\link[Biostrings]{XStringSet-io}} for details about additional arguments to the standard read functions in the Biostrings package.} \item{parseFunction}{(Optional). A function. It must be a function that takes as its first argument a character vector of taxonomic rank labels for a single OTU and parses and names each element (an optionally removes unwanted elements). Further details and examples of acceptable functions are provided in the documentation for \code{\link{parse_taxonomy_default}}. There are many variations on taxonomic nomenclature, and naming conventions used to store that information in various taxonomic databases and phylogenetic assignment algorithms. A popular database, \url{http://greengenes.lbl.gov/cgi-bin/nph-index.cgi}{greengenes}, has its own custom parsing function provided in the phyloseq package, \code{\link{parse_taxonomy_greengenes}}, and more can be contributed or posted as code snippets as needed. They can be custom-defined by a user immediately prior to the the call to \code{\link{import_biom}}, and this is a suggested first step to take when trouble-shooting taxonomy-related errors during file import.} \item{parallel}{(Optional). Logical. Wrapper option for \code{.parallel} parameter in \code{plyr-package} functions. If \code{TRUE}, apply parsing functions in parallel, using parallel backend provided by \code{\link{foreach}} and its supporting backend packages. One caveat, plyr-parallelization currently works most-cleanly with \code{multicore}-like backends (Mac OS X, Unix?), and may throw warnings for SNOW-like backends. 
See the example below for code invoking multicore-style backend within the \code{doParallel} package. Finally, for many datasets a parallel import should not be necessary because a serial import will be just as fast and the import is often only performed one time; after which the data should be saved as an RData file using the \code{\link{save}} function.} \item{version}{(Optional). Numeric. The expected version number of the file. As the BIOM format evolves, version-specific importers may be available by adjusting the version value. Default is \code{1.0}. Not yet implemented. Parsing of the biom-format is done mostly by the biom package now available in CRAN.} \item{...}{Additional parameters passed on to \code{\link{read_tree}}.} } \value{ A \code{\link{phyloseq-class}} object. } \description{ New versions of QIIME produce a more-comprehensive and formally-defined JSON file format, called biom file format: } \details{ ``The biom file format (canonically pronounced `biome') is designed to be a general-use format for representing counts of observations in one or more biological samples. 
BIOM is a recognized standard for the Earth Microbiome Project and is a Genomics Standards Consortium candidate project.'' \url{http://biom-format.org/} } \examples{ # An included example of a rich dense biom file rich_dense_biom <- system.file("extdata", "rich_dense_otu_table.biom", package="phyloseq") import_biom(rich_dense_biom, parseFunction=parse_taxonomy_greengenes) # An included example of a sparse dense biom file rich_sparse_biom <- system.file("extdata", "rich_sparse_otu_table.biom", package="phyloseq") import_biom(rich_sparse_biom, parseFunction=parse_taxonomy_greengenes) # # # Example code for importing large file with parallel backend # library("doParallel") # registerDoParallel(cores=6) # import_biom("my/file/path/file.biom", parseFunction=parse_taxonomy_greengenes, parallel=TRUE) } \references{ \href{http://www.qiime.org/svn_documentation/documentation/biom_format.html}{biom-format} } \seealso{ \code{\link{import}} \code{\link{import_qiime}} \code{\link{read_tree}} \code{\link{read_tree_greengenes}} \code{\link[biom]{biom-package}} \code{\link[biom]{read_biom}} \code{\link[biom]{biom_data}} \code{\link[biom]{sample_metadata}} \code{\link[biom]{observation_metadata}} \code{\link[Biostrings]{XStringSet-io}} }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/main.R \name{rlomax} \alias{rlomax} \title{rlomax} \usage{ rlomax(n, alpha, lambda) } \description{ Draws from the Lomax (Pareto type II) distribution with shape alpha and scale lambda. }
/man/rlomax.Rd
permissive
bobverity/bobFunctions
R
false
true
266
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/main.R \name{rlomax} \alias{rlomax} \title{rlomax} \usage{ rlomax(n, alpha, lambda) } \description{ Draws from the Lomax (Pareto type II) distribution with shape alpha and scale lambda. }
\name{accuracy} \alias{accuracy} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Calculates the accuracy of a matrix } \description{ Calculates the accuracy of a given matrix. } \usage{ accuracy(accuracy_matrix, classification_vector, config, return_hit_vector = TRUE) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{accuracy\_matrix}{ data matrix with accurate classification } \item{classification\_vector}{ the classification to verify accuracy } \item{config}{ the configuration } \item{return_hit_vector}{ if true it will return accuracy ratio per cluster and total hit ratio, otherwise it will return the total number of hits } } \details{ Compares the obtained classification and the real classification to check the accuracy. } \value{ Returns the total number of hits or accuracy ratio per cluster and total hit ratio } %\references{ ~put references to the literature/web site here ~ } \author{ Fernando Martins } \note{ This function is part of the frbf implementation and is for internal use only. } \seealso{ \code{\link[frbf:RemoraConfiguration-class]{RemoraConfiguration}} } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{ internal }
/man/accuracy.Rd
no_license
cran/frbf
R
false
false
1,272
rd
\name{accuracy} \alias{accuracy} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Calculates the accuracy of a matrix } \description{ Calculates the accuracy of a given matrix. } \usage{ accuracy(accuracy_matrix, classification_vector, config, return_hit_vector = TRUE) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{accuracy\_matrix}{ data matrix with accurate classification } \item{classification\_vector}{ the classification to verify accuracy } \item{config}{ the configuration } \item{return_hit_vector}{ if true it will return accuracy ratio per cluster and total hit ratio, otherwise it will return the total number of hits } } \details{ Compares the obtained classification and the real classification to check the accuracy. } \value{ Returns the total number of hits or accuracy ratio per cluster and total hit ratio } %\references{ ~put references to the literature/web site here ~ } \author{ Fernando Martins } \note{ This function is part of the frbf implementation and is for internal use only. } \seealso{ \code{\link[frbf:RemoraConfiguration-class]{RemoraConfiguration}} } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{ internal }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/conversion.R \name{convert_cols} \alias{convert_cols} \alias{convert_cols.data.table} \title{convert all columns of specified type to another type} \usage{ convert_cols(x, ...) \method{convert_cols}{data.table}(x, from_class, to_class, inplace = TRUE) } \arguments{ \item{x}{an input data.table} \item{from_class}{original type/class} \item{to_class}{target type/class} } \description{ convert all columns of specified type to another type } \details{ Modifies the referenced data.tables! }
/man/convert_cols.Rd
permissive
vh-d/RETL
R
false
true
572
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/conversion.R \name{convert_cols} \alias{convert_cols} \alias{convert_cols.data.table} \title{convert all columns of specified type to another type} \usage{ convert_cols(x, ...) \method{convert_cols}{data.table}(x, from_class, to_class, inplace = TRUE) } \arguments{ \item{x}{an input data.table} \item{from_class}{original type/class} \item{to_class}{target type/class} } \description{ convert all columns of specified type to another type } \details{ Modifies the referenced data.tables! }
# Test runner for the FAtools package: R CMD check sources this file, and
# test_check() discovers and executes every test file under tests/testthat/.
library(testthat)
library(FAtools)
test_check("FAtools")
/tests/testthat.R
no_license
Shareryu/FAtools
R
false
false
58
r
# Test runner for the FAtools package: R CMD check sources this file, and
# test_check() discovers and executes every test file under tests/testthat/.
library(testthat)
library(FAtools)
test_check("FAtools")
# Benchmark script: fit a regularized discriminant analysis (classif.rda)
# learner on the OpenML "cmc" dataset and audit it with 5-fold CV.
# NOTE(review): the `=` assignments and statement order are kept exactly as-is
# because the recorded hash below is a digest of the constructed task/learner
# objects — confirm before reformatting anything that feeds digest().

#:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)

#:# config
# fixed seed so the resampling folds (and thus the audit metrics) reproduce
set.seed(1)

#:# data
# download the "cmc" dataset from OpenML (cached locally by the OpenML package)
dataset <- getOMLDataSet(data.name = "cmc")
head(dataset$data)

#:# preprocessing
# no transformation applied; the raw data is used directly
head(dataset$data)

#:# model
# binary classification task with probability predictions
task = makeClassifTask(id = "task", data = dataset$data, target = "binaryClass")
lrn = makeLearner("classif.rda", par.vals = list(), predict.type = "prob")

#:# hash
# identifier of this (task, learner) pair; expected value recorded below
#:# 8f19495b5a800014be447eb3fbdefe7d
hash <- digest(list(task, lrn))
hash

#:# audit
# 5-fold cross-validation with a panel of classification measures
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr
ACC

#:# session info
# record the exact package versions used for this run
sink(paste0("sessionInfo.txt"))
sessionInfo()
sink()
/models/openml_cmc/classification_binaryClass/8f19495b5a800014be447eb3fbdefe7d/code.R
no_license
pysiakk/CaseStudies2019S
R
false
false
680
r
# Benchmark script: fit a regularized discriminant analysis (classif.rda)
# learner on the OpenML "cmc" dataset and audit it with 5-fold CV.
# NOTE(review): the `=` assignments and statement order are kept exactly as-is
# because the recorded hash below is a digest of the constructed task/learner
# objects — confirm before reformatting anything that feeds digest().

#:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)

#:# config
# fixed seed so the resampling folds (and thus the audit metrics) reproduce
set.seed(1)

#:# data
# download the "cmc" dataset from OpenML (cached locally by the OpenML package)
dataset <- getOMLDataSet(data.name = "cmc")
head(dataset$data)

#:# preprocessing
# no transformation applied; the raw data is used directly
head(dataset$data)

#:# model
# binary classification task with probability predictions
task = makeClassifTask(id = "task", data = dataset$data, target = "binaryClass")
lrn = makeLearner("classif.rda", par.vals = list(), predict.type = "prob")

#:# hash
# identifier of this (task, learner) pair; expected value recorded below
#:# 8f19495b5a800014be447eb3fbdefe7d
hash <- digest(list(task, lrn))
hash

#:# audit
# 5-fold cross-validation with a panel of classification measures
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr
ACC

#:# session info
# record the exact package versions used for this run
sink(paste0("sessionInfo.txt"))
sessionInfo()
sink()
###########################################################################
# Reduced skillspace estimation.
#
# Smooths the per-group skill-class distribution pi.k by fitting, for each
# group, a weighted log-linear model of log(pi.k) on the design matrix Z
# (no intercept), then mapping the fitted coefficients back to normalized
# probabilities.
#
# Arguments:
#   Ngroup - numeric vector of group sample sizes (length G)
#   pi.k   - matrix of skill-class probabilities (classes x groups)
#   Z      - design matrix spanning the reduced skillspace
#   G      - number of groups
#   delta  - coefficient matrix (ncol(Z) x groups), overwritten per column
#   eps    - small constant guarding against log(0)
#
# Returns a list with the smoothed pi.k, the updated delta, and covdelta,
# a list of per-group covariance matrices of the fitted coefficients.
gdm_est_skillspace <- function(Ngroup, pi.k, Z, G, delta, eps=1E-10)
{
    # preallocate; seq_len() is also safe for G == 0, unlike 1:G
    covdelta <- vector("list", G)
    for (gg in seq_len(G)){
        # expected class counts for this group, renormalized to weights
        ntheta <- Ngroup[gg] * pi.k[,gg]
        ntheta <- ntheta / sum(ntheta)
        lntheta <- log(ntheta + eps)
        # weighted log-linear fit; stats:: is used consistently
        # (the original mixed stats::lm with bare vcov()/coef())
        mod <- stats::lm(lntheta ~ 0 + Z, weights=ntheta)
        covbeta <- stats::vcov(mod)
        beta <- stats::coef(mod)
        # back-transform fitted coefficients to normalized probabilities
        pi.k[,gg] <- exp(Z %*% beta) / Ngroup[gg]
        pi.k[,gg] <- pi.k[,gg] / sum(pi.k[,gg])
        delta[,gg] <- beta
        covdelta[[gg]] <- covbeta
    }
    #--- OUTPUT
    res <- list(pi.k=pi.k, delta=delta, covdelta=covdelta)
    return(res)
}

# Backward-compatible alias kept for callers using the old dotted name.
.gdm.est.skillspace <- gdm_est_skillspace
/R/gdm_est_skillspace.R
no_license
strategist922/CDM
R
false
false
729
r
###########################################################################
# Reduced skillspace estimation.
#
# Smooths the per-group skill-class distribution pi.k by fitting, for each
# group, a weighted log-linear model of log(pi.k) on the design matrix Z
# (no intercept), then mapping the fitted coefficients back to normalized
# probabilities.
#
# Arguments:
#   Ngroup - numeric vector of group sample sizes (length G)
#   pi.k   - matrix of skill-class probabilities (classes x groups)
#   Z      - design matrix spanning the reduced skillspace
#   G      - number of groups
#   delta  - coefficient matrix (ncol(Z) x groups), overwritten per column
#   eps    - small constant guarding against log(0)
#
# Returns a list with the smoothed pi.k, the updated delta, and covdelta,
# a list of per-group covariance matrices of the fitted coefficients.
gdm_est_skillspace <- function(Ngroup, pi.k, Z, G, delta, eps=1E-10)
{
    # preallocate; seq_len() is also safe for G == 0, unlike 1:G
    covdelta <- vector("list", G)
    for (gg in seq_len(G)){
        # expected class counts for this group, renormalized to weights
        ntheta <- Ngroup[gg] * pi.k[,gg]
        ntheta <- ntheta / sum(ntheta)
        lntheta <- log(ntheta + eps)
        # weighted log-linear fit; stats:: is used consistently
        # (the original mixed stats::lm with bare vcov()/coef())
        mod <- stats::lm(lntheta ~ 0 + Z, weights=ntheta)
        covbeta <- stats::vcov(mod)
        beta <- stats::coef(mod)
        # back-transform fitted coefficients to normalized probabilities
        pi.k[,gg] <- exp(Z %*% beta) / Ngroup[gg]
        pi.k[,gg] <- pi.k[,gg] / sum(pi.k[,gg])
        delta[,gg] <- beta
        covdelta[[gg]] <- covbeta
    }
    #--- OUTPUT
    res <- list(pi.k=pi.k, delta=delta, covdelta=covdelta)
    return(res)
}

# Backward-compatible alias kept for callers using the old dotted name.
.gdm.est.skillspace <- gdm_est_skillspace
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/open.R \name{openSesame} \alias{openSesame} \title{The openSesame pipeline} \usage{ openSesame(x, platform = "", manifest = NULL, what = "beta", BPPARAM = SerialParam(), ...) } \arguments{ \item{x}{SigSet(s), IDAT prefix(es), minfi GenomicRatioSet(s), or RGChannelSet(s)} \item{platform}{optional platform string} \item{manifest}{optional dynamic manifest} \item{what}{either 'sigset' or 'beta'} \item{BPPARAM}{get parallel with MulticoreParam(n)} \item{...}{parameters to getBetas} } \value{ a numeric vector for processed beta values } \description{ This function is a simple wrapper of noob + nonlinear dye bias correction + pOOBAH masking. } \details{ If the input is an IDAT prefix or a \code{SigSet}, the output is the beta value numerics. If the input is a minfi GenomicRatioSet or RGChannelSet, the output is the sesamized GenomicRatioSet. } \examples{ sset <- sesameDataGet('HM450.1.TCGA.PAAD')$sset IDATprefixes <- searchIDATprefixes( system.file("extdata", "", package = "sesameData")) betas <- openSesame(IDATprefixes) }
/man/openSesame.Rd
permissive
jamorrison/sesame
R
false
true
1,124
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/open.R \name{openSesame} \alias{openSesame} \title{The openSesame pipeline} \usage{ openSesame(x, platform = "", manifest = NULL, what = "beta", BPPARAM = SerialParam(), ...) } \arguments{ \item{x}{SigSet(s), IDAT prefix(es), minfi GenomicRatioSet(s), or RGChannelSet(s)} \item{platform}{optional platform string} \item{manifest}{optional dynamic manifest} \item{what}{either 'sigset' or 'beta'} \item{BPPARAM}{get parallel with MulticoreParam(n)} \item{...}{parameters to getBetas} } \value{ a numeric vector for processed beta values } \description{ This function is a simple wrapper of noob + nonlinear dye bias correction + pOOBAH masking. } \details{ If the input is an IDAT prefix or a \code{SigSet}, the output is the beta value numerics. If the input is a minfi GenomicRatioSet or RGChannelSet, the output is the sesamized GenomicRatioSet. } \examples{ sset <- sesameDataGet('HM450.1.TCGA.PAAD')$sset IDATprefixes <- searchIDATprefixes( system.file("extdata", "", package = "sesameData")) betas <- openSesame(IDATprefixes) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/HRJfunctions.R \name{writeHRJAccessDatabase} \alias{writeHRJAccessDatabase} \title{(HRJ) Write HRJ "B" & "C" tables to MS Access database.} \usage{ writeHRJAccessDatabase(hrj, filename) } \arguments{ \item{hrj}{A list usually comprising of two data frames, which are the 'b' and 'c' HRJ tables in wide format with fields exactly matching those defined in the MS Access data base.} \item{filename}{A character string of length one. The MS Access filename.} } \value{ A MS database is written. Nothing is returned to R. } \description{ The Access data base must already be created, but can be empty. If there are tables with the same names as the data frames, then they will be over-written. } \examples{ \dontrun{ hrj.list <- readHRJtext(filepath) hrj.list$hrj.cwt.list <- lapply(hrj.list$hrj.cwt.list,updateStockByName, data.stock$stocks.df) writeHRJAccessDatabase(hrj = hrj.list$hrj.cwt.list, filename = 'test.accdb') #to add the "workingdata" table (which has C data, updated with B data): hrj.list.long <- reshapeHRJtolong(hrj.list$hrj.cwt.list, data.stock) workdingdata.wide <- reshapeHRJtowide(hrj.list.long$workingdata) writeHRJAccessDatabase(hrj = list(workingdata= workdingdata.wide), filename = 'test.accdb') } }
/man/writeHRJAccessDatabase.Rd
no_license
seananderson/ctctools
R
false
true
1,306
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/HRJfunctions.R \name{writeHRJAccessDatabase} \alias{writeHRJAccessDatabase} \title{(HRJ) Write HRJ "B" & "C" tables to MS Access database.} \usage{ writeHRJAccessDatabase(hrj, filename) } \arguments{ \item{hrj}{A list usually comprising of two data frames, which are the 'b' and 'c' HRJ tables in wide format with fields exactly matching those defined in the MS Access data base.} \item{filename}{A character string of length one. The MS Access filename.} } \value{ A MS database is written. Nothing is returned to R. } \description{ The Access data base must already be created, but can be empty. If there are tables with the same names as the data frames, then they will be over-written. } \examples{ \dontrun{ hrj.list <- readHRJtext(filepath) hrj.list$hrj.cwt.list <- lapply(hrj.list$hrj.cwt.list,updateStockByName, data.stock$stocks.df) writeHRJAccessDatabase(hrj = hrj.list$hrj.cwt.list, filename = 'test.accdb') #to add the "workingdata" table (which has C data, updated with B data): hrj.list.long <- reshapeHRJtolong(hrj.list$hrj.cwt.list, data.stock) workdingdata.wide <- reshapeHRJtowide(hrj.list.long$workingdata) writeHRJAccessDatabase(hrj = list(workingdata= workdingdata.wide), filename = 'test.accdb') } }
\name{anRpackage-package} \alias{anRpackage-package} \alias{anRpackage} \docType{package} \title{ What the package does (short line) ~~ package title ~~ } \description{ More about what it does (maybe more than one line) ~~ A concise (1-5 lines) description of the package ~~ } \details{ \tabular{ll}{ Package: \tab anRpackage\cr Type: \tab Package\cr Version: \tab 1.0\cr Date: \tab 2014-10-11\cr License: \tab What license is it under?\cr } ~~ An overview of how to use the package, including the most important ~~ ~~ functions ~~ } \author{ Who wrote it Maintainer: Who to complain to <yourfault@somewhere.net> ~~ The author and/or maintainer of the package ~~ } \references{ ~~ Literature or other references for background information ~~ } ~~ Optionally other standard keywords, one per line, from file KEYWORDS in ~~ ~~ the R documentation directory ~~ \keyword{ package } \seealso{ ~~ Optional links to other man pages, e.g. ~~ ~~ \code{\link[<pkg>:<pkg>-package]{<pkg>}} ~~ } \examples{ ~~ simple examples of the most important functions ~~ }
/folders/anRpackage/man/anRpackage-package.Rd
permissive
bwtian/Rtemplate
R
false
false
1,051
rd
\name{anRpackage-package} \alias{anRpackage-package} \alias{anRpackage} \docType{package} \title{ What the package does (short line) ~~ package title ~~ } \description{ More about what it does (maybe more than one line) ~~ A concise (1-5 lines) description of the package ~~ } \details{ \tabular{ll}{ Package: \tab anRpackage\cr Type: \tab Package\cr Version: \tab 1.0\cr Date: \tab 2014-10-11\cr License: \tab What license is it under?\cr } ~~ An overview of how to use the package, including the most important ~~ ~~ functions ~~ } \author{ Who wrote it Maintainer: Who to complain to <yourfault@somewhere.net> ~~ The author and/or maintainer of the package ~~ } \references{ ~~ Literature or other references for background information ~~ } ~~ Optionally other standard keywords, one per line, from file KEYWORDS in ~~ ~~ the R documentation directory ~~ \keyword{ package } \seealso{ ~~ Optional links to other man pages, e.g. ~~ ~~ \code{\link[<pkg>:<pkg>-package]{<pkg>}} ~~ } \examples{ ~~ simple examples of the most important functions ~~ }