blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7758407b4ac78377b45d24b56dcd4862e098da9d | bfffac0780569c66250007b0344023346ac91458 | /man/conplot.Rd | e0398b85080c6f153739b0dec402d4027ff9f75e | [] | no_license | cran/EMMIXskew | aef1a0b4d9f6af883a7e22a349f0fbf36b086f82 | b9ceb25f4722778c5fe65276272d01a234dd6670 | refs/heads/master | 2020-05-18T18:45:41.728074 | 2018-02-13T03:19:56 | 2018-02-13T03:19:56 | 17,678,934 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,102 | rd | conplot.Rd | \name{conplot}
\alias{conplot}
\alias{conplot2}
\alias{conplot3}
\alias{mypanel2}
\alias{mypanel3}
\alias{mypanel4}
\alias{panel.density}
\title{
Functions of Contours}
\description{
These functions are called by EmSkew.contours, EmSkew.filter and EmSkew.flow to plot the contours of the (skew) mixture density after fitting to the data.
}
\usage{
conplot(x, y, pro, mu, sigma, dof, delta, distr, grid = 300,
nrand = 6000, levels = seq(5, 95, by = 20), col = "white")
conplot2(x, y, pro, mu, sigma, dof, delta, distr, grid = 300,
nrand = 6000, levels = seq(5, 95, by = 20))
conplot3(x, y, pro, mu, sigma, dof, delta, modpts,distr, grid =300,
nrand = 10000, levels = seq(5, 95, by = 20))
mypanel2(x,y,...)
mypanel3(x,y,...)
mypanel4(x,y,...)
panel.density(x, col=1,...)
}
\arguments{
\item{x}{
A vector of observations on variable x.}
\item{y}{
A vector of observations on variable y.}
\item{pro}{
A vector of mixing proportions in the (skew) mixture model.}
\item{mu}{
A matrix with each column corresponding to the mean or location vector of one mixture component.
}
\item{sigma}{
An array of covariance matrices for each component of the mixture distribution.
}
\item{dof}{
A vector of degrees of freedom when "distr"ibution is "mvt" or "mst".
}
\item{delta}{
A matrix with each column as skew parameter vector of one component when "distr"ibution is "msn" or "mst".
}
\item{distr}{
A three letter string indicating the component distribution: "mvn" = normal distribution, "mvt" = t-distribution, "msn" = skew normal distribution, "mst" = skew t-distribution.
}
\item{modpts}{The mode points.}
\item{grid}{
An integer for the number of grid points in one direction.
}
\item{nrand}{
A large integer for the number of random numbers being drawn.
}
\item{levels}{
A vector of contour percentage levels for the plots. It should be in the range of 0 to 100.
}
\item{col}{
The colour of contour lines.
}
\item{...}{Other arguments passed to the panel functions.}
}
\details{
In most cases, users do not call this function directly; instead they call the function EmSkew.flow.}
\seealso{
\code{\link{EmSkew.flow}}
}
|
f9d759f5aa300a5019d531add466242545b3fdd1 | ea54e2665d0ddb390dbcc12972cdd2bdbd059dfb | /R/mz_atomize.R | 5a1b91b5606cb2449d5fb60c21e79987f98f1e83 | [
"MIT"
] | permissive | wmoldham/mzrtools | 15960486e39cc4be6ada9447b50faa06131e9676 | fb50c9424415083e063ad365877a162ce7e2ef4e | refs/heads/master | 2021-03-30T05:40:09.352183 | 2020-10-22T19:54:14 | 2020-10-22T19:54:14 | 248,021,635 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,834 | r | mz_atomize.R | #' Parse a molecular formula into element and count
#'
#' \code{mz_atomize} separates a molecular formula into element and count,
#' returning a named vector of element counts.
#'
#' @param molecule A string containing a molecular formula (\emph{e.g.}, "C2H7NO3S").
#' Structural formulas containing parentheses are not acceptable. Charges
#' may be included, but the charge count should follow the sign (\emph{e.g.},
#' "C10H16N5O13P3-3").
#'
#' @return A vector of counts named by element.
#'
#' @export
#'
#' @examples
#' mz_atomize("C5H8O5-")
#' mz_atomize("C2H7NO3S")
#' mz_atomize("C10H16N5O13P3-3")
#'
mz_atomize <- function(molecule) {
  # Reject anything that is not a flat molecular formula: element symbols
  # (capital letter, optional lowercase letter) with optional counts, and an
  # optional trailing charge such as "-3". Structural formulas with
  # parentheses do not match this pattern.
  correct_format <- "^(([A-Z]{1}[a-z]?)[0-9]*)((([A-Z]{1}[a-z]?)|(\\+|\\-))[0-9]*)*$"
  if (!stringr::str_detect(molecule, pattern = correct_format)) {
    stop("Incorrect format, provide molecular formula (e.g., \"C5H6O5\")")
  }

  # Tokenize into element-or-charge symbols, each with an optional count.
  token_pattern <- "(([A-Z]{1}[a-z]?)|(\\+|\\-))[0-9]*"
  tokens <- stringr::str_extract_all(molecule, pattern = token_pattern, simplify = TRUE)
  # Strip the digits to recover just the symbol of each token.
  symbols <- stringr::str_extract(tokens, "\\D*")

  # Every symbol must be a key of the atomic_mass lookup table.
  unknown <- symbols %nin% names(atomic_mass)
  if (any(unknown)) {
    stop(stringr::str_c("Unknown element(s) (",
                        stringr::str_c(symbols[unknown], collapse = ", "),
                        ") used in supplied formula"))
  }

  # Each element may appear at most once in the supplied formula.
  if (anyDuplicated(symbols) > 0) {
    dups <- unique(symbols[duplicated(symbols)])
    stop(stringr::str_c("Duplicated element(s) (",
                        stringr::str_c(dups, collapse = ", "),
                        ") used in supplied formula"))
  }

  # A token with no digits means exactly one atom of that element.
  raw_counts <- stringr::str_extract(tokens, "\\d+")
  raw_counts[is.na(raw_counts)] <- "1"
  counts <- as.integer(raw_counts)
  names(counts) <- symbols
  counts
}
|
24a64233cf96dfd28942d7172301bd5135e1175d | 5d690f159266b2c0f163e26fcfb9f9e17a0dc541 | /McSpatial/man/matchqreg.Rd | ca258add971dde25bc8323ab0c89d8affbb4c946 | [] | no_license | albrizre/spatstat.revdep | 3a83ab87085895712d7109c813dcc8acb55493e9 | b6fc1e73985b0b7ed57d21cbebb9ca4627183108 | refs/heads/main | 2023-03-05T14:47:16.628700 | 2021-02-20T01:05:54 | 2021-02-20T01:05:54 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,260 | rd | matchqreg.Rd | \name{matchqreg}
\alias{matchqreg}
\title{Sample quantiles and means over time for a matched sample data set}
\description{Calculates and graphs sample means and quantiles over time.
Intended for but not limited to a data set constructed with \emph{matchprop} or \emph{matchmahal} }
\usage{
matchqreg(form,taumat=c(.10,.25,.50,.75,.90), qreglwr.smooth=TRUE,
window=.50,bandwidth=0,kern="tcub", alldata=FALSE,
graph.yhat=TRUE,graph.mean=TRUE,data)
}
\arguments{
\item{form}{ A formula of the type \emph{y~x}, where \emph{x} represents time. }
\item{taumat}{Vector of quantiles. Default: \emph{taumat}=c(.10, .25, .50, .75, .90). }
\item{qreglwr.smooth}{If \emph{qreglwr.smooth=T}, uses \emph{qreglwr} to smooth the quantile series.
If \emph{qreglwr.smooth=F}, calculates period by period quantiles.}
\item{window}{Window size to be passed to \emph{qreglwr} if \emph{qreglwr.smooth=T.} Default: 0.50.}
\item{bandwidth}{Bandwidth to be passed to \emph{qreglwr} if \emph{qreglwr.smooth=T.} Default: 0, i.e., not used.}
\item{kern}{Kernel weighting function to be passed to \emph{qreglwr} if \emph{qreglwr.smooth=T.} Default is the tri-cube.
Options include "rect", "tria", "epan", "bisq", "tcub", "trwt", and "gauss".}
\item{alldata}{ Indicates how the \emph{alldata} option should be treated for \emph{qreglwr} if \emph{qreglwr.smooth=T}. Default: \emph{alldata=F} }
\item{graph.yhat}{If \emph{graph.yhat=T}, graphs the series of quantile lines. Default: \emph{graph.yhat=T.}}
\item{graph.mean}{If \emph{graph.mean=T}, graphs the means over time. Default: \emph{graph.yhat=T.}}
\item{data }{A data frame containing the data. Default: use data in the current working directory.}
}
\value{
\item{yhat}{Matrix of quantiles for y; actual quantiles if \emph{qreglwr.smooth=F} and smoothed values if \emph{qreglowr.smooth=T.}
Rows represent time periods and columns represent quantiles.}
\item{ymean}{Average value of y for each time period.}
\item{timevect}{Vector of target quantile values.}
}
\examples{
set.seed(189)
n = 500
# sale dates range from 0-10
# mean and variance of x increase over time, from 1 to 2
# price index for y increases from 0 to 1
timesale <- array(0,dim=n)
x <- rnorm(n,0,1)
for (j in seq(1,10)) {
timesale <- c(timesale, array(j, dim=n))
x <- c(x, rnorm(n,j/10,1+j/10))
}
n = length(x)
y <- x*1 + timesale/10 + rnorm(n, 0, sd(x)/2)
fit <- lm(y~x+factor(timesale))
summary(fit)
heddata <- data.frame(y,x,timesale)
summary(heddata)
par(ask=TRUE)
matchdata <- matchprop(timesale~x,data=heddata,ytreat=0,
distance="logit",discard="both")
table(matchdata$timesale)
fit <- matchqreg(y~timesale,qreglwr.smooth=FALSE,
graph.yhat=TRUE,graph.mean=TRUE,data=matchdata)
}
\details{
Calculates means and quantiles of \emph{y} for each time period present in the variable on the right hand side of the model formula.
The quantiles can be varied with the \emph{taumat} option. If \emph{qreglwr.smooth=T}, matchqreg uses the qreglwr command to smooth
the quantile lines and stores the results in the matrix \emph{yhat}. The unsmoothed, actual quantile values are stored in \emph{yhat} if
\emph{qreglwr.smooth=F}. The \emph{window, bandwidth, kern,} and \emph{alldata} options are passed on to \emph{qreglwr} if \emph{qreglwr.smooth=T}.
Although \emph{matchqreg} is meant to follow the \emph{matchprop} or \emph{matchmahal} command, it can be applied to any data set.
}
\references{
Deng, Yongheng, Sing Tien Foo, and Daniel P. McMillen, "Private Residential Price Indices in Singapore," \emph{Regional Science and Urban Economics},
42 (2012), 485-494.
Ho, D., Imai, K., King, G, Stuart, E., "Matching as Nonparametric Preprocessing for Reducing Model Dependence in Parametric Causal Inference," \emph{Political Analysis} 15 (2007), 199-236.
Ho, D., Imai, K., King, G, Stuart, E., "MatchIt: Nonparametric preprocessing for parametric causal inference," \emph{Journal of Statistical Software} 42 (2011), 1-28.
McMillen, Daniel P., "Repeat Sales as a Matching Estimator," \emph{Real Estate Economics} 40 (2012), 743-771.
}
\seealso{
\code{\link{matchmahal}}
\code{\link{matchprop}}
\code{\link{qreglwr}}
}
\keyword{Matching}
\keyword{Quantile Regression}
|
25a6613b5fc6c9fadc537b762b821a1872c64947 | 136eb68c86c635874692878a3cf54faeb3b36b45 | /AzureML/R/consume.R | 6d2c72336d6856ff6b06d87e6a5a5fb2aef0ddae | [
"MIT"
] | permissive | Azure/Azure-MachineLearning-ClientLibrary-R | 84f5a2db2c3c0dd1f417c5c470cee4899c8c3611 | dda06a103f774491de48dd54db8e56b961de7861 | refs/heads/master | 2023-04-14T12:25:46.928784 | 2023-03-28T16:48:16 | 2023-03-28T16:48:16 | 38,313,864 | 22 | 13 | MIT | 2023-03-28T16:48:18 | 2015-06-30T14:27:45 | HTML | UTF-8 | R | false | false | 17,922 | r | consume.R | #' Discover web service schema
#'
#' Discover the expected input to a web service specified by a web service ID ng the workspace ID and web service ID, information specific to the consumption functions
#'
#' @export
#'
#' @param helpURL URL of the help page of the web service
#' @param scheme the URI scheme
#' @param host optional parameter that defaults to ussouthcentral.services.azureml.net
#' @param api_version defaults to 2.0
#' @return List containing the request URL of the webservice, column names of the data, sample input as well as the input schema
#'
#' @seealso \code{\link{publishWebService}} \code{\link{consumeLists}}
#' @family discovery functions
#'
#' @examples
#' \dontrun{
#' endpoints <- getEndpoints("wsID", "wsAuth", "webserviceID")
#' wsSchema <- discoverSchema(endpoints[[1]]$HelpLocation)
#' }
# Fetch and parse the swagger.json for an Azure ML web service endpoint,
# derive the execution request URL, an example input, and the input schema.
# Prints usage hints to the console as a side effect; returns a list with
# keys requestUrl, columnNames, sampleInput and inputSchema.
discoverSchema <- function(helpURL, scheme = "https", host = "ussouthcentral.services.azureml.net", api_version = "2.0") {
  # Endpoint and workspace IDs are parsed out of the help-page URL.
  endpointId = getDetailsFromUrl(helpURL)[[1]]
  workspaceId = getDetailsFromUrl(helpURL)[[2]]
  # Construct swagger document URL using parameters
  # Use paste method without separator
  swaggerURL = paste(scheme,"://", host, "/workspaces/", workspaceId, "/services/", endpointId,"/swagger.json", sep = "")
  print(swaggerURL)
  # Automatically parses the content and gets the swagger document
  response <- RCurl::getURLContent(swaggerURL)
  swagger = rjson::fromJSON(response)
  # Accesses the input schema in the swagger document
  # NOTE(review): `swagger$definition` (singular) looks like a typo for
  # `swagger$definitions` (used three lines below) — as written this is
  # likely NULL. Confirm against an actual swagger payload before changing.
  inputSchema = swagger$definition$input1Item
  #Accesses the example in the swagger document and converts it to JSON
  exampleJson <- rjson::toJSON(swagger$definitions$ExecutionRequest$example)
  #Accesses a single specific JSON object and formats it to be a request inputted as a list in R
  inputExample = as.list((jsonlite::fromJSON((exampleJson)))$Inputs$input1)
  # Swagger examples carry no sample values for strings; substitute a
  # placeholder so the sample input is at least well formed.
  for(i in 1:length(inputExample)) {
    if(typeof(inputExample[[i]]) == "character") {
      inputExample[i] = "Please input valid String"
    }
  }
  #Accesses the names of the columns in the example and stores it in a list of column names
  columnNames = list()
  for(i in 1:length(inputExample)) {
    columnNames[[i]] = names(inputExample)[[i]]
  }
  # Uses multiple nested loops to access the various paths in the swagger document and find the execution path
  foundExecPath = FALSE
  pathNo = 0
  execPathNo= -1
  for(execPath in swagger$paths) {
    pathNo = pathNo + 1
    for(operationpath in execPath) {
      for(operation in operationpath) {
        #Goes through the characteristcs in every operation e.g. operationId
        for(charac in operation) {
          # Finds the path in which the operationId (characteristic of the path) = execute and sets the execution path number
          if(charac[1] == "execute")
          {
            #Sets found execution path to true
            # NOTE(review): `break` only exits the innermost loop; the outer
            # loops keep scanning, so a later match would overwrite execPathNo.
            foundExecPath = TRUE
            execPathNo = pathNo
            break
          }
        }
      }
    }
  }
  #Stores the execution path
  if(foundExecPath) {
    executePath = names(swagger$paths)[[execPathNo]]
  } else{
    executePath = "Path not found"
  }
  # Constructs the request URL with the parameters as well as execution path found. The separator is set to an empty string
  requestUrl = paste(scheme,"://", host, "/workspaces/", workspaceId, "/services/", endpointId, executePath, sep = "")
  # Access the HTTP method type e.g. GET/ POST and constructs an example request
  # NOTE(review): index 2 is hard-coded here rather than using execPathNo —
  # confirm that paths[[2]] is always the execution path.
  httpMethod = toupper(names(swagger$paths[[2]]))
  httpRequest = paste(httpMethod,requestUrl)
  # Tell user what functions they can use and prints to the console
  if(foundExecPath) {
    consumeFile = paste("To score a file: consumeFile(apiKey, requestUrl, dataframe)")
    consumeDataFrame = paste("To score a dataframe: consumeDataframe(apiKey, requestUrl, scoreDataFrame)")
    consumeLists = paste("To score requests as lists in the key-value format: consumeLists(apiKey, requestUrl, ...)")
    cat("Sample functions to execute the web service and get a response synchronously:","\n", consumeFile,"\n", consumeDataFrame,"\n", consumeLists,"\n","\n")
  } else {
    cat("Warning! There was no execution path found for this web service, hence a request URL cannot be constructed!","\n","\n")
  }
  # Warns user of characters and urges them to enter valid strings for them
  firstWarning = TRUE
  for(i in 1:length(inputExample)) {
    if(typeof(inputExample[[i]]) == "character") {
      if(firstWarning) {
        cat("Warning! The sample input does not contain sample values for characters. Please input valid Strings for these fields", "\n")
      }
      cat(" ", names(inputExample)[[i]],"\n")
      firstWarning = FALSE
    }
  }
  #Returns what was discovered in the form of a list
  return (list("requestUrl" = requestUrl, "columnNames" = columnNames, "sampleInput" = inputExample, "inputSchema" = inputSchema))
}
#' Use a web service to score a file
#'
#' Read in a csv and score it in batches using a Microsoft Azure Machine Learning Web Service. The results are stored in a new csv, default named "results.csv"
#'
#' @export
#'
#' @param apiKey primary access key as a string
#' @param requestUrl API URL
#' @param inFileName the name of the file to be scored as a string
#' @param globalParam global parameters entered as a list, default value is an empty list
#' @param outputFileName the name of the file to output results to, entered as a string, default value is "results.csv"
#' @param batchSize batch size of each batch, default value is 300
#' @param retryDelay the time in seconds to delay before retrying in case of a server error, default value is 0.3 seconds
#' @return returnDataFrame data frame containing results returned from web service call
#'
#' @seealso \code{\link{discoverSchema}} \code{\link{publishWebService}}
#' @family consumption functions
#'
#' @import stats
#'
#' @examples
#' \dontrun{
#' add <- function(x,y) { return(x+y) }
#' newService <- publishWebService("add", "add",
#' list("x"="int","y"="int"), list("z"="int"), wsID, authToken)
#' webserviceDetails <- newService[[1]]
#' endpoints <- newService[[2]]
#'
#' response <- consumeFile(endpoints[[1]]$PrimaryKey, endpoints[[1]]$ApiLocation, "test.csv")
#' }
# Read a CSV, score it against the web service in batches of `batchSize`
# rows via callAPI(), write the combined results to `outputFileName`,
# and return them as a data frame.
consumeFile <- function(apiKey, requestUrl, inFileName, globalParam = setNames(list(), character(0)), outputFileName = "results.csv", batchSize = 300, retryDelay = 0.3) {
  #Stops users if they miss out mandatory fields
  if (missing(apiKey)) {
    stop("Need to specify API key")
  }
  if (missing(inFileName)) {
    stop("Need to specify file to be scored")
  }
  if (missing(requestUrl)) {
    stop("Need to specify request URL")
  }
  #read file and store as a data frame to be scored
  # check.names=FALSE keeps the original column headers untouched.
  scoreDataFrame = read.csv(inFileName,check.names=FALSE)
  # create empty data frame that stores results to be returned
  returnDataFrame <- data.frame(stringsAsFactors=FALSE)
  # create data frame that stores requests in each batch
  requestBatch = data.frame(stringsAsFactors=FALSE)
  # counter = rows accumulated in the current batch; lastProc = index of the
  # last row already submitted, so each batch covers (lastProc+1):i.
  counter = 1
  lastProc = 0
  # Loop that iterates through the rows of the entire data frame that is to be scored
  for(i in 1:(nrow(scoreDataFrame))) {
    # If we have reached the batch size provided or the end of the data frame
    if(counter == batchSize || i == (nrow(scoreDataFrame))) {
      # Create empty data frame that stores results for that batch
      batchResults = data.frame(stringsAsFactors=FALSE)
      # Store a single batch of requests in a data frame
      # drop=FALSE keeps a one-row slice as a data frame, not a vector.
      requestBatch = scoreDataFrame[(lastProc+1):i,,drop=FALSE]
      # Convert them into key-value lists using rjson and df2json packages
      keyvalues = rjson::fromJSON((df2json::df2json(requestBatch)))
      # Store results returned from call in temp variable
      temp <- callAPI(apiKey, requestUrl, keyvalues, globalParam, retryDelay)
      # Set last processed to current row
      lastProc = i
      # Access output by converting from JSON into list and indexing into Results
      resultStored <- jsonlite::fromJSON(temp)
      resultList = resultStored$Results$output1
      batchResults <- data.frame(resultList)
      # Force returnDataFrame to have the same column names to avoid errors
      if(length(returnDataFrame) != 0 && length(batchResults) != 0) {
        names(returnDataFrame) <- names(resultList)
      }
      #Add batch results to the dataframe to be returned
      returnDataFrame <- rbind(returnDataFrame,batchResults)
      #Print how many rows in total have been processed
      print(sprintf("%i %s %i %s", i,"out of",nrow(scoreDataFrame),"processed"))
      #Reset the requests in the batch to empty data frame
      requestBatch = data.frame(stringsAsFactors=FALSE)
      counter = 0
    }
    counter = counter + 1
  }
  # Write results to a csv file
  # NOTE(review): the connection is not protected by on.exit(); an error in
  # write.csv would leak the file handle.
  resultsFile <-file(outputFileName,"w")
  write.csv(returnDataFrame, resultsFile)
  close(resultsFile)
  return (returnDataFrame)
}
#' Use a web service to score data in list format
#'
#' Score data represented as lists, where each list represents one parameter of the web service
#'
#' @export
#'
#' @param apiKey primary access key as a string
#' @param requestUrl API URL
#' @param ... variable number of requests entered as lists in key-value format
#' @param globalParam global parameters entered as a list, default value is an empty list
#' @param retryDelay the time in seconds to delay before retrying in case of a server error, default value is 0.3 seconds
#' @return returnDataFrame data frame containing results returned from web service call
#'
#' @seealso \code{\link{discoverSchema}} \code{\link{publishWebService}}
#' @family consumption functions
#'
#' @examples
#' \dontrun{
#' add <- function(x,y) { return(x+y) }
#' newService <- publishWebService("add", "add",
#' list("x"="int","y"="int"), list("z"="int"), wsID, authToken)
#' webserviceDetails <- newService[[1]]
#' endpoints <- newService[[2]]
#'
#' response <- consumeLists(endpoints[[1]]$PrimaryKey, endpoints[[1]]$ApiLocation,
#' list("x"=1, "y"=2), list("x"=3, "y"=4))
#' }
# Score one or more key-value request lists against the web service in a
# single call and return the first output port as a data frame.
#
# apiKey      - primary access key as a string (mandatory)
# requestUrl  - API URL (mandatory)
# ...         - one or more requests, each a named list of input values
# globalParam - global web-service parameters; defaults to an empty named list
# retryDelay  - seconds to wait before retrying on server error
consumeLists <- function(apiKey, requestUrl, ..., globalParam = setNames(list(), character(0)), retryDelay = 0.3) {
  # Validate mandatory arguments up front.
  if (missing(apiKey)) {
    stop("Need to specify API key")
  }
  if (missing(requestUrl)) {
    stop("Need to specify request URL")
  }
  # (Removed dead code: the original re-assigned globalParam inside an
  # `if (missing(globalParam))` branch with exactly its default value.)
  # Collect the variable number of key-value request lists.
  requestsLists <- list(...)
  # callAPI handles authentication headers, JSON encoding and retries.
  result <- callAPI(apiKey, requestUrl, requestsLists, globalParam, retryDelay)
  # Parse the JSON response and index into the first output port.
  resultStored <- jsonlite::fromJSON(result)
  resultList <- resultStored$Results$output1
  # Return the scored rows as a data frame.
  data.frame(resultList)
}
#' Use a web service to score a data frame
#'
#' Score a data frame, where each row is the input to the scoring function, using a Microsoft Azure Machine Learning web service
#'
#' @export
#'
#' @param apiKey primary access key of the web service as a string
#' @param requestUrl API URL
#' @param scoreDataFrame the data frame to be scored
#' @param globalParam global parameters entered as a list, default value is an empty list
#' @param batchSize batch size of each batch, default value is 300
#' @param retryDelay the time in seconds to delay before retrying in case of a server error, default value is 0.3 seconds
#' @return returnDataFrame data frame containing results returned from web service call
#'
#' @seealso \code{\link{discoverSchema}} \code{\link{publishWebService}}
#' @family consumption functions
#'
#' @examples
#' \dontrun{
#' add <- function(x,y) { return(x+y) }
#' newService <- publishWebService("add", "add",
#' list("x"="int","y"="int"), list("z"="int"), wsID, authToken)
#' webserviceDetails <- newService[[1]]
#' endpoints <- newService[[2]]
#'
#' df <- data.frame("x"=c(1,2), "y"=c(3,4))
#' response <- consumeDataframe(endpoints[[1]]$PrimaryKey, endpoints[[1]]$ApiLocation, df)
#' }
# Score a data frame row-by-row against the web service in batches of
# `batchSize` rows via callAPI() and return the combined results.
# Same batching logic as consumeFile(), minus the file I/O.
consumeDataframe <- function(apiKey, requestUrl, scoreDataFrame, globalParam=setNames(list(), character(0)), batchSize = 300, retryDelay = 0.3) {
  #Stops users if they miss out mandatory fields
  if (missing(apiKey)) {
    stop("Need to specify API key")
  }
  if (missing(requestUrl)) {
    stop("Need to specify request URL")
  }
  if (missing(scoreDataFrame)) {
    stop("Need to specify dataframe to be scored")
  }
  # create empty data frame that stores results to be returned
  returnDataFrame <- data.frame(stringsAsFactors=FALSE)
  # create data frame that stores requests in each batch
  requestBatch = data.frame(stringsAsFactors=FALSE)
  # counter = rows accumulated in the current batch; lastProc = index of the
  # last row already submitted, so each batch covers (lastProc+1):i.
  counter = 1
  lastProc = 0
  # Loop that iterates through the rows of the entire data frame that is to be scored
  for(i in 1:(nrow(scoreDataFrame))) {
    # If we have reached the batch size provided or the end of the data frame
    if(counter == batchSize || i == (nrow(scoreDataFrame))) {
      # Create empty data frame that stores results for that batch
      batchResults = data.frame(stringsAsFactors=FALSE)
      # Store a single batch of requests in a data frame
      # drop=FALSE keeps a one-row slice as a data frame, not a vector.
      requestBatch = scoreDataFrame[(lastProc+1):i,,drop=FALSE]
      # Convert them into key-value lists using rjson and df2json packages
      keyvalues = rjson::fromJSON((df2json::df2json(requestBatch)))
      # Store results returned from call in temp variable
      temp <- callAPI(apiKey, requestUrl, keyvalues, globalParam, retryDelay)
      # Set last processed to current row
      lastProc = i
      # Access output by converting from JSON into list and indexing into Results
      resultStored <- jsonlite::fromJSON(temp)
      resultList = resultStored$Results$output1
      batchResults <- data.frame(resultList)
      # Force returnDataFrame to have the same column names to avoid errors
      if(length(returnDataFrame) != 0 && length(batchResults) != 0) {
        names(returnDataFrame) <- names(resultList)
      }
      #Add batch results to the dataframe to be returned
      returnDataFrame <- rbind(returnDataFrame,batchResults)
      #Print how many rows in total have been processed
      print(sprintf("%i %s %i %s", i,"out of",nrow(scoreDataFrame),"processed"))
      #Reset the requests in the batch to empty data frame
      requestBatch = data.frame(stringsAsFactors=FALSE)
      counter = 0
    }
    counter = counter + 1
  }
  return(returnDataFrame)
}
#' Framework for making an Azure ML web service API call.
#'
#' Helper function that constructs and send the API call to a Microsoft Azure Machine Learning web service, then receives and returns the response in JSON format.
#'
#' @param apiKey primary API key
#' @param requestUrl API URL
#' @param keyvalues the data to be passed to the web service
#' @param globalParam the global parameters for the web service
#' @param retryDelay number of seconds to wait after failing (max 3 tries) to try again
#' @return result the response
#'
#' @keywords internal
# POST `keyvalues` to the Azure ML endpoint with Bearer authentication.
# Makes up to 3 attempts, re-sending only after a 5xx response (sleeping
# `retryDelay` seconds between attempts); 4xx responses are printed and
# returned immediately. Returns the raw response body as a string.
callAPI <- function(apiKey, requestUrl, keyvalues, globalParam, retryDelay) {
  # Set number of tries and HTTP status to 0
  httpStatus = 0
  tries = 0
  # Limit number of API calls to 3
  for(i in 1:3) {
    # In case of server error or if first try,
    if(tries == 0 || httpStatus >= 500) {
      if(httpStatus >= 500) {
        # Print headers and let user know you are retrying
        # `headers` here is the value captured on the previous iteration.
        print(paste("The request failed with status code:", httpStatus, sep=" "))
        print("headers:")
        print(headers)
        print(sprintf("%s %f %s", "Retrying in ",retryDelay," seconds"))
        # Delay by specified time in case of server error
        Sys.sleep(retryDelay)
      }
      tries = tries + 1
      # Load RCurl package functions
      options(RCurlOptions = list(cainfo = system.file("CurlSSL", "cacert.pem", package = "RCurl")))
      h = RCurl::basicTextGatherer()
      hdr = RCurl::basicHeaderGatherer()
      # Construct request payload
      req = list(
        Inputs = list(
          input1 = keyvalues
        )
        ,GlobalParameters = globalParam
      )
      # Convert request payload to JSON
      body = enc2utf8((rjson::toJSON(req)))
      # Create authorization header
      authz_hdr = paste('Bearer', apiKey, sep=' ')
      # Make call to API with necessary components
      h$reset()
      RCurl::curlPerform(url = requestUrl,
                    httpheader=c('Content-Type' = "application/json", 'Authorization' = authz_hdr),
                    postfields=body,
                    writefunction = h$update,
                    headerfunction = hdr$update,
                    verbose = FALSE
      )
      # Gather headers
      headers = hdr$value()
      # Get HTTP status to decide whether to throw bad request or retry, or return etc.
      httpStatus = headers["status"]
      result = h$value()
    }
    # Return result if successful
    if(httpStatus == 200) {
      return(result)
    }
    #if user error, print and return error details
    else if ((httpStatus>= 400) && (500 > httpStatus))
    {
      print(paste("The request failed with status code:", httpStatus, sep=" "))
      print("headers:")
      print(headers)
      print(h$value())
      return (result)
    }
  }
  # NOTE(review): statuses other than 200/4xx/5xx (e.g. other 2xx/3xx) fall
  # through the loop without a retry and return the last response here.
  return(result)
}
#' Helper function to extract information from a help page URL
#'
#' Given a Microsoft Azure Machine Learning web service endpoint, extracts the endpoint ID and the workspace ID
#'
#' @param helpURL the URL of a help page
#' @return a list containing the endpoint ID and the workspace ID
#'
#' @keywords internal
getDetailsFromUrl <- function(helpURL) {
  # Endpoint ID: everything between "endpoints/" and the following "/".
  after_endpoints <- strsplit(helpURL, "endpoints/")[[1]][2]
  endpoint_id <- strsplit(after_endpoints, "/")[[1]][[1]]
  # Workspace ID: everything between "/workspaces/" and the following "/".
  after_workspaces <- strsplit(helpURL, "/workspaces/")[[1]][2]
  workspace_id <- strsplit(after_workspaces, "/")[[1]][[1]]
  # Same shape as the original: unnamed two-element list.
  list(endpoint_id, workspace_id)
}
|
7b203dad15fe037c02306190bae3da6cd5ecc04f | 6788c0c20011fcb12d1bf23bc78e0780105f8d0e | /scripts/io_import_dreamtk/class_Database_MySQL.R | 2b800982597f17a1f36dfff08c4d56fd44252b14 | [] | no_license | NongCT230/DTKv0.8.4 | 947f4ce3b811a9af74dab5a1e16ec516fd6052e7 | 6a48a9b2129d2f29683003debfc45d2ce8e28a0b | refs/heads/master | 2023-04-10T03:17:20.658580 | 2021-04-12T15:07:17 | 2021-04-12T15:07:17 | 357,235,011 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 16,427 | r | class_Database_MySQL.R |
# MySQL connector class ---------------------------------------------------
# v0.9
#requires R6 class, tidyverse, logging, DBI, RMySQL packages
#creates a mysql connection object
#wrapper around RMySQL/DBI interface
Class.Database.MySQL <- R6Class("Class.Database.MySQL",
lock_class = TRUE,
cloneable = FALSE,
#private variables and functions
private = list(
host = NULL,
port = NULL,
user = NULL,
password = NULL,
db = NULL,
save = NULL,
mysql_connection = NULL, #RMySQL object
instance_name = NULL,
connectionObjectExists = function(){
if (!is.null(private$mysql_connection)){
return (TRUE);
} else {
return (FALSE);
}
},
loginInfoHasBeenSaved = function(){
if (!is.null(private$save) && private$save == "1"){
return (TRUE);
} else {
return (FALSE);
}
},
shouldSaveLoginInfo = function( login_info ){
if(login_info$save == "1"){
return (TRUE);
} else {
return (FALSE);
}
},
saveLoginInfo = function( login_info ){
private$host <- login_info$host;
private$port <- login_info$port;
private$user <- login_info$user;
private$password <- login_info$password;
private$db <- login_info$db;
private$save <- login_info$save;
},
eraseLoginInfo = function(){
private$host <- NULL;
private$port <- NULL;
private$user <- NULL;
private$password <- NULL;
private$db <- NULL;
private$save <- NULL;
gc(); #call garbage collector immediately
},
    # Try to open a MySQL connection with the supplied credentials.
    # On failure: logs the error, clears the stored connection object, and
    # stops execution (the app cannot proceed without a database).
    attemptDBConnection = function( login_info ){
      tryCatch(
        private$mysql_connection <- dbConnect(MySQL(),
                                              user = login_info$user,
                                              password = login_info$password,
                                              host = login_info$host,
                                              dbname = login_info$db,
                                              port = login_info$port),
        error = function(e) {
          logerror(paste0("MySQL Connection error in mysql$dbConnect(): ", e));
          private$mysql_connection <- NULL;
          #private$eraseLoginInfo();
          stop("MySQL Connection error, app can not proceed.");
        }
      );
    }
),
#public variables and functions
public = list(
classname = NULL, #we want to use this to id class instead of typeof(object) which returns "environment" for all R6 classes
#constructor
initialize = function( instance_name = "MySQL" ) {
private$instance_name <- instance_name;
self$classname = "Database.MySQL";
},
#finalizer
    finalize = function() {
      # Intentionally a no-op: automatic disconnection on finalization is
      # disabled; callers are expected to invoke dbDisconnect() themselves.
      # if(!is.null(private_mysqlconnection)){
      #   self$dbDisconnect();
      # }
    },
#
setLoginInfo = function(host = NULL, user = NULL, password = NULL, db = NULL, save = NULL, port = 3306){
private$host <- host;
private$port <- port;
private$user <- user;
private$password <- password;
private$db <- db;
private$save <- save;
},
#connect to DB
    # Open the MySQL connection using previously saved credentials.
    # Returns TRUE when a connection object now exists, FALSE otherwise;
    # stops with an error when no login information has been saved.
    dbConnect = function() {
      if ( private$loginInfoHasBeenSaved() ){
        login_info <- list(user = private$user,
                           password = private$password,
                           host = private$host,
                           db = private$db,
                           port = private$port);
        private$attemptDBConnection( login_info );
      } else {
        # Interactive credential prompt retired; kept for reference.
        # login_window <- Class.GUI.LoginWindow$new(str_c("MySQL Login: ", private$instance_name));
        # login_info <- login_window$getInfo(); #expected list with keys: user, password, host, db, save
        #
        # if ( private$shouldSaveLoginInfo( login_info ) ){
        #   private$saveLoginInfo( login_info );
        # }
        #
        # private$attemptDBConnection( login_info );
        #info has not been saved, we won't have it, can't connect
        logerror("MySQL Connection error, no MySQL login information specified, app can not proceed.");
        stop("MySQL Connection error, no MySQL login information specified, app can not proceed.");
      }
      if( private$connectionObjectExists() ){
        logdebug("MySQL connection established.");
        return (TRUE);
      } else {
        return (FALSE);
      }
    },
#disconnect from DB
    # Close the MySQL connection if one exists and clear the stored object.
    # Returns the DBI dbDisconnect() result (FALSE on failure), or FALSE when
    # there was no connection to remove.
    dbDisconnect = function(){
      if ( private$connectionObjectExists() ) {
        result <- FALSE;
        tryCatch (
          result <- dbDisconnect(private$mysql_connection),
          error = function(e) { logerror(paste0("MySQL disonnection error in mysql$dbDisconnect(): ", e)); }
        );
        # Drop our reference even if the DBI call failed.
        private$mysql_connection <- NULL;
        logdebug("MySQL connection removed.");
        return (result);
      }else{
        logdebug("mysql$dbDisconnect(): Attempted to remove MySQL connection which has already been removed.");
        return (FALSE);
      }
    },
# Report whether the connection handle is still valid. Note RMySQL's
# dbIsValid() is not fully reliable; a FALSE here may also mean the probe
# itself failed. Returns FALSE when no connection object exists.
dbIsValid = function(){
if ( !private$connectionObjectExists() ){
logdebug("mysql$dbIsValid(): MySQL connection does not exist.");
return (FALSE);
}
valid <- FALSE;
tryCatch(
valid <- dbIsValid(private$mysql_connection),
error = function(e) { logerror(paste0("MySQL Connection validity error in mysql$dbIsValid(): ", e)); }
);
return (valid);
},
# Return connection metadata from DBI's dbGetInfo(), or NULL (with the failure
# logged) when no connection object exists or the probe fails.
dbGetInfo = function(){
if ( private$connectionObjectExists() ){
tryCatch(
dbGetInfo(private$mysql_connection),
# BUG FIX: the handler was declared as function() but referenced `e`;
# tryCatch invokes the handler with the condition object, so the handler
# itself failed ("unused argument") whenever an error occurred.
error = function(e) { logerror(paste0("MySQL Connection info error in mysql$dbGetInfo(): ", e)); NULL; }
);
}else {
logwarn("mysql$dbGetInfo(): MySQL connection does not exist. No info.");
}
},
# Accessor for the underlying RMySQL/DBI connection object (may be NULL).
getDB = function(){
private$mysql_connection
},
# List the tables of the active database. Returns an empty list when no
# connection exists or the listing fails (failure is logged).
dbListTables = function(){
if ( !private$connectionObjectExists() ){
logwarn("mysql$dbListTables(): MySQL connection does not exist. Returning empty table list.");
return (list());
}
tables <- list();
tryCatch (
tables <- dbListTables(private$mysql_connection),
error = function(e) { logerror(paste0("MySQL Connection table-listing error in mysql$dbListTables(): ", e, " Returning empty table list.")); }
);
return (tables);
},
# List the fields of `table` in the active database. Returns an empty list
# when no connection exists, no table was given, or the lookup fails.
dbListFields = function(table = NULL){
if ( !private$connectionObjectExists() || is.null(table)){
logwarn("mysql$dbListFields(): Either MySQL connection does not exist or no table requested. Returning empty table field list.");
return (list());
}
fields <- list();
tryCatch (
fields <- dbListFields(private$mysql_connection, table),
error = function(e) { logerror(paste0("MySQL Connection field-listing error in mysql$dbListFields(): ", e, " Returning empty table field list.")); }
);
return (fields);
},
# Read an entire table and return it as a tibble. Returns an empty tibble when
# no connection exists, no table was given, or the read fails.
dbReadTable = function(table = NULL){
if ( !private$connectionObjectExists() || is.null(table)){
logwarn("mysql$dbReadTable(): Either MySQL connection does not exist or no table requested. Returning empty tibble.");
return (tibble());
}
contents <- tibble();
tryCatch (
contents <- as_tibble(dbReadTable(private$mysql_connection, table)),
error = function(e) { logerror(paste0("MySQL Connection table-read error in mysql$dbReadTable(): ", e, " Returning empty tibble.")); }
);
return (contents);
},
# Send a SQL statement and return the live result/query object. A query is
# considered fatal for the app: any failure logs and then stop()s, because the
# callers cannot proceed without a working MySQL server.
dbSendQuery = function(sql_query = ""){
if ( private$connectionObjectExists() && !is.null(sql_query) && is.character(sql_query)){
result <- NULL;
tryCatch (
result <- dbSendQuery(private$mysql_connection, sql_query),
error = function(e) { logerror(paste0("MySQL Connection send-query error in mysql$dbSendQuery(): ", e, " Returning NULL. SQL query was: ", sql_query));
stop("Failed to query MySQL server, which is a problem. Check connection, restart app.");
}
);
return (result);
}else{
logwarn("mysql$dbSendQuery(): Either MySQL connection does not exist or sql_query is invalid. Returning NULL.");
# BUG FIX: a dead `return (NULL)` followed this stop(); it was unreachable
# and has been removed.
stop("Failed to query MySQL server, which is a problem. Check connection, restart app.");
}
},
# Fetch up to `n` rows from a result object produced by dbSendQuery().
# Returns an empty tibble when no connection exists, the query object is
# missing, or the fetch fails.
dbFetch = function(query = NULL, n = 10000){
if ( !private$connectionObjectExists() || is.null(query) || !is.numeric(n)){
logwarn("mysql$dbFetch(): Either MySQL connection does not exist or query invalid. Returning empty tibble.");
return (tibble());
}
rows <- tibble();
tryCatch (
rows <- dbFetch(query, n),
error = function(e) { logerror(paste0("MySQL Connection query-fetch error in mysql$dbFetch(): ", e, " Returning empty tibble.")); }
);
return (rows);
},
# Run a SQL statement and return its full result in one step (send + fetch +
# clear). Returns an empty tibble on invalid input or query failure.
dbGetQuery = function(sql_query = ""){
if ( !private$connectionObjectExists() || is.null(sql_query) || !is.character(sql_query)){
logwarn("mysql$dbGetQuery(): Either MySQL connection does not exist or sql_query is invalid. Returning empty tibble.");
return (tibble());
}
rows <- tibble();
tryCatch (
rows <- dbGetQuery(private$mysql_connection, sql_query),
error = function(e) { logerror(paste0("MySQL Connection get-query error in mysql$dbGetQuery(): ", e, " Returning empty tibble. SQL query was: ", sql_query)); }
);
return (rows);
},
# Release the server-side resources held by a result object from
# dbSendQuery(). Returns TRUE on success, FALSE otherwise.
dbClearResult = function(query = NULL){
if ( !private$connectionObjectExists() || is.null(query)){
logwarn("mysql$dbClearResult(): Either MySQL connection does not exist or query object has not been given.");
return (FALSE);
}
cleared <- FALSE;
tryCatch (
cleared <- dbClearResult(query),
error = function(e) { logerror(paste0("MySQL Connection clear-query error in mysql$dbClearResult(): ", e)); }
);
return (cleared);
},
# Write data frame `df` to table `db_table`. Exactly one of overwrite/append
# must be TRUE (xor), matching DBI's semantics for replacing vs extending a
# table. Returns TRUE on success, FALSE on invalid arguments or failure.
dbWriteTable = function(db_table = NULL, df = NULL, overwrite = FALSE, append = FALSE){
args_ok <- private$connectionObjectExists() &&
!is.null(db_table) &&
is.character(db_table) &&
!is.null(df) &&
is.data.frame(df) &&
xor(overwrite, append);   # equivalent to the old !((overwrite == append) == TRUE)
if ( !args_ok ){
logwarn("mysql$dbWriteTable(): Connection or table writing parameters invalid. Table not written.");
return (FALSE);
}
written <- FALSE;
tryCatch (
written <- dbWriteTable(private$mysql_connection, db_table, df, overwrite = overwrite, append = append),
error = function(e) { logerror(paste0("MySQL Connection table-write error in mysql$dbWriteTable(): ", e, " Table to be written: ", db_table)); }
);
return (written);
},
# Drop a table from the active database. Returns TRUE on success, FALSE on
# invalid arguments or failure (failure is logged).
dbRemoveTable = function(table = NULL){
if ( !private$connectionObjectExists() || is.null(table) || !is.character(table)){
logwarn("mysql$dbRemoveTable(): Either MySQL connection does not exist or no table requested.");
return (FALSE);
}
removed <- FALSE;
tryCatch (
removed <- dbRemoveTable(private$mysql_connection, table),
error = function(e) { logerror(paste0("MySQL Connection table-remove error in mysql$dbRemoveTable(): ", e, " Table to be removed: ", table)); }
);
return (removed);
},
# Check whether the given database name(s) all exist on the server, by
# querying SHOW DATABASES. Returns FALSE on invalid input or no connection.
dbExists = function(database = NULL){
if ( !private$connectionObjectExists() || is.null(database) || typeof(database) != "character"){
logwarn("mysql$dbExists(): Either MySQL connection does not exist or no database requested.");
return (FALSE);
}
known_dbs <- self$dbGetQuery("SHOW DATABASES");
return (all(database %in% known_dbs$Database));
},
# Check that every table in `tables` exists in the currently-used database.
# Returns TRUE when all exist, FALSE otherwise (missing names are logged).
dbTablesExist = function ( tables = NULL ){
if ( private$connectionObjectExists() && !is.null(tables)){
dbtables <- self$dbListTables();
tables_exist <- tables %in% dbtables;
if ( all(tables_exist) ) {
return (TRUE);
}else {
logerror( str_c("Missing tables: ", str_c(unlist ( tables[which(!tables_exist)] ), collapse = " ")) );
return (FALSE);
};
} else {
# BUG FIX: the function previously fell off the end here and returned NULL
# invisibly, which crashes callers doing if (x$dbTablesExist(...)) with
# "argument is of length zero". Return an explicit FALSE instead.
logwarn("mysql$dbTablesExist(): Either MySQL connection does not exist or no tables requested.");
return (FALSE);
}
},
# Check that every name in `fields` is a column of `table` in the current
# database. Returns TRUE when all exist, FALSE otherwise (missing fields are
# logged).
dbFieldsExist = function ( table, fields ){
if ( private$connectionObjectExists() && !is.null(table) && !is.null(fields) ){
dbtable <- self$dbListFields( table );
fields_exist <- fields %in% dbtable;
if( all(fields_exist) ) {
return (TRUE);
}else{
logerror( str_c("Missing table fields: ", str_c(unlist ( fields[which(!fields_exist)] ), collapse = " ")) );
return (FALSE);
};
} else {
# BUG FIX: previously fell off the end and returned NULL invisibly, which
# crashes callers using the result in if(). Return an explicit FALSE.
logwarn("mysql$dbFieldsExist(): Either MySQL connection does not exist or table/fields not requested.");
return (FALSE);
}
},
# Switch the connection's active database (USE <database>). Returns TRUE on
# success, FALSE on failure or invalid input.
dbSetActiveDB = function(database = NULL){
if ( private$connectionObjectExists() && !is.null(database) && typeof(database) == "character"){
# BUG FIX: the old code did `result <- FALSE` inside the error handler,
# which only assigned in the handler's own environment, so the method
# returned TRUE even when the USE statement failed. The handler's return
# value is now used as the tryCatch result instead. The handler also
# passed the condition object as a second argument to logerror(); it is
# now folded into the message with paste0().
result <- tryCatch({
q <- self$dbSendQuery( paste0("USE ", database));
self$dbClearResult(q);
TRUE;
},
error = function(e) { logerror(paste0("Unable to set required database in $dbSetActiveDB(): ", e)); FALSE; }
);
return (result);
} else {
logwarn("mysql$dbSetActiveDB(): Either MySQL connection does not exist or no database requested.");
return (FALSE);
}
},
# Create a new database if it does not already exist, then verify creation.
# Returns TRUE when the database was created, FALSE when it already existed,
# creation failed, or the arguments/connection were invalid.
dbCreate = function(database = NULL){
if ( private$connectionObjectExists() && !is.null(database) && typeof(database) == "character"){
#if DB does not exist, create, otherwise let us know
if( !self$dbExists( database ) ) {
# CREATE DATABASE returns no rows; just clear the result object.
q <- self$dbSendQuery( paste0("CREATE DATABASE ", database));
self$dbClearResult(q);
#check successful creation of database
if ( self$dbExists( database ) ) {
# BUG FIX: log messages previously said "Databse".
logdebug( paste0("Database ", database, " created.") );
return (TRUE);
} else {
logerror( paste0("Failed to create database ", database, ".") );
return (FALSE);
}
} else {
logdebug( paste0("Database ", database, " already exists.") );
return (FALSE);
}
} else {
logwarn("mysql$dbCreate(): Either MySQL connection does not exist or no database requested.");
return (FALSE);
}
},
# Name of the configured database, or "" when none has been set.
dbGetName = function(){
if (is.null(private$db)) "" else private$db
}
)
)
|
9b68527924f6d66e4ee938d7acb40ce11c41cced | 37cc63b9708638db1fd1e01c0b3e52013654986c | /Transcript_Based/analysis/Brendan/Counts/featurecounts.R | ddca3f40c8aea835cd412260db3b288c16d1d2e0 | [] | no_license | brendan4/sravandevanathan | e7082bd5892ccc5cf679132aaa06c88abe4c17fc | 00c54737e09ea29b517a2d008a420748af43d9b7 | refs/heads/master | 2022-03-26T08:02:45.299082 | 2019-12-13T21:09:21 | 2019-12-13T21:09:21 | 170,389,554 | 0 | 0 | null | 2019-02-12T20:52:38 | 2019-02-12T20:52:37 | null | UTF-8 | R | false | false | 1,759 | r | featurecounts.R | library(Rsubread)
# Build a gene-level count matrix with Rsubread::featureCounts() from the
# paired-end BAM files of two run directories, then look up HGNC symbols via
# biomaRt and write the counts to disk.
library(biomaRt)
# First batch: one sample sub-folder per BAM under DBA_121317.
main <- "/media/brendan/Elements/dba/DBA_121317"
setwd(main)
all.folders <- list.files()
bam.files <- c()
# Collect the full path of every *.bam inside each sample folder.
for(i in 1:length(all.folders)){
setwd(paste(main, all.folders[i],sep = "/"))
bam.files <- c(bam.files, paste(main, all.folders[i],list.files(pattern = ".bam$"), sep = "/"))
setwd(main)
}
setwd("..")
# Second batch: same collection scheme for main_batch.
main2 <-"/media/brendan/Elements/dba/main_batch"
setwd(main2)
all.folders <- list.files()
for(i in 1:length(all.folders)){
setwd(paste(main2, all.folders[i],sep = "/"))
bam.files <- c(bam.files, paste(main2, all.folders[i],list.files(pattern = ".bam$"), sep = "/"))
setwd(main2)
}
#smallscale testing
#setwd(paste(main2, all.folders[10],sep = "/"))
#bam.files <- c(bam.files, paste(main2, all.folders[10],list.files(pattern = ".bam$"), sep = "/"))
setwd("..")
setwd("annotation")
# Gzipped GTF annotation file(s) found in the annotation/ folder.
anno <- list.files(pattern = ".gz$", full.names = TRUE)
fc <- featureCounts(bam.files, annot.ext= anno, isGTFAnnotationFile = T,isPairedEnd=TRUE)
look <- fc$annotation
#gene name conversions
#mart = useDataset("hsapiens_gene_ensembl",mart=ensembl)
ensembl <- useEnsembl(biomart="ensembl",
dataset="hsapiens_gene_ensembl")
genes <- rownames(fc$counts)
#G_list <- getBM(filters= "ensembl_gene_id", attributes= c("ensembl_gene_id","hgnc_symbol"),values=genes,mart= mart)
# NOTE(review): the query filters on "ensembl_gene_id_version"; confirm the
# GTF's gene ids actually carry version suffixes, otherwise getBM() matches
# nothing.
results <- getBM(attributes = c("ensembl_gene_id", "hgnc_symbol"),
filter = "ensembl_gene_id_version",
values = genes, mart = ensembl)
filters <- listFilters(ensembl)
filters
stat <- fc$counts
(fc$counts)
# Export raw counts, plus a tab-separated table with GeneID/Length columns.
write.csv(stat, "counts.csv")
write.table(x=data.frame(fc$annotation[,c("GeneID", "Length")], fc$counts, stringsAsFactor = F), file = "counts.txt", quote= F, sep = "\t", row.names = F)
|
70d11f257fb4cd603e0208762b6de467f73a4ec4 | 82fc793b8261ce7e68da739616089e1e5ae26562 | /samsungPriceDateSVM.R | 515d1aaa1ff55868c985d34e13b2f20341d0783b | [
"Apache-2.0"
] | permissive | cloud322/ML_R | 2a67d431410829eb2ad9d63a6531b83a4212ed46 | c78b743d516f503e44459bded29f79e84eddd3ac | refs/heads/master | 2021-04-09T13:46:11.196164 | 2018-04-10T00:58:24 | 2018-04-10T00:58:24 | 125,489,375 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,840 | r | samsungPriceDateSVM.R | # a0326<-read.csv('file:///C:/Users/DJA/Downloads/m_sam.csv', header = T, sep=',')
# Load the raw tick export (ANSI text, unnamed columns) and keep only the
# Samsung Electronics ('삼성전자') rows.
a0326<-read.csv('file:///C:/Users/DJA/Downloads/새 폴더/0327_ANSI.txt', header = F, sep=',')
a0326
head(a0326)
colnames(a0326)<-c('a','stock','date','price','pr_ch','vol_ch','vol')
install.packages("dplyr")
library(dplyr)
samsung0326<-a0326 %>% filter(stock == '삼성전자')
head(samsung0326)
#date package
install.packages("tidyverse")
install.packages("lubridate")
library(tidyverse)
library(lubridate)
## Load the data from the csv file
d_p_sam<-samsung0326 %>% select(date, price)
plot(d_p_sam$date,d_p_sam$price,pch=16)
# help(strftime)
x <- d_p_sam$date
# x2 <- select(d_p_sam$date>="2018-04-02 09:00:01 KST" & d_p_sam$date>="2018-04-02 11:00:01 KST")
# x2
# time <- substr(as.POSIXct(sprintf("%06.0f", x), format='%H%M%S'), 12, 19)
# Parse the integer HHMMSS timestamps into POSIXct (the current date is
# implied by as.POSIXct when only a time format is given).
time <- as.POSIXct(sprintf("%06.0f", x), format='%H%M%S')
time
# Combine parsed times and prices into a plain data frame.
time <- c(as.POSIXct(sprintf("%06.0f", x), format='%H%M%S'))
price <- c(d_p_sam$price)
df<- data.frame(time,price)
df
# d_p_sam2<-samsung0326 %>% select (as.POSIXct(sprintf("%06.0f", date), format='%H%M%S'), price)
# d_p_sam2
# ?timeSeries
# ?strptime
# time2<-format(as.character(x), format ='%H%M%S')
# time2
# d_p_sam$price
## Plot the data
# plot(time, d_p_sam$price,pch=13)
plot(df,pch=16,type='p',cex= 0.5)
# BUG FIX: the two abline() calls were fused onto a single line with no
# separator ("abline(h=2500000)abline(h=2400000)"), which is a syntax error
# in R and made the whole script unparseable.
abline(h=2500000)
abline(h=2400000)
# plot(time2, d_p_sam$price)
# a<-ggplot(d_p_sam,aes(time2, price))
# a
# ?plot
# par(time2)
# ?par
# Create a linear regression model
# model <- lm(d_p_sam$price ~ time )
# model
# Baseline: ordinary least-squares fit of price on time.
model <- lm(price ~ time , df)
model
# Add the fitted line
abline(model)
# make a prediction for each X
# predictedY <- predict(model)
# predictedY
# plot(predictedY )
predictedY <- predict(model, df)
predictedY
# display the predictions
points(df$time, predictedY, col = "blue", pch=16, cex=0.5)
plot(df$time, predictedY, col = "red", pch=16)
##SVM
# Root-mean-squared error of a vector of residuals.
rmse <- function(error) {
sqrt(mean(error * error))
}
# Baseline RMSE of the linear model's residuals (the number SVR must beat).
error <- model$residuals # same as data$Y - predictedY
predictionRMSE <- rmse(error) # 10184.28
predictionRMSE
library(e1071)
# Support-vector regression with default parameters; overlay its predictions
# on the existing scatter plot.
model <- svm(price ~ time , df)
model
predictedY <- predict(model, df)
points(df$time, predictedY, col = "red", pch=4, cex=0.5)
summary(df)
summary(model)
summary(predictedY)
# svrModel$residuals != data$Y - predictedY
# compute the error
error <- df$price - predictedY
svrPredictionRMSE <- rmse(error)
svrPredictionRMSE #6343.204
###SVM tune
# perform a grid search
# tuneResult <- tune(svm, price ~ time, data = df,ranges = list(epsilon = seq(0,1,0.1), cost = 2^(2:9)))
# print(tuneResult)
# (recorded output of the coarse grid search:)
# Parameter tuning of 'svm':
#
# - sampling method: 10-fold cross validation
#
# - best parameters:
# epsilon cost
# 0.5 512
#
# - best performance: 31149900
# 0.35~0.5
# Draw the tuning graph
# plot(tuneResult)
# Refined grid search around the best coarse epsilon range (0.35-0.5).
tuneResult <- tune(svm, price ~ time, data = df, ranges = list(epsilon = seq(0.35,0.5,0.01), cost = 2^(2:9)))
print(tuneResult)
plot(tuneResult)
tunedModel <- tuneResult$best.model
tunedModelY <- predict(tunedModel, df)
# BUG FIX: this line read `data$Y`, but no object named `data` exists in this
# script (`data` resolves to the base R function), so it always errored. The
# observed values live in df$price.
error <- df$price - tunedModelY
# https://www.svm-tutorial.com/2014/10/support-vector-regression-r/
###########################################
# mariadb data test
# Same pipeline as above, run on a test export (presumably destined to come
# from MariaDB later -- TODO confirm).
test<-read.csv('file:///C:/Users/DJA/Downloads/testdataANSI.csv', header = F, sep=',')
head(test)
colnames(test)<-c('time','code','stock','date','price','pr_ch','vol_ch','vol')
test
test1<-test %>% filter(stock == '삼성전자')
test1
test2<-test1 %>% select(time, price)
test2
plot(as.Date(test1$time),test1$price,pch=16)
t1<-ggplot(test2,aes(as.Date(test1$time),test1$price))
t1<-t1+geom_line()
t1
# t2<-t1+geom_freqpoly(binwidth = 60)
# t2
|
cecc0d78e766172ce93f955532ba5f74516c16f4 | ec7be542fd7b75e5741bbf5b0605f1e993d1733a | /R/stream_read_fastq.R | 9a5830221b49867c86cc235a2c40667be4a30ec3 | [] | no_license | czhu/R_nanopore | d2b67d50005ce7468b1da7fa13de6b93045e8954 | 4e13e92e104a5ba2c6a1c772f077ef15ea199193 | refs/heads/master | 2023-07-14T11:02:16.083619 | 2021-08-26T15:29:14 | 2021-08-26T15:29:14 | 98,747,970 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 448 | r | stream_read_fastq.R | ## the c stop working properly, i get a list
## for FASTQ files with fewer than ~1M reads this is not a problem
# Stream a FASTQ file chunk-by-chunk with ShortRead::FastqStreamer, applying
# FUN to each chunk and concatenating the results with c().
# (FastqStreamer is used because readFastq() fails on this data:
#  "line too long /Users/czhu/data/DCM/pore/analysis_201704/reads.fastq.gz:55069")
stream_read_fastq = function(infile, FUN=identity) {
    fs = FastqStreamer(infile)
    # BUG FIX: the stream was only closed on the success path; if FUN() or
    # yield() errored the connection leaked. on.exit() closes it in all cases.
    on.exit(close(fs), add = TRUE)
    rv = FUN(yield(fs))
    while (length(fq <- yield(fs))) {
        rv = c(rv, FUN(fq))
    }
    rv
}
|
57d6c66035276d3cefc7cbdbef5b0e7788de0c1e | 80cee41ab69e6f3eee069027f0cd05e5216bef4d | /simulations/scripts/run_ncv_scan.r | 6ba76027ee194decb097990712f07e381ec559eb | [] | no_license | bbitarello/NCV | 64b037986250f96ce241ca0a6bcf0ebd9b38ec68 | 854ad80b7d0ef4ad0191a3f7243d95341ab6d7ac | refs/heads/master | 2021-06-03T19:01:17.762942 | 2016-08-18T04:59:37 | 2016-08-18T04:59:37 | 18,804,965 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,858 | r | run_ncv_scan.r | #!/usr/bin/r
## Cesare de Filippo, MPI-EVA
## 15-01-2014
## Last modified by Barbara Bitarello: 21.01.2014
## Run NCV:
## example command line:
##>run_ncv_scan.r INPUT 3000 1500 BED FDs chr21 scan1
## NOTE: that 'run_ncv_scan.r' should be executable. To do so do "chmod +x run_ncv_scan.r"
##example: chr21
#./run_ncv_scan.r tmp.ac 3000 1500 '/mnt/sequencedb/PopGen/cesare/bs_genomescan/Map50_100.TRF.SDs.hg19_pantro2.21.bed' '/mnt/sequencedb/PopGen/cesare/bs_genomescan/fds.hg19_pantro2.21.tsv' chr21 '/mnt/sequencedb/PopGen/barbara/vcf_practice/DATA/chr21/tmpdir/'
#but, actually, each chromosome will have a variable number of 30 MB windows and, therefore, a variable number of tpmdirs
#first open the bed and fd files
#then a loop should be added saying : for each tmpdir of a a given chromosome,
## INPUTS FILESa:
## 1. INPUT.NAME: the allele count file.
## 2. WINDOWSIZE: the number of bp to be analyzed.
## 3. SLIDE: the number of bp to slide over the window.
## 4. BED: the positions where the outgroup (chimpanzee) has an equivalent sequence.
## 5. FD: the positions where the two reference genomes differ.
## 6. TAG: the chromosome. E.g. 'chr21'
## 7. SAVEOB: where to save the R object
##
ERROR.MESSAGE <- paste(c("The script requires 7 arguments:",
"1. <INPUT>\t\tallele counts file.",
"2. <WINDOWSIZE>\t\tnumber of bp to be analyzed.",
"3. <SLIDE>\t\tnumber of bp to slide over the window.",
"4. <BED>\t\tpositions where the outgroup (chimpanzee) has an equivalent sequence.",
"5. <FDs>\t\tpositions where the two reference genomes differs.",
"6. <TAG>\t\tname of the chromosome being run with this command.",
"7. <SAVEOB>\t\tpath where the NCV results for this set of windows should be saved."),collapse="\n")
if (length(argv) != 7) {
cat(ERROR.MESSAGE,"\n")
quit(save="no",)
}
SAVEOB<-as.character(argv[7])
INPUT.NAME <- as.character(argv[1])
WINDOW <- as.numeric(argv[2])
SLIDE <- as.numeric(argv[3])
BED <- read.table(argv[4], sep="\t",stringsAsFactors=FALSE,as.is=TRUE)
FD <- read.table(argv[5], sep="\t",stringsAsFactors=FALSE,as.is=TRUE)
TAG<- as.character(argv[6])#headers for files
headr<-c("CHROM" ,"POS", "ID" ,"REF","ALT","Anc","AWS","LWK","YRI","CEU", "FIN","GBR","TSI", "CHB","CHS" ,"JPT","MXL", "CLM","PUR")
headr2<-c('chr', 'pos', 'human', 'chimp')
headr3<-c('chr', 'beg.pos', 'end.pos')
## to add NCV and the other caltulations
#the positions will be separated (for splitting) in the SGE script. This script here is for each window (3kb) inside the bigger (3Mb) window.
##add NCV script here ###
#run my.function for this 3 Mb window. #save R object in temp directory.
source("/mnt/sequencedb/PopGen/barbara/simulations/scripts/NCV.scanv3.r")
source("/mnt/sequencedb/PopGen/barbara/simulations/scripts/take.snps.r")
library(multicore)
library(SOAR) #speed up workspace loading.
#library(ggplot2)
Sys.setenv(R_LOCAL_CACHE="store_data_here")#already created.
#########################################################################################################################################################
##################################################################################################################################################
list.BED<-vector('list',dim(BED)[1])
#if(dim(g)[1]>0){ #if there is some position in the bed file for this window
for (i in 1: dim(BED)[1]){
seq(from=BED[i,2], to=BED[i,3])->list.BED[[i]] #create a sequence from beg.pos to end.pos from the bef file
}
sort(unlist(list.BED))->list.BED
#convert from factor to numeric (positions in FD.N)
colnames(FD)<-headr2
#as.numeric(levels(FD$pos))[FD$pos]->FD$pos
colnames(BED)<-headr3
########################################################################################################################################################
########################################################################################################################################################
########################################################################################################################################################
########################################################################################################################################################
# Slide windows of width W bp, stepping by S bp, across the allele-count table
# and compute NCV statistics for each window via NCV.scan3(). Relies on the
# globals FD (fixed differences) and list.BED (callable positions).
# Returns list(NCVs = per-window summary matrix, input = per-window SNP matrices).
my.function<-function(input.file=TEMP.INPUT, tag=TAG, W=WINDOW, S=SLIDE){
# Window start coordinates, from the first to the last SNP position.
s <- seq(input.file[1,2],input.file[nrow(input.file),2], S)
s <- s[-length(s)] # remove last step because it's always a mess
# Per-window SNP subsets.
# NOTE(review): seq(3:21) equals 1:19 and therefore keeps ALL 19 columns;
# columns 3:21 were probably intended. Kept as-is to preserve existing
# behavior -- confirm against NCV.scan3()'s expected input.
chwin <- lapply(seq_along(s), function(i) subset(input.file, POS >= s[i] & POS <= s[i]+W)[,seq(3:21)])
chwinV2 <- lapply(chwin, as.matrix)
# Run the NCV scan on every window.
chNCV <- vector('list', length(chwinV2))
for (i in seq_along(chwinV2)){
chNCV[[i]] <- NCV.scan3(INPUT.N=chwinV2[[i]], FD.N=FD, BED.N=list.BED, pop='YRI')
}
# One summary row per window; the matrix is coerced to character when `tag`
# is assigned, exactly as the original cbind(rep(NA, ...)) construction did.
f5 <- matrix(NA, nrow=length(s), ncol=17)
f5[,1]<-rep(tag, length(s)) #chromosome
f5[,2]<-s
f5[,3]<-s+W
f5[,4]<-unlist(lapply(chNCV, function(x) x$NCVf5))
f5[,5]<-unlist(lapply(chNCV, function(x) x$NCVf5FD))
f5[,6]<-unlist(lapply(chNCV, function(x) x$NCVf4))
f5[,7]<-unlist(lapply(chNCV, function(x) x$NCVf4FD))
f5[,8]<-unlist(lapply(chNCV, function(x) x$NCVf3))
f5[,9]<-unlist(lapply(chNCV, function(x) x$NCVf3FD))
f5[,10]<-unlist(lapply(chNCV, function(x) x$NCVf2))
f5[,11]<-unlist(lapply(chNCV, function(x) x$NCVf2FD))
f5[,12]<-unlist(lapply(chNCV, function(x) x$NCVf1))
f5[,13]<-unlist(lapply(chNCV, function(x) x$NCVf1FD))
f5[,14]<-unlist(lapply(chNCV, function(x) x$Nr.SNPs.1))
f5[,15]<-unlist(lapply(chNCV, function(x) x$Nr.SNPs.2))
f5[,16]<-unlist(lapply(chNCV, function(x) x$Nr.FDs))
f5[,17]<-unlist(lapply(chNCV, function(x) x$Initial_seg_sites))
colnames(f5)<-c('chr','beg.win', 'end.win','NCVf5', 'NCV5FD','NCVf4','NCVf4FD','NCVf3','NCVf3FD','NCVf2','NCVf2FD', 'NCVf1','NCVf1FD','Nr.SNPs1','Nr.SNPs2', 'Nr.FDs', 'Init.seg.sites')
# BUG FIX: the original returned fd=chwinV3 and bed=chwinV4, but those objects
# were never created (their construction is commented out above), so building
# `res` always failed with "object 'chwinV3' not found".
res<-list(NCVs=f5, input=chwinV2)
return(res)
}
######################################################################################################################
######################################################################################################################
#loopp
#for i in 1: number of tmdir#foreach tmpdir
#
# Main loop: run the NCV scan on every tmpdir<i> bin under SAVEOB and save an
# RData object with the results next to each bin's input file.
#
# BUG FIX: the original did system(paste('cd ', SAVEOB)) followed by
# system('ls |grep tmp -c'). Each system() call runs in its own subshell, so
# the 'cd' had no effect and the tmpdirs were counted in R's current working
# directory instead of SAVEOB. Count them directly with list.files() instead.
nBINS <- length(list.files(SAVEOB, pattern = "tmp"))
for (i in seq_len(nBINS)){
temp.input<-paste(SAVEOB, 'tmpdir', i,'/', INPUT.NAME, sep='')
TEMP.INPUT <- try(read.table(temp.input,sep="\t",stringsAsFactors=FALSE,as.is=TRUE))
#NCV will only try to run if there is data in the input file. For some windows the tmp.ac file will be empty.
# BUG FIX: the guard used to be exists("temp.input"), which is always TRUE
# (temp.input is the path string built above), so empty/missing tmp.ac files
# crashed the script at colnames(). Check for a try-error instead.
if (!inherits(TEMP.INPUT, "try-error")){
colnames(TEMP.INPUT)<-headr
#this here is the main command.
objectName<-paste('res__',TAG,i, sep='')
assign(objectName, my.function(input.file=TEMP.INPUT,tag=TAG,W=WINDOW, S=SLIDE))
save(list=objectName, file=paste(SAVEOB, 'tmpdir', i,'/',objectName, ".RData", sep="")) #save R object with NCV results
}
}
####################################################################################################################################################
####################################################################################################################################################
####################################################################################################################################################
####################################################################################################################################################
###################################################################################################################################################
|
f115aa5f267f25322c6fad3102b07c82f2614f11 | c603a83622aba33dfb82bd443a0e0cae386ef7af | /scripts/sv_lens.R | 1aa5f052e47d05ddcd96196c775ee396c495fdc5 | [] | no_license | ahmedelhosseiny/lied_egypt_genome | e6f66669e7d467a3f9078327b6ba978996ce0700 | e1647e396a46a6f3e538e488caf11a5f95d86fb1 | refs/heads/master | 2022-11-08T22:25:57.817282 | 2020-06-19T07:52:23 | 2020-06-19T07:52:23 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,183 | r | sv_lens.R | # Making boxplots for snp stat numbers
library("RColorBrewer")
# Getting input and output filenames
# ["insertions","deletions","inversions","duplications"],
# filter=["all","pass"])
fname_ins_all <- snakemake@input[[1]]
fname_ins_pass <- snakemake@input[[2]]
fname_del_all <- snakemake@input[[3]]
fname_del_pass <- snakemake@input[[4]]
fname_inv_all <- snakemake@input[[5]]
fname_inv_pass <- snakemake@input[[6]]
fname_dup_all <- snakemake@input[[7]]
fname_dup_pass <- snakemake@input[[8]]
fname_trans_all <- snakemake@input[[9]]
fname_trans_pass <- snakemake@input[[10]]
fname_out_hist_all <- snakemake@output[[1]]
fname_out_hist_pass <- snakemake@output[[2]]
fname_out_svs_all <- snakemake@output[[3]]
fname_out_svs_pass <- snakemake@output[[4]]
# Plotting
pdf(fname_out_hist_all)
par(mfrow=c(2,2))
hist_cols <- brewer.pal(n = 8, name = 'Set2')[3:6]
del_all <- read.table(fname_del_all, header=FALSE)
hist(log10(del_all$V1),xlab="log10(Length)",ylab="Number",main="Deletions",breaks=30,col=hist_cols[1])
num_del_all <- length(del_all$V1)
inv_all <- read.table(fname_inv_all, header=FALSE)
hist(log10(inv_all$V1),xlab="log10(Length)",ylab="Number",main="Inversions",breaks=30,col=hist_cols[2])
num_inv_all <- length(inv_all$V1)
dup_all <- read.table(fname_dup_all, header=FALSE)
hist(log10(dup_all$V1),xlab="log10(Length)",ylab="Number",main="Duplications",breaks=30,col=hist_cols[3])
num_dup_all <- length(dup_all$V1)
ins_all <- read.table(fname_ins_all, header=FALSE)
hist(ins_all$V1,xlab="Length",ylab="Number",main="Insertions",breaks=30,col=hist_cols[4])
num_ins_all <- length(ins_all$V1)
dev.off()
pdf(fname_out_hist_pass)
par(mfrow=c(2,2))
del_pass <- read.table(fname_del_pass, header=FALSE)
hist(log10(del_pass$V1),xlab="log10(Length)",ylab="Number",main="Deletions",breaks=30,col=hist_cols[1])
num_del_pass <- length(del_pass$V1)
inv_pass <- read.table(fname_inv_pass, header=FALSE)
hist(log10(inv_pass$V1),xlab="log10(Length)",ylab="Number",main="Inversions",breaks=30,col=hist_cols[2])
num_inv_pass <- length(inv_pass$V1)
dup_pass <- read.table(fname_dup_pass, header=FALSE)
hist(log10(dup_pass$V1),xlab="log10(Length)",ylab="Number",main="Duplications",breaks=30,col=hist_cols[3])
num_dup_pass <- length(dup_pass$V1)
ins_pass <- read.table(fname_ins_pass, header=FALSE)
hist(ins_pass$V1,xlab="Length",ylab="Number",main="Insertions",breaks=30,col=hist_cols[4])
num_ins_pass <- length(ins_pass$V1)
dev.off()
pdf(fname_out_svs_all,width=5,height=8)
num_trans_all <- read.table(fname_trans_all)$V1
num_trans_pass <- read.table(fname_trans_pass)$V1
hist_cols <- brewer.pal(n = 8, name = 'Set2')[3:7]
sv_nums <- c(num_del_all,num_inv_all,num_dup_all,num_ins_all,num_trans_all)
sv_names <- c("Deletions","Inversions","Duplications","Insertions","Translocations")
barplot(sv_nums,names=sv_names,ylab="Number",col=hist_cols,las=2,cex.names=0.77)
dev.off()
pdf(fname_out_svs_pass,width=5,height=8)
sv_nums <- c(num_del_pass,num_inv_pass,num_dup_pass,num_ins_pass,num_trans_pass)
sv_names <- c("Deletions","Inversions","Duplications","Insertions","Translocations")
barplot(sv_nums,names=sv_names,ylab="Number",col=hist_cols,las=2,cex.names=0.77)
dev.off() |
e134ac3db81e969456ca7b21a5924572304c4ac7 | 9aafde089eb3d8bba05aec912e61fbd9fb84bd49 | /codeml_files/newick_trees_processed_and_cleaned/11194_0/rinput.R | e0e82d40cf7f40462b24a8f7cd84059c86099a36 | [] | no_license | DaniBoo/cyanobacteria_project | 6a816bb0ccf285842b61bfd3612c176f5877a1fb | be08ff723284b0c38f9c758d3e250c664bbfbf3b | refs/heads/master | 2021-01-25T05:28:00.686474 | 2013-03-23T15:09:39 | 2013-03-23T15:09:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 137 | r | rinput.R | library(ape)
testtree <- read.tree("11194_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="11194_0_unrooted.txt") |
fb9de4c7a783ab82892b034174fbd96b8e11baf5 | 9ede4232ad524cc08d8f76751a8856c425a18bee | /man/recompute_elpd_table.Rd | 3dbebae45a44ccda89777d2cfe84d60af6eeebe5 | [] | no_license | CreRecombinase/cause | 139f41686619759bf1e72790ec693d6113a0b129 | 6257e6c66e6c1518c8750d1e4d2b4d6e42644fa5 | refs/heads/master | 2022-06-09T16:51:03.534368 | 2021-11-30T19:00:46 | 2021-11-30T19:00:46 | 154,586,575 | 0 | 0 | null | 2019-07-12T16:21:58 | 2018-10-25T00:25:07 | R | UTF-8 | R | false | true | 457 | rd | recompute_elpd_table.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/in_sample_elpd_loo.R
\name{recompute_elpd_table}
\alias{recompute_elpd_table}
\title{Recompute elpd table for a CAUSE fit that is already computed}
\usage{
recompute_elpd_table(res)
}
\arguments{
\item{res}{A cause fit from running the cause function}
}
\value{
A model table equivalent to res$elpd
}
\description{
Recompute elpd table for a CAUSE fit that is already computed
}
|
e6df696787e1d64dabde87dfe3d10e08e8319e9d | cb2a3c65b4d4d7af977d0649db60362bf98e36bf | /code/Figure3.R | 28fb731ed808ebfc052059cd16f0f4eaecb84fa0 | [
"MIT"
] | permissive | BatadaLab/gdT_paper_analysis | 5fe8472c72f680fb6eb3bb6e09ed6c27c13ab05d | be76c820f47b826e5ae41952ceafc0628554c360 | refs/heads/master | 2023-01-19T06:35:19.220666 | 2020-12-01T12:44:53 | 2020-12-01T12:44:53 | 294,932,578 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 10,362 | r | Figure3.R | library(Seurat)
library(dplyr)
library(ggplot2)
library(ggsci)
library(gridExtra)
library(scID)
library(ggpubr)
library(plyr)
library(forcats)
library(easyGgplot2)
source("~/Google Drive/bin/batadalab_scrnaseq_utils.R")
setwd("gdT_paper_analysis/")
# -------------------------------------------------------
# Load data
# -------------------------------------------------------
# BC1
gem1 <- get_scrnaseq_data("p01e01_custom") %>%
GetAssayData(slot = "counts") %>%
as.data.frame() %>%
set_colnames(paste("BC1", colnames(.), sep = "_"))
# BC2
gem2 <- get_scrnaseq_data("p01e02_bedtools") %>%
GetAssayData(slot = "counts") %>%
as.data.frame() %>%
set_colnames(paste("BC2", colnames(.), sep = "_"))
# -------------------------------------------------------
# Set up objects
# -------------------------------------------------------
sobj1 <- CreateSeuratObject(counts = gem1, project = "BC1", min.cells = 5)
sobj1$stim <- "BC1"
sobj1 <- subset(sobj1, subset = nFeature_RNA > 500)
sobj1 <- NormalizeData(sobj1, verbose = FALSE)
sobj1 <- FindVariableFeatures(sobj1, selection.method = "vst", nfeatures = 2000)
sobj2 <- CreateSeuratObject(counts = gem2, project = "BC2", min.cells = 5)
sobj2$stim <- "BC2"
sobj2 <- subset(sobj2, subset = nFeature_RNA > 500)
sobj2 <- NormalizeData(sobj2, verbose = FALSE)
sobj2 <- FindVariableFeatures(sobj2, selection.method = "vst", nfeatures = 2000)
# -------------------------------------------------------
# Integrate
# -------------------------------------------------------
anchors <- FindIntegrationAnchors(object.list = list(sobj1, sobj2), dims = 1:20)
sobj.combined <- IntegrateData(anchorset = anchors, dims = 1:20)
# -------------------------------------------------------
# Perform integrated analysis
# -------------------------------------------------------
DefaultAssay(sobj.combined) <- "integrated"
# Run the standard workflow for visualization and clustering
sobj.combined <- ScaleData(sobj.combined, verbose = FALSE, features = rownames(sobj.combined))
sobj.combined <- RunPCA(sobj.combined, npcs = 30, verbose = FALSE)
# t-SNE and Clustering
sobj.combined <- RunUMAP(sobj.combined, reduction = "pca", dims = 1:20)
sobj.combined <- FindNeighbors(sobj.combined, reduction = "pca", dims = 1:20)
sobj.combined <- FindClusters(sobj.combined, resolution = 0.5)
# Remove TRDC- population from cluster 4
filtered_cells <- read.delim(
"data/raw/BC_filtered_cells.txt",
stringsAsFactors = F,
header = F
)
sobj.combined <- subset(
sobj.combined,
cells = filtered_cells$V1
)
pdf("Figures/Figure3/Fig3A.pdf", width = 2, height = 2)
DimPlot(
sobj.combined,
reduction = "umap",
label = TRUE,
pt.size = 0.01
) +
NoAxes() +
NoLegend()
dev.off()
pdf("Figures/Figure3/Fig3B.pdf", width = 4, height = 2.2)
DimPlot(
sobj.combined,
reduction = "umap",
group.by = "stim",
pt.size = 0.01,
split.by = 'stim'
) +
NoAxes() +
NoLegend()
dev.off()
# -------------------------------------------------------
# Delta1/Delta2 genetics
# -------------------------------------------------------
# BC1
vdj_BC1 <- read.delim(
"data/raw/VDJ_data/BC1_vdj_list.txt",
stringsAsFactors = F,
header = F
) %>%
mutate(V1 = unlist(lapply(V1, function(x) strsplit(x, "#|_")[[1]][2]))) %>%
mutate(V1 = paste("BC1", V1, sep = "_")) %>%
filter(V1 %in% colnames(gem1))
TRDV2_BC1 <- vdj_BC1$V1[grep("TRDV2", vdj_BC1$V2)]
TRDC_BC1 <- vdj_BC1$V1[grep("TRDC", vdj_BC1$V2)]
TRGV9_BC1 <- vdj_BC1$V1[grep("TRGV9", vdj_BC1$V2)]
# BC2
vdj_BC2 <- read.delim(
"data/raw/VDJ_data/BC2_vdj_list.txt",
stringsAsFactors = F,
header = F
) %>%
mutate(V1 = lapply(V1, function(x) strsplit(x, "#|_")[[1]][2])) %>%
mutate(V1 = paste("BC2", V1, sep = "_")) %>%
filter(V1 %in% colnames(gem2))
TRDV2_BC2 <- vdj_BC2$V1[grep("TRDV2", vdj_BC2$V2)]
TRDC_BC2 <- vdj_BC2$V1[grep("TRDC", vdj_BC2$V2)]
TRGV9_BC2 <- vdj_BC2$V1[grep("TRGV9", vdj_BC2$V2)]
# TRDC - Figure 3C(left)
vdj_labels <- rep("TRDC-", ncol(sobj.combined))
names(vdj_labels) <- colnames(sobj.combined)
vdj_labels[c(TRDC_BC1, TRDC_BC2)] <- "TRDC+"
df <- data.frame(sobj.combined@reductions$umap@cell.embeddings)
df$label <- vdj_labels[rownames(df)]
table(df$label)
dropouts <- rownames(df)[which(df$label == "TRDC-")]
tiff("Figures/Figure3/Fig3C_TRDC.tiff")
ggplot(
df[dropouts, ],
aes(x=UMAP_1, y=UMAP_2, color=factor(label), fill=factor(label))
) +
geom_point(size=1) +
theme_void() +
theme(legend.position="none") +
scale_color_manual(values=c("lightgrey","black")) +
scale_fill_manual(values=c("lightgrey","black")) +
geom_point(data=df[c(TRDC_BC1, TRDC_BC2), ], colour="black", size=1)
dev.off()
# alternative TRDC plot
p <- FeaturePlot(
sobj.combined,
"TRDC.NT-026437.11.3",
pt.size = 0.01,
combine = FALSE,
min.cutoff = 0,
max.cutoff = 3
)
pdf("Figures/Figure3/Fig3C_TRDC_v2.pdf", width = 2, height = 2.3)
plot(p[[1]] + NoAxes() + NoLegend())
dev.off()
# TRDV2 - Figure 3C(middle)
vdj_labels <- rep("nothing", ncol(sobj.combined))
names(vdj_labels) <- colnames(sobj.combined)
vdj_labels[c(TRDV2_BC1, TRDV2_BC2)] <- "TRDV2+"
df <- data.frame(sobj.combined@reductions$umap@cell.embeddings)
df$label <- vdj_labels[rownames(df)]
table(df$label)
dropouts <- rownames(df)[which(df$label == "nothing")]
tiff("Figures/Figure3/Fig3C_TRDV2.tiff")
ggplot(
df[dropouts, ],
aes(x=UMAP_1, y=UMAP_2, color=factor(label), fill=factor(label))
) +
geom_point(size=1) +
theme_void() +
theme(legend.position="none") +
scale_color_manual(values=c("lightgrey", "#0073C2FF")) +
scale_fill_manual(values=c("lightgrey", "#0073C2FF")) +
geom_point(
data = df[c(TRDV2_BC1, TRDV2_BC2), ],
colour = "#0073C2FF",
size = 1
)
dev.off()
# TRGV9 - Figure 3C(right)
vdj_labels <- rep("nothing", ncol(sobj.combined))
names(vdj_labels) <- colnames(sobj.combined)
vdj_labels[c(TRGV9_BC1, TRGV9_BC2)] <- "TRGV9+"
df <- data.frame(sobj.combined@reductions$umap@cell.embeddings)
df$label <- vdj_labels[rownames(df)]
table(df$label)
dropouts <- rownames(df)[which(df$label == "nothing")]
tiff("Figures/Figure3/Fig3C_TRGV9.tiff")
ggplot(
df[dropouts, ],
aes(x=UMAP_1, y=UMAP_2, color=factor(label), fill=factor(label))
) +
geom_point(size=1) +
theme_void() +
theme(legend.position="none") +
scale_color_manual(values=c("lightgrey", "#A73030FF")) +
scale_fill_manual(values=c("lightgrey", "#A73030FF")) +
geom_point(
data = df[c(TRGV9_BC1, TRGV9_BC2), ],
colour = "#A73030FF",
size = 1
)
dev.off()
# -------------------------------------------------------
# Figure 3D
# -------------------------------------------------------
TRDC_cells <- c(TRDC_BC1, TRDC_BC2)
TRDV2_cells <- c(TRDV2_BC1, TRDV2_BC2)
TRGV9_cells <- c(TRGV9_BC1, TRGV9_BC2)
stats <- data.frame(
matrix(NA, nrow = 3, ncol = length(unique(Idents(sobj.combined)))),
row.names = c("TRDC", "TRDV2", "TRGV9")
)
colnames(stats) <- unique(Idents(sobj.combined))
for (ID in colnames(stats)) {
cells = WhichCells(sobj.combined, idents = ID)
stats["TRDC", ID] <- length(intersect(cells, TRDC_cells))*100/length(cells)
stats["TRDV2", ID] <- length(intersect(cells, TRDV2_cells))*100/length(cells)
stats["TRGV9", ID] <- length(intersect(cells, TRGV9_cells))*100/length(cells)
}
df <- reshape2::melt(t(stats[, c("4", "6", "9")]))
colnames(df) <- c("ClusterID", "gene", "pct")
df$ClusterID <- factor(df$ClusterID, levels = c("9", "6", "4"))
pdf("Figures/Figure3/Fig3D.pdf", width = 4, height = 2)
ggplot(df, aes(x=ClusterID, y=pct, fill=factor(gene))) + geom_bar(stat = "identity", position=position_dodge()) +
labs(title = "", y="", x="") + theme_minimal() +
theme(
axis.text = element_text(size=7),
plot.margin = unit(c(0,0,0,0), "cm"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
axis.line.x = element_line(colour = "black"),
axis.line.y = element_line(colour = "black")
)
dev.off()
# Print counts for plot
counts <- data.frame(
matrix(NA, nrow = 3, ncol = length(unique(Idents(sobj.combined)))),
row.names = c("TRDC", "TRDV2", "TRGV9")
)
colnames(counts) <- unique(Idents(sobj.combined))
for (ID in colnames(counts)) {
cells <- WhichCells(sobj.combined, idents = ID)
counts["TRDC", ID] <- length(intersect(cells, TRDC_cells))
counts["TRDV2", ID] <- length(intersect(cells, TRDV2_cells))
counts["TRGV9", ID] <- length(intersect(cells, TRGV9_cells))
}
counts[, c("4", "6", "9")]
# -------------------------------------------------------
# Figure 3E
# -------------------------------------------------------
cells <- WhichCells(sobj.combined, idents = c(4, 6, 9))
p <- FeaturePlot(
sobj.combined,
features = "KLRK1",
pt.size = 0.01,
combine = FALSE,
min.cutoff = 0,
max.cutoff = 4,
cells = cells
)
pdf("Figures/Figure3/FeaturePlots/KLRK1.pdf", width = 2, height = 2)
plot(p[[1]] + NoAxes() + NoLegend())
dev.off()
genes <- c("IFNG", "CCR6")
p <- FeaturePlot(
sobj.combined,
features = genes,
pt.size = 0.01,
combine = FALSE,
min.cutoff = 0,
max.cutoff = 3,
cells = cells
)
for(i in 1:length(p)) {
pdf(paste("Figures/Figure3/FeaturePlots/", genes[i], ".pdf", sep = ""), width = 2, height = 2)
plot(p[[i]] + NoAxes() + NoLegend())
dev.off()
}
# -------------------------------------------------------
# Save results
# -------------------------------------------------------
saveRDS(sobj.combined, file = "data/processed/BRCA_sobj_combined.rds")
# Save cluster IDs of cells in supplementary table
cell_metadata <- data.frame(Idents(sobj.combined)) %>%
tibble::rownames_to_column("Barcode")
colnames(cell_metadata) <- c("Barcode", "Cluster ID")
cell_metadata$`Donor ID` <- unlist(lapply(cell_metadata$Barcode, function(x) strsplit(x, "_")[[1]][1]))
cell_metadata <- cell_metadata %>%
mutate(
"Cluster ID" = case_when(
`Cluster ID` %in% c(0, 7) ~ "CD4-T",
`Cluster ID` %in% c(1, 2) ~ "CD8-T",
`Cluster ID` == 3 ~ "B",
`Cluster ID` == 4 ~ "gd-T.3",
`Cluster ID` == 5 ~ "T-reg",
`Cluster ID` == 6 ~ "gd-T.2",
`Cluster ID` == 8 ~ "Mph",
`Cluster ID` == 9 ~ "gd-T.9",
`Cluster ID` %in% c(10, 11, 12) ~ "Unclassified"
)
)
write.table(
cell_metadata,
file = "data/processed/BC_cell_metadata.txt",
quote = F,
row.names = F,
sep = "\t"
)
|
59a2e7b2cb843d7005163cde5cec8d3a3f0d0382 | f88192b3411a06f618d558479d15f17a03ab2ecf | /man/hello.Rd | 2b7768f413f32f9fc2ea5c461f365377eb224e93 | [] | no_license | Ing-Stat/ovn | f2dc159186228bc126377543dfa8bcad5923edb0 | 0886b1a0b7063a2a8c21cb17d676ecd17f5b2454 | refs/heads/main | 2023-07-17T08:38:18.121883 | 2021-09-05T18:40:44 | 2021-09-05T18:40:44 | 403,376,370 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 130 | rd | hello.Rd | \name{hello}
\alias{hello}
\title{Hello, Ovning!}
\usage{
hello()
}
\description{
Prints 'Hello, Ovning!'.
}
\examples{
hello()
}
|
94bc2b8fe78cb80825a8f0fef7db32898faab114 | 29585dff702209dd446c0ab52ceea046c58e384e | /crossReg/R/DeltaC.R | 50dbf4a84e5426e77a99d22936ce6853f589603d | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 820 | r | DeltaC.R | DeltaC <-
function (Data,order) {
lmResults <- lm(y ~ x1 + x2 + x1*x2,data=Data) # fit lm()
# extract coefficients
coefficient <- summary(lmResults)$coefficients
# extract variance covariance matrix
covariance <- vcov(lmResults)
B2 <- coefficient[3,1] # estimation of B2
B3 <- coefficient[4,1] # estimation of B3
COV22 <- covariance[3,3] # variance of B2
COV33 <- covariance[4,4] # variance of B3
k <- 1.96 # 95% percentile for N(0,1)
if (order==1) {
SE_delta <- sqrt((((COV22)/(B3^2))+(((B2^2)*COV33)/(B3^4))))
}
else if (order==2) {
SE_delta <- sqrt((((COV22)/(B3^2))+(((B2^2)*COV33)/(B3^4)))
+ (COV22*COV33+2*(B2^2)*(COV33)^2)/(B3^4))
}
C_Hat <- (-1)*B2/B3
LowCI <- C_Hat - k*SE_delta
UpperCI <- C_Hat + k*SE_delta
results <- list(LowCI = LowCI, UpperCI = UpperCI)
return(results)
}
|
c8bc39ad325095763d907ccdd76c61a5d98b7783 | 09c2196beab7422bda070c28afd41dec2a9d094d | /R/DataLoadingSaving.R | ec90b17ac0deeb24ac9c9390a739eb64e98c80a9 | [
"Apache-2.0"
] | permissive | sverchkov/SelfControlledCaseSeries | c37964ba69f675da217fd93c8837487ed088b529 | 920493cac367e1f8812bafd0a72f777d53f79e41 | refs/heads/master | 2020-03-28T02:31:13.046277 | 2018-03-19T16:43:42 | 2018-03-19T16:43:42 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 24,332 | r | DataLoadingSaving.R | # @file DataLoadingSaving.R
#
# Copyright 2018 Observational Health Data Sciences and Informatics
#
# This file is part of SelfControlledCaseSeries
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' Load data for SCCS from the database
#'
#' @description
#' Load all data needed to perform an SCCS analysis from the database.
#'
#' @details
#' This function downloads several types of information:
#' \itemize{
#' \item {Information on the occurrences of the outcome(s) of interest. Note that information for
#' multiple outcomes can be fetched in one go, and later the specific outcome can be specified
#' for which we want to build a model.}
#' \item {Information on the observation time and age for the people with the outcomes.}
#' \item {Information on exposures of interest which we want to include in the model.}
#' }
#' Five different database schemas can be specified, for five different types of information: The
#' \code{cdmDatabaseSchema} is used to extract patient age and observation period. The
#' \code{outcomeDatabaseSchema} is used to extract information about the outcomes, the
#' \code{exposureDatabaseSchema} is used to retrieve information on exposures, and the
#' \code{customCovariateDatabaseSchema} is optionally used to find additional, user-defined
#' covariates. All four locations could point to the same database schema.
#' \code{nestingCohortDatabaseSchema} is optionally used to define a cohort in which the analysis is nested,
#' for example a cohort of diabetics patients.
#'
#' All five locations could point to the same database schema.
#'
#' @return
#' Returns an object of type \code{sccsData}, containing information on the cases, their outcomes,
#' exposures, and potentially other covariates. Information about multiple outcomes can be captured at
#' once for efficiency reasons. This object is a list with the following components: \describe{
#' \item{cases}{An ffdf object listing the persons that have the outcome(s), their age, and
#' observation time.} \item{eras}{An ffdf object listing the exposures, outcomes and other
#' covariates.} \item{covariateRef}{An ffdf object describing the covariates that have been
#' extracted.} \item{metaData}{A list of objects with information on how the sccsData object was
#' constructed.} } The generic \code{summary()} function has been implemented for this object.
#'
#' @param connectionDetails An R object of type \code{ConnectionDetails} created using
#' the function \code{createConnectionDetails} in the
#' \code{DatabaseConnector} package.
#' @param cdmDatabaseSchema The name of the database schema that contains the OMOP CDM
#' instance. Requires read permissions to this database. On SQL
#' Server, this should specifiy both the database and the
#' schema, so for example 'cdm_instance.dbo'.
#' @param oracleTempSchema A schema where temp tables can be created in Oracle.
#' @param outcomeDatabaseSchema The name of the database schema that is the location where
#' the data used to define the outcome cohorts is available. If
#' outcomeTable = CONDITION_ERA, outcomeDatabaseSchema is not
#' used. Requires read permissions to this database.
#' @param outcomeTable The tablename that contains the outcome cohorts. If
#' outcomeTable is not CONDITION_OCCURRENCE or CONDITION_ERA,
#' then expectation is outcomeTable has format of COHORT table:
#' COHORT_DEFINITION_ID, SUBJECT_ID, COHORT_START_DATE,
#' COHORT_END_DATE.
#' @param outcomeIds A list of ids used to define outcomes. If outcomeTable =
#' CONDITION_OCCURRENCE, the list is a set of ancestor
#' CONCEPT_IDs, and all occurrences of all descendant concepts
#' will be selected. If outcomeTable <> CONDITION_OCCURRENCE,
#' the list contains records found in COHORT_DEFINITION_ID
#' field.
#' @param exposureDatabaseSchema The name of the database schema that is the location where
#' the exposure data used to define the exposure cohorts is
#' available. If exposureTable = DRUG_ERA,
#' exposureDatabaseSchema is not used but assumed to be
#' cdmSchema. Requires read permissions to this database.
#' @param exposureTable The tablename that contains the exposure cohorts. If
#' exposureTable <> DRUG_ERA, then expectation is exposureTable
#' has format of COHORT table: cohort_concept_id, SUBJECT_ID,
#' COHORT_START_DATE, COHORT_END_DATE.
#' @param exposureIds A list of identifiers to define the exposures of interest. If
#' exposureTable = DRUG_ERA, exposureIds should be CONCEPT_ID.
#' If exposureTable <> DRUG_ERA, exposureIds is used to select
#' the cohort_concept_id in the cohort-like table. If no
#' exposureIds are provided, all drugs or cohorts in the
#' exposureTable are included as exposures.
#' @param useCustomCovariates Create covariates from a custom table?
#' @param customCovariateDatabaseSchema The name of the database schema that is the location where
#' the custom covariate data is available.
#' @param customCovariateTable Name of the table holding the custom covariates. This table
#' should have the same structure as the cohort table.
#' @param customCovariateIds             A list of cohort definition IDs identifying the records in
#' the customCovariateTable to use for building custom
#' covariates.
#' @param useNestingCohort Should the study be nested in a cohort (e.g. people with
#' a specific indication)? If not, the study will be nested
#' in the general population.
#' @param nestingCohortDatabaseSchema The name of the database schema that is the location
#' where the nesting cohort is defined.
#' @param nestingCohortTable Name of the table holding the nesting cohort. This table
#' should have the same structure as the cohort table.
#' @param nestingCohortId A cohort definition ID identifying the records in the
#' nestingCohortTable to use as nesting cohort.
#' @param deleteCovariatesSmallCount The minimum count for a covariate to appear in the data to be
#' kept.
#' @param studyStartDate A calendar date specifying the minimum date where data is
#' used. Date format is 'yyyymmdd'.
#' @param studyEndDate A calendar date specifying the maximum date where data is
#' used. Date format is 'yyyymmdd'.
#' @param cdmVersion Define the OMOP CDM version used: currently support "4" and
#' "5".
#' @param maxCasesPerOutcome If there are more than this number of cases for a single
#' outcome cases will be sampled to this size. \code{maxCasesPerOutcome = 0}
#' indicates no maximum size.
#'
#' @export
getDbSccsData <- function(connectionDetails,
                          cdmDatabaseSchema,
                          oracleTempSchema = cdmDatabaseSchema,
                          outcomeDatabaseSchema = cdmDatabaseSchema,
                          outcomeTable = "condition_era",
                          outcomeIds,
                          exposureDatabaseSchema = cdmDatabaseSchema,
                          exposureTable = "drug_era",
                          exposureIds = c(),
                          useCustomCovariates = FALSE,
                          customCovariateDatabaseSchema = cdmDatabaseSchema,
                          customCovariateTable = "cohort",
                          customCovariateIds = c(),
                          useNestingCohort = FALSE,
                          nestingCohortDatabaseSchema = cdmDatabaseSchema,
                          nestingCohortTable = "cohort",
                          nestingCohortId = NULL,
                          deleteCovariatesSmallCount = 100,
                          studyStartDate = "",
                          studyEndDate = "",
                          cdmVersion = "5",
                          maxCasesPerOutcome = 0) {
  # Validate date arguments up front, before opening any database connection:
  if (studyStartDate != "" && regexpr("^[12][0-9]{3}[01][0-9][0-3][0-9]$", studyStartDate) == -1) {
    stop("Study start date must have format YYYYMMDD")
  }
  if (studyEndDate != "" && regexpr("^[12][0-9]{3}[01][0-9][0-3][0-9]$", studyEndDate) == -1) {
    stop("Study end date must have format YYYYMMDD")
  }
  conn <- DatabaseConnector::connect(connectionDetails)
  # Make sure the connection is closed even when an error occurs below, so we
  # do not leak database connections on failed SQL execution or queries:
  on.exit(DatabaseConnector::disconnect(conn))

  # CDM v4 uses a different column name in cohort-like tables than v5:
  if (cdmVersion == "4") {
    cohortDefinitionId <- "cohort_concept_id"
  } else {
    cohortDefinitionId <- "cohort_definition_id"
  }

  # Upload the exposure IDs of interest (if any) to a temp table so the SQL
  # can join against them:
  if (is.null(exposureIds) || length(exposureIds) == 0) {
    hasExposureIds <- FALSE
  } else {
    if (!is.numeric(exposureIds))
      stop("exposureIds must be a (vector of) numeric")
    hasExposureIds <- TRUE
    DatabaseConnector::insertTable(conn,
                                   tableName = "#exposure_ids",
                                   data = data.frame(concept_id = as.integer(exposureIds)),
                                   dropTableIfExists = TRUE,
                                   createTable = TRUE,
                                   tempTable = TRUE,
                                   oracleTempSchema = oracleTempSchema)
  }

  # Same for custom covariate IDs (if any):
  if (is.null(customCovariateIds) || length(customCovariateIds) == 0) {
    hasCustomCovariateIds <- FALSE
  } else {
    if (!is.numeric(customCovariateIds))
      stop("customCovariateIds must be a (vector of) numeric")
    hasCustomCovariateIds <- TRUE
    DatabaseConnector::insertTable(conn,
                                   tableName = "#custom_covariate_ids",
                                   data = data.frame(concept_id = as.integer(customCovariateIds)),
                                   dropTableIfExists = TRUE,
                                   createTable = TRUE,
                                   tempTable = TRUE,
                                   oracleTempSchema = oracleTempSchema)
  }

  # Build the temp table of cases (persons with the outcome(s), their age and
  # observation time), optionally restricted to the nesting cohort:
  writeLines("Creating cases")
  sql <- SqlRender::loadRenderTranslateSql("CreateCases.sql",
                                           packageName = "SelfControlledCaseSeries",
                                           dbms = connectionDetails$dbms,
                                           oracleTempSchema = oracleTempSchema,
                                           cdm_database_schema = cdmDatabaseSchema,
                                           outcome_database_schema = outcomeDatabaseSchema,
                                           outcome_table = outcomeTable,
                                           outcome_concept_ids = outcomeIds,
                                           use_nesting_cohort = useNestingCohort,
                                           nesting_cohort_database_schema = nestingCohortDatabaseSchema,
                                           nesting_cohort_table = nestingCohortTable,
                                           nesting_cohort_id = nestingCohortId,
                                           study_start_date = studyStartDate,
                                           study_end_date = studyEndDate,
                                           cohort_definition_id = cohortDefinitionId)
  DatabaseConnector::executeSql(conn, sql)

  # If requested, downsample cases per outcome to at most maxCasesPerOutcome.
  # sampledCases tracks whether sampling actually happened (affects which temp
  # tables exist and which case table is queried later):
  sampledCases <- FALSE
  casesPerOutcome <- FALSE
  if (maxCasesPerOutcome != 0) {
    casesPerOutcome <- TRUE
    writeLines("Counting cases per outcome")
    sql <- SqlRender::loadRenderTranslateSql("CasesPerOutcome.sql",
                                             packageName = "SelfControlledCaseSeries",
                                             dbms = connectionDetails$dbms,
                                             oracleTempSchema = oracleTempSchema,
                                             outcome_database_schema = outcomeDatabaseSchema,
                                             outcome_table = outcomeTable,
                                             outcome_concept_ids = outcomeIds,
                                             cohort_definition_id = cohortDefinitionId)
    DatabaseConnector::executeSql(conn, sql)

    sql <- "SELECT outcome_id, COUNT(*) AS case_count FROM #cases_per_outcome GROUP BY outcome_id"
    sql <- SqlRender::translateSql(sql = sql, targetDialect = connectionDetails$dbms, oracleTempSchema = oracleTempSchema)$sql
    caseCounts <- DatabaseConnector::querySql(conn, sql)
    colnames(caseCounts) <- SqlRender::snakeCaseToCamelCase(colnames(caseCounts))
    for (i in 1:nrow(caseCounts)) {
      if (caseCounts$caseCount[i] > maxCasesPerOutcome) {
        writeLines(paste0("Downsampling cases for outcome ", caseCounts$outcomeId[i], " from ", caseCounts$caseCount[i], " to ", maxCasesPerOutcome))
        sampledCases <- TRUE
      }
    }
    if (sampledCases) {
      sql <- SqlRender::loadRenderTranslateSql("SampleCases.sql",
                                               packageName = "SelfControlledCaseSeries",
                                               dbms = connectionDetails$dbms,
                                               oracleTempSchema = oracleTempSchema,
                                               max_cases_per_outcome = maxCasesPerOutcome)
      DatabaseConnector::executeSql(conn, sql)
    }
  }

  # Build the temp tables holding the eras (outcomes, exposures, and optional
  # custom covariates) for the (possibly sampled) cases:
  sql <- SqlRender::loadRenderTranslateSql("CreateEras.sql",
                                           packageName = "SelfControlledCaseSeries",
                                           dbms = connectionDetails$dbms,
                                           oracleTempSchema = oracleTempSchema,
                                           cdm_database_schema = cdmDatabaseSchema,
                                           outcome_database_schema = outcomeDatabaseSchema,
                                           outcome_table = outcomeTable,
                                           outcome_concept_ids = outcomeIds,
                                           exposure_database_schema = exposureDatabaseSchema,
                                           exposure_table = exposureTable,
                                           use_custom_covariates = useCustomCovariates,
                                           custom_covariate_database_schema = customCovariateDatabaseSchema,
                                           custom_covariate_table = customCovariateTable,
                                           has_exposure_ids = hasExposureIds,
                                           has_custom_covariate_ids = hasCustomCovariateIds,
                                           delete_covariates_small_count = deleteCovariatesSmallCount,
                                           study_start_date = studyStartDate,
                                           study_end_date = studyEndDate,
                                           cdm_version = cdmVersion,
                                           cohort_definition_id = cohortDefinitionId,
                                           sampled_cases = sampledCases)
  writeLines("Creating eras")
  DatabaseConnector::executeSql(conn, sql)

  # Download cases, eras, and the covariate reference into ffdf objects
  # (ff-backed data frames, allowing data larger than memory):
  writeLines("Fetching data from server")
  start <- Sys.time()
  sql <- SqlRender::loadRenderTranslateSql("QueryCases.sql",
                                           packageName = "SelfControlledCaseSeries",
                                           dbms = connectionDetails$dbms,
                                           oracleTempSchema = oracleTempSchema,
                                           sampled_cases = sampledCases)
  cases <- DatabaseConnector::querySql.ffdf(conn, sql)
  colnames(cases) <- SqlRender::snakeCaseToCamelCase(colnames(cases))

  sql <- SqlRender::loadRenderTranslateSql("QueryEras.sql",
                                           packageName = "SelfControlledCaseSeries",
                                           dbms = connectionDetails$dbms,
                                           oracleTempSchema = oracleTempSchema)
  eras <- DatabaseConnector::querySql.ffdf(conn, sql)
  colnames(eras) <- SqlRender::snakeCaseToCamelCase(colnames(eras))

  sql <- "SELECT covariate_id, covariate_name FROM #covariate_ref"
  sql <- SqlRender::translateSql(sql = sql, targetDialect = connectionDetails$dbms, oracleTempSchema = oracleTempSchema)$sql
  covariateRef <- DatabaseConnector::querySql.ffdf(conn, sql)
  colnames(covariateRef) <- SqlRender::snakeCaseToCamelCase(colnames(covariateRef))
  delta <- Sys.time() - start
  writeLines(paste("Loading took", signif(delta, 3), attr(delta, "units")))

  # Clean up the temp tables created above. Which tables exist depends on
  # whether case counting / sampling was performed:
  sql <- SqlRender::loadRenderTranslateSql("RemoveTempTables.sql",
                                           packageName = "SelfControlledCaseSeries",
                                           dbms = connectionDetails$dbms,
                                           oracleTempSchema = oracleTempSchema,
                                           cases_per_outcome = casesPerOutcome,
                                           sampled_cases = sampledCases)
  DatabaseConnector::executeSql(conn, sql, progressBar = FALSE, reportOverallTime = FALSE)
  # Note: the connection is closed by the on.exit() handler registered above.

  metaData <- list(exposureIds = exposureIds, outcomeIds = outcomeIds, call = match.call())
  result <- list(cases = cases, eras = eras, covariateRef = covariateRef, metaData = metaData)
  # Open all ffdfs to prevent annoying messages later:
  open(result$cases)
  open(result$eras)
  open(result$covariateRef)
  class(result) <- "sccsData"
  return(result)
}
#' Save the SCCS data to folder
#'
#' @description
#' \code{sccsData} saves an object of type sccsData to folder.
#'
#' @param sccsData An object of type \code{sccsData} as generated using \code{\link{getDbSccsData}}.
#' @param folder The name of the folder where the data will be written. The folder should not yet
#' exist.
#'
#' @details
#' The data will be written to a set of files in the specified folder.
#'
#' @examples
#' # todo
#'
#' @export
saveSccsData <- function(sccsData, folder) {
  # Validate inputs up front so failures happen before anything is written.
  if (missing(sccsData))
    stop("Must specify sccsData")
  if (missing(folder))
    stop("Must specify folder")
  # Use inherits() rather than `class(sccsData) != "sccsData"`: class() can
  # return a character vector, which would make the comparison a vector
  # condition instead of a single TRUE/FALSE.
  if (!inherits(sccsData, "sccsData"))
    stop("Data not of class sccsData")
  # save.ffdf() stores each ffdf under the name of the variable passed in,
  # so bind the components to plain names first.
  cases <- sccsData$cases
  eras <- sccsData$eras
  covariateRef <- sccsData$covariateRef
  ffbase::save.ffdf(cases, eras, covariateRef, dir = folder)
  # The (non-ff) meta-data is saved alongside in a regular .Rdata file.
  metaData <- sccsData$metaData
  save(metaData, file = file.path(folder, "metaData.Rdata"))
  # Open all ffdfs to prevent annoying messages later:
  open(sccsData$cases)
  open(sccsData$eras)
  open(sccsData$covariateRef)
  # Side-effect function: return TRUE invisibly so it pipes/chains cleanly.
  invisible(TRUE)
}
#' Load the SCCS data from a folder
#'
#' @description
#' \code{loadSccsData} loads an object of type sccsData from a folder in the file system.
#'
#' @param folder The name of the folder containing the data.
#' @param readOnly If true, the data is opened read only.
#'
#' @details
#' The data will be read from a set of files in the folder specified by the user.
#'
#' @return
#' An object of class sccsData.
#'
#' @export
loadSccsData <- function(folder, readOnly = TRUE) {
  # Validate that the target exists and is a directory.
  if (!file.exists(folder))
    stop(paste("Cannot find folder", folder))
  if (!file.info(folder)$isdir)
    stop(paste("Not a folder:", folder))
  # Resolve the folder to an absolute path directly.  The original did a
  # setwd(folder)/setwd(temp) round trip, which temporarily mutated the
  # process working directory (a side effect that leaks if anything between
  # the two calls were to fail).
  absolutePath <- normalizePath(folder)
  # Load the ffdf components and the meta-data into a scratch environment
  # rather than the caller's environment.
  e <- new.env()
  ffbase::load.ffdf(absolutePath, e)
  load(file.path(absolutePath, "metaData.Rdata"), e)
  result <- list(cases = get("cases", envir = e),
                 eras = get("eras", envir = e),
                 covariateRef = get("covariateRef", envir = e),
                 metaData = get("metaData", envir = e))
  # Open all ffdfs to prevent annoying messages later:
  open(result$cases, readonly = readOnly)
  open(result$eras, readonly = readOnly)
  open(result$covariateRef, readonly = readOnly)
  class(result) <- "sccsData"
  # (The explicit rm(e) of the original is unnecessary: e is a local binding
  # and is garbage-collected when the function returns.)
  return(result)
}
#' @export
print.sccsData <- function(x, ...) {
  # Print a short header followed by the exposure and outcome concept IDs,
  # each rendered as a comma-separated list.  Emitting all lines in one
  # writeLines() call produces exactly the same output as line-by-line calls.
  out <- c(
    "SCCS data object",
    "",
    paste("Exposure concept ID(s):", paste(x$metaData$exposureIds, collapse = ",")),
    paste("Outcome concept ID(s):", paste(x$metaData$outcomeIds, collapse = ","))
  )
  writeLines(out)
}
#' @export
# Summarize an sccsData object: total cases, per-outcome event/case counts,
# and covariate counts.  Returns an object of class "summary.sccsData"
# (printed by print.summary.sccsData).  All era scans use ffbase helpers so
# the (potentially disk-backed) ffdf objects are never fully materialized.
summary.sccsData <- function(object, ...) {
  # Total number of cases = rows in the cases ffdf.
  caseCount <- nrow(object$cases)
  # One row per outcome concept ID; counts filled in by the loop below.
  outcomeCounts <- data.frame(outcomeConceptId = object$metaData$outcomeIds,
                              eventCount = 0,
                              caseCount = 0)
  # Restrict eras to health outcomes of interest (eraType == "hoi").
  t <- object$eras$eraType == "hoi"
  hois <- object$eras[ffbase::ffwhich(t, t == TRUE), ]
  for (i in 1:nrow(outcomeCounts)) {
    # Number of outcome events for this concept ID.
    outcomeCounts$eventCount[i] <- ffbase::sum.ff(hois$conceptId == object$metaData$outcomeIds[i])
    if (outcomeCounts$eventCount[i] == 0) {
      outcomeCounts$caseCount[i] <- 0
    } else {
      # Number of distinct observation periods (cases) with >= 1 event.
      t <- (hois$conceptId == object$metaData$outcomeIds[i])
      outcomeCounts$caseCount[i] <- length(ffbase::unique.ff(hois$observationPeriodId[ffbase::ffwhich(t,
                                                                                                     t == TRUE)]))
    }
  }
  # Every era that is not an outcome era is a covariate era.
  covariateValueCount <- ffbase::sum.ff(object$eras$eraType != "hoi")
  result <- list(metaData = object$metaData,
                 caseCount = caseCount,
                 outcomeCounts = outcomeCounts,
                 # covariateRef also lists the outcome concepts; exclude them
                 # so covariateCount reflects true covariates only.
                 covariateCount = nrow(object$covariateRef) - length(object$metaData$outcomeIds),
                 covariateValueCount = covariateValueCount)
  class(result) <- "summary.sccsData"
  return(result)
}
#' @export
print.summary.sccsData <- function(x, ...) {
  # Header block: object description, concept ID sets, and case total.
  writeLines(c(
    "sccsData object summary",
    "",
    paste("Exposure concept ID(s):", paste(x$metaData$exposureIds, collapse = ",")),
    paste("Outcome concept ID(s):", paste(x$metaData$outcomeIds, collapse = ",")),
    "",
    paste("Cases:", paste(x$caseCount)),
    "",
    "Outcome counts:"
  ))
  # Re-shape the counts table for display: the concept IDs become row names,
  # leaving only the two count columns to be printed.
  counts <- x$outcomeCounts
  rownames(counts) <- counts$outcomeConceptId
  counts$outcomeConceptId <- NULL
  colnames(counts) <- c("Event count", "Case count")
  printCoefmat(counts)
  # Trailer block: covariate totals.
  writeLines(c(
    "",
    "Covariates:",
    paste("Number of covariates:", x$covariateCount),
    paste("Number of covariate eras:", x$covariateValueCount)
  ))
}
|
9acc52bfaf58e7d7c4aa85f39e84b6189e3b6a9d | a2b3f5da696bb6898972d1367e1fad53ec12c235 | /cachematrix.R | e3d02fcc6e0351aa4a149ab995e99b2aa8f6df77 | [] | no_license | giridharnandigam/ProgrammingAssignment2 | abb403569ad538f49e0cbb9ee9cc5c25c781e7db | 8b65bca804004c20294f26c58794714d2d862c7f | refs/heads/master | 2021-01-20T14:22:30.461843 | 2017-05-08T06:49:19 | 2017-05-08T06:49:19 | 90,594,582 | 0 | 0 | null | 2017-05-08T06:38:30 | 2017-05-08T06:38:29 | null | UTF-8 | R | false | false | 1,836 | r | cachematrix.R | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## Create a special "matrix" object that can cache its inverse.
## Returns a list of accessor functions closing over the matrix `x`, the
## cached inverse `m`, and a snapshot `oldmat` of the matrix the inverse
## was computed from (used for staleness checks).
makeCacheMatrix <- function(x = matrix())
{
  ## cached inverse; NULL means "not computed yet"
  m <- NULL
  ## Snapshot of the matrix at the time the inverse was cached.
  ## Initialising it here fixes two bugs in the original: without a local
  ## binding, `oldmat <<- x` inside setinverse() assigned to the global
  ## environment, and getold() errored when called before setinverse().
  oldmat <- NULL
  ## replace the stored matrix and invalidate everything cached
  set <- function(y)
  {
    x <<- y
    m <<- NULL
    oldmat <<- NULL
  }
  ## get matrix
  get <- function() x
  ## store the inverse together with the matrix it was computed from,
  ## so staleness can be detected later
  setinverse <- function(solve)
  {
    m <<- solve
    oldmat <<- x
  }
  ## get cached inverse matrix (NULL when nothing is cached)
  getinverse <- function()
  {
    m
  }
  ## get the matrix the cached inverse was computed from (NULL if none)
  getold <- function() oldmat
  ## special matrix object
  list(set = set, get = get, setinverse = setinverse,
       getinverse = getinverse, getold = getold)
}
## Write a short comment describing this function
## Compute (and cache via the special-matrix object's accessors) the inverse
## of the matrix wrapped in `x`.  The cached inverse is returned only when it
## exists AND the stored matrix is identical to the one it was computed from.
##
## Bug fix: the original body referred throughout to an undefined variable
## `matin` instead of the parameter `x`, so the function only worked if a
## global object named `matin` happened to exist.
cacheSolve <- function(x, ...)
{
  ## Return a matrix that is the inverse of the matrix stored in 'x'
  ## get the actual matrix
  matdata <- x$get()
  ## get previous/cached stored matrix
  oldmat <- x$getold()
  ## get previously cached inverted matrix (NULL if none)
  invd <- x$getinverse()
  ## use the cache only when an inverse exists and the matrix is unchanged
  if (!is.null(invd) && identical(oldmat, matdata))
  {
    ## dimension check retained from the original (redundant after
    ## identical(), but harmless)
    if ((nrow(oldmat) == nrow(matdata)) &&
        (ncol(oldmat) == ncol(matdata)))
    {
      message("getting cached data")
      return(invd)
    }
  }
  ## cache miss: compute the inverse, store it, and return it
  matinv <- solve(matdata, ...)
  x$setinverse(matinv)
  matinv
}
|
ada34d3ce3400497ee7eecb2fd7effc4d9f7e934 | ae4e0c00e14bea5574754c7ca3de8aa609343fc1 | /roa/R/uv.outlier.R | 1c7bf4a8df954d3e479720ae1a8acdcbf03aefbd | [] | no_license | kylieainslie/GetOut | 88337369560cecaf86b2293ac10ade548fded9ce | 653dbc2d1da2aeef6dac61fac605fcea5881dd37 | refs/heads/master | 2021-01-10T19:32:17.128768 | 2014-11-21T16:09:26 | 2014-11-21T16:09:26 | 26,873,996 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 16,594 | r | uv.outlier.R | #####################################
### Single Group Outlier Analysis ###
#####################################
# Single-group (univariate) outlier analysis.
#
# Computes four per-feature outlier statistics over the columns of `data`
# (GTI, COPA, OSS, and plain variance) and flags outlying features either via
# a change-point model on the sorted statistic values (detectChangePoint --
# presumably from the `cpm` package; confirm it is attached) or via a
# user-supplied quantile cut-off `p`.  Optionally writes CSVs/JPEGs to
# `save.path` and reports features flagged by >= 3 of the 4 statistics.
#
# Arguments:
#   data         numeric matrix/data frame: samples in rows, features in columns
#   annotation   optional annotation table (NULL disables annotation lookup)
#   annID        column index of the feature ID within `annotation`
#   annName      column index of the feature name within `annotation`
#   cpmtype      change-point model type passed to detectChangePoint();
#                set to NULL to use the quantile cut-off `p` instead
#   save.path    output directory; NULL plots to the current device only
#   diff         if TRUE, run the change-point model on successive differences
#                of the sorted statistics instead of the raw sorted values
#   num.id       if TRUE, feature names are assumed to carry a leading "X"
#                (as make.names() prepends to numeric IDs), which is stripped
#   cut          quantile used inside the COPA statistic
#   common.genes if TRUE, also return features flagged by >= 3 statistics
#   under        if TRUE, look for under-expression outliers instead of over
#   p            user-defined quantile cut-off (used only when cpmtype is NULL)
#
# Returns: list(Outliers = ...) and, when common.genes = TRUE and overlaps
# exist, list(Outliers = ..., Common_Genes = ...).
uv.outlier<-function(data, annotation=NULL,annID, annName,cpmtype="Mann-Whitney",
                     save.path=NULL,diff=TRUE,num.id=TRUE,cut=0.85,common.genes=FALSE,under=FALSE,p=NULL){
  #check for missing arguments
  if(missing(data)){
    stop("No data specified.")
  }
  #check for numeric arguments
  if(!is.numeric(data)){
    stop("Need numeric data matrix.")
  }
  #check that columns for annID and annName are numeric and not larger than total number of columns
  if(is.null(annotation)){
    annID=NULL
    annName=NULL
  }
  else if((!is.null(annotation) & !is.numeric(annID)) | (!is.null(annotation) & !is.numeric(annName))
          | (!is.null(annotation) & annID>dim(annotation)[2]) | (!is.null(annotation) & annName>dim(annotation)[2])){
    stop("Invalid annotation column specification")
  }
  #check if cut-off matches under status
  if(under==TRUE & cut>0.5){warning("cut value doesn't match under status")}
  if(under==FALSE & cut<0.5){warning("cut value doesn't match under status")}
  #both cpmtype and p cannot be null
  # NOTE(review): when both are supplied, cpmtype wins (p is discarded here).
  if(!is.null(cpmtype)){p=NULL}
  if(!is.null(p)){cpmtype=NULL}
  if(is.null(cpmtype) & is.null(p)){stop("Must select change point model or user defined cut-off.")}
  ####################
  ### Progress Bar ###
  ####################
  # apply() wrapper that shows a Tk progress bar (tkProgressBar /
  # setTkProgressBar require the tcltk package to be loadable).
  apply_pb <- function(X, MARGIN, FUN, ...,Title="Calculating ...")
  {
    env <- environment()
    pb_Total <- sum(dim(X)[MARGIN])
    counter <- 0
    pb <- tkProgressBar(title=Title,min=0, max=pb_Total,width=300)
    # wrapper increments the shared counter in this closure's environment
    # before delegating to FUN, so the bar advances once per column.
    wrapper <- function(...)
    {
      curVal <- get("counter", envir = env)
      assign("counter", curVal +1 ,envir= env)
      setTkProgressBar(get("pb", envir= env),curVal +1)
      FUN(...)
    }
    res <- apply(X, MARGIN, wrapper, ...)
    close(pb)
    res
  }
  #####################
  ### Outlier Stats ###
  #####################
  ##Define number of samples per probe
  num = dim(data)[1]
  ### GTI
  # Fraction of samples above the IQR-based fence (Q3 + IQR) times a ratio of
  # the mean-above-fence shifted by the fence; gti_under mirrors this below
  # the Q1 - IQR fence.
  gti = function(data) (sum(as.numeric(data)>as.numeric((quantile(data,0.75,na.rm=TRUE)-quantile(data,0.25,na.rm=TRUE))+quantile(data,0.75,na.rm=TRUE)))/num)*(mean(subset(data,data>((quantile(data,0.75,na.rm=TRUE)-quantile(data,0.25,na.rm=TRUE))+quantile(data,0.75,na.rm=TRUE))))+((quantile(data,0.75,na.rm=TRUE)-quantile(data,0.25,na.rm=TRUE))+quantile(data,0.75,na.rm=TRUE)))/mean(subset(data,data>((quantile(data,0.75,na.rm=TRUE)-quantile(data,0.25,na.rm=TRUE))+quantile(data,0.75,na.rm=TRUE))))
  gti_under = function(data) (sum(as.numeric(data)<as.numeric((quantile(data,0.25,na.rm=TRUE)-quantile(data,0.75,na.rm=TRUE))+quantile(data,0.25,na.rm=TRUE)))/num)*(mean(subset(data,data<((quantile(data,0.25,na.rm=TRUE)-quantile(data,0.75,na.rm=TRUE))+quantile(data,0.25,na.rm=TRUE))))-((quantile(data,0.25,na.rm=TRUE)-quantile(data,0.75,na.rm=TRUE))+quantile(data,0.25,na.rm=TRUE)))/mean(subset(data,data<((quantile(data,0.25,na.rm=TRUE)-quantile(data,0.75,na.rm=TRUE))+quantile(data,0.25,na.rm=TRUE))))
  ### COPA
  # (cut-quantile - median) / MAD; `cut` is the quantile argument.
  copa = function(data) (quantile(data,cut,na.rm=TRUE)-median(data))/mad(data)
  ### OSS
  # Sum of (values beyond the IQR fence - median) scaled by the MAD.
  oss_new = function(data) sum(subset(data,data>((quantile(data,0.75,na.rm=TRUE)-quantile(data,0.25,na.rm=TRUE))+quantile(data,0.75,na.rm=TRUE)))-median(data))/mad(data)
  oss_under = function(data) sum(subset(data,data<(quantile(data,0.25,na.rm=TRUE)-(quantile(data,0.75,na.rm=TRUE)-quantile(data,0.25,na.rm=TRUE))))-median(data))/mad(data)
  ### Variability
  variability = function(data) var(data)
  #Convert data frame into a matrix
  data1<-data.matrix(data)
  #Apply statistics
  ### GTI ############################################################################################
  if(under==FALSE){data2 <- apply_pb(data1,2,gti,Title="Calculating GTI ...")}
  if(under==TRUE){data2 <- apply_pb(data1,2,gti_under,Title="Calculating GTI ...")}
  # Drop NaN, NA and Inf values before thresholding.
  data2.a<-subset(data2,is.nan(data2)==FALSE)
  data2.b<-subset(data2.a,is.na(data2.a)==FALSE)
  data2.c<-subset(data2.b,data2.b!="Inf")
  # Quantile-cut-off branch: keep values beyond the p-th quantile.
  if(is.null(cpmtype) & !is.null(p)){
    if(under==FALSE){data3<-ifelse(data2.c>quantile(data2.c, p,na.rm=TRUE), data2.c, NA)}
    if(under==TRUE){data3<-ifelse(data2.c<quantile(data2.c, p,na.rm=TRUE), data2.c, NA)}
  }
  # Change-point branch: locate the break in the sorted statistic values.
  if(!is.null(cpmtype) & is.null(p)){
    # NOTE(review): this sorts the raw data2 (not the cleaned data2.c) --
    # the OSS section below sorts the cleaned vector; confirm intent.
    if(under==FALSE){data2.s<-sort(data2,decreasing=TRUE)}
    if(under==TRUE){data2.s<-sort(data2)}
    if (diff==FALSE){
      cpm_g<-detectChangePoint(data2.s,cpmType=cpmtype, ARL0=500, startup=20)
    }
    if (diff==TRUE){
      # successive differences of the sorted values
      diffg<-c(rep(0,length(data2.s)))
      # NOTE(review): `1:length(data2.s)-1` parses as (1:n) - 1, i.e. 0:(n-1);
      # the i = 0 assignment is a silent no-op, so 1..(n-1) are still filled,
      # but the intent was probably `1:(length(data2.s)-1)`.
      for (i in 1:length(data2.s)-1){
        diffg[i]<-data2.s[i]-data2.s[i+1]
      }
      diffg1<-subset(diffg,is.nan(diffg)==FALSE)
      diffg2<-subset(diffg1,is.na(diffg1)==FALSE)
      diffg3<-subset(diffg2,diffg2!="Inf")
      cpm_g<-detectChangePoint(diffg3,cpmType=cpmtype, ARL0=500, startup=20)
    }
    # keep everything at/beyond the detected change point
    if(under==FALSE){data3<-ifelse(data2>=data2.s[cpm_g$changePoint], data2, NA)}
    if(under==TRUE){data3<-ifelse(data2<=data2.s[cpm_g$changePoint], data2, NA)}
  }
  data4<-subset(data3, data3!="NA")
  # Map the surviving feature names to annotation (optionally stripping the
  # leading "X" that make.names() adds to numeric IDs).
  if(num.id==TRUE){
    noX<-substr(names(data4), 2,15)
    noX1<-unique(noX)
    if(!is.null(annotation)){
      gsg<-annotation[annotation[,annID] %in% noX1,c(annID,annName)]
      Val<-data4[noX1 %in% gsg[,1]]
      val<-Val[unique(names(Val))]
    }
    else {gsg<-noX1
          val<-as.vector(data4)}
  }
  if(num.id==FALSE){
    if(!is.null(annotation)){
      gsg<-annotation[annotation[,annID] %in% names(data4),c(annID,annName)]
      val<-data4[names(data4) %in% gsg[,1]]
    }
    else {gsg<-names(data4)
          val<-as.vector(data4)}
  }
  gti_out<-data.frame(gsg,Value=val,Stat="GTI")
  rownames(gti_out)<-NULL
  if(!is.null(save.path)){
    # NOTE(review): paste() inserts a space between save.path and the file
    # name; file.path() was probably intended (same in all sections below).
    write.csv(gti_out,file=paste(save.path,"gti_outlier.csv"))
  }
  #if (outsample==TRUE & !is.null(save.path)){
  #  if(under==FALSE){outlying.samples(data,names(data4),over_cut=cut,stat="GTI",savepath=save.path)}
  #  if(under==TRUE){outlying.samples(data,names(data4),under_cut=cut,stat="GTI",savepath=save.path)}
  #}
  #### COPA ##########################################################################################
  data2c <- apply_pb(data1,2,copa,Title="Calculating COPA ...")
  data2c.a<-subset(data2c,is.nan(data2c)==FALSE)
  data2c.b<-subset(data2c.a,is.na(data2c.a)==FALSE)
  data2c.c<-subset(data2c.b,data2c.b!="Inf")
  if(is.null(cpmtype) & !is.null(p)){
    if(under==FALSE){data3c<-ifelse(data2c.c>quantile(data2c.c, p,na.rm=TRUE), data2c.c, NA)}
    if(under==TRUE){data3c<-ifelse(data2c.c<quantile(data2c.c, p,na.rm=TRUE), data2c.c, NA)}
  }
  if(!is.null(cpmtype) & is.null(p)){
    if(under==FALSE){data2c.s<-sort(data2c,decreasing=TRUE)}
    if(under==TRUE){data2c.s<-sort(data2c)}
    if (diff==FALSE){
      cpm_c<-detectChangePoint(data2c.s,cpmType=cpmtype, ARL0=500, startup=20)
    }
    if(diff==TRUE){
      diffc<-c(rep(0,length(data2c.s)))
      for (i in 1:length(data2c.s)-1){
        diffc[i]<-data2c.s[i]-data2c.s[i+1]
      }
      diffc1<-subset(diffc,is.nan(diffc)==FALSE)
      diffc2<-subset(diffc1,is.na(diffc1)==FALSE)
      diffc3<-subset(diffc2,diffc2!="Inf")
      cpm_c<-detectChangePoint(diffc3,cpmType=cpmtype, ARL0=500, startup=20)
    }
    if(under==FALSE){data3c<-ifelse(data2c>=data2c.s[cpm_c$changePoint], data2c, NA)}
    if(under==TRUE){data3c<-ifelse(data2c<=data2c.s[cpm_c$changePoint], data2c, NA)}
  }
  data4c<-subset(data3c, data3c!="NA")
  if(num.id==TRUE){
    noX<-substr(names(data4c), 2,15)
    noX1<-unique(noX)
    if(!is.null(annotation)){
      gsc<-annotation[annotation[,annID] %in% noX1,c(annID,annName)]
      Val<-data4c[noX1 %in% gsc[,1]]
      val<-Val[unique(names(Val))]
    }
    else {gsc<-noX1
          val<-as.vector(data4c)}
  }
  if(num.id==FALSE){
    if(!is.null(annotation)){
      gsc<-annotation[annotation[,annID] %in% names(data4c),c(annID,annName)]
      val<-data4c[names(data4c) %in% gsc[,1]]
    }
    else {gsc<-names(data4c)
          val<-as.vector(data4c)}
  }
  copa_out<-data.frame(gsc,Value=val,Stat="COPA")
  rownames(copa_out)<-NULL
  if(!is.null(save.path)){
    write.csv(copa_out,file=paste(save.path,"copa_outlier.csv"))
  }
  #if (outsample==TRUE & !is.null(save.path)){
  #  if(under==FALSE){outlying.samples(data,names(data4c),over_cut=cut,stat="copa",savepath=save.path)}
  #  if(under==TRUE){outlying.samples(data,names(data4c),under_cut=cut,stat="copa",savepath=save.path)}
  #}
  ### OSS #########################################################################################
  if(under==FALSE){data2o <- apply_pb(data1,2,oss_new,Title="Calculating OSS ...")}
  if(under==TRUE){data2o <- apply_pb(data1,2,oss_under,Title="Calculating OSS ...")}
  data2o.a<-subset(data2o,is.nan(data2o)==FALSE)
  data2o.b<-subset(data2o.a,is.na(data2o.a)==FALSE)
  data2o.c<-subset(data2o.b,data2o.b!="Inf")
  if(is.null(cpmtype) & !is.null(p)){
    if(under==FALSE){data3o<-ifelse(data2o.c>quantile(data2o.c, p,na.rm=TRUE), data2o.c, NA)}
    if(under==TRUE){data3o<-ifelse(data2o.c<quantile(data2o.c, p,na.rm=TRUE), data2o.c, NA)}
  }
  if(!is.null(cpmtype) & is.null(p)){
    # (Unlike the GTI/COPA sections, this sorts the cleaned vector data2o.c.)
    if(under==FALSE){data2o.s<-sort(data2o.c,decreasing=TRUE)}
    if(under==TRUE){data2o.s<-sort(data2o.c)}
    if (diff==FALSE){
      cpm_o<-detectChangePoint(data2o.s,cpmType=cpmtype, ARL0=500, startup=20)
    }
    if(diff==TRUE){
      diffo<-c(rep(0,length(data2o.s)))
      for (i in 1:length(data2o.s)-1){
        diffo[i]<-data2o.s[i]-data2o.s[i+1]
      }
      diffo1<-subset(diffo,is.nan(diffo)==FALSE)
      diffo2<-subset(diffo1,is.na(diffo1)==FALSE)
      diffo3<-subset(diffo2,diffo2!="Inf")
      cpm_o<-detectChangePoint(diffo3,cpmType=cpmtype, ARL0=500, startup=20)
    }
    if(under==FALSE){data3o<-ifelse(data2o>=data2o.s[cpm_o$changePoint], data2o, NA)}
    if(under==TRUE){data3o<-ifelse(data2o<=data2o.s[cpm_o$changePoint], data2o, NA)}
  }
  data4o<-subset(data3o, data3o!="NA")
  if(num.id==TRUE){
    noX<-substr(names(data4o), 2,15)
    noX1<-unique(noX)
    if(!is.null(annotation)){
      gso<-annotation[annotation[,annID] %in% noX1,c(annID,annName)]
      Val<-data4o[noX1 %in% gso[,1]]
      val<-Val[unique(names(Val))]
    }
    else {gso<-noX1
          val<-as.vector(data4o)}
  }
  if(num.id==FALSE){
    if(!is.null(annotation)){
      gso<-annotation[annotation[,annID] %in% names(data4o),c(annID,annName)]
      val<-data4o[names(data4o) %in% gso[,1]]
    }
    else {gso<-names(data4o)
          val<-as.vector(data4o)}
  }
  oss_out<-data.frame(gso,Value=val,Stat="OSS")
  rownames(oss_out)<-NULL
  if(!is.null(save.path)){
    write.csv(oss_out,file=paste(save.path,"oss_outlier.csv"))
  }
  #if (outsample==TRUE & !is.null(save.path)){
  #  if(under==FALSE){outlying.samples(data,names(data4o),over_cut=cut,stat="OSS",savepath=save.path)}
  #  if(under==TRUE){outlying.samples(data,names(data4o),under_cut=cut,stat="OSS",savepath=save.path)}
  #}
  ### Variance #################################################################################
  # Variance is direction-less, so there is no `under` branch here.
  data2v <- apply_pb(data1,2,variability,Title="Calculating variance ...")
  data2v.a<-subset(data2v,is.nan(data2v)==FALSE)
  data2v.b<-subset(data2v.a,is.na(data2v.a)==FALSE)
  data2v.c<-subset(data2v.b,data2v.b!="Inf")
  data2v.s<-sort(data2v.c,decreasing=TRUE)
  if(is.null(cpmtype) & !is.null(p)){
    data3v<-ifelse(data2v.c>quantile(data2v.c, p,na.rm=TRUE), data2v.c, NA)
  }
  if(!is.null(cpmtype) & is.null(p)){
    if (diff==FALSE){
      cpm_v<-detectChangePoint(data2v.s,cpmType=cpmtype, ARL0=500, startup=20)
    }
    if (diff==TRUE){
      diffv<-c(rep(0,length(data2v.s)))
      for (i in 1:length(data2v.s)-1){
        diffv[i]<-data2v.s[i]-data2v.s[i+1]
      }
      cpm_v<-detectChangePoint(diffv,cpmType=cpmtype, ARL0=500, startup=20)
    }
    data3v<-ifelse(data2v.c>=data2v.s[cpm_v$changePoint], data2v.c, NA)
  }
  data4v<-subset(data3v, data3v!="NA")
  if(num.id==TRUE){
    noX<-substr(names(data4v), 2,15)
    noX1<-unique(noX)
    if(!is.null(annotation)){
      gsv<-annotation[annotation[,annID] %in% noX1,c(annID,annName)]
      Val<-data4v[noX1 %in% gsv[,1]]
      val<-Val[unique(names(Val))]
    }
    else {gsv<-noX1
          val<-as.vector(data4v)}
  }
  if(num.id==FALSE){
    if(!is.null(annotation)){
      gsv<-annotation[annotation[,annID] %in% names(data4v),c(annID,annName)]
      val<-data4v[names(data4v) %in% gsv[,1]]
    }
    else {gsv<-names(data4v)
          val<-as.vector(data4v)}
  }
  var_out<-data.frame(gsv,Value=val,Stat="Var")
  rownames(var_out)<-NULL
  if(!is.null(save.path)){
    write.csv(var_out,file=paste(save.path,"var_outlier.csv"))
  }
  #if (outsample==TRUE & !is.null(save.path)){
  #  outlying.samples(data,names(data4v),over_cut=cut,stat="var",savepath=save.path)
  #}
  ########################
  ### all results file ###
  ########################
  # With annotation, the four result frames share columns and are stacked;
  # without annotation they are returned as a named list instead.
  if(!is.null(annotation)){
    all_results<-rbind(gti_out,copa_out,oss_out,var_out)
  }
  if(is.null(annotation)){all_results<-list(GTI=gti_out,COPA=copa_out,OSS=oss_out,Variance=var_out)}
  if (!is.null(save.path)){
    write.csv(all_results,file=paste(save.path,"all_results.csv"))
  }
  #################
  ### cpm plots ###
  #################
  # NOTE(review): both plot branches reference cpm_g/cpm_c/cpm_o/cpm_v and
  # the *.s vectors, which exist only on the change-point path; calling with
  # a user-defined cut-off `p` would error here -- confirm intended usage.
  if(!is.null(save.path)){
    # NOTE(review): setwd() here changes the working directory for the caller
    # and is never restored.
    setwd(save.path)
    par(mfrow=c(1,2))
    ### GTI
    jpeg(filename="gti_cpm_plots.jpg")
    cpm_plot_D_g<-plot(cpm_g$Ds)
    cpm_thresh_plot<-plot(sort(data2))
    abline(h=data2.s[cpm_g$changePoint],lty=2)
    dev.off()
    ### COPA
    jpeg(filename="copa_cpm_plots.jpg")
    cpm_plot_D_c<-plot(cpm_c$Ds)
    cpm_thresh_plot_c<-plot(sort(data2c))
    abline(h=data2c.s[cpm_c$changePoint],lty=2)
    dev.off()
    ### OSS
    jpeg(filename="oss_cpm_plots.jpg")
    cpm_plot_D_o<-plot(cpm_o$Ds)
    cpm_thresh_plot_o<-plot(sort(data2o))
    abline(h=data2o.s[cpm_o$changePoint],lty=2)
    dev.off()
    ### Var
    jpeg(filename="var_cpm_plots.jpg")
    cpm_plot_D_v<-plot(cpm_v$Ds)
    cpm_thresh_plot_v<-plot(sort(data2v))
    abline(h=data2v.s[cpm_v$changePoint],lty=2)
    dev.off()
  }
  if(is.null(save.path)){
    # No output directory: draw a 2x2 panel of threshold plots on the device.
    par(mfrow=c(2,2))
    # GTI
    cpm_thresh_plot<-plot(sort(data2),main="GTI",ylab="GTI Values")
    abline(h=data2.s[cpm_g$changePoint],lty=2)
    # COPA
    cpm_thresh_plot_c<-plot(sort(data2c),main="COPA",ylab="COPA Values")
    abline(h=data2c.s[cpm_c$changePoint],lty=2)
    # OSS
    cpm_thresh_plot_o<-plot(sort(data2o),main="OSS",ylab="OSS Values")
    abline(h=data2o.s[cpm_o$changePoint],lty=2)
    # Variance
    cpm_thresh_plot_v<-plot(sort(data2v),main="Variance",ylab="Variance values")
    abline(h=data2v.s[cpm_v$changePoint],lty=2)
  }
  par(mfrow=c(1,1))
  #######################
  ### Genes in common ###
  #######################
  if(common.genes==TRUE){
    # Flagged feature names per statistic.
    # NOTE(review): `c` shadows base::c inside this block (harmless here
    # because c() is not called again with the function meaning).
    g<-names(data4)
    c<-names(data4c)
    o<-names(data4o)
    v<-names(data4v)
    # Features flagged by all four statistics.
    common4<-Reduce(intersect, list(g,c,o,v))
    if (length(common4)>0){
      method=5
    }
    if (length(common4)==0){
      warning("No outliers in common across all four methods")
      method=3
    }
    if (method==5){
      #Four methods
      if(num.id==TRUE){common_genes<-substr(common4, 2,15)}
      else {common_genes<-common4}
      if(!is.null(annotation)){
        out4<-data.frame(annotation[annotation[,annID] %in% common_genes,],Methods=4)
        out4a<-out4[out4[,annName]!="---",]
      }
      else {out4a<-data.frame(common_genes,Methods=4)}
      #Three methods
      common3<-list(GCO=Reduce(intersect, list(g,c,o)),GCV=Reduce(intersect, list(g,c,v)),
                    GOV=Reduce(intersect, list(g,v,o)),COV=Reduce(intersect, list(v,c,o)))
      if(num.id==TRUE){common_genes<-substr(unlist(common3), 2,15)}
      else {common_genes<-unlist(common3)}
      if(!is.null(annotation)){
        out3<-data.frame(annotation[annotation[,annID] %in% common_genes,],Methods=3)
        out3a<-out3[out3[,annName]!="---",]
      }
      else {out3a<-data.frame(common_genes, Methods=3)}
      common.results<-rbind(out4a,out3a)
    }
    if (method==3){
      # No four-way overlap: report the three-way overlaps only.
      common3<-list(GCO=Reduce(intersect, list(g,c,o)),GCV=Reduce(intersect, list(g,c,v)),
                    GOV=Reduce(intersect, list(g,v,o)),COV=Reduce(intersect, list(v,c,o)))
      common3a<-unique(as.vector(unlist(common3)))
      if (length(common3a)==0){
        # Nothing overlaps at all: fall back to the Outliers-only return.
        common.genes=FALSE
        warning("No outliers in common across three of the four statistics")}
      if(length(common3a)>0){
        if(num.id==TRUE){common_genes<-substr(unlist(common3a), 2,15)}
        else {common_genes<-common3a}
        if(!is.null(annotation)){
          out3<-data.frame(annotation[annotation[,annID] %in% common_genes,],Methods=3)
          out3a<-out3[out3[,annName]!="---",]
          common.results<-out3a
        }
        else {out3a<-data.frame(common_genes, Methods=3)
              common.results<-out3a}
      }
    }
  }
  ##############
  ### Output ###
  ##############
  if(common.genes==FALSE){return(list(Outliers=all_results))}
  if(common.genes==TRUE){return(list(Outliers=all_results,Common_Genes=common.results))}
}
|
45a4983ebeda9a924dedf350d2ddeef0fe8aa9b1 | 36853a83c9c333787682d1c1d585a2bb08c3c725 | /file_functions.R | b0358fca09c59b6af225953809a9568d1ca5d4aa | [] | no_license | muschellij2/CT_Pipeline | ea075a40ac0799b7a9ea2efe3b60f16e70a346cc | 9bf95e6f62bfd4d61a1084ed66d2af4155641dc2 | refs/heads/master | 2021-01-22T02:39:51.559886 | 2019-09-04T19:48:00 | 2019-09-04T19:48:00 | 11,985,496 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 218 | r | file_functions.R | ## get the basename for a file, for example getBase("blah.nii.gz", 2) = "blah"
## Strip the last `ind` dot-separated extensions from each element of `x`,
## e.g. getBase("blah.nii.gz", 2) == "blah".
##
## Fixes an edge case in the original: `1:(length(xx) - ind)` evaluates to
## c(1, 0) when a name has exactly `ind` components (so the first component
## was returned instead of "") and errors when it has fewer.  seq_len() with
## a floor of 0 selects nothing in both cases, yielding "".  vapply()
## replaces sapply() so the result is always a character vector.
getBase <- function(x, ind = 1) {
  vapply(strsplit(x, split = "\\."), function(xx) {
    paste(xx[seq_len(max(length(xx) - ind, 0))], collapse = ".")
  }, character(1))
}
|
c4734b99aba1d232336690e5e62459e1ab70e219 | a6a033d0a8aee92bd6dac3f13c32946d1fe5a916 | /plot_all.R | 0e32220488e86abeae18c2adbb4596f69afb210c | [] | no_license | cobeylab/ccm-letter | 1a1001d99eb5562d4cd5abc382f27525e680bab6 | c032826fd391cfaf95cf54fe1c466fcaf860eef1 | refs/heads/master | 2021-01-12T03:41:55.987771 | 2017-01-09T22:53:00 | 2017-01-09T22:53:00 | 78,253,724 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,186 | r | plot_all.R | #!/usr/bin/env Rscript
library(RSQLite)
library(ggplot2)
main <- function() {
  # Locate this script's directory and open the CCM results database.
  script_dir <- get_script_dir()
  db <- dbConnect(SQLite(), file.path(script_dir, 'output_db_all.sqlite'))
  # Summary table first, then one figure per model configuration.
  plot_model_summary(script_dir, db)
  # Every boolean flag takes both values; the surrogate flags only take the
  # three combinations used in the analysis (FT, TF, TT).
  bools <- c(FALSE, TRUE)
  surr_combos <- list(c(FALSE, TRUE), c(TRUE, FALSE), c(TRUE, TRUE))
  for (use_splines in bools) {
    for (remove_zeros in bools) {
      for (surr in surr_combos) {
        for (use_log_flu in bools) {
          for (flu_is_cause in bools) {
            for (use_lagtest in bools) {
              plot_model(
                script_dir, db,
                use_splines, remove_zeros,
                surr[1], surr[2],
                use_log_flu, flu_is_cause, use_lagtest
              )
            }
          }
        }
      }
    }
  }
}
# Write a LaTeX table (summary-table.tex) of, per surrogate/log-flu
# configuration, the number of significant CCM results (rho_sig_95) and the
# number that also pass the lag test (best_lag <= 0), for both causal
# directions (flu -> environment and environment -> flu).
plot_model_summary <- function(script_dir, db) {
    if(!dir.exists('plots')) {
        dir.create('plots')
    }
    # SQL template; the four %s slots select which side ("cause"/"effect")
    # is pinned to "flu" in ccm_rho and ccm_lagtest respectively.
    query_format <- '
        SELECT
            ccm_rho.use_surr_flu, ccm_rho.use_surr_env, ccm_rho.use_log_flu,
            SUM(rho_sig_95) AS n_sig_95,
            SUM(rho_sig_95 AND (best_lag <= 0)) AS n_sig_95_lag
        FROM ccm_rho, ccm_lagtest
        WHERE
            ccm_rho.%s = "flu" AND ccm_lagtest.%s = "flu"
            AND ccm_rho.use_splines = 1
            AND ccm_rho.remove_zeros = 1 AND ccm_lagtest.remove_zeros = 1
            AND ccm_rho.use_log_flu = ccm_lagtest.use_log_flu
            AND ccm_rho.%s = ccm_lagtest.%s
            AND ccm_rho.country = ccm_lagtest.country
        GROUP BY
            ccm_rho.use_surr_flu, ccm_rho.use_surr_env, ccm_rho.use_log_flu
        ORDER BY
            ccm_rho.use_log_flu, ccm_rho.use_surr_flu, ccm_rho.use_surr_env
    '
    df_flucause <- dbGetQuery(
        db,
        sprintf(query_format, 'cause', 'cause', 'effect', 'effect')
    )
    df_envcause <- dbGetQuery(
        db,
        sprintf(query_format, 'effect', 'effect', 'cause', 'cause')
    )
    # The two directions must produce one row per configuration each.
    stopifnot(nrow(df_flucause) == nrow(df_envcause))
    table_file <- file(file.path(script_dir, 'summary-table.tex'), 'w')
    # Guarantee the connection is closed even if a write below fails
    # (the original leaked the connection on error).
    on.exit(close(table_file), add = TRUE)
    # LaTeX cell marker: "X" for TRUE, empty for FALSE.
    format_bool <- function(x) {
        if (x) 'X' else ''
    }
    # One LaTeX table row per configuration, flu-cause and env-cause counts
    # side by side.
    for(i in 1:nrow(df_flucause)) {
        cat(
            sprintf(
                '%s & %s & %s & %d & %d & %d & %d \\\\\n',
                format_bool(df_flucause$use_surr_flu[i]),
                format_bool(df_flucause$use_surr_env[i]),
                format_bool(df_flucause$use_log_flu[i]),
                df_flucause$n_sig_95[i], df_envcause$n_sig_95[i],
                df_flucause$n_sig_95_lag[i], df_envcause$n_sig_95_lag[i]
            ),
            file = table_file
        )
    }
}
# Draw and save one faceted (country x environmental variable) dot plot of
# cross-map correlations for a single model configuration.  Significant
# results are highlighted; the per-facet null 95% threshold is drawn as a
# vertical line.  Output goes to plots/<config>.pdf under script_dir.
plot_model <- function(script_dir, db, use_splines, remove_zeros, use_surr_flu, use_surr_env, use_log_flu, flu_is_cause, use_lagtest) {
    if(!dir.exists('plots')) {
        dir.create('plots')
    }
    # Which column ("cause"/"effect") is pinned to "flu" depends on the
    # causal direction being plotted.
    if(flu_is_cause) {
        flu_cause_or_effect <- 'cause'
        var_cause_or_effect <- 'effect'
    }
    else {
        flu_cause_or_effect <- 'effect'
        var_cause_or_effect <- 'cause'
    }
    # Join rho results with the lag test for the same variable and country.
    query <- sprintf('
        SELECT ccm_rho.cause, ccm_rho.effect, ccm_rho.country, rho_sig_95, rho, rho_null_95, best_lag, rho_best_lag
        FROM ccm_rho, ccm_lagtest
        WHERE ccm_rho.use_splines = ? AND ccm_rho.remove_zeros = ? AND ccm_rho.use_surr_flu = ?
            AND ccm_rho.use_surr_env = ? AND ccm_rho.use_log_flu = ? AND ccm_rho.%s = "flu"
            AND ccm_lagtest.remove_zeros = ? AND ccm_lagtest.use_log_flu = ? and ccm_lagtest.%s = "flu"
            AND ccm_rho.%s = ccm_lagtest.%s AND ccm_rho.country = ccm_lagtest.country
    ', flu_cause_or_effect, flu_cause_or_effect, var_cause_or_effect, var_cause_or_effect)
    # Parameterized query: the data.frame supplies the ? bindings.
    df <- dbGetPreparedQuery(db,
        query,
        data.frame(
            ccm_rho.use_splines = use_splines, ccm_rho.remove_zeros = remove_zeros, ccm_rho.use_surr_flu = use_surr_flu,
            ccm_rho.use_surr_env = use_surr_env, ccm_rho.use_log_flu = use_log_flu,
            ccm_lagtest.remove_zeros = remove_zeros, ccm_lagtest.use_log_flu = use_log_flu
        )
    )
    # Country order for the facet rows comes from countries.txt.
    countries <- read.table(file.path(script_dir, 'countries.txt'), colClasses = 'character', sep = '\t')[,1]
    df$country_factor <- factor(df$country, levels = countries)
    # The environmental variable sits in the column opposite to flu.
    if(flu_is_cause) {
        df$var_factor <- factor(df$effect, levels = c('AH', 'T', 'RH', 'PRCP'))
    }
    else {
        df$var_factor <- factor(df$cause, levels = c('AH', 'T', 'RH', 'PRCP'))
    }
    # Optionally require a non-positive best lag on top of rho significance.
    if(use_lagtest) {
        df$significant <- df$rho_sig_95 & (df$best_lag <= 0)
    }
    else {
        df$significant <- df$rho_sig_95
    }
    p <- ggplot(data = df) +
        # Significant points are red and slightly larger.
        geom_point(aes(x = rho, y = 0, color = factor(significant), size = 1 + significant)) +
        scale_radius(range = c(0.5, 1)) +
        scale_color_manual(values = c('black', 'red')) +
        # Vertical line: the 95% null threshold for each facet.
        geom_vline(aes(xintercept = rho_null_95), size = 0.25) +
        labs(x = 'cross-map correlation (flu drives env. variable)', y = NULL) +
        facet_grid(
            country_factor ~ var_factor, switch = 'y',
            # Column labels carry the count of significant countries (of 26).
            labeller = labeller(var_factor = function(var) {
                df_sig_sum <- sapply(var, function(v) { sum(df$significant[df$var_factor == v]) })
                label <- c(
                    AH = 'abs. hum.', RH = 'rel. hum.', T = 'temp.', PRCP = 'precip.'
                )[as.character(var)]
                sprintf('%s (%d/26)', label, df_sig_sum)
            })
        ) +
        expand_limits(x = c(-0.25, 1)) +
        scale_x_continuous(breaks=c(0, 0.5, 1.0)) +
        theme_minimal(base_size = 9) +
        theme(
            strip.text.y = element_text(angle = 180, hjust = 1),
            axis.text.y = element_blank(),
            panel.grid.minor.x = element_blank(),
            panel.grid.major.x = element_line(size = 0.25, linetype = 2, color = 'darkgray'),
            panel.grid.major.y = element_blank(),
            panel.grid.minor.y = element_blank(),
            legend.position = 'none',
            panel.border = element_rect(linetype = 1, fill = NA),
            panel.spacing.x = unit(0.25, 'cm'),
            panel.spacing.y = unit(0.125, 'cm'),
            axis.title.x = element_text(size = 8),
            axis.ticks.x = element_line(size = 0.25)
        )
    # File name encodes the full configuration.
    plot_filename <- file.path(
        script_dir, 'plots',
        sprintf(
            'us=%d-rz=%d-usf=%d-use=%d-ulf=%d-fic=%d-lt=%d.pdf',
            use_splines, remove_zeros, use_surr_flu, use_surr_env, use_log_flu, flu_is_cause, use_lagtest
        )
    )
    ggsave(plot_filename, p, width = 4.5, height = 4)
}
# Return the absolute path of the directory containing this script, parsed
# from the --file= argument that Rscript passes on the command line.
get_script_dir <- function() {
    command_args <- commandArgs(trailingOnly = FALSE)
    file_arg_name <- '--file='
    # fixed = TRUE: match/strip the literal prefix, not a regular expression.
    file_arg <- command_args[grep(file_arg_name, command_args, fixed = TRUE)]
    # file_path_as_absolute is exported from tools; the original reached for
    # it via ':::', which accesses package internals and is fragile.
    tools::file_path_as_absolute(
        dirname(sub(file_arg_name, '', file_arg, fixed = TRUE))
    )
}
main()
|
9c64c66863680fb3f15610012c3d6fe217098a47 | d50bbe2ef197a5275477667ff88cc4562fc84639 | /man/ccmean.Rd | db773c5434cdb232fd2c6a6a0d1e7a3ca47d52e6 | [
"MIT"
] | permissive | LarsHernandez/ccostr | cb1960a068aa2ab89c366ef19925c35691db8633 | 7c61f941f9202df12438baa0e977ee1bc197f91a | refs/heads/master | 2020-07-04T15:11:06.478697 | 2019-08-14T09:54:00 | 2019-08-14T09:54:00 | 202,321,578 | 0 | 0 | NOASSERTION | 2019-08-14T09:44:50 | 2019-08-14T09:44:49 | null | UTF-8 | R | false | true | 1,622 | rd | ccmean.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ccmean.R
\name{ccmean}
\alias{ccmean}
\title{Calculates estimates of the mean cost with censored data}
\usage{
ccmean(x, L = max(x$surv), addInterPol = 0)
}
\arguments{
\item{x}{A dataframe with columns: id, cost, delta and surv. If Cost history is available it can be specified by: start and stop,}
\item{L}{Limit. Mean cost is calculated up to L; if not specified, L = max(surv)}
\item{addInterPol}{This parameter affects the interpolation of cost between two observed times. Defaults to zero.}
}
\value{
An object of class "ccobject".
}
\description{
This function calculates the mean cost for right-censored cost
data over a period of L time units (days, months, years,...)
}
\details{
The function returns four estimates. The first two are simple and biased
downwards, and included for comparison. The estimates are:
- AS: "Available Sample estimator" - The simple sample mean
- CC: "Complete Case estimator" - The mean of fully observed cases
- BT: "Weighted Complete Case estimator" - Bang and Tsiatis's estimator
- ZT: "Weighted Available estimator" - Zhao and Tian's estimator
The function needs the following in a dataframe:
- id: The id separating each individual
- cost: The total cost, or if start and stop provided the specific cost
- start: Start of cost
- stop: End of cost; for a one-time cost, start = stop
- delta: Event variable, 1 = event, 0 = no event
- surv: Survival
}
\examples{
hcost
ccmean(hcost, L = 1461, addInterPol = 1)
}
\references{
\insertRef{Bang2000}{ccostr}
\insertRef{Zhao2001}{ccostr}
}
|
caba3775524593224811001e5c8489a84481e10e | c3852480b1ccf9c53cc9777d9d6227b33881556a | /R/runDR.R | ec70cbfea8386caaa39d63cc51117b543616062b | [] | no_license | HelenaLC/CATALYST | f4506a621f81fef74911661d02330253d37aee9e | f3e294ed9a4d3f300feb994bb381df6a6b2c8309 | refs/heads/main | 2023-08-23T13:30:52.504230 | 2023-04-25T14:53:40 | 2023-04-25T14:53:40 | 75,288,871 | 60 | 34 | null | 2022-05-12T08:25:28 | 2016-12-01T12:06:28 | R | UTF-8 | R | false | false | 3,349 | r | runDR.R | #' @rdname runDR
#' @title Dimension reduction
#'
#' @description Wrapper around dimension reduction methods available
#' through \code{scater}, with optional subsampling of cells per each sample.
#'
#' @param x a \code{\link[SingleCellExperiment]{SingleCellExperiment}}.
#' @param dr character string specifying which dimension reduction to use.
#' @param cells single numeric specifying the maximal number of cells
#' per sample to use for dimension reduction; NULL for all cells.
#' @param features a character vector specifying which
#' antigens to use for dimension reduction; valid values are
#' \code{"type"/"state"} for \code{type/state_markers(x)}
#' if \code{rowData(x)$marker_class} have been specified;
#' a subset of \code{rownames(x)}; NULL to use all features.
#' @param assay character string specifying which assay data to use
#' for dimension reduction; valid values are \code{assayNames(x)}.
#' @param ... optional arguments for dimension reduction; passed to
#' \code{\link[scater]{runUMAP}}, \code{\link[scater]{runTSNE}},
#' \code{\link[scater]{runPCA}}, \code{\link[scater]{runMDS}}
#' and \code{\link[scater]{runDiffusionMap}}, respectively.
#' See \code{?"scater-red-dim-args"} for details.
#'
#' @author Helena L Crowell \email{helena.crowell@@uzh.ch}
#'
#' @references
#' Nowicka M, Krieg C, Crowell HL, Weber LM et al.
#' CyTOF workflow: Differential discovery in
#' high-throughput high-dimensional cytometry datasets.
#' \emph{F1000Research} 2017, 6:748 (doi: 10.12688/f1000research.11622.1)
#'
#' @return the input \code{SingleCellExperiment} with the computed
#' coordinates stored in \code{reducedDim(., dr)}; when \code{cells}
#' is specified, cells excluded by subsampling get NA coordinates.
#'
#' @examples
#' # construct SCE
#' data(PBMC_fs, PBMC_panel, PBMC_md)
#' sce <- prepData(PBMC_fs, PBMC_panel, PBMC_md)
#'
#' # run UMAP on <= 200 cells per sample
#' sce <- runDR(sce, features = type_markers(sce), cells = 100)
#'
#' @importFrom scater runUMAP runTSNE runPCA runMDS runDiffusionMap
#' @importFrom SingleCellExperiment reducedDim reducedDim<-
#' @importFrom SummarizedExperiment assayNames
#' @export
runDR <- function(x,
dr = c("UMAP", "TSNE", "PCA", "MDS", "DiffusionMap"),
cells = NULL, features = "type", assay = "exprs", ...) {
# check validity of input arguments
stopifnot(is(x, "SingleCellExperiment"))
dr <- match.arg(dr)
.check_assay(x, assay)
fs <- .get_features(x, features)
if (is.null(cells)) {
# use all cells (TRUE subscripts every column below)
cs <- TRUE
} else {
# downsampling requires per-sample cell groupings
if (is.null(x$sample_id))
stop("colData column sample_id not found,\n ",
" but is required to downsample cells.")
# 'cells' must be a single positive whole number
stopifnot(
is.numeric(cells), length(cells) == 1,
as.integer(cells) == cells, cells > 0)
# split cell indices by sample
cs <- split(seq_len(ncol(x)), x$sample_id)
# sample at most 'n' cells per sample
cs <- unlist(lapply(cs, function(u)
sample(u, min(cells, length(u)))))
}
# run dimension reduction; dispatch on name, e.g. "runUMAP"
fun <- get(paste0("run", dr))
y <- fun(x[, cs], subset_row = fs, exprs_values = assay, ...)
# return SCE when no cell subsetting has been done
if (is.null(cells)) return(y)
# else, write coordinates into original SCE;
# pad with NA so the matrix has one row per cell in 'x'
xy <- reducedDim(y, dr)
m <- matrix(NA, nrow = ncol(x), ncol = ncol(xy))
m[cs, ] <- xy
reducedDim(x, dr) <- m
return(x)
}
8a09bf1427fb89b88e1e742a6a1279dd100bfda9 | 2ce7630346bd7320c48d641ae9b9cd14c06108d3 | /man/writeSLI.Rd | 6c235753745e14470087eeaa803b81b2be06e159 | [] | no_license | bleutner/RStoolbox | 2cda3b6b37e51d06d176e2bc21005ea2e3e026c9 | ec6cac232d5e933d649b9ba52dfd5398ea63220e | refs/heads/master | 2023-04-09T17:06:00.653435 | 2022-04-01T19:54:52 | 2022-04-01T19:54:52 | 19,460,005 | 244 | 85 | null | 2023-03-29T08:47:53 | 2014-05-05T15:08:49 | R | UTF-8 | R | false | true | 1,654 | rd | writeSLI.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/readSLI.R
\name{writeSLI}
\alias{writeSLI}
\title{Write ENVI spectral libraries}
\usage{
writeSLI(
x,
path,
wavl.units = "Micrometers",
scaleF = 1,
mode = "bin",
endian = .Platform$endian
)
}
\arguments{
\item{x}{data.frame with first column containing wavelengths and all other columns containing spectra.}
\item{path}{path to spectral library file to be created.}
\item{wavl.units}{wavelength units. Defaults to Micrometers. Nanometers is another typical option.}
\item{scaleF}{optional reflectance scaling factor. Defaults to 1.}
\item{mode}{character string specifying output file type. Must be one of \code{"bin"} for binary .sli files or \code{"ASCII"} for ASCII ENVI plot files.}
\item{endian}{character. Optional. By default the endian is determined based on the platform, but can be forced manually by setting it to either "little" or "big".}
}
\description{
Writes binary ENVI spectral library files (sli) with accompanying header (.sli.hdr) files OR ASCII spectral library files in ENVI format.
}
\details{
ENVI spectral libraries with ending .sli are binary arrays with spectra saved in rows.
}
\examples{
## Example data
sliFile <- system.file("external/vegSpec.sli", package="RStoolbox")
sliTmpFile <- paste0(tempdir(),"/vegetationSpectra.sli")
## Read spectral library
sli <- readSLI(sliFile)
head(sli)
plot(sli[,1:2], col = "orange", type = "l")
lines(sli[,c(1,3)], col = "green")
## Write to binary spectral library
writeSLI(sli, path = sliTmpFile)
}
\seealso{
\code{\link{readSLI}}
}
|
94cb27ec824675543c6dfb95fd31b3f263858919 | 132c868650be85a4eaf605832e6cd57d9aa8faf3 | /man/plot_time_to_removal_by_spvl.Rd | d71ffc4784db032270b4dca8175e644992e89e2c | [] | no_license | EvoNetHIV/RoleSPVL | 9797fe146afa1e750ef1cfdaf231b62e0f19e848 | 113b55fedbdd2ac6627b751df3e102e801e36c5c | refs/heads/master | 2021-09-14T17:46:30.726252 | 2018-05-16T22:16:13 | 2018-05-16T22:16:13 | 103,449,083 | 1 | 1 | null | null | null | null | UTF-8 | R | false | true | 442 | rd | plot_time_to_removal_by_spvl.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_time_to_removal_spvl.R
\name{plot_time_to_removal_by_spvl}
\alias{plot_time_to_removal_by_spvl}
\title{Plot time to removal by set-point viral load (SPVL)}
\usage{
plot_time_to_removal_by_spvl(model, type = "aids")
}
\arguments{
\item{model}{Model object whose removal times are plotted.}
\item{type}{Character string selecting the removal type; defaults to \code{"aids"}.}
}
\value{
return value here.
}
\description{
Description
}
\details{
Additional details here
}
\examples{
\dontrun{plot_time_to_removal_by_spvl(model, type = "aids")}
}
|
c9671d07339fc7a9c6781ca6f99f0d8d73aad659 | 4b56315bc1671b8e25fce3c5293f270034a3f88c | /scripts/demographic/traumaEnvPred_plot.R | 2b08e6d20015ae948b648f229b19012e85a4307b | [] | no_license | PennBBL/pncLongitudinalPsychosis | fd8895b94bcd0c4910dd5a48019e7d2021993d3a | 006364626ccbddac6197a0e7c5cbe04215601e33 | refs/heads/master | 2023-05-27T00:16:35.264690 | 2021-06-18T13:55:49 | 2021-06-18T13:55:49 | 172,975,758 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,541 | r | traumaEnvPred_plot.R | ### This script runs logistic regressions to predict final PS status using baseline
### number of types of trauma and neighborhood environment.
### Creates Figure 4 and Table 3 in the longitudinal clinical paper.
###
### Ellyn Butler
### September 8, 2020 - September 11, 2020 (changed to envSES on November 30, 2020)
### New diagnostic labels on March 4, 2021
library('dplyr') # Version 1.0.2
library('sjPlot') # Version 2.8.4
library('reshape2') # Version 1.4.4
library('ggplot2') # Version 3.3.2
library('ggpubr') # Version 0.4.0
library('fastDummies') # Version 1.6.1
library('cowplot') # Version 1.1.1
# ---- Load data: clinical diagnoses, demographics, environment, trauma ----
clin_df <- read.csv('~/Documents/pncLongitudinalPsychosis/data/clinical/pnc_longitudinal_diagnosis_n749_20210112.csv', stringsAsFactors = TRUE)
demo_df <- read.csv('~/Documents/pncLongitudinalPsychosis/data/demographics/baseline/n9498_demographics_go1_20161212.csv', stringsAsFactors = TRUE)
env_df <- read.csv('~/Documents/traumaInformant/data/n9498_go1_environment_factor_scores_tymoore_20150909.csv', stringsAsFactors = TRUE)
trauma_df <- read.csv('~/Documents/traumaInformant/data/PNC_GO1_GOASSESSDataArchiveNontext_DATA_2015-07-14_1157.csv', stringsAsFactors = TRUE)
# Recalculate ntimepoints (a lot of erroneous zeros): each subject's value
# becomes the number of rows that subject contributes.
for (bblid in unique(clin_df$bblid)) {
clin_df[clin_df$bblid == bblid, 'ntimepoints'] <- length(clin_df[clin_df$bblid == bblid, 'ntimepoints'])
}
# Create first/last diagnoses df: keep each subject's first (t1) and final
# assessment, relabel the final one 'tfinal2', then cast to one row per bblid.
clin_df <- clin_df[clin_df$timepoint == 't1' | clin_df$timepoint == paste0('t', clin_df$ntimepoints), ]
clin_df$timepoint <- recode(clin_df$timepoint, 't1'='t1', 't2'='tfinal2',
't3'='tfinal2', 't4'='tfinal2', 't5'='tfinal2', 't6'='tfinal2')
clin_df$diagnosis <- recode(clin_df$diagnosis, 'psy'='PS')
clin_df <- reshape2::dcast(clin_df, bblid ~ timepoint, value.var='diagnosis')
clin_df$t1_tfinal <- paste(clin_df$t1, clin_df$tfinal2, sep='_')
# Human-readable first->last trajectory labels (e.g. 'TD-PS').
clin_df$Diagnoses <- recode(clin_df$t1_tfinal, 'TD_TD'='TD-TD', 'TD_other'='TD-OP',
'TD_PS'='TD-PS', 'other_TD'='OP-TD', 'other_other'='OP-OP', 'other_PS'='OP-PS',
'PS_TD'='PS-TD', 'PS_other'='PS-OP', 'PS_PS'='PS-PS')
clin_df$Diagnoses <- factor(clin_df$Diagnoses)
# Restrict to interview types AP, MP and YPI; keep bblid plus the ptd* items.
trauma_df <- trauma_df[trauma_df$interview_type %in% c('AP', 'MP', 'YPI'),]
trauma_df <- trauma_df[,c('proband_bblid', grep('ptd', names(trauma_df), value=TRUE))]
names(trauma_df)[names(trauma_df) == 'proband_bblid'] <- 'bblid'
# Merge clinical, demographic and trauma data; recode covariates.
final_df <- merge(clin_df, demo_df, by='bblid')
final_df <- merge(final_df, trauma_df, by='bblid')
names(final_df)[names(final_df) == 't1'] <- 'first_diagnosis'
names(final_df)[names(final_df) == 'tfinal2'] <- 'last_diagnosis'
final_df$Female <- recode(final_df$sex, `2`=1, `1`=0)
final_df$Sex <- recode(final_df$sex, `2`='Female', `1`='Male')
final_df$White <- recode(final_df$race, `1`='Yes', .default='No')
final_df$first_diagnosis <- recode(final_df$first_diagnosis, 'other'='OP')
final_df$first_diagnosis <- factor(final_df$first_diagnosis)
final_df$last_diagnosis <- recode(final_df$last_diagnosis, 'other'='OP')
#final_df$t1_tfinal <- recode(final_df$t1_tfinal, 'other_other'='OP_OP',
# 'other_TD'='OP_TD', 'other_PS'='OP_PS', 'TD_other'='TD_OP', 'PS_other'='PS_OP')
# Use TD as the reference level for both diagnosis factors.
final_df <- within(final_df, Diagnoses <- relevel(Diagnoses, ref='TD-TD'))
final_df <- within(final_df, first_diagnosis <- relevel(first_diagnosis, ref='TD'))
# Trauma items ptd001-ptd004 and ptd006-ptd009; a code of 9 means missing.
ptdvars <- c(paste0('ptd00', 1:4), paste0('ptd00', 6:9))
final_df[, ptdvars] <- sapply(final_df[, ptdvars], na_if, y=9)
final_df <- merge(final_df, env_df)
# Outcome: psychosis-spectrum (PS) status at the final assessment (1/0).
final_df$PS_final <- recode(final_df$last_diagnosis, 'OP'=0, 'PS'=1, 'TD'=0)
# Predictor: count of distinct trauma types endorsed.
final_df$num_type_trauma <- rowSums(final_df[, ptdvars])
######### Does # of trauma types predict final time point PS status? #########
# mod1: baseline diagnosis only; mod2 adds the trauma-type count.
mod1 <- glm(PS_final ~ first_diagnosis, family='binomial', data=final_df)
mod2 <- glm(PS_final ~ first_diagnosis + num_type_trauma, family='binomial', data=final_df)
#Answer: Yes
# Does baseline diagnosis moderate the relationship between final time
# point PS status and number of types of traumas such that there is a stronger
# relationship between number of trauma types and final PS status among those
# who were PS at baseline than those who were OP or TD?
mod3 <- glm(PS_final ~ num_type_trauma*first_diagnosis, family='binomial', data=final_df)
#Answer: No
all_models_trauma <- tab_model(mod1, mod2, mod3)
#final_df$PS_final_I <- recode(final_df$PS_final, 'Yes'=1, 'No'=0)
# Jittered observations plus per-group logistic fits; colours map to
# first_diagnosis levels in order (TD is first after the relevel above).
trauma_plot <- ggplot(final_df, aes(x=num_type_trauma, y=PS_final,
group=first_diagnosis, colour=first_diagnosis)) +
theme_linedraw() + geom_point(shape=1, position=position_jitter(width=.1,
height=.05)) + ylab('PS at Final Assessment') + xlab('Number of Types of Trauma') +
labs(colour='First Diagnosis') +
scale_colour_manual(values = c('green3', 'goldenrod2', 'red')) +
stat_smooth(method='glm', method.args=list(family='binomial'), se=FALSE) +
theme(legend.position='bottom')
pdf(file='~/Documents/pncLongitudinalPsychosis/plots/traumaLogistic.pdf', width=6, height=5)
trauma_plot
dev.off()
###### Does neighborhood environment predict final time point PS status? ######
mod2.2 <- glm(PS_final ~ first_diagnosis + envSES, family='binomial', data=final_df)
#Answer: No
# Does baseline diagnosis moderate the relationship between final time
# point PS status and neighborhood environment such that the association
# is stronger among those who were PS at baseline than those who were
# OP or TD?
mod3.2 <- glm(PS_final ~ envSES*first_diagnosis, family='binomial', data=final_df)
#Answer: No
all_models_env <- tab_model(mod1, mod2.2, mod3.2)
# Same plot as above but with neighborhood SES (envSES) on the x-axis.
env_plot <- ggplot(final_df, aes(x=envSES, y=PS_final,
group=first_diagnosis, colour=first_diagnosis)) +
theme_linedraw() + geom_point(shape=1, position=position_jitter(width=.1,
height=.05)) + ylab('PS at Final Assessment') + xlab('Neighborhood Environment') +
labs(colour='First Diagnosis') +
scale_colour_manual(values = c('green3', 'goldenrod2', 'red')) +
stat_smooth(method='glm', method.args=list(family='binomial'), se=FALSE) +
theme(legend.position='bottom')
pdf(file='~/Documents/pncLongitudinalPsychosis/plots/envLogistic.pdf', width=6, height=5)
env_plot
dev.off()
# Build the two-panel paper figure: shared legend extracted from env_plot,
# panels A (trauma) and B (environment) side by side, legend underneath.
diag_legend <- get_legend(env_plot)
trauma_plot <- trauma_plot + theme(legend.position='none')
env_plot <- env_plot + theme(legend.position='none')
grid_plot <- cowplot::plot_grid(
cowplot::plot_grid(trauma_plot, env_plot, labels=c('A', 'B')),
diag_legend, rel_heights=c(4, 1), nrow=2, ncol=1)
pdf(file='~/Documents/pncLongitudinalPsychosis/plots/env_grid_paper.pdf', width=7, height=4)
print(grid_plot)
dev.off()
###############################################################################
###############################################################################
###############################################################################
###### Does the interaction between trauma and environment predict? ######
# Additive, two-way and three-way interaction models.
mod2.3 <- glm(PS_final ~ first_diagnosis + num_type_trauma + envSES, family='binomial', data=final_df)
mod3.3 <- glm(PS_final ~ first_diagnosis + num_type_trauma*envSES, family='binomial', data=final_df)
mod4.3 <- glm(PS_final ~ first_diagnosis*num_type_trauma*envSES, family='binomial', data=final_df)
mod5.3 <- glm(PS_final ~ first_diagnosis*num_type_trauma*envSES*sex, family='binomial', data=final_df) #Wildly overfit
# FDR-corrected coefficient table written to HTML.
print(tab_model(mod1, mod2.2, mod2, mod2.3, mod3.3, mod4.3, p.adjust='fdr',
file='~/Documents/pncLongitudinalPsychosis/results/table_prediction.html'))
###############################################################################
final_df$Age <- final_df$ageAtClinicalAssess1
# Sensitivity Table: same model sequence adjusted for age, sex and race.
mod1b <- glm(PS_final ~ Age + Sex + White + first_diagnosis, family='binomial', data=final_df)
mod2.2b <- glm(PS_final ~ Age + Sex + White + first_diagnosis + envSES, family='binomial', data=final_df)
mod2b <- glm(PS_final ~ Age + Sex + White + first_diagnosis + num_type_trauma, family='binomial', data=final_df)
mod2.3b <- glm(PS_final ~ Age + Sex + White + first_diagnosis + num_type_trauma + envSES, family='binomial', data=final_df)
mod3.3b <- glm(PS_final ~ Age + Sex + White + first_diagnosis + num_type_trauma*envSES, family='binomial', data=final_df)
mod4.3b <- glm(PS_final ~ Age + Sex + White + first_diagnosis*num_type_trauma*envSES, family='binomial', data=final_df)
print(tab_model(mod1b, mod2.2b, mod2b, mod2.3b, mod3.3b, mod4.3b, p.adjust='fdr',
file='~/Documents/pncLongitudinalPsychosis/results/table_prediction_sensitivity.html'))
#
|
de8b7a0e7460dab4874a44fc214a2c50b0b5b8fe | 92a0b69e95169c89ec0af530ed43a05af7134d45 | /R/get_project_path.R | de06a1ef5cbad3b0a19c0bdc0af45fa85ecc2446 | [] | no_license | gelfondjal/IT2 | 55185017b1b34849ac1010ea26afb6987471e62b | ee05e227403913e11bf16651658319c70c509481 | refs/heads/master | 2021-01-10T18:46:17.062432 | 2016-01-20T17:51:29 | 2016-01-20T17:51:29 | 21,449,261 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 693 | r | get_project_path.R | #' Given Project name, Return project directory
#' @param project_name character string naming the project; NULL (the
#' default) returns the full project-id-to-directory lookup table
#' @details Reads "~/ProjectPaths/projectid_2_directory.csv" into dataframe
#' @return string containing project directory (or the whole lookup
#' dataframe when project_name is NULL)
#' @export
get.project.path <- function(project_name=NULL){
# Lookup table mapping project.id to project.path.
# NOTE(review): path.expand.2 is a project helper -- assumed to expand "~"
# like base path.expand; confirm its definition elsewhere in the package.
all.projects <- read.csv(file.path(path.expand.2("~"),"ProjectPaths","projectid_2_directory.csv"),as.is=TRUE)
if(is.null(project_name)){return(all.projects)}
project.out <- subset(all.projects,project.id==project_name)
# Exactly one match is required; zero or duplicate ids are errors.
if(nrow(project.out)!=1){stop("Project.id cannot be used to resolve project path")}
return(as.character(project.out$project.path))
} #END get.project.path
|
5c2e9c8381040fbc3013dcad64e7b1d65fd06835 | bc5f18c64d9e46db53126976834f617054edb35e | /R/mgvar.R | f2c533017a2dbbbbacf9dbce0134549f590cae04 | [] | no_license | cran/WRS2 | d7a6e48fca12e5c892b1c1ecc4ae1a151b485f2c | da65525b80d08bcdaa5d155e5db6e4670b615788 | refs/heads/master | 2022-06-30T08:31:33.772817 | 2022-06-10T15:33:29 | 2022-06-10T15:33:29 | 25,037,146 | 1 | 5 | null | null | null | null | UTF-8 | R | false | false | 1,012 | r | mgvar.R | mgvar<-function(m,se=FALSE,op=0,cov.fun=covmve,SEED=TRUE){
#
# Find the center of a scatterplot, add the point that
# increases the generalized variance by the smallest amount,
# and continue until all points are added; return the generalized
# variance value recorded when each point entered.
# The central value and the point(s) closest to it get NA.
#
# op=0 find central points using pairwise differences
# op!=0 find central points using measure of location
# used by cov.fun
#
# choices for cov.fun include
# covmve
# covmcd
# tbs (Rocke's measures of location)
# rmba (Olive's median ball algorithm)
#
# Distance of each row of m from the center, depending on 'op'.
if(op==0)temp<-apgdis(m,se=se)$distance
if(op!=0)temp<-out(m,cov.fun=cov.fun,plotit=FALSE,SEED=SEED)$dis
# Unflag the most central point ...
flag<-(temp!=min(temp))
temp2<-temp
temp2[!flag]<-max(temp)
flag2<-(temp2!=min(temp2))
# ... and the next-closest point(s); these seed the core set and keep NA.
# (FALSE rather than the reassignable shorthand F.)
flag[!flag2]<-FALSE
varvec<-NA
while(sum(flag)>0){
ic<-0
chk<-NA
remi<-NA
# Try each remaining flagged point as the next addition to the core set.
# seq_len() rather than 1:nrow(m) avoids the 1:0 footgun.
for(i in seq_len(nrow(m))){
if(flag[i]){
ic<-ic+1
# Generalized variance if point i joined the current core set.
chk[ic]<-gvar(rbind(m[!flag,],m[i,]))
remi[ic]<-i
}}
# Commit the point whose addition inflates the generalized variance least.
sor<-order(chk)
k<-remi[sor[1]]
varvec[k]<-chk[sor[1]]
flag[k]<-FALSE
}
varvec
}
|
1425f587e2a53e22d4246113e941a2b5e5acfde4 | 8cc49a6c20dc68fbf70ffdc166cae5f5a5069afa | /cachematrix.R | 9caf9b562616ffd894292120415be8c22aa1eaf8 | [] | no_license | sigsmari/ProgrammingAssignment2 | 5989f5c096365b117e190c2cf0f69b555666fe19 | e0a78a77a55cd8c5fbc1cac2c353bed1a5e5f51a | refs/heads/master | 2021-01-18T10:45:46.692674 | 2015-02-18T20:28:30 | 2015-02-18T20:28:30 | 30,980,496 | 0 | 0 | null | 2015-02-18T18:45:37 | 2015-02-18T18:45:37 | null | UTF-8 | R | false | false | 730 | r | cachematrix.R | ## Creates a matrix object that can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
  # Closure-held cache: NULL until an inverse is stored via set_inv().
  cached <- NULL
  # Replace the stored matrix and invalidate any stale cached inverse.
  set <- function(new_matrix) {
    x <<- new_matrix
    cached <<- NULL
  }
  # Accessors for the matrix and its cached inverse.
  get <- function() {
    x
  }
  set_inv <- function(inverse) {
    cached <<- inverse
  }
  get_inv <- function() {
    cached
  }
  # Expose the four closures as a named list.
  list(
    set = set,
    get = get,
    set_inv = set_inv,
    get_inv = get_inv
  )
}
## Computes the inverse of the matrix wrapped by makeCacheMatrix().
## A previously computed inverse is returned straight from the cache;
## otherwise the inverse is computed, stored, and returned.
cacheSolve <- function(x, ...) {
  cached <- x$get_inv()
  # Cache hit: announce and return without recomputing.
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  # Cache miss: invert, remember, return.
  inverse <- solve(x$get(), ...)
  x$set_inv(inverse)
  inverse
}
|
0ae438a941f1302ceaaf45ae97f70aa93250403e | 00bf0bbb222c10aae4625b0ed5046d4b8b0e7c37 | /refm/api/src/bigdecimal/math.rd | 3c645bf23759eeccaaae986787eb998409479db3 | [] | no_license | foomin10/doctree | fe6a7097d544104fe71678121e6764d36a4b717a | a95789a60802c8f932c0a3e9ea21a4fc2058beb8 | refs/heads/master | 2023-02-18T20:00:32.001583 | 2023-02-05T00:49:18 | 2023-02-05T00:49:18 | 32,222,138 | 1 | 0 | null | 2015-03-14T16:54:52 | 2015-03-14T16:54:51 | null | UTF-8 | R | false | false | 3,827 | rd | math.rd |
BigDecimalを使った数学的な機能を提供します。
以下の計算が行えます。
* sqrt(x, prec)
* sin (x, prec)
* cos (x, prec)
* atan(x, prec)
* PI (prec)
* E (prec)
引数:
: x
計算対象の BigDecimal オブジェクト。
: prec
計算結果の精度。
#@samplecode 例
require "bigdecimal"
require "bigdecimal/math"
include BigMath
a = BigDecimal((PI(100)/2).to_s)
puts sin(a,100)
#=> 0.99999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999998765917571575217785e0
#@end
= reopen BigMath
== Module Functions
--- sqrt(x, prec) -> BigDecimal
x の平方根を prec で指定した精度で計算します。
@param x 平方根を求める数。
@param prec 計算結果の精度。
@raise FloatDomainError x に 0 以下、もしくは NaN が指定された場合に発生します。
@raise ArgumentError prec に 0 未満が指定された場合に発生します。
#@samplecode
require "bigdecimal/math"
puts BigMath::sqrt(BigDecimal('2'), 10) #=> 0.1414213562373095048666666667e1
#@end
--- sin(x, prec) -> BigDecimal
x の正弦関数を prec で指定した精度で計算します。単位はラジアンです。x
に無限大や NaN を指定した場合には NaN を返します。
@param x 計算対象の BigDecimal オブジェクト。単位はラジアン。
@param prec 計算結果の精度。
@raise ArgumentError prec に 0 以下が指定された場合に発生します。
#@samplecode
require "bigdecimal/math"
puts BigMath::sin(BigDecimal('0.5'), 10) #=> 0.479425538604203000273287935689073955184741e0
#@end
--- cos(x, prec) -> BigDecimal
x の余弦関数を prec で指定した精度で計算します。単位はラジアンです。x
に無限大や NaN を指定した場合には NaN を返します。
@param x 計算対象の BigDecimal オブジェクト。単位はラジアン。
@param prec 計算結果の精度。
@raise ArgumentError prec に 0 以下が指定された場合に発生します。
#@samplecode
require "bigdecimal/math"
puts BigMath::cos(BigDecimal('0.5'), 10) #=> 0.8775825618903727161162815826327690580439923e0
#@end
--- atan(x, prec) -> BigDecimal
x の逆正接関数を prec で指定した精度で計算します。単位はラジアンです。
x に無限大や NaN を指定した場合には NaN を返します。
@param x 計算対象の BigDecimal オブジェクト。単位はラジアン。
@param prec 計算結果の精度。
@raise ArgumentError x の絶対値が1以上の場合に発生します。
@raise ArgumentError prec に 0 以下が指定された場合に発生します。
#@samplecode
require "bigdecimal/math"
puts BigMath::atan(BigDecimal('0.5'), 10) #=> 0.463647609000806116214256237466868871528608e0
#@end
=== 注意
x の絶対値を 0.9999 のような 1 に近すぎる値にすると計算結果が収束しない
可能性があります。
--- PI(prec) -> BigDecimal
円周率を prec で指定した精度で計算します。
@param prec 計算結果の精度。
@raise ArgumentError prec に 0 以下が指定された場合に発生します。
#@samplecode
require "bigdecimal/math"
puts BigMath::PI(2) #=> 0.31415926535897932384671233672993238432e1
puts BigMath::PI(5) #=> 0.31415926535897932384627534923029509162e1
puts BigMath::PI(10) #=> 0.3141592653589793238462643388813853786957412e1
#@end
--- E(prec) -> BigDecimal
自然対数の底 e を prec で指定した精度で計算します。
@param prec 計算結果の精度。
@raise ArgumentError prec に 0 以下が指定された場合に発生します。
#@samplecode
require "bigdecimal/math"
puts BigMath::E(2) #=> 0.27e1
puts BigMath::E(4) #=> 0.2718e1
puts BigMath::E(10) #=> 0.2718281828e1
#@end
|
81f270de2e964fb42c75f226112b3610bb8fb7ee | c820cd1cbfc72cfa539e94fe18e5ed24b909e89b | /percepcao-social/socialperception.R | 038852ae9a0316fd5d4dbbf7dc7968bd7f9e2426 | [] | no_license | crepeia/ead-senad | aad88adfe387e40d8b2049fe29c051706eb7448f | 12071f7e55efd90b5b64d1da8f0a4888dfb3c95d | refs/heads/master | 2021-11-24T16:06:02.848166 | 2021-11-18T12:49:01 | 2021-11-18T12:49:01 | 22,083,027 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,500 | r | socialperception.R | # Libraries ----
library(car) # Function Recode
library(psych) # Function Describe
library(mirt) # Function bfactor - mirt IRT
# Import data ----
# Questions: item text for the 39 scale items; only the labels are kept.
questions <- read.csv("percepcaosocial_questions.csv")
questionsLabels <- as.vector(questions[1:39,]); rm(questions)
# Analysis ----
## Import dataframe (columns 32:70 hold the 39 Likert items; "-" is missing)
socialPer <- read.csv("atitudesEducadores_df.csv", na.strings = "-")
socialPer <- socialPer[, -1]
## Convert items to factors with the five Likert response labels
socialPer[,32:70] <- sapply(socialPer[,32:70], factor, levels = c("Discordo totalmente", "Discordo", "Nem discordo, nem concordo", "Concordo", "Concordo totalmente"))
# Recode items into numeric scores 1-5 (totally disagree ... totally agree)
for (i in 32:70){
socialPer[,i] <- Recode(socialPer[,i], "'Discordo totalmente'=1 ; 'Discordo'=2 ; 'Nem discordo, nem concordo' = 3; 'Concordo' = 4; 'Concordo totalmente' = 5; else = NA")
}
## Sum the items; rows with any NA item get an NA sum, flagging incomplete cases
socialPer$scaleSum <- rowSums(socialPer[,32:70])
## Keep only consenting participants ("Sim") with complete item responses
socialPer <- subset(socialPer, subset=socialPer$termo=="Sim" & !is.na(socialPer$scaleSum))
# Demographics
## Age (idade)
### Clean data: coerce to numeric; treat values outside 18-68 as missing
socialPer$idade <- as.numeric(as.character(socialPer$idade))
socialPer$idade[socialPer$idade < 18 | socialPer$idade > 68 ] <- NA
### Descriptives
summary(socialPer$idade) # all
by(socialPer$idade, socialPer$sexo, describe) #by sex
## Sex: proportion table, largest category first (same pattern below)
cbind(round(prop.table(sort(table(socialPer$sexo), decreasing = TRUE)),2))
## Degree
cbind(round(prop.table(sort(table(socialPer$escolaridade), decreasing = TRUE)),2))
## Marital status
cbind(round(prop.table(sort(table(socialPer$estadocivil), decreasing = TRUE)),2))
## Education
#cbind(round(prop.table(table(socialPer$formacao)),2)) # Broken, needs manual recoding
## Occupation (ocupação)
#cbind(round(prop.table(table(socialPer$ocupacao)),2)) # Broken, needs manual recoding
## Time working (tempodeservico); values above 59 treated as missing
timeWorking <- as.numeric(as.character(socialPer$tempodeservico))
timeWorking[timeWorking > 59] <- NA
summary(timeWorking)
## Religion
cbind(round(prop.table(sort(table(socialPer$religiao), decreasing = TRUE)),2))
## Contact (contatoanterior: prior contact)
cbind(round(prop.table(sort(table(socialPer$contatoanterior), decreasing = TRUE)),2))
## Deal with
cbind(round(prop.table(sort(table(socialPer$lidadiretamente), decreasing = TRUE)),2))
## Where deal with
cbind(round(prop.table(sort(table(socialPer$lida.onde), decreasing = TRUE)),2))
# Scale analysis ---
# Full scale: all 39 items
fullScale <- socialPer[,32:70]
# descriptives
describe(fullScale)
# alpha
alpha(fullScale) # Cronbach's alpha = .87
# EFA ----
## All items ----
## KMO sampling adequacy
KMO(fullScale) # KMO = .89
# Bartlett test of homogeneity # OK
bartlett.test(fullScale)
# Defining the number of factors via parallel analysis
fa.parallel(fullScale, fm="minres", fa="both", ylabel="Eigenvalues") # yields 2 factors
# Factor analysis using polychoric correlations
faAll <- fa.poly(fullScale, nfactors = 2, rotate = "oblimin", fm="minres")
print.psych(faAll, digits=2)
# Diagram
fa.diagram(faAll)
# RESULTADOS (results) #
# Sem fator (no factor): items 9,11,12,13,21,22,25,30,31,36
# Recode negative items (left disabled)
#for (i in c(14,19,28,29,31,33,34,35)){
# fullScale[,i] <- Recode(fullScale[,i], "5=1 ; 4=2 ; 3 = 3; 2 = 4; 1 = 5; else = NA")
#}
# Remove items with low loadings
# NOTE(review): item 18 is dropped here but is not in the "sem fator" list
# above -- confirm this is intentional.
shortScale <- fullScale[, -c(9,11,12,13,18,21,22,25,30,31,36)]
cbind(names(shortScale))
# EFA with shortScale
faShort <- fa.poly(shortScale, nfactors = 2, rotate = "oblimin", fm="minres")
print.psych(faShort, digits=2, cut=0.4)
# EFA with shortScale version 2: drop one more item (column 15 of shortScale)
shortScale2 <- shortScale[, -c(15)]
faShort2 <- fa.poly(shortScale2, nfactors = 2, rotate = "oblimin", fm="minres")
print.psych(faShort2, digits=2, cut=0.4)
# Cronbach's alpha
alpha(shortScale2)
## CFA - Confirmatory factor analysis ---
# The vector assigns each of the 27 retained items to specific factor 1 or 2.
cfa <- bfactor(shortScale2, c(2,2,2,2,2,2,2,2,2,1,2,1,2,1,1,1,1,1,1,1,1,1,1,1,1,1,1))
summary(cfa)
# Comparison fit without the explicit specific-factor assignment.
cfa1f <- bfactor(shortScale2)
itemplot(cfa, 2, shiny = TRUE)
itemplot(cfa, 3, type = 'infotrace')
itemplot(cfa, 1, type = "trace", drop.zeros = TRUE, shiny = TRUE)
### Summary
summary(cfa)
### Coefficients
coef(cfa)
# Factor 1 - feelings items; factor 2 - cognition items.
feelingsF1 <- shortScale2[, c("ps001","ps002","ps003","ps004","ps005","ps006","ps007","ps008","ps010","ps015","ps017")]
cognitionF2 <- shortScale2[, c("ps014", "ps016", "ps019", "ps023", "ps024", "ps026", "ps027","ps028", "ps029","ps032","ps033","ps034","ps035","ps037","ps038","ps039")]
# Graded response IRT model for the 11 feelings items; trace plot per item.
modF1 <- mirt(feelingsF1, 1, itemtype="graded")
plot(modF1, type= 'trace')
for (i in 1:11){
itemplot(modF1, i)
}
|
047fa45b51cccc7bfb0af6106336ae3b06f269bc | 1cfc36a4cfa8c2f7a6c597157b5a21a53ba53b09 | /selection.R | 1a99703cd470065a238d3ac6429c2c9f0a0a2f5f | [] | no_license | Sarnapa/KGA_Clustering | aca7b971dbf7df9341abad80b653e59ed5d7403b | 67094e5f9a3afca8b107f9f4063a4287573a6052 | refs/heads/master | 2020-05-30T17:12:51.577431 | 2019-06-08T11:03:27 | 2019-06-08T11:03:27 | 189,866,383 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 284 | r | selection.R | # Selection process
# Truncation selection: keep the n fittest individuals.
# Returns a list with the surviving 'population' and their 'fitness',
# both ordered best-first.
selection <- function(population, fitness, n){
  # Rank individuals best-first by fitness.
  ranking <- order(fitness, decreasing = TRUE)
  # seq_len() rather than 1:n, so n = 0 selects nothing
  # instead of the c(1, 0) indices produced by 1:0.
  keep <- ranking[seq_len(n)]
  list(population = population[keep], fitness = fitness[keep])
}
5376b407e5fa4fdb2de727a81feab6b72a9bde37 | 73be3ad70da48645e5826425a64ff9e4c5ebae79 | /plot1.R | c359e37745b9b42f9b5b7e465df27092f56c558f | [] | no_license | zbn1827/ExData_Plotting1 | b712e0a0768751a84d1295beedb98fdbe6e94b9c | 7c375a2d5bf39f809f95d1cfdd01cf6cd227997a | refs/heads/master | 2021-01-18T17:08:48.640057 | 2017-04-03T03:13:41 | 2017-04-03T03:13:41 | 86,788,574 | 0 | 0 | null | 2017-03-31T07:02:35 | 2017-03-31T07:02:35 | null | UTF-8 | R | false | false | 1,628 | r | plot1.R | #load library lubridate for easier date convertion
# Plot 1: histogram of Global Active Power for 2007-02-01 and 2007-02-02.
library(lubridate)

# Fetch and unpack the raw dataset into the working directory.
download.file(url = "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", destfile = "data.zip")
unzip("data.zip")

# Load the semicolon-delimited power-consumption table.
household_data <- read.table("household_power_consumption.txt", header = TRUE, sep = ";")

# Keep only the two target days; the Date column is still a factor here,
# so the days are matched as strings before any date conversion.
is_target_day <- household_data$Date == "1/2/2007" | household_data$Date == "2/2/2007"
refined_data <- household_data[is_target_day, ]

# Parse the day/month/year strings into proper Date objects.
refined_data$Date <- dmy(refined_data$Date)

# Columns 3 to 9 were read as factors; coerce each to numeric via character.
for (col_idx in 3:9) {
  refined_data[[col_idx]] <- as.numeric(as.character(refined_data[[col_idx]]))
}

# Render the histogram to a 480x480 PNG and close the device.
png("plot1.png", width = 480, height = 480, units = "px")
hist(refined_data$Global_active_power, col = "red", main = "Global Active Power", xlab = "Global Active Power (kilowatts)")
dev.off()
|
698d936e6532a74f4df7705e86b6684565da9979 | a62bb3501fc7fe06972ef35ecd806eb4f3bf4a46 | /Data/DataAlreadyUploadedToEDI/EDIProductionFiles/MakeEMLZooplankton/MakeEMLZooplankton.R | 39e0de91ab99bd7d527687eda9ceaf8f3c016488 | [] | no_license | arpitadas05/Reservoirs | 5014a4e3b6583bffce6563817ddf670affe572be | f5982948aea4520282b389ee0f402dc81db349fa | refs/heads/master | 2022-12-25T17:57:26.712700 | 2020-09-29T14:51:07 | 2020-09-29T14:51:07 | 299,657,782 | 0 | 0 | null | 2020-09-29T15:23:30 | 2020-09-29T15:23:29 | null | UTF-8 | R | false | false | 1,886 | r | MakeEMLZooplankton.R | #Make Zooplankton EML
#Cayelan Carey, based on EDI workshop 24 May 2018
# One-off interactive script: it installs packages and sets an absolute
# working directory, so it is meant to be run manually, not sourced.
# Install devtools
install.packages("devtools")
# Load devtools
library(devtools)
# Install and load EMLassemblyline
install_github("EDIorg/EMLassemblyline")
library(EMLassemblyline)
# Work inside the folder that holds zooplankton.csv and the EML templates.
setwd("~/Dropbox/ComputerFiles/Virginia_Tech/Falling Creek/DataForWebsite/Github/ReservoirData/Formatted_Data/MakeEMLZooplankton")
data<-read.csv('zooplankton.csv', header=TRUE)
View(data)
# Create the metadata template files for the dataset.
import_templates(path = "~/Dropbox/ComputerFiles/Virginia_Tech/Falling Creek/DataForWebsite/Github/ReservoirData/Formatted_Data/MakeEMLZooplankton",
license = "CCBY", #use CCBY instead of CCBO so that data users need to cite our package
data.files = c("zooplankton")) #csv file name
# Generate the categorical-variables template.
define_catvars(path = "~/Dropbox/ComputerFiles/Virginia_Tech/Falling Creek/DataForWebsite/Github/ReservoirData/Formatted_Data/MakeEMLZooplankton")
# Assemble the final EML document for publication to EDI.
make_eml(path = "/Users/cayelan/Dropbox/ComputerFiles/Virginia_Tech/Falling Creek/DataForWebsite/Github/ReservoirData/Formatted_Data/MakeEMLZooplankton",
dataset.title = "Crustacean zooplankton density and biomass and rotifer density for Beaverdam Reservoir, Carvins Cove Reservoir, Gatewood Reservoir, and Spring Hollow Reservoir in southwestern Virginia, USA 2014-2016",
data.files = c("zooplankton"),
data.files.description = c("Reservoir zooplankton dataset"), #short title
data.files.quote.character = c("\""),
temporal.coverage = c("2014-04-04", "2016-10-25"),
geographic.description = "Southwestern Virginia, USA, North America",
maintenance.description = "completed",
user.id = "carylab0", #your personal ID, will be Carey Lab ID eventually!
package.id = "edi.198.1") #from EDI portal, login, and then reserve a package ID via the
#Data Package Identifier Reservations
|
aa6cd898abab69042b76739cbfad5719971e6782 | 0f8b43d99ce585fc72baf4fa838d94de5ea134b5 | /man/annotateBinsFromGRanges.Rd | 06885d28eb69c8f1186d502680ff805be21f1cfa | [] | no_license | mverbeni/TADdistR | a4b977e076af47e6848fe70ab7f7b6a32c57bf31 | 61f145cea8f7245101a2955df9add788d256a002 | refs/heads/master | 2020-04-05T18:52:39.209596 | 2018-11-11T19:59:58 | 2018-11-11T19:59:58 | 157,115,783 | 1 | 1 | null | null | null | null | UTF-8 | R | false | true | 485 | rd | annotateBinsFromGRanges.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/annotation.R
\name{annotateBinsFromGRanges}
\alias{annotateBinsFromGRanges}
\title{Annotate bins per category}
\usage{
annotateBinsFromGRanges(bins, annotations)
}
\arguments{
\item{bins}{GRanges object with a disjoint partition of the genome}
\item{annotations}{GRanges object with annotations (e.g. genes, lncRNAs)}
}
\value{
Annotation according to category
}
\description{
Annotate bins per category
}
|
b5883420b8a8472b1843d36b63ea4f59cf1299e0 | 16380ddc05e6e9f64290be5727b8b63b74b7e975 | /ui.R | ef0c66da216746ea0ff0d03df050756abcab31e7 | [] | no_license | vlorman/CRB_vis | 3ece4a3db7dbe42b232809153980c9452e28bedb | 1a49d44e623a886e37f0ace7e0a69ecb0bfee5dd | refs/heads/main | 2023-04-08T17:12:44.864773 | 2021-04-15T00:42:07 | 2021-04-15T00:42:07 | 358,078,369 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,846 | r | ui.R | library(datasets)
library(shiny)
library(dplyr)
# Use a fluid Bootstrap layout
fluidPage(
# Give the page a title
titlePanel("What happens to complains made against the Rochester Police Department?"),
# Generate a row with a sidebar
sidebarLayout(
# Define the sidebar with one input
sidebarPanel(
selectInput("Review_Year", "Year:",
choices=c("All","2013","2014","2015","2016")),
# unique(crbreviews_plot_data$Review_Year)),
selectInput("Allegation_Type", "Allegation Type:",
choices=c("All", levels(crbreviews_plot_data$Allegation_Type))),
selectInput("Complaint_Type", "Complaint initiated by:",
choices=c("All", levels(crbreviews_plot_data$Complaint_Type))),
selectInput("Org", "Organization:",
choices=c("CRB","Chief", "PSS")),
hr(),
helpText("One square represents one RPD action in a complaint filed with the Civilian Review Board."),
helpText("Complaints are adjudicated by the Civilian Review Board (CRB), Professional Standards Section (RPD internal affairs, PSS), and by the Chief of RPD"),
helpText("Blue squares represent those complaints that were sustained (i.e. upheld) by the indicated organization (CRB, PSS, or the Chief)"),
helpText("(Each incident may have several allegations, and each allegation may have several actions associated to it.)"),
helpText("Ultimately, final disciplinary authority rests with the Chief of Police."),
helpText("Source: RPD-OBI dataset, available at: http://data-rpdny.opendata.arcgis.com/datasets/crb-reviews"),
helpText("Version 1, 12/4/2017")
),
# Create a spot for the barplot
mainPanel(
uiOutput("plot.ui"),
htmlOutput("rate")
)
)
) |
7627295af0dc74681eed310ba0d817ce4e621494 | cc3028dc544757ac97f0f381280c517e0408b806 | /Models/BiomeBGC-MUSO/RMuso/man/getyearlymax.Rd | 68f01f34c4beda96897bc85c8a8dd31568b77956 | [] | no_license | COST-FP1304-PROFOUND/ProfoundProducts | 06eb746db1dd7b58ae6de3532d3dbdc31c50acdd | 993079fa9aaf285b5c9f8b9ef409c9617e7fc0a7 | refs/heads/master | 2022-01-30T19:15:11.497485 | 2022-01-09T17:04:32 | 2022-01-09T17:04:32 | 80,059,532 | 4 | 2 | null | 2018-04-04T15:40:59 | 2017-01-25T21:26:42 | Fortran | UTF-8 | R | false | false | 480 | rd | getyearlymax.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/other_usefull_functions.R
\name{getyearlymax}
\alias{getyearlymax}
\title{Function for getting the maximum values of the years, from daily data}
\usage{
getyearlymax(daily_observations)
}
\arguments{
\item{daily_observations}{A vector of the daily observations}
}
\value{
A vector of yearly data
}
\description{
Function for getting the maximum values of the years, from daily data
}
\author{
Roland Hollós
}
|
39b0607050ae73f97dbc35eeaf0cd229051aa995 | fd5f74329a767cc073514104ff4eedd24caa20d2 | /r_code/datavis.R | 6bb6727d235bdc574babf10dbf5f5bf9f8250950 | [] | no_license | zembrzuski/envirocar | 35d1cc86e16eb9d9d84e48b5ac6a96b3125071d7 | 40e41efcb68c23904d2985af68916c3d5c977ffc | refs/heads/master | 2021-01-20T00:33:44.215278 | 2017-07-09T23:34:04 | 2017-07-09T23:34:04 | 89,148,986 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 397 | r | datavis.R | library(ggplot2)
library(corrgram)
mycsv <- read.csv('/home/nozes/labs/datavis-article/mycsv.csv', sep = ';', header=TRUE)
colnames(mycsv)
typeof(mycsv)
xoxo <-mycsv[,2:8]
m <- as.matrix(xoxo)
colnames(m)
rownames(m) <- colnames(m)
m
heatmap(as.matrix(m))
library(corrgram)
corrgram(mycsv, order=TRUE, lower.panel=panel.shade,
upper.panel=panel.pie, text.panel=panel.txt)
|
cdc9c01de821a2d869c975138ee586e1dd45a074 | 932bf0b43c291671ff950ac986453aef9ca6af7f | /man/cpl_set_theme.Rd | d5f1a09fccfd443f2bed9f907389c455f9ef0cd0 | [] | no_license | californiapolicylab/cplthemes | 2794cdc438c276fb22c20c69e190e770e77d462b | cb922951976e4867bdfa3ce81e774c9ab767e0f7 | refs/heads/main | 2023-07-13T01:01:05.663956 | 2021-08-12T16:10:30 | 2021-08-12T16:10:30 | 384,549,202 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 613 | rd | cpl_set_theme.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cpl_set_theme.R
\name{cpl_set_theme}
\alias{cpl_set_theme}
\title{Makes the custom CPL theme the default theme for ggplot}
\usage{
cpl_set_theme(
color_schema = "brief",
base_size = 12,
base_family = "Arial",
base_line_size = 0.5,
base_rect_size = 0.5
)
}
\arguments{
\item{color_schema}{Color schema to apply. Default is "brief".}
\item{base_size}{Base font size. Default is 12.}
\item{base_family}{Font family. Default is Arial.}
\item{base_line_size}{Base line size. Default is 0.5.}
\item{base_rect_size}{Base rect size. Default is 0.5.}
}
\value{
}
\description{
Makes the custom CPL theme the default theme for ggplot
}
|
167ce69d060c49e23756eeede0ba1705874811f3 | 2d34708b03cdf802018f17d0ba150df6772b6897 | /googledeploymentmanageralpha.auto/man/TemplateContents.Rd | 21c9a6fab641526e7089901fd9aeb959b60b2e10 | [
"MIT"
] | permissive | GVersteeg/autoGoogleAPI | 8b3dda19fae2f012e11b3a18a330a4d0da474921 | f4850822230ef2f5552c9a5f42e397d9ae027a18 | refs/heads/master | 2020-09-28T20:20:58.023495 | 2017-03-05T19:50:39 | 2017-03-05T19:50:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 772 | rd | TemplateContents.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/deploymentmanager_objects.R
\name{TemplateContents}
\alias{TemplateContents}
\title{TemplateContents Object}
\usage{
TemplateContents(imports = NULL, interpreter = NULL, schema = NULL,
template = NULL)
}
\arguments{
\item{imports}{Import files referenced by the main template}
\item{interpreter}{Which interpreter (python or jinja) should be used during expansion}
\item{schema}{The contents of the template schema}
\item{template}{The contents of the main template file}
}
\value{
TemplateContents object
}
\description{
TemplateContents Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Files that make up the template contents of a template type.
}
|
b80c4a26914f24ec3f9f2b108772aecea6307c62 | 77157987168fc6a0827df2ecdd55104813be77b1 | /COMPoissonReg/inst/testfiles/qzicmp_cpp/libFuzzer_qzicmp_cpp/qzicmp_cpp_valgrind_files/1612729201-test.R | 7531ab43537999259bb8adda6f7b70be4f63b7a2 | [] | no_license | akhikolla/updatedatatype-list2 | e8758b374f9a18fd3ef07664f1150e14a2e4c3d8 | a3a519440e02d89640c75207c73c1456cf86487d | refs/heads/master | 2023-03-21T13:17:13.762823 | 2021-03-20T15:46:49 | 2021-03-20T15:46:49 | 349,766,184 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 831 | r | 1612729201-test.R | testlist <- list(lambda = numeric(0), logq = c(9.34531021619517e-307, 3.67142983950248e+228, 3.22221023302038e-115, 2.46003930923884e+198, -3.40300006196655e-161, -3.40300006195676e-161, -5.37545598107191e+306, 2.25234192049871e-309, NaN, NaN, 9.3470485446822e-307, 1.13195565743196e-72, 4.95400018337535e-317, 2.84809997651136e-306, -9.45657241940647e-308, 1.53828278692384e-154, 2.88216599299855e-306, 9.10739414969616e-310, -5.87275939812147e-21, -5.87276176762982e-21, 2.88267589510685e-306, 9.366540590096e-97, -1.65454495223691e-24, -5.87276176762982e-21, 1.10343748751108e-312, 3.0261004369599e-306, 3.23785921002061e-319, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), nu = numeric(0), p = numeric(0), tol = 0)
result <- do.call(COMPoissonReg:::qzicmp_cpp,testlist)
str(result) |
2b0d355ab3bcd87e7d5317ce672972497047f150 | d5c01c52117d24cea10594e3a97bf66392abb90b | /RScriptCourse5.R | 8d16d02f4010d378222afb449fe2d1fab1b925ac | [] | no_license | kdejonge/RepData_PeerAssessment1 | 8f4482a87dd5fc419d2d28831eb5e87e336243bc | a0f19118bc19bc3eed1370e002133f1763eb1997 | refs/heads/master | 2020-03-18T09:22:00.825244 | 2018-05-23T14:46:02 | 2018-05-23T14:46:02 | 134,559,432 | 0 | 0 | null | 2018-05-23T11:26:37 | 2018-05-23T11:26:36 | null | UTF-8 | R | false | false | 2,612 | r | RScriptCourse5.R | # Set workspace and load data
setwd("D:/OneDrive - Esri Nederland/Kristin de Jonge/Studie/DataScience/Data Science Specialization/Course5")
data <- read.csv("activity.csv")
# Check data
head(data)
summary(data)
# Complete data
mydata <- as.data.frame(data[complete.cases(data), ])
summary(mydata)
# Total steps a day
TotalStepsaDay <- tapply(mydata$steps, mydata$date, sum)
hist(TotalStepsaDay)
# Mean total steps a day
meanTotStepsDay <- mean(TotalStepsaDay, na.rm = TRUE)
meanTotStepsDay
# Median total steps a day
medianTotStepsDay <- median(TotalStepsaDay, na.rm = TRUE)
medianTotStepsDay
# Average Daily activity pattern
avgDailActPatt <- aggregate(mydata$steps, by = list(mydata$interval), FUN = mean)
summary(avgDailActPatt)
plot(avgDailActPatt[, 1], avgDailActPatt[, 2], type = "l", xlab = "5 min Intervals in a day", ylab = "Average Number of Steps", main = "The Average Daily Activity Pattern")
maxStepsIntervalTime <- avgDailActPatt[which.max(avgDailActPatt[, 2]), 1]
maxStepsIntervalTime
# Missing Values
summary(data)
sum(!complete.cases(data))
# Fill missing values with the mean
mydata2 <- data
data1 = nrow(mydata2)
data2 = nrow(avgDailActPatt)
for (i in 1:data1) {
if (is.na(mydata2$steps[i])) {
for (j in 1:data2) {
if (mydata2$interval[i] == avgDailActPatt[j, 1]) {
mydata2$steps[i] = avgDailActPatt[j, 2]
}
}
}
}
summary(mydata2)
# Recalculate total, mean and median
totalStepsaDay2 <- tapply(mydata2$steps, mydata2$date, sum)
hist(totalStepsaDay2)
meanTotStepsDay <- mean(totalStepsaDay2, na.rm = TRUE)
meanTotStepsDay
medianTotStepsDay <- median(totalStepsaDay2, na.rm = TRUE)
medianTotStepsDay
# Week and weekday
# add new column
mydata$weekday = TRUE
# fill new column
weekday <- weekdays(as.POSIXct(mydata$date, format = "%Y-%m-%d" ))
for (i in 1:length(weekday)) {
if (weekday[i] == "Saturday" | weekday[i] == "Sunday") {
mydata$weekday[i] = FALSE
}
}
Weekday <- mydata[which(mydata$weekday == TRUE), ]
Weekend <-mydata[which(mydata$weekday == FALSE), ]
avgWeekdayPatt <- aggregate(Weekday$steps, by = list(Weekday$interval), FUN = mean)
names(avgWeekdayPatt) <- c("interval", "steps")
avgWeekdayPatt$dayTag <- "weekday"
avgWeekendPatt <- aggregate(Weekend$steps, by = list(Weekend$interval), FUN = mean)
names(avgWeekendPatt) <- c("interval", "steps")
avgWeekendPatt$dayTag <- "weekend"
avgPatt <- rbind(avgWeekdayPatt, avgWeekendPatt)
library(lattice)
xyplot(steps ~ interval | dayTag, data = avgPatt, type = "l", layout = c(1, 2))
|
e39ac9d08ac595b6f519db9d717245e0a8f85813 | 8c382ab97d22269542f5f0d1cee4781dd306abde | /master-thesis/codes/CodonTable.R | 24e91019b0f28ba93da9d38bc4f5b11e25d313c4 | [] | no_license | fredysiegrist/thesis | 2bc4c13d5e40888ad7255532a9992c299663c26b | e2bed48aacfd9beb8c808711f18c52f0d94e2515 | refs/heads/master | 2021-01-09T20:11:33.130444 | 2016-08-18T15:15:41 | 2016-08-18T15:15:41 | 65,204,362 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 873 | r | CodonTable.R | require(statanacoseq)
require(seqinr)
verified <- mylist("/windows/R/Round3.all.maker.transcripts.verified.fasta", "/windows/R/Round3.all.maker.proteins.verified.fasta", whatout=1)
# the process is killed when using to many sequences, so it is divided in four parts and added together later
tabl <- uco(verified[[1]], 0, "eff")
for (n in 2:10000) { #length(verified)
tabl <- tabl + uco(verified[[n]], 0, "eff")
}
tabl1 <- uco(verified[[10001]], 0, "eff")
for (n in 10002:20000) {
tabl1 <- tabl + uco(verified[[n]], 0, "eff")
}
tabl2 <- uco(verified[[20001]], 0, "eff")
for (n in 20002:30000) {
tabl2 <- tabl + uco(verified[[n]], 0, "eff")
}
tabl3 <- uco(verified[[30001]], 0, "eff")
for (n in 30002:41988) {
tabl3 <- tabl + uco(verified[[n]], 0, "eff")
}
tabla <- tabl+tabl1+tabl2+tabl3
perc <- round(tabla/nuu*100,2)
(rbind(tabla, perc))
|
5300ed431efe67966d54148a350a8a8258d0055e | 5138db926647742e4b39a6b58f8c8a992b2578c3 | /simulation/test-ranking.R | 345af7fe1af2a4032194a51fd4aa63283d39a3bc | [] | no_license | helenecharlotte/grfCausalSearch | 9863f4a1009832b9b8f1aa03bb2f2f9e2c820e5f | e14badd8f15dfc4e1067b2198de0f945b5cb3fd8 | refs/heads/master | 2023-01-20T04:59:30.175316 | 2023-01-10T10:55:42 | 2023-01-10T10:55:42 | 217,540,473 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,588 | r | test-ranking.R | try(setwd("~/research/SoftWare/grfCausalSearch/"),silent=TRUE)
# Simulation study for the "ranking" theme of the grfCausalSearch project:
# repeatedly simulates competing-risks data and compares causal-forest ATE
# estimates under three weighting schemes (Kaplan-Meier vs. two ranger-based
# censoring models).  Relies on project functions sourced from ./R and
# ./functions (theTruth, simulateData, causalhunter, weighter, ...) and on
# target definitions from ./setting/simulation_targets.R.
# Try both known project locations; whichever exists wins.
try(setwd("~/research/SoftWare/grfCausalSearch/"),silent=TRUE)
try(setwd("/maps/projects/biostat01/people/grb615/research/SoftWare/grfCausalSearch"),silent=TRUE)
library(targets)
## e <- tar_read(ESTIMATE_ATE)
## e[,n:=factor(n,levels=c("500","1000","2000","5000"),labels=c("500","1000","2000","5000"))]
## e[,net:=factor(net,levels=c("0","1"),labels=c("0","1"))]
## e <- e[theme == "ranking"]
## e2 <- e[intervene == "A2"&A2_T2 == 2]
## ggplot(e2,aes(y = ate,group = net,color = net))+geom_boxplot()+facet_grid(~n)
library(parallel)
library(targets)
library(tarchetypes)
library(ranger)
library(survival)
source("./setting/simulation_targets.R")
# Source every project function (these define theTruth, simulateData,
# causalhunter, weighter, f, fixed, varying_ranking, ...).
for(f in list.files("R",".R$",full.names=TRUE)){source(f)}
for(f in list.files("functions",".R$",full.names=TRUE)){source(f)}
# Pick one scenario row from the pre-defined grid, then override a few knobs.
this <- varying_ranking[A2_T2 == 2&sample.size == 1000&net == TRUE]
this$A2_T2 <- .1
this$horizon <- 5
library(data.table)
set.seed(8159)
REPETITIONS <- 1:20
MC <- 5
## formula_ranking <- list(T1 ~ f(X1,0) + f(X2,0) + f(X6,0),T2 ~ f(X1,0) + f(X2,0) + f(X6,0),C ~ f(A1,0),A1 ~ f(X1,0) + f(X6,0) + f(A7,0))
# Data-generating formulas: effects of X1/X2/X6 on event times T1/T2,
# censoring C, and treatment A1 (coefficients are the second f() argument).
formula_ranking <- list(T1 ~ f(X1,1) + f(X2,.3) + f(X6,.3),T2 ~ f(X1,-.1) + f(X2,.6) + f(X6,.1),C ~ f(A1,0),A1 ~ f(X1,-1) + f(X6,.7) + f(A7,.2))
## formula_ranking <- list(T1 ~ f(X1,1) + f(X2,.3) + f(X6,.3),T2 ~ f(X1,-.1) + f(X2,.3) + f(X6,-0.1),C ~ f(A1,0),A1 ~ f(X1,-1) + f(X6,.7) + f(A7,.2))
this$scale.censored <- -Inf
this$num.trees <- 50
# Outer lapply over the net-effect flag (only FALSE here); inner mclapply
# runs the repetitions in parallel and row-binds one data.table per rep.
Y <- do.call("rbind",lapply(c(FALSE),function(net){
do.call("rbind",mclapply(REPETITIONS,function(b){
print(b)
set = fixed
set$formula.list = formula_ranking
# Large-sample "truth" for the A2 effect on cause 1.
tt = theTruth(setting = set,A1_T1 = this$A1_T1,A1_T2 = this$A1_T2,A2_T1 = this$A2_T1,A2_T2 = this$A2_T2,horizon = this$horizon,scale.censored = this$scale.censored,B = 1,cores = 1,n = 100000)[intervene == "A2"&cause = 1]
simulated_data <- simulateData(setting = set,A1_T1 = this$A1_T1,A1_T2 = this$A1_T2,A2_T1 = this$A2_T1,A2_T2 = this$A2_T2,n = this$sample.size,scale.censored = this$scale.censored,keep.latent = FALSE)
## ff = Hist(time,event)~intervene(A1)+intervene(A2)+intervene(A3)+intervene(A4)+intervene(A5)+intervene(A6)+intervene(A7)+intervene(A8)+intervene(A9)+intervene(A10)+X1+X2+X3+X4+X5+X6+X7
# Only A2 is marked as the intervention variable of interest.
ff = Hist(time,event)~A1+intervene(A2)+A3+A4+A5+A6+A7+A8+A9+A10+X1+X2+X3+X4+X5+X6+X7
## w1 = weighter(formula = Hist(time,event)~A1+A2,data = simulated_data,CR.as.censoring = 1,times = 5,method = "km")
## w2 = weighter(formula = Hist(time,event)~A1+A2,data = simulated_data,CR.as.censoring = 1,times = 5,method = "ranger")
## cbind(w1[w1 != 0],w2[w2 != 0])
## plot(w1[w1 != 0],w2[w2 != 0])
## cbind(w1,w2)
# Three estimators differing only in the censoring-weight model:
# x1 Kaplan-Meier weights, x2/x3 ranger weights with full covariate set.
x1 <- causalhunter(formula=ff,method = "causal_forest",weighter="km",args.weight = list(num.trees = this$num.trees),num.trees=this$num.trees,CR.as.censoring = net,data=simulated_data,times=this$horizon,formula.weight = Hist(time,event)~A1+A2)
x2 <- causalhunter(formula=ff,method = "causal_forest",weighter="ranger",args.weight = list(num.trees = this$num.trees,alpha = 0.05,mtry = 17),fit.separate = TRUE,num.trees=this$num.trees,CR.as.censoring = net,data=simulated_data,times=this$horizon,formula.weight = Hist(time,event)~A1+A2+A3+A4+A5+A6+A7+A8+A9+A10+X1+X2+X3+X4+X5+X6+X7)
x3 <- causalhunter(formula=ff,method = "causal_forest",weighter="ranger",args.weight = list(num.trees = 50,alpha = 0.05,mtry = 17),fit.separate = TRUE,num.trees=this$num.trees,CR.as.censoring = net,data=simulated_data,times=this$horizon,formula.weight = Hist(time,event)~A1+A2+A3+A4+A5+A6+A7+A8+A9+A10+X1+X2+X3+X4+X5+X6+X7)
## formula.weight = Hist(time,event)~A1+A2+A3+A4+A5+A6+A7+A8+A9+A10+X1+X2+X3+X4+X5+X6+X7)
## x[,rank := rank(-abs(ate))]
## x <- cbind(x,data.table(n = this$sample.size,net = as.numeric(net),scale.censored = this$scale.censored,num.trees = this$num.trees,method = "causal_forest",A1_T1 = this$A1_T1,A1_T2 = this$A1_T2,A2_T1 = this$A2_T1,A2_T2 = this$A2_T2,formula = "this",theme = "ranking"))
# One row per intervention variable: ATE under each weighting scheme.
x <- cbind(x1[,data.table::data.table(intervene,km = ate,net = net)],x2[,data.table::data.table(ranger = ate)],x3[,data.table::data.table(ranger2 = ate)])
## if (net == 1) x = cbind(x,truth = tt[net == 1]$ate) else x = cbind(x,truth = tt[net == 0]$ate)
x
},mc.cores = MC))}))
# Average ATE per weighting scheme across repetitions.
y <- Y[,data.table::data.table(ranger,ranger2,km,net)]
y[,data.table::data.table(km = mean(km),ranger = mean(ranger),ranger2 = mean(ranger2)),by = net]
# NOTE(review): Y built above has columns km/ranger/ranger2, not `ate`/`n`;
# the two lines below look like leftovers from an older output format --
# confirm they still run.
y1 <- Y[intervene == "A1",data.table::data.table(ate,net,n)]
y1[,mean(ate),by = net]
139741a5dc3e9662ad2f6cec77ca86fd4033f50a | 09b7ed67240880a82e7da204914699d416f45089 | /plot3.R | 9c8f2f137881d24c0b44abaca9b4ec5bae982bd3 | [] | no_license | lmorenon/ExData_Plotting1 | aaa5df5272c598a556d1109d605823727364e9e0 | 212e09e0432d6938c6b822eb31daaa4d32d2bc65 | refs/heads/master | 2022-11-19T23:20:23.607399 | 2020-07-22T03:18:12 | 2020-07-22T03:18:12 | 281,473,939 | 0 | 0 | null | 2020-07-21T18:26:45 | 2020-07-21T18:26:44 | null | UTF-8 | R | false | false | 1,413 | r | plot3.R | ###Course: Exploratory Data Analysis (Coursera) ###
###Project 1#######
### Made by Leonardo Moreno ####
## In this script i will try to reconstruct the graphics detailed in the asignment
## Graph 3
## Load the dataset
library(dplyr)
library(lubridate)
library(readr)
library(zoo)
Sys.setlocale("LC_ALL","English")
Sys.setlocale("LC_TIME", "English")
Graph_DB<- read_delim("household_power_consumption.txt", ";", escape_double = FALSE,
col_types = cols(Date = col_datetime(format = "%d/%m/%Y")), trim_ws = TRUE)
subset3<-Graph_DB[Graph_DB$Date >= "2007-01-31" & Graph_DB$Date <= "2007-02-02",]
HDS<-as.POSIXct(paste(subset2$Date, subset3$Time), format="%Y-%m-%d %H:%M:%S")
subset3<-cbind(subset3,HDS)
ts.met1 <- zoo(subset3$Sub_metering_1, order.by = subset3$HDS) ## transform to time series object
ts.met2 <- zoo(subset3$Sub_metering_2, order.by = subset3$HDS) ## transform to timse series object
ts.met3 <- zoo(subset3$Sub_metering_3, order.by = subset3$HDS) ## transform to timse series object
ts.gen<- cbind(ts.met1,ts.met2,ts.met3)
## Graph Code
png(file="plot3.png", width=450, height=450)
plot3<-plot(ts.gen,screens = 3,xlab="",ylab="Energy sub metering")
lines(ts.gen$ts.met1)
lines(ts.gen$ts.met2,col="red")
lines(ts.gen$ts.met3,col="blue")
legend("topright", lty=1, col=c("black","red","blue"),legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
dev.off() |
8e906bb876789ab50cb73bdf02f6a2064d2aff35 | 576d9ea86b038b70153d39a0332bcf0ff065d93b | /local_example.R | c99b3b9aaca765c0e492a469176da4d6ac639046 | [
"MIT"
] | permissive | Winston-Gong/test_for_containment_release | 038ec1cd8f3cca46734e4ca02139bc14138e3d6b | b10e4c996b5f3c04a016505803759a1fbadd647a | refs/heads/main | 2023-03-27T22:19:57.012148 | 2021-03-25T07:34:14 | 2021-03-25T07:34:14 | 346,427,335 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 14,682 | r | local_example.R | code_version<-"Multisetting_20200519"
############################################
# change following parameters for different setting and social distancing
setting <- 2 # rural simulation; 2 for urban, 3 for slum
country_id <- 1 # we use Uganda demographic for demonstration
social_distancing_flg <- 1 # no social distancing
library(reshape2)
library(ggplot2)
# library(tidyverse)
suppressMessages(library(ergm))
para <- list()
para$setting <- setting # 1=rural 2=affluent 3=slum
# Environment setup
para$pop_sz <- 200 # simulation a smaller community
para$Non_HH_CC_rate <- c(1,.8,.6,.4,.2)[social_distancing_flg]
community_setting <- c("Rural", "Non-slum urban", "Slum")[setting]
print(paste0("Simulate ",para$pop_sz," individuals in ",community_setting," setting"))
##########################################
# loading country specific variables:
# age distribution & household
##########################################
if(country_id == 1){
para$age_dist <- c(0.481, 0.203, 0.316) # Uganda
para$HH_dist <- c(.11, .22, .27, .4) # Uganda
}else if(country_id == 2){
para$age_dist <- c(0.292, 0.193, 0.515) # South africa
para$HH_dist <- c(.27, .35, .23, .15) # South Africa
}else if(country_id == 3){
para$age_dist <- c(0.419, 0.195, 0.386) # kenya
para$HH_dist <- c(.19, .28, .3, .23) # kenya
}else if(country_id == 4){
para$age_dist <- c(0.440, 0.190, 0.370) # nigeria
para$HH_dist <- c(.16, .26, .26, .32) # nigeria
}
##########################################
# processing demographic information;
# for details: refer to the supplementary methods
##########################################
# Per-setting contact structure: daily close contacts, household size,
# and the share of close contacts that happen inside the household.
# para$HH_affluent_dist <- c(.31, .5, .18, .02) # UK
if (para$setting==1) {
para$num_cc <- 7 # set daily close contact to be 7
para$family_sz <- 5 # average household size 5
# set the percentage of HH_cc
para$percent_HH_cc <- .5
}else if (para$setting==2) {
para$num_cc <- 13
para$family_sz <- 5
para$percent_HH_cc <- .23
}else if (para$setting==3) {
para$num_cc <- 14
para$family_sz <- 15
para$percent_HH_cc <- .5
para$HH_dist <- c(.00, .06, .17, .77) # afganistan
}else print ("Parameter setting error")
# Assign an age group (1..3) to every simulated individual, matching age_dist.
para$AGE <- unlist(sapply(1:length(para$age_dist), function(x) rep(x,round(para$age_dist[x] * para$pop_sz))))
stopifnot(length(para$AGE) == para$pop_sz)
# Age-mixing matrices: Uganda ships pre-collapsed vectors; other countries
# provide 16x16 survey matrices that are collapsed to 3x3 below.
if(country_id == 1){
age_mix <- read.csv("Uganda_age_mixture_population_level.csv")
para$age_mix <- age_mix$total_age
para$Home_age_mix <- age_mix$home_age
}else{
if(country_id == 2){
AGE_matrix <- read.csv("south_africa_age.csv", header = F)
HOME_age_matrix <- read.csv("home_south_africa_age.csv", header = F)
}else if(country_id == 3){
AGE_matrix <- read.csv("kenya_age.csv", header = F) # kenya
HOME_age_matrix <- read.csv("home_kenya_age.csv", header = F) # kenya
}
else if(country_id == 4){
AGE_matrix <- read.csv("Nigeria_age.csv", header = F) # nigeria
HOME_age_matrix <- read.csv("home_Nigeria_age.csv", header = F) # Nigeria
}
# the age mixing matrix need to collapse into 3*3 since we only have age distribution for 3 groups.
## ## ## ##
## this step is important: the contact matrix is not symmetrical: row and columns represent different things
## ## ## ##
# the columns represent contact, we take colSums
# the rows are the participant, we take the average number of contact for the participant: rowMeans
age <- cbind(rowMeans(AGE_matrix[,1:3]),rowMeans(AGE_matrix[,4:5]),rowMeans(AGE_matrix[,6:16]))
AGE_matrix <- rbind(colSums(age[1:3,]),colSums(age[4:5,]),colSums(age[6:16,]))
# weight each column by the age distribution: we have to change the matrix to reflect the age distribution in the population
AGE_matrix <- (as.matrix(rep(1,length(para$age_dist))) %*% t(para$age_dist)) * AGE_matrix
# matrix in the end should be the number of contact for each age group pair
AGE_matrix <- (AGE_matrix + t(AGE_matrix))/2
# Keep only the upper triangle (incl. diagonal) as a flat vector.
para$age_mix <- as.matrix(AGE_matrix)[which(upper.tri(AGE_matrix,diag = T))]
# Same collapse/symmetrise steps for the at-home contact matrix.
age <- cbind(rowMeans(HOME_age_matrix[,1:3]),rowMeans(HOME_age_matrix[,4:5]),rowMeans(HOME_age_matrix[,6:16]))
HOME_age_matrix <- rbind(colSums(age[1:3,]),colSums(age[4:5,]),colSums(age[6:16,]))
HOME_age_matrix <- (as.matrix(rep(1,length(para$age_dist))) %*% t(para$age_dist)) * HOME_age_matrix
# matrix in the end should be a triangle one
HOME_age_matrix <- (HOME_age_matrix + t(HOME_age_matrix))/2
para$Home_age_mix <- as.matrix(HOME_age_matrix)[which(upper.tri(HOME_age_matrix,diag = T))]
}
# adjust the age matrix to represent the specified household contact rate
para$age_mix <- para$Home_age_mix + (para$age_mix - para$Home_age_mix) *
sum(para$Home_age_mix)/sum(para$age_mix - para$Home_age_mix) * (1-para$percent_HH_cc)/para$percent_HH_cc
# adjust R0 for 1) young people susceptibility 2) subclinical cases
# adjust for the social distancing: only non-household contacts are reduced.
para$num_cc_scdst <- para$num_cc * ((1-para$percent_HH_cc)*para$Non_HH_CC_rate + para$percent_HH_cc) # reduce the number of cc
para$age_mix_scdst <- para$Home_age_mix + (para$age_mix - para$Home_age_mix) * para$Non_HH_CC_rate
para$percent_HH_cc_scdst <- para$percent_HH_cc/((1-para$percent_HH_cc)*para$Non_HH_CC_rate + para$percent_HH_cc)
#####################
# generate synthetic population
# Project functions (network_generate, theTruth of the contact structure, ...)
# live in the versioned functions file next to this script.
source(paste0(code_version,"_functions.R"),print.eval =F)
# generate the contact networks, each one represent a uniquely sampled population (households are formed via sampling the family size distribution)
# Clustering target passed to the ERGM fit; setting-specific.
if(setting ==1 ){
searched_clustering_number <- 200
}else if(setting ==2 ){
searched_clustering_number <- 100
}else if(setting ==3 ){
searched_clustering_number <- 100
}
print("Construct the synthetic population: this will take a few minutes")
nw_para <- network_generate(para, searched_clustering_number)
searched_clustering_number <- nw_para[[3]] # reduce the searched number if it did not converge
# Simulate 100 networks from the fitted model and compare their mean
# statistics with the ERGM target statistics (goodness-of-fit check).
ego.sim100 <- simulate(nw_para[[1]],nsim = 100)
sim.stats <- attr(ego.sim100,"stats")
trgt <- rbind(colMeans(sim.stats), nw_para[[1]]$target.stats)
deviation_target_statistics <- mean(abs(trgt[1,] - trgt[2,])/trgt[2,])
print("Plot one sample of the contact network")
png(filename = paste0("network_example_",community_setting,"_set.png"), width = 1000, height = 1000)
plot(simulate(nw_para[[1]]))
dev.off()
# Persist the fitted model plus its fit diagnostics for reuse.
NW_SIM <- list(nw_para, deviation_target_statistics, nw_para[[3]])
save(NW_SIM, file = paste0("network_example_",community_setting,"_set.Rdata"))
###########################################
# simulate a few outbreaks
###########################################
len_sim <- 100 # length of simulation
num_rep <- 20
para$E_0 <- 2 # number of initial importation
para$infect_sz <- (-1)*para$pop_sz/200 # containment intervention only starts after X per 1000 individuals are already infected
# Baseline per-contact transmissibility calibrated from an affluent (UK-like)
# setting: R of 2.7 spread over 13 daily close contacts.
R_UK <- 2.7
num_cc_UK <- 13
R0_baseline <- R_UK/num_cc_UK # the R0 is measured in affluent region
# Transmission parameter & tool availability
if (para$setting==1) {
para$symptom_report <- 0.7 # precentage infected report symptom
para$R0 <- R0_baseline * para$num_cc # R naught
para$theta <- 0.1 # quarantine effect: precentage of remaining transmission quarantined individual have
para$pcr_available <- -1 # daily maximum PCR tests per 1000 population.
para$ppe_coef <- 1 # if people wear ppe, then their transmissibility will be different outside their family
}else if (para$setting==2) {
para$symptom_report <- 0.7
para$R0 <- R0_baseline * para$num_cc
para$theta <- 0.1
para$pcr_available <-1000*para$pop_sz/1000
para$ppe_coef <- 1
}else if (para$setting==3) {
para$symptom_report <- 0.6
para$R0 <- R0_baseline * para$num_cc
para$theta <- 0.2
para$pcr_available <-2*para$pop_sz/1000
para$ppe_coef <- 1
}else print ("Parameter setting error")
# subclinical rate
para$sub_clini_rate <- 0.3
para$asym_rate <- 0.2 # transmission rate of asymptomatic patient
# Parameters about Infected pts
para$ab_test_rate <- 0.7 # % accepted ab testing among detected (symptomatic) patients
para$pcr_test_rate <- 0.8 # % accepted pcr testing among detected (symptomatic) patients
para$onsetiso <- 0.2 # Isolation compliance rate at onset based on symptom
para$abiso <-0.9 # Isolation compliance rate among ab testing positive
para$pcriso <-0.9 # Isolation compliance rate among pcr testing positive
para$delay_symptom <- 1 # days of delay after onset to detect symptomatic patients
para$delay_ab <- 8 # days of delay after onset to receive ab test and obtain result
para$delay_pcr <- 5 # days of delay after onset to report pcr test result
# Parameters about tracing contects
para$tracing_cc_onset <- 3 # set how many days we trace close contact back after symptom-based patient detection
para$tracing_cc_ab <- para$delay_ab # set how many days we trace close contact back after a positive ab_test
para$tracing_cc_pcr <- para$delay_pcr # set how many days we trace close contact back after a positive ab_test
para$cc_success_symptom <- 0.85 # precentage close contact successfully traced after symptom-based patient detection
para$cc_success_ab <- 0.75 # precentage close contact successfully traced after positive ab test
para$cc_success_pcr <- 0.80 # precentage close contact successfully traced after positive pcr test
para$qrate_symptom <- 0.5 # CC quarantine compliance rate based on symptom
para$qrate_ab <- 0.9 # CC quarantine compliance rate based on positive ab test
para$qrate_pcr <- 0.9 # CC quarantine compliance rate based on positive pcr test
# Parameters about testing tools
para$ab_rate <- function(x, t_onset) 1/(1 + exp(7+t_onset-x)) # seroconversion rate from infection day, based on the clinical paper from Yumei Wen
para$sensitivity_ab <- 0.9 # ab test sensitivity
para$sensitivity_pcr <- 0.999 # pcr test sensitivity
para$samplefailure_pcr <- 0.0 # pcr sampling failure
para$sensitivity_antig <- 0.9 # antigen sensitivity is 0.8
#########################################################################
# adjust R0 for 1) young people susceptibility 2) subclinical cases
#########################################################################
# ajust for R0
# norm1 <- (sum(para$age_mix) - para$age_mix[c(1)]/2- sum(para$age_mix[c(2,4)])/4)/sum(para$age_mix)
# using next generation matrix to compute norm1; only kept terms connected with young people susceptibility
Cyy <- para$age_mix[c(1)] # number of comtact for young <--> young
Coy <- sum(para$age_mix[c(2,4)]) # number of comtact for young <--> old
Coo <- sum(para$age_mix[c(3,5,6)]) # number of comtact for old <--> old
Ny <- para$age_dist[1] # number of young people, Cyy/Ny is the average number of young contact for a yong person
No <- sum(para$age_dist[c(2,3)]) # number of young people, Cyy/Ny is the average number of young contact for a yong person
# Spectral radius of the next-generation matrix with halved young
# susceptibility ...
y_sus <- 0.5 # susceptability of young person
NGM <- matrix(c(y_sus * Cyy/Ny, Coy/Ny, y_sus * Coy/No, Coo/No) , nrow = 2)
trNGM <- sum(diag(NGM))
detNGM <- det(NGM)
Spectral_radius_half <- trNGM + (trNGM^2 - 4*detNGM )^0.5
# ... versus full young susceptibility; their ratio is the correction norm1.
y_sus <- 1 # susceptability of young person
NGM <- matrix(c(y_sus * Cyy/Ny, Coy/Ny, y_sus * Coy/No, Coo/No) , nrow = 2)
trNGM <- sum(diag(NGM))
detNGM <- det(NGM)
Spectral_radius_1 <- trNGM + (trNGM^2 - 4*detNGM )^0.5
norm1 <- Spectral_radius_half/Spectral_radius_1
# norm2 account for the redution of the low transmission rate of asymptomatic cases
norm2 <- 1 - para$sub_clini_rate * (1 - para$asym_rate)
para$R0_adj <- para$R0/(norm1 * norm2)
##################################
# compute Re
norm_age_mix_scdst <- para$age_mix_scdst/sum(para$age_mix_scdst)
para$Re <- para$R0_adj/para$num_cc *
((1- norm_age_mix_scdst[c(1)]/2- sum(norm_age_mix_scdst[c(2,4)])/4) * para$num_cc_scdst) *
norm2 # approximate Re (haven't taken age-susceptibility into account)
##################################
###############################################
# simulate an outbreak
###############################################
# Run num_rep independent outbreaks of len_sim days each, recording daily
# new infections and an approximate daily reproduction number.
all_new_daily <- matrix(nrow = num_rep, ncol = len_sim)
Rt <- matrix(nrow = num_rep, ncol = len_sim)
for(rp in 1:num_rep){
est1 <- nw_para[[1]]
print(paste("Running Setting=",community_setting,", Scenario 1, Rep: ",rp))
# C is the contact matrix, which is traced for 7 days. If i,j element is 7, it means the latest contact of i,j is today,
# if it is 0, then there is no contact between i,j in the past 7 days,
C <- matrix(0, para$pop_sz,para$pop_sz)
# I is a vector indicating when an individual has been infected
# NA means susceptible, 0 means infected date, 1 indicates the person has been infected 1 day
I <- matrix(NA, para$pop_sz,1)
# Z is a vector indicating if the infected is detectable
Z <- matrix(F, para$pop_sz,1)
############################################
############################################
# trace_inf_n counts secondary infections caused by each individual.
trace_inf_n <- matrix(0, para$pop_sz,1)
# onset just save the incubation time
O <- matrix(NA, para$pop_sz,1)
# Q is the vector indicating when an individual has been quarantine, NA means not quarantine, 1 means first day
Q <- matrix(NA, para$pop_sz,1)
# Per-day snapshots of contacts, onsets and infection states.
C_lst <- list()
O_lst <- list()
I_lst <- list()
# initial case
# E_0 <- floor(runif(1,min = 1, max = 3))
init_idx <- sample(1:para$pop_sz, para$E_0)
I[init_idx] <- 0
O[init_idx] <- incub(para$E_0)
Z[init_idx] <- F # initial case is never detectable
# Daily update loop: refresh contacts (C_update), then propagate infection,
# onset, detectability and secondary-case counts (I_O_update).
for(t in 1:len_sim){
C_lst[[t]] <- C
C <- C_update(C, Q, est1, para)
lst <- I_O_update(I, Q, C, O, Z, trace_inf_n, para)
I <- lst[[1]]
O <- lst[[2]]
Z <- lst[[3]]
trace_inf_n <- lst[[4]]
I_lst[[t]] <- I
O_lst[[t]] <- O
}
# Rt: mean secondary cases among individuals on their 2nd day of infection.
rt <- rep(NA, len_sim)
for(t in 1:len_sim){
i <- I_lst[[t]]
idx <- which(i == 2) # from the second day they are infectious
if(length(idx)) rt[t] <- mean(trace_inf_n[idx])
}
# plot the daily new case: new infections = drop in susceptible (NA) count.
new_daily <- rep(NA, len_sim)
new_daily[1] <- para$E_0
for(t in 2:len_sim){
new_daily[t] <- sum(is.na(I_lst[[t-1]])) - sum(is.na(I_lst[[t]]))
}
all_new_daily[rp,] <- new_daily
Rt[rp,] <- rt
}
# Mean daily incidence across repetitions with a +/- 2 SE ribbon.
err_daily <- apply(all_new_daily, 2, function(x) sd(x)/sqrt(length(x)))
df_plt <- data.frame(day = 1:len_sim, new_daily = colMeans(all_new_daily), err_daily)
plt <- ggplot(df_plt,aes(x=day)) +
geom_line(aes(y = new_daily, color = "Baseline")) +
geom_ribbon(aes( ymax = new_daily + 2*err_daily, ymin = new_daily - 2*err_daily), fill = "red",alpha = 0.3) +
labs(x= "Days from first importing case", y = "New case per day" ) +#,title = paste0("Physical distancing imposed"))+
scale_color_manual(values = c("Baseline" = "red")) +
theme(legend.position = "None", panel.background=element_blank())
ggsave(paste0("transmission_setting",community_setting, "_baseline.png"),plt, width = 5, height = 4)
|
0771513bff8db5d1b7fb58f8ad8c1eb793c094d0 | 450f18a3d54568ffe1cddf70e6c28ce6ab06868d | /scRNA-seq/pooled_data/All/do_randomForest.r | 37eb9067089d615ca1c8a2548a5a25024b6f31c8 | [
"Apache-2.0"
] | permissive | shunsunsun/GeACT | a96639943d66d6d830430e31ee5a5d2162008a5e | 2bf8db3ac855f8b3bf14b757520655121b5198c7 | refs/heads/master | 2023-06-06T15:26:50.654196 | 2021-06-24T08:09:40 | 2021-06-24T08:09:40 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,304 | r | do_randomForest.r | # random forest analysis
setwd("~/lustre/06-Human_cell_atlas/pooled_data/All/")
suppressMessages({
library("arrow")
library("randomForest")
library("ggplot2")
library("cowplot")
})
#source("../../scripts/cluster_tools.r")
samplingPos <- "."
OUT <- paste0("03-expression/merged/randomForest/", samplingPos)
dir.create(OUT, showWarnings = F, recursive = T)
#load(file = paste0(OUT, "/randomForest.RData"))
TF <- read.table("../../Data/human_TF.txt", header = F, sep = "\t", stringsAsFactors = F)
dim(TF)
lncRNA <- read.table("../../Data/human_lncRNA.txt", header = F, sep = "\t", stringsAsFactors = F)
dim(lncRNA)
chromtainRM <- read.table("../../Data/human_cr.txt", header = F, sep = "\t", stringsAsFactors = F)
dim(chromtainRM)
# 1. pre-process ----
# load gene ID
geneID <- read.table("~/lustre/06-Human_cell_atlas/Genomes/human/gene_ID2Name_fixed.txt", header = F, sep = "\t", stringsAsFactors = F)
dim(geneID)
colnames(geneID) <- c("ensembl", "symbol")
# load gene expression matrix (CPM)
expr_data_normed <- read_feather(file = paste0("03-expression/merged/filtering/", samplingPos, "/UMIcount_filtered_CPM.feather"))
expr_data_normed <- as.data.frame(expr_data_normed)
expr_data_normed_gene <- read.table(paste0("03-expression/merged/filtering/", samplingPos, "/UMIcount_filtered_CPM.gene"), header = F, sep = "\t", stringsAsFactors = F)
rownames(expr_data_normed) <- expr_data_normed_gene$V1
# load cell metatable
cellMetaData <- read.table(file = "../../pooled_data_all/All/cell_metatable_RNA_global.txt", header = T, sep = "\t", stringsAsFactors = F, row.names = 1)
cellMetaData <- cellMetaData[colnames(expr_data_normed), ]
all(colnames(expr_data_normed) == rownames(cellMetaData))
cellMetaData$ts_ident <- Hmisc::capitalize(paste(cellMetaData$tissue, cellMetaData$ident, sep = "."))
#cellMetaData$ts_clgrp <- Hmisc::capitalize(paste(cellMetaData$tissue, ident2clgrp(cellMetaData$ident), sep = "."))
#length(unique(cellMetaData$ts_clgrp))
# (disabled) per-cell-type average expression matrix
#expr_data_avg <- sapply(split(rownames(cellMetaData), cellMetaData$ts_ident), function(x) { y <- rowMeans(expr_data_normed[, x, drop = F]) })
# Keep only transcription-factor genes; transpose so rows = cells, cols = genes.
expr_data_normed_onlyTF <- as.data.frame(t(expr_data_normed[rownames(expr_data_normed) %in% TF$V1, ]))
# Replace "-" in gene names with "___": "-" is not a valid variable name inside
# the formula `cellType ~ .` used by randomForest() below; the substitution is
# reversed later with gsub("___", "-", ...) before plotting.
colnames(expr_data_normed_onlyTF) <- gsub("-", "___", colnames(expr_data_normed_onlyTF))
# Response variable: tissue-qualified cell-type label for each cell.
expr_data_normed_onlyTF$cellType <- cellMetaData$ts_ident
## all cell types ----
# split data
set.seed(1234)
split_idx <- sample(nrow(expr_data_normed_onlyTF), nrow(expr_data_normed_onlyTF) * 0.7)
expr_data_train <- expr_data_normed_onlyTF[split_idx, ]
expr_data_train$cellType <- factor(expr_data_train$cellType)
expr_data_test <- expr_data_normed_onlyTF[- split_idx, ]
set.seed(1234)
rf_fit <- randomForest(cellType ~ ., data = expr_data_train, importance = T)
varImpPlot(rf_fit)
rf_importance <- importance(rf_fit, type = 2)
rf_importance <- as.data.frame(rf_importance)
rownames(rf_importance) <- gsub("___", "-", rownames(rf_importance))
rf_importance$gene <- rownames(rf_importance)
rf_importance <- rf_importance[order(rf_importance$MeanDecreaseGini, decreasing = T), ]
rf_importance_sub <- head(rf_importance, 10)
rf_importance_sub$gene <- factor(rf_importance_sub$gene, levels = rev(unique(rf_importance_sub$gene)))
pdf(paste0(OUT, "/rf_TF_allCellType.pdf"), width = 5, height = 3.5)
ggplot(rf_importance_sub, aes(x = gene, y = MeanDecreaseGini)) +
geom_blank() +
geom_vline(xintercept = 1:nrow(rf_importance_sub), linetype = "dashed", color = "grey") +
geom_point(color = "dodgerblue", size = 3) +
xlab("TF") + ylab("Mean decrease Gini") +
#ggtitle("All cell types") +
coord_flip()
dev.off()
## Epi ----
cells_sub <- rownames(cellMetaData)[grepl("^Epi", cellMetaData$ident) | (cellMetaData$ident %in% c("PT", "LoH", "LoH-Prog", "DT", "PC-CLU", "PC-BCAT1", "Podocyte-GPC3", "Podocyte-PLA2R1"))]
expr_data_normed_onlyTF <- expr_data_normed_onlyTF[cells_sub, ]
table(expr_data_normed_onlyTF$cellType)
# split data
set.seed(1234)
split_idx <- sample(nrow(expr_data_normed_onlyTF), nrow(expr_data_normed_onlyTF) * 0.7)
expr_data_train <- expr_data_normed_onlyTF[split_idx, ]
expr_data_train$cellType <- factor(expr_data_train$cellType)
expr_data_test <- expr_data_normed_onlyTF[- split_idx, ]
set.seed(1234)
rf_fit2 <- randomForest(cellType ~ ., data = expr_data_train, importance = T)
varImpPlot(rf_fit2)
rf_importance <- importance(rf_fit2, type = 2)
rf_importance <- as.data.frame(rf_importance)
rownames(rf_importance) <- gsub("___", "-", rownames(rf_importance))
rf_importance$gene <- rownames(rf_importance)
rf_importance <- rf_importance[order(rf_importance$MeanDecreaseGini, decreasing = T), ]
rf_importance_sub <- head(rf_importance, 10)
rf_importance_sub$gene <- factor(rf_importance_sub$gene, levels = rev(unique(rf_importance_sub$gene)))
pdf(paste0(OUT, "/rf_TF_Epi.pdf"), width = 5, height = 3.5)
ggplot(rf_importance_sub, aes(x = gene, y = MeanDecreaseGini)) +
geom_blank() +
geom_vline(xintercept = 1:nrow(rf_importance_sub), linetype = "dashed", color = "grey") +
geom_point(color = "dodgerblue", size = 3) +
xlab("TF") + ylab("Mean decrease Gini") +
ggtitle("Epithelial cells") +
coord_flip()
dev.off()
# X. save ----
save.image(file = paste0(OUT, "/randomForest.RData"))
|
494570ebd81d3a1879721bf00900fa877c0adc3b | dc66fe1b9e4c21615711fc5513ebbf954f959da7 | /DataTreesV2.R | 03c9e168f65c24e26b89c43851fa1d920c26005d | [] | no_license | lyonslj/CTS | faaf1e7a77fcbca53e549c7ed064849297d5fbe7 | 79991cf56d59f0ec2efcc3dc0a30e619b1e2ca88 | refs/heads/master | 2021-01-19T10:14:25.418036 | 2020-11-12T07:29:47 | 2020-11-12T07:29:47 | 82,169,315 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,793 | r | DataTreesV2.R | DataTree <- function(PbsSvc) {
library(data.tree)
PBS <- Node$new("Parent Business Service")
CBS <- PBS$AddChild("Child Business Service")
PTS <- CBS$AddChild("Parent Technical Service")
CTS <- PTS$AddChild("Child Technical Service")
Application <- CTS$AddChild("Application")
#print(PBS)
##
library(treemap)
library(readxl)
setwd("/Volumes/C/JLTemp")
Services <- read_excel("ServicesTree.xlsx")
Svcs_data <- as.data.frame(Services)
mydat <- subset(Svcs_data,Svcs_data$PBS == "Investment_Banking_Client_Management_Service_PBS")
# Set levels and their colors #
lev1 <- unique(mydat[,1])
col2 <- "lightskyblue"
lev2 <- unique(paste("SetNodeStyle(Svcs$",mydat[,1],"$",mydat[,2],",inherit = FALSE, fillcolor = col2)",sep=""))
col3 <- "hotpink"
lev3 <- unique(paste("SetNodeStyle(Svcs$",mydat[,1],"$",mydat[,2],"$",mydat[,4],",inherit = FALSE, fillcolor = col3)",sep=""))
col4 <- "gold"
lev4 <- unique(paste("SetNodeStyle(Svcs$",mydat[,1],"$",mydat[,2],"$",mydat[,4],"$",mydat[,5],",inherit = FALSE, fillcolor = col4)",sep=""))
col5 <- "darkturquoise"
lev5 <- unique(paste("SetNodeStyle(Svcs$",mydat[,1],"$",mydat[,2],"$",mydat[,4],"$",mydat[,5],"$",mydat[,3],",inherit = FALSE, fillcolor = col5)",sep=""))
#
mydat$pathString <- paste("Services",mydat$PBS,mydat$CBS,mydat$PTS,mydat$CTS,mydat$Application, sep = "/")
Svcs <- as.Node(mydat)
#
SetNodeStyle(Svcs, style = "filled,rounded", shape = "box", fillcolor = "GreenYellow", fontname = "helvetica", tooltip = GetDefaultTooltip)
#SetNodeStyle(Svcs, inherit = FALSE, fillcolor = "darkturquoise")
for(i in lev2) {eval(parse(text = i))}
for(i in lev3) {eval(parse(text = i))}
for(i in lev4) {eval(parse(text = i))}
#for(i in lev5) {eval(parse(text = i))}
plot(Svcs)
}
|
8161765fe4725d68ae807a99dfb8893a18495a34 | 7c28c847f5f7f88c0573af1f4b7ff53899ada816 | /cachematrix.R | 8c4779f68bec75d7d1a167b671b132a366395ff5 | [] | no_license | pagirtas/ProgrammingAssignment2 | 6989586732b53fcc16b6a944a26a9044a5760237 | fd4c2ffa5de745a126dcd0b84b13451c6dda2af6 | refs/heads/master | 2020-12-03T08:00:13.440959 | 2015-12-27T14:15:01 | 2015-12-27T14:15:01 | 48,627,904 | 0 | 0 | null | 2015-12-27T00:33:59 | 2015-12-27T00:33:59 | null | UTF-8 | R | false | false | 1,569 | r | cachematrix.R | ## Functions for ProgrammingAssignment2:
## Framework for caching the computed inverse of a matrix so that it does
## not have to be computed again.
## makeCacheMatrix: creates a special matrix object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
  # Wrap a matrix together with a cache slot for its inverse.
  # Returns a list of four accessors: set/get for the matrix itself and
  # set_xinv/get_xinv for the cached inverse. Changing the matrix via set()
  # invalidates the cache.
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL  # matrix changed: drop any stale inverse
    },
    get = function() x,
    set_xinv = function(solve) cached_inverse <<- solve,
    get_xinv = function() cached_inverse
  )
}
## cacheSolve: computes the inverse of a matrix returned by makeCacheMatrix.
## If the inverse was already calculated and the matrix has not changed,
## then cacheSolve retrieves the inverse from cache.
cacheSolve <- function(x, ...) {
  ## Return the inverse of the matrix wrapped by 'x' (an object created by
  ## makeCacheMatrix), computing it at most once and serving the cached value
  ## on subsequent calls. Extra arguments in ... are forwarded to solve().
  x_inv <- x$get_xinv()
  if (!is.null(x_inv)) {
    message("Getting data from cache...")
    return(x_inv)
  }
  data <- x$get()
  ## BUG FIX: the original called dim(x) on the wrapper *list* (always NULL,
  ## so the squareness if() errored with "argument is of length zero") and
  ## used a bare `return` (which evaluates to the function object and does
  ## not exit). Check squareness on the actual matrix and fail loudly.
  if (nrow(data) != ncol(data)) {
    stop("Input matrix to cacheSolve is not square.", call. = FALSE)
  }
  x_inv <- solve(data, ...)
  x$set_xinv(x_inv)
  x_inv
}
|
9209127c1c3bccb635a6a41e80bfe2894e58e98d | 9d3a9cf98ba20b66e91be4629e8d852d22c728c2 | /figures/figure_3.R | 0e386a8c4daefb5cf0fb0a3e2687704b12929617 | [
"MIT"
] | permissive | rmccoy7541/aneuploidy-gwas | 2162ff95d3be6f07365f6bad39df6b52618c6cae | 2e0e96de42fc1f4a6fb918a541f74dd05e755e5c | refs/heads/master | 2021-01-01T18:48:25.374418 | 2015-04-19T21:22:07 | 2015-04-19T21:22:07 | 33,620,273 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,359 | r | figure_3.R | #################################################
# File: figure_3.R
#################################################
# Author: Rajiv McCoy
# The purpose of this figure is to describe
# various aspects of the effect of genotype at
# the associated locus on various phenotypes
# related to aneuploidy. These figures require
# genotype data to produce.
#################################################
# load packages
library(ggplot2)
library(gridExtra)
library(gtable)
source("~/Desktop/aneuploidy_functions.R")
# the aneuploidy calls were published along with the paper
data <- read.table("~/Desktop/aneuploidy_calls.csv", sep = ",", header=T) # import the data
# don't do any QC filtering, since we only care about number of embryos submitted, not their ploidy calls
# data_filtered <- filterData(data)
data_te <- selectSampleType(data, TE)
data_te <- callPloidy(data_te)
#################################################
# read in genotype data at the associated locus; combine "discovery" and "validation" sets from GWAS
gt <- read.table("/sandbox/rs2305957_f.gt"); gt2 <- read.table("/sandbox/rs2305957_validate_f.gt"); gt <- rbind(gt, gt2)
names(gt) <- c("case", "sample_id", "genotype")
data_te <- merge(data_te, gt, "case")
means <- data.frame(table(data_te$genotype) / table(data_te[!duplicated(data_te$case),]$genotype))
fam_counts <- data.frame(table(data_te$case))
names(fam_counts) <- c("case", "counts")
results <- merge(fam_counts, gt, "case")
results$gt <- NA
results[results$genotype == "GG",]$gt <- 0
results[results$genotype == "AG",]$gt <- 1
results[results$genotype == "AA",]$gt <- 2
summary(lm(data = results, counts ~ gt))
summary(glm(data = results, counts ~ gt, family = quasipoisson()))
maternal_age <- data.frame(aggregate(data_te$maternal_age ~ data_te$case, FUN = mean))
names(maternal_age) <- c("case", "maternal_age")
results<- merge(results, maternal_age, "case")
# fit a Poisson linear model testing for association between number of day 5 embryos submitted and maternal genotype; include maternal age as a covariate
summary(glm(data = results, counts ~ maternal_age + I(maternal_age ^ 2) + gt, family = quasipoisson()))
#################################################
means <- c(mean(results[results$genotype == "GG",]$counts), mean(results[results$genotype == "AG",]$counts), mean(results[results$genotype == "AA",]$counts))
se <- c(std(results[results$genotype == "GG",]$counts), std(results[results$genotype == "AG",]$counts), std(results[results$genotype == "AA",]$counts))
bars <- data.frame(means, se, c("GG", "AG", "AA"))
names(bars) <- c("counts", "se", "genotype")
limits <- aes(ymax = (counts + se), ymin = (counts - se))
c <- ggplot(data = bars, aes(x = genotype, y = means, fill = genotype)) + geom_bar(stat = "identity") + geom_errorbar(limits, alpha = 1, width = 0.25) + ylab("TE samples per mother") + theme(legend.position = "none")
c <- ggplot_gtable(ggplot_build(c))
#################################################
### generate boxplots ###
gt <- read.table("/sandbox/rs2305957_f.gt")
names(gt) <- c("case", "sampleid", "genotype")
pheno <- read.table("/sandbox/all_nonseg_mitotic_blastomere_f.matlab.response", sep=',')
names(pheno) <- c("case", "controls", "cases")
gt <- merge(gt, pheno, "case")
# remove missing genotypes
gt <- gt[gt$genotype!="00",]
# plot proportions stratified by genotype, requiring at least 3 blastomeres per mother
a <- ggplot(data = gt[gt$controls + gt$cases > 2,], aes(fill = factor(genotype), x = genotype, y = (cases / (controls + cases)))) + geom_boxplot() + xlab("Genotype") + ylab("Prop. blastomeres w/ mitotic error") + ylim(0,1) + ggtitle("Discovery") + theme(legend.position="none")
mean(gt[gt$genotype == "GG",]$cases / (gt[gt$genotype == "GG",]$controls + gt[gt$genotype == "GG",]$cases))
mean(gt[gt$genotype == "AG",]$cases / (gt[gt$genotype == "AG",]$controls + gt[gt$genotype == "AG",]$cases))
mean(gt[gt$genotype == "AA",]$cases / (gt[gt$genotype == "AA",]$controls + gt[gt$genotype == "AA",]$cases))
a <- ggplot_gtable(ggplot_build(a))
gt_validate <- read.table("/sandbox/rs2305957_validate_f.gt")
names(gt_validate) <- c("case", "sampleid", "genotype")
pheno_validate <- read.table("/sandbox/validate_nonseg_mitotic_blastomere.results", sep=',')
names(pheno_validate) <- c("case", "controls", "cases")
gt_validate <- merge(gt_validate, pheno_validate, "case")
mean(gt_validate[gt_validate$genotype == "GG",]$cases / (gt_validate[gt_validate$genotype == "GG",]$controls + gt_validate[gt_validate$genotype == "GG",]$cases))
mean(gt_validate[gt_validate$genotype == "AG",]$cases / (gt_validate[gt_validate$genotype == "AG",]$controls + gt_validate[gt_validate$genotype == "AG",]$cases))
mean(gt_validate[gt_validate$genotype == "AA",]$cases / (gt_validate[gt_validate$genotype == "AA",]$controls + gt_validate[gt_validate$genotype == "AA",]$cases))
# remove missing genotypes
gt_validate <- gt_validate[gt_validate$genotype!="00",]
# plot proportions stratified by genotype, requiring at least 3 blastomeres per mother
b <- ggplot(data = gt_validate[gt_validate$controls + gt_validate$cases > 2,], aes(fill = factor(genotype), x = genotype, y = (cases / (controls + cases)))) + geom_boxplot() + xlab("Genotype") + ylab("Prop. blastomeres w/ mitotic error") + ylim(0,1) + ggtitle("Validation") + theme(legend.position = "none")
b <- ggplot_gtable(ggplot_build(b))
#################################################
### plot effect size versus age ###
data <- read.table("~/Desktop/aneuploidy_calls.csv", sep = ",", header=T) # import the data
data_filtered <- filterData(data)
data_blastomere <- selectSampleType(data_filtered, blastomere)
data_blastomere <- callPloidy(data_blastomere)
aneuploid_binom <- aneuploidyByCase(data_blastomere)
gt <- read.table("/sandbox/rs2305957_f.gt")
names(gt) <- c("i", "sample_id", "genotype")
gt$numeric <- 0
gt[gt$genotype == "GG",]$numeric <- 0
gt[gt$genotype == "AG",]$numeric <- 1
gt[gt$genotype == "AA",]$numeric <- 2
aneuploid_binom <- merge(aneuploid_binom, gt, "i")
gt <- read.table("/sandbox/rs2305957_f.gt")
names(gt) <- c("case", "sampleid", "genotype")
pheno <- read.table("/sandbox/all_nonseg_mitotic_blastomere_f.matlab.response", sep=',')
names(pheno) <- c("case", "no_mitotic_error", "mitotic_error")
gt <- merge(gt, pheno, "case")
# remove missing genotypes
gt <- gt[gt$genotype != "00",]
maternal_age <- data.frame(cbind(data[!duplicated(data$case),]$case, data[!duplicated(data$case),]$maternal_age))
names(maternal_age)<-c("case", "maternal_age")
age_gt <- merge(gt, maternal_age, "case")
age_gt <- age_gt[complete.cases(age_gt),]
age_gt$prop <- age_gt$mitotic_error/ (age_gt$no_mitotic_error + age_gt$mitotic_error)
base<-2
# Round x to the nearest multiple of `base`. The default base = 2 reproduces
# the original hard-coded behavior (the script's global `base <- 2` was never
# actually read by this function). Uses round(), i.e. banker's rounding at
# exact ties. Vectorized over x.
mround <- function(x, base = 2){
  base * round(x / base)
}
aneuploidyByGenotypeByAge <- function(data, genotype) {
  # Mean mitotic-error proportion (and a binomial-style standard error over
  # mothers) per 2-year maternal-age bin, restricted to mothers carrying the
  # given genotype. Expects columns `genotype`, `maternal_age` and `prop`.
  # Returns a data.frame with columns prop, se, maternal_age, genotype.
  #
  # Rewritten from the original rbind-in-a-loop version, which grew the frame
  # row by row through c() (coercing every column to character, forcing the
  # as.numeric round-trip) and started from a dummy NA row removed at the end.
  gt_rows <- data[data$genotype == genotype, ]
  ages <- sort(unique(mround(gt_rows$maternal_age)))
  if (length(ages) == 0) {
    # no mothers with this genotype: empty frame with the expected columns
    return(data.frame(prop = numeric(0), se = numeric(0),
                      maternal_age = numeric(0), genotype = character(0),
                      stringsAsFactors = FALSE))
  }
  rows <- lapply(ages, function(age) {
    age_subset <- gt_rows[mround(gt_rows$maternal_age) == age, ]
    prop <- mean(age_subset$prop)
    # SE treating the mean proportion as a binomial estimate over mothers
    se <- sqrt((prop * (1 - prop)) / nrow(age_subset))
    data.frame(prop = prop, se = se, maternal_age = age,
               genotype = genotype, stringsAsFactors = FALSE)
  })
  do.call(rbind, rows)
}
gg <- aneuploidyByGenotypeByAge(age_gt, "GG")
ag <- aneuploidyByGenotypeByAge(age_gt, "AG")
aa <- aneuploidyByGenotypeByAge(age_gt, "AA")
age_results_frame <- rbind(gg, ag, aa)
limits <- aes(ymax = (prop + se), ymin = (prop - se))
d <- ggplot(data = age_results_frame, aes(x = maternal_age, y = prop, color = factor(genotype))) + geom_point() + geom_line() + geom_errorbar(limits, width = 0.5) + coord_cartesian(ylim = c(-0.05, 1.05)) + ylab("Prop. blastomeres w/ mitotic error") + theme(legend.justification = c(0,0), legend.position = c(.1,0.45)) + xlab('Maternal Age') + labs(fill = "") + scale_color_discrete(name = "Genotype")
d<-ggplot_gtable(ggplot_build(d))
#################################################
aneuploid_binom$prop <- aneuploid_binom$aneuploid_1 / (aneuploid_binom$aneuploid_1 + aneuploid_binom$aneuploid_0)
gg <- aneuploidyByGenotypeByAge(aneuploid_binom, "GG")
ag <- aneuploidyByGenotypeByAge(aneuploid_binom, "AG")
aa <- aneuploidyByGenotypeByAge(aneuploid_binom, "AA")
age_results_frame <- rbind(gg, ag, aa)
e <- ggplot(data = age_results_frame, aes(x = maternal_age, y = prop, color = factor(genotype))) + geom_point() + geom_line() + geom_errorbar(limits, width=0.5) + coord_cartesian(ylim = c(-0.05, 1.05)) + ylab("Prop. blastomeres w/ mitotic error") + theme(legend.justification = c(0,0), legend.position = c(.5,0)) + xlab('Maternal Age') + labs(fill = "") + scale_color_discrete(name = "Genotype")
e <- ggplot_gtable(ggplot_build(e))
#################################################
# put all panels in a single figure
b$widths <- a$widths
c$widths <- a$widths
d$widths <- a$widths
e$widths <- a$widths
grid.arrange(a, b, d, e, c, nrow = 3)
|
aa4db66ca9fca42a3b276c7bd8abf84af1240c31 | 021b4f4af578b0fd5edf16361d5e99d3b5f990fd | /Plot1.R | ef1910cdcf3ff22d7096c9ceb192844baed6767b | [] | no_license | lucita1981/Exploratory-and-Data-Analysis-Course | cd32c7b1c114dc809c5c9695f1524c78fbcb401c | ac826fc869c03636112c8afafe458d00fc48b951 | refs/heads/master | 2021-04-09T13:24:40.418183 | 2018-03-17T04:05:40 | 2018-03-17T04:05:40 | 125,583,587 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 715 | r | Plot1.R | ## Downloading & Unzip
if(!file.exists("./data")){dir.create("./data")}
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
download.file(fileUrl, destfile="./data/Data.zip")
unzip(zipfile="./data/Data.zip",exdir="./data")
## Reading Files
if(!exists("NEI")){
NEI <- readRDS("./data/summarySCC_PM25.rds")
}
if(!exists("SCC")){
SCC <- readRDS("./data/Source_Classification_Code.rds")
}
#Plot function & Using PNG Device
TotalByYear <- aggregate(Emissions ~ year, NEI, sum)
png("Plot1.png")
barplot(height=TotalByYear$Emissions, names.arg = TotalByYear$year, col=c("Magenta", "Cyan", "Green", "Orange"), main="Total PM2.5 emission by year",xlab="Years", ylab = "Emission")
dev.off()
|
362a65875b3eea2a772bc4edbfb2cc5d91169579 | 7a95abd73d1ab9826e7f2bd7762f31c98bd0274f | /meteor/inst/testfiles/ET0_PenmanMonteith/libFuzzer_ET0_PenmanMonteith/ET0_PenmanMonteith_valgrind_files/1612736691-test.R | cc8a5f9c25f1793fc728c57a00f4e802c8aba0db | [] | no_license | akhikolla/updatedatatype-list3 | 536d4e126d14ffb84bb655b8551ed5bc9b16d2c5 | d1505cabc5bea8badb599bf1ed44efad5306636c | refs/heads/master | 2023-03-25T09:44:15.112369 | 2021-03-20T15:57:10 | 2021-03-20T15:57:10 | 349,770,001 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 497 | r | 1612736691-test.R | testlist <- list(G = numeric(0), Rn = numeric(0), atmp = numeric(0), ra = numeric(0), relh = numeric(0), rs = numeric(0), temp = c(NaN, NaN, 2.34067609905209e-306, 1.32498004728931e+213, -5.14674537495229e+291, -36711776.0230951, 1.97523508538512e+289, 9.88817898942117e+58, 5.14276508926819e+25, NaN, NaN, NaN, NaN, NaN, NaN, -8589934592, 6.92439507277589e+212, 2.11720969454505e-314, 0, 0, 0, 0, 0, 0))
result <- do.call(meteor:::ET0_PenmanMonteith,testlist)
str(result) |
b9a01264ac6810a232435f867f419f4dd8da739c | b547d1e9639299f2876c091145ab7fb65c140fd4 | /Simulate_SBM.R | 8b90c97187d1a102593aa5fe882808c7f6751587 | [] | no_license | dankessler/graphclass | 0fdfa7c696a8b3961d7831437d338b9b011db7bf | 7571fb21d38a89a4563b84c1a8423a148cee45eb | refs/heads/master | 2021-01-16T20:32:21.788280 | 2016-03-30T14:50:03 | 2016-03-30T14:50:03 | 52,468,871 | 0 | 0 | null | 2016-02-24T19:32:26 | 2016-02-24T19:32:26 | null | UTF-8 | R | false | false | 3,232 | r | Simulate_SBM.R | # Simulate random networks
# Sample an undirected stochastic block model graph: K equal communities of
# nk nodes each, edge probability p within a community and q between
# communities. Returns the adjacency matrix, the community membership vector
# and the (unnormalized) graph Laplacian.
sbm <- function(p,q, K, nk) {
  # block-level probability matrix: p on the diagonal, q everywhere else
  block_probs <- (p - q) * diag(K) + q
  # expand to a full node-by-node edge-probability matrix
  edge_probs <- kronecker(block_probs, matrix(1, nrow = nk, ncol = nk))
  adj <- apply(edge_probs, MARGIN = c(1, 2), function(u) rbinom(1, 1, prob = u))
  # symmetrize: keep the upper triangle, mirror it, then drop self-loops
  adj[lower.tri(adj)] <- 0
  adj <- adj + t(adj)
  diag(adj) <- 0
  degrees <- rowSums(adj)
  list(Aobs = adj,
       membership = kronecker(1:K, rep(1, nk)),
       Laplacian = diag(degrees) - adj)
}
# Same model as sbm(), except the connection probability between communities
# 1 and 2 is overridden with q1 (every other between-community pair keeps q).
sbm_q1 <- function(p,q, K, nk, q1) {
  block_probs <- (p - q) * diag(K) + q
  block_probs[2, 1] <- q1
  block_probs[1, 2] <- q1
  edge_probs <- kronecker(block_probs, matrix(1, nrow = nk, ncol = nk))
  adj <- apply(edge_probs, MARGIN = c(1, 2), function(u) rbinom(1, 1, prob = u))
  # symmetrize: keep the upper triangle, mirror it, then drop self-loops
  adj[lower.tri(adj)] <- 0
  adj <- adj + t(adj)
  diag(adj) <- 0
  degrees <- rowSums(adj)
  list(Aobs = adj,
       membership = kronecker(1:K, rep(1, nk)),
       Laplacian = diag(degrees) - adj)
}
# Same model as sbm(), except the connection probability between communities
# 3 and 4 is overridden with q1, so this variant requires K >= 4.
# (Duplicates sbm_q1 apart from which block pair is overridden.)
sbm_q2 <- function(p,q, K, nk, q1) {
  block_probs <- (p - q) * diag(K) + q
  block_probs[3, 4] <- q1
  block_probs[4, 3] <- q1
  edge_probs <- kronecker(block_probs, matrix(1, nrow = nk, ncol = nk))
  adj <- apply(edge_probs, MARGIN = c(1, 2), function(u) rbinom(1, 1, prob = u))
  # symmetrize: keep the upper triangle, mirror it, then drop self-loops
  adj[lower.tri(adj)] <- 0
  adj <- adj + t(adj)
  diag(adj) <- 0
  degrees <- rowSums(adj)
  list(Aobs = adj,
       membership = kronecker(1:K, rep(1, nk)),
       Laplacian = diag(degrees) - adj)
}
signal_subgraph <- function(m,s, p, q, N, n = 70) {
  # Sample N graphs on n nodes from a "signal subgraph" model: every edge
  # appears independently with probability p, except s signal edges incident
  # to m randomly chosen signal vertices, which appear with probability q.
  # Returns the sampled adjacency matrices plus the planted signal structure.
  signal_vertex <- sample(x = 1:n, size = m, replace = F)
  selected_edges <- sample(1:(m * (2 * n - m - 1) / 2), s, replace = F)
  signal_edges <- lapply(1:m, function(x) c())
  for (i in 1:m) {
    u <- selected_edges[which(selected_edges <= i * n - i * (i + 1) / 2 &
                                selected_edges > (i - 1) * n - (i - 1) * (i) / 2)]
    # Map flat edge indices to neighbour columns, skipping over the signal
    # vertices already placed. BUG FIX: vapply instead of sapply, so an empty
    # u yields numeric(0) rather than list() -- the original crashed in the
    # prob_matrix indexing below whenever a signal vertex drew no edges.
    # NOTE(review): `x > signal_vertex[1:i]` maps x == vertex onto the
    # diagonal; possibly `>=` was intended -- TODO confirm.
    signal_edges[[i]] <- vapply(u, function(x) x + sum(x > signal_vertex[1:i]),
                                numeric(1))
  }
  prob_matrix <- array(p, dim = c(n, n))
  for (i in 1:m) {
    prob_matrix[signal_vertex[i], signal_edges[[i]]] <- q
    prob_matrix[signal_edges[[i]], signal_vertex[i]] <- q
  }
  Alist <- lapply(1:N, function(x) {
    Aobs <- apply(prob_matrix, MARGIN = c(1, 2), function(u) rbinom(1, 1, prob = u))
    Aobs[lower.tri(Aobs)] <- 0
    Aobs <- Aobs + t(Aobs)
    # BUG FIX: in the original this line sat *after* return(Aobs) and never
    # ran, so sampled graphs could contain self-loops.
    diag(Aobs) <- 0
    Aobs
  })
  return(list(A_sample = Alist, signal_vertex = signal_vertex,
              signal_edges = signal_edges))
}
# Build (without sampling any graphs) the n x n edge-probability matrix of a
# signal-subgraph model: baseline probability p everywhere, with s signal
# edges incident to m randomly chosen signal vertices set to probability q.
# Returns the signal vertices, their signal-edge columns, and the matrix.
model_signal_subgraph <- function(m,s,p,q,n=70) {
  signal_vertex <- sample(x = 1:n, size = m, replace = F)
  selected_edges <- sample(1:(m * (2 * n - m - 1) / 2), s, replace = F)
  signal_edges <- lapply(1:m, function(x) c())
  for (i in 1:m) {
    # flat edge indices assigned to the i-th signal vertex
    lo <- (i - 1) * n - (i - 1) * i / 2
    hi <- i * n - i * (i + 1) / 2
    u <- selected_edges[which(selected_edges <= hi & selected_edges > lo)]
    # shift each flat index past the signal vertices placed so far
    signal_edges[[i]] <- sapply(u, function(x) x + sum(x > signal_vertex[1:i]))
  }
  prob_matrix <- array(p, dim = c(n, n))
  for (i in 1:m) {
    prob_matrix[signal_vertex[i], signal_edges[[i]]] <- q
    prob_matrix[signal_edges[[i]], signal_vertex[i]] <- q
  }
  return(list(signal_vertex = signal_vertex, signal_edges = signal_edges,
              prob_matrix = prob_matrix))
}
}
sample_probMatrix <- function(prob_matrix, n = 70, N) {
  # Draw N independent symmetric 0/1 adjacency matrices with entrywise
  # Bernoulli(prob_matrix) edges; diagonals are zeroed (no self-loops).
  # `n` is unused but kept for backward compatibility with existing callers.
  Alist <- lapply(1:N, function(x) {
    Aobs <- apply(prob_matrix, MARGIN = c(1, 2), function(u) rbinom(1, 1, prob = u))
    # keep the upper triangle, mirror it, then drop self-loops
    Aobs[lower.tri(Aobs)] <- 0
    Aobs <- Aobs + t(Aobs)
    # BUG FIX: in the original this line sat *after* return(Aobs) and never
    # ran, so sampled graphs could contain self-loops.
    diag(Aobs) <- 0
    Aobs
  })
  return(list(A_sample = Alist))
}
eafa1be9f1ae5549107b65a44c24319218f22dad | b8fa00b408af080b5e25363c7fdde05e5d869be1 | /Task3_0867117/exercise_9.r | 8770870ef05102164193e4721f3b4c1c51c2cc7b | [] | no_license | anurag199/r-studio | bc89f0c18a8d44164cb4ede8df79321ea965bc77 | e42909505fbe709f476081be97f89cc945a2745d | refs/heads/master | 2020-04-26T23:04:25.061780 | 2019-03-05T06:56:26 | 2019-03-05T06:56:26 | 173,891,322 | 0 | 0 | null | null | null | null | WINDOWS-1252 | R | false | false | 578 | r | exercise_9.r | library(dplyr)
library(tidyverse)
library(stringr)
# Load sales.txt; sep = "\n" keeps each raw line as a single text column so
# the currency symbol and amount can be parsed out of it in the pipeline
# below. NOTE(review): prefer header = TRUE over the abbreviation T.
sales <- read.csv("sales.txt", stringsAsFactors = FALSE, header = T, sep = "\n")
# cleaning the currency data and converting the Pounds into US dollars
sales %>% mutate(currency = str_extract(sales,"\\$|£") ) %>% mutate(raw_amount = str_replace_all(sales, ",|\\$|£|Â", "") ) %>% mutate(amount = as.numeric(raw_amount)) %>%mutate(convertedAmountInUSD = ifelse(currency == ''
, amount, ifelse(currency == '£', round(amount*1.44,2), NA))) |
9743b88c5c42150513a868f89c5c518699c6c352 | 8388dae510687c5dbffdfcad680b2a047e197ef1 | /man/fish_isolation_SS1.Rd | c24f008b97938ad7dec81d8363d77c1e14bdf520 | [
"MIT"
] | permissive | RodolfoPelinson/PredatorIsolationStochasticity | 18bb74ba0bbdbe973f2cb562defe60a7453aa084 | fde83564521815a850cab6a5c77036aad8e38335 | refs/heads/master | 2023-04-17T23:52:32.012403 | 2022-02-11T18:41:02 | 2022-02-11T18:41:02 | 304,638,200 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 448 | rd | fish_isolation_SS1.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{fish_isolation_SS1}
\alias{fish_isolation_SS1}
\title{Isolation and fish treatment identities for the First survey}
\format{
Factor w/ 3 levels: "030","120","480", and 24 observations
}
\source{
{Experimental Data}
}
\usage{
fish_isolation_SS1
}
\description{
Isolation and fish treatment identities for the First survey
}
\keyword{datasets}
|
600bba79d91ec8172142fe4f616b373c8a6c950c | 454258ebbc883bb6338e11302b2a9dccfcb2dd16 | /man/create_play_spectrum_output.Rd | 9cf13542a4f23698ff7b9d90250e1f6307b713ce | [
"CC0-1.0"
] | permissive | Solanrewaju/datapackr | 22248534548af131bf7b46c3a4e7c0527d938f7d | fa5085a069bb55cb201ee9e900fa0ab3b8553cd1 | refs/heads/master | 2023-03-26T20:47:57.894480 | 2021-03-03T19:43:50 | 2021-03-03T19:43:50 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 915 | rd | create_play_spectrum_output.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/create_play_spectrum_output.R
\name{create_play_spectrum_output}
\alias{create_play_spectrum_output}
\title{Create fake Spectrum output for testing}
\usage{
create_play_spectrum_output(
country_uids,
cop_year,
output_folder = NULL,
d2_session = dynGet("d2_default_session", inherits = TRUE)
)
}
\arguments{
\item{country_uids}{Unique IDs for countries to include in the Data Pack.
For full list of these IDs, see \code{datapackr::dataPackMap}.}
\item{cop_year}{Specifies COP year for dating as well as selection of
templates.}
\item{output_folder}{Local folder where you would like your Data Pack to be
saved upon export. If left as \code{NULL}, will not output externally.}
}
\value{
Fake Spectrum dataset
}
\description{
Creates a fake Spectrum Data Pack export for use in testing with Data Packs.
Requires login to DATIM.
}
|
c401e9b35ee12f7526c291a26061b12e4b4f66f9 | b88f0857833f09fbde024bcd09e5236299b5f67f | /PrevisaoCustosAberturaFranquia/app.R | 45ad84a8ab23cfdde9dfbf1551fafe5e2c0c8f72 | [] | no_license | alexandre-trapp/PrevisaoCustosAberturaFranquiaComR | 74f4614467c935c710fe1c8fd335bb47ec7ef517 | 6c79e6b10b0710f3e088e9ba643c4be6a7315453 | refs/heads/master | 2021-01-04T00:48:25.662942 | 2020-02-13T17:21:05 | 2020-02-13T17:21:05 | 240,309,175 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,444 | r | app.R | library(shiny)
dados = read.csv("Modelos/slr12.csv", sep = ";")
modelo = lm(CusInic ~ FrqAnual, data=dados)
# UI do app pra exibir o histograma
# UI: scatter plot with regression line, head of the data table, and an input
# panel that requests an initial-cost prediction for a new annual fee.
# BUG FIX: the original left trailing commas inside fluidRow(...)/fluidPage(...)
# argument lists (empty trailing arguments), which only works where htmltools
# tolerates them via rlang::dots_list and errors under plain base R call
# semantics. They are removed here; nothing else changes.
ui <- fluidPage(
  # app title (user-facing strings kept verbatim)
  titlePanel("Previsao de custo inicial para montar uma franquia"),
  fluidRow(
    column(4,
           h2("Dados"),
           tableOutput("Dados")
    ),
    column(8,
           plotOutput("Graf")
    )
  ),
  fluidRow(
    column(6,
           h3("Valor anual da franquia:"),
           numericInput("NovoValor", "Insira um novo valor", 1500, min=1, max=9999999),
           actionButton("Processar", "Processar")
    ),
    column(6,
           h1(textOutput("Resultado"))
    )
  )
)
server <- function(input, output) {
  # Scatter plot of the training data with the fitted regression line.
  output$Graf <- renderPlot({
    plot(CusInic ~ FrqAnual, data = dados)
    abline(modelo)
  })
  # First 10 rows of the raw data.
  output$Dados <- renderTable({ head(dados, 10) })
  # On button press, predict the initial cost for the entered annual fee.
  observeEvent(input$Processar, {
    # BUG FIX: the original ran eval(parse(text = valr)) on the input.
    # numericInput already yields a numeric value, so evaluating user input
    # as code was both unnecessary and unsafe; the value is used directly.
    valr <- input$NovoValor
    prev <- predict(modelo, data.frame(FrqAnual = valr))
    prev <- paste0("Previsao de custo inicial R$: ", round(prev, 2))
    output$Resultado <- renderText({prev})
  })
}
# Run the application
shinyApp(ui = ui, server = server)
|
b9fbc45899804aa82a489d2f57b0bfe3e0c751a9 | ef456f8a94ca3168fe04e09b37c2fcba1379135c | /modules/dependen_dropdown.R | ad965a1675a0f4d6430dce76a2172cb28ecf2d81 | [] | no_license | alencaru/shipapp | ae435cd348c3a5beaaff74757e3d4d2134c339a7 | e635248f49e52c9a21af2d1f9a370765d6c27b4e | refs/heads/main | 2023-02-21T12:18:07.394457 | 2021-01-24T11:23:45 | 2021-01-24T11:23:45 | 332,394,905 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 859 | r | dependen_dropdown.R | #-------------------------------------------------------------------------------
#
# Module - dependent dropdown
#
#-------------------------------------------------------------------------------
# Module UI: a single ship-name dropdown, initially empty; its choices are
# populated by the server half of the module (dropdown_shipname). The
# `filter` argument is accepted but unused, as in the original.
dropdown_shipname_UI <- function(id, filter = NULL) {
  ns <- NS(id)
  # wrapped in a list so the caller can splice it into a larger UI
  list(
    dropdown_input(ns("shipname"), choices = NULL, value = "NAN")
  )
}
dropdown_shipname <- function(input, output, session, data, input_shiptype) {
  # Module server: keeps the ship-name choices in sync with the currently
  # selected ship type and returns the current selection as a reactive.
  output$dropdown <- renderText(input[["shipname"]])
  observe({
    update_dropdown_input(
      session
      , "shipname"
      , choices = unique(data()$SHIPNAME[data()$ship_type == input_shiptype()])
    )
  })
  # BUG FIX: the original placed return(reactive(...)) *inside* observe(),
  # so the module actually returned the observer handle instead of the
  # selection; the reactive is now returned from the module itself.
  reactive({ input$shipname })
}
|
f38407dd23a1616f263c9041a9b378ef55cdd302 | 2205d2dd8a493a5035599cdc8b974c2eda339ee4 | /plot4.R | 65fa9c9ee16f8a1794050fec5b0f4ba33269e9c0 | [] | no_license | benjaminwelge/ExData_Plotting1 | 2126ee865a91e9b763f09a8d7b2cceaf60ec2b65 | 94c45da42c9a7c685f0edf98536121dc4a81381e | refs/heads/master | 2021-01-15T12:42:05.940872 | 2014-09-10T09:10:18 | 2014-09-10T09:10:18 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,416 | r | plot4.R | plot4 <- function(){
## Setting the working directory
setwd("C:\\data_science")
## As this runs on a German laptop I have to switch to English Time setting
## otherweise plot output will show German weekday names
Sys.setlocale("LC_TIME", "English")
## Download and unzip data, but only if we haven't downloaded and unpacked it yet
if(!file.exists("C:\\data_science\\household_power_consumption.txt")){
fileUrl <- "http://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileUrl, destfile = "household_power_consumption.zip", method = "auto")
unzip("./household'_power_consumption.zip")
}
## Reads in the rawdata table and makes sure stringAsFactors is set to false
rawdata <- read.table("C:\\data_science\\household_power_consumption.txt", sep = ";", header = TRUE, na.strings = "?", stringsAsFactors=FALSE)
## Subsetting data - we only want to use rows from the 1st and 2nd Feb. in 2007
subdata <- rawdata[ which(rawdata$Date == '1/2/2007' | rawdata$Date == '2/2/2007'), ]
## Create a new column with merged data and time properly formatted
subdata$Date <- as.Date(subdata$Date, format="%d/%m/%Y")
datetime <- paste(as.Date(subdata$Date), subdata$Time)
subdata$Date_time <- as.POSIXct(datetime)
## Creates the PNG file to save the plot in
png(filename = "C:\\data_science\\plot4.png", width = 480, height = 480, units = "px", bg = "white")
## As we want several plots in one graphic we use par with the mfrow parameter to
## create a 2x2 grid for the four desired plots
par(mfrow=c(2,2))
## Now we create all four individual plots
## Plot 1
plot(subdata$Global_active_power~subdata$Date_time, type="l", xlab="", ylab="Global Active Power", col="black")
## Plot 2
plot(subdata$Voltage~subdata$Date_time, type="l", xlab="datetime", ylab="Voltage")
## Plot 3
plot(subdata$Sub_metering_1~subdata$Date_time, type="l", xlab="", ylab="Energy sub metering", col="black")
lines(subdata$Sub_metering_2~subdata$Date_time, col= "red")
lines(subdata$Sub_metering_3~subdata$Date_time, col= "blue")
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2, legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
## Plot 4
plot(subdata$Global_reactive_power~subdata$Date_time, type="l", xlab="datetime", ylab = "Global_reactive_power", col = "black")
## Closing the device so we can access it
dev.off()
}
|
f89b6c5b1ca204b6e06b4aa39b5382037073b556 | 34cbdcb751f19743216d86473c59d1c635b920ae | /fetching_specific_genes_v2.R | 5a6c440168dc4239b2486cdb5d183a9dccc510d9 | [] | no_license | michbur/extracting_sequences | 59e1c0d39fab8fe29e9d6706fc6823ae58f43fb9 | 55b8ab2cf27061e16766ccc11fa81f760e79309a | refs/heads/master | 2021-05-14T07:14:29.279568 | 2018-01-06T20:51:33 | 2018-01-06T20:51:33 | 116,257,762 | 0 | 0 | null | 2018-01-04T12:30:04 | 2018-01-04T12:30:04 | null | UTF-8 | R | false | false | 3,561 | r | fetching_specific_genes_v2.R | library(dplyr)
library(pbapply)
library(seqinr)
library(stringr)
genomes_path <- "/home/michal/Dokumenty/Genomy_backup/"
genome_files <- list.files(genomes_path)
gbff_file <- genome_files[1]
get_sequence <- function(ith_file, seq_name) {
all_lines <- readLines(paste0(genomes_path, ith_file))
#grep(pattern = "LOCUS ", all_lines, ignore.case = TRUE)
locus_ids <- grep("LOCUS", all_lines)
locus_id <- matrix(c(locus_ids,
locus_ids[-1] - 1, length(all_lines)),
nrow = 2, byrow = TRUE)
all_loci <- lapply(1L:ncol(locus_id), function(ith_locus) {
all_lines[locus_id[1, ith_locus]:locus_id[2, ith_locus]]
})
lapply(all_loci, function(ith_loci_lines) {
if(length(grep(pattern = seq_name, ith_loci_lines)) > 1) {
genome_start <- grep(pattern = "^ 1", ith_loci_lines)
gene_pos <- ith_loci_lines[sapply(grep(pattern = seq_name, ith_loci_lines), function(i)
grep("rRNA", ith_loci_lines[(i - 10):i]) + i - 11)] %>%
strsplit(" ") %>%
sapply(last) %>%
data.frame(raw = ., stringsAsFactors = FALSE) %>%
mutate(complement = grepl("complement", raw),
x1 = as.numeric(sub(pattern = "complement(", replacement = "",
sapply(strsplit(raw, "..", fixed = TRUE), first),
fixed = TRUE)),
x2 = as.numeric(sub(pattern = ")", replacement = "",
sapply(strsplit(raw, "..", fixed = TRUE), last),
fixed = TRUE)))
genes <- lapply(1L:nrow(gene_pos), function(ith_row) {
x1 <- gene_pos[ith_row, "x1"]
x2 <- gene_pos[ith_row, "x2"]
# x1 and x2 converted to ids in the cutted sequence
xx1 <- x1 - floor((x1 - 1)/60)*60
xx2 <- x2 - floor((x1 - 1)/60)*60
ith_loci_lines[(genome_start + floor((x1 - 1)/60)):(genome_start + floor((x2 - 1)/60))] %>%
strsplit("1 ") %>%
lapply(last) %>%
paste0(collapse = "") %>%
gsub(" ", "", .) %>%
substr(xx1, xx2) %>%
strsplit(split = "") %>%
unlist
})
genes[gene_pos[["complement"]]] <- lapply(genes[gene_pos[["complement"]]], function(ith_gene)
rev(comp(ith_gene)))
org_name <- ith_loci_lines[grep(pattern = "ORGANISM", ith_loci_lines)] %>%
strsplit("ORGANISM[ ]*") %>%
sapply(last)
gene_names <- ith_loci_lines[sapply(grep(pattern = seq_name, ith_loci_lines), function(i)
grep("gene=", ith_loci_lines[(i - 10):i])[1] + i - 11)] %>%
strsplit('\"', fixed = TRUE) %>%
sapply(last)
locus_names <- ith_loci_lines[sapply(grep(pattern = seq_name, ith_loci_lines), function(i)
grep("locus_tag=", ith_loci_lines[(i - 10):i])[1] + i - 11)] %>%
strsplit('\"', fixed = TRUE) %>%
sapply(last)
# browser()
# sapply(grep(pattern = seq_name, ith_loci_lines), function(i)
# ith_loci_lines[(i - 10):i])
names(genes) <- paste0(">", org_name, "|", gene_names, "|", locus_names)
sapply(grep(pattern = seq_name, ith_loci_lines), function(i)
ith_loci_lines[(i - 10):i])
genes
}
}) %>%
unlist(recursive = FALSE)
}
seq_rna <- pblapply(genome_files[1L:5], function(ith_file)
get_sequence(ith_file, "16S ribosomal RNA")) %>%
unlist(recursive = FALSE)
|
deb85d5c5ae3851f7e1e9a028860a05543ee69c4 | fcb54c2a955708d84f8ccd233cb07765795e44e5 | /man/find.Rd | c0bbe0b13d5f30271fd94cfab89a3ccdc7349e55 | [
"MIT"
] | permissive | hieuqtran/RVerbalExpressions | b652c9483e1800f11cdfe60b35ffaf7dcc52d4a9 | da44deb3a55cb225f2d827e3a4d7c922c493c68b | refs/heads/master | 2020-04-26T14:06:31.624514 | 2019-03-03T16:23:54 | 2019-03-03T16:23:54 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,015 | rd | find.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/find.R
\name{find}
\alias{find}
\title{Match an expression (alias for \code{then()}).}
\usage{
find(.data = NULL, value)
}
\arguments{
\item{.data}{Expression to append, typically pulled from the pipe \code{ \%>\% }}
\item{value}{Expression to match}
}
\description{
This expression uses a \emph{non capturing group} to identify a
specific pattern when you do not need the group to capture its match. Alias
for then. Meant for semantics when used at the beginning of a verbal
expression. For example, \code{find(value = 'foo')} is more readable than
\code{then(value = 'foo')}.
}
\examples{
find(value = "apple")
# create expression
x <- start_of_line() \%>\%
find("apple") \%>\%
end_of_line()
grepl(x, "apples") # should be false
grepl(x, "apple") # should be true
}
\references{
Non capturing group: \url{https://www.regular-expressions.info/brackets.html}
Stack Overflow: \url{https://stackoverflow.com/questions/3512471}
}
|
7e378a70423664e74dad4e8f25cd74d252d6a823 | 8fd6f63de8b96c2cfdc8cb054cafe69ada8349ab | /app.R | c67a44cbcbd143af506ad2ecd7889c67b07ff676 | [] | no_license | Promintzer/analyzr | 679cf84f96db2754f49d82f62c71668b3a44fe92 | 26efd58a5646dd8c7155bd9d56a1f8208a8762e3 | refs/heads/main | 2023-04-11T20:56:25.516831 | 2021-05-13T21:10:57 | 2021-05-13T21:10:57 | 361,424,508 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 50,833 | r | app.R | ###################################
########### ###########
########### analyzr ###########
########### ###########
###################################
# Libraries
library(shiny)
library(shinydashboard)
library(shinydashboardPlus)
library(ggplot2)
library(ggthemes)
library(ggExtra)
library(DT)
library(sqldf)
library(colourpicker)
library(stringr)
library(rlang)
# Create example data and Workspace
# Example datasets exposed to the user: mtcars is base R; diamonds and
# economics ship with ggplot2 (loaded above).
dat_mtcars <- mtcars
dat_diamonds <- diamonds
dat_economics <- economics
# Snapshot of the objects defined so far (i.e. the three dat_* frames).
# Used as the choice list of every tab's "Data Selection" dropdown, so any
# dataset meant to be selectable must be created before this line.
wslist <- ls()
# Maximum number of tabs and of layers per tab the app supports
tabsnum <- 25
layers <- 10
# Lookup table with one row per (tab, layer) pair. It is consulted by the
# layer-remover and filter-remover observers; the append-layer logic does
# not use it.
layerdat <- local({
  tab_idx <- rep(seq_len(tabsnum), each = layers)
  layer_idx <- rep(seq_len(layers), times = tabsnum)
  data.frame(
    Name = paste0(tab_idx, "_", layer_idx),  # "tab_layer" id suffix
    ID = seq_along(tab_idx),
    Tab = tab_idx,
    Layer = layer_idx,
    Flag_Close = 0,
    stringsAsFactors = FALSE
  )
})
# Themes
# Display name -> ggplot2/ggthemes theme object. The names populate the
# "Theme" dropdown; the selected object is added to the plot as-is.
themes <- list(
  "Clean" = theme_clean(),
  "Dark" = theme_dark(),
  "Economist" = theme_economist(),
  "Excel" = theme_excel(),
  "Void" = theme_void(),
  "WSJ" = theme_wsj()
)
# Color scales
# Display name -> ggthemes colour scale for the "Color Scales" dropdown.
colorscales <- list(
  "Economist" = scale_color_economist(),
  "Excel" = scale_color_excel(),
  "WSJ" = scale_color_wsj()
)
# Fill scales
# Display name -> ggthemes fill scale for the "Fill Scales" dropdown.
fillscales <- list(
  "Economist" = scale_fill_economist(),
  "Excel" = scale_fill_excel(),
  "WSJ" = scale_fill_wsj()
)
# Sidebar width
# Header and sidebar are shrunk to a sliver; the app lives in the body only.
sbwidth <- "20px"
# Functions ----
# shinydashboardPlus::boxSidebar ----
# 1) -> Without the tooltip "More".
boxSidebarNew <- function (..., id = NULL, width = 50, background = "#333a40",
startOpen = FALSE, icon = shiny::icon("cogs"))
{
stopifnot(width >= 25 && width <= 100)
toolbarTag <- shiny::tags$button(id = id, `data-background` = background,
`data-width` = width, `data-widget` = "chat-pane-toggle",
#`data-toggle` = "tooltip", `data-original-title` = "More",
`data-start-open` = tolower(startOpen), type = "button",
icon)
contentTag <- shiny::tags$div(style = "z-index: 1; height: inherit;",
class = "direct-chat-contacts", shiny::tags$ul(class = "contacts-list",
shiny::tags$li(...)))
shiny::tagList(toolbarTag, contentTag)
}
# GG-Handler ----
# Routes one aesthetic ("colour", "fill", "shape", "size", ...) into the right
# slot of a ggplot2 layer:
#   * "**CUSTOM_..." widget values become fixed parameters (geom$aes_params),
#   * anything else becomes a mapped aesthetic (geom$mapping), optionally
#     wrapped in as.factor().
#
# geom       - ggplot2 layer object to modify; returned modified
# type       - aesthetic name, e.g. "colour", "fill", "shape", "size"
# react      - reactive widget value; NULL means "aesthetic not set"
# widgetval  - selected widget value (a column name or a "**CUSTOM_" marker)
# widgetout  - literal value to apply when a custom marker is selected
# session    - shiny session, needed to reset the "As Factor" checkbox
# factorname - inputId of the matching "As Factor" checkbox
# factorval  - TRUE when the mapped column should be coerced to a factor
gghandler <- function(geom, type, react, widgetval, widgetout, session, factorname, factorval, ...){
  # "**CUSTOM_COLOR**", "**CUSTOM_FILL**", ... all share this 9-char prefix.
  is_custom <- substr(widgetval, start = 1, stop = 9) == "**CUSTOM_"

  # Argument should be added if the widget is not NULL or if custom is selected
  if(!is.null(react) || is_custom){
    if(is_custom){
      # Custom literal value: untick the factor checkbox (shape has none)
      # and set the aesthetic as a fixed, unmapped parameter.
      if(type != "shape"){
        updateCheckboxInput(session, inputId = factorname, value = FALSE)
      }
      geom$aes_params[[type]] <- widgetout
    } else {
      # Mapped aesthetic: reuse the quosure environment of an existing
      # mapping (the last non-NULL one) so the new mapping evaluates against
      # the same data mask. seq_along() instead of 1:length() so an empty
      # mapping list is iterated zero times rather than as c(1, 0).
      # NOTE(review): if every mapping is NULL, `adder` is never assigned and
      # as_quosure() errors - presumably a layer always has x/y mapped.
      for(k in seq_along(geom$mapping)){
        if(!is.null(geom$mapping[[k]])){
          adder <- get_env(geom$mapping[[k]])
        }
      }
      adder <- as_quosure(x = str2lang(widgetval), env = adder)
      geom$mapping[[type]] <- adder
    }
  }

  # Wrap the mapped expression in as.factor() when requested (customs never
  # reach this branch).
  if(!is.null(react) && factorval && !is_custom){
    col <- geom$mapping[[type]]
    expr <- quo_get_expr(col)
    expr <- str2lang(paste0("as.factor(", expr, ")"))
    col <- quo_set_expr(quo = col, expr = expr)
    geom$mapping[[type]] <- col
  }

  # Output: the modified layer
  return(geom)
}
# UI ----
# Page shell: header and sidebar are collapsed to ~20px (the app only uses
# the body), which holds the "New Tab" toolbox and an initially empty
# tabsetPanel that the server fills via appendTab().
ui <- dashboardPage(
  # Header
  dashboardHeader(
    # Width
    titleWidth = sbwidth,
    # Height
    # tags$li is only a carrier for the CSS that shrinks the header; the
    # PNG download button is currently disabled.
    tags$li(
      class = "dropdown",
      #downloadButton(outputId = "download_png", label = "PNG"),
      tags$style(".main-header {max-height: 20px}"),
      tags$style(".main-header .logo {height: 20px;}"),
      tags$style(".sidebar-toggle {height: 20px; padding-top: 1px !important;}"),
      tags$style(".navbar {min-height:20px !important}")
    )
  ),
  #dashboardSidebar(disable = TRUE, width = "3px"),
  dashboardSidebar(
    # Width
    width = sbwidth,
    # Hide toggle
    tags$script(JS("document.getElementsByClassName('sidebar-toggle')[0].style.visibility = 'hidden';"))
  ),
  # Body
  dashboardBody(
    # CSS
    # Skin overrides: dark logo/sidebar, light-blue navbar.
    tags$head(tags$style(HTML('
      /* logo */
      .skin-blue .main-header .logo {background-color: #303030;}
      /* logo when hovered */
      .skin-blue .main-header .logo:hover {background-color: #303030;}
      /* navbar (rest of the header) */
      .skin-blue .main-header .navbar {background-color: #87CEFA;}
      /* main sidebar */
      .skin-blue .main-sidebar {background-color: #303030;}
      # /* header height */
      # .skin-blue .main-header {}
      '))),
    # Toolbox
    fluidRow(
      box(id = "toolbox", width = 12, title = NULL,
          # New tab button
          # Each click appends one analysis tab; input$add doubles as the
          # tab counter used to suffix all of that tab's inputIds.
          actionButton("add", "New Tab", icon = icon("plus-circle"))
          # # Save as
          # actionButton("save_as", "Save As", icon = icon("save")),
          #
          # # Open from source
          # actionButton("open", "Open", icon = icon("folder-open"))
      ),
      # Remove header from box
      tags$head(tags$style('#toolbox .box-header{display: none}'))
    ),
    # Tab panel
    fluidRow(
      # Style Tabs
      tags$style(HTML(".tabbable > .nav > li > a {color:black; width: 80px; height: 40px; text-align: center; margin-top:1px; margin-bottom:1px; padding-top:5px; padding-bottom:1px;}")),
      # Empty tabset; tabs are appended/removed dynamically by the server.
      tabsetPanel(id = "plottabs")
    )
  )
)
# Server
server <- function(input, output, session){
# Panel ----
  # UI Adder ----
  # Builds one complete analysis tab per "New Tab" click. input$add is the
  # click count and suffixes every inputId, so tab N owns e.g. data_selectN,
  # addfilterN, plottabboxN, plotN, tableN, summaryN.
  observeEvent(input$add, {
    appendTab(inputId = "plottabs", select = TRUE,
      tab = tabPanel(
        # Title and Close-Button
        title = fluidRow(
          paste0("Tab", input$add),
          actionButton(inputId = paste0("close", input$add), label = NULL, icon = icon("times"), style='font-size:100%; padding-left:1px; padding-right:1px; padding-top:0px; padding-bottom:0px; margin-top:4px; margin-bottom:7px; margin-left:5px; margin-right:5px')
        ),
        # Data Selection
        tags$br(),
        selectInput(inputId = paste0("data_select", input$add), label = "Data Selection", choices = c("", wslist), selected = "", multiple = FALSE),
        # Show only, if data is selected
        conditionalPanel(
          # Condition
          condition = paste0("input.data_select", input$add, "!== ''"),
          # Add Filter
          actionButton(inputId = paste0("addfilter", input$add), label = "Add Filter", icon = icon("filter")),
          # Apply Filter
          actionButton(inputId = paste0("filter_apply", input$add), label = "Apply Filter", icon = icon("check-square")),
          # Filter
          # Empty div target that the filter-row insertUI calls append into.
          fluidRow(
            box(title = "Filter", width = 12, collapsible = TRUE,
              div(id = paste0("filter_div", input$add))
            )
          ),
          # Add Layer
          actionButton(inputId = paste0("addlayer", input$add), label = "Add Layer", icon = icon("layer-group")),
          # Plot Parameters
          # Empty tabBox; the add-layer observer appends one tab per layer.
          fluidRow(
            #tags$style(HTML(".tabbable > .nav > li > a {color:black; width: 80px; height: 40px; text-align: center; margin-top:1px; margin-bottom:1px; padding-top:5px; padding-bottom:1px;}")),
            tabBox(id = paste0("plottabbox", input$add), title = "Plot Parameters", width = 12)
          ),
          # Plot output
          # Somehow the spinner does not disappear after closing the tab, but continues loading on all other tabs.
          # withSpinner(plotOutput(outputId = paste0("plot", input$add)), type = 5, size = 0.5, id = input$add)
          fluidRow(
            box(
              title = "",
              width = 6,
              # Gear sidebar with tab-wide (not per-layer) plot settings.
              sidebar = boxSidebarNew(id = paste0("plotsidebar", input$add), icon = icon("gear"), width = 30,
                #menuItem(text = "Title and Axis", tabName = paste0("ps_titleaxis", input$add)),
                # # collapsible =
                # textInput(inputId = paste0("ps_title", input$add), label = "Title", placeholder = "Title of the plot"),
                # selectInput(inputId = paste0("ps_theme", input$add), label = "Theme", choices = c("<null>", names(themes)), selected = "<null>"),
                # selectInput(inputId = paste0("ps_colorsc", input$add), label = "Color Scales", choices = c("<null>", names(colorscales)), selected = "<null>"),
                # selectInput(inputId = paste0("plot_facet", input$add), label = "Multiple Plots by", choices = NULL)
                # )
                # conditionalPanel(
                # condition = paste0("input.plotsidebar", input$add, " == 'ps_titleaxis", input$add, "'"),
                # Title and Axis label
                textInput(inputId = paste0("ps_title", input$add), label = "Title", placeholder = "Title of the plot"),
                textInput(inputId = paste0("ps_xlab", input$add), label = "X-Axis Label", placeholder = "Label for X-Axis"),
                textInput(inputId = paste0("ps_ylab", input$add), label = "Y-Axis Label", placeholder = "Label for Y-Axis"),
                # Themes, color-scales, fill-scales
                tags$hr(style="border-color: gray"),
                selectInput(inputId = paste0("ps_theme", input$add), label = "Theme", choices = c("<null>", names(themes)), selected = "<null>"),
                selectInput(inputId = paste0("ps_colorsc", input$add), label = "Color Scales", choices = c("<null>", names(colorscales)), selected = "<null>"),
                selectInput(inputId = paste0("ps_fillsc", input$add), label = "Fill Scales", choices = c("<null>", names(fillscales)), selected = "<null>"),
                # Facet
                tags$hr(style="border-color: gray"),
                selectInput(inputId = paste0("plot_facet", input$add), label = "Multiple Plots by", choices = NULL),
                # Marginal
                tags$hr(style="border-color: gray"),
                checkboxInput(inputId = paste0("marginal_check", input$add), label = "Marginal Graphs", value = FALSE),
                selectInput(inputId = paste0("marginal_type", input$add), label = "Marginal Plot Type", choices = c("density","histogram","boxplot","violin","densigram"), selected = "histogram"),
                selectInput(inputId = paste0("marginal_axis", input$add), label = "Show on Axis", choices = c("both", "x", "y"), selected = "both"),
                numericInput(inputId = paste0("marginal_size", input$add), label = "Size Main vs Marginal", value = 5, min = 1, max = 5, step = 1),
                checkboxInput(inputId = paste0("marginal_color", input$add), label = "Marginal Color", value = FALSE),
                checkboxInput(inputId = paste0("marginal_fill", input$add), label = "Marginal Fill", value = FALSE),
                # )
              ),
              plotOutput(outputId = paste0("plot", input$add))
            ),
            # Right-hand tab-box: raw data table and summary() output.
            tabBox(id = paste0("tabbox", input$add), width = 6,
              tabPanel(title = "Data", div(style = 'overflow-x: scroll; overflow-y: scroll', dataTableOutput(outputId = paste0("table", input$add)))),
              tabPanel(title = "Summary", verbatimTextOutput(outputId = paste0("summary", input$add)))
            )
          )
        )
      )
    )
  })
# UI Remover ----
lapply(1:tabsnum, FUN = function(i){
observeEvent(input[[paste0("close", i)]], {
print(paste0("close", i))
removeTab(inputId = "plottabs", target = input$plottabs)
})
})
  # UI Add Layer ----
  # i -> tab-count, addlayer-i -> layer-button-count
  # One observer per potential tab; each "Add Layer" click appends a layer
  # tab ("LayerN") to tab i's plot-parameter tabBox. All layer inputIds are
  # suffixed "i_N". The conditionalPanels below show/hide widgets depending
  # on the chosen plot type and on "**CUSTOM_*" selections.
  lapply(1:tabsnum, FUN = function(i){
    observeEvent(input[[paste0("addlayer", i)]],{
      # Print
      print(paste0("add layer", i, "_", input[[paste0("addlayer", i)]]))
      # Append Tab
      appendTab(inputId = paste0("plottabbox", i), select = TRUE, tab = tabPanel(
        # Title
        title = fluidRow(
          paste0("Layer", input[[paste0("addlayer", i)]]),
          actionButton(inputId = paste0("closelayer", i, "_", input[[paste0("addlayer", i)]]), label = NULL, icon = icon("times"), style='font-size:100%; padding-left:1px; padding-right:1px; padding-top:0px; padding-bottom:0px; margin-top:4px; margin-bottom:7px; margin-left:5px; margin-right:5px')
        ),
        # # Title and Close-Button
        # title = fluidRow(
        #   paste0("Tab", input$add),
        #   actionButton(inputId = paste0("close", input$add), label = NULL, icon = icon("times"), style='font-size:100%; padding-left:1px; padding-right:1px; padding-top:0px; padding-bottom:0px; margin-top:4px; margin-bottom:7px; margin-left:5px; margin-right:5px')
        # ),
        # Plot Parameters
        fluidRow(
          # Column 1: plot type and axis pickers (axes hidden where the type
          # does not need them, e.g. no Y for Histogram/Density).
          column(width = 4,
            # Plot Type
            selectInput(inputId = paste0("plottype", i, "_", input[[paste0("addlayer", i)]]), label = "Plot Type", choices = NULL),
            # Y-Axis
            conditionalPanel(
              condition = paste0("input.plottype", i, "_", input[[paste0("addlayer", i)]], "!== '<null>' && input.plottype", i, "_", input[[paste0("addlayer", i)]], "!== 'Histogram' && input.plottype", i, "_", input[[paste0("addlayer", i)]], "!== 'Density'"),
              selectInput(inputId = paste0("axisy_select", i, "_", input[[paste0("addlayer", i)]]), label = "Y-Axis", choices = NULL),
            ),
            # X-Axis
            conditionalPanel(
              condition = paste0("input.plottype", i, "_", input[[paste0("addlayer", i)]], "!== '<null>' && input.plottype", i, "_", input[[paste0("addlayer", i)]], "!== 'Boxplot'"),
              selectInput(inputId = paste0("axisx_select", i, "_", input[[paste0("addlayer", i)]]), label = "X-Axis", choices = NULL)
            )
          ),
          # Column 2: aesthetic mappings (color/fill/shape/size, position,
          # line type) and the smoother's options.
          column(width = 4,
            # Color
            conditionalPanel(
              condition = paste0("input.plottype", i, "_", input[[paste0("addlayer", i)]], "== 'Scatter' || input.plottype", i, "_", input[[paste0("addlayer", i)]], "== 'Smooth' || input.plottype", i, "_", input[[paste0("addlayer", i)]], "== 'Line' || input.plottype", i, "_", input[[paste0("addlayer", i)]], "== 'Histogram' || input.plottype", i, "_", input[[paste0("addlayer", i)]], "== 'Bar' || input.plottype", i, "_", input[[paste0("addlayer", i)]], "== 'Boxplot' || input.plottype", i, "_", input[[paste0("addlayer", i)]], "== 'Area' || input.plottype", i, "_", input[[paste0("addlayer", i)]], "== 'Density'"),
              tags$div(style = 'display: inline-block; vertical-align:middle; width: 250px;', selectInput(inputId = paste0("plot_color", i, "_", input[[paste0("addlayer", i)]]), label = "Color by", choices = NULL)),
              tags$div(style = 'display: inline-block; vertical-align:middle; width: 150px;', checkboxInput(inputId = paste0("plot_color_factor", i, "_", input[[paste0("addlayer", i)]]), label = "As Factor", value = FALSE))
            ),
            # Fill
            conditionalPanel(
              condition = paste0("input.plottype", i, "_", input[[paste0("addlayer", i)]], " == 'Bar' || input.plottype", i, "_", input[[paste0("addlayer", i)]], "== 'Histogram' || input.plottype", i, "_", input[[paste0("addlayer", i)]], "== 'Boxplot' || input.plottype", i, "_", input[[paste0("addlayer", i)]], "== 'Area' || input.plottype", i, "_", input[[paste0("addlayer", i)]], "== 'Density'"),
              tags$div(style = 'display: inline-block; vertical-align:middle; width: 250px;', selectInput(inputId = paste0("plot_fill", i, "_", input[[paste0("addlayer", i)]]), label = "Fill by", choices = NULL)),
              tags$div(style = 'display: inline-block; vertical-align:middle; width: 150px;', checkboxInput(inputId = paste0("plot_fill_factor", i, "_", input[[paste0("addlayer", i)]]), label = "As Factor", value = FALSE))
            ),
            # Shape
            conditionalPanel(
              condition = paste0("input.plottype", i, "_", input[[paste0("addlayer", i)]], " == 'Scatter'"),
              selectInput(inputId = paste0("plot_shape", i, "_", input[[paste0("addlayer", i)]]), label = "Shape by", choices = NULL)
            ),
            # Line Type
            conditionalPanel(
              condition = paste0("input.plottype", i, "_", input[[paste0("addlayer", i)]], " == 'Line' || input.plottype", i, "_", input[[paste0("addlayer", i)]], "== 'Density'"),
              selectInput(inputId = paste0("plot_linetype", i, "_", input[[paste0("addlayer", i)]]), label = "Line Type", choices = c("solid", "blank", "dashed", "dotted", "dotdash", "longdash", "twodash"), selected = "solid")
            ),
            # Size
            conditionalPanel(
              condition = paste0("input.plottype", i, "_", input[[paste0("addlayer", i)]], " == 'Scatter' || input.plottype", i, "_", input[[paste0("addlayer", i)]], "== 'Line'"),
              tags$div(style = 'display: inline-block; vertical-align:middle; width: 250px;', selectInput(inputId = paste0("plot_size", i, "_", input[[paste0("addlayer", i)]]), label = "Size by", choices = NULL)),
              tags$div(style = 'display: inline-block; vertical-align:middle; width: 150px;', checkboxInput(inputId = paste0("plot_size_factor", i, "_", input[[paste0("addlayer", i)]]), label = "As Factor", value = FALSE))
            ),
            # Position
            conditionalPanel(
              condition = paste0("input.plottype", i, "_", input[[paste0("addlayer", i)]], " == 'Bar' || input.plottype", i, "_", input[[paste0("addlayer", i)]], "== 'Histogram'"),
              selectInput(inputId = paste0("plot_position", i, "_", input[[paste0("addlayer", i)]]), label = "Position", choices = c("Stack","Dodge","Fill"), selected = "Stack")
            ),
            # Smooth
            conditionalPanel(
              condition = paste0("input.plottype", i, "_", input[[paste0("addlayer", i)]], " == 'Smooth'"),
              selectInput(inputId = paste0("smooth_method", i, "_", input[[paste0("addlayer", i)]]), label = "Method", choices = c("lm","loess","gam"), selected = "lm"),
              checkboxInput(inputId = paste0("smooth_se", i, "_", input[[paste0("addlayer", i)]]), label = "Confidence Int.", value = TRUE),
              numericInput(inputId = paste0("smooth_level", i, "_", input[[paste0("addlayer", i)]]), label = "Confidence Int. - Level", value = 0.95, min = 0.05, max = 0.99, step = 0.05),
              conditionalPanel(
                condition = paste0("input.smooth_method", i, "_", input[[paste0("addlayer", i)]], " == 'loess'"),
                numericInput(inputId = paste0("smooth_span", i, "_", input[[paste0("addlayer", i)]]), label = "Span", value = 0.50, min = 0.05, max = 1, step = 0.05)
              )
            )
          ),
          # Column 3: type-specific extras and the "custom value" pickers
          # shown when a "**CUSTOM_*" choice is selected in column 2.
          column(width = 4,
            # Histogram
            conditionalPanel(
              condition = paste0("input.plottype", i, "_", input[[paste0("addlayer", i)]], " == 'Histogram'"),
              numericInput(inputId = paste0("histogram_bins", i, "_", input[[paste0("addlayer", i)]]), label = "Bins", value = 30, step = 1)
            ),
            # Kernel density method
            conditionalPanel(
              condition = paste0("input.plottype", i, "_", input[[paste0("addlayer", i)]], " == 'Density'"),
              selectInput(inputId = paste0("density_method", i, "_", input[[paste0("addlayer", i)]]), label = "Method", choices = c("gaussian","epanechnikov","rectangular","triangular","biweight","cosine","optcosine"), selected = "gaussian")
            ),
            # Color Colorpicker
            conditionalPanel(
              condition = paste0("input.plottype", i, "_", input[[paste0("addlayer", i)]], "== 'Scatter' || input.plottype", i, "_", input[[paste0("addlayer", i)]], "== 'Smooth' || input.plottype", i, "_", input[[paste0("addlayer", i)]], "== 'Line' || input.plottype", i, "_", input[[paste0("addlayer", i)]], "== 'Histogram' || input.plottype", i, "_", input[[paste0("addlayer", i)]], "== 'Bar' || input.plottype", i, "_", input[[paste0("addlayer", i)]], "== 'Boxplot' || input.plottype", i, "_", input[[paste0("addlayer", i)]], "== 'Area' || input.plottype", i, "_", input[[paste0("addlayer", i)]], "== 'Density'"),
              conditionalPanel(
                condition = paste0("input.plot_color", i, "_", input[[paste0("addlayer", i)]], "== '**CUSTOM_COLOR**'"),
                colourInput(inputId = paste0("plot_color_picker", i, "_", input[[paste0("addlayer", i)]]), label = "Custom Color", value = "black", allowTransparent = TRUE)
              )
            ),
            # Fill Colorpicker
            conditionalPanel(
              condition = paste0("input.plottype", i, "_", input[[paste0("addlayer", i)]], " == 'Bar' || input.plottype", i, "_", input[[paste0("addlayer", i)]], "== 'Histogram' || input.plottype", i, "_", input[[paste0("addlayer", i)]], "== 'Boxplot' || input.plottype", i, "_", input[[paste0("addlayer", i)]], "== 'Area' || input.plottype", i, "_", input[[paste0("addlayer", i)]], "== 'Density'"),
              conditionalPanel(
                condition = paste0("input.plot_fill", i, "_", input[[paste0("addlayer", i)]], "== '**CUSTOM_FILL**'"),
                colourInput(inputId = paste0("plot_fill_picker", i, "_", input[[paste0("addlayer", i)]]), label = "Custom Fill", value = "black", allowTransparent = TRUE)
              )
            ),
            # Shape Custom
            conditionalPanel(
              condition = paste0("input.plottype", i, "_", input[[paste0("addlayer", i)]], " == 'Scatter'"),
              conditionalPanel(
                condition = paste0("input.plot_shape", i, "_", input[[paste0("addlayer", i)]], "== '**CUSTOM_SHAPE**'"),
                numericInput(inputId = paste0("plot_shape_picker", i, "_", input[[paste0("addlayer", i)]]), label = "Custom Shape", value = 19, min = 0, max = 25, step = 1)
              )
            ),
            # Size Custom
            conditionalPanel(
              condition = paste0("input.plottype", i, "_", input[[paste0("addlayer", i)]], " == 'Scatter'"),
              conditionalPanel(
                condition = paste0("input.plot_size", i, "_", input[[paste0("addlayer", i)]], "== '**CUSTOM_SIZE**'"),
                numericInput(inputId = paste0("plot_size_picker", i, "_", input[[paste0("addlayer", i)]]), label = "Custom Size", value = 2)
              )
            )
          )
        )
      ))
    })
  })
# UI Layer Remover ----
removerr <- reactiveValues()
lapply(1:nrow(layerdat), FUN = function(i){
observeEvent(input[[paste0("closelayer", layerdat$Name[i])]], {
# Print info
print(paste0("closelayer", layerdat$Name[i]))
# Save removed layers in reactive value
removerr[[ paste0(layerdat$Tab[i]) ]] <- c(removerr[[ paste0(layerdat$Tab[i]) ]], layerdat$Layer[i])
# Remove tab
#print(input[[paste0("plottabbox", layerdat$Tab[i])]])
removeTab(inputId = paste0("plottabbox", layerdat$Tab[i]), target = input[[paste0("plottabbox", layerdat$Tab[i])]])
})
})
  # UI Filter ----
  # Each "Add Filter" click on tab i inserts one filter row (logical op,
  # column, comparison operator, value, remove button) into that tab's
  # filter_div. Row widgets are suffixed "i_<click count>". The outer div id
  # exists so the row could be removed by selector later.
  lapply(1:tabsnum, FUN = function(i){
    observeEvent(input[[paste0("addfilter", i)]], {
      insertUI(
        # Location of the filter
        selector = paste0("#filter_div", i),
        # UI, - inside div to make removing easier
        ui = div(id = paste0("filter_div_remover", i, "_", input[[paste0("addfilter", i)]]),
          fluidRow(
            # Logical Operator
            # Only shown from the second row on (first row needs no and/or).
            column(width = 1,
              conditionalPanel(
                condition = paste0(input[[paste0("addfilter", i)]], " > 1"),
                selectInput(inputId = paste0("filter_oplog", i, "_", input[[paste0("addfilter", i)]]), label = paste0("Logical Op ", input[[paste0("addfilter", i)]]), choices = c("<null>", "and", "or"), selected = "<null>")
              )
            ),
            #
            column(width = 4,
              selectInput(inputId = paste0("filter", i, "_", input[[paste0("addfilter", i)]]), label = paste0("Filter ", input[[paste0("addfilter", i)]]), choices = c("<null>", colnames(get(input[[paste0("data_select", i)]]))), selected = "<null>")
            ),
            column(width = 1,
              selectInput(inputId = paste0("filter_operator", i, "_", input[[paste0("addfilter", i)]]), label = paste0("Operator ", input[[paste0("addfilter", i)]]), choices = c("<null>", "=", "<>", "<", ">", "<=", ">="), selected = "<null>")
            ),
            column(width = 4,
              textInput(inputId = paste0("filter_value", i, "_", input[[paste0("addfilter", i)]]), label = paste0("Value ", input[[paste0("addfilter", i)]]), value = NULL, placeholder = "use quotes for characters, e.g.: 'character'")
            ),
            # Remove button for this row (handled by the observer below,
            # which resets the row's widgets rather than deleting the UI).
            column(width = 1,
              actionButton(inputId = paste0("filter_remover", i, "_", input[[paste0("addfilter", i)]]), label = NULL, icon = icon("times"), style = 'margin-top:25px; margin-left:-10px')
            )
          )
        )
      )
    })
  })
  # UI Filter Update / Remover ----
  # Clicking a row's "x" button RESETS that filter row to "<null>"/"" instead
  # of deleting its UI: the removeUI call is intentionally commented out (the
  # div id filter_div_remover exists for it, should it be re-enabled).
  lapply(1:nrow(layerdat), FUN = function(i){
    observeEvent(input[[paste0("filter_remover", layerdat$Name[i])]], {
      # Update
      updateSelectInput(session, inputId = paste0("filter_oplog", layerdat$Name[i]), selected = "<null>")
      updateSelectInput(session, inputId = paste0("filter", layerdat$Name[i]), choices = c("<null>", colnames(get(input[[paste0("data_select", layerdat$Tab[i])]]))), selected = "<null>")
      updateSelectInput(session, inputId = paste0("filter_operator", layerdat$Name[i]), selected = "<null>")
      updateTextInput(session, inputId = paste0("filter_value", layerdat$Name[i]), value = "")
      # # Remove
      # removeUI(
      #   selector = paste0("div#filter_div_remover", layerdat$Name[i]),
      #   multiple = TRUE
      # )
    })
  })
  # Update widgets data_select ----
  # When tab i's dataset changes, reset the tab-wide plot settings (facet
  # choices come from the new data's columns; everything else back to its
  # default) so options from the previous dataset do not carry over.
  lapply(1:tabsnum, FUN = function(i){
    observeEvent(input[[paste0("data_select", i)]], {
      if(!is.null(input[[paste0("data_select", i)]])){
        if(input[[paste0("data_select", i)]] != ""){
          print("update widgets: data")
          # Facet
          updateSelectInput(session, inputId = paste0("plot_facet", i), choices = c("<null>", colnames(get(input[[paste0("data_select", i)]]))), selected = "<null>")
          # Theme & Colorscales
          updateSelectInput(session, inputId = paste0("ps_theme", i), selected = "<null>")
          updateSelectInput(session, inputId = paste0("ps_colorsc", i), selected = "<null>")
          updateSelectInput(session, inputId = paste0("ps_fillsc", i), selected = "<null>")
          # Title & Axis labels
          updateTextInput(session, inputId = paste0("ps_title", i), value = "")
          updateTextInput(session, inputId = paste0("ps_xlab", i), value = "")
          updateTextInput(session, inputId = paste0("ps_ylab", i), value = "")
          # Marginal
          updateCheckboxInput(session, inputId = paste0("marginal_check", i), value = FALSE)
          updateSelectInput(session, inputId = paste0("marginal_type", i), selected = "histogram")
          updateSelectInput(session, inputId = paste0("marginal_axis", i), selected = "both")
          updateNumericInput(session, inputId = paste0("marginal_size", i), value = 5)
          updateCheckboxInput(session, inputId = paste0("marginal_color", i), value = FALSE)
          updateCheckboxInput(session, inputId = paste0("marginal_fill", i), value = FALSE)
        }
      }
    })
  })
  # Update widgets data_select OR layer ----
  # Resets the CURRENT layer's widgets whenever the dataset changes or a new
  # layer is added. The combined reactive fires on either event; the layer
  # number is taken from the addlayer click counter, so only the newest
  # (just-appended) layer tab is (re)initialised here.
  lapply(1:tabsnum, FUN = function(i){
    data_or_layer <- reactive(list(input[[paste0("data_select", i)]], input[[paste0("addlayer", i)]]))
    observeEvent(data_or_layer(), {
      if(!is.null(input[[paste0("data_select", i)]])){
        if(input[[paste0("data_select", i)]] != ""){
          print("update widgets: data or layer")
          # Plot Type
          updateSelectInput(session, inputId = paste0("plottype", i, "_", input[[paste0("addlayer", i)]]), choices = c("<null>","Scatter", "Bar", "Line", "Area", "Histogram", "Density", "Boxplot", "Smooth"), selected = "<null>")
          # Axis
          updateSelectInput(session, inputId = paste0("axisy_select", i, "_", input[[paste0("addlayer", i)]]), choices = c("<null>", colnames(get(input[[paste0("data_select", i)]]))), selected = "<null>")
          updateSelectInput(session, inputId = paste0("axisx_select", i, "_", input[[paste0("addlayer", i)]]), choices = c("<null>", colnames(get(input[[paste0("data_select", i)]]))), selected = "<null>")
          # Scatter
          updateSelectInput(session, inputId = paste0("plot_color", i, "_", input[[paste0("addlayer", i)]]), choices = c("<null>", "**CUSTOM_COLOR**", colnames(get(input[[paste0("data_select", i)]]))), selected = "<null>")
          updateSelectInput(session, inputId = paste0("plot_shape", i, "_", input[[paste0("addlayer", i)]]), choices = c("<null>", "**CUSTOM_SHAPE**", colnames(get(input[[paste0("data_select", i)]]))), selected = "<null>")
          updateSelectInput(session, inputId = paste0("plot_size", i, "_", input[[paste0("addlayer", i)]]), choices = c("<null>", "**CUSTOM_SIZE**", colnames(get(input[[paste0("data_select", i)]]))), selected = "<null>")
          # Bar
          updateSelectInput(session, inputId = paste0("plot_fill", i, "_", input[[paste0("addlayer", i)]]), choices = c("<null>", "**CUSTOM_FILL**", colnames(get(input[[paste0("data_select", i)]]))), selected = "<null>")
          updateSelectInput(session, inputId = paste0("plot_position", i, "_", input[[paste0("addlayer", i)]]), selected = "Stack")
          # Smooth
          updateSelectInput(session, inputId = paste0("smooth_method", i, "_", input[[paste0("addlayer", i)]]), selected = "lm")
          updateCheckboxInput(session, inputId = paste0("smooth_se", i, "_", input[[paste0("addlayer", i)]]), value = TRUE)
          updateNumericInput(session, inputId = paste0("smooth_level", i, "_", input[[paste0("addlayer", i)]]), value = 0.95)
          updateNumericInput(session, inputId = paste0("smooth_span", i, "_", input[[paste0("addlayer", i)]]), value = 0.50)
          # Line
          updateSelectInput(session, inputId = paste0("plot_linetype", i, "_", input[[paste0("addlayer", i)]]), selected = "solid")
          # Histogram
          updateNumericInput(session, inputId = paste0("histogram_bins", i, "_", input[[paste0("addlayer", i)]]), value = 30)
          # Density
          updateSelectInput(session, inputId = paste0("density_method", i, "_", input[[paste0("addlayer", i)]]), selected = "gaussian")
        }
      }
    })
  })
# Update widgets filter data (update all filters) ----
lapply(1:tabsnum, FUN = function(i){
observeEvent(input[[paste0("data_select", i)]], {
if(!is.null(input[[paste0("data_select", i)]])){
if(input[[paste0("data_select", i)]] != ""){
print("update widgets: filter all")
lapply(1:input[[paste0("addfilter", i)]], FUN = function(j){
#Filter
updateSelectInput(session, inputId = paste0("filter_oplog", i, "_", j), selected = "<null>")
updateSelectInput(session, inputId = paste0("filter", i, "_", j), choices = c("<null>", colnames(get(input[[paste0("data_select", i)]]))), selected = "<null>")
updateSelectInput(session, inputId = paste0("filter_operator", i, "_", j), selected = "<null>")
updateTextInput(session, inputId = paste0("filter_value", i, "_", j), value = "")
})
}
}
})
})
# Plot Output ----
lapply(1:tabsnum, FUN = function(i){
plotid <- paste0("plot", i)
output[[plotid]] <- renderPlot({
# Reactive values for the plot - functions ----
# Function to make checks on input
checker <- function(data, inputid, inputid_f){
if(!is.null(inputid)){
if(inputid != ""){
if(inputid != "<null>"){
if(inputid %in% colnames(datr())){
# if(inputid_f){
# as.factor(data()[,inputid])
# } else {
# data()[,inputid]
# }
if(inputid_f){
inputid
} else {
inputid
}
}}}}
}
# Function to make checks on input, list
checker_list <- function(list, inputid){
if(!is.null(inputid)){
if(inputid != ""){
if(inputid != "<null>"){
list[[ inputid ]]
}}}
}
# Function to make checks on filter
checker_f <- function(inputid){
if(!is.null(inputid)){
if(inputid != ""){
if(inputid != "<null>"){
inputid
}}}
}
# Checker Facet
checker_facet <- function(data, inputid){
if(!is.null(inputid)){if(inputid != ""){if(inputid != "<null>"){if(inputid %in% colnames(datr())){
data()[,inputid]
}}}}
}
# Reactive values for the plot ----
# Data: If data is selected and apply filter
# datr <- reactive({
# if(!is.null(input[[paste0("data_select", i)]])){
# if(input[[paste0("data_select", i)]] != ""){
# as.data.frame(get(input[[paste0("data_select", i)]]))
# }}
# })
# Filter - Reactive Values
      # sql1..sql3 are the fixed pieces of the SELECT statement; sql4..sql7
      # hold, per filter row j of tab i, the logical operator, column,
      # comparison operator and comparison value typed into the UI.
      sql1 <- "select * from "
      sql2 <- input[[paste0("data_select", i)]]
      sql3 <- " where "
      sql4 <- reactiveValues()
      sql5 <- reactiveValues()
      sql6 <- reactiveValues()
      sql7 <- reactiveValues()
      # datr() re-evaluates when either the selected data set or the
      # "apply filter" button changes; it returns the (possibly filtered)
      # data frame obtained by running the assembled SQL through sqldf.
      data_or_filterapply <- reactive(list(input[[paste0("data_select", i)]], input[[paste0("filter_apply", i)]]))
      datr <- eventReactive(data_or_filterapply(), {
        # Collect all filters into one WHERE clause fragment per filter row
        filter_collect <- lapply(1:input[[paste0("addfilter", i)]], FUN = function(j){
          sql4[[paste0(i, "_", j)]] <- checker_f(inputid = input[[paste0("filter_oplog", i, "_", j)]])
          sql5[[paste0(i, "_", j)]] <- checker_f(inputid = input[[paste0("filter", i, "_", j)]])
          sql6[[paste0(i, "_", j)]] <- checker_f(inputid = input[[paste0("filter_operator", i, "_", j)]])
          sql7[[paste0(i, "_", j)]] <- checker_f(inputid = input[[paste0("filter_value", i, "_", j)]])
          # First row has no AND/OR prefix; later rows are glued with theirs
          if(is.null(sql4[[paste0(i, "_", j)]])){
            paste0(sql5[[paste0(i, "_", j)]], sql6[[paste0(i, "_", j)]], sql7[[paste0(i, "_", j)]])
          } else {
            paste0(sql4[[paste0(i, "_", j)]], " ", sql5[[paste0(i, "_", j)]], sql6[[paste0(i, "_", j)]], sql7[[paste0(i, "_", j)]])
          }
        })
        # Remove empty elements (filter rows that are not fully specified)
        filter_collect <- filter_collect[lengths(filter_collect) > 0L]
        # Create final sql
        # If data does not exist, at start up.
        # This is just a workaround. Final solution should be debounce, or
        # other conditioning.
        if(sql2 == ""){
          sql_final <- "select * from mtcars"
          # If filter is empty or first filter does not exist
        } else if(length(filter_collect) == 0 | is.null(sql5[[paste0(i, "_1")]])){
          sql_final <- paste0(sql1, sql2)
          # If first filter is not set to one of the colnames (change of
          # data). Alternative: debounce.
        } else if(!(sql5[[paste0(i, "_1")]] %in% colnames(as.data.frame(get(input[[paste0("data_select", i)]]))))){
          sql_final <- paste0(sql1, sql2)
          # Apply Filter
        } else {
          sql_final <- paste0(sql1, sql2, sql3, paste0(filter_collect, collapse = " "))
        }
        # Empty sqldf's internal state, then run the final query
        sqldf()
        sqldf(sql_final)
      })
#print(head(datr()))
# Facet
      # Facet column (a vector from the data, or NULL when unselected)
      plot_facetr <- reactive({checker_facet(data = datr, inputid = input[[paste0("plot_facet", i)]])})
      # Theme object looked up from the global `themes` list
      plot_themer <- reactive({checker_list(list = themes, inputid = input[[paste0("ps_theme", i)]])})
      # Colour scale looked up from the global `colorscales` list
      plot_colorscr <- reactive({checker_list(list = colorscales, inputid = input[[paste0("ps_colorsc", i)]])})
      # Fill scale looked up from the global `fillscales` list
      plot_fillscr <- reactive({checker_list(list = fillscales, inputid = input[[paste0("ps_fillsc", i)]])})
# Plot construction ----
      # Base plot: data only; geoms/layers are added from `adder` below
      p1 <- ggplot(data = datr())
      # Per-layer aesthetic column names, keyed by "i_j" (tab_layer)
      axisxr <- reactiveValues()
      axisyr <- reactiveValues()
      plot_colorr <- reactiveValues()
      plot_shaper <- reactiveValues()
      plot_sizer <- reactiveValues()
      plot_fillr <- reactiveValues()
# Geoms to be added
adder <- lapply(1:input[[paste0("addlayer", i)]], FUN = function(j){
# Set reactive values
axisxr[[paste0(i, "_", j)]] <- checker(data = datr, inputid = input[[paste0("axisx_select", i, "_", j)]], inputid_f = FALSE)
axisyr[[paste0(i, "_", j)]] <- checker(data = datr, inputid = input[[paste0("axisy_select", i, "_", j)]], inputid_f = FALSE)
plot_colorr[[paste0(i, "_", j)]] <- checker(data = datr, inputid = input[[paste0("plot_color", i, "_", j)]], inputid_f = input[[paste0("plot_color_factor", i, "_", j)]])
plot_shaper[[paste0(i, "_", j)]] <- checker(data = datr, inputid = input[[paste0("plot_shape", i, "_", j)]], inputid_f = TRUE)
plot_sizer[[paste0(i, "_", j)]] <- checker(data = datr, inputid = input[[paste0("plot_size", i, "_", j)]], inputid_f = input[[paste0("plot_size_factor", i, "_", j)]])
plot_fillr[[paste0(i, "_", j)]] <- checker(data = datr, inputid = input[[paste0("plot_fill", i, "_", j)]], inputid_f = input[[paste0("plot_fill_factor", i, "_", j)]])
# Print (debug)
# print(paste0("plottype", i, "_", j))
# print(input[[paste0("plottype", i, "_", j)]])
# print(axisyr[[paste0(i, "_", j)]])
# print(axisxr[[paste0(i, "_", j)]])
# print(plot_colorr[[paste0(i, "_", j)]])
#############################################################
# Message for adding FIRST layer
if(j == 0){
geomeval <- ggtitle(paste0("<Press 'Add Layer'-Button to add first layer.>"))
} else
#############################################################
# Scatter
if(
input[[paste0("plottype", i, "_", j)]] == "Scatter" &&
!is.null(axisxr[[paste0(i, "_", j)]]) &&
!is.null(axisyr[[paste0(i, "_", j)]])
){
# Make geom
geomeval <- geom_point(
aes_string(
x = axisxr[[paste0(i, "_", j)]],
y = axisyr[[paste0(i, "_", j)]],
shape = plot_shaper[[paste0(i, "_", j)]],
size = plot_sizer[[paste0(i, "_", j)]]
)
)
} else
#############################################################
# Bar - X & Y-Axis
if(
input[[paste0("plottype", i, "_", j)]] == "Bar" &&
!is.null(axisyr[[paste0(i, "_", j)]]) &&
!is.null(axisxr[[paste0(i, "_", j)]])
){
geomeval <- geom_bar(
aes_string(
x = axisxr[[paste0(i, "_", j)]],
y = axisyr[[paste0(i, "_", j)]]
),
position = input[[paste0("plot_position", i, "_", j)]],
stat = "identity"
)
} else
#############################################################
# Bar - Y-Axis
if(
input[[paste0("plottype", i, "_", j)]] == "Bar" &&
!is.null(axisyr[[paste0(i, "_", j)]])
){
geomeval <- geom_bar(
aes_string(
y = axisyr[[paste0(i, "_", j)]]
),
position = input[[paste0("plot_position", i, "_", j)]]
)
} else
#############################################################
# Bar - X-Axis
if(
input[[paste0("plottype", i, "_", j)]] == "Bar" &&
!is.null(axisxr[[paste0(i, "_", j)]])
){
geomeval <- geom_bar(
aes_string(
x = axisxr[[paste0(i, "_", j)]]
),
position = input[[paste0("plot_position", i, "_", j)]]
)
} else
#############################################################
# Area - X & Y-Axis
if(
input[[paste0("plottype", i, "_", j)]] == "Area" &&
!is.null(axisyr[[paste0(i, "_", j)]]) &&
!is.null(axisxr[[paste0(i, "_", j)]])
){
geomeval <- geom_area(
aes_string(
x = axisxr[[paste0(i, "_", j)]],
y = axisyr[[paste0(i, "_", j)]]
)
)
} else
#############################################################
# Area - Y-Axis
if(
input[[paste0("plottype", i, "_", j)]] == "Area" &&
!is.null(axisyr[[paste0(i, "_", j)]])
){
geomeval <- geom_area(
aes_string(
y = axisyr[[paste0(i, "_", j)]]
),
stat = "bin"
)
} else
#############################################################
# Area - X-Axis
if(
input[[paste0("plottype", i, "_", j)]] == "Area" &&
!is.null(axisxr[[paste0(i, "_", j)]])
){
geomeval <- geom_area(
aes_string(
x = axisxr[[paste0(i, "_", j)]]
),
stat = "bin"
)
} else
#############################################################
# Line
if(
input[[paste0("plottype", i, "_", j)]] == "Line" &&
!is.null(axisyr[[paste0(i, "_", j)]]) &&
!is.null(axisxr[[paste0(i, "_", j)]])
){
geomeval <- geom_line(
aes_string(
x = axisxr[[paste0(i, "_", j)]],
y = axisyr[[paste0(i, "_", j)]],
size = plot_sizer[[paste0(i, "_", j)]]
),
linetype = input[[paste0("plot_linetype", i, "_", j)]],
)
} else
#############################################################
# Histogram
if(
input[[paste0("plottype", i, "_", j)]] == "Histogram" &&
!is.null(axisxr[[paste0(i, "_", j)]])
){
geomeval <- geom_histogram(
aes_string(
x = axisxr[[paste0(i, "_", j)]]
),
position = input[[paste0("plot_position", i, "_", j)]],
bins = input[[paste0("histogram_bins", i, "_", j)]]
)
} else
#############################################################
# Density
if(
input[[paste0("plottype", i, "_", j)]] == "Density" &&
!is.null(axisxr[[paste0(i, "_", j)]])
){
geomeval <- geom_density(
aes_string(
x = axisxr[[paste0(i, "_", j)]]
),
linetype = input[[paste0("plot_linetype", i, "_", j)]],
kernel = input[[paste0("density_method", i, "_", j)]]
)
} else
#############################################################
# Boxplot
if(
input[[paste0("plottype", i, "_", j)]] == "Boxplot" &&
!is.null(axisyr[[paste0(i, "_", j)]])
){
geomeval <- geom_boxplot(
aes_string(
y = axisyr[[paste0(i, "_", j)]]
)
)
} else
#############################################################
# Smooth
if(
input[[paste0("plottype", i, "_", j)]] == "Smooth" &&
!is.null(axisyr[[paste0(i, "_", j)]]) &&
!is.null(axisxr[[paste0(i, "_", j)]])
){
geomeval <- geom_smooth(
aes_string(
x = axisxr[[paste0(i, "_", j)]],
y = axisyr[[paste0(i, "_", j)]]
),
method = input[[paste0("smooth_method", i, "_", j)]],
se = input[[paste0("smooth_se", i, "_", j)]],
level = input[[paste0("smooth_level", i, "_", j)]],
span = input[[paste0("smooth_span", i, "_", j)]]
)
} else
#############################################################
# Message for selecting plot type and axis
{
geomeval <- ggtitle(paste0("<Select Plot Type and Axis for Layer ", j, ", or close it.>"))
#geomeval <- ggtitle(paste0("<Press 'Add Layer'-Button to add first layer.>"))
}
if(exists("geomeval")){
if(class(geomeval)[1] != "labels"){
if(!is.null(input[[paste0("plottype", i, "_", j)]])){
# Add Color
geomeval <- gghandler(
geom = geomeval,
type = "colour",
react = plot_colorr[[paste0(i, "_", j)]],
widgetval = input[[paste0("plot_color", i, "_", j)]],
widgetout = input[[paste0("plot_color_picker", i, "_", j)]],
session = session,
factorname = paste0("plot_color_factor", i, "_", j),
factorval = input[[paste0("plot_color_factor", i, "_", j)]]
)
# Add Fill
if(input[[paste0("plottype", i, "_", j)]] %in% c("Bar", "Area", "Histogram", "Density", "Boxplot")){
geomeval <- gghandler(
geom = geomeval,
type = "fill",
react = plot_fillr[[paste0(i, "_", j)]],
widgetval = input[[paste0("plot_fill", i, "_", j)]],
widgetout = input[[paste0("plot_fill_picker", i, "_", j)]],
session = session,
factorname = paste0("plot_fill_factor", i, "_", j),
factorval = input[[paste0("plot_fill_factor", i, "_", j)]]
)
}
# Add Shape, Size
if(input[[paste0("plottype", i, "_", j)]] %in% c("Scatter")){
geomeval <- gghandler(
geom = geomeval,
type = "shape",
react = plot_shaper[[paste0(i, "_", j)]],
widgetval = input[[paste0("plot_shape", i, "_", j)]],
widgetout = input[[paste0("plot_shape_picker", i, "_", j)]],
session = session,
#factorname = paste0("plot_fill_factor", i, "_", j),
factorval = TRUE
)
geomeval <- gghandler(
geom = geomeval,
type = "size",
react = plot_sizer[[paste0(i, "_", j)]],
widgetval = input[[paste0("plot_size", i, "_", j)]],
widgetout = input[[paste0("plot_size_picker", i, "_", j)]],
session = session,
factorname = paste0("plot_size_factor", i, "_", j),
factorval = input[[paste0("plot_size_factor", i, "_", j)]]
)
}
}}}
return(geomeval)
})
# print(adder)
# print(removerr[[paste0(i)]])
# Add geoms, but remove layers, if remover is not null.
      # Add all layers; drop the ones the user has removed (removerr holds
      # the indices of closed layers for tab i)
      if(is.null(removerr[[paste0(i)]])){
        p1 <- p1+adder
      } else {
        p1 <- p1+adder[-c(removerr[[paste0(i)]])]
      }
      # Features not specific to layer
      # Facet
      if(!is.null(plot_facetr())){
        p1 <- p1 + facet_wrap(~ plot_facetr())
      }
      # Theme
      if(!is.null(plot_themer())){
        p1 <- p1 + plot_themer()
      }
      # Colorscale
      if(!is.null(plot_colorscr())){
        p1 <- p1 + plot_colorscr()
      }
      # Fillscale
      if(!is.null(plot_fillscr())){
        p1 <- p1 + plot_fillscr()
      }
      # Title
      if(!is.null(input[[paste0("ps_title", i)]])){
        if(input[[paste0("ps_title", i)]] != ""){
          p1 <- p1 + ggtitle(input[[paste0("ps_title", i)]])
        }}
      # Axis Label X
      if(!is.null(input[[paste0("ps_xlab", i)]])){
        if(input[[paste0("ps_xlab", i)]] != ""){
          p1 <- p1 + xlab(input[[paste0("ps_xlab", i)]])
        }}
      # Axis Label Y
      if(!is.null(input[[paste0("ps_ylab", i)]])){
        if(input[[paste0("ps_ylab", i)]] != ""){
          p1 <- p1 + ylab(input[[paste0("ps_ylab", i)]])
        }}
      # Marginal Plot (ggExtra::ggMarginal wraps the finished ggplot)
      if(!is.null(input[[paste0("marginal_check", i)]])){
        if(input[[paste0("marginal_check", i)]]){
          p1 <- ggMarginal(
            p = p1,
            type = input[[paste0("marginal_type", i)]],
            margins = input[[paste0("marginal_axis", i)]],
            size = input[[paste0("marginal_size", i)]],
            groupColour = input[[paste0("marginal_color", i)]],
            groupFill = input[[paste0("marginal_fill", i)]]
          )
        }
      }
      # Return final Plot
      return(p1)
})
})
# Table data ----
lapply(1:tabsnum, FUN = function(i){
tableid <- paste0("table", i)
output[[tableid]] <- renderDataTable(
if(!is.null(input[[paste0("data_select", i)]])){
if(input[[paste0("data_select", i)]] != ""){
dattab <- head(x = as.data.frame(get(input[[paste0("data_select", i)]])), n = 100)
datatable(dattab, options = list(pageLength = 5))
}}
)
})
# Summary ----
lapply(1:tabsnum, FUN = function(i){
sumid <- paste0("summary", i)
output[[sumid]] <- renderPrint(
if(!is.null(input[[paste0("data_select", i)]])){
if(input[[paste0("data_select", i)]] != ""){
summary(as.data.frame(get(input[[paste0("data_select", i)]])))
}}
)
})
}
shinyApp(ui, server)
|
fce9c14ccb920c66fe739c30b364f895bdf93d2e | 42a40d1f9c44007bd0a37b3daa4b975d799e35de | /R/Data-methods.R | 12eb76e81fd88bb408b28e5633479701b9c66b4c | [] | no_license | cran/crmPack | cb11c2a9a49c7662206ad2e7a576f854010949a2 | 2325e3fef8dbfd4d68f0fd918bf377d27dfff573 | refs/heads/master | 2022-09-27T13:51:37.214576 | 2022-09-02T22:00:11 | 2022-09-02T22:00:11 | 48,078,571 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 15,808 | r | Data-methods.R | #####################################################################################
## Author: Daniel Sabanes Bove [sabanesd *a*t* roche *.* com],
## Wai Yin Yeung [ w *.*yeung1 *a*t* lancaster *.* ac *.* uk]
## Project: Object-oriented implementation of CRM designs
##
## Time-stamp: <[Data-methods.R] by DSB Mon 11/05/2015 17:43>
##
## Description:
## Methods for handling the data. Plot ideas taken from bcrm package.
##
## History:
## 30/01/2014 file creation
## 06/02/2014 add method for conversion to list
## 17/02/2014 add update methods
## 21/07/2015 add plots using data and pseudo models
###################################################################################
## ============================================================
## --------------------------------------------------
## Converting Data object to list
## --------------------------------------------------
##' as.list method for the "GeneralData" class
##'
##' @param x the \code{\linkS4class{GeneralData}} object we want to convert
##' @param \dots unused
##' @return a list of all slots in \code{x}
##'
##' @example examples/Data-method-asList.R
##' @export
##' @keywords methods
setMethod("as.list",
          signature=
          signature(x="GeneralData"),
          def=
          function(x, ...){
            ## collect every slot of x into a list named after the slots
            slotsToGet <- slotNames(x)
            values <- lapply(slotsToGet,
                             function(slotName){
                               slot(x, slotName)
                             })
            names(values) <- slotsToGet
            values
          })
## ============================================================
## --------------------------------------------------
## Plotting the Data objects
## --------------------------------------------------
##' Plot method for the "Data" class
##'
##' @param x the \code{\linkS4class{Data}} object we want to plot
##' @param y missing
##' @param blind Logical (default FALSE) if to blind the data. If TRUE, then placebo
##' subjects are reported by the active dose level of the corresponding cohort and
##' DLEs are always assigned to the firsts subjects.
##' @param \dots not used
##' @return the \code{\link[ggplot2]{ggplot}} object
##'
##' @importFrom ggplot2 ggplot geom_point scale_colour_manual xlab ylab aes
##' scale_y_continuous scale_x_continuous
##'
##' @example examples/Data-method-plot-Data.R
##' @export
##' @keywords methods
setMethod("plot",
          signature=
          signature(x="Data", y="missing"),
          def=
          function(x, y, blind=FALSE, ...){
            ## nothing to plot without observations
            if(x@nObs == 0)
            {
              return()
            }
            ## one row per patient: dose given, DLT outcome, padded ID label
            df <- data.frame(patient=seq_along(x@x),
                             dose=x@x,
                             toxicity=ifelse(x@y==1, "Yes", "No"),
                             ID=paste(" ", x@ID))
            cols <- c("No" = "black","Yes" = "red")
            ## unblinded: draw placebo (first grid dose) at dose 0
            if(x@placebo & !blind)
              df$dose[df$dose == x@doseGrid[1]] <- 0.0
            ## blinded: within each cohort, report placebo at the active dose
            ## of the cohort and move all DLTs to the first subjects
            if(x@placebo & blind){
              cohort.id <- unique(x@cohort)
              ## fixed: was seq(a=cohort.id), which only worked via partial
              ## matching of "a" to seq()'s along.with argument
              for(iCoh in seq_along(cohort.id)){
                filter.coh <- which(x@cohort == cohort.id[iCoh])
                df[filter.coh,"dose"] <- max(df[filter.coh,"dose"])
                df[filter.coh,"toxicity"] <- sort(df[filter.coh,"toxicity"],
                                                  decreasing=TRUE)
              }
            }
            a <- ggplot(df, aes(x=patient,y=dose)) +
              scale_y_continuous(breaks=
                                   sort(unique(c(0, df$dose))),
                                 minor_breaks=numeric(),
                                 limits=c(0, max(df$dose) * 1.1))
            a <- a +
              geom_point(aes(shape=toxicity,colour=toxicity),
                         size=3) +
              scale_colour_manual(values=cols) +
              xlab("Patient") + ylab("Dose Level")
            ## rotated patient ID labels, only in the unblinded plot
            if(!blind)
              a <- a + geom_text(aes(label=ID, size=2),
                                 data=df,
                                 hjust=0, vjust=0.5,
                                 angle=90, colour=I("black"),
                                 show.legend = FALSE)
            a <- a + scale_x_continuous(breaks=df$patient,
                                        minor_breaks=numeric())
            ## dashed vertical separators between cohorts (placebo designs)
            if(x@placebo & length(unique(x@cohort)) > 1)
              a <- a + geom_vline(xintercept=head(cumsum(table(x@cohort)),n=-1) + 0.5,
                                  colour="green",
                                  linetype = "longdash")
            return(a)
          })
## --------------------------------------------------
## Subclass with additional biomarker information
## --------------------------------------------------
##' Plot method for the "DataDual" class
##'
##' @param x the \code{\linkS4class{DataDual}} object we want to plot
##' @param y missing
##' @param blind Logical (default FALSE) if to blind the data
##' @param \dots not used
##' @return the \code{\link[ggplot2]{ggplot}} object
##'
##' @importFrom ggplot2 ggplot geom_point scale_colour_manual xlab ylab aes
##' @importFrom gridExtra arrangeGrob
##'
##' @example examples/Data-method-plot-DataDual.R
##' @export
##' @keywords methods
setMethod("plot",
          signature=
          signature(x="DataDual", y="missing"),
          def=
          function(x, y, blind=FALSE, ...){
            ## dose/DLT panel comes from the Data method
            plot1 <- callNextMethod(x, blind=blind, ...)
            ## second panel: biomarker response vs dose
            df <- data.frame(patient=seq_along(x@x),
                             dose=x@x,
                             biomarker=x@w,
                             toxicity=ifelse(x@y==1, "Yes", "No"))
            cols <- c("No" = "black","Yes" = "red")
            ## unblinded: draw placebo (first grid dose) at dose 0
            if(x@placebo & !blind)
              df$dose[df$dose == x@doseGrid[1]] <- 0.0
            ## blinded: report placebo at the cohort's active dose level
            if(x@placebo & blind){
              cohort.id <- unique(x@cohort)
              ## fixed: was seq(a=cohort.id), which only worked via partial
              ## matching of "a" to seq()'s along.with argument
              for(iCoh in seq_along(cohort.id)){
                filter.coh <- which(x@cohort == cohort.id[iCoh])
                df[filter.coh,"dose"] <- max(df[filter.coh,"dose"])
              }
            }
            plot2 <- ggplot(df, aes(x=dose, y=biomarker))
            plot2 <- plot2 +
              geom_point(aes(shape=toxicity, colour=toxicity),
                         size=3) +
              scale_colour_manual(values=cols) +
              xlab("Dose Level") + ylab("Biomarker")
            ## rotated patient number labels, only in the unblinded plot
            if(!blind)
              plot2 <- plot2 +
                geom_text(data=df,
                          aes(label=patient, y=biomarker+0.02 * diff(range(biomarker)), size=2), hjust=0,
                          vjust=0.5, angle=90, colour=I("black"),
                          show.legend=FALSE)
            ## arrange both plots side by side
            ret <- gridExtra::arrangeGrob(plot1, plot2, ncol=2)
            return(ret)
          })
## ============================================================
## --------------------------------------------------
## Update a Data object
## --------------------------------------------------
##' Update method for the "Data" class
##'
##' Add new data to the \code{\linkS4class{Data}} object
##'
##' @param object the old \code{\linkS4class{Data}} object
##' @param x the dose level (one level only!)
##' @param y the DLT vector (0/1 vector), for all patients in this cohort
##' @param ID the patient IDs
##' @param newCohort logical: if TRUE (default) the new data are assigned
##' to a new cohort
##' @param \dots not used
##' @return the new \code{\linkS4class{Data}} object
##'
##' @example examples/Data-method-update-Data.R
##' @export
##' @keywords methods
setMethod("update",
          signature=
          signature(object="Data"),
          def=
          function(object,
                   x,
                   y,
                   ID=(if(length(object@ID)) max(object@ID) else 0L) + seq_along(y),
                   newCohort=TRUE,
                   ...){
            ## sanity checks: one dose level, binary DLT outcomes
            stopifnot(is.scalar(x),
                      all(y %in% c(0, 1)))
            nNew <- length(y)
            ## locate the dose on the grid
            gridLevel <- matchTolerance(x, object@doseGrid)
            if(is.na(gridLevel))
            {
              stop("dose is not on grid")
            }
            object@xLevel <- c(object@xLevel, rep(gridLevel, nNew))
            ## append the new cohort's data to all slots
            object@nObs <- object@nObs + nNew
            object@x <- c(object@x, rep(x, nNew))
            object@y <- c(object@y, as.integer(y))
            object@ID <- c(object@ID, ID)
            ## cohort index: either a fresh one or the current one
            lastCohort <- max(tail(object@cohort, 1L), 0L)
            thisCohort <- if(newCohort) lastCohort + 1L else lastCohort
            object@cohort <- c(object@cohort, rep(thisCohort, nNew))
            return(object)
          })
## --------------------------------------------------
## Update a DataParts object
## --------------------------------------------------
##' Update method for the "DataParts" class
##'
##' Add new data to the \code{\linkS4class{DataParts}} object
##'
##' @param object the old \code{\linkS4class{DataParts}} object
##' @param x the dose level (one level only!)
##' @param y the DLT vector (0/1 vector), for all patients in this cohort
##' @param ID the patient IDs
##' @param \dots not used
##' @return the new \code{\linkS4class{DataParts}} object
##'
##' @example examples/Data-method-update-DataParts.R
##' @export
##' @keywords methods
setMethod("update",
          signature=
          signature(object="DataParts"),
          def=
          function(object,
                   x,
                   y,
                   ID=(if(length(object@ID)) max(object@ID) else 0L) + seq_along(y),
                   ...){
            ## standard Data update first
            object <- callNextMethod(object=object, x=x, y=y, ID=ID, ...)
            ## record which part the new patients belong to
            object@part <- c(object@part,
                             rep(object@nextPart, length(y)))
            ## part 1 closes (permanently) on any observed DLT or once the
            ## highest part-1 dose has been given; part 2 never reverts
            if(object@nextPart == 1L &&
               (any(object@y == 1L) || x == max(object@part1Ladder)))
            {
              object@nextPart <- 2L
            }
            return(object)
          })
## --------------------------------------------------
## Update a DataDual object
## --------------------------------------------------
##' Update method for the "DataDual" class
##'
##' Add new data to the \code{\linkS4class{DataDual}} object
##'
##' @param object the old \code{\linkS4class{DataDual}} object
##' @param x the dose level (one level only!)
##' @param y the DLT vector (0/1 vector), for all patients in this cohort
##' @param w the biomarker vector, for all patients in this cohort
##' @param ID the patient IDs
##' @param newCohort logical: if TRUE (default) the new data are assigned
##' to a new cohort
##' @param \dots not used
##' @return the new \code{\linkS4class{DataDual}} object
##'
##' @example examples/Data-method-update-DataDual.R
##' @export
##' @keywords methods
setMethod("update",
          signature=
          signature(object="DataDual"),
          def=
          function(object,
                   x,
                   y,
                   w,
                   newCohort=TRUE,
                   ID=(if(length(object@ID)) max(object@ID) else 0L) + seq_along(y),
                   ...){
            ## delegate dose/DLT/ID/cohort bookkeeping to the Data method
            object <- callNextMethod(object=object, x=x, y=y, ID=ID,
                                     newCohort=newCohort, ...)
            ## then append the new biomarker responses
            object@w <- c(object@w, w)
            return(object)
          })
## -----------------------------------------------------------------------------------------
## Extracting efficacy responses for subjects without DLE observed
## ---------------------------------------------------------------------------------
##' Extracting efficacy responses for subjects without or with a DLE. This is a class where we separate
##' efficacy responses with or without a DLE. It outputs the efficacy responses and their corresponding
##' dose levels treated at in two categories (with or without DLE)
##'
##' @param object for data input from \code{\linkS4class{DataDual}} object
##' @param \dots unused
##'
##' @export
##' @keywords methods
## Generic for splitting efficacy responses by DLE status; methods must
## return a list (enforced via valueClass).
setGeneric("getEff",
           def=function(object,...){
             standardGeneric("getEff")},
           valueClass="list")
##' @rdname getEff
##' @param x todo
##' @param y todo
##' @param w todo
##' @example examples/Data-method-getEff.R
setMethod("getEff",
          signature=
          signature(object="DataDual"),
          def=
          function(object,
                   x,
                   y,
                   w,...){
            ## x, y, w are unused but kept in the signature for interface
            ## compatibility with existing callers of the generic
            ## indices of patients with an observed DLE (computed once;
            ## previously which() was evaluated twice)
            dleIndex <- which(object@y == 1)
            if(length(dleIndex) == 0){
              ## no DLEs observed: all responses fall in the "no DLE" group
              wNoDLE <- object@w
              wDLE <- NULL
              xNoDLE <- object@x
              xDLE <- NULL
            } else {
              ## split efficacy responses and doses by DLE status
              wNoDLE <- object@w[-dleIndex]
              wDLE <- object@w[dleIndex]
              xNoDLE <- object@x[-dleIndex]
              xDLE <- object@x[dleIndex]
            }
            ret <- list(wDLE=wDLE, xDLE=xDLE, wNoDLE=wNoDLE, xNoDLE=xNoDLE)
            return(ret)
          })
|
6d06c4d2ff3bd2f015e52d24150d16f3b4678644 | 2764143779bda1d3a777b1311a27506498704cad | /R/autoLearn.R | 83975787ab6bcba5405218035edd2af43a945b99 | [
"MIT"
] | permissive | hansvomkreuz/autoML | 7ed716170c8d3e3ea33f2f813455c28be0ceb04d | 36af7614f63a4ecb21da735591c7ff509193d33b | refs/heads/master | 2021-01-26T00:17:22.648913 | 2020-02-20T15:46:42 | 2020-02-20T15:46:42 | 243,238,443 | 1 | 0 | MIT | 2020-02-26T10:44:50 | 2020-02-26T10:44:49 | null | UTF-8 | R | false | false | 19,032 | r | autoLearn.R |
#' Automated machine learning training of models
#'
#' Automated training, tuning and validation of machine learning models. Models are tuned and resampling validated on an experiment set and trained on the full set and validated and testing on external sets. Classification models tune the probability threshold automatically and returns the results. Each model contains information of performance, the trained model as well as some plots.
#'
#' @param train [data.frame | Required] Training set
#' @param test [data.frame | Optional] Optional testing set to validate models on. If none is provided, one will be created internally. Default of NULL
#' @param target [character | Optional] If a target is provided classification or regression models will be trained, if left as NULL unsupervised models will be trained. Default of NULL
#' @param codeFrame [data.frame | Optional] If the code data.frame object returned from autoPreProcess is provided along with the EDA data.frame "dataSummary" then each model will modify the code to be model specific and is returned in the model object
#' @param edaFrame [data.frame | Optional] If the code data.frame object returned from autoPreProcess is provided along with the EDA data.frame "dataSummary" then each model will modify the code to be model specific and is returned in the model object
#' @param problemType [character | Optional] Machine learning problem type, options are: binary, multi, regression and cluster. If left as NULL but target feature provided, problem type is automatically detected. Default of NULL
#' @param maxLevels [integer | Optional] Number of unique values in target feature before the problem type is seen as a regression problem. Default of 100
#' @param testSplit [numeric | Optional] Percentage of data to allocate to the test set. Stratified sampling is done. Default of 0.1
#' @param validationSplit [numeric | Optional] Percentage of data to allocate to the validation set. Stratified sampling is done. Default of 0.3
#' @param trainMode [character | Optional] Specifies how to train models, options are: all, full, reduced, balanced, reducedBalanced. all will use all of the other options when suitable. full trains models on all features. reduced trains models on top n features selected by a random forest. balanced trains models on all features but with oversampling the target to 50/50 proportion when the target is binary. reducedBalanced uses the top features as well as balancing the target when the target is binary. Either one or many options can be specified
#' @param tuneIters [integer | Optional] Number of tuning iterations to search for optimal hyper parameters. Default of 10
#' @param tuneType [character | Optional] Tune method applied, options are: random and frace. random uses random tuning and frace uses iterated f-racing algorithm for the best solution. Default of random
#' @param perfMetric [character | Optional] Optimization metric on which to train and validate the model. Default of NULL will automatically select a metric; for available metrics use the function availableMetrics
#' @param performResampling [logical | Optional] Should resampling be performed after tuning of the model have taken place. Default of FALSE
#' @param resampleMethod [character | Optional] Should resampling be performed, specifies the resampling method, options are: CV, Bootstrap
#' @param resampleIters [integer | Optional] Number of folds or bootstrap iterations to validate the model on
#' @param topFeatures [integer | Optional] Top performing features as identified by the random forest model and used in the reduced training methods. Default of 30, if the training set has less than 30 features 50% of the top features will be used
#' @param models [character | Optional] Which models to train. Default of all. Available models can be seen by calling availableLearners. Either one or many options can be specified
#' @param clusters [integer | Optional] For unsupervised problems, the number of clusters to optimize for. Default of NULL which will search for the best optimized number of clusters
#' @param cores [integer | Optional] Number of CPU cores available for computation. Default of NULL which uses all but one core
#' @param maxObs [integer | Optional] Number of observations in the experiment training set on which models are trained, tuned and resampled on. Default of 40000. If the training set has less than 40k observations all will be used
#' @param verbose [logical | Optional] Chatty function or not. Default of TRUE
#' @param seed [integer | Optional] Random number seed for reproducible results
#'
#' @return List of trained models each containing unique information relating to the machine learning problem type
#' @export
#'
#' @examples
#' mod <- autoLearn(train = iris, target = "Species")
#' @author
#' Xander Horn
#'
autoLearn <- function(
train,
test = NULL,
target = NULL,
codeFrame = NULL,
edaFrame = NULL,
problemType = NULL,
maxLevels = 100,
testSplit = 0.1,
validationSplit = 0.3,
trainMode = "all",
tuneIters = 10,
tuneType = "random",
perfMetric = "auto",
performResampling = FALSE,
resampleMethod = "CV",
resampleIters = 5,
topFeatures = 30,
models = "all",
clusters = NULL,
cores = NULL,
maxObs = 40000,
verbose = TRUE,
seed = 1991){
library(mlr)
library(parallelMap)
library(parallel)
library(caret)
set.seed(seed, "L'Ecuyer")
options(scipen = 999)
if(missing(train) == TRUE){
stop("No training data provided")
}
if(is.null(target) == FALSE & any(trainMode %in% c("all","reduced","balancedReduced")) & ncol(train) < 10){
topFeatures <- ncol(train)
}
if(is.null(target) == FALSE & any(trainMode %in% c("all","reduced","balancedReduced")) & (ncol(train) - 1) < topFeatures){
topFeatures <- round(0.5 * (ncol(train)- 1),0)
}
if(is.null(target) == TRUE){
trainMode <- "all"
}
if(is.null(target) == TRUE){
trainMode <- "full"
} else {
if(length(unique(train[,target])) == 2){
if(trainMode == "all"){
trainMode <- c("full","reduced","balanced","balancedReduced")
} else {
trainMode <- setdiff(trainMode, "all")
}
} else if(length(unique(train[,target])) <= maxLevels & length(unique(train[,target])) > 2){
if(trainMode == "all"){
trainMode <- c("full","reduced")
} else {
trainMode <- setdiff(trainMode, c("balanced","balancedReduced","all"))
}
} else {
if(trainMode == "all"){
trainMode <- c("full","reduced")
} else {
trainMode <- setdiff(trainMode, c("balanced","balancedReduced","all"))
}
}
}
train <- train[sample(nrow(train)),]
if(is.null(test) == TRUE & is.null(target) == FALSE){
ind <- caret::createDataPartition(y = train[,target], p = testSplit, list = FALSE)
test <- train[ind,]
train <- train[-ind,]
if(verbose == TRUE){
cat("autoLearn | Test set created \n")
}
}
exp <- autoSample(x = train, y = target, seed = seed, maxObs = maxObs)
expTasks <- list()
fullTasks <- list()
expTasks$fullTask <- generateTask(x = exp, y = target, problemType = problemType, maxLevels = maxLevels)
fullTasks$fullTask <- generateTask(x = train, y = target, problemType = problemType, maxLevels = maxLevels)
if(verbose == TRUE){
cat(paste0("autoLearn | ", expTasks$fullTask$type," task generated \n"))
}
learners <- suppressWarnings(generateLearners(task = expTasks$fullTask))
if(verbose == TRUE){
cat(paste0("autoLearn | Learners generated \n"))
}
params <- generateHyperParams(task = expTasks$fullTask, cores = cores, clusters = clusters)
if(verbose == TRUE){
cat(paste0("autoLearn | Hyper parameters generated \n"))
}
metrics <- generateMetrics(task = expTasks$fullTask)
if(perfMetric == "auto"){
if(expTasks$fullTask$type %in% c("Binary classification", "Multi class classification")){
metric <- metrics$auc
perfMetric <- "auc"
} else if(expTasks$fullTask$type == "Regression"){
metric <- metrics$rmse
perfMetric <- "rmse"
} else if(expTasks$fullTask$type == "Unsupervised"){
metric <- metrics$dunn
perfMetric <- "dunn"
}
} else {
metric <- metrics[[which(tolower(names(metrics)) == tolower(perfMetric))]]
}
if(verbose == TRUE){
cat(paste0("autoLearn | Performance metric generated as: ",metric$id,"\n"))
}
if(expTasks$fullTask$type %in% c("Binary classification","Multi class classification")){
if(tuneType == "random"){
tune <- makeTuneControlRandom(maxit = tuneIters, tune.threshold = TRUE)
} else if(tuneType == "frace") {
tune <- makeTuneControlIrace(maxExperiments = tuneIters, tune.threshold = TRUE)
}
} else {
if(tuneType == "random"){
tune <- makeTuneControlRandom(maxit = tuneIters)
} else if(tuneType == "frace"){
tune <- makeTuneControlIrace(maxExperiments = tuneIters)
}
}
if(verbose == TRUE){
cat("autoLearn | Tune control generated \n")
}
if(expTasks$fullTask$type %in% c("Binary classification", "Multi class classification")){
resamples <- makeResampleDesc(method = resampleMethod, iters = resampleIters, stratify = TRUE)
} else if(expTasks$fullTask$type == "Regression"){
resamples <- makeResampleDesc(method = resampleMethod, iters = resampleIters, stratify = FALSE)
} else {
resamples <- makeResampleDesc(method = resampleMethod, iters = resampleIters, stratify = FALSE, predict = "both")
}
if(performResampling == TRUE & verbose == TRUE){
cat("autoLearn | Resampling strategy generated \n")
}
if(expTasks$fullTask$type %in% c("Binary classification","Multi class classification")){
validation <- makeResampleDesc(method = "Holdout", stratify = TRUE, split = validationSplit)
} else {
validation <- makeResampleDesc(method = "Holdout", stratify = FALSE, split = validationSplit)
}
if(verbose == TRUE){
cat("autoLearn | Validation set generated \n")
}
if(any(trainMode %in% c("reduced","balancedReduced"))){
rf <- ranger::ranger(as.formula(paste0(target," ~ .")),
data = exp,
num.trees = 100,
importance = "permutation",
min.node.size = 10,
seed = seed,
verbose = FALSE)
imp <- data.frame(Importance = rf$variable.importance)
imp$Feature <- row.names(imp)
imp <- imp[order(-imp$Importance),]
feats <- as.character(imp[1:topFeatures, "Feature"])
expTasks$reducedTask <- generateTask(x = exp[,c(feats,target)], y = target, problemType = problemType, maxLevels = maxLevels)
fullTasks$reducedTask <- generateTask(x = train[,c(feats,target)], y = target, problemType = problemType, maxLevels = maxLevels)
if(verbose == TRUE){
cat(paste0("autoLearn | Reduced task generated with top features \n"))
}
} else {
expTasks$reducedTask <- NULL
fullTasks$reducedTask <- NULL
}
if(any(trainMode %in% c("balancedReduced","balanced"))){
expTasks$balancedTask <- generateBalancedTask(expTasks$fullTask$task)
fullTasks$balancedTask <- generateBalancedTask(fullTasks$fullTask$task)
if(verbose == TRUE){
cat(paste0("autoLearn | Balanced task generated \n"))
}
} else {
expTasks$balancedTask <- NULL
fullTasks$balancedTask <- NULL
}
if(any(trainMode %in% c("balancedReduced") & is.null(expTasks$reducedTask) == FALSE)){
expTasks$balancedReducedTask <- generateBalancedTask(task = expTasks$reducedTask$task)
fullTasks$balancedReducedTask <- generateBalancedTask(task = fullTasks$reducedTask$task)
if(verbose == TRUE){
cat(paste0("autoLearn | Balanced reduced task generated with top features \n"))
}
} else {
expTasks$balancedReducedTask <- NULL
fullTasks$balancedReducedTask <- NULL
}
names(expTasks) <- gsub("Task","",names(expTasks))
names(fullTasks) <- gsub("Task","",names(fullTasks))
expTasks[sapply(expTasks, is.null)] <- NULL
fullTasks[sapply(fullTasks, is.null)] <- NULL
results <- expand.grid(Model = names(learners),
Metric = perfMetric,
TrainMode = unique(names(expTasks)),
Train = NA,
Validation = NA,
Resamples = NA,
ResamplesStDev = NA,
Test = NA,
OverfitIndex = NA)
if(is.null(target) == TRUE){
names(results)[which(names(results) == "Test")] <- "NrClusters"
}
suppressWarnings(if(models != "all"){
results <- subset(results, tolower(results$Model) %in% tolower(models))
})
suppressWarnings(if(trainMode != "all"){
results <- subset(results, tolower(results$TrainMode) %in% tolower(trainMode))
expTasks <- expTasks[which(tolower(names(expTasks)) %in% tolower(trainMode))]
fullTasks <- fullTasks[which(tolower(names(fullTasks)) %in% tolower(trainMode))]
})
if(is.null(cores) == TRUE){
cores <- (detectCores() - 1)
}
parallelStartSocket(cpus = cores, show.info = FALSE)
trainedModels <- list()
if(verbose == TRUE){
cat(paste0("autoLearn | Training learners \n"))
}
for(i in 1:nrow(results)){
set.seed(seed, "L'Ecuyer")
model <- list()
tuneTask <- expTasks[[which(names(expTasks) == results[i, "TrainMode"])]]$task
trainTask <- fullTasks[[which(names(fullTasks) == results[i, "TrainMode"])]]$task
taskName <- tolower(names(fullTasks)[which(names(fullTasks) == results[i, "TrainMode"])])
modName <- as.character(results[i, "Model"])
mod <- learners[[which(names(learners) == modName)]]
modelPlots <- list()
taskName <- ifelse(taskName == "reduced", "Reduced features",
ifelse(taskName == "full", "All features",
ifelse(taskName == "balancedreduced", "Reduced features with balanced target", "All features with balanced target")))
if(modName != "LinearRegr"){
ps <- params[[which(tolower(names(params)) == tolower(as.character(modName)))]]
tuned <- suppressMessages(tuneParams(task = tuneTask,
resampling = validation,
control = tune,
par.set = ps,
measures = metric,
learner = mod,
show.info = FALSE))
results[i, "Validation"] <- round(tuned$y, 4)
mod <- setHyperPars(mod, par.vals = tuned$x)
model$model <- mlr::train(learner = mod, task = trainTask)
model$tuneData <- generateHyperParsEffectData(tuned, partial.dep = TRUE)
modelPlots$LearningCurve <- plotLearningCurve(generateLearningCurveData(learners = mod, task = tuneTask, measures = metric)) +
ggtitle("Learning curve analysis") +
theme_light() +
xlab("Percentage of data used for training") +
ylab(metric$id)
if(verbose == TRUE){
cat("autoLearn |",taskName,modName,"tuned and trained \n")
}
} else {
results[i, "Validation"] <- round(resample(learner = mod, task = trainTask, resampling = validation,
measures = metric, show.info = FALSE)$aggr, 4)
model$model <- mlr::train(learner = mod, task = trainTask)
if(verbose == TRUE){
cat("autoLearn |",taskName,modName,"trained \n")
}
}
if(performResampling == TRUE & tuneTask$task.desc$type != "cluster"){
cv <- resample(learner = mod, task = tuneTask, resampling = resamples,
measures = metric, show.info = FALSE)
results[i, "Resamples"] <- round(cv$aggr, 4)
results[i, "ResamplesStDev"] <- round(sd(cv$measures.test[,2]), 4)
} else if(tuneTask$task.desc$type == "cluster"){
cv <- resample(learner = mod, task = tuneTask, resampling = resamples,
measures = list(metric, setAggregation(metric, train.mean)), show.info = FALSE)
results[i, "Resamples"] <- round(mean(cv$measures.test[,2]), 4)
results[i, "ResamplesStDev"] <- round(sd(cv$measures.test[,2]), 4)
results[i, "Train"] <- round(mean(cv$measures.train[,2]), 4)
}
if(tuneTask$task.desc$type != "cluster"){
p.test <- predict(model$model, newdata = test[,model$model$features])
p.test$data$truth <- test[,target]
}
p.train <- predict(model$model, newdata = train[,model$model$features])
p.train$data$truth <- train[,target]
if(tuneTask$task.desc$type != "cluster"){
results[i, "Train"] <- round(mlr::performance(pred = p.train, task = trainTask, measures = metric, model = model$model)[[1]], 4)
results[i, "Test"] <- round(mlr::performance(pred = p.test, task = trainTask, measures = metric, model = model$model)[[1]], 4)
} else {
results$Test <- NULL
}
if(tuneTask$task.desc$type == "cluster" & is.null(clusters) == TRUE){
results[i, "NrClusters"] <- length(unique(p.train$data$response))
} else if(tuneTask$task.desc$type == "cluster" & is.null(clusters) == FALSE){
results[i, "NrClusters"] <- clusters
}
results[i, "OverfitIndex"] <- round(abs(results[i,"Train"] - results[i,"Validation"]) / results[i,"Train"],4)
model$performance <- results[i,]
if(tuneTask$task.desc$type == "classif"){
model$probCutoff <- tuned$threshold
modelPlots$Calibration <- plotCalibration(generateCalibrationData(p.train)) +
theme_light() +
ggtitle("Model calibration")
if(length(unique(train[,target])) == 2){
temp <- generateThreshVsPerfData(p.train, measures = list(fpr, tpr, acc))
temp2 <- generateThreshVsPerfData(p.test, measures = list(fpr, tpr, acc))
plot1 <- plotROCCurves(temp) +
ggtitle(paste0("Train ROC Curve: ", modName)) +
theme_light()
plot2 <- plotROCCurves(temp2) +
ggtitle(paste0("Test ROC Curve: ", modName)) +
theme_light()
modelPlots$TrainROC <- plot1
modelPlots$TestROC <- plot2
plot <- plotThreshVsPerf(temp) +
theme_light()
modelPlots$Threshold <- plot
}
}
if(is.null(codeFrame) == FALSE & is.null(edaFrame) == FALSE){
model$ProductionCode <- modifyCode(trainedModel = model$model,
codeFrame = codeFrame,
edaFrame = edaFrame)
}
model$modelPlots <- modelPlots
trainedModels[[i]] <- model
}
names(trainedModels) <- paste0(results$Model,"_",results$TrainMode)
parallelStop()
rm(list = setdiff(ls(), c("trainedModels","results")))
invisible(gc())
return(list(trainedModels = trainedModels,
results = results))
}
|
fd36814f3bf0ed927b5919fe0ad877bac80f0906 | f58d73bb5d624a78c329e79a60d5fb06b4c36837 | /inst/MLE/global.R | c2cbf248f2135c4a83261febecf52113aa709641 | [] | no_license | cran/irtDemo | be108fc0c36aa0328f1ed23b5d2c153ed3c0b701 | 3b36e362d74563f404374c8333f11cda023abc70 | refs/heads/master | 2020-04-06T07:01:12.326558 | 2018-04-05T19:29:46 | 2018-04-05T19:29:46 | 57,357,089 | 3 | 5 | null | null | null | null | UTF-8 | R | false | false | 355 | r | global.R | # define prob function
Pfun <- function(theta, delta, D=1, u=1){
z = D*(theta-delta)
P = exp(u*z) / (1 + exp(z))
return(P)
}
deltas <- c(-1.90, -0.60, -0.25, 0.30, 0.45)
thetas <- seq(from=-6, to=6, by=0.001)
N <- length(thetas)
n <- length(deltas)
pji <- matrix(NA,N,n)
logLike <- matrix(NA,N,3)
colnames(logLike) <- c("thetas", "LogLike", "Like") |
9a85f7685dd1e3160cfa6cad7bd949dcac663963 | dde0427523c0da65fe90540b83a59de946aef0f6 | /R/fstat.R | 2f4bdada6a18f5f763a67b0035c73976d84fd576 | [] | no_license | cran/spatclus | 98d445f0e61a4e4488f4ea5a09bea83ab88bb3f5 | 686da4cb39b3e0bf3346a8918b33d88d1974641b | refs/heads/master | 2021-03-12T20:46:00.023494 | 2007-10-02T00:00:00 | 2007-10-02T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,301 | r | fstat.R | critval<-function(q=1,k=2,e)
{
(8.238+1.756*q-0.043*q*q-0.659*k-15.436*e+0.025*q/e)*exp(0.389/k-0.013/(e*k))
}
critvalwdm<-function(q=1,e)
{
(9.039+3.318*q-9.969*e)*exp(-0.03*q-0.327*e)
}
#################################################################
supf<-function(reslst,bc,k,T)
{
nomlst<-reslst
bc<-c(0,bc[[k]],T)
distp<-nomlst$distp[-(T+1)]
ninc<-nomlst$ninc[-(T+1)]
delta<-rep(0,k)
for (i in 1:(k+1)) delta[i]<-mean(distp[(bc[i]+1):bc[i+1]])
q<-1
p<-0
R<-matrix(0,nrow=k,ncol=(k+1))
diag(R)<-1
if (k>1) diag(R[,-1])<-(-1)
if (k==1) R[1,2]<-(-1)
Z<-matrix(0,nrow=T,ncol=(k+1))
for (i in 1:(k+1)) Z[(bc[i]+1):bc[i+1],i]<-1
sigdiff<-rep(0,k+1)
for (i in 1:(k+1)) sigdiff[i]<-sum((Z[,i]*distp-Z[,i]*delta[i])^2)/sum(Z[,i])
Vdiff<-diag(sigdiff*T*diag(solve(t(Z)%*%Z)))
Fdiff<-((T-(k+1)*q-p)/(k*q))*t(delta)%*%t(R)%*%solve(R%*%Vdiff%*%t(R))%*%R%*%delta
list(Fdiff=Fdiff)
}
#################################################################
fstat<-function(reslst,bc,m,T,eps)
{
f<-matrix(0,nrow=2,ncol=m)
for (i in 1:m) f[1,i]<-supf(reslst,bc,k=i,T)$Fdiff
for (j in 1:m) f[2,j]<-(critval(k=1,e=eps)/critval(k=j,e=eps))*f[1,j]
wdms<-critvalwdm(e=eps)
wdm<-max(f[2,])
signif<-(wdm>=wdms)
for (i in 1:m)
{
if (wdm==f[2,i]) kmax<-i
}
list(F=f,wdm=wdm,wdms=wdms,signif=signif,kmax=kmax)
}
|
79f0877b95e92ff96f613ec5e096d81c0ac52980 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/LogConcDEAD/examples/mlelcd.Rd.R | b4ba134e6a0a6a5787df66f8fa462954db598685 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,567 | r | mlelcd.Rd.R | library(LogConcDEAD)
### Name: mlelcd
### Title: Compute the maximum likelihood estimator of a log-concave
### density
### Aliases: mlelcd
### Keywords: nonparametric multivariate
### ** Examples
## Some simple normal data, and a few plots
x <- matrix(rnorm(200),ncol=2)
lcd <- mlelcd(x)
g <- interplcd(lcd)
par(mfrow=c(2,2), ask=TRUE)
plot(lcd, g=g, type="c")
plot(lcd, g=g, type="c", uselog=TRUE)
plot(lcd, g=g, type="i")
plot(lcd, g=g, type="i", uselog=TRUE)
## Some plots of marginal estimates
par(mfrow=c(1,1))
g.marg1 <- interpmarglcd(lcd, marg=1)
g.marg2 <- interpmarglcd(lcd, marg=2)
plot(lcd, marg=1, g.marg=g.marg1)
plot(lcd, marg=2, g.marg=g.marg2)
## generate some points from the fitted density
## via independent rejection sampling
generated1 <- rlcd(100, lcd)
colMeans(generated1)
## via Metropolis-Hastings algorithm
generated2 <- rlcd(100, lcd, "MH")
colMeans(generated2)
## evaluate the fitted density
mypoint <- c(0, 0)
dlcd(mypoint, lcd, uselog=FALSE)
mypoint <- c(1, 0)
dlcd(mypoint, lcd, uselog=FALSE)
## evaluate the marginal density
dmarglcd(0, lcd, marg=1)
dmarglcd(1, lcd, marg=2)
## evaluate the covariance matrix of the fitted density
covariance <- cov.LogConcDEAD(lcd)
## find the hat matrix for the smoothed log-concave that
## matches empirical mean and covariance
A <- hatA(lcd)
## evaluate the fitted smoothed log-concave density
mypoint <- c(0, 0)
dslcd(mypoint, lcd, A)
mypoint <- c(1, 0)
dslcd(mypoint, lcd, A)
## generate some points from the fitted smoothed log-concave density
generated <- rslcd(100, lcd, A)
|
d336b82acfaca61a2d575aa1133ddc42e5c2b4cc | 876ffa84231869e97536430bf1e0efc9810b6d12 | /man/pgenlogis.Rd | fd27dcfa2f3286d24ba6826f375e9e3b26b482b6 | [] | no_license | cran/sirt | 3f13a159b57926a51425ae0f530b4650fc690ba7 | a3daefdc2db1141263a5d52025eef37894d54a49 | refs/heads/master | 2023-08-31T23:28:39.426909 | 2023-08-11T09:40:02 | 2023-08-11T10:30:54 | 17,699,690 | 4 | 5 | null | null | null | null | UTF-8 | R | false | false | 3,585 | rd | pgenlogis.Rd | %% File Name: pgenlogis.Rd
%% File Version: 0.281
\name{pgenlogis}
\alias{pgenlogis}
\alias{genlogis.moments}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Calculation of Probabilities and Moments for the
Generalized Logistic Item Response Model
}
\description{
Calculation of probabilities and moments for the generalized logistic
item response model (Stukel, 1988).
}
\usage{
pgenlogis(x, alpha1=0, alpha2=0)
genlogis.moments(alpha1, alpha2)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
Vector
}
\item{alpha1}{
Upper tail parameter \eqn{\alpha_1} in the generalized
logistic item response model. The default is 0.
}
\item{alpha2}{
Lower tail parameter \eqn{\alpha_2} parameter in the generalized
logistic item response model. The default is 0.
}
}
\details{
The class of generalized logistic link functions contain
the most important link functions using the specifications (Stukel, 1988):
\itemize{
\item logistic link function \eqn{L}:
\deqn{ L(x) \approx G_{ ( \alpha_1=0, \alpha_2=0)}[ x ] }
\item probit link function \eqn{\Phi}:
\deqn{ \Phi(x) \approx G_{ ( \alpha_1=0.165, \alpha_2=0.165)}[ 1.47 x ] }
\item loglog link function \eqn{H}:
\deqn{ H(x) \approx G_{ (\alpha_1=-0.037, \alpha_2=0.62)}[
-0.39+1.20x-0.007x^2] }
\item cloglog link function \eqn{H}:
\deqn{ H(x) \approx G_{ ( \alpha_1=0.62, \alpha_2=-0.037)}[
0.54+1.64x+0.28x^2+0.046x^3] }
}
}
% # loglog link
% y <- pgenlogis( -.39 + 1.20*x -.007*x^2, alpha1=-.037, alpha2=.62 )
% # cloglog link
% y <- pgenlogis( .54+1.64*x +.28*x^2 + .046*x^3, alpha1=.062, alpha2=-.037 )
\value{
Vector of probabilities or moments
}
\references{
Stukel, T. A. (1988). Generalized logistic models.
\emph{Journal of the American Statistical Association, 83}(402), 426-431.
\doi{10.1080/01621459.1988.10478613}
}
%\author{
%Alexander Robitzsch
%}
%\note{
%% ~~further notes~~
%}
%% ~Make other sections like Warning with \section{Warning }{....} ~
%\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
%}
\examples{
sirt::pgenlogis( x=c(-.3, 0, .25, 1 ), alpha1=0, alpha2=.6 )
## [1] 0.4185580 0.5000000 0.5621765 0.7310586
####################################################################
# compare link functions
x <- seq( -3,3, .1 )
#***
# logistic link
y <- sirt::pgenlogis( x, alpha1=0, alpha2=0 )
plot( x, stats::plogis(x), type="l", main="Logistic Link", lwd=2)
points( x, y, pch=1, col=2 )
#***
# probit link
round( sirt::genlogis.moments( alpha1=.165, alpha2=.165 ), 3 )
## M SD Var
## 0.000 1.472 2.167
# SD of generalized logistic link function is 1.472
y <- sirt::pgenlogis( x * 1.47, alpha1=.165, alpha2=.165 )
plot( x, stats::pnorm(x), type="l", main="Probit Link", lwd=2)
points( x, y, pch=1, col=2 )
#***
# loglog link
y <- sirt::pgenlogis( -.39 + 1.20*x -.007*x^2, alpha1=-.037, alpha2=.62 )
plot( x, exp( - exp( -x ) ), type="l", main="Loglog Link", lwd=2,
ylab="loglog(x)=exp(-exp(-x))" )
points( x, y, pch=17, col=2 )
#***
# cloglog link
y <- sirt::pgenlogis( .54+1.64*x +.28*x^2 + .046*x^3, alpha1=.062, alpha2=-.037 )
plot( x, 1-exp( - exp(x) ), type="l", main="Cloglog Link", lwd=2,
ylab="loglog(x)=1-exp(-exp(x))" )
points( x, y, pch=17, col=2 )
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
%% \keyword{Generalized logistic item response model}
%% \keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
46d7a1cc0fbbc538cc3bd2f6eb9a494b4123ad52 | 29585dff702209dd446c0ab52ceea046c58e384e | /spsmooth/R/spmgcv.R | edfcdb42ec0c6afb4f0ad1ed6f3d60f6c9d06443 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,050 | r | spmgcv.R | ########################################################################
#
# Extensions to mgcv package allowing a projection Slepian basis
# to be used.
#
########################################################################
########################################################################
#
# Smooth Constructor
#
# Forms model matrix from basis vectors consisting of orthogonal
# Slepian sequences (DPSSs), using dpss code originally written
# for package:multitaper.
#
########################################################################
smooth.construct.sp.smooth.spec<-function(object,data,knots) {
# p.order is inapplicable; we are not using a polynomial
if (!is.na(object$p.order[1])) warning("Specifying 'p' is meaningless for sp smoothers.")
# is bandwidth W not specified?
if (! "W" %in% names(object[['xt']]) ) {
warning("Bandwidth W not specified as sub-parameter of xt.")
W <- 7/365 # default for smooth functions of time in GAMs, epidemiology
} else { # W specified, check to be sure correctly ...
W <- object[['xt']][['W']]
if (!is.numeric(W)) stop("Bandwidth W must be numeric.")
if (W > 0.5 || W < 0) stop("Bandwidth W is strictly bounded on (0,0.5).")
}
# use the data
x <- data[[object$term]]
# no knots for sp(), so this is the dimension of the projection subspace
nk <- object$bs.dim
if (nk >= 0 & nk<=4) stop("Dimension too small for sp smoother")
################################################################################
#
# Problem: when mgcv() evaluates the family call, it tries to load the 'mask'
# object. However ... it doesn't seem to be able to track it back to the subroutine
# from which the gam() call was made. So you have to assign the mask to the .GlobalEnv
# or everything craps out.
#
################################################################################
# number of input points; in mgcv, the passed data is post-na.action,
# so we require a mask to determine the actual structure
if(!is.null(object[['xt']][['mask']])) {
mask <- object[['xt']][['mask']]
# sanity check 1: is mask TRUE/FALSE?
if(length(c(which(mask==TRUE), which(mask==FALSE))) != length(mask) ) {
stop("'mask' must be populated with TRUE/FALSE elements.")
}
# sanity check 2: mask specified, does it match up with object$term?
if(length(which(mask==TRUE)) != length(data[[object$term]])) {
cat(paste0("\n", str(data[[object$term]]), " (Data)", "\n"))
cat(paste0(str(mask), " (Mask)", "\n"))
cat(paste0(length(data[[object$term]]), " good data points vs ", length(which(mask==TRUE)), " true elements \n"))
stop("Mask must correspond to missing data.")
}
nx <- length(mask)
nxF <- length(x)
} else {
# assume the user knows what they are doing ...
cat(paste0("Mask not found; assuming data is contiguous. \n"))
mask <- NULL
nx <- length(x)
}
# k specified ==> use that number of basis vectors
if (nk > 4) {
nw <- nx * W
dof <- floor(2 * nw) - 1
if (nk >= dof+2 ) {
warning(paste0("k provided (", nk,") exceeds dimensionality (", floor(2*nw), ")! Manually decreased to 2NW-2."))
nk <- dof
}
if(nk <= (floor(2 * nw) - 4)) {
warning(paste0("k provided (", nk, ") is smaller than 2NW-3; this will not provide full
coverage on the index ends. Are you sure you want to do this?"))
}
} else { # default to 2NW - 1
nw <- nx * W
dof <- floor(2 * nw) - 1
nk <- dof
}
# centering constraints:
# C > 0, numeric: drops column C, and orthonormalizes the rest as zero-mean
# columns
# C = zero-row matrix: constraint-free parametrization, keeps the Slepians as-is
# C = one-row matrix: constraints on each column; set col to 0 to ignore
object$C <- matrix(data=NA, nrow=0, ncol=nk)
# The Slepians are already orthonormal
# ** so we want to prevent the qr decomposition
# ** but being not-zero-mean is a problem
#
# discussion: pg 163-164 of Wood's GAM book
#
X <- .dpss(n = nx, k = nk, nw = nw)$v
# the odd tapers are already zero-mean; set the even tapers to be zero-mean
# as well
X[, seq(1, nk, 2)] <- apply(X[, seq(1, nk, 2)], MARGIN=2, FUN=function(x) {
x <- x - mean(x); x
})
if(!is.null(mask)) {
object$X <- X[mask==TRUE,]
object$size <- c(nxF,nk)
} else {
object$X <- X
object$size <- c(nx,nk)
}
object$rank <- nk # penalty rank
object$null.space.dim <- nx - nk # dim. of unpenalized space
# store "sp" specific stuff ...
object$mask <- mask
object$dof <- dof # maximum DoF (if unconstrained)
object$df <- nk
object$bs.dim <- object$size[2]
object$k <- nk
object$N <- nx
object$W <- W
# object$v <- X[,2:(nk+1)] # save only the tapers
object$v <- X
class(object)<-"sp.smooth" # Give object a class
object
}
########################################################################
#
# Predictor
#
# Forms prediction (for summary, predict, etc.) from model matrix
# using provided (sub-set) X.
#
########################################################################
Predict.matrix.sp.smooth<-function(object,data)
{
# die if freq=TRUE is not set
x <- data[[object$term]]
if(length(x) != object$size[1]) {
stop("sp requires that summary.gam be called with freq=TRUE and p.type=5.")
}
# grab objects
nx <- object$size[1]
nk <- object$size[2]
mask <- object[['xt']][['mask']]
if(is.null(mask)) {
# X <- cbind(rep(1,nx), object$v)
X <- object$v
} else {
nxF <- length(which(mask==TRUE))
# X <- cbind(rep(1,nxF), object$v[mask==TRUE, ])
X <- object$v[mask==TRUE, ]
}
X # return the prediction matrix
}
########################################################################
#
# Plot
#
# Requires work to make sure the predict is called properly ...
#
#
########################################################################
|
b03235f58b066d1a9dec6903a5758c66cbf09ba3 | 09ba7e6ad79d4ad9d191b374132dac1a9089032c | /UScar.R | 186d0099f00ce9127b544fae36d197e48cec550e | [] | no_license | amrutkinage/Car-Prices | cbdd0fae1c98636af6dab446e041243f53234dd1 | db32fe1c424a8c9780f2c87dd2be6c8028508fb1 | refs/heads/master | 2020-05-03T04:56:50.717011 | 2019-03-29T16:00:19 | 2019-03-29T16:00:19 | 178,436,092 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 31,659 | r | UScar.R | library(MASS)
library(car)
library(ggplot2)
library(stringr)
library(dplyr)
library(corrplot)
library(cowplot)
# ---- Load data --------------------------------------------------------------
# Read the raw car-price data; car12 keeps an untouched backup copy.
# NOTE(review): hard-coded local Windows path -- a relative path or
# file.path() would be more portable.
car1 <- read.csv('C:/Users/admin/Downloads/CarPrice_Assignment.csv')
car12 <- read.csv('C:/Users/admin/Downloads/CarPrice_Assignment.csv')

# ---- Duplicate check on the primary key -------------------------------------
# FALSE below means every car_ID occurs exactly once.
anyDuplicated(car1$car_ID) > 0
# Number of distinct IDs -- matches the row count (205), so all rows are unique.
length(unique(car1$car_ID))

# ---- First look at the data -------------------------------------------------
str(car1)
summary(car1)
##***************************************************************************##
#                                                                             #
#                               Data Cleaning                                 #
#                                                                             #
##***************************************************************************##
# Checking for issues:
# * NA values
# * CarName column needs to be separated; it has 2 delimiters, "-" and " "
# * symboling column needs to be converted into a factor
# * Factor variables need numeric/dummy encoding for use in linear regression
##***************************************************************************##
# 1. NA values
# Per-column count of missing values via colSums() over the is.na() mask.
# All counts are zero here, so the data set contains no NA values.
colSums(is.na(car1))
# 2. CarName holds both the manufacturer and the model name, separated by a
#    space ("alfa-romero" additionally uses a hyphen). Keep only the
#    manufacturer token for modelling.
#    (Original code called strsplit() with no arguments and subset a vector
#    with two indices, both of which error; rebuilt with str_split_fixed.)
# str_split_fixed() returns a character matrix; column 1 is the first token.
car1$CarName <- str_split_fixed(car1$CarName, " ", 2)[, 1]
# "alfa-romero" survives the space split as one token; strip the "-romero" tail.
car1$CarName <- sub('-romero*', '', car1$CarName)

# Inspect brand counts to spot manual-entry errors in the raw data.
check <- car1 %>% group_by(CarName) %>% summarise(count = n()) %>% arrange(desc(count))

## Observation 1: "maxda" is a typo -- no such brand exists; should be "mazda".
car1$CarName <- gsub("maxda", "mazda", car1$CarName)
## Observation 2: "Nissan" vs "nissan" -- the upper-case "N" splits one brand in two.
car1$CarName <- gsub("Nissan", "nissan", car1$CarName)
## Observation 3: "porcshce" is a misspelling of "porsche".
car1$CarName <- gsub("porcshce", "porsche", car1$CarName)
## Observation 4: "toyouta" is a misspelling of "toyota".
car1$CarName <- gsub("toyouta", "toyota", car1$CarName)
## Observation 5: "vokswagen" and the abbreviation "vw" both mean "volkswagen".
car1$CarName <- gsub("vokswagen", "volkswagen", car1$CarName)
car1$CarName <- gsub("vw", "volkswagen", car1$CarName)

# Convert the cleaned brand names to a factor (single conversion at the end;
# the original converted twice, which gsub() undid in between anyway).
car1$CarName <- factor(car1$CarName, levels = unique(car1$CarName))

# Re-check the brand counts after cleaning.
check1 <- car1 %>% group_by(CarName) %>% summarise(count = n()) %>% arrange(desc(count))
# 3. symboling is an ordinal risk rating; convert it to a factor so it is
#    treated as categorical downstream.
car1$symboling <- factor(car1$symboling, levels = unique(car1$symboling))
############### Analysis on Data ##########################################
## Bar charts of each categorical variable to see the level distributions.
## NOTE(review): aes(x = car1$fueltype) bypasses ggplot's data masking;
## aes(x = fueltype) is the intended usage -- confirm plots render correctly.
p1 <- ggplot(car1, aes(x=car1$fueltype)) + geom_bar()
p2 <- ggplot(car1, aes(x=car1$symboling)) + geom_bar()
p3 <- ggplot(car1, aes(x=car1$aspiration)) + geom_bar()
p4 <- ggplot(car1, aes(x=car1$doornumber)) + geom_bar()
p5 <- ggplot(car1, aes(x=car1$carbody)) + geom_bar()
p6 <- ggplot(car1, aes(x=car1$drivewheel)) + geom_bar()
p7 <- ggplot(car1, aes(x=car1$enginelocation)) + geom_bar()
p8 <- ggplot(car1, aes(x=car1$enginetype)) + geom_bar()
p9 <- ggplot(car1, aes(x=car1$cylindernumber)) + geom_bar()
p10 <- ggplot(car1, aes(x=car1$fuelsystem)) + geom_bar()
p11 <- ggplot(car1, aes(x=car1$CarName)) + geom_bar()
# Arrange the first ten bar charts in a single grid (cowplot).
plot_grid(p1,p2,p3,p4,p5,p6,p7,p8,p9,p10)
## Observations from the ten categorical variables plotted:
## - fueltype: gas is preferred over diesel
## - symboling: level 0 has the highest count; very few cars are rated -2
## - aspiration: standard is preferred over turbo
## - doornumber: four doors preferred over two
## - carbody: sedan and hatchback dominate
## - drivewheel: fwd has the majority
## - enginelocation: front is by far the most common
## - enginetype: ohc is the most common
## - cylindernumber: four cylinders dominate
## - fuelsystem: mpfi and 2bbl are the most common
p11
## CarName: toyota is the major brand, followed by nissan, mazda, volvo,
## subaru, volkswagen and honda.
# Correlation plot over the continuous variables.
# NOTE(review): columns are selected by position -- fragile if the column
# order of car1 changes; verify indices still point at the numeric columns.
corrplot(cor(car1[, c(10:14,17, 19:26)]), method = "circle")
## Strong positive correlation with price: enginesize, horsepower,
## curbweight, carwidth, carlength.
## Negative correlation with price: citympg and highwaympg.
# 4. Recode categorical variables for use in linear regression.

############# Two-level categoricals: relabel levels as 1/0 ################
# NOTE(review): levels<- replaces the level *labels*, not the storage type --
# the columns remain factors, which lm() handles equivalently to 0/1 dummies.
# Under R >= 4.0 read.csv() returns character columns (stringsAsFactors =
# FALSE), in which case levels<- only attaches an attribute; wrap each column
# in factor() first if the 1/0 relabelling is actually required.
levels(car1$fueltype)<-c(1,0)
levels(car1$aspiration)<-c(1,0)
levels(car1$doornumber)<-c(1,0)
levels(car1$enginelocation)<-c(1,0)

############ Multi-level categoricals: expand into dummy columns ############
# model.matrix(~var) produces an intercept plus one indicator column per
# non-reference level; dropping column 1 removes the intercept so only the
# dummies are appended. A single loop replaces seven copy-pasted blocks.
multi_level_vars <- c("carbody", "drivewheel", "cylindernumber", "CarName",
                      "symboling", "enginetype", "fuelsystem")
for (v in multi_level_vars) {
  # reformulate("carbody") builds the one-sided formula ~carbody.
  dummy_1 <- data.frame(model.matrix(reformulate(v), data = car1))
  dummy_1 <- dummy_1[, -1, drop = FALSE]  # drop the intercept column
  car1 <- cbind(car1, dummy_1)
}

# Drop car_ID and the original categorical columns now replaced by dummies.
# NOTE(review): positional column drop -- depends on the original column
# order of the CSV; verify the indices if the input file changes.
car <- car1[ , -(c(1, 2, 3, 7, 8, 15, 16, 18))]
# ---- Train / test split (70 / 30) -------------------------------------------
# Fix the RNG seed so the split is reproducible across runs.
set.seed(100)

# Draw 70% of the row indices at random for the training set.
trainindices <- sample(seq_len(nrow(car)), 0.7 * nrow(car))

# Training rows go into "train"...
train <- car[trainindices, ]
# ...and the remaining 30% form the hold-out test set.
test <- car[-trainindices, ]
#################### Build the 1st model ###################################
# Backward elimination: fit the full model, let stepAIC prune it, then drop
# one predictor at a time based on the p-value / VIF combination until every
# remaining term is significant and multicollinearity is low.
# NOTE(review): stepAIC() comes from the MASS package and vif() from the car
# package -- both are assumed to be loaded earlier in the session; confirm.
#Execute the first model_1 multilinear model in the training set.
model_1 <-lm(price~.,data=train)
# Check the summary of model.
summary(model_1)
## I am getting 9 not defined because of singularities lets run Step AIC
## to eliminate the not significant varibales from the model
step <- stepAIC(model_1, direction = "both")
step
## It has eliminated 33 variable from the final model. Let us continue with
## model building
model_2 <- lm(formula = price ~ aspiration + enginelocation + carwidth +
                curbweight + enginesize + stroke + peakrpm + citympg + carbodyhardtop +
                carbodyhatchback + carbodysedan + carbodywagon + drivewheelrwd +
                cylindernumberfive + cylindernumbertwo + CarNamebmw + CarNamedodge +
                CarNamehonda + CarNamejaguar + CarNamemazda + CarNamebuick +
                CarNamemercury + CarNamemitsubishi + CarNamenissan + CarNamepeugeot +
                CarNameplymouth + CarNameporsche + CarNamerenault + CarNamesaab +
                CarNamesubaru + CarNametoyota + CarNamevolkswagen + symboling1 +
                enginetypeohc + fuelsystem2bbl + fuelsystemmpfi, data = train)
summary(model_2)
sort(vif(model_2))
## On seeing the combination of VIF and P-Value we can eliminate - citympg
model_3 <- lm(formula = price ~ aspiration + enginelocation + carwidth +
                curbweight + enginesize + stroke + peakrpm + carbodyhardtop +
                carbodyhatchback + carbodysedan + carbodywagon + drivewheelrwd +
                cylindernumberfive + cylindernumbertwo + CarNamebmw + CarNamedodge +
                CarNamehonda + CarNamejaguar + CarNamemazda + CarNamebuick +
                CarNamemercury + CarNamemitsubishi + CarNamenissan + CarNamepeugeot +
                CarNameplymouth + CarNameporsche + CarNamerenault + CarNamesaab +
                CarNamesubaru + CarNametoyota + CarNamevolkswagen + symboling1 +
                enginetypeohc + fuelsystem2bbl + fuelsystemmpfi, data = train)
summary(model_3)
sort(vif(model_3))
## On seeing the combination of VIF and P-Value we can eliminate - fuelsystemmpfi
model_4 <- lm(formula = price ~ aspiration + enginelocation + carwidth +
                curbweight + enginesize + stroke + peakrpm + carbodyhardtop +
                carbodyhatchback + carbodysedan + carbodywagon + drivewheelrwd +
                cylindernumberfive + cylindernumbertwo + CarNamebmw + CarNamedodge +
                CarNamehonda + CarNamejaguar + CarNamemazda + CarNamebuick +
                CarNamemercury + CarNamemitsubishi + CarNamenissan + CarNamepeugeot +
                CarNameplymouth + CarNameporsche + CarNamerenault + CarNamesaab +
                CarNamesubaru + CarNametoyota + CarNamevolkswagen + symboling1 +
                enginetypeohc + fuelsystem2bbl, data = train)
summary(model_4)
sort(vif(model_4))
## On seeing the combination of VIF and P-Value we can eliminate - fuelsystem2bbl
model_5 <- lm(formula = price ~ aspiration + enginelocation + carwidth +
                curbweight + enginesize + stroke + peakrpm + carbodyhardtop +
                carbodyhatchback + carbodysedan + carbodywagon + drivewheelrwd +
                cylindernumberfive + cylindernumbertwo + CarNamebmw + CarNamedodge +
                CarNamehonda + CarNamejaguar + CarNamemazda + CarNamebuick +
                CarNamemercury + CarNamemitsubishi + CarNamenissan + CarNamepeugeot +
                CarNameplymouth + CarNameporsche + CarNamerenault + CarNamesaab +
                CarNamesubaru + CarNametoyota + CarNamevolkswagen + symboling1 +
                enginetypeohc, data = train)
summary(model_5)
sort(vif(model_5))
## On seeing the combination of VIF and P-Value we can eliminate - carbodyhardtop
model_6 <- lm(formula = price ~ aspiration + enginelocation + carwidth +
                curbweight + enginesize + stroke + peakrpm +
                carbodyhatchback + carbodysedan + carbodywagon + drivewheelrwd +
                cylindernumberfive + cylindernumbertwo + CarNamebmw + CarNamedodge +
                CarNamehonda + CarNamejaguar + CarNamemazda + CarNamebuick +
                CarNamemercury + CarNamemitsubishi + CarNamenissan + CarNamepeugeot +
                CarNameplymouth + CarNameporsche + CarNamerenault + CarNamesaab +
                CarNamesubaru + CarNametoyota + CarNamevolkswagen + symboling1 +
                enginetypeohc, data = train)
summary(model_6)
sort(vif(model_6))
## On seeing the combination of VIF and P-Value we can eliminate - carbodysedan
model_7 <- lm(formula = price ~ aspiration + enginelocation + carwidth +
                curbweight + enginesize + stroke + peakrpm +
                carbodyhatchback + carbodywagon + drivewheelrwd +
                cylindernumberfive + cylindernumbertwo + CarNamebmw + CarNamedodge +
                CarNamehonda + CarNamejaguar + CarNamemazda + CarNamebuick +
                CarNamemercury + CarNamemitsubishi + CarNamenissan + CarNamepeugeot +
                CarNameplymouth + CarNameporsche + CarNamerenault + CarNamesaab +
                CarNamesubaru + CarNametoyota + CarNamevolkswagen + symboling1 +
                enginetypeohc, data = train)
summary(model_7)
sort(vif(model_7))
## On seeing the combination of VIF and P-Value we can eliminate - carbodyhatchback
model_8 <- lm(formula = price ~ aspiration + enginelocation + carwidth +
                curbweight + enginesize + stroke + peakrpm +
                carbodywagon + drivewheelrwd +
                cylindernumberfive + cylindernumbertwo + CarNamebmw + CarNamedodge +
                CarNamehonda + CarNamejaguar + CarNamemazda + CarNamebuick +
                CarNamemercury + CarNamemitsubishi + CarNamenissan + CarNamepeugeot +
                CarNameplymouth + CarNameporsche + CarNamerenault + CarNamesaab +
                CarNamesubaru + CarNametoyota + CarNamevolkswagen + symboling1 +
                enginetypeohc, data = train)
summary(model_8)
sort(vif(model_8))
## On seeing the combination of VIF and P-Value we can eliminate - carbodywagon
# Continuation of the backward-elimination ladder (models 9-21): each model
# re-fits the previous formula with exactly one predictor removed, then the
# summary (p-values) and sorted VIFs decide the next drop.
model_9 <- lm(formula = price ~ aspiration + enginelocation + carwidth +
                curbweight + enginesize + stroke + peakrpm + drivewheelrwd +
                cylindernumberfive + cylindernumbertwo + CarNamebmw + CarNamedodge +
                CarNamehonda + CarNamejaguar + CarNamemazda + CarNamebuick +
                CarNamemercury + CarNamemitsubishi + CarNamenissan + CarNamepeugeot +
                CarNameplymouth + CarNameporsche + CarNamerenault + CarNamesaab +
                CarNamesubaru + CarNametoyota + CarNamevolkswagen + symboling1 +
                enginetypeohc, data = train)
summary(model_9)
sort(vif(model_9))
## On seeing the combination of VIF and P-Value we can eliminate - symboling1
model_10 <- lm(formula = price ~ aspiration + enginelocation + carwidth +
                curbweight + enginesize + stroke + peakrpm + drivewheelrwd +
                cylindernumberfive + cylindernumbertwo + CarNamebmw + CarNamedodge +
                CarNamehonda + CarNamejaguar + CarNamemazda + CarNamebuick +
                CarNamemercury + CarNamemitsubishi + CarNamenissan + CarNamepeugeot +
                CarNameplymouth + CarNameporsche + CarNamerenault + CarNamesaab +
                CarNamesubaru + CarNametoyota + CarNamevolkswagen + enginetypeohc, data = train)
summary(model_10)
sort(vif(model_10))
## On seeing the combination of VIF and P-Value we can eliminate - carnamemercury
model_11 <- lm(formula = price ~ aspiration + enginelocation + carwidth +
                curbweight + enginesize + stroke + peakrpm + drivewheelrwd +
                cylindernumberfive + cylindernumbertwo + CarNamebmw + CarNamedodge +
                CarNamehonda + CarNamejaguar + CarNamemazda + CarNamebuick +
                CarNamemitsubishi + CarNamenissan + CarNamepeugeot +
                CarNameplymouth + CarNameporsche + CarNamerenault + CarNamesaab +
                CarNamesubaru + CarNametoyota + CarNamevolkswagen + enginetypeohc, data = train)
summary(model_11)
sort(vif(model_11))
## All the p-Values are significant. So on seeing the VIF table we can eliminate curbweight since it has 2 *
model_12 <- lm(formula = price ~ aspiration + enginelocation + carwidth +
                enginesize + stroke + peakrpm + drivewheelrwd +
                cylindernumberfive + cylindernumbertwo + CarNamebmw + CarNamedodge +
                CarNamehonda + CarNamejaguar + CarNamemazda + CarNamebuick +
                CarNamemitsubishi + CarNamenissan + CarNamepeugeot +
                CarNameplymouth + CarNameporsche + CarNamerenault + CarNamesaab +
                CarNamesubaru + CarNametoyota + CarNamevolkswagen + enginetypeohc, data = train)
summary(model_12)
sort(vif(model_12))
## All the P-Values are significant except one so lets remove cylindernumberfive
model_13 <- lm(formula = price ~ aspiration + enginelocation + carwidth + enginesize + stroke +
                peakrpm + drivewheelrwd + cylindernumbertwo + CarNamebmw + CarNamedodge +
                CarNamehonda + CarNamejaguar + CarNamemazda + CarNamebuick +
                CarNamemitsubishi + CarNamenissan + CarNamepeugeot +
                CarNameplymouth + CarNameporsche + CarNamerenault + CarNamesaab +
                CarNamesubaru + CarNametoyota + CarNamevolkswagen + enginetypeohc, data = train)
summary(model_13)
sort(vif(model_13))
## All the P-values are significant so lets check for multicoliniarity and drop enginesize
model_14 <- lm(formula = price ~ aspiration + enginelocation + carwidth + stroke +
                peakrpm + drivewheelrwd + cylindernumbertwo + CarNamebmw + CarNamedodge +
                CarNamehonda + CarNamejaguar + CarNamemazda + CarNamebuick +
                CarNamemitsubishi + CarNamenissan + CarNamepeugeot +
                CarNameplymouth + CarNameporsche + CarNamerenault + CarNamesaab +
                CarNamesubaru + CarNametoyota + CarNamevolkswagen + enginetypeohc, data = train)
summary(model_14)
sort(vif(model_14))
## On considering the P-Values and VIF combination lets drop drivewheelrwd
model_15 <- lm(formula = price ~ aspiration + enginelocation + carwidth + stroke +
                peakrpm + cylindernumbertwo + CarNamebmw + CarNamedodge +
                CarNamehonda + CarNamejaguar + CarNamemazda + CarNamebuick +
                CarNamemitsubishi + CarNamenissan + CarNamepeugeot +
                CarNameplymouth + CarNameporsche + CarNamerenault + CarNamesaab +
                CarNamesubaru + CarNametoyota + CarNamevolkswagen + enginetypeohc, data = train)
summary(model_15)
sort(vif(model_15))
## On considering the P-Values and VIF combination lets drop stroke
model_16 <- lm(formula = price ~ aspiration + enginelocation + carwidth +
                peakrpm + cylindernumbertwo + CarNamebmw + CarNamedodge +
                CarNamehonda + CarNamejaguar + CarNamemazda + CarNamebuick +
                CarNamemitsubishi + CarNamenissan + CarNamepeugeot +
                CarNameplymouth + CarNameporsche + CarNamerenault + CarNamesaab +
                CarNamesubaru + CarNametoyota + CarNamevolkswagen + enginetypeohc, data = train)
summary(model_16)
sort(vif(model_16))
## On considering the P-Values and VIF combination lets drop peakrpm
model_17 <- lm(formula = price ~ aspiration + enginelocation + carwidth +
                cylindernumbertwo + CarNamebmw + CarNamedodge +
                CarNamehonda + CarNamejaguar + CarNamemazda + CarNamebuick +
                CarNamemitsubishi + CarNamenissan + CarNamepeugeot +
                CarNameplymouth + CarNameporsche + CarNamerenault + CarNamesaab +
                CarNamesubaru + CarNametoyota + CarNamevolkswagen + enginetypeohc, data = train)
summary(model_17)
sort(vif(model_17))
## On considering the P-Values and VIF combination lets drop carnamesaab
model_18 <- lm(formula = price ~ aspiration + enginelocation + carwidth +
                cylindernumbertwo + CarNamebmw + CarNamedodge +
                CarNamehonda + CarNamejaguar + CarNamemazda + CarNamebuick +
                CarNamemitsubishi + CarNamenissan + CarNamepeugeot +
                CarNameplymouth + CarNameporsche + CarNamerenault +
                CarNamesubaru + CarNametoyota + CarNamevolkswagen + enginetypeohc, data = train)
summary(model_18)
sort(vif(model_18))
## On considering the P-Values and VIF combination lets drop cylindernumbertwo
model_19 <- lm(formula = price ~ aspiration + enginelocation + carwidth +
                CarNamebmw + CarNamedodge + CarNamehonda + CarNamejaguar + CarNamemazda + CarNamebuick +
                CarNamemitsubishi + CarNamenissan + CarNamepeugeot +
                CarNameplymouth + CarNameporsche + CarNamerenault +
                CarNamesubaru + CarNametoyota + CarNamevolkswagen + enginetypeohc, data = train)
summary(model_19)
sort(vif(model_19))
## On considering the P-Values and VIF combination lets drop aspiration
model_20 <- lm(formula = price ~ enginelocation + carwidth + CarNamebmw + CarNamedodge +
                CarNamehonda + CarNamejaguar + CarNamemazda + CarNamebuick +
                CarNamemitsubishi + CarNamenissan + CarNamepeugeot +
                CarNameplymouth + CarNameporsche + CarNamerenault +
                CarNamesubaru + CarNametoyota + CarNamevolkswagen + enginetypeohc, data = train)
summary(model_20)
sort(vif(model_20))
## On considering the P-Values and VIF combination lets drop carnameporsche
model_21 <- lm(formula = price ~ enginelocation + carwidth + CarNamebmw + CarNamedodge +
                CarNamehonda + CarNamejaguar + CarNamemazda + CarNamebuick +
                CarNamemitsubishi + CarNamenissan + CarNamepeugeot + CarNameplymouth + CarNamerenault +
                CarNamesubaru + CarNametoyota + CarNamevolkswagen + enginetypeohc, data = train)
summary(model_21)
sort(vif(model_21))
## On considering the P-Values and VIF combination lets drop carnamedodge
# Final stretch of the elimination ladder (models 22-35), then the candidate
# models are scored on the hold-out test set by squared correlation between
# predicted and actual price.
model_22 <- lm(formula = price ~ enginelocation + carwidth + CarNamebmw +
                CarNamehonda + CarNamejaguar + CarNamemazda + CarNamebuick +
                CarNamemitsubishi + CarNamenissan + CarNamepeugeot + CarNameplymouth + CarNamerenault +
                CarNamesubaru + CarNametoyota + CarNamevolkswagen + enginetypeohc, data = train)
summary(model_22)
sort(vif(model_22))
## On considering the P-Values and VIF combination lets drop carnameplymouth
model_23 <- lm(formula = price ~ enginelocation + carwidth + CarNamebmw +
                CarNamehonda + CarNamejaguar + CarNamemazda + CarNamebuick +
                CarNamemitsubishi + CarNamenissan + CarNamepeugeot + CarNamerenault +
                CarNamesubaru + CarNametoyota + CarNamevolkswagen + enginetypeohc, data = train)
summary(model_23)
sort(vif(model_23))
## Nothing from VIF. But, carnamehonda's P-Value has gone up. SO lets remove it from our next model.
model_24 <- lm(formula = price ~ enginelocation + carwidth + CarNamebmw + CarNamejaguar + CarNamemazda + CarNamebuick +
                CarNamemitsubishi + CarNamenissan + CarNamepeugeot + CarNamerenault +
                CarNamesubaru + CarNametoyota + CarNamevolkswagen + enginetypeohc, data = train)
summary(model_24)
sort(vif(model_24))
## No multicoliniarity. It looks like a good model. byt lets try and take the higher value with one *
## carnamenissan
model_25 <- lm(formula = price ~ enginelocation + carwidth + CarNamebmw + CarNamejaguar + CarNamemazda + CarNamebuick +
                CarNamemitsubishi + CarNamepeugeot + CarNamerenault +
                CarNamesubaru + CarNametoyota + CarNamevolkswagen + enginetypeohc, data = train)
summary(model_25)
sort(vif(model_25))
## Nothing from VIF. and lets remove one with one * and see if the R values go down
##carnamevolkswagen
model_26 <- lm(formula = price ~ enginelocation + carwidth + CarNamebmw + CarNamejaguar + CarNamemazda + CarNamebuick +
                CarNamemitsubishi + CarNamepeugeot + CarNamerenault +
                CarNamesubaru + CarNametoyota + enginetypeohc, data = train)
summary(model_26)
sort(vif(model_26))
## Nothing from VIF. All the P-values are significant. Lets see if we get major variation if we drop
## A variable which is close to 0.05 with one *. carnamemitsubishi
model_27 <- lm(formula = price ~ enginelocation + carwidth + CarNamebmw + CarNamejaguar + CarNamemazda + CarNamebuick +
                CarNamepeugeot + CarNamerenault + CarNamesubaru + CarNametoyota + enginetypeohc, data = train)
summary(model_27)
sort(vif(model_27))
## Nothing from VIF. All the P-values are significant. Lets see if we get major variation if we drop
## A variable which is close to 0.05 with one *. carnametoyota
model_28 <- lm(formula = price ~ enginelocation + carwidth + CarNamebmw + CarNamejaguar + CarNamemazda + CarNamebuick +
                CarNamepeugeot + CarNamerenault + CarNamesubaru + enginetypeohc, data = train)
summary(model_28)
sort(vif(model_28))
## Nothing from VIF. All the P-values are significant. Lets see if we get major variation if we drop
## A variable which is close to 0.05 with one *. carnamerenault
model_29 <- lm(formula = price ~ enginelocation + carwidth + CarNamebmw + CarNamejaguar + CarNamemazda + CarNamebuick +
                CarNamepeugeot + CarNamesubaru + enginetypeohc, data = train)
summary(model_29)
sort(vif(model_29))
## This looks like a good model as all the P-values are below 0.05 and there are not VIF scores above 2.
## But lets see if we can drop one of the items having two *. I consider droping carnamemazda and check the results
model_30 <- lm(formula = price ~ enginelocation + carwidth + CarNamebmw + CarNamejaguar + CarNamebuick +
                CarNamepeugeot + CarNamesubaru + enginetypeohc, data = train)
summary(model_30)
sort(vif(model_30))
## This looks like a good model as all the P-values are below 0.05 and there are not VIF scores above 2.
## But lets try to drop a variable carnamepeugeot
model_31 <- lm(formula = price ~ enginelocation + carwidth + CarNamebmw + CarNamejaguar + CarNamebuick +
                CarNamesubaru + enginetypeohc, data = train)
summary(model_31)
vif(model_31)
## This looks like a good model as all the P-values are below 0.05 and there are not VIF scores above 2.
## But lets see if the values drop one more carnamesubaru
model_32 <- lm(formula = price ~ enginelocation + carwidth + CarNamebmw + CarNamejaguar + CarNamebuick +
                enginetypeohc, data = train)
summary(model_32)
vif(model_32)
## which also has a significant P-value. We need to keep this as well in our model
## for further analysis.
##Lets try and test it in our model and check for R.Sq
# NOTE(review): test[,-18] assumes price is the 18th column of `test` -- TODO confirm.
Predict_1 <- predict(model_32,test[,-18])
test$test_price <- Predict_1
# Accuracy of the predictions
# Calculate correlation
r <- cor(test$price,test$test_price)
# calculate R squared by squaring correlation
# (r and rsquared are computed but never printed; inspect them interactively)
rsquared <- cor(test$price,test$test_price)^2
## There is a heauge difference in the R.sq value 0.90 in train and .80 in test.
## Lets try adding one continuous variable and check for variation and accuracy.
## In the earlier correlation plot we saw there was high correlation for price and
## horse power so lets try considering it the model and see if we can have a mangeable
## VIF score and test and train data test results on R.Sq value.
model_33 <- lm(formula = price ~ enginelocation + carwidth + CarNamebmw + CarNamejaguar + CarNamebuick +
                enginetypeohc + horsepower, data = train)
summary(model_33)
vif(model_33)
## for further analysis.
##Lets try and test it in our model and check for R.Sq
Predict_1 <- predict(model_33,test[,-18])
test$test_price <- Predict_1
# Accuracy of the predictions
# Calculate correlation
r <- cor(test$price,test$test_price)
# calculate R squared by squaring correlation
rsquared <- cor(test$price,test$test_price)^2
## After adding the horsepower variable this model has become more stable and the r.sq from train and test are .93 and .85
## respectively.
# Overlay predicted (red) vs. actual (black) prices by row index.
# NOTE(review): ggplot2 is assumed to be loaded earlier in the session; confirm.
ggplot(test, aes(x=c(1:nrow(test)))) + geom_line(aes(y=test_price), col = 'red') + geom_line(aes(y=price), col = 'black')
## On comparing the price prediction with prices there seems to be a good overlap. This should be a good model to propose.
## But after seeing the VIF we see two variables are nearing 3. Lets see if we drop carwidth and see what difference
## It makes to the model.
model_34 <- lm(formula = price ~ enginelocation + CarNamebmw + CarNamejaguar + CarNamebuick +
                enginetypeohc + horsepower, data = train)
summary(model_34)
vif(model_34)
## On seeing the P-value of enginetypeohc its above 0.5 and lets remove it and see
model_34a <- lm(formula = price ~ enginelocation + CarNamebmw + CarNamejaguar + CarNamebuick +
                horsepower, data = train)
summary(model_34a)
vif(model_34a)
## for further analysis.
##Lets try and test it in our model and check for R.Sq
Predict_1 <- predict(model_34a,test[,-18])
test$test_price <- Predict_1
# Accuracy of the predictions
# Calculate correlation
r <- cor(test$price,test$test_price)
# calculate R squared by squaring correlation
rsquared <- cor(test$price,test$test_price)^2
## Droping carwidth and droping enginetypeohc is not doing any good as the variation between the R.sq from test and train is aroud.15
## So the best model to propose is model_33 which has less variation while testing the test data and train data
## In model_33 the enginetypeohc is having one * so lets try to remove it and check for variations
model_35 <- lm(formula = price ~ enginelocation + carwidth + CarNamebmw + CarNamejaguar + CarNamebuick +
                horsepower, data = train)
summary(model_35)
vif(model_35)
## for further analysis.
##Lets try and test it in our model and check for R.Sq
Predict_1 <- predict(model_35,test[,-18])
test$test_price <- Predict_1
# Accuracy of the predictions
# Calculate correlation
r <- cor(test$price,test$test_price)
# calculate R squared by squaring correlation
rsquared <- cor(test$price,test$test_price)^2
## With S.qr from Train and Test beeing 0.93 and 0.86 respectively and also has the P-Values significant
## for all the variables. We should keep this model and not model_33.
## lets plot this and see the curves for variations if any
ggplot(test, aes(x=c(1:nrow(test)))) + geom_line(aes(y=test_price), col = 'red') + geom_line(aes(y=price), col = 'black')
## On comparing the price prediction with prices there seems to be a good overlap. This should be a good model to propose.
########################Conclusion############################################
##We have 5 independent variables impacting the prices when it comes to US market
##Engine location rear
##Car width
##BMW
##Jaguar
##Buick
##Horse Power
##Also the intersect is a negative value which means that the slop is steep. A minimal increase
##In variable X will have a significant increase in Y.
# Any results you write to the current directory are saved as output.
b3b0eea62b51a46cb2b07109c620cad02a8acf24 | d105c853154679b3e66bfff9993bf10eee5ff7d9 | /Project 2/test_interim.R | 5f86035258aad3a201c3c3e7f849e081d754f016 | [] | no_license | finnagin/big-data-group-1 | 1bb70ea81690c01116fa51f8c3b84d84956c9486 | c503870a21c35ae85a1cb44eb30b41791e34b07a | refs/heads/master | 2020-05-05T05:33:28.411355 | 2019-06-07T22:14:00 | 2019-06-07T22:14:00 | 179,756,728 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,808 | r | test_interim.R | # Project 2 Code
# Group1: Finn Womack, Hisham Jashami, and Rama Krishna Baisetti
# Cactus Classification
library(OpenImageR)
#library(tidyverse)
library(e1071)
library(randomForest)
library(fastAdaboost)
library(gbm)
# Generate file paths relative to this script's location
# (rstudioapi::getSourceEditorContext() means the script must run inside RStudio).
file_dir <- dirname(rstudioapi::getSourceEditorContext()$path)
train_path <- file.path(file_dir,"g1_train")
test_path <- file.path(file_dir,"g1_test")
# get training and testing image file names from the two data folders
train_files <- list.files(train_path)
test_files <- list.files(test_path)
##########################################
# feature generation function
# Feature generation: read one image from disk, convert it to greyscale,
# and return its HOG (histogram of oriented gradients) descriptor.
# Uses readImage/rgb_2gray/HOG from the OpenImageR package.
features <- function(file_path, file_name){
  image_file <- file.path(file_path, file_name)
  grey <- rgb_2gray(readImage(image_file))
  # 4x4 cell grid with 6 orientation bins -> fixed-length descriptor per image
  HOG(grey, cells = 4, orientations = 6)
}
# Ground-truth labels: train.csv maps image file name (id) to has_cactus (0/1).
ans <- read.csv(file.path(file_dir,"train.csv"))
ans$id <- as.character(ans$id)
##########################################
# Build the training design matrix: one row per training image holding its
# HOG feature vector plus the class label in the final column "y".
len <- length(features(train_path, train_files[1]))
# Column names x1..x<len> for the features, then "y" for the label.
# (Vectorised paste0/seq_len replaces the original grow-in-a-loop construction,
# which would also misbehave via 1:len if len were ever 0.)
cn <- c(paste0("x", seq_len(len)), "y")
X <- matrix(NA, nrow = length(train_files), ncol = len + 1)
for (i in seq_along(train_files)) {
  file <- train_files[i]
  # Feature vector followed by the label looked up by file name.
  X[i, ] <- append(features(train_path, file), ans$has_cactus[ans$id == file])
}
colnames(X) <- cn
X <- as.data.frame(X)
X$y <- as.factor(X$y)  # classification target as a factor
# Load test files
# Build the test design matrix exactly like the training one.
len <- length(features(train_path, train_files[1]))  # probe feature length
cn <- c(paste0("x", seq_len(len)), "y")  # vectorised; replaces grow-in-loop
XTest <- matrix(NA, nrow = length(test_files), ncol = len + 1)
for (i in seq_along(test_files)) {
  file <- test_files[i]
  XTest[i, ] <- append(features(test_path, file), ans$has_cactus[ans$id == file])
}
colnames(XTest) <- cn
XTest <- as.data.frame(XTest)
XTest$y <- as.factor(XTest$y)
# NOTE(review): seeding from Sys.time() makes the train/validation split
# non-reproducible across runs; prefer a fixed integer seed for experiments.
set.seed(Sys.time())
train_idx <- sample(1:nrow(X), 11200)
#########################################################
# Week 1: model selection on the raw HOG features       #
#########################################################
# Random forest: tune mtry (variables tried per split) over 5..15,
# tracking both the OOB error and a held-out validation error.
oob_err<-double(11)
val_err<-double(11)
#mtry is no of Variables randomly chosen at each split
for(mtry in 5:15){
  rf<-randomForest(y ~ . , data = X , subset = train_idx,mtry=mtry,ntree=500)
  oob_err[mtry-4] <- rf$err.rate[500] #Error of all Trees fitted
  pred<-predict(rf,X[-train_idx,])
  # validation misclassification rate for this mtry
  val_err[mtry-4]<- with(X[-train_idx,],mean(y!=pred))
}
print(val_err)
# mtry was best at 14
# SVM: compare kernel families at the default cost.
val_err <- double(4)
i <- 1
for(k_type in c("linear", "sigmoid", "polynomial", "radial")){
  svmfit <- svm(y~.,data=X, kernel=k_type, subset=train_idx)
  pred<-predict(svmfit,X[-train_idx,])
  val_err[i]<- with(X[-train_idx,],mean(y!=pred))
  i = i+1
}
print(val_err)
# output: l:0.10892857, s:0.20535714, p:0.13785714, r:0.07785714 => Radial was best
# SVM: tune the cost parameter for the radial kernel.
val_err <- double(5)
i <- 1
for(k in c(3,4,5,6,7)){
  svmfit <- svm(y~.,data=X, kernel="radial", cost=k, subset=train_idx)
  pred<-predict(svmfit,X[-train_idx,])
  val_err[i]<- with(X[-train_idx,],mean(y!=pred))
  i = i+1
}
# First run w/ c(.1,.5,1,5,10) => 5 was best at 0.07321429
# Second run w/ c(3,4,5,6,7) => 4,5,6 were all equal & the same so 5 what we will go with
# Final week-1 models with tuned hyper-parameters; validation error is
# reported overall and per class ("1" = cactus, "0" = no cactus).
# NOTE(review): the loop above spells the argument `ntree`, but the call
# below uses `ntrees` -- that name would not match `ntree`, so the intended
# 1500 trees may silently not take effect; confirm against randomForest docs.
rf <- randomForest(y~.,data=X, ntrees=1500, mtry=14, subset=train_idx)
rf_pred<-predict(rf,X[-train_idx,])
rf_pred_1<-predict(rf,dplyr::filter(X[-train_idx,],y=="1"))
rf_pred_0<-predict(rf,dplyr::filter(X[-train_idx,],y=="0"))
rf_err<-with(X[-train_idx,],mean(y!=rf_pred))
rf_err_1<- with(dplyr::filter(X[-train_idx,],y=="1"),mean(y!=rf_pred_1))
rf_err_0<- with(dplyr::filter(X[-train_idx,],y=="0"),mean(y!=rf_pred_0))
svm_model <- svm(y~.,data=X, kernel="radial", cost=5, subset=train_idx)
svm_pred<-predict(svm_model,X[-train_idx,])
svm_pred_1<-predict(svm_model,dplyr::filter(X[-train_idx,],y=="1"))
svm_pred_0<-predict(svm_model,dplyr::filter(X[-train_idx,],y=="0"))
svm_err<- with(X[-train_idx,],mean(y!=svm_pred))
svm_err_1<- with(dplyr::filter(X[-train_idx,],y=="1"),mean(y!=svm_pred_1))
svm_err_0<- with(dplyr::filter(X[-train_idx,],y=="0"),mean(y!=svm_pred_0))
######################################################
# Week 2: dimensionality reduction + boosting        #
######################################################
# PCA on the HOG features (label column excluded); the first 55
# principal components become the new feature set.
pc <- prcomp(X[,!names(X) %in% c("y")]) # Eigen decomposition of Sample Covariance
#pc1 <- prcomp(select(X,-y), scale. = T) # Eigen decomposition of Sample Covariance
summary(pc)
Xpc = as.data.frame(pc$x[, 1:55])
Xpc$y = X$y
## work around bug in gbm 2.1.1
# Fixed: in the original workaround both the cat() and the delegating
# gbm::predict.gbm() call sat INSIDE `if (missing(n.trees))`, so any call
# that supplied n.trees (e.g. predict(adb, ..., n.trees = 500) below) fell
# through the if and returned NULL invisibly.  Only the n.trees default is
# computed conditionally; the actual prediction is now always performed.
predict.gbm <- function (object, newdata, n.trees, type = "link", single.tree = FALSE, ...) {
  # Derive a sensible n.trees when the caller did not give one.
  if (missing(n.trees)) {
    if (object$train.fraction < 1) {
      # a held-out test fraction was used during fitting
      n.trees <- gbm.perf(object, method = "test", plot.it = FALSE)
    } else if (!is.null(object$cv.error)) {
      # cross-validation error is available
      n.trees <- gbm.perf(object, method = "cv", plot.it = FALSE)
    } else {
      # fall back to all fitted trees
      n.trees <- length(object$train.error)
    }
    cat(paste("Using", n.trees, "trees...\n"))
  }
  # Always delegate to the package implementation.
  gbm::predict.gbm(object, newdata, n.trees, type, single.tree, ...)
}
# Work on a copy with y as character for the gbm adaboost fit.
X2 <- as.data.frame(Xpc)
X2$y <- as.character(X2$y)
# AdaBoost (gbm) on the 55 principal components of the training split.
adb <- gbm(y~.,data=X2[train_idx,], distribution = "adaboost", n.trees = 500)
# Predicted probabilities on the validation split (overall and per class).
adb_pred<-predict(adb,X2[-train_idx,],n.trees=500, type = "response")
adb_pred_1<-predict(adb,dplyr::filter(X2[-train_idx,],y=="1"),n.trees=500, type = "response")
adb_pred_0<-predict(adb,dplyr::filter(X2[-train_idx,],y=="0"),n.trees=500, type = "response")
# Threshold a single predicted probability into a class label:
# values >= 0.5 map to "1", values below 0.5 map to "0".
class_con <- function(x) {
  if (x >= 0.5) "1" else "0"
}
# Threshold the boosted probabilities into class labels.
adb_pred <- lapply(adb_pred, class_con)
adb_pred_1 <- lapply(adb_pred_1, class_con)
adb_pred_0 <- lapply(adb_pred_0, class_con)
# Validation misclassification rates for adaboost (overall / per class).
adb_err<- with(X2[-train_idx,],mean(y!=adb_pred))
adb_err_1<- with(dplyr::filter(X2[-train_idx,],y=="1"),mean(y!=adb_pred_1))
adb_err_0<- with(dplyr::filter(X2[-train_idx,],y=="0"),mean(y!=adb_pred_0))
# SVM and random forest refit on the PCA features with the week-1
# hyper-parameters, evaluated on the same validation split.
svm_model <- svm(y~.,data=Xpc, kernel="radial", cost=5, subset=train_idx)
svm_pred<-predict(svm_model,Xpc[-train_idx,])
svm_pred_1<-predict(svm_model,dplyr::filter(Xpc[-train_idx,],y=="1"))
svm_pred_0<-predict(svm_model,dplyr::filter(Xpc[-train_idx,],y=="0"))
svm_err<- with(Xpc[-train_idx,],mean(y!=svm_pred))
svm_err_1<- with(dplyr::filter(Xpc[-train_idx,],y=="1"),mean(y!=svm_pred_1))
svm_err_0<- with(dplyr::filter(Xpc[-train_idx,],y=="0"),mean(y!=svm_pred_0))
rf <- randomForest(y~.,data=Xpc, ntrees=1500, mtry=14, subset=train_idx)
rf_pred<-predict(rf,Xpc[-train_idx,])
rf_pred_1<-predict(rf,dplyr::filter(Xpc[-train_idx,],y=="1"))
rf_pred_0<-predict(rf,dplyr::filter(Xpc[-train_idx,],y=="0"))
rf_err<-with(Xpc[-train_idx,],mean(y!=rf_pred))
rf_err_1<- with(dplyr::filter(Xpc[-train_idx,],y=="1"),mean(y!=rf_pred_1))
rf_err_0<- with(dplyr::filter(Xpc[-train_idx,],y=="0"),mean(y!=rf_pred_0))
#############################################################
# Test: evaluate the chosen models on the held-out test set #
#############################################################
# generate Test features (label column removed; per-class slices kept too)
TestMat <- XTest[,!names(XTest) %in% c("y")]
TestMat1 <- dplyr::filter(XTest, y=="1")[,!names(XTest) %in% c("y")]
TestMat0 <- dplyr::filter(XTest, y=="0")[,!names(XTest) %in% c("y")]
# Re-derive the 55-component PCA training features (same as week 2).
Xpc = as.data.frame(pc$x[, 1:55])
Xpc$y = X$y
## work around bug in gbm 2.1.1
# Fixed: this copy of the workaround had the cat() and the delegating
# gbm::predict.gbm() call nested INSIDE `if (missing(n.trees))`; a call
# that passed n.trees therefore skipped the whole body and returned NULL
# invisibly.  The default-n.trees logic stays conditional, the delegation
# happens unconditionally.
predict.gbm <- function (object, newdata, n.trees, type = "link", single.tree = FALSE, ...) {
  if (missing(n.trees)) {
    # Pick n.trees from the fit: held-out test curve, then CV curve,
    # then simply every fitted tree.
    if (object$train.fraction < 1) {
      n.trees <- gbm.perf(object, method = "test", plot.it = FALSE)
    } else if (!is.null(object$cv.error)) {
      n.trees <- gbm.perf(object, method = "cv", plot.it = FALSE)
    } else {
      n.trees <- length(object$train.error)
    }
    cat(paste("Using", n.trees, "trees...\n"))
  }
  # Delegate to the package implementation in every case.
  gbm::predict.gbm(object, newdata, n.trees, type, single.tree, ...)
}
# Refit adaboost on the PCA training features (y as character for gbm).
X2 <- as.data.frame(Xpc)
X2$y <- as.character(X2$y)
adb <- gbm(y~.,data=X2[train_idx,], distribution = "adaboost", n.trees = 500)
adb_pred<-predict(adb,X2[-train_idx,],n.trees=500, type = "response")
adb_pred_1<-predict(adb,dplyr::filter(X2[-train_idx,],y=="1"),n.trees=500, type = "response")
adb_pred_0<-predict(adb,dplyr::filter(X2[-train_idx,],y=="0"),n.trees=500, type = "response")
# generate test features
# Project the raw test features into the training PCA space.
Testpc <- as.data.frame(predict(pc,TestMat))
Testpc$y <- as.character(XTest$y)
# Boosted probabilities on the test set (overall and per class).
adb_test<-predict(adb,Testpc,n.trees=500, type = "response")
adb_test_1<-predict(adb,dplyr::filter(Testpc, y == "1"),n.trees=500, type = "response")
adb_test_0<-predict(adb,dplyr::filter(Testpc, y == "0"),n.trees=500, type = "response")
# Convert one probability to its class string ("1" when the probability
# reaches the 0.5 cut-off, "0" otherwise).
class_con <- function(prob) {
  if (prob >= 0.5) {
    "1"
  } else {
    "0"
  }
}
# Threshold test-set probabilities into labels.
adb_test <- lapply(adb_test, class_con)
adb_test_1 <- lapply(adb_test_1, class_con)
adb_test_0 <- lapply(adb_test_0, class_con)
# Test-set misclassification rates for adaboost (overall / per class).
err_adb<- with(XTest,mean(y!=adb_test))
err_adb_1<- with(dplyr::filter(XTest,y=="1"),mean(y!=adb_test_1))
err_adb_0<- with(dplyr::filter(XTest,y=="0"),mean(y!=adb_test_0))
# Random forest on the PCA features, evaluated on the projected test set.
rf <- randomForest(y~.,data=Xpc, ntrees=1500, mtry=14, subset=train_idx)
rf_test<-predict(rf,Testpc)
rf_test_1<-predict(rf,dplyr::filter(Testpc,y=="1"))
rf_test_0<-predict(rf,dplyr::filter(Testpc,y=="0"))
err_rf<-with(XTest,mean(y!=rf_test))
err_rf_1<- with(dplyr::filter(XTest,y=="1"),mean(y!=rf_test_1))
err_rf_0<- with(dplyr::filter(XTest,y=="0"),mean(y!=rf_test_0))
# SVM trained on the raw HOG features, evaluated on the raw test matrix.
svm_model <- svm(y~.,data=X, kernel="radial", cost=5, subset=train_idx)
svm_test<-predict(svm_model,XTest)
svm_test_1<-predict(svm_model,dplyr::filter(XTest,y=="1"))
svm_test_0<-predict(svm_model,dplyr::filter(XTest,y=="0"))
err_svm<- with(XTest,mean(y!=svm_test))
err_svm_1<- with(dplyr::filter(XTest,y=="1"),mean(y!=svm_test_1))
err_svm_0<- with(dplyr::filter(XTest,y=="0"),mean(y!=svm_test_0))
c69e3a742f4acbd9004a24d8a617ca14d5dcecb2 | 8af2c1daf8406553e939d49d3acd42c24496f592 | /R/shinydetails-package.R | a1245ec69ec735be10f0cd5306a6b5145367fcb6 | [] | no_license | BioGenies/shinydetails | 291e2e00bf75bcbda902992eb2292d423f856e91 | 831603b64439df2f6fb8c00ec64024937d659643 | refs/heads/master | 2023-03-09T20:15:16.781605 | 2021-02-24T13:57:06 | 2021-02-24T13:57:06 | 280,454,375 | 5 | 1 | null | null | null | null | UTF-8 | R | false | false | 996 | r | shinydetails-package.R | #' shinydetails
#'
#' @description Utility functions and modules for building Shiny
#' applications (see the \code{@importFrom} list below for the shiny,
#' ggplot2, shinyhelper and DT functionality this package builds on).
#' @docType package
#' @name shinydetails-package
#' @aliases shinydetails
#' @importFrom ggplot2 ggplot_build
#' @importFrom ggplot2 ggsave
#' @importFrom shiny tabsetPanel
#' @importFrom shiny reactive
#' @importFrom shiny tabPanel
#' @importFrom shiny tagList
#' @importFrom shiny downloadButton
#' @importFrom shiny moduleServer
#' @importFrom shiny plotOutput
#' @importFrom shiny downloadHandler
#' @importFrom shiny sliderInput
#' @importFrom shiny numericInput
#' @importFrom shiny dataTableOutput
#' @importFrom shiny nearPoints
#' @importFrom shiny div
#' @importFrom shiny HTML
#' @importFrom shiny p
#' @importFrom shiny uiOutput
#' @importFrom shiny hoverOpts
#' @importFrom shiny NS
#' @importFrom shiny renderPlot
#' @importFrom shiny renderUI
#' @importFrom shiny br
#' @importFrom shinyhelper helper
#' @importFrom shinyhelper observe_helpers
#' @importFrom shinyhelper create_help_files
#' @importFrom DT datatable
#'
NULL
|
0f82aff3a51ed1cc4bdc82cf0a2ac46b5237d120 | 01411d60b66af197744af1cb885e77bebe13b912 | /R/split.R | 90b7c716464370ccc70fbda16b0a53632b323294 | [] | no_license | LaAzteca/re2r | a7c3a272860b58a902696947f08d01976336f5d6 | 1b1f4f963d9e3fa00b6259f1fe1eafb414eea734 | refs/heads/master | 2017-12-04T08:22:42.267522 | 2016-12-19T16:54:52 | 2016-12-19T16:54:52 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,859 | r | split.R | ## This file is part of the 're2r' package for R.
## Copyright (C) 2016, Qin Wenfeng
## All rights reserved.
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## 1. Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
##
## 2. Redistributions in binary form must reproduce the above copyright notice,
## this list of conditions and the following disclaimer in the documentation
## and/or other materials provided with the distribution.
##
## 3. Neither the name of the copyright holder nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
## BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
## FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
## OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
## EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#' Split up a string into pieces.
#'
#' Split up a string into pieces. Vectorised over string and pattern.
#'
#' @inheritParams re2_locate
#' @param n number of pieces to return. Default (Inf) for
#' \code{\link{re2_split}} uses all possible split positions.
#' For \code{\link{re2_split_fixed}}, if n is greater than
#' the number of pieces, the result will be padded with
#' empty strings.
#' @examples
#' re2_split("yabba dabba doo", " ")
#' re2_split_fixed(c("yabba dabba doo", "a bc"), " ", 2)
#' @return For \code{\link{re2_split_fixed}}, a character matrix with n columns.
#'
#' For \code{\link{re2_split}}, a list of character vectors.
#' @export
re2_split = function(string, pattern, n = Inf, parallel = FALSE, grain_size = 100000, ...) {
if (is.character(pattern) || mode(pattern) == "logical") {
pattern = re2(pattern, ...)
}
cpp_split(stri_enc_toutf8(string), pattern, n, FALSE,parallel,grain_size)
}
#' @rdname re2_split
#' @export
re2_split_fixed = function(string, pattern, n, parallel = FALSE, grain_size = 100000, ...) {
if (is.character(pattern) || mode(pattern) == "logical") {
pattern = re2(pattern, ...)
}
cpp_split(stri_enc_toutf8(string), pattern, n, TRUE, parallel,grain_size)
}
|
e7b642cea42236263b9f6b3de9511323ee26f0e3 | d6029fa181a9487ebf62b9a74a8d677c0d416fa6 | /main.R | 9b0afe95dc2048cf82dd2758a71ae4553025d5ed | [] | no_license | yingding99/CopulaRC | fea5f907bad38f0e8358d9a1b19280650fdd7392 | 5078181ac0ed13ea2f90a4fc353f4434e438c870 | refs/heads/master | 2020-04-02T11:32:30.197675 | 2018-10-23T20:49:41 | 2018-10-23T20:49:41 | 153,642,616 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,500 | r | main.R | #################################################################################################
### Estimation and Inference under the Clayton-Weibull model for bivariate right-censored data###
##################################################################################################
source("model_functions.R")
source("step1b.R")
source("lik.R")
library("stats")
library("pracma")
library("survival")
# data input
load("example_data.RData")
var_list= c("var1","var2")
n <- dim(data)[1]
p <- length(var_list)
# separate into two margins
indata1 <- data[data[,"ind"]==1, ]
indata2 <- data[data[,"ind"]==2, ]
status1 <- indata1$status
status2 <- indata2$status
x1 <- as.matrix(indata1[,var_list],nrow = n)
x2 <- as.matrix(indata2[,var_list],nrow = n)
x <- data.frame(id=c(indata1$id,indata2$id), event_time = c(indata1$event_time,indata2$event_time), status = c(indata1$status,indata2$status), rbind(x1,x2))
##################################################################################################
### Fitting Clayton-Weibull using covariate var1 only
##################################################################################################
#### step 1a: marginal model
M <- survreg(Surv(event_time, status) ~ var1 + cluster(id), data=x, dist="weibull")
lambda_ini <- exp(M$coef[1]) # scale
k_ini <- 1/M$scale # shape
beta_ini <- -1*coef(M)[-1]*k_ini # coefficients
#### step 1b: update eta
eta_ini <- 2
fit0<-nlm(step1b, p=eta_ini, p2 = c(lambda_ini,k_ini,beta_ini),
x1=x1, x2=x2,status1=status1,status2=status2, var_list=c("var1"),
iterlim=500, steptol = 1e-6)
eta_ini<-fit0$estimate
### step 2, with all starting values of p, optimize all parameters at the same time
model_step2 <- nlm(lik, p=c(lambda_ini,k_ini,beta_ini,eta_ini),
x1=x1, x2=x2,status1=status1,status2=status2, var_list=c("var1"),
iterlim = 500, steptol = 1e-6, hessian = T)
inv_info = solve(model_step2$hessian)
se = sqrt(diag(inv_info))
beta = model_step2$estimate # contains lambda, k, beta and eta
stat = (beta-0)^2/se^2
pvalue = pchisq(stat,1,lower.tail=F)
summary = cbind(beta, se, stat, pvalue)
rownames(summary) = c("lambda","k", "var1", "eta")
colnames(summary) = c("estimate","SE","stat","pvalue")
summary
# estimate SE stat pvalue
# lambda 9.80414809 0.66057049 220.28320 7.845309e-50
# k 1.93680321 0.07098497 744.45460 6.444329e-164
# var1 0.09564934 0.01956758 23.89404 1.017863e-06
# eta 0.58768426 0.12204221 23.18825 1.468920e-06
############################################################################
### Generalized score test Under Null (H0: effect of var2 =0)
############################################################################
estimates = c(model_step2$estimate[1:2], # lambda, k
model_step2$estimate[3:(2+p-1)], # var1
0, # var2
model_step2$estimate[length(model_step2$estimate)]) # eta
score = grad(lik, x0 = estimates,x1=x1, x2=x2,status1=status1,status2=status2, var_list=c("var1", "var2")) # score function by numerical approximation
hes = hessian(lik, x0 = estimates,x1=x1, x2=x2,status1=status1,status2=status2, var_list=c("var1", "var2")) # hessian matrix by numeircal approximation
test_stat = t(score) %*% solve(hes) %*% score # score test statistics
test_stat
# 0.05225796
p_value_score <- pchisq(test_stat,1, lower.tail = F) # score test p value
p_value_score
# 0.8191798
|
43d42a313d7c40f68c4636a90f1e3539f4bb88d4 | 78e05072507d3667ba2b7e7489ff7ccec3f09c1c | /run_analysis.R | 050bd78e4ef87c2a5e099bc130498f73868c0a70 | [] | no_license | kshitijnt/coursera_test | 9629d871709468fdbe1ffecc7a2b897c070ee3af | 1a48f3f21e020b7c1acfe5d28c0e460cdade5aa1 | refs/heads/master | 2020-03-28T19:39:26.326055 | 2015-03-22T08:03:31 | 2015-03-22T08:03:31 | 30,579,463 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,523 | r | run_analysis.R | # set working directory
# read activity labels and feature names
setwd("~/coursera_test/UCI_HAR_Dataset")
activitylabel <- read.table("activity_labels.txt", header=FALSE,sep=" ",col.names=c("Code","Label"))
features <- read.table("features.txt", header=FALSE, sep=" ")
featurelabel <- features[,2]
# Read the TEST files individually and then column bind them to form
#a single data frame for all test data
test_data <- read.table("./test/X_test.txt", col.names=featurelabel)
test_data_subject <- read.table("./test/subject_test.txt",col.names="Subject")
test_data_act_code <- read.table("./test/y_test.txt", col.names="Code")
test_data_all = cbind(test_data_subject,test_data_act_code, test_data)
# Read the TRAINING files individually and then column bind them to form
#a single data frame for all test data
train_data <- read.table("./train/X_train.txt", col.names=featurelabel)
train_data_subject <- read.table("./train/subject_train.txt",col.names="Subject")
train_data_act_code <- read.table("./train/y_train.txt",col.names="Code")
train_data_all <- cbind(train_data_subject,train_data_act_code,train_data)
#Merge two datasets
test_train_all <- rbind(test_data_all, train_data_all)
# Add activity Labels
alldata_labels = merge(activitylabel, test_train_all,by.activitylabel="Code", by.test_train_all="Code",all=TRUE)
## Take Averages and Submit the file
averages <- ddply(alldata_labels,.(Subject,Code,Label),function(x) colMeans(x[4:ncol(alldata_labels)]))
write.table(averages,"averages.txt",row.names=FALSE)
|
2d6b957b731ebf284378c5382adecf283303fd10 | 46eaba664467be3b3098532479e00cc5d7355286 | /source/functions-harvest_rate.R | b9163dcd3f9335e7d9734bb725dde04cb54cb12f | [] | no_license | szjhobbs/sturgeon_pop_model | c3f24db09f1a1b1a4ecb5e699427da1f0786d206 | 953d55f1d4876050735acbe42dace0ee7c4f8a92 | refs/heads/main | 2023-01-30T19:56:20.927231 | 2020-12-11T02:25:38 | 2020-12-11T02:25:38 | 320,441,883 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 348 | r | functions-harvest_rate.R |
# test data - totally made up
dat <- data.frame(
TagVal = c(20, 50, 100),
Rel = c(100, 99, 100),
Ret = c(1, 2, 4)
)
# develop function to get response rate
# (1) calc Ret / Rel for all levels
# (2) for Ret / Ret > 0, find highest reward value (call prime)
# (3) calc for results #1 (less results # 2) result #1 / prime
# (4)
# (5)
|
b6f399c7cbe876fba70b2169767ade79ed4dfc40 | 7e62bac9b546db47b5ac8dd3fbfcb8e7ca1a634c | /RNA_Seq_scripts/Export_Network_Cytoscape.R | 5d28d84a832e88dac21266f24002e1016b361d6a | [] | no_license | harrisonlab/phytophthora_fragariae | 75deca387ef5b26bad3e439cdb72aa04fa740ea6 | 96f93bbcb36afaf95e5186e5787c51beeca6e9e4 | refs/heads/master | 2021-01-23T19:45:10.146487 | 2020-02-02T17:15:02 | 2020-02-02T17:15:02 | 46,498,660 | 3 | 5 | null | null | null | null | UTF-8 | R | false | false | 1,499 | r | Export_Network_Cytoscape.R | #!/home/adamst/prog/R/R-3.2.5/bin/Rscript
# Load libaries
library("WGCNA")
library("optparse")
# Import option recommended in WGCNA documentation
options(stringsAsFactors = FALSE)
# Parse arguments
opt_list <- list(
make_option("--out_dir", type = "character",
help = "Directory for output to be written to"),
make_option("--module", type = "character",
help = "module to export for visualisation in Cytoscape")
)
opt <- parse_args(OptionParser(option_list = opt_list))
outdir <- opt$out_dir
module <- opt$module
lfile <- paste(outdir, "Cleaned_data.RData", sep = "/")
lnames <- load(file = lfile)
lfile2 <- paste(outdir, "modules.RData", sep = "/")
lnames2 <- load(file = lfile2)
# Set variables for writing out files for Cytoscape
transcripts <- names(datexpr)
inmodule <- is.finite(match(modulecolours, module))
modtranscripts <- transcripts[inmodule]
modtom <- tom[inmodule, inmodule]
dimnames(modtom) <- list(modtranscripts, modtranscripts)
# Write out files for Cytoscape
edgename_start <- paste("cyt_inp_edges", module, sep = "_")
edgename <- paste(edgename_start, "txt", sep = ".")
nodename_start <- paste("cyt_inp_nodes", module, sep = "_")
nodename <- paste(nodename_start, "txt", sep = ".")
cyt <- exportNetworkToCytoscape(modtom,
edgeFile = paste(outdir, edgename, sep = "/"),
nodeFile = paste(outdir, nodename, sep = "/"),
weighted = TRUE, threshold = 0.02, nodeNames = modtranscripts,
altNodeNames = modtranscripts, nodeAttr = modulecolours[inmodule])
|
77226cac2920ce9835d498e499ed1e27be4f8e03 | 4dedf112d0f2127af493176387d938a932f45af5 | /R/pfa.R | 0bd6d69dff834d74cdf266ba36348cb4ecbcdb64 | [] | no_license | cran/Correlplot | 6ee53b7d7b157a69f55f62e4ebb5119526534195 | 679244a0c7a25f8cf266310f377bfa9712e4e76a | refs/heads/master | 2023-03-16T08:26:09.358021 | 2023-03-06T22:50:11 | 2023-03-06T22:50:11 | 17,678,545 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,772 | r | pfa.R | pfa <- function(X, option="data", m=2, initial.communality ="R2", crit=0.001, verbose = FALSE)
{
S <- switch(option, cor = X, cov = X, data = cor(X), stop("pfa: invalid value for parameter option"))
p <- ncol(X)
R <- diag(1/sqrt(diag(S)))%*%S%*%diag(1/sqrt(diag(S)))
if(initial.communality=="R2")
#comu <- diag(1-1/solve(R))*diag(S)
comu <- 1-1/diag(solve(R))
if(initial.communality=="maxcor") {
Rn <- abs(R)
diag(Rn) <- 0
comu <- apply(Rn,1,max)
}
if(verbose) {
cat("Initial communalities\n")
print(comu)
}
L <- diag(svd(S)$d)
V <- svd(S)$u
A <- as.matrix(V[,1:m])%*%as.matrix(sqrt(L[1:m,1:m]))
eps <- 1
epsh <- c(eps)
niter <- 1
C <- S
while (eps > crit) {
diag(C) <- comu
L <-diag(svd(C)$d)
V <- svd(C)$u
A <- as.matrix(V[,1:m])%*%as.matrix(sqrt(L[1:m,1:m]))
comu.r <- diag(A%*%t(A))
corr <-(1:length(comu.r)) [comu.r > diag(S)]
comu.r[corr] <- diag(S)[corr]
eps <- max(abs(comu - comu.r))
epsh <- c(epsh, eps)
comu <- comu.r
niter <- niter + 1
}
if(verbose) {
ithist <- cbind(1:niter,epsh)
cat("iteration history\n")
print(ithist)
}
La <- A
Psi <- diag(diag(S)- comu)
colnames(Psi) <- colnames(X)
rownames(Psi) <- colnames(X)
rownames(La) <- colnames(X)
if(verbose) {
cat("Final communalities\n")
print(comu)
cat(niter," iterations till convergence\n")
cat("Specific variances:\n")
print(diag(Psi))
}
Shat <- (La%*%t(La) + Psi)
Dv <- diag(t(La)%*%La)
if(verbose) {
cat("Variance explained by each factor\n")
print(Dv)
cat("Loadings:\n")
print(La)
}
Res <- S - La%*%t(La) + Psi
if(option=="data") {
Xs <- scale(X)
Fs <- Xs%*%solve(R)%*%La
} else Fs <- NA
list(Res = Res, Psi = Psi, La = La, Shat = Shat, Fs = Fs)
}
|
cff9db5c46d9088582f1e203cba8d898ed643aaa | 31e651cd8a66a3bef518d9025806964ac9cb7095 | /R/kernelSx.R | 2e1919661d8ec060d09e4bd60aa0475ec1865a94 | [] | no_license | cran/ibr | 2975500b65c56e6b4bec7409ec3a344ff9c9d51a | 40dd843b0ae937f6fbee611820cc9a4290b76464 | refs/heads/master | 2021-01-22T08:34:00.049441 | 2017-05-01T20:23:16 | 2017-05-01T20:23:16 | 17,696,739 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 726 | r | kernelSx.R | kernelSx <- function(kernelx="g",X,Xetoile,bx){
poids <- function(kernelx="g",X,bx,valx){
X <- as.matrix(X)
n <- nrow(X)
p <- ncol(X)
VALX <- matrix(rep(valx,n),ncol=p,byrow=TRUE)
BX <- matrix(rep(bx,n),ncol=p,byrow=TRUE)
MAT <- (X-VALX)/BX
if(kernelx=="e"){noyau <- epane}
if(kernelx=="g"){noyau <- gaussien}
if(kernelx=="q"){noyau <- quartic}
if(kernelx=="u"){noyau <- uniform}
vv <- noyau(MAT)
vv <- apply(vv,1,prod)
return(W=vv)
}
X <- as.matrix(X)
n <- nrow(X)
Xetoile <- as.matrix(Xetoile)
netoile <- nrow(Xetoile)
H <- matrix(0,ncol=n,nrow=netoile)
for (i in 1:netoile){
w <- poids(kernelx,X,bx,Xetoile[i,])
H[i,] <- w/sum(w)
}
return(H)
}
|
3a17a6c786edde102f65283eeee0d7605db4f352 | 86c706879daa18231d5890d24f749fc27900d5be | /man/CleanTempFolder.Rd | 22508f9c1d4424f2fbf94ec8774195e85b114636 | [] | no_license | charlietiddens/rmsfuns | 0b98085936706b9d974cebc7d54795574afd54a1 | 4677ed0455b9b6efe6cbb791a5d922044ce3bded | refs/heads/master | 2021-04-26T22:17:17.066543 | 2017-10-09T06:47:43 | 2017-10-09T06:47:43 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 415 | rd | CleanTempFolder.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CleanTempFolder.R
\name{CleanTempFolder}
\alias{CleanTempFolder}
\title{CleanTempFolder}
\usage{
CleanTempFolder()
}
\value{
Cleaned R Temp Folder.
}
\description{
This function will clean all the csv files created using ViewXL in the temp folder. This might take a while to clean if not done regularly.
}
\examples{
CleanTempFolder()
}
|
dd1474ac78415a9155e0256de18ea00b8f1c70b7 | 7a6be82ecc13a6deafa51b207915b08336486d91 | /richness and diversity.R | 3ca63860d96818aec1c033cf948bdd4f75bf756e | [] | no_license | anerigarg/MSc_data | e60ab5293b382a3280b7688bddddbb399bd578d3 | 37b52f80204d8a02d605b3380003e2fc40cee6ab | refs/heads/master | 2023-01-24T11:16:27.387328 | 2020-12-08T19:37:44 | 2020-12-08T19:37:44 | 285,697,250 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 27,683 | r | richness and diversity.R | #Richness, Rarefaction and Diversity (accumulation curves)
# packages and palletes ---------------------------------------------------
library(readr)
library(tidyverse)
library(ggplot2)
library(viridis)
library(PNWColors)
library(dplyr)
library(plyr)
library(openxlsx)
library(vegan)
# df files ----------------------------------------------------------------
ARD_3_with_0 <- read_csv("Artificial coral experiment/data/Art Recruit data/filter 0 values/test ARD_3/ARD_3_with_0.csv")
ARD_3_with_0 <- ARD_3_with_0 %>%
mutate(treatment = factor(treatment, levels = c("control", "100A", "30L70A", "50L50A", "70L30A", "100L"),
labels = c("control", "0%", "30%", "50%", "70%", "100%")))
ARD_3_with_0$treatment_complexity <-paste(ARD_3_with_0$treatment, "-",ARD_3_with_0$complexity)
ARD_3_with_0_new_2 <- read_csv("Artificial coral experiment/data/Art Recruit data/filter 0 values/test ARD_3/ARD_3 (No HN day 4)/ARD_3_with_0_new_2.csv") %>%
mutate(treatment = factor(treatment, levels = c("control", "100A", "30L70A", "50L50A", "70L30A", "100L"),
labels = c("control", "0%", "30%", "50%", "70%", "100%")))
# Overall richness for each treatment/comp --------------------------------
#using tutorial from:https://www.flutterbys.com.au/stats/tut/tut13.2.html
#export to create spp presence matrix.
ARD_3_rich_pivot <- ARD_3_with_0 %>%
select(treatment, complexity, treatment_complexity, common_name, presence)
write.csv(ARD_3_rich_pivot, "ARD_3_rich_pivot.csv")
ARD_3_rich_matrix <- read_csv("Artificial coral experiment/data/Art Recruit data/richness and diversity/ARD_3/all visits/ARD_3_rich_matrix.csv")
ddply(ARD_3_rich_matrix,~treatment,function(x) {data.frame(RICHNESS=sum(x[-1]>0))})
#overall low complexity sites are more species rich, copy and paste to new excel sheet, did Text to Col in Data to separate into seprate cols
ARD_3_richness <- read_csv("Artificial coral experiment/data/Art Recruit data/richness and diversity/ARD_3/all visits/ARD_3_richness.csv")
ARD_3_richness <- ARD_3_richness %>%
mutate(treatment = factor(treatment, levels = c("control", "0%", "30%", "50%", "70%", "100%")))
ggplot(data = ARD_3_richness) +
geom_col(aes(x = treatment,
y = RICHNESS,
fill = treatment)) +
facet_grid(~complexity) +
ggtitle("Species Richness") +
labs(x = expression(Treatment),
y = expression(Number~of~Species)) +
theme_classic()+
theme(
axis.title = element_text(size = 14),
axis.text = element_text(size = 14, angle = -60, hjust = 0.01),
legend.position = "none",
strip.text = element_text(size = 14)
)
# unique(ARD_3_with_0_new_2$days_since_outplanting)
# unique(ARD_3_with_0$visit)
# rarefaction ARD_3 -------------------------------------------------------
ARD_3_rich_matrix <- read_csv("Artificial coral experiment/data/Art Recruit data/richness and diversity/ARD_3/all visits/ARD_3_rich_matrix.csv")
ddply(ARD_3_rich_matrix,~treatment,function(x) {data.frame(RAREFY=rarefy(x[-1], sample=10, MARGIN=1))})
#oh wait, this is for when total abundance inbalances are due to sampling diferences
#but we had the same sampling diffrences so doesn't apply to this data set
# diversity --------------------------------------------------------------
#takes into account richness and dominance/eveness
#1) shannon-wiener
# values range from o-5, typically from 1.5-3.5
ARD_3_rich_matrix <- read_csv("Artificial coral experiment/data/Art Recruit data/richness and diversity/ARD_3/all visits/ARD_3_rich_matrix.csv")
# > library(plyr)
ddply(ARD_3_rich_matrix,~treatment,function(x) {data.frame(SHANNON=diversity(x[-1], index="shannon"))})
#copied and pested to new csv, called ARD_3_shannon
ARD_3_shannon <- read_csv("Artificial coral experiment/data/Art Recruit data/richness and diversity/ARD_3/all visits/diversity/ARD_3_shannon.csv")
ARD_3_shannon <- ARD_3_shannon %>%
mutate(treatment = factor(treatment, levels = c("control", "0%", "30%", "50%", "70%", "100%")))
ggplot(data = ARD_3_shannon) +
geom_col(aes(x = treatment,
y = SHANNON,
fill = treatment)) +
facet_grid(~complexity) +
ggtitle("shannon-wiener Index of Species Diversity") +
labs(x = expression(Treatment),
y = expression(H~value)) +
theme_classic()+
theme(
axis.title = element_text(size = 14),
axis.text = element_text(size = 14, angle = -60, hjust = 0.01),
legend.position = "none",
strip.text = element_text(size = 14)
)
#2) simpson's Index (eveness)
# more of a measure of dominance, weights towards most abundant taxa
# prob 2 individuals drawn at random from com will be dif spp. (so ranges from 0-1)
# higher values represent higher diversity
# less sensisitve to rare spp than simpson-weiner
ddply(ARD_3_rich_matrix,~treatment,function(x) {data.frame(SIMPSON=diversity(x[-1], index="simpson"))})
ARD_3_simpson <- read_csv("Artificial coral experiment/data/Art Recruit data/richness and diversity/ARD_3/all visits/diversity/ARD_3_simpson.csv")
ARD_3_simpson <- ARD_3_simpson %>%
mutate(treatment = factor(treatment, levels = c("control", "0%", "30%", "50%", "70%", "100%")))
ggplot(data = ARD_3_simpson) +
geom_col(aes(x = treatment,
y = SIMPSON,
fill = treatment)) +
facet_grid(~complexity) +
ggtitle("simpson Index of Species Diversity") +
labs(x = expression(Treatment),
y = expression(D~value)) +
theme_classic()+
theme(
axis.title = element_text(size = 14),
axis.text = element_text(size = 14, angle = -60, hjust = 0.01),
legend.position = "none",
strip.text = element_text(size = 14)
)
# diversity no bhw --------------------------------------------------------
#takes into account richness and dominance/eveness
#1) shannon-wiener
# values range from o-5, typically from 1.5-3.5
ARD_3_rich_matrix_nobhw <- read_csv("Artificial coral experiment/data/Art Recruit data/richness and diversity/ARD_3/all visits/No bhw/ARD_3_rich_matrix_nobhw.csv")
# > library(plyr)
ddply(ARD_3_rich_matrix_nobhw,~treatment,function(x) {data.frame(SHANNON=diversity(x[-1], index="shannon"))})
#copied and pested to new csv, called ARD_3_shannon
ARD_3_shannon_nobhw <- read_csv("Artificial coral experiment/data/Art Recruit data/richness and diversity/ARD_3/all visits/diversity/ARD_3_shannon_nobhw.csv")
ARD_3_shannon_nobhw <- ARD_3_shannon_nobhw %>%
mutate(treatment = factor(treatment, levels = c("control", "0%", "30%", "50%", "70%", "100%")))
ggplot(data = ARD_3_shannon_nobhw) +
geom_col(aes(x = treatment,
y = SHANNON,
fill = treatment)) +
facet_grid(~complexity) +
ggtitle("shannon-wiener Index of Species Diversity - no bhw") +
labs(x = expression(Treatment),
y = expression(D~value)) +
theme_classic()+
theme(
axis.title = element_text(size = 14),
axis.text = element_text(size = 14, angle = -60, hjust = 0.01),
legend.position = "none",
strip.text = element_text(size = 14)
)
#2) simpson's Index (eveness)
# more of a measure of dominance, weights towards most abundant taxa
# prob 2 individuals drawn at random from com will be dif spp. (so ranges from 0-1)
# higher values represent higher diversity
# less sensisitve to rare spp than simpson-weiner
ddply(ARD_3_rich_matrix_nobhw,~treatment,function(x) {data.frame(SIMPSON=diversity(x[-1], index="simpson"))})
ARD_3_simpson_nobhw <- read_csv("Artificial coral experiment/data/Art Recruit data/richness and diversity/ARD_3/all visits/diversity/ARD_3_simpson_nobhw.csv")
ARD_3_simpson_nobhw <- ARD_3_simpson_nobhw %>%
mutate(treatment = factor(treatment, levels = c("control", "0%", "30%", "50%", "70%", "100%")))
ggplot(data = ARD_3_simpson_nobhw) +
geom_col(aes(x = treatment,
y = SIMPSON,
fill = treatment)) +
facet_grid(~complexity) +
ggtitle("simpson Index of Species Diversity - no bhw") +
labs(x = expression(Treatment),
y = expression(H~value)) +
theme_classic()+
theme(
axis.title = element_text(size = 14),
axis.text = element_text(size = 14, angle = -60, hjust = 0.01),
legend.position = "none",
strip.text = element_text(size = 14)
)
# diversity - by time_block -----------------------------------------------
ARD_3_with_0$treatment_complexity_time <- paste(ARD_3_with_0$treatment_complexity, "-",ARD_3_with_0$time_block)
#need to make a col with all crossed factors together (so treatment, complexity and time_block)
ARD_3_rich_pivot_time <- ARD_3_with_0 %>%
select(treatment_complexity_time, common_name, presence)
write.xlsx(ARD_3_rich_pivot_time, "ARD_3_rich_pivot_time.xlsx")
#make pivot table
ARD_3_rich_matrix_time <- read_csv("Artificial coral experiment/data/Art Recruit data/richness and diversity/ARD_3/by time_block/ARD_3_rich_matrix_time.csv")
ddply(ARD_3_rich_matrix_time,~site,function(x) {data.frame(SIMPSON=diversity(x[-1], index="simpson"))})
#copy and paste output to new df, use text to columns in Data Tools to separate into cols.
ARD_3_simpson_time <- read_csv("Artificial coral experiment/data/Art Recruit data/richness and diversity/ARD_3/by time_block/ARD_3_simpson_time.csv")
ARD_3_simpson_time <- ARD_3_simpson_time %>%
mutate(treatment = factor(treatment, levels = c("control", "0%", "30%", "50%", "70%", "100%"))) %>%
mutate(time_block = factor(time_block, levels = c("start", "middle", "end")))
ggplot(data = ARD_3_simpson_time) +
geom_col(aes(x = time_block,
y = SIMPSON,
fill = time_block),
alpha = 0.6) +
scale_fill_viridis(discrete = TRUE, name = "Time Block") +
facet_grid(complexity ~ treatment) +
ggtitle("simpson Index of Species Diversity - time") +
labs(x = expression(Time),
y = expression(D~value)) +
theme_classic()+
theme(
axis.title = element_text(size = 14),
axis.text = element_text(size = 14, angle = -60, hjust = 0.01),
legend.position = "none",
strip.text = element_text(size = 14)
)
# richness - over time ----------------------------------------------------
# ARD_3_with_0$treatment_complexity_time <- paste(ARD_3_with_0$treatment_complexity, "-",ARD_3_with_0$time_block)
# #need to make a col with all crossed factors together (so treatment, complexity and time_block)
#
# ARD_3_rich_pivot_time <- ARD_3_with_0 %>%
# select(treatment_complexity_time, common_name, presence)
#
# write.xlsx(ARD_3_rich_pivot_time, "ARD_3_rich_pivot_time.xlsx")
# #make pivot table
# ARD_3_rich_matrix_time <- read_csv("Artificial coral experiment/data/Art Recruit data/richness and diversity/ARD_3/by time_block/ARD_3_rich_matrix_time.csv")
ddply(ARD_3_rich_matrix_time,~site,function(x) {data.frame(RICHNESS=sum(x[-1]>0))})
#copy and paste output to new df, use text to columns in Data Tools to separate into cols.
ARD_3_rich_time <- read_csv("Artificial coral experiment/data/Art Recruit data/richness and diversity/ARD_3/by time_block/ARD_3_rich_time.csv")
ARD_3_rich_time <- ARD_3_rich_time %>%
mutate(treatment = factor(treatment, levels = c("control", "0%", "30%", "50%", "70%", "100%"))) %>%
mutate(time_block = factor(time_block, levels = c("start", "middle", "end")))
ggplot(data = ARD_3_rich_time) +
geom_col(aes(x = time_block,
y = RICHNESS,
fill = time_block),
alpha = 0.7) +
scale_fill_viridis(discrete = TRUE, name = "Time Block") +
facet_grid(complexity ~ treatment) +
ggtitle("species richness over time") +
labs(x = expression(Time),
y = expression(Number~of~species)) +
theme_classic()+
theme(
axis.title = element_text(size = 14),
axis.text.x = element_text(size = 12, angle = -60, hjust = 0.01),
legend.position = "none",
strip.text = element_text(size = 14)
)
ggplot(data = ARD_3_rich_time) +
geom_point(aes(x = time_block,
y = RICHNESS,
colour = time_block)) +
facet_grid(complexity ~ treatment)
# diveristy - by visit ----------------------------------------------------
ARD_3_with_0$treatment_complexity_visit <- paste(ARD_3_with_0$treatment_complexity, "-",ARD_3_with_0$visit)
#need to make a col with all crossed factors together (so treatment, complexity and time_block)
ARD_3_rich_pivot_visit <- ARD_3_with_0 %>%
select(treatment_complexity_visit, common_name, presence)
write.xlsx(ARD_3_rich_pivot_visit, "ARD_3_rich_pivot_visit.xlsx")
#make pivot table
ARD_3_rich_matrix_visit <- read_csv("Artificial coral experiment/data/Art Recruit data/richness and diversity/ARD_3/by visit/ARD_3_rich_matrix_visit.csv")
ddply(ARD_3_rich_matrix_visit,~site,function(x) {data.frame(RICHNESS=sum(x[-1]>0))})
#copy and paste output to new df, use text to columns in Data Tools to separate into cols.
ARD_3_rich_visit <- read_csv("Artificial coral experiment/data/Art Recruit data/richness and diversity/ARD_3/by visit/ARD_3_rich_visit.csv")
ARD_3_rich_visit <- ARD_3_rich_visit %>%
mutate(treatment = factor(treatment, levels = c("control", "0%", "30%", "50%", "70%", "100%"))) %>%
mutate(visit = factor(visit, levels = c("1", "2", "3", "4", "5","6", "7", "8", "9", "10","11", "12", "13", "14", "15","16", "17", "18"))) %>%
filter(visit != "18")
ggplot(data = ARD_3_rich_visit) +
geom_point(aes(x = visit,
y = RICHNESS,
colour = treatment,
group = treatment),
alpha = 0.6,
size = 2) +
geom_line(aes(x = visit,
y = RICHNESS,
colour = treatment,
group = treatment),
alpha = 0.6) +
geom_smooth(aes(x = visit,
y = RICHNESS,
colour = treatment,
group = treatment),
size = 2,
method = lm,
# formula = y ~ x + I(x^2),
se = FALSE ) +
# scale_fill_viridis(discrete = TRUE, name = "Time Block") +
facet_grid(complexity ~ treatment) +
scale_x_discrete(breaks = c(5, 10, 15)) +
ggtitle("Species Richness - visit (linear)") +
labs(x = expression(Visit),
y = expression(D~value)) +
theme_classic()+
theme(
axis.title = element_text(size = 14),
axis.text = element_text(size = 14),
legend.position = "none",
strip.text = element_text(size = 14)
)
# Richness by visit -------------------------------------------------------
ARD_3_with_0$treatment_complexity_visit <- paste(ARD_3_with_0$treatment_complexity, "-",ARD_3_with_0$visit)
#need to make a col with all crossed factors together (so treatment, complexity and time_block)
ARD_3_rich_pivot_visit <- ARD_3_with_0 %>%
select(treatment_complexity_visit, common_name, presence)
write.xlsx(ARD_3_rich_pivot_visit, "ARD_3_rich_pivot_visit.xlsx")
#make pivot table
ARD_3_rich_matrix_visit <- read_csv("Artificial coral experiment/data/Art Recruit data/richness and diversity/ARD_3/by visit/ARD_3_rich_matrix_visit.csv")
ddply(ARD_3_rich_matrix_visit,~site,function(x) {data.frame(SIMPSON=diversity(x[-1], index="simpson"))})
#copy and paste output to new df, use text to columns in Data Tools to separate into cols.
ARD_3_simpson_visit <- read_csv("Artificial coral experiment/data/Art Recruit data/richness and diversity/ARD_3/by visit/ARD_3_simpson_visit.csv")
ARD_3_simpson_visit <- ARD_3_simpson_visit %>%
mutate(treatment = factor(treatment, levels = c("control", "0%", "30%", "50%", "70%", "100%"))) %>%
mutate(time_block = factor(visit, levels = c("1", "2", "3", "4", "5","6", "7", "8", "9", "10","11", "12", "13", "14", "15","16", "17", "18"))) %>%
filter(visit != "18")
ggplot(data = ARD_3_simpson_visit) +
geom_point(aes(x = visit,
y = SIMPSON,
colour = treatment,
group = treatment),
alpha = 0.6,
size = 2) +
geom_line(aes(x = visit,
y = SIMPSON,
colour = treatment,
group = treatment),
alpha = 0.6) +
# geom_smooth(aes(x = visit,
# y = SIMPSON,
# colour = treatment,
# group = treatment),
# size = 2,
# method = lm,
# formula = y ~ x + I(x^2),
# se = FALSE ) +
scale_fill_viridis(discrete = TRUE, name = "Time Block") +
facet_grid(complexity ~ treatment) +
ggtitle("simpson Index of Species Diversity - visit") +
labs(x = expression(Visit),
y = expression(D~value)) +
theme_classic()+
theme(
axis.title = element_text(size = 14),
axis.text = element_text(size = 14),
legend.position = "none",
strip.text = element_text(size = 14)
)
# SAC: species accumulation curve - experimentation code ----------------------------------------------
# Trial run of vegan's SAC tooling: load the bundled BCI example to see the
# expected input shape, then fit/plot exact vs random accumulators for the
# High/control subset.
data("BCI")
#ok so the data structure is a matrix of sum of presence (tree counts) for 50 plots (rows)
ARD_3_pivot_SAC_C_H <- ARD_3_with_0 %>%
filter(treatment == "control") %>%
filter(complexity == "High") %>%
select(common_name, visit, presence)
# Export for manual pivoting; the species-by-visit matrix is read back below.
write.xlsx(ARD_3_pivot_SAC_C_H,"ARD_3_pivot_SAC_C_H.xlsx")
SAC_matrix_C_H <- read_csv("Artificial coral experiment/data/Art Recruit data/species accumulation curve/ARD_3/High/SAC_matrix_C__H.csv")
#using vegan
sac_exact_C_H <- specaccum(SAC_matrix_C_H, method = "exact")
sac_ran_C_H <- specaccum(SAC_matrix_C_H, method = "random")
#plot to compare
plot(sac_exact_C_H, col = "black", lwd = 3, xlab = "Visit", ylab = "Richness",
main = "SAC exact - High Control")
plot(sac_ran_C_H, col = "black", lwd = 3, xlab = "Visit", ylab = "Richness",
main = "SAC random - High Control")
#estimators
# Extrapolated richness estimators with permutation-based spread.
sac_C_H_pool <- poolaccum(SAC_matrix_C_H, permutations = 1000)
plot(sac_C_H_pool)
#messy plots, so use this instead:
# Lattice plot of the Chao estimator only, with a stripped-down axis style; the
# custom panel redraws the bottom/left axis lines suppressed by par.settings.
plot(sac_C_H_pool, display = "chao", col = "black", auto.key = FALSE,
grid = F, strip = FALSE, xlab = "Visit",
par.settings = list(axis.line = list(col = 0)),
scales = list(col=1, tck=c(1,0)),
panel = function(...){
lims <- current.panel.limits()
panel.xyplot(...)
panel.abline(h=lims$ylim[1], v=lims$xlim[1])
})
# SAC - by visit ----------------------------------------------------------
# Every treatment x complexity combination below follows the same recipe:
#   1. subset the presence records for that combination,
#   2. export the subset so the species-by-visit matrix can be hand-pivoted in
#      Excel (remember to delete the visit column there),
#   3. read the pivoted matrix back in and fit an exact SAC with vegan,
#   4. plot the curve.
# The twelve copy-pasted sections are factored into three helpers. The pivot
# data frames and the fitted sac_exact_* objects keep their original names;
# the transient SAC_matrix_* globals are no longer kept (each was only read
# once, immediately after creation).

# Subset the presence data for one treatment/complexity combination.
make_sac_pivot <- function(data, trt, cmplx) {
  data %>%
    filter(treatment == trt) %>%
    filter(complexity == cmplx) %>%
    select(common_name, visit, presence)
}

# Read a hand-pivoted species-by-visit matrix and fit an exact SAC (vegan).
read_sac_exact <- function(path) {
  specaccum(read_csv(path), method = "exact")
}

# Plot a fitted SAC with the shared axis labels and line width.
plot_sac <- function(sac, col, main) {
  plot(sac, col = col, lwd = 3, xlab = "Visit", ylab = "Richness", main = main)
}

# SAC: control High -------------------------------------------------------
ARD_3_pivot_SAC_C_H1 <- make_sac_pivot(ARD_3_with_0, "control", "High")
# Fixed: the original exported ARD_3_pivot_SAC_C_H (an identically filtered
# object from the experimentation section above) instead of the object built
# just here; the output file name is unchanged.
write.xlsx(ARD_3_pivot_SAC_C_H1, "ARD_3_pivot_SAC_C_H.xlsx")
# Note the double underscore in this one file name.
sac_exact_C_H <- read_sac_exact("Artificial coral experiment/data/Art Recruit data/species accumulation curve/ARD_3/High/SAC_matrix_C__H.csv")
plot_sac(sac_exact_C_H, "red", "SAC exact - High Control")

# SAC: 0% High ------------------------------------------------------------
ARD_3_pivot_SAC_0_H <- make_sac_pivot(ARD_3_with_0, "0%", "High")
write.xlsx(ARD_3_pivot_SAC_0_H, "ARD_3_pivot_SAC_0_H.xlsx")
sac_exact_0_H <- read_sac_exact("Artificial coral experiment/data/Art Recruit data/species accumulation curve/ARD_3/High/SAC_matrix_0_H.csv")
plot_sac(sac_exact_0_H, "orange", "SAC exact - High 0%")

# SAC: 30% High -----------------------------------------------------------
ARD_3_pivot_SAC_30_H <- make_sac_pivot(ARD_3_with_0, "30%", "High")
write.xlsx(ARD_3_pivot_SAC_30_H, "ARD_3_pivot_SAC_30_H.xlsx")
sac_exact_30_H <- read_sac_exact("Artificial coral experiment/data/Art Recruit data/species accumulation curve/ARD_3/High/SAC_matrix_30_H.csv")
plot_sac(sac_exact_30_H, "green", "SAC exact - High 30%")

# SAC: 50% High -----------------------------------------------------------
ARD_3_pivot_SAC_50_H <- make_sac_pivot(ARD_3_with_0, "50%", "High")
write.xlsx(ARD_3_pivot_SAC_50_H, "ARD_3_pivot_SAC_50_H.xlsx")
sac_exact_50_H <- read_sac_exact("Artificial coral experiment/data/Art Recruit data/species accumulation curve/ARD_3/High/SAC_matrix_50_H.csv")
plot_sac(sac_exact_50_H, "aquamarine", "SAC exact - High 50%")

# SAC: 70% High -----------------------------------------------------------
ARD_3_pivot_SAC_70_H <- make_sac_pivot(ARD_3_with_0, "70%", "High")
write.xlsx(ARD_3_pivot_SAC_70_H, "ARD_3_pivot_SAC_70_H.xlsx")
sac_exact_70_H <- read_sac_exact("Artificial coral experiment/data/Art Recruit data/species accumulation curve/ARD_3/High/SAC_matrix_70_H.csv")
plot_sac(sac_exact_70_H, "blue", "SAC exact - High 70%")

# SAC: 100% High ----------------------------------------------------------
ARD_3_pivot_SAC_100_H <- make_sac_pivot(ARD_3_with_0, "100%", "High")
write.xlsx(ARD_3_pivot_SAC_100_H, "ARD_3_pivot_SAC_100_H.xlsx")
sac_exact_100_H <- read_sac_exact("Artificial coral experiment/data/Art Recruit data/species accumulation curve/ARD_3/High/SAC_matrix_100_H.csv")
plot_sac(sac_exact_100_H, "magenta", "SAC exact - High 100%")

# SAC: control Low --------------------------------------------------------
ARD_3_pivot_SAC_C_L <- make_sac_pivot(ARD_3_with_0, "control", "Low")
write.xlsx(ARD_3_pivot_SAC_C_L, "ARD_3_pivot_SAC_C_L.xlsx")
sac_exact_C_L <- read_sac_exact("Artificial coral experiment/data/Art Recruit data/species accumulation curve/ARD_3/Low/SAC_matrix_C_L.csv")
plot_sac(sac_exact_C_L, "red", "SAC exact - Low Control")

# SAC: 0% Low -------------------------------------------------------------
ARD_3_pivot_SAC_0_L <- make_sac_pivot(ARD_3_with_0, "0%", "Low")
write.xlsx(ARD_3_pivot_SAC_0_L, "ARD_3_pivot_SAC_0_L.xlsx")
sac_exact_0_L <- read_sac_exact("Artificial coral experiment/data/Art Recruit data/species accumulation curve/ARD_3/Low/SAC_matrix_0_L.csv")
plot_sac(sac_exact_0_L, "orange", "SAC exact - Low 0%")

# SAC: 30% Low ------------------------------------------------------------
# (section headers for the Low 30/50/70/100 sections were mislabeled "High")
ARD_3_pivot_SAC_30_L <- make_sac_pivot(ARD_3_with_0, "30%", "Low")
write.xlsx(ARD_3_pivot_SAC_30_L, "ARD_3_pivot_SAC_30_L.xlsx")
sac_exact_30_L <- read_sac_exact("Artificial coral experiment/data/Art Recruit data/species accumulation curve/ARD_3/Low/SAC_matrix_30_L.csv")
plot_sac(sac_exact_30_L, "green", "SAC exact - Low 30%")

# SAC: 50% Low ------------------------------------------------------------
ARD_3_pivot_SAC_50_L <- make_sac_pivot(ARD_3_with_0, "50%", "Low")
write.xlsx(ARD_3_pivot_SAC_50_L, "ARD_3_pivot_SAC_50_L.xlsx")
sac_exact_50_L <- read_sac_exact("Artificial coral experiment/data/Art Recruit data/species accumulation curve/ARD_3/Low/SAC_matrix_50_L.csv")
plot_sac(sac_exact_50_L, "aquamarine", "SAC exact - Low 50%")

# SAC: 70% Low ------------------------------------------------------------
ARD_3_pivot_SAC_70_L <- make_sac_pivot(ARD_3_with_0, "70%", "Low")
write.xlsx(ARD_3_pivot_SAC_70_L, "ARD_3_pivot_SAC_70_L.xlsx")
sac_exact_70_L <- read_sac_exact("Artificial coral experiment/data/Art Recruit data/species accumulation curve/ARD_3/Low/SAC_matrix_70_L.csv")
plot_sac(sac_exact_70_L, "blue", "SAC exact - Low 70%")

# SAC: 100% Low -----------------------------------------------------------
ARD_3_pivot_SAC_100_L <- make_sac_pivot(ARD_3_with_0, "100%", "Low")
write.xlsx(ARD_3_pivot_SAC_100_L, "ARD_3_pivot_SAC_100_L.xlsx")
sac_exact_100_L <- read_sac_exact("Artificial coral experiment/data/Art Recruit data/species accumulation curve/ARD_3/Low/SAC_matrix_100_L.csv")
plot_sac(sac_exact_100_L, "magenta", "SAC exact - Low 100%")

# Fixed: the original passed the specaccum object straight to geom_line();
# specaccum results are lists (with $sites and $richness), not data frames,
# so build a data frame for ggplot first.
ggplot() +
  geom_line(data = data.frame(visit = sac_exact_100_L$sites,
                              Richness = sac_exact_100_L$richness),
            aes(x = visit,
                y = Richness))
# SAC - by days since outplanting -----------------------------------------
# control high ------------------------------------------------------------
# Same SAC recipe as the by-visit sections, but the accumulation unit is
# days_since_outplanting and the source is the ARD_3_with_0_new_2 data frame.
ARD_3_pivot_SAC_C_H <- ARD_3_with_0_new_2 %>%
filter(treatment == "control") %>%
filter(complexity == "High") %>%
select(common_name, days_since_outplanting, presence)
# Export for manual pivoting; the pivoted matrix is read back below.
write.xlsx(ARD_3_pivot_SAC_C_H,"ARD_3_pivot_SAC_C_H.xlsx")
SAC_matrix_C_H <- read_csv("Artificial coral experiment/data/Art Recruit data/species accumulation curve/ARD_3/days_since_outplanting (new)/High/SAC_matrix_C_H.csv")
#using vegan
sac_exact_C_H <- specaccum(SAC_matrix_C_H, method = "exact")
#plot to compare
plot(sac_exact_C_H, col = "red", lwd = 3, xlab = "Days Since Outplanting", ylab = "Richness",
main = "SAC exact - High Control")
|
efa05fc5a018bd9f23d3798c4fde12606e675947 | 867048395e7b85aee22f6058088ff8de70a6dff2 | /backtester_v4.2/images/analysis image/plot.R | 9a2ce3bcaf9b915342a5e9d01dc9c29c3af479d5 | [] | no_license | whyVeronica/backtester_v4.2 | 348fdb756d7683c83f52d0374c66cd5ede905697 | ad1c3172b236e996ef265f9941091f7e65a9d224 | refs/heads/master | 2016-09-06T03:33:49.229591 | 2015-05-13T15:22:59 | 2015-05-13T15:22:59 | 34,964,458 | 1 | 2 | null | null | null | null | UTF-8 | R | false | false | 165 | r | plot.R | source('framework/data.R');
# Load the PART2 price series via the backtester's data helper (getData() is
# presumably defined in framework/data.R, sourced just above -- confirm).
dataList <- getData(directory="PART2")
# First 1100 rows of series 10, first four columns (OHLC).
d <- dataList[[10]][1:1100,1:4]
# Candlestick chart of the selected window (quantmod-style chartSeries).
chartSeries(d,type="candlesticks",theme="white",name="Series10")
b4436e04a6856e4cddd88bfe789521572cab75a8 | 31d2d467030565c44f4d28d42c0e4d225dececaa | /man/item.fit.Rd | 1dde60ea49f1c124ff4f0d6aafa95f7260618540 | [] | no_license | cran/ltm | 84fd858915db9fe1506a40628f61e6500a21ed1c | dbbabfa99fa09ad94113856a6a5ae1535e7b817f | refs/heads/master | 2022-02-25T01:10:01.747125 | 2022-02-18T08:40:02 | 2022-02-18T08:40:02 | 17,697,218 | 2 | 4 | null | null | null | null | UTF-8 | R | false | false | 4,840 | rd | item.fit.Rd | \name{item.fit}
\alias{item.fit}
\title{ Item-Fit Statistics and P-values }
\description{
Computation of item fit statistics for \code{ltm}, \code{rasch} and \code{tpm} models.
}
\usage{
item.fit(object, G = 10, FUN = median,
simulate.p.value = FALSE, B = 100)
}
\arguments{
\item{object}{a model object inheriting either from class \code{ltm}, class \code{rasch} or class \code{tpm}.}
\item{G}{either a number or a numeric vector. If a number, then it denotes the number of categories sample units
are grouped according to their ability estimates.}
\item{FUN}{a function to summarize the ability estimate with each group (e.g., median, mean, etc.).}
\item{simulate.p.value}{logical; if \code{TRUE}, then the Monte Carlo procedure described in the \bold{Details}
section is used to approximate the the distribution of the item-fit statistic under the null hypothesis.}
\item{B}{the number of replications in the Monte Carlo procedure.}
}
\details{
The item-fit statistic computed by \code{item.fit()} has the form: \deqn{\sum \limits_{j = 1}^G \frac{N_j
(O_{ij} - E_{ij})^2}{E_{ij} (1 - E_{ij})},}{\sum_{j = 1}^g N_j (O_{ij} - E_{ij})^2 / [E_{ij} (1 - E_{ij})],}
where \eqn{i} is the item, \eqn{j} is the interval created by grouping sample units on the basis of their ability
estimates, \eqn{G} is the number of sample units groupings (i.e., \code{G} argument), \eqn{N_j} is the number of
sample units with ability estimates falling in a given interval \eqn{j}, \eqn{O_{ij}} is the observed proportion of
keyed responses on item \eqn{i} for interval \eqn{j}, and \eqn{E_{ij}} is the expected proportion of keyed responses
on item \eqn{i} for interval \eqn{j} based on the IRT model (i.e., \code{object}) evaluated at the ability estimate
\eqn{z^*} within the interval, with \eqn{z^*} denoting the result of \code{FUN} applied to the ability estimates in
group \eqn{j}.
If \code{simulate.p.value = FALSE}, then the \eqn{p}-values are computed assuming a chi-squared distribution with
degrees of freedom equal to the number of groups \code{G} minus the number of estimated parameters. If
\code{simulate.p.value = TRUE}, a Monte Carlo procedure is used to approximate the distribution of the item-fit
statistic under the null hypothesis. In particular, the following steps are replicated \code{B} times:
\describe{
\item{Step 1:}{Simulate a new data-set of dichotomous responses under the assumed IRT model, using the maximum
likelihood estimates \eqn{\hat{\theta}}{\hat{theta}} in the original data-set, extracted from \code{object}.}
\item{Step 2:}{Fit the model to the simulated data-set, extract the maximum likelihood estimates
\eqn{\theta^*}{theta^*} and compute the ability estimates \eqn{z^*} for each response pattern.}
\item{Step 3:}{For the new data-set, and using \eqn{z^*} and \eqn{\theta^*}{theta^*}, compute the value of the
item-fit statistic.}
}
Denote by \eqn{T_{obs}} the value of the item-fit statistic for the original data-set. Then the \eqn{p}-value is
approximated according to the formula \deqn{\left(1 + \sum_{b = 1}^B I(T_b \geq T_{obs})\right) / (1 + B),}{(1 +
sum_{b = 1}^B I(T_b >= T_{obs})) / (1 + B),} where \eqn{I(.)} denotes the indicator function, and \eqn{T_b} denotes
the value of the item-fit statistic in the \eqn{b}th simulated data-set.
}
\value{
An object of class \code{itemFit} is a list with components,
\item{Tobs}{a numeric vector with item-fit statistics.}
\item{p.values}{a numeric vector with the corresponding \eqn{p}-values.}
\item{G}{the value of the \code{G} argument.}
\item{simulate.p.value}{the value of the \code{simulate.p.value} argument.}
\item{B}{the value of the \code{B} argument.}
\item{call}{a copy of the matched call of \code{object}.}
}
\references{
Reise, S. (1990) A comparison of item- and person-fit methods of assessing model-data fit in IRT. \emph{Applied
Psychological Measurement}, \bold{14}, 127--137.
Yen, W. (1981) Using simulation results to choose a latent trait model. \emph{Applied Psychological Measurement},
\bold{5}, 245--262.
}
\author{
Dimitris Rizopoulos \email{d.rizopoulos@erasmusmc.nl}
}
\seealso{
\code{\link{person.fit}},
\code{\link{margins}},
\code{\link{GoF.gpcm}},
\code{\link{GoF.rasch}}
}
\examples{
# item-fit statistics for the Rasch model
# for the Abortion data-set
item.fit(rasch(Abortion))
# Yen's Q1 item-fit statistic (i.e., 10 latent ability groups; the
# mean ability in each group is used to compute fitted proportions)
# for the two-parameter logistic model for the LSAT data-set
item.fit(ltm(LSAT ~ z1), FUN = mean)
}
\keyword{ multivariate }
|
132730313b735b121799f8a64d2adf7011392148 | 211b734581e0eefb217a864b7ffc6914f170e04e | /project_serie_tempo.R | 8b830930d6407bf7ca7b159d2d8ee16fb64347fe | [] | no_license | fern35/MasterProject | 4b04d5fbb043ca496f2f44fce621335243a0368a | caf330bf8315ab6e5ac862bce47bb67559c5eb44 | refs/heads/master | 2021-05-09T01:45:37.001877 | 2018-03-07T20:12:54 | 2018-03-07T20:12:54 | 119,184,682 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,630 | r | project_serie_tempo.R | rm(list=objects())
###############packages
library(forecast)
library(RColorBrewer)
library(magrittr)
library(lubridate)
rmse <- function(eps)
{
  # Root mean squared error of a residual vector, rounded to the nearest
  # whole unit; NA residuals are dropped before averaging.
  mean_sq_err <- mean(eps^2, na.rm = TRUE)
  round(sqrt(mean_sq_err), digits = 0)
}
mape <- function(y, ychap)
{
  # Mean absolute percentage error (in %) of forecasts `ychap` against
  # observations `y`, rounded to 10 decimal places. NAs are not removed,
  # matching the original behaviour.
  rel_err <- abs(y - ychap) / abs(y)
  round(100 * mean(rel_err), digits = 10)
}
###############Import data
# Daily NO2 concentration files (Austin, Texas, per the file names):
# 2015 + 2016 form the training set, 2017 is held out as the test set.
data0<-read.csv(file="D:/m2/projet datamining/ad_viz_plotval_data_no2_texas_austin2015.csv", header=TRUE, sep=",")
data1<-read.csv(file="D:/m2/projet datamining/ad_viz_plotval_data_no2_texas_austin2016.csv", header=TRUE, sep=",")
data_train <- rbind(data0, data1)
data_test<-read.csv(file="D:/m2/projet datamining/ad_viz_plotval_data_no2_texas_austin2017.csv", header=TRUE, sep=",")
# Parse the US-style month/day/year dates and add a 1-7 weekday column
# (wday from lubridate, loaded above).
data_train$Date <- as.Date(data_train$Date, "%m/%d/%Y")
data_test$Date <- as.Date(data_test$Date, "%m/%d/%Y")
data_train$Weekday <- wday(data_train$Date)
data_test$Weekday <- wday(data_test$Date)
# Date <- paste(data0$Day, data0$Month,data0$Year, sep='-')
# as.POSIXct(strptime(Date, "%d-%m-%Y"))
#
# data0$Date<-as.POSIXct(strptime(data0$Date, "%Y-%m-%d"))
# data1$Date<-as.POSIXct(strptime(data1$Date, "%Y-%m-%d"))
# data0$BH1<-as.factor(data0$BH1)
# data1$BH1<-as.factor(data1$BH1)
rollArima<-function (arima.model, ynew, horizon = 1)
{
  # Rolling-origin ARIMA forecast: forecast `horizon` steps ahead from the
  # fitted model, then repeatedly append the next observed value of `ynew`
  # and re-apply the fitted model via Arima(model=).
  #
  # arima.model: fitted forecast::Arima model (its $x is the training series)
  # ynew:        out-of-sample observations in time order (length >= 1)
  # horizon:     forecast horizon used at every origin
  # Returns a numeric vector of length(ynew) rolling forecasts.
  stopifnot(length(ynew) >= 1)
  prevARIMA <- array(0, dim = length(ynew))
  prevARIMA[1] <- forecast(arima.model, h = horizon)$mean[horizon]
  # seq_len() fixes the 1:(n - 1) pattern, which evaluates to c(1, 0) when
  # length(ynew) == 1 and runs the loop instead of skipping it.
  for (i in seq_len(length(ynew) - 1)) {
    ts2 <- c(arima.model$x, ynew[1:i])
    refit <- Arima(ts2, model = arima.model)
    prevARIMA[i + 1] <- forecast(refit, h = horizon)$mean[horizon]
  }
  return(prevARIMA)
}
rollHW <- function (hw.model, ynew, horizon = 1)
{
  # Rolling-origin Holt-Winters forecast: at each origin, refit HoltWinters
  # on the extended series, warm-started from the previous fit's level/trend
  # coefficients and smoothing parameters.
  #
  # hw.model: fitted stats::HoltWinters model (its $x is the training series)
  # ynew:     out-of-sample observations in time order (length >= 1)
  # horizon:  forecast horizon used at every origin
  # Returns a numeric vector of length(ynew) rolling forecasts.
  #
  # Fixes relative to the original:
  #  - the body referenced the global `fit.hw` instead of the `hw.model`
  #    argument (the calling script even named its model fit.hw "to make
  #    prevHW run");
  #  - `hw.model$gamma == T` compared the fitted numeric gamma against 1, so
  #    seasonal models were silently refit without their seasonal component;
  #    in a HoltWinters fit, gamma is FALSE when disabled and numeric
  #    otherwise, hence the identical() test;
  #  - the non-seasonal branch never passed gamma = FALSE to HoltWinters, so
  #    the refit estimated a seasonal component anyway;
  #  - 1:(length(ynew) - 1) iterated over c(1, 0) for a single observation.
  stopifnot(length(ynew) >= 1)
  prevHW <- array(0, dim = length(ynew))
  prevHW[1] <- forecast(hw.model, h = horizon)$mean[horizon]
  refit <- hw.model
  seasonal <- !identical(hw.model$gamma, FALSE)
  for (i in seq_len(length(ynew) - 1)) {
    ts2 <- ts(c(hw.model$x, ynew[1:i]), frequency = frequency(hw.model$x))
    if (seasonal) {
      refit <- HoltWinters(ts2, seasonal = hw.model$seasonal,
                           l.start = refit$coefficients["a"],
                           b.start = refit$coefficients["b"],
                           s.start = refit$coefficients[grep("^s", names(refit$coefficients))],
                           optim.start = c(alpha = refit$alpha, beta = refit$beta, gamma = refit$gamma))
    } else {
      refit <- HoltWinters(ts2, gamma = FALSE,
                           l.start = refit$coefficients["a"],
                           b.start = refit$coefficients["b"],
                           optim.start = c(alpha = refit$alpha, beta = refit$beta))
    }
    prevHW[i + 1] <- forecast(refit, h = horizon)$mean[horizon]
  }
  return(prevHW)
}
rollETS <- function (ets.model, ynew, horizon = 1)
{
  # Rolling-origin ETS forecast: forecast `horizon` steps ahead, then
  # repeatedly append the next observed value of `ynew` and re-apply the
  # fitted model via ets(model=).
  #
  # ets.model: fitted forecast::ets model (its $x is the training series)
  # ynew:      out-of-sample observations in time order (length >= 1)
  # horizon:   forecast horizon used at every origin
  # Returns a numeric vector of length(ynew) rolling forecasts.
  stopifnot(length(ynew) >= 1)
  prevETS <- array(0, dim = length(ynew))
  prevETS[1] <- forecast(ets.model, h = horizon)$mean[horizon]
  # seq_len() fixes the 1:(n - 1) pattern, which evaluates to c(1, 0) when
  # length(ynew) == 1 and runs the loop instead of skipping it.
  for (i in seq_len(length(ynew) - 1)) {
    ts2 <- ts(c(ets.model$x, ynew[1:i]), frequency=frequency(ets.model$x))
    refit <- ets(ts2, model = ets.model)
    prevETS[i + 1] <- forecast(refit, h = horizon)$mean[horizon]
  }
  return(prevETS)
}
#####################################################################
#######################time series: SARIMA model
#####################################################################
# Weekly-seasonal series of daily max NO2; ACF/PACF of the raw, differenced
# and seasonally differenced series guide the (p,d,q)(P,D,Q) search below.
ts<-ts(data_train$Daily.Max.1.hour.NO2.Concentration.ppb., frequency=7)
plot(data_train$Date, ts, type = 'l')
par(mfrow=c(1,2))
acf(ts)
pacf(ts)
par(mfrow=c(1,2))
acf(diff(ts), lag.max=7*3)
pacf(diff(ts), lag.max=7*3)
par(mfrow=c(1,2))
acf(diff(diff(ts), lag=7), lag.max=7*3)
pacf(diff(diff(ts), lag=7), lag.max=7*3)
#Pmax=2
#Qmax=1
par(mfrow=c(1,2))
acf(diff(diff(ts), lag=7), lag.max=20)
pacf(diff(diff(ts), lag=7), lag.max=20)
###pmax= 4 ou 11
###qmax=3
# Hand-picked SARIMA at the maximal orders, then AIC- and BIC-selected
# auto.arima fits; each is scored by rolling one-step-ahead MAPE on 2017.
fit1 <- Arima(ts, order=c(4,1,3), seasonal=c(2,1,1), method=c("CSS"))
prevARIMA1<-rollArima(fit1,ynew=data_test$Daily.Max.1.hour.NO2.Concentration.ppb.,horizon=1)
mape(data_test$Daily.Max.1.hour.NO2.Concentration.ppb.,prevARIMA1)
fit.arima <- auto.arima(ts,max.p=11,max.q=3, max.P=2, max.Q=1, trace=T,start.p=7,start.q=2,ic="aic")
prevARIMA<-rollArima(fit.arima,ynew=data_test$Daily.Max.1.hour.NO2.Concentration.ppb.,horizon=1)
mape(data_test$Daily.Max.1.hour.NO2.Concentration.ppb.,prevARIMA)
fit.arimaBIC<- auto.arima(ts,max.p=7,max.q=7, max.P=8, max.Q=8, trace=T,start.p=7,start.q=2,ic="bic")
prevARIMABIC<-rollArima(fit.arimaBIC,ynew=data_test$Daily.Max.1.hour.NO2.Concentration.ppb.,horizon=1)
mape(data_test$Daily.Max.1.hour.NO2.Concentration.ppb.,prevARIMABIC)
#####################################################################
#######################exponential smoothing
#####################################################################
####################################
####simple, non saisonnier
####################################
# Non-seasonal ETS (the series is rebuilt without a frequency here).
ts<-ts(data_train$Daily.Max.1.hour.NO2.Concentration.ppb.)
fit.ets<-ets(ts)
prevETS<-rollETS(fit.ets,ynew=data_test$Daily.Max.1.hour.NO2.Concentration.ppb.,horizon=1)
mape(data_test$Daily.Max.1.hour.NO2.Concentration.ppb.,prevETS)
####################################
####double, non saisonnier
####################################
# Holt (double) smoothing without a seasonal term, then a multiplicative
# seasonal Holt-Winters fit; both scored by rolling one-step MAPE.
ts<-ts(data_train$Daily.Max.1.hour.NO2.Concentration.ppb., frequency=7)##frequency is necessary
fit.hw<-HoltWinters(ts, gamma=FALSE) ##to make prevHW run
fit.hw$gamma
prevHW<-rollHW(fit.hw,ynew=data_test$Daily.Max.1.hour.NO2.Concentration.ppb.,horizon=1)
mape(data_test$Daily.Max.1.hour.NO2.Concentration.ppb.,prevHW)
fit.hw_mu<-HoltWinters(ts,seasonal="multiplicative")
fit.hw_mu$gamma
prevHW_mu<-rollHW(fit.hw_mu,ynew=data_test$Daily.Max.1.hour.NO2.Concentration.ppb.,horizon=1)
mape(data_test$Daily.Max.1.hour.NO2.Concentration.ppb.,prevHW_mu)
# Overlay the four rolling forecasts on the observed 2017 series.
par(ask=F)
par(mfrow=c(1,1))
col.pal<-brewer.pal(4, "Spectral")
plot(data_test$Daily.Max.1.hour.NO2.Concentration.ppb.,type='l')
lines(prevARIMA,col=col.pal[1])
lines(prevETS,col=col.pal[2])
lines(prevHW,col=col.pal[3])
lines(prevHW_mu,col=col.pal[4])
legend("topright",col=col.pal,legend=c("ARIMA","ETS","HW","HW_mu"),lty=1, cex =0.75)
# plot(cumsum(data1$Load-prevARIMA),type='l',col=col.pal[1])
# lines(cumsum(data1$Load-prevETS),col=col.pal[2])
# lines(cumsum(data1$Load-prevHW),col=col.pal[3])
# lines(cumsum(data1$Load-prevHW_mu),col=col.pal[4])
# legend("topleft",col=col.pal,legend=c("ARIMA","ETS","HW","HW_mu"),lty=1)
# Summary scores (printed, not stored) for every candidate model.
mape(data_test$Daily.Max.1.hour.NO2.Concentration.ppb.,prevARIMA)
mape(data_test$Daily.Max.1.hour.NO2.Concentration.ppb.,prevARIMABIC)
mape(data_test$Daily.Max.1.hour.NO2.Concentration.ppb.,prevETS)
mape(data_test$Daily.Max.1.hour.NO2.Concentration.ppb.,prevHW)
mape(data_test$Daily.Max.1.hour.NO2.Concentration.ppb.,prevHW_mu)
rmse(data_test$Daily.Max.1.hour.NO2.Concentration.ppb.-prevARIMA)
rmse(data_test$Daily.Max.1.hour.NO2.Concentration.ppb.-prevARIMABIC)
rmse(data_test$Daily.Max.1.hour.NO2.Concentration.ppb.-prevETS)
rmse(data_test$Daily.Max.1.hour.NO2.Concentration.ppb.-prevHW)
rmse(data_test$Daily.Max.1.hour.NO2.Concentration.ppb.-prevHW_mu)
# time_series<-list()
# time_series$prevARIMA<-prevARIMA
# time_series$prevHW<-prevHW
# time_series$prevETS<-prevETS
# saveRDS(time_series,"C:\\Enseignement\\2016-2017\\Results\\time_series_models.RDS")
#
###############################################################
#########randomforest +arima correction models
###############################################################
# noel = which(abs(data0$Day - 24) <= 3 & data0$Month == 12)
# consoNoel = vector("numeric", length(data0$Time))
# consoNoel[noel] = 1
# data0 <- data.frame(data0, consoNoel)
# noel = which(abs(data1$Day - 24) <= 3 & data1$Month == 12)
# consoNoel = vector("numeric", length(data1$Time))
# consoNoel[noel] = 1
# data1 <- data.frame(data1, consoNoel)
library(randomForest)
# Model formula: NO2 against the weather covariates plus weekday.
# NOTE(review): WIN_HIGH appears twice and WIN_LOW never does -- one of the
# duplicates was probably meant to be WIN_LOW; confirm.
eq<-Daily.Max.1.hour.NO2.Concentration.ppb. ~ TEM_HIGH + TEM_AVG + TEM_LOW + DEW_HIGH + DEW_AVG + DEW_LOW + HUM_HIGH + HUM_AVG + HUM_LOW + SLP_HIGH + SLP_AVG + SLP_LOW + VIS_HIGH + VIS_AVG + VIS_LOW + WIN_HIGH + WIN_HIGH + WIN_AVG + PRECIP + MUSIC + Weekday
# Visibility and precipitation columns arrive as factors/characters; convert
# via as.character/levels so factor codes are not silently used as numbers.
data_train$VIS_HIGH = as.numeric(as.character(data_train$VIS_HIGH))
data_train$VIS_AVG = as.numeric(as.character(data_train$VIS_AVG))
data_train$VIS_LOW = as.numeric(as.character(data_train$VIS_LOW))
data_test$VIS_HIGH = as.numeric(as.character(data_test$VIS_HIGH))
data_test$VIS_AVG = as.numeric(as.character(data_test$VIS_AVG))
data_test$VIS_LOW = as.numeric(as.character(data_test$VIS_LOW))
data_train$PRECIP<-as.numeric(levels(data_train$PRECIP))[data_train$PRECIP]
data_test$PRECIP<-as.numeric(levels(data_test$PRECIP))[data_test$PRECIP]
# Proximity-based imputation of the remaining NAs, then the main forest.
data_train_impute <- rfImpute(eq,ntree=500,data=data_train, importance=TRUE)
data_test_impute <- rfImpute(eq,ntree=500,data=data_test, importance=TRUE)
rf0<-randomForest(eq, data=data_train_impute, sampsize=700, mtry=8, ntree=300, nodesize=1, importance=TRUE)
rf0.fitted <- predict(rf0,newdata=data_train_impute)
rf0.forecast <- predict(rf0,newdata=data_test_impute)
mape(data_test_impute$Daily.Max.1.hour.NO2.Concentration.ppb.,rf0.forecast)
rmse(data_test_impute$Daily.Max.1.hour.NO2.Concentration.ppb.-rf0.forecast)
# Test-set residuals, stored on the model object for the ACF/PACF check.
rf0$residuals <- data_test_impute$Daily.Max.1.hour.NO2.Concentration.ppb.- rf0.forecast
par(mfrow=c(2,1))
acf(rf0$residuals)
pacf(rf0$residuals)
# Build 10 contiguous cross-validation blocks of row indices.
# NOTE(review): block bounds are sized from nrow(data0) (2015 only) although
# the CV below indexes data_train_impute (2015+2016) -- likely should be
# nrow(data_train_impute); confirm.
Nblock<-10
borne_block<-seq(1, nrow(data0), length=Nblock+1)%>%floor
block_list<-list()
l<-length(borne_block)
for(i in c(2:(l-1)))
{
block_list[[i-1]] <- c(borne_block[i-1]:(borne_block[i]-1))
}
block_list[[l-1]]<-c(borne_block[l-1]:(borne_block[l]))
res.bloc<-function(block, formula)
{
  # Out-of-fold residuals for one CV block: fit a random forest on every row
  # of the global data_train_impute except `block`, predict the held-out
  # rows, and return observed - predicted for them.
  #
  # block:   integer vector of held-out row indices
  # formula: model formula passed to randomForest()
  rf<- randomForest(formula, data=data_train_impute[-block,])
  # Bug fix: predict.randomForest takes `newdata=`; the original passed
  # `data=`, which is absorbed by `...` and ignored, so predict() returned
  # the OOB predictions for the training rows instead of predictions for the
  # held-out block (and with the wrong length).
  forecast<-predict(rf, newdata=data_train_impute[block,])
  return(data_train_impute[block,]$Daily.Max.1.hour.NO2.Concentration.ppb.-forecast)
}
block= block_list[[1]]
# Cross-validated RF residuals over all blocks, then an ARIMA model fitted to
# them; the rolling ARIMA forecast of the test residuals corrects rf0.forecast.
res.cv <- lapply(block_list, res.bloc, formula=eq)%>%unlist
par(mfrow=c(2,1))
acf(res.cv)
pacf(res.cv)
fit.arima.res <- auto.arima(res.cv,max.p=4,max.q=12, max.P=0, max.Q=0, trace=T,ic="aic")
#res<-data_test$Daily.Max.1.hour.NO2.Concentration.ppb.-rf0.forecast
prevARIMA.res<-rollArima(fit.arima.res,ynew=data_test$Daily.Max.1.hour.NO2.Concentration.ppb.-rf0.forecast,horizon=1)
prevRF_ARIMA <- rf0.forecast+prevARIMA.res
mape(data_test$Daily.Max.1.hour.NO2.Concentration.ppb., prevRF_ARIMA)
rmse(data_test$Daily.Max.1.hour.NO2.Concentration.ppb.-prevRF_ARIMA)
####################################################
##########################RF+ETS
####################################################
# Same correction idea with an ETS model on the CV residuals.
ts<-ts(res.cv)
fit.ets<-ets(ts)
prevETS<-rollETS(fit.ets,ynew=data_test_impute$Daily.Max.1.hour.NO2.Concentration.ppb.-rf0.forecast,horizon=1)
prevRF_ETS <- rf0.forecast+prevETS
mape(data_test$Daily.Max.1.hour.NO2.Concentration.ppb., prevRF_ETS)
rmse(data_test$Daily.Max.1.hour.NO2.Concentration.ppb.-prevRF_ETS)
####
# Prophet baseline on the raw series.
# NOTE(review): no library(prophet) appears among the packages loaded at the
# top of this script -- confirm prophet is attached before this point.
df0 <- data.frame(ds=data_train$Date, y= data_train$Daily.Max.1.hour.NO2.Concentration.ppb.)
df1 <- data.frame(ds=data_test$Date, y= data_test$Daily.Max.1.hour.NO2.Concentration.ppb.)
proph <- prophet(df = df0)
proph$forecast <- predict(proph, df1)$yhat
mape(data_test$Daily.Max.1.hour.NO2.Concentration.ppb., proph$forecast)
plot(data_test$Daily.Max.1.hour.NO2.Concentration.ppb., type='l')
lines(proph$forecast, col='red')
proph$weekly.seasonality
names(proph)
#####
# Final comparison: best four forecasts overlaid on the observed series.
par(ask=F)
par(mfrow=c(1,1))
col.pal<-brewer.pal(4, "Spectral")
plot(data_test$Daily.Max.1.hour.NO2.Concentration.ppb.,type='l')
lines(prevARIMA,col=col.pal[1])
lines(prevHW_mu,col=col.pal[2])
lines(prevRF_ARIMA,col=col.pal[3])
lines(prevRF_ETS,col=col.pal[4])
legend("topright",col=col.pal,legend=c("ARIMA", "HW_mu", "RF+ARIMA", "RF_ETS"),lty=1, cex =0.75)
##
# Relative absolute error over time for each model.
par(ask=F)
par(mfrow=c(1,1))
plot(data_test$Date, abs(data_test$Daily.Max.1.hour.NO2.Concentration.ppb.-prevRF_ARIMA)/data_test$Daily.Max.1.hour.NO2.Concentration.ppb., type = 'l')
lines(data_test$Date, abs(data_test$Daily.Max.1.hour.NO2.Concentration.ppb.-prevARIMA)/data_test$Daily.Max.1.hour.NO2.Concentration.ppb.,col='red')
lines(data_test$Date, abs(data_test$Daily.Max.1.hour.NO2.Concentration.ppb.-prevHW_mu)/data_test$Daily.Max.1.hour.NO2.Concentration.ppb.,col='blue')
lines(data_test$Date, abs(data_test$Daily.Max.1.hour.NO2.Concentration.ppb.-prevRF_ETS)/data_test$Daily.Max.1.hour.NO2.Concentration.ppb.,col='yellow')
legend("topright",col=c('red', 'blue', 'yellow'),legend=c("ARIMA", "HW_mu", "RF_ETS"),lty=1, cex =0.75)
|
79ff1466f02fcd662dcf66b312312eebfc1b0b94 | 781fe5c7f294094d5716bab48a9571e71b8cc952 | /R/add_answer.R | 9fa065c6ce1fa66ce1ca3c5b7f3bdf8a56e27ab8 | [
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Alaburda/pubquizr | 42e5504e38a25308f18641c61dff89cd70627ca6 | 56509618e797d621601f263eda49b266937329a3 | refs/heads/master | 2022-12-26T04:21:12.862999 | 2020-09-24T21:23:00 | 2020-09-24T21:23:00 | 298,101,893 | 0 | 0 | CC0-1.0 | 2020-09-24T21:23:01 | 2020-09-23T21:47:21 | R | UTF-8 | R | false | false | 565 | r | add_answer.R | #' @title Add an answer to your pub quiz slides
#'
#' @description This function knits a child Rmd document with an answer to a question
#' for use in a pub quiz template.
#' Answers are typically presented at the end of the pub quiz slides.
#' @param x ID from a database
#' @param db The question database the answer is looked up in; it is not
#'   referenced directly in this body, so it is presumably read by the
#'   expanded template from the calling environment -- TODO confirm.
#' @keywords question
#' @export
#' @examples
#' add_answer()
add_answer <- function(x, db) {
# knit_expand() fills inst/rmd/answer_template.Rmd from the package. Note that
# `collapse` and the first `quiet` are arguments to knit_expand(), not
# knit_child(); extra named arguments to knit_expand() become template
# variables, which may not be what was intended -- TODO confirm.
rs <- knitr::knit_child(text = knitr::knit_expand(system.file("rmd", "answer_template.Rmd", package = "pubquizr"), collapse = '\\n', quiet = TRUE), quiet = TRUE)
# Emit the knitted markdown so it is inserted into the calling document.
cat(rs, sep = '\\n')
}
|
1ee285b7a5d3695eebd4126b1466b24ef2dec6fd | d43c132dd7a404c43e3591575723c2acd124121b | /WorldCircumnavigation/MapTheTrip.R | 0fe694ac07eab178dfe7e89c126c002c896ff29e | [] | no_license | KendonDarlington/WorldCircumnavigation | 70e8d37cfb5944027dd59a862581fab96935dfe8 | 67baf8ca9307808ac796566a101f13f46ab79282 | refs/heads/main | 2023-06-05T01:47:38.292301 | 2021-06-25T02:39:17 | 2021-06-25T02:39:17 | 380,102,638 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 491 | r | MapTheTrip.R | #Import packages
# Packages: leaflet for the interactive map, tidyverse for read_csv().
library(leaflet)
library(tidyverse)
# Journey waypoints, one row per stop, with Lat/Long/Label columns.
dfGeo <- read_csv('Henry Crews Journey - Geo.csv')
# Draw the map: always-visible labels under each marker, a watercolor basemap,
# and a dashed red polyline connecting the stops in row order.
# Fix: `noHide = T` used the reassignable shorthand; spelled out as TRUE.
leaflet(data = dfGeo) %>%
  addTiles() %>%
  addMarkers(lng=~Long, lat=~Lat, label=~Label, labelOptions = labelOptions(noHide = TRUE, direction = "bottom",textsize = "17px")) %>%
  addProviderTiles(providers$Stamen.Watercolor) %>%
  addPolylines(data = dfGeo, lng = ~Long, lat = ~Lat, group = ~Label, color = 'red', dashArray = '10,20')
|
8c5c6304769d6f5d150193a34a929ee97787d478 | ef8d37c4edf65252f5c30a7647e52c8f9cf034b5 | /mock_analysis.R | 3f1ab064aef4dee9cb4faeed373cb7f2fc6ed3d3 | [
"MIT"
] | permissive | bsh2/Individual-response-framework | cdbe224d72b9e6933622cc372457110400f98770 | cd1efc79190ace1c7ca375411d6f9c7b03f6840f | refs/heads/master | 2023-05-11T22:39:18.012840 | 2023-05-07T20:40:27 | 2023-05-07T20:40:27 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,517 | r | mock_analysis.R | # ==============================================================================
# CONTACT
# ==============================================================================
# b.stephens-hemingway[at]rgu.ac.uk
# ==============================================================================
# IMPORT DATA
# ==============================================================================
mock_data <- read.csv('mock_data.csv')
# ==============================================================================
# CALCULATE TYPICAL ERROR FROM TEST-RETEST RELIABILITY TRIALS
# ==============================================================================
# Step 1: Calculate test-retest difference scores [Test(2)-Test(1)] for each
# individual across both outcome measures (S7SF, MCARN)
# Step 2: Calculate standard deviation of the difference scores
# Step 3: Estimate TE by dividing the standard deviation of the difference
# scores by the positive square root of 2.
# Typical error (TE) from a pair of test-retest reliability trials:
# the standard deviation of the retest difference scores divided by sqrt(2).
#
# RT1, RT2: numeric vectors of first- and second-trial scores.
# Returns a single numeric TE estimate.
calculate_TE <- function(RT1, RT2) {
  sd(RT2 - RT1) / sqrt(2)
}
S7SF_TE <- calculate_TE(mock_data$S7SF_RT1, mock_data$S7SF_RT2)
MCARN_TE <- calculate_TE(mock_data$MCARN_RT1, mock_data$MCARN_RT2)
# ==============================================================================
# COMPUTE TYPICAL ERROR FROM PUBLISHED RELIABILITY DATA (CoV)
# ==============================================================================
# See SF-S6 of the supplementary spreadsheet and paper for detailed explanation
# Take CV% as 4.94% for total work done (kJ) in the CCT110 exhaustion test
# TE Estimate = [CV * Mean(Baseline)]/100
CCT110_TE <- (mean(mock_data$CCT110_PRE) * 4.94) / 100
# ==============================================================================
# COMPUTE n-ADJUSTED TRUE-SCORE CONFIDENCE INTERVALS OF ANY WIDTH FOR BASELINE
# VALUES, USING THE T-DISTRIBUTION
# ==============================================================================
# Requires ---------------------------------------------------------------------
ci_width_ts <- 90 # Adjusted True Score CI width in % (1-99)
# ------------------------------------------------------------------------------
# n-adjusted true-score confidence interval around baseline values, using the
# t-distribution with length(base_vals) - 1 degrees of freedom.
#
# base_vals: numeric vector of individual pre-intervention values.
# TE:        typical error already estimated for the measure.
# ci_width:  interval width in percent (1-99). Defaults to the script-level
#            `ci_width_ts`, so existing two-argument calls behave exactly as
#            before (this replaces a hidden dependence on that global).
# Returns a data.frame with columns participant, LB, UB.
TS_CI_Adj <- function(base_vals, TE, ci_width = ci_width_ts){
  # Two-sided critical t value for the requested interval width.
  t_c <- abs(qt(((100 - ci_width) / 100) / 2, length(base_vals) - 1))
  data.frame(participant = seq_along(base_vals),
             LB = base_vals - t_c * TE,
             UB = base_vals + t_c * TE)
}
# True-score CIs around each baseline measure (width set by ci_width_ts above).
S7SF_TSCI <- TS_CI_Adj(mock_data$S7SF_PRE, S7SF_TE)
MCARN_TSCI <- TS_CI_Adj(mock_data$MCARN_PRE, MCARN_TE)
CCT110_TSCI <- TS_CI_Adj(mock_data$CCT110_PRE, CCT110_TE)
# ==============================================================================
# COMPUTE n-ADJUSTED CHANGE SCORE CIs (OF ANY WIDTH) USING PRE-POST DIFFERENCE
# SCORES FOLLOWING THE INTERVENTION, USING THE T-DISTRIBUTION.
# ==============================================================================
# Requires ---------------------------------------------------------------------
ci_width_cs <- 90 # Adjusted Change Score CI width in % (1-99)
# Typical error value calculated in one of previous sections
# Pre and post-intervention measured values
# ------------------------------------------------------------------------------
# n-adjusted change-score confidence interval from pre/post-intervention
# values. The half-width is sqrt(2) * t_c * TE because a change score carries
# the measurement error of two observations.
#
# pre_vals, post_vals: numeric vectors of measured values (same length).
# TE:        typical error for the measure.
# ci_width:  interval width in percent (1-99). Defaults to the script-level
#            `ci_width_cs`, so existing three-argument calls behave exactly as
#            before (this replaces a hidden dependence on that global).
# Returns a data.frame with columns participant, difference, LB, UB.
CS_CI_Adj <- function(pre_vals, post_vals, TE, ci_width = ci_width_cs){
  stopifnot(length(pre_vals) == length(post_vals))
  difference <- post_vals - pre_vals
  t_c <- abs(qt(((100 - ci_width) / 100) / 2, length(pre_vals) - 1))
  half_width <- sqrt(2) * t_c * TE
  data.frame(participant = seq_along(pre_vals),
             difference = difference,
             LB = difference - half_width,
             UB = difference + half_width)
}
# Change-score CIs for each measure (width set by ci_width_cs above).
S7SF_CSCI <- CS_CI_Adj(mock_data$S7SF_PRE, mock_data$S7SF_POST, S7SF_TE)
MCARN_CSCI <- CS_CI_Adj(mock_data$MCARN_PRE, mock_data$MCARN_POST, MCARN_TE)
CCT110_CSCI <- CS_CI_Adj(mock_data$CCT110_PRE, mock_data$CCT110_POST, CCT110_TE)
# ==============================================================================
# ZERO-BASED, AND SWC-BASED THRESHOLDS FOR ASSESSING MEANINGFUL CHANGE
# ==============================================================================
# Requires ---------------------------------------------------------------------
# Adjusted change score CI (upper and lower bound values) calculated in the
# previous section [for desired CI width]
# Smallest worthwhile change (SWC) = +/- SWC_factor x baseline SD. The sign
# encodes the direction in which improvement is expected for each measure.
SWC_factor <- 0.2
# You could pick < 0.2 (Trivial), 0.2-0.5 (small), 0.5-0.8 (moderate) or
# >0.8 (large). See Hopkins (2004) for more detail.
SWC_MCARN <- sd(mock_data$MCARN_PRE) * SWC_factor
SWC_S7SF <- -sd(mock_data$S7SF_PRE) * SWC_factor
# Note the - because we are looking for skinfold reduction post-intervention
# So threshold will be LB < SWC and UB < SWC for S7SF. We use ifelse() to
# control for this.
SWC_CCT110 <- sd(mock_data$CCT110_PRE) * SWC_factor
# ------------------------------------------------------------------------------
# Flag whether each adjusted change-score CI clears the zero-based and the
# SWC-based thresholds for meaningful change.
#
# LB, UB:   numeric vectors of lower/upper CI bounds per participant.
# SWC:      smallest worthwhile change; its sign gives the direction of
#           benefit (SWC <= 0 means both bounds must lie below it, as for a
#           skinfold reduction).
# negative: TRUE if improvement for this measure is a negative change (both
#           bounds must be < 0); FALSE if improvement is positive.
# Returns a data.frame with participant, LB, UB and logical zero_satisfy /
# SWC_satisfy columns.
CSCI_Threshold <- function(LB, UB, SWC, negative){
  # Plain if/else for the scalar direction switches. The earlier version used
  # ifelse() with assignments inside its branches, which only worked by
  # accident for length-one conditions and is fragile.
  if (isTRUE(negative)) {
    zero_satisfy <- LB < 0 & UB < 0
  } else {
    zero_satisfy <- LB > 0 & UB > 0
  }
  if (SWC > 0) {
    SWC_satisfy <- LB > SWC & UB > SWC
  } else {
    SWC_satisfy <- LB < SWC & UB < SWC
  }
  data.frame(participant = seq_along(LB), LB, UB, zero_satisfy, SWC_satisfy)
}
# Threshold checks per measure; negative = TRUE only for skinfolds (S7SF),
# where improvement is a reduction.
S7SF_Threshold <- CSCI_Threshold(S7SF_CSCI$LB, S7SF_CSCI$UB, SWC_S7SF, T)
MCARN_Threshold <- CSCI_Threshold(MCARN_CSCI$LB, MCARN_CSCI$UB, SWC_MCARN, F)
CCT110_Threshold <- CSCI_Threshold(CCT110_CSCI$LB, CCT110_CSCI$UB, SWC_CCT110,F)
# ==============================================================================
# ESTIMATING PROPORTION OF RESPONSE
# ==============================================================================
# To estimate the proportion of 'response' and 'non-response' to a chronic
# intervention, we first estimate the variability in change scores directly
# attributable to the intervention, and the variability attributed to
# biological variation and measurement error. This partitioning of variance
# is achieved by comparing standard deviation of change scores between the
# intervention group and control group.
# Pre/post change scores for each measure.
mock_data$S7SF_DIFF <- mock_data$S7SF_POST - mock_data$S7SF_PRE
mock_data$MCARN_DIFF <- mock_data$MCARN_POST - mock_data$MCARN_PRE
mock_data$CCT110_DIFF <- mock_data$CCT110_POST - mock_data$CCT110_PRE
# Group-wise summary helper (GROUP: 1 = intervention, 2 = control). This
# replaces the repeated sd(unlist(subset(...))) triads of the first draft.
group_stat <- function(col, grp, f) f(mock_data[mock_data$GROUP == grp, col])
diff_cols <- c("S7SF_DIFF", "MCARN_DIFF", "CCT110_DIFF")
response <- data.frame(
  Measure = c("S7SF", "MCARN", "CCT110"),
  Int_SD = vapply(diff_cols, group_stat, numeric(1), grp = 1, f = sd,
                  USE.NAMES = FALSE),
  Int_Mean = vapply(diff_cols, group_stat, numeric(1), grp = 1, f = mean,
                    USE.NAMES = FALSE),
  Ctr_SD = vapply(diff_cols, group_stat, numeric(1), grp = 2, f = sd,
                  USE.NAMES = FALSE),
  SWC = c(SWC_S7SF, SWC_MCARN, SWC_CCT110)
)
# SD attributable to individual response: SD_IR = sqrt(SD_INT^2 - SD_CTR^2).
# BUG FIX: the original referenced an undefined `descriptive_stats` object
# here; these columns live in `response`.
response$SD_IR <- sqrt(response$Int_SD^2 - response$Ctr_SD^2)
# The distribution of interest is modelled as a normal distribution centred at
# the mean observed change score with standard deviation SD_IR. The proportion
# of responders is the tail area beyond the SWC, taken in the direction of
# benefit (below a negative SWC, above a positive one), expressed in percent.
response$Proportion <- ifelse(response$SWC < 0,
                              pnorm(response$SWC, response$Int_Mean,
                                    response$SD_IR) * 100,
                              (1 - pnorm(response$SWC, response$Int_Mean,
                                         response$SD_IR)) * 100)
# ==============================================================================
# BOOTSTRAPPING
# ==============================================================================
# As an additional series of steps, you can obtain confidence limits for the
# estimated proportion of response using bootstrapping, which is a resampling
# technique that uses the observed sample to model the population distribution.
# NOTE(review): the boot package is loaded but propboot() below implements the
# resampling by hand -- this library() call looks unused; confirm before removing.
library(boot)
boot_ci <- 90        # width (%) of the bootstrap confidence interval
nbootstraps <- 1000  # number of bootstrap resamples to attempt
# Bootstrap confidence limits for the estimated proportion of response.
# Inputs:
# ctrl_data: Vector of control group difference scores
# int_data: Vector of intervention group difference scores
# swc: single numerical value for SWC (its sign gives the direction of benefit)
# ci_width: single numerical value (1-99) [units %] for CI width
# bootstraps: number of bootstrap resamples to attempt
# Returns the two quantiles of the bootstrap distribution bounding the CI.
propboot <- function(ctrl_data, int_data, swc, ci_width, bootstraps){
  # Resamples for which SD_IR is undefined (sd_int <= sd_ctr) are discarded;
  # `counter` tracks how many were dropped.
  counter <- 0
  proportion_response <- as.numeric()
  for (i in 1:bootstraps){
    # Resample each group with replacement.
    tv_ctr <- sample(ctrl_data, length(ctrl_data), replace = TRUE)
    tv_int <- sample(int_data, length(int_data), replace = TRUE)
    mean_ctr <- mean(tv_ctr)  # NOTE(review): computed but never used below
    mean_int <- mean(tv_int)
    sd_ctr <- sd(tv_ctr)
    sd_int <- sd(tv_int)
    if (sd_int > sd_ctr){
      # SD attributable to individual response for this resample.
      sd_ir <- sqrt( (sd_int)^2 - (sd_ctr)^2 )
      # Tail area beyond the SWC in the direction of benefit, in percent.
      if (swc < 0){
        prop_resp <- as.numeric((pnorm(swc, mean_int, sd_ir) * 100))
      }
      if (swc >= 0){
        prop_resp <- as.numeric(((1 - pnorm(swc, mean_int, sd_ir)) * 100))
      }
      # Grows the result vector one element at a time (fine at this scale).
      proportion_response[length(proportion_response)+1] <- prop_resp
    } else {counter <- counter + 1}
  }
  print("Number of bootstraps completed:")
  print(bootstraps - counter)
  # Equal-tailed CI bounds of the bootstrap distribution. NOTE(review): if
  # every resample was discarded, proportion_response is empty and quantile()
  # will error here.
  output <- quantile(proportion_response, 
   probs = c(((1-(ci_width/100))/2),((ci_width/100) + ((1 - (ci_width/100))/2))))
  return(output)
}
# Per-group change-score vectors (GROUP: 1 = intervention, 2 = control).
ctr_S7SF <- as.numeric(unlist(subset(mock_data, GROUP == 2, S7SF_DIFF)))
int_S7SF <- as.numeric(unlist(subset(mock_data, GROUP == 1, S7SF_DIFF)))
ctr_MCARN <- as.numeric(unlist(subset(mock_data, GROUP == 2, MCARN_DIFF)))
int_MCARN <- as.numeric(unlist(subset(mock_data, GROUP == 1, MCARN_DIFF)))
ctr_CCT110 <- as.numeric(unlist(subset(mock_data, GROUP == 2, CCT110_DIFF)))
int_CCT110 <- as.numeric(unlist(subset(mock_data, GROUP == 1, CCT110_DIFF)))
# Run the bootstraps
# (the CI bounds are printed/auto-printed; they are not stored in variables)
propboot(ctr_S7SF, int_S7SF, SWC_S7SF, boot_ci, nbootstraps)
propboot(ctr_MCARN, int_MCARN, SWC_MCARN, boot_ci, nbootstraps)
propboot(ctr_CCT110, int_CCT110, SWC_CCT110, boot_ci, nbootstraps)
21f883166fa6793ec464c91bf11da8ed68881657 | 36ab27740e9f9ac5e7b3f0818b0b8c7e19728bf3 | /cluster.R | be6990123b9427ce5d42ca51fcf04c6195b7e85f | [] | no_license | MenglingHettinger/Large-Scale-Data-Analysis | f4a6790e27d3d51ad9fcd133a269b128cf2f696b | acba8597d4e0c266054ad294a7149edc060ae296 | refs/heads/master | 2021-01-19T11:14:41.138340 | 2016-03-02T16:41:39 | 2016-03-02T16:41:39 | 18,680,663 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,215 | r | cluster.R | library(vegan)
# Three city-by-feature tables at different sampling settings -- TODO confirm
# file contents; read with no header, tab-separated.
data200 <- read.table("20city200_removebias.txt", header = F, sep ="\t")
dataf5 <- read.table("20cityfreq5_removebias.txt", header = F, sep ="\t")
dataf10 <- read.table("20cityfreq10_removebias.txt", header = F, sep ="\t")
# Inspect dimensions.
dim(data200)
dim(dataf5)
dim(dataf10)
# Transpose so the 20 cities become rows (observations) for the distance step.
datamatrix200 <- as.matrix(data200)
datamatrix200 <- t(datamatrix200)
datamatrixf5 <- as.matrix(dataf5)
datamatrixf5 <- t(datamatrixf5)
datamatrixf10 <- as.matrix(dataf10)
datamatrixf10 <- t(datamatrixf10)
# Pairwise Jaccard dissimilarities between cities (vegan::vegdist).
d200 <- vegdist(datamatrix200, method="jaccard")
df5 <- vegdist(datamatrixf5, method="jaccard")
df10 <- vegdist(datamatrixf10, method="jaccard")
# k-means with 4 clusters on each distance object.
kmeansfit200 <- kmeans(d200, 4, iter.max = 10)
kmeansfitf5 <- kmeans(df5, 4, iter.max = 10)
kmeansfitf10 <- kmeans(df10, 4, iter.max = 10)
# Hierarchical clustering with complete and single linkage for comparison.
hclustfitcomp200 <- hclust(d200, method = "complete")
hclustfitsing200 <- hclust(d200, method = "single")
hclustfitcompf5 <- hclust(df5, method = "complete")
hclustfitsingf5 <- hclust(df5, method = "single")
hclustfitcompf10 <- hclust(df10, method = "complete")
hclustfitsingf10 <- hclust(df10, method = "single")
# Fit k-means for k = 1..max_k and print each fit, to eyeball how the
# clustering changes with k (elbow inspection).
#
# d:     numeric matrix of observations (kmeans() coerces its input with
#        as.matrix(), so the dist objects used above also work).
# max_k: largest number of clusters to try; defaults to 20, the value that
#        was previously hard-coded.
# Invisibly returns NULL; all output is printed as a side effect.
kmeans_k <- function(d, max_k = 20){
  for (i in seq_len(max_k)) {
    kmeansfit <- kmeans(d, i, iter.max = 20)
    print(i)
    print(kmeansfit)
  }
  invisible(NULL)
}
|
9370a88f4facb02df32f96167660eec6c5f4aaab | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/pifpaf/examples/paf.Rd.R | 2c7d46a969aac117d3b0a0cf27fdad1f3efc23bf | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,243 | r | paf.Rd.R | library(pifpaf)
### Name: paf
### Title: Population Attributable Fraction
### Aliases: paf
### ** Examples
#Example 1: Exponential Relative Risk
#--------------------------------------------
set.seed(18427)
X <- data.frame(Exposure = rnorm(100,3,1))
thetahat <- 0.12
rr <- function(X, theta){exp(theta*X)}
#Using the empirical method
paf(X, thetahat, rr)
#Same example with kernel method
paf(X, thetahat, rr, method = "kernel")
#Same example with approximate method
Xmean <- data.frame(Exposure = mean(X[,"Exposure"]))
Xvar <- var(X[,"Exposure"])
paf(Xmean, thetahat, rr, method = "approximate", Xvar = Xvar)
#Additional options for approximate:
paf(Xmean, thetahat, rr, method = "approximate", Xvar = Xvar,
deriv.method = "Richardson", deriv.method.args = list(eps=1e-3, d=0.1))
#Example 2: Linear Relative Risk with weighted sample
#--------------------------------------------
set.seed(18427)
X <- data.frame(Exposure = rbeta(100,3,1))
weights <- runif(100)
normalized_weights <- weights/sum(weights)
thetahat <- 0.12
rr <- function(X, theta){theta*X^2 + 1}
paf(X, thetahat, rr, weights = normalized_weights)
#Additional options for kernel:
paf(X, thetahat, rr, weights = normalized_weights,
method = "kernel", ktype = "cosine", bw = "nrd0")
#Example 3: Multivariate Linear Relative Risk
#--------------------------------------------
set.seed(18427)
X1 <- rnorm(100,4,1)
X2 <- rnorm(100,2,0.4)
X <- data.frame(Exposure = X1, Covariate = X2)
thetahat <- c(0.12, 0.03)
#When creating relative risks avoid using the $ operator
#as it doesn't work under approximate method
rr_not <- function(X, theta){
exp(theta[1]*X$Exposure + theta[2]*X$Covariate)
}
rr_better <- function(X, theta){
exp(theta[1]*X[,"Exposure"] + theta[2]*X[,"Covariate"])
}
#For the empirical method it makes no difference:
paf(X, thetahat, rr_better)
paf(X, thetahat, rr_not)
#But the approximate method crashes due to operator
Xmean <- data.frame(Exposure = mean(X[,"Exposure"]),
Covariate = mean(X[,"Covariate"]))
Xvar <- var(X)
paf(Xmean, thetahat, rr_better, method = "approximate", Xvar = Xvar)
## Not run:
##D #Error: $ operator in rr definitions don't work in approximate
##D paf(Xmean, thetahat, rr_not, method = "approximate", Xvar = Xvar)
## End(Not run)
## Not run:
##D #Error: Multivariate cases cannot be evaluated with kernel method
##D paf(X, thetahat, rr, method = "kernel")
## End(Not run)
#Example 4: Categorical Relative Risk & Exposure
#--------------------------------------------
set.seed(18427)
mysample <- sample(c("Normal","Overweight","Obese"), 100,
replace = TRUE, prob = c(0.4, 0.1, 0.5))
X <- data.frame(Exposure = mysample)
thetahat <- c(1, 1.2, 1.5)
#Categorical relative risk function
rr <- function(X, theta){
#Create return vector with default risk of 1
r_risk <- rep(1, nrow(X))
#Assign categorical relative risk
r_risk[which(X[,"Exposure"] == "Normal")] <- thetahat[1]
r_risk[which(X[,"Exposure"] == "Overweight")] <- thetahat[2]
r_risk[which(X[,"Exposure"] == "Obese")] <- thetahat[3]
return(r_risk)
}
paf(X, thetahat, rr, check_rr = FALSE)
#Example 5: Continuous Exposure and Categorical Relative Risk
#------------------------------------------------------------------
set.seed(18427)
#Assume we have BMI from a sample
BMI <- data.frame(Exposure = rlnorm(100, 3.1, sdlog = 0.1))
#Theoretical minimum risk exposure is at 20kg/m^2 in borderline "Normal" category
BMI_adjusted <- BMI - 20
thetahat <- c(Malnourished = 2.2, Normal = 1, Overweight = 1.8,
Obese = 2.5)
rr <- function(X, theta){
#Create return vector with default risk of 1
r_risk <- rep(1, nrow(X))
#Assign categorical relative risk
r_risk[which(X[,"Exposure"] < 0)] <- theta[1] #Malnourished
r_risk[intersect(which(X[,"Exposure"] >= 0),
which(X[,"Exposure"] < 5))] <- theta[2] #Normal
r_risk[intersect(which(X[,"Exposure"] >= 5),
which(X[,"Exposure"] < 10))] <- theta[3] #Overweight
r_risk[which(X[,"Exposure"] >= 10)] <- theta[4] #Obese
return(r_risk)
}
paf(BMI_adjusted, thetahat, rr, check_exposure = FALSE)
#Example 6: Bivariate exposure and rr ("classical PAF")
#------------------------------------------------------------------
set.seed(18427)
mysample <- sample(c("Exposed","Unexposed"), 1000,
replace = TRUE, prob = c(0.1, 0.9))
X <- data.frame(Exposure = mysample)
theta <- c("Exposed" = 2.5, "Unexposed" = 1.2)
rr <- function(X, theta){
#Create relative risk function
r_risk <- rep(1, nrow(X))
#Assign values of relative risk
r_risk[which(X[,"Exposure"] == "Unexposed")] <- theta["Unexposed"]
r_risk[which(X[,"Exposure"] == "Exposed")] <- theta["Exposed"]
return(r_risk)
}
paf(X, theta, rr)
#Example 7: Continuous exposure, several covariates
#------------------------------------------------------------------
X <- data.frame(Exposure = rbeta(100, 2, 3),
Age = runif(100, 20, 100),
Sex = sample(c("M","F"), 100, replace = TRUE),
BMI = rlnorm(100, 3.2, 0.2))
thetahat <- c(-0.1, 0.05, 0.2, -0.4, 0.3, 0.1)
rr <- function(X, theta){
#Create risk vector
Risk <- rep(1, nrow(X))
#Identify subpopulations
males <- which(X[,"Sex"] == "M")
females <- which(X[,"Sex"] == "F")
#Calculate population specific rr
Risk[males] <- theta[1]*X[males,"Exposure"] +
theta[2]*X[males,"Age"]^2 +
theta[3]*X[males,"BMI"]/2
Risk[females] <- theta[4]*X[females,"Exposure"] +
theta[5]*X[females,"Age"]^2 +
theta[6]*X[females,"BMI"]/2
return(Risk)
}
paf(X, thetahat, rr)
|
df40c747246414c48f6b7b7a203396c15308323b | f23e5cb1e2fdaff2f4a00cdd42ab90ebc5ed05b6 | /R/compressedReads.R | 1b42f2b2bae00cacc2057366bc253c98f93d9af7 | [] | no_license | lianos/TagSeq-original | 9bf5811a50d17b0d64af63223e6e8390cc0f655b | 533c5e351d96ff958e2bc1b1ed7828df402cf881 | refs/heads/master | 2021-01-21T11:09:14.368272 | 2013-03-18T15:25:45 | 2013-03-18T15:25:45 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,763 | r | compressedReads.R | ##
## Not really using these so anymore
##
# Validity check: a CompressedReads object must carry a `count` column in its
# element metadata (the multiplicity of each unique range).
setValidity("CompressedReads", function(object) {
  meta <- values(object)
  if (!'count' %in% colnames(meta)) {
    return("`count` missing from elementMetadata")
  }
  TRUE
})
# Generic for collapsing duplicated reads/ranges into unique ones plus counts;
# methods for GRanges and IRanges are defined below.
setGeneric("compressReads",
function(x, ...) {
  standardGeneric("compressReads")
})
# Coerce CompressedReads -> GRanges by re-labelling the class; counts remain
# in the metadata. (The commented-out code below is an older expansion-based
# implementation kept for reference.)
setAs("CompressedReads", "GRanges",function(from) {
  if (length(from) == 0L) {
    return(GRanges())
  }
  ## times <- values(from)$count
  ## GRanges(seqnames=rep(seqnames(from), times),
  ##         ranges=IRanges(start=rep(start(from), times),
  ##           end=rep(end(from), times)),
  ##         strand=rep(strand(from), times))
  class(from) <- "GRanges"
  from
})
# Coerce CompressedReads -> IRanges by expanding each unique range `count`
# times (seqname/strand information is dropped).
setAs("CompressedReads", "IRanges", function(from) {
  if (length(from) == 0L) {
    return(IRanges())
  }
  times <- values(from)$count
  IRanges(start=rep(start(from), times), end=rep(end(from), times))
})
# Coerce plain GRanges -> CompressedReads via the compression routine.
setAs("GRanges", "CompressedReads", function(from) {
  compressReads(from)
})
# Coerce plain IRanges -> CompressedReads via the compression routine.
setAs("IRanges", "CompressedReads", function(from) {
  # BUG FIX: previously called `compresRanges(from)` (missing an "s"); no such
  # function exists, so the coercion would fail at run time. The helper
  # defined in this file is compressRanges().
  compressRanges(from)
})
##' Create a \code{\link{CompressedReads}} object from \code{GRanges} reads
##' by keeping only the unique ranges, and adding a $count value in
##' the \code{elementMetadata} for the number of times that unique range
##' appears in \code{x}.
##'
##' @param x The reads (\code{GRanges}) object
##' @param with.values passed through to compressRanges: whether the count is
##'   appended to any existing metadata
setMethod("compressReads", c(x="GRanges"),
function(x, with.values=TRUE, ...) {
  # Compress per seqname, and within a seqname per strand, since reads only
  # count as duplicates when seqname and strand also match.
  chr.reads <- lapply(split(x, seqnames(x)), function(sreads) {
    if (length(sreads) == 0) {
      return(NULL)
    }
    seqname <- seqnames(sreads)[1]
    cstrand <- lapply(split(sreads, strand(sreads)), function(.reads) {
      # Compress the plain ranges, carrying the read metadata along.
      iranges <- ranges(.reads)
      values(iranges) <- values(.reads)
      i <- compressRanges(iranges, with.values=with.values)
      meta <- elementMetadata(i)
      elementMetadata(i) <- NULL
      if (length(i) > 0L) {
        # Rebuild a GRanges for this seqname/strand from the unique ranges.
        gr <- GRanges(seqnames=seqname, ranges=i, strand=strand(.reads)[1])
        values(gr) <- meta
        gr
      } else {
        NULL
      }
    })
    # Drop empty strands and concatenate.
    do.call(c, unname(cstrand[!sapply(cstrand, is.null)]))
  })
  # Drop empty seqnames, concatenate, and tag the result class.
  cr <- do.call(c, unname(chr.reads[!sapply(chr.reads, is.null)]))
  class(cr) <- 'CompressedReads'
  cr
})
# IRanges method: delegate straight to the workhorse compressRanges().
setMethod("compressReads", c(x="IRanges"),
function(x, ...) {
  compressRanges(x, ...)
})
# Collapse an IRanges object to its unique ranges, recording how many times
# each appeared in a `count` metadata column. If the input already carries a
# $count column, those counts are summed per unique range instead.
#
# x: an IRanges object (optionally with a $count metadata column).
# with.values: if TRUE, add `count` alongside any existing metadata of the
#   unique ranges; if FALSE, replace the metadata with just `count`.
compressRanges <- function(x, with.values=TRUE, ...) {
  stopifnot(inherits(x, 'IRanges'))
  if (length(x) == 0L) {
    i <- IRanges()
    values(i) <- DataFrame(count=rep(0L, 0))
  } else {
    x <- x[order(x)]
    i <- unique(x)
    orig.counts <- values(x)$count
    has.counts <- !is.null(orig.counts)
    # For each input range, the index of its unique representative.
    sh <- subjectHits(findOverlaps(x, i, type='equal'))
    if (!(any(duplicated(sh)))) {
      ## Nothing has changed
      if (!has.counts) {
        values(x)$count <- 1L
      }
      return(x)
    }
    if (has.counts) {
      # Sum the pre-existing counts within each unique range.
      # NOTE(review): dlply/idata.frame/summarise come from plyr, which is not
      # loaded in this file -- confirm it is attached elsewhere in the package.
      orig.count <- dlply(idata.frame(data.frame(idx=sh, count=orig.counts)),
                          .(idx), summarise, count=sum(count))
      counts <- unlist(unname(orig.count), use.names=FALSE)
    } else {
      # x is sorted, so equal ranges map to adjacent hits: run lengths of the
      # hit indices are exactly the multiplicities.
      counts <- runLength(Rle(sh))
    }
    if (with.values) {
      values(i)$count <- counts
    } else {
      values(i) <- DataFrame(count=counts)
    }
  }
  i
}
# Generic: expand a compressed object back to one element per original read.
setGeneric("uncompress", function(x, ...) standardGeneric("uncompress"))
# CompressedReads method: expand via the GRanges method, rescale any per-range
# `tpm` back to per-read values, and reset every count to 1.
# NOTE(review): the `with.values` argument is accepted but a literal FALSE is
# always forwarded -- confirm whether that is intentional.
setMethod("uncompress", c(x="CompressedReads"),
function(x, with.values=FALSE, ...) {
  f <- selectMethod('uncompress', 'GRanges')
  x <- f(x, with.values=FALSE, ...)
  if (!is.null(values(x)$tpm)) {
    values(x)$tpm <- values(x)$tpm / values(x)$count
  }
  values(x)$count <- 1L
  x
})
# GRanges method: repeat each range `count` times (a no-op when there is no
# numeric count column).
setMethod("uncompress", c(x="GRanges"),
function(x, with.values=FALSE, ...) {
  count <- values(x)$count
  if (is.numeric(count)) {
    x <- rep(x, count)
  }
  x
})
|
15418d54fd7db959b93f3723cee6d00cfda335a7 | 7aa2a91ca736a78b872e0512b5fe319667f2649a | /global.r | 971ed22d5752152fd228d498a55749554f1406d8 | [] | no_license | yvanlebras/wallace-docker | b5602a229da303c7cef46849d6cc1ed2239bca23 | d437ceeef89add209be3ef9a37ffe83e84b8d82d | refs/heads/master | 2020-08-15T06:28:33.760558 | 2019-11-07T17:18:39 | 2019-11-07T17:18:39 | 215,293,433 | 0 | 1 | null | 2019-11-07T17:18:40 | 2019-10-15T12:29:12 | R | UTF-8 | R | false | false | 38 | r | global.r | library('wallace')
library('rPython')
|
b5e58edf29dfe4c98222badba4b9b3e39626d5ef | ef16e698845df406c55074a3079b12c45b037f78 | /man/cellPopC.Rd | dfca58209686192cc516365f62c6d2f312ded3cd | [] | no_license | kailashbp/CellCODE | 5c738e04112926799c07e958cfa856679aeeb86c | 65b5640bb78da19ca89a6bbe529653485e553a0b | refs/heads/master | 2022-07-12T03:04:21.399082 | 2020-05-12T11:54:06 | 2020-05-12T11:54:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 590 | rd | cellPopC.Rd | \name{cellPopC}
\alias{cellPopC}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Assign cell population using correlation
}
\usage{
cellPopC(data, grp, SPVs)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{data}{
Gene expression data
}
\item{grp}{
Clinical group factors. Used only to remove the group effect before correlation computation.
}
\item{SPVs}{
A matrix of cell proportion variable with a row for each sample of data and a column for each cell type
}
}
\value{
A gene by cell-type matrix of correlation coefficients
}
|
d35a23d2f986192e38ff3c16b3772f9fd29b1b4b | 78bd071a206d4f35a02520a27a6d5e0434c152c6 | /R/process_video.R | 0a3024f564192e4d028ffcf40fd6c36d80f2b7a6 | [
"MIT"
] | permissive | Middleton-Lab/KinematicAnalysis | 96c1ab00ee8e24bbf970a9755c2c82c9e36219e9 | 8e1e4e4a064e8f873af4b9ed1eaadefa629ec1d3 | refs/heads/main | 2022-05-01T05:49:42.457253 | 2022-03-25T02:46:33 | 2022-03-25T02:46:33 | 240,816,100 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,853 | r | process_video.R | #' Process video file
#'
#' @param ii integer: row od \code{Trial_Info} to process
#' @param Trial_Info data.frame: Structure containing all the trials to process
#' @param bef_aft string: Before or after flag
#' @param calibration_coords data.frame: Calibration coordinates
#'
#' @return data.frame
#' \enumerate{
#' \item \code{MouseID}
#' \item \code{Trial}
#' \item \code{Before_After}
#' \item \code{mean_speed}
#' \item \code{stride_frequency}
#' \item \code{mean_stride_length}
#' \item \code{n_strides_df}
#' \item \code{duty_factor}
#' \item \code{stance_width}
#' \item \code{sacrum_vert_displacement}
#' }
#'
#' @export
#'
process_video <- function(ii, Trial_Info, bef_aft, calibration_coords) {
# M is tibble of xyz points for eye, sacrum, hip, knee, ankle, foot with
# time and frame number
# cal_rotate is the rotated calibration points
message(ii)
M_cal_rotate <- process_kine_data(Trial_Info,
ii,
calibration_coords,
bef_aft)
M <- M_cal_rotate$M
cal_rotate <- M_cal_rotate$cal_rotate
# Calculate variables of interest
mean_speed <- preferred_speed(M)
strides <- stride_parameters(ii, M)
duty_factor <- duty_factor_calc(M, thresh = 0.7)
stance_width <- stance_width_calc(ii, M)
return(data.frame(
MouseID = Trial_Info$MouseID[ii],
Trial = Trial_Info$Trial[ii],
Before_After = Trial_Info$Before_After[ii],
mean_speed = mean_speed,
stride_frequency = strides$stride_frequency,
mean_stride_length = strides$mean_stride_length,
n_strides_df = duty_factor$n_strides_df,
duty_factor = duty_factor$duty_factor,
stance_width = stance_width$mean_stance_width,
sacrum_vert_displacement = stance_width$sacrum_vert_displacement))
}
|
f945b89a882808685bf9ddf5ae29981979f0a811 | fa8f04d6013a8664499b2e0c36f78b2dedae1b74 | /man/geom_edge.Rd | 8348edbedc820f2bd599ee4dc44a32463294782e | [
"MIT"
] | permissive | RhiannonEdge/sirgraph | 9a2b3cebf8bae22806dfc7a06760b4c0a7ba7733 | f47eaba3b6d82493913ac489bd9e385c2400bf88 | refs/heads/master | 2021-01-17T23:31:15.217774 | 2014-04-08T14:41:38 | 2014-04-08T14:41:38 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 515 | rd | geom_edge.Rd | \name{geom_edge}
\alias{geom_edge}
\title{geom_edge}
\usage{
geom_edge(mapping = NULL, data = NULL, graph = NULL,
directed = FALSE, ...)
}
\arguments{
\item{mapping}{the aesthetic mapping}
\item{data}{data frame}
\item{graph}{the graph}
\item{directed}{currently ignored}
\item{...}{further parameters for geom_segment}
}
\value{
a geom_segment for ggplot
}
\description{
geom for edges
}
\details{
Computes the edge line segments of the graph and draws them with \code{geom_segment}.
}
\author{
Barry S Rowlingson
}
|
f424637b1174f20076f683a97bb860df314c300a | 16e0c39a5355479827f56268b2f26fe4654bdd59 | /RandomForest_Text.R | 42fa53ac5c4b2b3a8f8c3b7621acc886a6dc9c12 | [] | no_license | vijaysakhamuri/TextMining-Topic-Modelling | c76c2724b9f8baa34b5db404ccdc343a32564c93 | ed31851818c4e0e532aea527f7820b2670e67b31 | refs/heads/master | 2020-04-06T13:21:56.698496 | 2018-11-14T06:03:23 | 2018-11-14T06:03:23 | 157,496,471 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,766 | r | RandomForest_Text.R | ####################################################################
###########
# NOTE(review): hard-coded absolute working directory; this only runs on the
# original author's machine.
setwd("F:\\DataMining_R\\3_LectureData\\section13")
# Hotel-review data with `deceptive`, `polarity` and `text` columns
# (per the usage below).
reviews=read.csv("deceptive-opinion.csv")
head(reviews)
names(reviews)
unique(reviews$polarity)
reviews$text = as.character(reviews$text)
##convert word to numbers
# polarity: 1 = positive, 0 = negative; deceptive: 1 = truthful, 0 = deceptive.
reviews$polarity = as.integer(reviews$polarity=="positive")
reviews$deceptive = as.integer(reviews$deceptive=="truthful")
str(reviews)
table(reviews$deceptive,reviews$polarity)
# tm preprocessing: strip punctuation/numbers/extra whitespace, lower-case,
# drop English stopwords plus corpus-specific terms, then stem.
library(tm)
corpus = Corpus(VectorSource(reviews$text))
corpus = tm_map(corpus, removePunctuation)
corpus = tm_map(corpus, content_transformer(tolower))
corpus = tm_map(corpus, removeNumbers)
corpus = tm_map(corpus, stripWhitespace)
corpus = tm_map(corpus, removeWords, stopwords('english'))
corpus = tm_map(corpus, removeWords, c("hotel", "hotels", "chicago"))
corpus = tm_map(corpus, stemDocument)
# Document-term matrix; findAssocs lists terms correlated with "room".
freq = DocumentTermMatrix(corpus)
freq
findAssocs(freq, terms="room", corlimit=0.25)
# Dense data frame of term counts with syntactically valid column names.
newcorpus = as.data.frame(as.matrix(freq))
colnames(newcorpus) = make.names(colnames(newcorpus))
head(newcorpus)
newcorpus$deceptive = reviews$deceptive
# 75/25 train/test split (seeded for reproducibility).
library(caret)
set.seed(99)
Train = createDataPartition(newcorpus$deceptive, p=0.75, list=FALSE)
#split data in 75%-25% ratio
training = newcorpus[ Train, ] #75% data for training
testing = newcorpus[ -Train, ] #25% testing
library(caTools)
library(e1071)
library(randomForest)
# Random forest classifier on the term counts; evaluate on the hold-out set.
rfdeceptive = randomForest(as.factor(deceptive)~., data=training)
pred1 = predict(rfdeceptive, testing, type="class")
table(pred1, testing$deceptive)
confusionMatrix(pred1, testing$deceptive)
# Plot the 10 terms the forest ranks as most important.
varImpPlot(rfdeceptive, n.var=10)
# important words that affect the accuracy of the model.
3f97d8ca6690c797b1fc952a4bea59360e543a3d | 9edac818516c2b79a759c72d8285c746f1eef235 | /man/df.stack.Rd | 596cd6dffc5bf7d981551dee1b789c9a75bbf18f | [] | no_license | cran/PMmisc | b49d749b3911a583b0dca7ca94f845a08412216c | c29d91ebd3998fb7e1af1591232ac95d0d617901 | refs/heads/master | 2020-03-15T11:47:58.206657 | 2018-11-21T15:40:03 | 2018-11-21T15:40:03 | 132,128,542 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 801 | rd | df.stack.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/df.stack.R
\name{df.stack}
\alias{df.stack}
\title{Stack data frame by one classifier}
\usage{
df.stack(df,name)
}
\arguments{
\item{df}{: a data frame used to stack}
\item{name}{: new variable names of the data frame}
}
\description{
Stack a data frame by one classifier. This function takes the first column as an ordering variable.
It then takes the variable names and repeats them as the second column. The last column contains the data under each variable name. This
function is intended to make investigation with apply functions easier.
}
\examples{
df <- data.frame(matrix(nrow=100,ncol=100))
for(i in 1:100){
df[,i] <- rep(runif(1,1,100),100)
}
dim(df)
hdf <- df.stack(df,c("date","tkr","price"))
}
|
9c860c3a46f0de99798d1b1812b270fc6899ab9d | df2b962f7a7dee56c5358cd83c73aa4c051df512 | /R/plot.noia.marloc.R | 95ef0b741e4ee716fd4d462418cea1b090398706 | [] | no_license | cran/noia | 787eb4f7ab9db691f11baff8fa9cde6c2f2c8660 | dacfb23d55f7fb1cb0d22428ff6aafb5d14a5b79 | refs/heads/master | 2023-03-24T11:03:33.032673 | 2023-03-08T07:10:10 | 2023-03-08T07:10:10 | 17,697,929 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 577 | r | plot.noia.marloc.R | plot.noia.marloc <-
function(x, xlab=NULL, ylim=NULL, ylab=attr(x, "what"), ...)
{
    # Labels have the form "allele|frequency"; split one into its two parts.
    split_label <- function(lab) strsplit(lab, split = "|", fixed = TRUE)[[1]]
    # Only one-dimensional results can be drawn as a line plot.
    if (length(dim(x)) > 1) {
        stop("Impossible to plot an array. For two dimensions, try image.")
    }
    # Default x-axis label: the allele name taken from the first label.
    if (is.null(xlab)) {
        xlab <- split_label(names(x)[1])[1]
    }
    if (is.null(ylim)) {
        ylim <- range(x)
        # Variance components cannot be negative, so anchor the axis at zero.
        if (attr(x, "what") %in% c("varA", "varG")) {
            ylim[1] <- 0
        }
    }
    # Allele frequencies come from the second part of each row label.
    freqs <- vapply(rownames(x),
                    function(lab) as.numeric(split_label(lab)[2]),
                    numeric(1))
    plot(x=freqs, y=x, xlab=xlab, ylim=ylim, ylab=ylab, type="l", ...)
}
|
ec824dcae847b1b202112cd2af1f71eb07d0a6c9 | 61ae15bd0c3b0f5f6755baaf55f7e74eb32d7659 | /options/simulate_SV.r | 1c0d4eec77afd601a10e13bc6ea41d8d0ddc6f32 | [] | no_license | anuragr81/research | c162fd8199ffae5f08465f27c943624a9cb807f1 | ee9e5c24da1cc545410fbad30b2e23e1d6438f27 | refs/heads/master | 2023-08-21T14:09:25.001243 | 2023-08-21T14:05:46 | 2023-08-21T14:05:46 | 22,212,095 | 0 | 0 | null | 2023-05-01T22:18:34 | 2014-07-24T12:29:32 | R | UTF-8 | R | false | false | 652 | r | simulate_SV.r |
set.seed(12345)
simulate<-function (n) {
phi <- 0.99 # AR(1) persistence parameter
sigma <- 0.08 # standard deviation of AR(1) noise
mu <- 0.01 # scalar factor in observation process
nu <- 8 # degrees of freedom of the t-distributed error variable in the observation process
g <- obs <- rep(NA,n)
g[1] <- rnorm(1,0,sigma/sqrt(1-phi^2)) # initial log-volatility drawn from stationary distribution of AR(1) process
for (k in 2:n){
g[k] <- phi*g[k-1]+rnorm(1,0,sigma)
}
for (k in 1:n){
obs[k] <- mu*exp(g[k]/2)*rt(1,nu)
}
hist(obs)
#plot(obs,xlab="time",ylab="simulated returns",type="l",ylim=c(-max(abs(obs)),max(abs(obs))))
} |
13d02ac7493372c20ca3aa9d82b71b7fc6856a4f | 2550544b679e91ad8c1ae8e4bb6fc3a37bc90f82 | /scripts/R/post_process_proteomics.R | 6f4b051f12f15fe07ad364d26d0f91f98e70b473 | [] | no_license | mlpaff/phage_attenuation | ba93525ff6dfb59127ad324750b18f365c0812b1 | d704da661ecc37378909042aa85c595def5883a6 | refs/heads/master | 2021-06-19T12:08:04.272137 | 2017-07-13T21:23:34 | 2017-07-13T21:23:34 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,724 | r | post_process_proteomics.R | ###########################################################################
## This script processes thermo MSF files that have been exported from
## from the Thermo software, after a database search has been conducted.
## The input files contain all peptides detected and the corresponding
## protein groups that they have been assigned.
##
## Author: Benjamin R. Jack
## Email: benjamin.r.jack@gmail.com
## May 2016
###########################################################################
# Remove any variables in memory
rm(list = ls())
# Set raw data directory
RAW_DATA_SRC <- "/Volumes/Seagate/data"
# Load appropriate libraries
library(readr)
library(dplyr)
library(tidyr)
library(parsemsf)
library(readxl)
library(stringr)
normalize_areas <- function(prots) {
  # Normalize protein peak areas to total E. coli protein content.
  #
  # Args:
  #   prots: quantitation output with at least protein_desc, area_mean,
  #          strain and time columns (assumes it arrives per strain/time
  #          via the grouped do() in process_replicates -- TODO confirm).
  #
  # Returns: prots with contaminant hits removed and added columns
  #   org (phage/ecoli), org_area, ecoli_area, and
  #   area_norm = area_mean / ecoli_area.
  #
  # Unobserved proteins (or proteins without enough peptides to quantitate)
  # come through as NA; treat them as zero area.
  prots[is.na(prots)] <- 0
  # Normalize areas to total E. coli content
  prots <- prots %>%
    filter(!grepl('CONTAMINANT', protein_desc)) %>% # Filter out contaminants
    ungroup() %>%
    mutate(org = ifelse(grepl('NP', protein_desc), 'phage', 'ecoli')) %>% # 'NP' accessions are phage; everything else E. coli
    group_by(strain, time, org) %>%
    # Total area per organism within each strain/time sample
    mutate(org_area = sum(area_mean)) %>%
    ungroup() %>%
    group_by(strain, time) %>%
    # Hack-y way of deriving the summed E. coli area: for phage rows,
    # subtract the phage total from the sample total; for E. coli rows,
    # org_area already is the E. coli total.
    mutate(ecoli_area = ifelse((org == "phage"), (sum(area_mean) - org_area), org_area)) %>%
    # Normalize each protein's area to the E. coli total
    mutate(area_norm = area_mean/ecoli_area) -> prots2
  return(prots2)
}
process_replicates <- function(df) {
  # Quantitate and normalize all mass-spec runs of one biological replicate.
  #
  # Args:
  #   df: data frame with strain, time and file columns, one row per MSF
  #       search-result file.
  #
  # Returns: a grouped data frame of per-protein areas, normalized to total
  #   E. coli content by normalize_areas().
  #
  # Map duplicate/ambiguous protein-group labels onto one canonical
  # accession so the same protein is not counted under two names.
  # Use <- and FALSE (not `=` / `F`): `F` is a reassignable binding.
  relabel <- c('NP_041997.1; NP_041998.1' = 'NP_041998.1',
               'NP_041975.1; NP_041977.1' = 'NP_041975.1',
               'NP_041997.1' = 'NP_041998.1',
               'NP_041977.1' = 'NP_041975.1')
  out_df <- df %>%
    group_by(strain, time) %>%
    # Raw (unnormalized) quantitation per file; normalization is done
    # explicitly afterwards by normalize_areas().
    do(quantitate(as.character(.$file), normalize = FALSE, relabel = relabel)) %>%
    group_by(strain, time) %>%
    do(normalize_areas(.))
  return(out_df)
}
# Build the sample table: keep only mass-spec samples, skip biological
# replicate 1, and point each row at its .msf search-result file, then run
# the quantitation/normalization pipeline once per biological replicate.
samples <- read_excel(paste0(RAW_DATA_SRC, "/sample_list.xlsx")) %>%
  filter(`Biological replicate` != 1, `Data Type` == "mass-spec") %>% # Skip replicate 1
  mutate(File = paste0(RAW_DATA_SRC, "/mass_spec/msf_1_4/", str_replace(File, ".raw", ".msf"))) %>% # .raw -> searched .msf path
  rename(b_rep = `Biological replicate`, time = `Time point`, strain = Strain, file = File) %>%
  select(b_rep, strain, time, file) %>%
  nest(-b_rep) %>% # one nested data frame of samples per biological replicate
  mutate(msf = purrr::map(data, process_replicates)) # quantitate + normalize each replicate
# Flatten the per-replicate results back into one long table.
out <- samples %>%
  select(b_rep, msf) %>%
  unnest()
# Write out data
write_csv(out, "./data/proteomics/abundances/all_reps.csv")
|
36814aeb3c184d9ee1ba1639434633f1731b415f | 85fd7570382bdccabc8daf3a41b561efb9cdf622 | /2 Biota/5 GET/code/B.GET.Consume.setup.01.R | 525a420dc489343f49fa3c115a90f0912aaca775 | [] | no_license | AndrewJConstable/EPOCuniverse | 2b86f40d2539cf79a66285cc8cd67d0c391bb669 | e6561eed547befde5b88563f73d96ce9033a5179 | refs/heads/master | 2021-11-25T23:35:40.476219 | 2021-11-17T01:15:33 | 2021-11-17T01:15:33 | 104,179,577 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,313 | r | B.GET.Consume.setup.01.R | # Create S4 method 'consumeSetup'
# Create the S4 generic 'consumeSetup' only if it does not already exist,
# so repeated sourcing of this file is harmless.
if (!isGeneric("consumeSetup"))
    setGeneric("consumeSetup", function(element, period="Period", ptSA="list", modulenum="numeric", elementnum="numeric",
                                        firstPeriod="numeric", dset="list") standardGeneric("consumeSetup"))
setMethod("consumeSetup", "GET",
          function(
                   element,     # element environment
                   period,      # period
                   ptSA,        # untransformed action for the period derived from timestep of element.
                                # Note: ptSA is a list retained for concatenating to a list;
                                # the action itself is the first element in the list.
                   modulenum,   # reference module number for the element
                   elementnum,  # relative number of the element in the universe
                   firstPeriod, # logical indicating if this is the first period in the timestep
                   dset         # dataset to assist with transformation
                   )
{
    # Function: B.Pr.KPFM.Consume.setup.01 (v0.01, A.Constable, 4 July 2008)
    # General consumer setup: scale each per-capita feeding rate by
    # (duration in year of period) / (duration of timestep).
    #
    # The fraction of the element's timestep accounted for by this period is
    # stored in the calendar as a matrix per module, where rows = element
    # (by relative number) and columns are:
    #   col 1 = timestep of the element occurring in the calendar period
    #   col 2 = proportion of that timestep accounted for by the period
    #   col 3 = sequence number of the period within the timestep
    adj <- getPeriodElementTSData(period, modulenum, elementnum)[2]
    # seq_len() (rather than 1:n) yields an empty loop when a count is zero,
    # instead of wrongly iterating over c(1, 0).
    for (st in seq_len(getState(element, "StageN"))) {
        if (!is.null(ptSA[[1]]$dset[[st]])) {
            for (prey in seq_len(nrow(ptSA[[1]]$relatedElements))) {
                if (!is.null(ptSA[[1]]$dset[[st]]$Prey[[prey]][[1]])) {
                    # Scale this stage/prey per-capita feeding rate in place.
                    ptSA[[1]]$dset[[st]]$Prey[[prey]][[1]] <- ptSA[[1]]$dset[[st]]$Prey[[prey]][[1]] * adj
                }
            }
        }
    }
    return(ptSA)
}
)
###############################################################################
###############################################################################
# test routines
|
adbdc16ed2c0b8fd00fd3edf97d0171ca3750b3e | 8f6565e5310613a58bffb390591dbada2d4d00f7 | /scripts/internal/neigh_smooth.r | 9bf6f415286f64cdbc6be3c2ed2a5ff403226f29 | [] | no_license | AugustT/whats_flying_tonight | 2aa0ee6e85ea87449b2347b877bbe2640437c3d7 | afda2a00ca4df9a4152725812ec65657c1aeac68 | refs/heads/master | 2021-01-18T05:11:10.861463 | 2018-11-16T14:38:43 | 2018-11-16T14:38:43 | 45,030,483 | 0 | 1 | null | 2015-10-27T09:33:47 | 2015-10-27T09:33:47 | null | UTF-8 | R | false | false | 4,145 | r | neigh_smooth.r | neigh_smooth = function(gridref, smooth_radius = 1, square_list = NULL, sq_dimen = 10000, output_type = "neigh_only"){
  # Returns the 10km grid squares neighbouring the supplied grid references
  # within smooth_radius cells, optionally restricted to square_list.
  # NOTE at present gridrefs assumed to be 10km gridrefs however could adapt it to work at other grid sizes
  # If square_list is NULL then return any square that can be validly converted to a gridreference in the same projection
  # Valid options for output_type are "neigh_only" or "both"
  # NOTE(review): relies on project helpers gr_let2num()/gr_num2let() for
  # gridref <-> easting/northing conversion -- semantics assumed, confirm there.
  if(sq_dimen != 10000){
    stop("ERROR: At present function will only work for 10km grid references")
  }
  # Determine unique gridrefs
  data_gr = data.frame(GRIDREF = unique(gridref), stringsAsFactors = FALSE)
  # Determine eastings and northings (and projection) for each gridref
  data_gr[, c("EASTING","NORTHING", "PROJECTION")] = gr_let2num(data_gr$GRIDREF, centre = FALSE, return_projection = TRUE)
  # Filter list of all squares to remove those that are in gridref
  if(!is.null(square_list)){
    square_list = square_list[!square_list %in% data_gr$GRIDREF]
  }
  # For each gridref work out neighbours
  # Work out deviations from current easting & northing to calculate neighbours
  # Number of neighbours in radius (all cells of the square neighbourhood minus the focal cell)
  no_neigh = (((smooth_radius*2)+1)^2)-1
  # Deviations in one axis around squares, in metres
  neigh_devs = (-smooth_radius:smooth_radius)*sq_dimen
  # All conversion factors required to get neighbours (number of rows is number of cells in neighbourhood including focal square)
  neigh_conv = data.frame(EAST = rep(neigh_devs, each = (smooth_radius*2)+1 ), NORTH = rep(neigh_devs, times = (smooth_radius*2)+1))
  # Remove focal square from neigh_conv (where east and north deviations are 0) and reset row numbering
  neigh_conv = neigh_conv[!(neigh_conv$EAST == 0 & neigh_conv$NORTH == 0),]
  rownames(neigh_conv) = NULL
  # Now extend neigh_conv to be same dimensions as data_gr (so they can be added together)
  neigh_conv = data.frame(EAST = rep(neigh_conv$EAST, nrow(data_gr)), NORTH = rep(neigh_conv$NORTH, nrow(data_gr)))
  # Setup dataframe to hold data: one row per (focal square, neighbour offset) pair
  sq_neighbours = data.frame(
    SQUARE = rep(data_gr$GRIDREF, each = no_neigh),
    EASTING = rep(data_gr$EASTING, each = no_neigh),
    NORTHING = rep(data_gr$NORTHING, each = no_neigh),
    PROJECTION = rep(data_gr$PROJECTION, each = no_neigh),
    NEIGH_EASTING = NA,
    NEIGH_NORTHING = NA
  )
  # Calculate neighbour eastings & northings by applying the offsets
  sq_neighbours[,c("NEIGH_EASTING","NEIGH_NORTHING")] = sq_neighbours[,c("EASTING","NORTHING")] + neigh_conv
  # Remove any eastings/northings that are negative (off the grid origin)
  sq_neighbours = sq_neighbours[!sq_neighbours$NEIGH_EASTING < 0 | sq_neighbours$NEIGH_NORTH < 0,]
  # Get rid of duplicates and also information on the focal cell: we only want
  # which cells are neighbours, not which cell they are a neighbour to
  sq_neighbours = unique(sq_neighbours[,c("NEIGH_EASTING","NEIGH_NORTHING", "PROJECTION")])
  # Determine gridrefs from these eastings and northings
  sq_neighbours[,"NEIGH_GRIDREF"] = gr_num2let(sq_neighbours$NEIGH_EASTING, sq_neighbours$NEIGH_NORTHING, OSgrid = sq_neighbours$PROJECTION, keep_precision = FALSE)
  # Look for any neighbours where the returned gridref is NA (i.e. eastings & northings could not be changed to a valid gr) and remove
  rm_inds = which(is.na(sq_neighbours$NEIGH_GRIDREF))
  if(length(rm_inds) > 0){
    sq_neighbours = sq_neighbours[-rm_inds,]
  }
  # From this only keep those in the full square list (if square_list supplied)
  if(!is.null(square_list)){
    sq_neighbours = sq_neighbours[sq_neighbours$NEIGH_GRIDREF %in% square_list,]
  }
  # Assemble the return value according to output_type
  if(tolower(output_type) == "neigh_only"){
    out_obj = sq_neighbours$NEIGH_GRIDREF
  } else if(tolower(output_type) == "both") {
    # Combine original provided gridrefs (OBSERVED = 1) with the filled
    # neighbouring gridrefs (OBSERVED = 0)
    out_obj = data.frame(GRIDREF = c(gridref, sq_neighbours$NEIGH_GRIDREF), OBSERVED = c(rep(1, length(gridref)), rep(0, length(sq_neighbours$NEIGH_GRIDREF))), stringsAsFactors = FALSE)
  } else {
    # Unrecognised output_type: warn and fall back to neighbours only
    out_obj = sq_neighbours$NEIGH_GRIDREF
    cat("WARNING: output_value (\"", output_type, "\") not recognised, permitted values are \"neigh_only\" or \"both\".\nNOTE: Only neighbouring gridrefs have been returned\n", sep="")
  }
  # Return output
  return(out_obj)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.