blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c21cdb479737324a2bf002caea6cbc35342abd72
|
e0b530f1d389c1de35175643d306eb4be64445f4
|
/googleyoutubereportingv1.auto/R/youtubereporting_functions.R
|
0de2604ed1a621b647a2f81948eff325c3c9e463
|
[
"MIT"
] |
permissive
|
Phippsy/autoGoogleAPI
|
3ce645c2432b8ace85c51c2eb932e1b064bbd54a
|
d44f004cb60ce52a0c94b978b637479b5c3c9f5e
|
refs/heads/master
| 2021-01-17T09:23:17.926887
| 2017-03-05T17:41:16
| 2017-03-05T17:41:16
| 83,983,685
| 0
| 0
| null | 2017-03-05T16:12:06
| 2017-03-05T16:12:06
| null |
UTF-8
|
R
| false
| false
| 9,134
|
r
|
youtubereporting_functions.R
|
#' YouTube Reporting API
#' Schedules reporting jobs containing your YouTube Analytics data and downloads the resulting bulk data reports in the form of CSV files.
#'
#' Auto-generated code by googleAuthR::gar_create_api_skeleton
#' at 2016-09-03 23:23:23
#' filename: /Users/mark/dev/R/autoGoogleAPI/googleyoutubereportingv1.auto/R/youtubereporting_functions.R
#' api_json: api_json
#'
#' @details
#' Authentication scopes used are:
#' \itemize{
#' \item https://www.googleapis.com/auth/yt-analytics-monetary.readonly
#' \item https://www.googleapis.com/auth/yt-analytics.readonly
#' }
#'
#' @docType package
#' @name youtubereporting_googleAuthR
#'
NULL
## NULL
#' Method for media download. Download is supported on the URI `/v1/media/{+name}?alt=media`.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://developers.google.com/youtube/reporting/v1/reports/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/yt-analytics-monetary.readonly
#'   \item https://www.googleapis.com/auth/yt-analytics.readonly
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/yt-analytics-monetary.readonly", "https://www.googleapis.com/auth/yt-analytics.readonly"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param resourceName Name of the media that is being downloaded
#' @importFrom googleAuthR gar_api_generator
#' @export
media.download <- function(resourceName) {
    # BUGFIX: the original template had no sprintf format specifier, so the
    # literal `{+resourceName}` stayed in the URL and the argument was silently
    # ignored (R's sprintf drops unused extra arguments). Substitute it properly.
    url <- sprintf("https://youtubereporting.googleapis.com/v1/media/%s",
        resourceName)
    # youtubereporting.media.download
    f <- gar_api_generator(url, "GET", data_parse_function = function(x) x)
    f()
}
#' Lists report types.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://developers.google.com/youtube/reporting/v1/reports/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/yt-analytics-monetary.readonly
#'   \item https://www.googleapis.com/auth/yt-analytics.readonly
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/yt-analytics-monetary.readonly", "https://www.googleapis.com/auth/yt-analytics.readonly"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param onBehalfOfContentOwner The content owner's external ID on which behalf the user is acting on
#' @param pageSize Requested page size
#' @param pageToken A token identifying a page of results the server should return
#' @param includeSystemManaged If set to true, also system-managed report types will be returned; otherwise only the report types that can be used to create new reporting jobs will be returned
#' @importFrom googleAuthR gar_api_generator
#' @export
reportTypes.list <- function(onBehalfOfContentOwner = NULL, pageSize = NULL, pageToken = NULL,
    includeSystemManaged = NULL) {
    # youtubereporting.reportTypes.list
    endpoint <- "https://youtubereporting.googleapis.com/v1/reportTypes"
    # Optional query parameters forwarded with the request.
    query <- list(
        onBehalfOfContentOwner = onBehalfOfContentOwner,
        pageSize = pageSize,
        pageToken = pageToken,
        includeSystemManaged = includeSystemManaged
    )
    request <- gar_api_generator(endpoint, "GET", pars_args = query,
        data_parse_function = function(x) x)
    request()
}
#' Creates a job and returns it.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://developers.google.com/youtube/reporting/v1/reports/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/yt-analytics-monetary.readonly
#'   \item https://www.googleapis.com/auth/yt-analytics.readonly
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/yt-analytics-monetary.readonly", "https://www.googleapis.com/auth/yt-analytics.readonly"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param Job The \link{Job} object to pass to this method
#' @param onBehalfOfContentOwner The content owner's external ID on which behalf the user is acting on
#' @importFrom googleAuthR gar_api_generator
#' @family Job functions
#' @export
jobs.create <- function(Job, onBehalfOfContentOwner = NULL) {
    # youtubereporting.jobs.create
    # Guard: the request body must be a gar_Job object.
    stopifnot(inherits(Job, "gar_Job"))
    endpoint <- "https://youtubereporting.googleapis.com/v1/jobs"
    request <- gar_api_generator(endpoint, "POST",
        pars_args = list(onBehalfOfContentOwner = onBehalfOfContentOwner),
        data_parse_function = function(x) x)
    request(the_body = Job)
}
#' Lists jobs.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://developers.google.com/youtube/reporting/v1/reports/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/yt-analytics-monetary.readonly
#'   \item https://www.googleapis.com/auth/yt-analytics.readonly
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/yt-analytics-monetary.readonly", "https://www.googleapis.com/auth/yt-analytics.readonly"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param onBehalfOfContentOwner The content owner's external ID on which behalf the user is acting on
#' @param pageSize Requested page size
#' @param pageToken A token identifying a page of results the server should return
#' @param includeSystemManaged If set to true, also system-managed jobs will be returned; otherwise only user-created jobs will be returned
#' @importFrom googleAuthR gar_api_generator
#' @export
jobs.list <- function(onBehalfOfContentOwner = NULL, pageSize = NULL, pageToken = NULL,
    includeSystemManaged = NULL) {
    # youtubereporting.jobs.list
    endpoint <- "https://youtubereporting.googleapis.com/v1/jobs"
    # Optional query parameters forwarded with the request.
    query <- list(
        onBehalfOfContentOwner = onBehalfOfContentOwner,
        pageSize = pageSize,
        pageToken = pageToken,
        includeSystemManaged = includeSystemManaged
    )
    request <- gar_api_generator(endpoint, "GET", pars_args = query,
        data_parse_function = function(x) x)
    request()
}
#' Gets a job.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://developers.google.com/youtube/reporting/v1/reports/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/yt-analytics-monetary.readonly
#'   \item https://www.googleapis.com/auth/yt-analytics.readonly
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/yt-analytics-monetary.readonly", "https://www.googleapis.com/auth/yt-analytics.readonly"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param jobId The ID of the job to retrieve
#' @param onBehalfOfContentOwner The content owner's external ID on which behalf the user is acting on
#' @importFrom googleAuthR gar_api_generator
#' @export
jobs.get <- function(jobId, onBehalfOfContentOwner = NULL) {
    # youtubereporting.jobs.get
    # The job id is interpolated into the URL path.
    endpoint <- sprintf("https://youtubereporting.googleapis.com/v1/jobs/%s", jobId)
    request <- gar_api_generator(endpoint, "GET",
        pars_args = list(onBehalfOfContentOwner = onBehalfOfContentOwner),
        data_parse_function = function(x) x)
    request()
}
#' Deletes a job.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://developers.google.com/youtube/reporting/v1/reports/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/yt-analytics-monetary.readonly
#'   \item https://www.googleapis.com/auth/yt-analytics.readonly
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/yt-analytics-monetary.readonly", "https://www.googleapis.com/auth/yt-analytics.readonly"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param jobId The ID of the job to delete
#' @param onBehalfOfContentOwner The content owner's external ID on which behalf the user is acting on
#' @importFrom googleAuthR gar_api_generator
#' @export
jobs.delete <- function(jobId, onBehalfOfContentOwner = NULL) {
    # youtubereporting.jobs.delete
    # The job id is interpolated into the URL path.
    endpoint <- sprintf("https://youtubereporting.googleapis.com/v1/jobs/%s", jobId)
    request <- gar_api_generator(endpoint, "DELETE",
        pars_args = list(onBehalfOfContentOwner = onBehalfOfContentOwner),
        data_parse_function = function(x) x)
    request()
}
|
52bc37f9ed88afabcef8bbd7863a2a8c2e0aca7c
|
c48b1d1d98128cb3c3d1bdf08f917276a02b1cc1
|
/sources/modules/VETravelPerformanceDL/man/OpCosts_ls.Rd
|
27f1eca253a6804793b43ae8e9426f4bde9415cc
|
[
"Apache-2.0"
] |
permissive
|
jslason-rsg/BG_OregonDOT-VisionEval
|
5ed51d550fb97793679d94d6d169fa091a89ba31
|
045cfecf341ec9fa80eac8d9614b4e547b0de250
|
refs/heads/master
| 2023-08-22T08:22:54.495432
| 2021-10-12T18:01:48
| 2021-10-12T18:01:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,425
|
rd
|
OpCosts_ls.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CalculateVehicleOperatingCost.R
\docType{data}
\name{OpCosts_ls}
\alias{OpCosts_ls}
\title{Vehicle operations costs}
\format{
A list containing the following seven components:
\describe{
\item{VehCost_AgTy}{a matrix of annual vehicle maintenance, repair and tire costs by vehicle type and age category in 2010 dollars}
\item{CO2eCost_}{a vector of greenhouse gas emissions costs by forecast year in 2010 dollars per metric ton of carbon dioxide equivalents}
\item{OtherExtCost_}{a vector of other social costs by cost category. Values are in 2010 dollars per vehicle mile except for EnergySecurity which is in 2010 dollars per gasoline equivalent gallon}
\item{BLSOpCost_df}{a data frame of household vehicle annual maintenance, repair, and tire cost by vehicle age}
\item{VehicleCostByAgeAndType_df}{a data frame of calculated vehicle maintenance, repair, and tire cost per mile by vehicle type and age}
\item{CO2eCost_df}{a data frame of estimated cost of carbon in dollars per tonne by year under various scenarios}
\item{OtherExtCost_df}{a data frame of other social costs by type}
}
}
\source{
CalculateVehicleOperatingCost.R script.
}
\usage{
OpCosts_ls
}
\description{
A list containing vehicle operations cost items for maintenance, repair,
tires, greenhouse gas emissions costs, and other social costs.
}
\keyword{datasets}
|
185520c32ee523fe36ae5d20fbbd337d22921706
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/EMMIXskew/examples/inverse.Rd.R
|
95ae840c4fae23c38ef8fc5b0ebaa82c5ab3c69c
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 169
|
r
|
inverse.Rd.R
|
# Extracted example for EMMIXskew::inverse()
library(EMMIXskew)
### Name: inverse
### Title: Inverse of a covariance matrix
### Aliases: inverse
### ** Examples
# NOTE(review): this 2x2 matrix (1 in the top-left, zeros elsewhere) is
# singular, so base solve() would fail on it; presumably EMMIXskew::inverse()
# handles rank-deficient covariance matrices -- confirm against the package docs.
a<- matrix(c(1,0,0,0),ncol=2)
# print the matrix
a
# invert with dimension argument 2
inverse(a,2)
|
46889b0df3917f5e74bb1c3700fb03d2732d0366
|
e0b530f1d389c1de35175643d306eb4be64445f4
|
/googlespeechv1beta1.auto/R/speech_functions.R
|
363c396dbb0cd3d8ee295830dc69778af96404ec
|
[
"MIT"
] |
permissive
|
Phippsy/autoGoogleAPI
|
3ce645c2432b8ace85c51c2eb932e1b064bbd54a
|
d44f004cb60ce52a0c94b978b637479b5c3c9f5e
|
refs/heads/master
| 2021-01-17T09:23:17.926887
| 2017-03-05T17:41:16
| 2017-03-05T17:41:16
| 83,983,685
| 0
| 0
| null | 2017-03-05T16:12:06
| 2017-03-05T16:12:06
| null |
UTF-8
|
R
| false
| false
| 8,162
|
r
|
speech_functions.R
|
#' Google Cloud Speech API
#' Google Cloud Speech API.
#'
#' Auto-generated code by googleAuthR::gar_create_api_skeleton
#' at 2016-09-03 23:47:37
#' filename: /Users/mark/dev/R/autoGoogleAPI/googlespeechv1beta1.auto/R/speech_functions.R
#' api_json: api_json
#'
#' @details
#' Authentication scopes used are:
#' \itemize{
#' \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' @docType package
#' @name speech_googleAuthR
#'
NULL
## NULL
#' Perform synchronous speech-recognition: receive results after all audio has been sent and processed.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/speech/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param SyncRecognizeRequest The \link{SyncRecognizeRequest} object to pass to this method
#' @importFrom googleAuthR gar_api_generator
#' @family SyncRecognizeRequest functions
#' @export
syncrecognize <- function(SyncRecognizeRequest) {
    url <- "https://speech.googleapis.com/v1beta1/speech:syncrecognize"
    # speech.speech.syncrecognize
    f <- gar_api_generator(url, "POST", data_parse_function = function(x) x)
    # Guard: the request body must be a gar_SyncRecognizeRequest object.
    stopifnot(inherits(SyncRecognizeRequest, "gar_SyncRecognizeRequest"))
    f(the_body = SyncRecognizeRequest)
}
#' Perform asynchronous speech-recognition: receive results via the google.longrunning.Operations interface. Returns either an `Operation.error` or an `Operation.response` which contains an `AsyncRecognizeResponse` message.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/speech/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param AsyncRecognizeRequest The \link{AsyncRecognizeRequest} object to pass to this method
#' @importFrom googleAuthR gar_api_generator
#' @family AsyncRecognizeRequest functions
#' @export
asyncrecognize <- function(AsyncRecognizeRequest) {
    url <- "https://speech.googleapis.com/v1beta1/speech:asyncrecognize"
    # speech.speech.asyncrecognize
    f <- gar_api_generator(url, "POST", data_parse_function = function(x) x)
    # Guard: the request body must be a gar_AsyncRecognizeRequest object.
    stopifnot(inherits(AsyncRecognizeRequest, "gar_AsyncRecognizeRequest"))
    f(the_body = AsyncRecognizeRequest)
}
#' Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/speech/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param name The name of the operation resource
#' @importFrom googleAuthR gar_api_generator
#' @export
operations.get <- function(name) {
    # BUGFIX: the original template had no sprintf format specifier, so the
    # literal `{+name}` stayed in the URL and the argument was silently ignored
    # (R's sprintf drops unused extra arguments). Substitute it properly.
    url <- sprintf("https://speech.googleapis.com/v1beta1/operations/%s", name)
    # speech.operations.get
    f <- gar_api_generator(url, "GET", data_parse_function = function(x) x)
    f()
}
#' Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding below allows API services to override the binding to use different resource name schemes, such as `users/*/operations`.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/speech/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param pageSize The standard list page size
#' @param filter The standard list filter
#' @param name The name of the operation collection
#' @param pageToken The standard list page token
#' @importFrom googleAuthR gar_api_generator
#' @export
operations.list <- function(pageSize = NULL, filter = NULL, name = NULL, pageToken = NULL) {
    # speech.operations.list
    endpoint <- "https://speech.googleapis.com/v1beta1/operations"
    # Optional query parameters forwarded with the request.
    query <- list(
        pageSize = pageSize,
        filter = filter,
        name = name,
        pageToken = pageToken
    )
    request <- gar_api_generator(endpoint, "GET", pars_args = query,
        data_parse_function = function(x) x)
    request()
}
#' Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/speech/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param CancelOperationRequest The \link{CancelOperationRequest} object to pass to this method
#' @param name The name of the operation resource to be cancelled
#' @importFrom googleAuthR gar_api_generator
#' @family CancelOperationRequest functions
#' @export
operations.cancel <- function(CancelOperationRequest, name) {
    # BUGFIX: the original template had no sprintf format specifier, so the
    # literal `{+name}` stayed in the URL and the argument was silently ignored
    # (R's sprintf drops unused extra arguments). Substitute it properly.
    url <- sprintf("https://speech.googleapis.com/v1beta1/operations/%s:cancel",
        name)
    # speech.operations.cancel
    f <- gar_api_generator(url, "POST", data_parse_function = function(x) x)
    # Guard: the request body must be a gar_CancelOperationRequest object.
    stopifnot(inherits(CancelOperationRequest, "gar_CancelOperationRequest"))
    f(the_body = CancelOperationRequest)
}
#' Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/speech/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param name The name of the operation resource to be deleted
#' @importFrom googleAuthR gar_api_generator
#' @export
operations.delete <- function(name) {
    # BUGFIX: the original template had no sprintf format specifier, so the
    # literal `{+name}` stayed in the URL and the argument was silently ignored
    # (R's sprintf drops unused extra arguments). Substitute it properly.
    url <- sprintf("https://speech.googleapis.com/v1beta1/operations/%s", name)
    # speech.operations.delete
    f <- gar_api_generator(url, "DELETE", data_parse_function = function(x) x)
    f()
}
|
2c6d0b823e154b5627c8e96df877bfaa13d0b0c1
|
50c0013d8dd4320d70e48fa7047f7a6d5f967aad
|
/1_data_cleaning.R
|
41ad2c26d2ef198d12d2aa5fa1e188e2e3584715
|
[
"MIT"
] |
permissive
|
Joscha-K/cardio-project-spring-school
|
2c9922dd98f72b8f0efbaf7e83388c2120709eea
|
a43317b01dc65ae6f0503b533c58b6cc930dee8c
|
refs/heads/main
| 2023-07-14T17:49:28.099340
| 2021-08-26T14:05:15
| 2021-08-26T14:05:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 517
|
r
|
1_data_cleaning.R
|
# NOTE(review): removed `rm(list = ls())` -- wiping the caller's global
# environment is a destructive side effect and breaks sourcing this script
# from other code; run it in a fresh R session instead.
library(dplyr)
# set the working directory to cardio-project-site-example/ with setwd
# or use here() in the library(here)

# Read the raw data; keep strings as characters for explicit conversion below.
dat <- read.csv("data.csv", stringsAsFactors = FALSE)

# Coerce measurement columns to numeric (non-numeric entries become NA).
dat$CoronaryCA <- as.numeric(dat$CoronaryCA)
dat$Age_Part <- as.numeric(dat$Age_Part)
dat$Sex <- as.factor(dat$Sex)

# log(x + 1) transform of the coronary calcium score.
dat$ln_cac_plus1 <- log(dat$CoronaryCA + 1)

# Keep rows with a non-missing calcium score, excluding rows where Demo == 1
# and where Sex is coded 3 (exact meaning of these codes is not visible here
# -- confirm against the study codebook).
dat <- dplyr::filter(dat, !is.na(CoronaryCA), Demo != 1, Sex != 3)

# Indicator: 1 if Sex is coded 1, else 0.
dat$male <- as.numeric(dat$Sex == 1)

save(dat, file = "data_clean.RData")
|
dfa6a8928ad54b1027fbf4b64b94d28b52395f9a
|
5f546a630772d4158e10db221b5c7f19fbe31c7f
|
/Nat-Comm-2019_TMT_QE_averages.r
|
4aa8590521f34b4bedc9a33b1899000b2a4c3e20
|
[
"MIT"
] |
permissive
|
pwilmart/BCP-ALL_QE-TMT_Nat-Comm-2019
|
30d8ea99cf334032b87c7802d534b0bb492a22c5
|
646f1f4cf667df4f71aa31b3c994845e18c9a05d
|
refs/heads/master
| 2020-05-06T20:10:59.856144
| 2019-04-11T22:08:15
| 2019-04-11T22:08:15
| 180,223,616
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,805
|
r
|
Nat-Comm-2019_TMT_QE_averages.r
|
# library imports
library(tidyverse)
library(scales)
library(limma)
library(edgeR)
library(psych)
# get the default plot width and height
# (repr options are set by Jupyter/IRkernel -- these are presumably NULL when
# run in a plain R session; confirm if running outside a notebook)
width <- options()$repr.plot.width
height <- options()$repr.plot.height
# load the IRS-normalized data and check the table
data_import <- read_tsv("labeled_grouped_protein_summary_TMT_9_AVE_IRS_normalized.txt", guess_max = 10326)
# the "Filter" column flags contams and decoys
# the "Missing" column flags proteins without reporter ion intensities (full sets missing)
# the prepped table from pandas is sorted so these are the upper rows
data_all <- filter(data_import, is.na(Filter), is.na(Missing))
# save gene names for edgeR so we can double check that results line up
accessions <- data_all$Accession
# see how many rows in the table
nrow(data_all)
# we want to get the SL normed columns, and subsetted by condition
sl_all <- data_all %>%
    select(starts_with("SLNorm"))
sl_HeH <- sl_all %>% select(contains("_HeH_"))
sl_ETV6 <- sl_all %>% select(contains("_ETV6-RUNX1_"))
# and the IRS normed columns by condition
irs_all <- data_all %>%
    select(starts_with("IRSNorm"))
irs_HeH <- irs_all %>% select(contains("_HeH_"))
irs_ETV6 <- irs_all %>% select(contains("_ETV6-RUNX1_"))
# and collect the pooled channels before and after IRS
sl_pool <- sl_all %>% select(contains("QC"))
irs_pool <- irs_all %>% select(contains("QC"))
# multi-panel scatter plot grids from the psych package
pairs.panels(log2(sl_pool), lm = TRUE, main = "Pooled Std before IRS")
pairs.panels(log2(irs_pool), lm = TRUE, main = "Pooled Std after IRS")
# multi-panel scatter plot grids
# NOTE(review): sample() is unseeded, so a different random subset of the 18
# HeH samples is plotted on each run
heh_sample <- sample(1:18, 5)
pairs.panels(log2(sl_HeH[heh_sample]), lm = TRUE, main = "HeH before IRS (random 5)")
pairs.panels(log2(irs_HeH[heh_sample]), lm = TRUE, main = "HeH after IRS (same 5)")
# multi-panel scatter plot grids (same idea for the 9 ETV6-RUNX1 samples)
etv6_sample <- sample(1:9, 5)
pairs.panels(log2(sl_ETV6[etv6_sample]), lm = TRUE, main = "ETV6-RUNX1 before IRS (random 5)")
pairs.panels(log2(irs_ETV6[etv6_sample]), lm = TRUE, main = "ETV6-RUNX1 after IRS (same 5)")
# get the biological sample data into a DGEList object
# (18 HeH samples followed by 9 ETV6 samples, matching the cbind column order)
# IDIOM FIX: use `<-` for assignment instead of `=` (here and for `colors`).
group <- c(rep('HeH', 18), rep('ETV6', 9))
y_sl <- DGEList(counts = cbind(sl_HeH, sl_ETV6), group = group, genes = accessions)
y_irs <- DGEList(counts = cbind(irs_HeH, irs_ETV6), group = group, genes = accessions)
# run TMM normalization (also includes a library size factor)
y_sl <- calcNormFactors(y_sl)
y_irs <- calcNormFactors(y_irs)
# set some colors by condition
colors <- c(rep('red', 18), rep('blue', 9))
# check the clustering
plotMDS(y_sl, col = colors, main = "SL: all samples")
plotMDS(y_irs, col = colors, main = "IRS: all samples")
# we do not want the technical replicates in the mix for dispersion estimates
irs <- cbind(irs_HeH, irs_ETV6)
# load a new DGEList object (need to update the groups)
y <- DGEList(counts = irs, group = group, genes = accessions) # group was set above
y <- calcNormFactors(y)
# see what the normalization factors look like
y$samples
# Compute the normalized intensities (start with the IRS data)
# sample loading adjusts each channel to the same average total
lib_facs <- mean(colSums(irs)) / colSums(irs)
# print("Sample loading normalization factors")
print("Library size factors")
round(lib_facs, 4)
# the TMM factors are library adjustment factors (so divide by them)
norm_facs <- lib_facs / y$samples$norm.factors
# print these final correction factors
print("Combined (lib size and TMM) normalization factors")
round(norm_facs, 4)
# compute the normalized data as a new data frame
irs_tmm <- sweep(irs, 2, norm_facs, FUN = "*")
colnames(irs_tmm) <- str_c(colnames(irs), "_TMMnorm") # add suffix to col names
# head(results) # check that the column headers are okay
long_results <- gather(irs_tmm, key = "sample", value = "intensity") %>%
    mutate(log_int = log10(intensity)) %>%
    extract(sample, into = 'group', ".*?_(.*?)_", remove = FALSE)
head(long_results)
ggplot(long_results, aes(x = sample, y = log_int, fill = group)) +
    geom_boxplot(notch = TRUE) +
    coord_flip() +
    ggtitle("edgeR normalized data")
# look at normalized intensity distributions for each sample
boxplot(log10(irs_tmm), col = colors,
    xlab = 'TMT samples', ylab = 'log10 Intensity',
    main = 'edgeR normalized data', notch = TRUE)
ggplot(long_results, aes(x = log_int, color = sample)) +
    geom_density() +
    guides(color = FALSE) +
    ggtitle("edgeR normalized data (with legend is too busy)")
# we can compare CVs before and after IRS
sl <- cbind(sl_HeH, sl_ETV6)
# save column indexes for different conditions (indexes to data_raw frame)
# these make things easier (and reduce the chance for errors)
HeH <- 1:18
ETV6 <- (1:9) + 18
# create a CV computing function
# Per-row coefficient of variation (%) for a data frame of intensities.
#   df - data frame (rows = proteins, columns = samples)
# Returns a numeric vector, one CV per row.
CV <- function(df) {
    row_means <- rowMeans(df)
    # row-wise standard deviations (locals renamed so they no longer shadow
    # stats::sd / stats::ave)
    row_sds <- apply(df, 1, sd)
    # Return the expression directly: the original ended with an assignment,
    # which made the function's return value invisible at top level.
    100 * row_sds / row_means
}
# put CVs in data frames to simplify plots and summaries
# (columns pair each condition's CVs before IRS ("sl") and after IRS+TMM ("final"))
cv_frame <- data.frame(HeH_sl = CV(sl[HeH]), HeH_final = CV(irs_tmm[HeH]),
    ETV6_sl = CV(sl[ETV6]), ETV6_final = CV(irs_tmm[ETV6]))
# see what the median CV values are
medians <- apply(cv_frame, 2, FUN = median)
print("Median CVs by condition, before/after IRS (%)")
round(medians, 1)
# see what the CV distributions look like
# need long form for ggplot
long_cv <- gather(cv_frame, key = "condition", value = "cv") %>%
    extract(condition, into = 'group', "(.*?)_+", remove = FALSE)
# traditional boxplots
cv_plot <- ggplot(long_cv, aes(x = condition, y = cv, fill = group)) +
    geom_boxplot(notch = TRUE) +
    ggtitle("CV distributions")
# vertical orientation
cv_plot
# horizontal orientation
cv_plot + coord_flip()
# density plots
ggplot(long_cv, aes(x = cv, color = condition)) +
    geom_density() +
    coord_cartesian(xlim = c(0, 150)) +
    ggtitle("CV distributions")
# compute dispersions and plot BCV
y <- estimateDisp(y)
plotBCV(y, main = "BCV plot of IRS normed, TMM normed, all 27")
# the exact test object has columns like fold-change, CPM, and p-values
et <- exactTest(y, pair = c("HeH", "ETV6"))
# this counts up, down, and unchanged genes (proteins) at 10% FDR
summary(decideTestsDGE(et, p.value = 0.10))
# the topTags function adds the BH FDR values to an exactTest data frame
# make sure we do not change the row order (the sort.by parameter)!
topTags(et, n = 25)
tt <- topTags(et, n = Inf, sort.by = "none")
tt <- tt$table # tt is a list. We just need the "table" data frame
# make an MD plot (like MA plot)
plotMD(et, p.value = 0.10)
abline(h = c(-1, 1), col = "black")
# check the p-value distribution
# (the horizontal line marks the mean count of the flat right-hand tail,
# i.e. the expected background of true-null p-values)
ggplot(tt, aes(PValue)) +
    geom_histogram(bins = 100, fill = "white", color = "black") +
    geom_hline(yintercept = mean(hist(et$table$PValue, breaks = 100,
        plot = FALSE)$counts[26:100])) +
    ggtitle("HeH vs ETV6 PValue distribution")
# get the averages within each condition
# results already has the normalized data in its left columns
tt$ave_HeH <- rowMeans(irs_tmm[HeH])
tt$ave_ETV6 <- rowMeans(irs_tmm[ETV6])
# add the candidate status column (FDR bins: <=1%, <=5%, <=10%, above)
tt <- tt %>%
    mutate(candidate = cut(FDR, breaks = c(-Inf, 0.01, 0.05, 0.10, 1.0),
        labels = c("high", "med", "low", "no")))
tt %>% count(candidate) # count candidates
ggplot(tt, aes(x = logFC, fill = candidate)) +
    geom_histogram(binwidth=0.1, color = "black") +
    facet_wrap(~candidate) +
    coord_cartesian(xlim = c(-4, 4)) +
    ggtitle("HeH vs ETV6-RUNX1 logFC distributions by candidate")
# ================= reformat edgeR test results ================================
# Assemble a tidy results frame from the intensity data and the edgeR
# top-tags table.
#   df   - data frame of intensities
#   tt   - top tags table from the edgeR test (FDR, logFC, PValue, genes)
#   x    - column indexes for the first condition
#   xlab - label for the first condition
#   y    - column indexes for the second condition
#   ylab - label for the second condition
# Returns a new data frame.
collect_results <- function(df, tt, x, xlab, y, ylab) {
    # per-condition average intensity vectors
    mean_x <- rowMeans(df[x])
    mean_y <- rowMeans(df[y])
    # signed fold change, direction of change, and FDR-based candidate bins
    fold_change <- ifelse(mean_y > mean_x, mean_y / mean_x, -1 * mean_x / mean_y)
    change_dir <- ifelse(mean_y > mean_x, "up", "down")
    cand <- cut(tt$FDR, breaks = c(-Inf, 0.01, 0.05, 0.10, 1.0),
                labels = c("high", "med", "low", "no"))
    # stitch the intensity columns and the statistics together
    out <- cbind(df[c(x, y)], data.frame(logFC = tt$logFC, FC = fold_change,
                                         PValue = tt$PValue, FDR = tt$FDR,
                                         ave_x = mean_x, ave_y = mean_y,
                                         direction = change_dir, candidate = cand,
                                         Acc = tt$genes))
    # replace the placeholder average column names with labeled ones
    names(out)[names(out) %in% c("ave_x", "ave_y")] <- str_c("ave_", c(xlab, ylab))
    out
}
# get the results (combined IRS data; HeH = condition x, ETV6 = condition y)
results <- collect_results(irs, tt, HeH, "HeH", ETV6, "ETV6")
# Build the A/M/candidate/P columns used by the MA plotting helpers.
#   results - results data frame
#   x       - column(s) for the x condition
#   y       - column(s) for the y condition
# Returns a new data frame with columns A (log10 mean intensity),
# M (log2 ratio), candidate, and P (-log10 FDR).
# NOTE(review): this masks base::transform; renaming would break the callers
# below, so the name is kept.
transform <- function(results, x, y) {
    avg_intensity <- log10((results[x] + results[y]) / 2)
    log_ratio <- log2(results[y] / results[x])
    out <- data.frame(avg_intensity,
                      log_ratio,
                      results$candidate,
                      -log10(results$FDR))
    colnames(out) <- c("A", "M", "candidate", "P")
    out
}
MA_plots <- function(results, x, y, title) {
  # Makes MA-plot DE candidate ggplots: one combined plot and one
  # faceted by candidate category.
  # results - data frame with edgeR results and condition average columns
  # x - string for x-axis column (first condition average)
  # y - string for y-axis column (second condition average)
  # title - title string to use in plots
  # Prints both plots; the value of the last print() is what is returned
  # (the original comment claimed a list of plots, which is not the case).
  # uses transformed data: A = log10 mean intensity, M = log2 ratio (y/x)
  temp <- transform(results, x, y)
  # horizontal reference lines: no change and 2-fold change (M = +/- 1)
  ma_lines <- list(geom_hline(yintercept = 0.0, color = "black"),
                   geom_hline(yintercept = 1.0, color = "black", linetype = "dotted"),
                   geom_hline(yintercept = -1.0, color = "black", linetype = "dotted"))
  # make main MA plot
  ma <- ggplot(temp, aes(x = A, y = M)) +
    geom_point(aes(color = candidate, shape = candidate)) +
    scale_y_continuous(paste0("logFC (", y, "/", x, ")")) +
    scale_x_continuous("Ave_intensity") +
    ggtitle(title) +
    ma_lines
  # make separate MA plots (one panel per candidate category)
  ma_facet <- ggplot(temp, aes(x = A, y = M)) +
    geom_point(aes(color = candidate, shape = candidate)) +
    scale_y_continuous(paste0("log2 FC (", y, "/", x, ")")) +
    scale_x_continuous("log10 Ave_intensity") +
    ma_lines +
    facet_wrap(~ candidate) +
    ggtitle(str_c(title, " (separated)"))
  # make the plots visible
  print(ma)
  print(ma_facet)
}
scatter_plots <- function(results, x, y, title) {
  # Makes scatter-plot DE candidate ggplots: one combined plot and one
  # faceted by candidate category.
  # results - data frame with edgeR results and condition average columns
  # x - string naming the x-axis column (first condition average)
  # y - string naming the y-axis column (second condition average)
  # title - title string to use in plots
  # Prints both plots; the value of the last print() is what is returned.
  # reference lines on the log10/log10 scales: identity and 2-fold change
  # (0.301 = log10(2))
  scatter_lines <- list(geom_abline(intercept = 0.0, slope = 1.0, color = "black"),
                        geom_abline(intercept = 0.301, slope = 1.0, color = "black", linetype = "dotted"),
                        geom_abline(intercept = -0.301, slope = 1.0, color = "black", linetype = "dotted"),
                        scale_y_log10(),
                        scale_x_log10())
  # make main scatter plot
  # aes_string() is deprecated in ggplot2; use the .data pronoun for
  # string-named columns, with labs() keeping the original axis labels
  scatter <- ggplot(results, aes(.data[[x]], .data[[y]])) +
    geom_point(aes(color = candidate, shape = candidate)) +
    labs(x = x, y = y) +
    ggtitle(title) +
    scatter_lines
  # make separate scatter plots (one panel per candidate category)
  scatter_facet <- ggplot(results, aes(.data[[x]], .data[[y]])) +
    geom_point(aes(color = candidate, shape = candidate)) +
    labs(x = x, y = y) +
    scatter_lines +
    facet_wrap(~ candidate) +
    ggtitle(str_c(title, " (separated)"))
  # make the plots visible
  print(scatter)
  print(scatter_facet)
}
volcano_plot <- function(results, x, y, title) {
  # Makes a volcano plot (log2 FC vs -log10 FDR) colored by candidate.
  # results - a data frame with edgeR results
  # x - string for the x-axis column (first condition average)
  # y - string for y-axis column (second condition average)
  # title - plot title string
  # Returns the ggplot object; unlike MA_plots/scatter_plots it does not
  # print, so the caller (or auto-printing at top level) must display it.
  # uses transformed data: M = log2(y/x), P = -log10(FDR)
  temp <- transform(results, x, y)
  # build the plot
  ggplot(temp, aes(x = M, y = P)) +
    geom_point(aes(color = candidate, shape = candidate)) +
    xlab("log2 FC") +
    ylab("-log10 FDR") +
    ggtitle(str_c(title, " Volcano Plot"))
}
# make the DE plots; volcano_plot returns its ggplot, which auto-prints
# at top level
MA_plots(results, "ave_HeH", "ave_ETV6", "HeH vs ETV6/RUNX1")
scatter_plots(results, "ave_HeH", "ave_ETV6", "HeH vs ETV6/RUNX1")
volcano_plot(results, "ave_HeH", "ave_ETV6", "HeH vs ETV6/RUNX1")
# ============== individual protein expression plots ===========================
# function to extract the identifier part of the accesssion
get_identifier <- function(accession) {
  # Split pipe-delimited accession strings and keep the third field.
  # NOTE(review): assumes at least three "|"-separated fields per accession
  # (UniProt "db|acc|id" style); shorter inputs would yield "" — confirm
  # against the actual Acc values.
  identifier <- str_split(accession, "\\|", simplify = TRUE)
  identifier[,3]
}
set_plot_dimensions <- function(width_choice, height_choice) {
  # Set the notebook (IRkernel/repr) plot size, in inches.
  # Returns the previous option values invisibly, as options() does.
  new_opts <- list(repr.plot.width = width_choice,
                   repr.plot.height = height_choice)
  do.call(options, new_opts)
}
plot_top_tags <- function(results, nleft, nright, top_tags) {
  # Bar-plots individual protein expression for the top DE candidates.
  # results - two-condition summary table: data columns first, then test results
  # nleft, nright - number of data columns in each condition
  # top_tags - number of top up- and top down-regulated candidates to plot
  # One barplot per protein; bar title carries identifier, mean intensity,
  # FDR, and fold change.
  # get top up-regulated (smallest FDR among non-negative logFC)
  up <- results %>%
    filter(logFC >= 0) %>%
    arrange(FDR)
  # head() avoids the NA rows that up[1:top_tags, ] produces when there
  # are fewer than top_tags candidates
  up <- head(up, top_tags)
  # get top down-regulated
  down <- results %>%
    filter(logFC < 0) %>%
    arrange(FDR)
  down <- head(down, top_tags)
  # pack them into one data frame
  proteins <- rbind(up, down)
  # one bar color per condition: left condition red, right condition blue
  color <- c(rep("red", nleft), rep("blue", nright))
  # seq_len() is safe when proteins has zero rows (1:nrow would be c(1, 0))
  for (row_num in seq_len(nrow(proteins))) {
    row <- proteins[row_num, ]
    vec <- as.vector(unlist(row[1:(nleft + nright)]))
    names(vec) <- colnames(row[1:(nleft + nright)])
    title <- str_c(get_identifier(row$Acc), ", int: ", scientific(mean(vec), 2),
                   ", p-val: ", scientific(row$FDR, digits = 3),
                   ", FC: ", round(row$FC, digits = 1))
    barplot(vec, col = color, main = title)
  }
}
# set plot size, make plots, reset plot size
set_plot_dimensions(6, 4)
plot_top_tags(results, length(HeH), length(ETV6), 25)
# NOTE(review): width and height are not defined in this chunk — they must
# already exist in the session; verify before running standalone
set_plot_dimensions(width, height)
# save the full results table (tab-separated, blank for NA)
write.table(results, "IRS_R_averages_results.txt", sep = "\t",
            row.names = FALSE, na = " ")
sessionInfo()
|
1d810e40876b01b7fee7fe49b38078d89d4c500a
|
d8b2ab2974d83987a03b9dfd4ba08431a75fd0b5
|
/modeling.R
|
b1646b2f971ee0531d5a9fcf68a68ce5e0d254e5
|
[] |
no_license
|
jinc132/Info370_a3
|
5120cb45ac68ee610c3f60ad7e4a62da3058b96d
|
89ab2c3a0304ddb9fa1504798518ffcb78a3d5ea
|
refs/heads/master
| 2020-04-07T18:57:51.453690
| 2018-11-29T01:09:43
| 2018-11-29T01:09:43
| 158,631,095
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,468
|
r
|
modeling.R
|
library(dplyr)
library(corrplot)
# load the cleaned Strava-style activity data; treat empty strings as NA
# NOTE: the name `data` masks base::data() for the rest of the session
data <- read.csv('./cleanData.csv', na.strings = c("", "NA"))
# Question 1
# Remove outliers from the data for a more accurate model
remove_outliers <- function(x, na.rm = TRUE, ...) {
  # Replace Tukey outliers with NA: values beyond 1.5 * IQR past the
  # first or third quartile.
  # x - numeric vector; na.rm and ... are forwarded to quantile()
  # Returns x with outlying entries set to NA.
  qs <- quantile(x, probs = c(0.25, 0.75), na.rm = na.rm, ...)
  fence <- 1.5 * IQR(x, na.rm = na.rm)
  cleaned <- x
  cleaned[x < (qs[1] - fence)] <- NA
  cleaned[x > (qs[2] + fence)] <- NA
  cleaned
}
# Begin removing outliers and create a male and female data frame
distance <- remove_outliers(data$distance)
moving_time <- remove_outliers(data$moving_time)
average_speed <- remove_outliers(data$average_speed)
max_speed <- remove_outliers(data$max_speed)
elapsed_time <- remove_outliers(data$elapsed_time)
# drop any row with an NA (i.e. any outlier in any variable)
clean_data <- data.frame(data$athlete.sex, data$athlete.country, distance, moving_time, average_speed, max_speed, elapsed_time) %>%
  na.omit()
male_data <- clean_data %>%
  filter(data.athlete.sex == "M")
female_data <- clean_data %>%
  filter(data.athlete.sex == "F")
# BUG FIX: this summary ran before male_data was created in the original,
# which raised "object 'male_data' not found"; moved after the split
summary(male_data$average_speed)
# male model: moving time as a function of speed, elapsed time and distance
par(mfrow = c(4,4))
male_movng_lm <- lm(moving_time ~ average_speed + max_speed + elapsed_time + distance, data = male_data)
summary(male_movng_lm)
plot(male_movng_lm, main = "Male Exertion Model:")
# female model: same formula fitted on the female subset
fem_movng_lm <- lm(moving_time ~ average_speed + max_speed + elapsed_time + distance, data = female_data)
summary(fem_movng_lm)
plot(fem_movng_lm, main = "Female Exertion Model:")
# Q2
# Calculate the number of athletes in each country (per sex)
country_corr <- clean_data %>%
  group_by(data.athlete.sex, data.athlete.country) %>%
  mutate(athletes.in.country = n()) %>%
  na.omit()
# keep the count plus the numeric activity columns
# (select() on a grouped df also retains the two grouping columns)
country_corr <- country_corr %>%
  select(athletes.in.country, distance, average_speed, moving_time, elapsed_time)
par(mfrow = c(1,1))
# NOTE(review): 3:7 indexes past the two grouping columns by position —
# verify if the select() above ever changes
correlations <- cor(country_corr[3:7], use = "pairwise.complete.obs")
corrplot.mixed(correlations, lower.col = "black", number.cex = .7)
male_focus <- country_corr %>%
  filter(data.athlete.sex == "M")
# model male athletes in for each country.
par(mfrow = c(4, 4))
lm_male <- lm (athletes.in.country ~ distance + average_speed + moving_time + elapsed_time, data = male_focus)
summary(lm_male)
plot(lm_male, main = "Male Model:")
female_focus <- country_corr %>%
  filter(data.athlete.sex == "F")
# model female athletes for each country.
lm_female <- lm (athletes.in.country ~ distance + average_speed + moving_time + elapsed_time, data = female_focus)
summary(lm_female)
plot(lm_female, main = "Female Model:")
|
1cf036eceafa6a94b954baa8a9c62abb0709d719
|
a18fa1fb80b3c76e8b67984b354ec03a4ea61d75
|
/utils/matching.R
|
c029856ac7c4db7cc77abbbdda02dd2f633184f5
|
[] |
no_license
|
galileukim/ego_patronage
|
cf96044fc51b93fc53bceba7bc665186f84efffb
|
da23480e7761f00759d6e319faa592829cce2a15
|
refs/heads/master
| 2023-04-05T11:06:03.545906
| 2021-04-14T23:10:25
| 2021-04-14T23:10:25
| 276,722,118
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,808
|
r
|
matching.R
|
# aux funs for matching
blocked_fastLink <- function(year, state, df_rais, df_filiados) {
  # Run fastLink record linkage for one (year, state) block.
  # year, state - block keys used to subset both inputs
  # df_rais - employee records with year/state and name-part columns
  # df_filiados - party-member records with affiliation spells
  # Returns the raw fastLink() output for this block.
  t <- year
  s <- state
  # employees observed in this year and state
  rais <- df_rais %>%
    filter(
      year == t,
      state == s
    )
  # members whose affiliation spell covers year t in this state
  # NOTE(review): strict inequalities exclude spells starting or ending
  # exactly in year t — confirm that is intended
  filiados <- df_filiados %>%
    filter(
      (year_start < t) & (year_cancel > t) &
      state == s
    )
  # probabilistic match on name components, with string-distance matching
  # enabled for all three; single-core to keep it reproducible/safe
  rais_filiados_link <- fastLink(
    dfA = rais,
    dfB = filiados,
    varnames = c("first_name", "last_name", "middle_name"),
    stringdist.match = c("first_name", "last_name", "middle_name"),
    n.cores = 1
  )
  return(rais_filiados_link)
}
run_diagnostics_fastLink <- function(fastLink, rais, filiados) {
  # Sanity-check a fastLink matching result.
  # fastLink - output of fastLink() for a block
  # rais, filiados - the two data frames that were matched
  # Stops on a low first-name match count or a high duplication rate;
  # otherwise prints both diagnostics.
  matched_fastLink <- getMatches(
    dfA = rais,
    dfB = filiados,
    fl.out = fastLink,
    combine.dfs = F
  )
  # match rate by first name
  first_name_matches <- matched_fastLink %>%
    reduce(
      inner_join,
      by = c("first_name", "last_name")
    )
  # BUG FIX: this count was used below but never computed in the original,
  # which failed with "object 'number_of_first_name_matches' not found"
  number_of_first_name_matches <- nrow(first_name_matches)
  # NOTE(review): the thresholds (<= 40, /50) assume a fixed sample of 50
  # names — confirm against the calling code
  if (number_of_first_name_matches <= 40) {
    stop("match rate for first names less than 80 percent.")
  } else{
    print(sprintf("match rate is %s percent.",
                  number_of_first_name_matches/50)
    )
  }
  # how many duplicated id_employees are there for each electoral title?
  proportion_duplicated_rais_filiados <- setDT(matched_fastLink)[
    , count := uniqueN(elec_title),
    by = id_employee
  ] %>%
    summarise(
      proportion_duplicated = mean(count > 1)
    ) %>%
    pull()
  # BUG FIX: the proportion lies in [0, 1], so compare against 0.2 (20%)
  # rather than 20 -- the original guard could never trigger
  if (proportion_duplicated_rais_filiados > 0.2) {
    stop("duplication rate is higher than 20 percent.")
  } else{
    # BUG FIX: the value was passed to print(), not sprintf(), leaving the
    # %s placeholder unfilled (and erroring on print's extra argument)
    print(sprintf("duplication rate is %s",
                  proportion_duplicated_rais_filiados))
  }
}
|
87405e71486c60be9fb82813ea9925c1ea2b472b
|
6464efbccd76256c3fb97fa4e50efb5d480b7c8c
|
/paws/man/globalaccelerator_list_endpoint_groups.Rd
|
abc4bf8355e3f9c7652452c7dd7a3f042597927b
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
johnnytommy/paws
|
019b410ad8d4218199eb7349eb1844864bd45119
|
a371a5f2207b534cf60735e693c809bd33ce3ccf
|
refs/heads/master
| 2020-09-14T23:09:23.848860
| 2020-04-06T21:49:17
| 2020-04-06T21:49:17
| 223,286,996
| 1
| 0
|
NOASSERTION
| 2019-11-22T00:29:10
| 2019-11-21T23:56:19
| null |
UTF-8
|
R
| false
| true
| 915
|
rd
|
globalaccelerator_list_endpoint_groups.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/globalaccelerator_operations.R
\name{globalaccelerator_list_endpoint_groups}
\alias{globalaccelerator_list_endpoint_groups}
\title{List the endpoint groups that are associated with a listener}
\usage{
globalaccelerator_list_endpoint_groups(ListenerArn, MaxResults,
NextToken)
}
\arguments{
\item{ListenerArn}{[required] The Amazon Resource Name (ARN) of the listener.}
\item{MaxResults}{The number of endpoint group objects that you want to return with this
call. The default value is 10.}
\item{NextToken}{The token for the next set of results. You receive this token from a
previous call.}
}
\description{
List the endpoint groups that are associated with a listener.
}
\section{Request syntax}{
\preformatted{svc$list_endpoint_groups(
ListenerArn = "string",
MaxResults = 123,
NextToken = "string"
)
}
}
\keyword{internal}
|
a071bf314295bc24415642a5c2841e10b46e1b86
|
3d73ec1a75b54fab0e36db36b3892b9de632316a
|
/R/Plots/dotted_BoxPlot.R
|
24a33a84025c9c64fee9c7c30a73cb9e4b7caa7c
|
[] |
no_license
|
fabiodorazio/ZebrafishPGCs
|
80f777cf1a36f65738649f5d9aeb67dfe6d22f95
|
5bcb3ffbe8a76e8f87b25e52587c0a77b78c6f4a
|
refs/heads/master
| 2023-02-06T01:13:12.315538
| 2020-12-09T10:44:16
| 2020-12-09T10:44:16
| 200,243,948
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 401
|
r
|
dotted_BoxPlot.R
|
# per-cell averages by treatment and embryo, tab-separated
r2 <- read.csv('../TestforR_Average.txt', sep = '\t')
# dodge the embryo points horizontally so they do not overplot
posn.d <- position_dodge(width=0.4)
my_color = rep('dodgerblue1', times = 6)  # one color entry per embryo level
# dotted box plot: colored points per embryo over an unfilled box plot
ggplot(r2, aes(x=Treatment, y=Average.per.cell, color = Embryo)) +
  geom_point(alpha = 1, position = posn.d) + scale_color_manual(values = my_color) +
  geom_boxplot(alpha = 0, colour = "black") +
  theme_classic() + ylim(c(0,2)) +
  theme(legend.position = 'none')
|
879bccbe770ea5579a675d85c9285cbc8c411ca3
|
dec7da1e4189f2d66538162af20cfe333e05e8c5
|
/tests/testthat/test_genomic_annotation.R
|
19c77de1c497189890f17e601e6b2574405cba29
|
[] |
no_license
|
AvinashGupta/epic
|
7e5fbae3c3938945e5f33bd51442f46124062de5
|
b716f3561b3d9e58668c4d72d32c6ad6ea163ab3
|
refs/heads/master
| 2020-03-27T17:49:46.710854
| 2016-08-18T15:52:22
| 2016-08-18T15:52:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,760
|
r
|
test_genomic_annotation.R
|
context("test genomic annotations")
# two toy interval sets on chr1 used by the overlap-percentage unit test
gr1 = GRanges(seqname = "chr1", ranges = IRanges(start = c(4, 10), end = c(6, 16)))
gr2 = GRanges(seqname = "chr1", ranges = IRanges(start = c(7, 13), end = c(8, 20)))
# gr1[2] = [10,16] overlaps gr2[2] = [13,20] over [13,16]: 4 of 7 bases;
# the reverse direction covers 4 of 8 bases of gr2[2]
test_that("test percentOverlaps", {
  expect_that(percentOverlaps(gr1, gr2), equals(c(0, 4/7)))
  expect_that(percentOverlaps(gr2, gr1), equals(c(0, 4/8)))
})
# the remainder is a smoke test against cluster-local data; it only runs
# when the IS_PBS environment variable is set and makes no assertions —
# it just exercises the annotate_* entry points with various arguments
if(Sys.getenv("IS_PBS") != "") {
  # build a GRanges from the first three columns (chrom, start, end)
  makeGRangesFromDataFrameWithFirstThreeColumns = function(df) {
    GRanges(seqnames = df[[1]], ranges = IRanges(df[[2]], df[[3]]))
  }
  files = dir("/icgc/dkfzlsdf/analysis/B080/guz/epic_test/data/narrow_peaks/", pattern = "gz$")
  df = read.table(paste0("/icgc/dkfzlsdf/analysis/B080/guz/epic_test/data/narrow_peaks/", files[1]))
  gr = makeGRangesFromDataFrameWithFirstThreeColumns(df)
  load("/icgc/dkfzlsdf/analysis/B080/guz/epic_test/data/gr_list_1.RData")
  genomic_features = lapply(gr_list_1[1:2], makeGRangesFromDataFrameWithFirstThreeColumns)
  ### annotate to arbitrary genomic features, exercising each argument
  annotate_to_genomic_features(gr, genomic_features[[1]])
  annotate_to_genomic_features(gr, genomic_features[[1]], name = "gene")
  annotate_to_genomic_features(gr, genomic_features[[1]], name = "gene", type = "number")
  annotate_to_genomic_features(gr, genomic_features)
  annotate_to_genomic_features(gr, genomic_features, name = c("G", "E"))
  annotate_to_genomic_features(gr, genomic_features, prefix = "")
  ### build a transcriptDb object and annotate to gene models
  library(GenomicFeatures)
  txdb = loadDb("/icgc/dkfzlsdf/analysis/B080/guz/epic_test/data/gen10.long.sqlite")
  annotate_to_gene_models(gr, txdb, gene_model = "tx")
  annotate_to_gene_models(gr, txdb, gene_model = "gene")
  annotate_to_gene_models(gr, txdb, gene_model = "gene", annotation_type = "number")
  annotate_to_gene_models(gr, txdb, gene_model = "gene", annotation_prefix = "")
}
|
2ae742528e95dac0568a50715190a03db2b3539e
|
0fcf20436b20ecfe3ef780dfe60f591ce069510b
|
/R/ht2distr.R
|
ce45f7dca04009e377534d7da3394f5dc12ff0d4
|
[] |
no_license
|
cran/tsxtreme
|
6d564f38e970eb197f0ab2caa1ddac2cd966a363
|
118b73925a316cbbcabbdad1d04721bec4ee8020
|
refs/heads/master
| 2021-06-13T23:02:52.708003
| 2021-04-23T20:20:03
| 2021-04-23T20:20:03
| 84,930,497
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,264
|
r
|
ht2distr.R
|
## Copyright (C) 2017 Thomas Lugrin
## Functions related to H+T model
## (Scope:) Heffernan-Tawn model fit in 2 stages
## List of functions: - p.res2
## - q.res2
##################################################
## > res: vector of reals, quantiles on which to compute the distribution function of the residuals
## > sorted.res: vector, EDF
## < ret: vector of the same length as z, distribution of the residuals evaluated in z
## . called by et.2.step
p.res2 <- function(res, sorted.res){
  # Empirical distribution function of the residuals, linearly
  # interpolated between order statistics.
  # res - quantiles at which to evaluate the EDF
  # sorted.res - sorted residual sample defining the EDF
  # Returns a vector the same length as res; values outside the sample
  # range are clamped to the boundary EDF values (rule = 2).
  n <- length(sorted.res)
  edf <- seq_len(n) / (n + 1)
  interp <- approx(sorted.res, y = edf, xout = res, method = "linear",
                   ties = "ordered", rule = 2)
  interp$y
}
## > p: scalar, probability (to compute quantiles of residual distribution), in [0,1]
## > a: scalar, alpha parameter, in [-1,1]
## > b: scalar, beta parameter, in [0,1]
## > data: bivariate vector, (X,Y) with Y | X>u
## < boolean, does the couple (a,b) satisfy the conditions?
## . called by verifies.conditions to get quantiles of the EDF of the residual distribution given (a,b)
q.res2 <- function(p, a, b, data){
  # Quantile of the empirical residual distribution of the H+T model
  # for given (alpha, beta) = (a, b).
  # p - probability in [0, 1]
  # a - alpha parameter in [-1, 1]; b - beta parameter in [0, 1]
  # data - 2-column matrix (X, Y) with Y | X > u
  # Returns the p-quantile of Z = (Y - a X) / X^b.
  if(dim(data)[2] != 2) stop("data are in the wrong format. Provide a 2-column matrix for (X,y), with Y|X>u")
  x_comp <- data[, 1]
  y_comp <- data[, 2]
  z_resid <- (y_comp - a * x_comp) / x_comp^b
  quantile(z_resid, p)
}
|
5774cfddaddf5a8193835e115be080ca1b09bee7
|
d6080d3ccfa5dae000118201c3d4c7e59ad70cdd
|
/plot1.R
|
deb6f8b401cdc2dad25a64b7c9d4119a8126278d
|
[] |
no_license
|
b3ckham/ExData_Plotting2
|
af71dcb41bb6fc7a2c0723672210f1d4c5e39b3b
|
c461f746f2ac7af111c928bd7309073ed9267058
|
refs/heads/master
| 2020-05-30T19:08:27.249228
| 2015-08-23T22:07:42
| 2015-08-23T22:07:42
| 41,269,389
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 442
|
r
|
plot1.R
|
library(plyr)
library(ggplot2)
# NOTE(review): plyr/ggplot2 are loaded but not used below (base aggregate
# and barplot only) — kept for compatibility with sibling scripts
## Step 1: load data
# NEI: PM2.5 emission records; SCC: source classification lookup table
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# total emissions per year, summed over all sources
aggTotals <- aggregate(Emissions ~ year, NEI, sum)
## Step 2: prepare to plot to png
png("plot1.png")
barplot(height=aggTotals$Emissions, names.arg=aggTotals$year, xlab="years", ylab=expression('total PM'[2.5]*' emission'),main=expression('Total PM'[2.5]*' emissions at various years'))
dev.off()
|
5ba0d16f0225675d52af1192e1ebc21727a65fde
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/tigerstats/examples/ShallowReg.Rd.R
|
d8ae59a6779c9548d24718af043c5c112345e612
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 199
|
r
|
ShallowReg.Rd.R
|
library(tigerstats)
### Name: ShallowReg
### Title: Regression Line Too Shallow?
### Aliases: ShallowReg
### ** Examples
## Not run:
##D if (require(manipulate)) ShallowReg()
## End(Not run)
|
2eac34727d2979b31051df0dbb7c736090b9dfcb
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/shiftR/examples/shiftrPrepare.Rd.R
|
85a087d15fa63dd38b6f921bebd24087eae2342f
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 569
|
r
|
shiftrPrepare.Rd.R
|
library(shiftR)
### Name: shiftrPrepare
### Title: Prepare Data for Fast Circular Permutation Analysis
### Aliases: shiftrPrepareLeft shiftrPrepareRight
### ** Examples
### Number of features
nf = 1e6
### Generate left and right sets (random 0/1 integer feature vectors)
lset = sample(c(0L,1L), size = nf, replace = TRUE)
rset = sample(c(0L,1L), size = nf, replace = TRUE)
# Prepare binary sets:
lbin = shiftrPrepareLeft(lset)
rbin = shiftrPrepareRight(rset)
### Check object sizes
# Notice asymetry in binary object sizes
object.size(lset)
object.size(rset)
object.size(lbin)
object.size(rbin)
|
5117817bfa074d7a3bb24a658ad401bff1d01301
|
41e8adc104bea0fa43537f081b38b305df0f70b8
|
/R/hotelling.trace.R
|
5d5b52d1fac28bfe8a0408c6c97d10db62c2779c
|
[] |
no_license
|
cran/agce
|
4f46a774552f2eab39b8eaa4494b8d813f0941b6
|
0ae0afb51e93080d6b742b7312b55e9964625833
|
refs/heads/master
| 2020-06-04T05:00:50.321637
| 2006-02-08T00:00:00
| 2006-02-08T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 392
|
r
|
hotelling.trace.R
|
"hotelling.trace" <-
function(X,Y,C,U)
{
  # Hotelling-Lawley trace statistic for the hypothesis C B U = 0 in the
  # multivariate linear model Y = X B + E.
  # X - n x p design matrix; Y - n x q response matrix
  # C - contrast matrix on the coefficients; U - response transformation
  # Returns sum(diag(S2 %*% solve(S3))) where S2/S3 are the hypothesis and
  # error SSCP matrices of Y %*% U.
  n<-dim(Y)[1]
  # hoist (X'X)^{-1}, which the original recomputed five times
  XtX_inv <- solve(crossprod(X))
  A <- XtX_inv %*% t(C)
  ### Compute the projection matrix on V|W
  # t(A) = C %*% XtX_inv because XtX_inv is symmetric
  PVW <- X %*% A %*% solve(C %*% A) %*% t(A) %*% t(X)
  YU <- Y %*% U
  S2 <- t(YU) %*% PVW %*% YU
  # error SSCP: projection onto the orthogonal complement of col(X)
  S3 <- t(YU) %*% (diag(n) - X %*% XtX_inv %*% t(X)) %*% YU
  ### Compute the H-L trace statistic; return it visibly (the original
  ### ended in an assignment, which returned the value invisibly)
  tr<-sum(diag(S2%*%solve(S3,tol=1e-50)))
  tr
}
|
7fd8829f824d24bf390bc91559f8ac531340a001
|
c87e8550ddea7f81714bb0b1153007596f11cb83
|
/R/select_KM.R
|
be761019ca381c38efca89a443e3deff184d0484
|
[
"MIT"
] |
permissive
|
JunLiuLab/SIMPLEs
|
78bced4473f929735d0d5899ed5b56b83de84d15
|
2f4289186bb84c5b2072c57731ea52d98995ca25
|
refs/heads/master
| 2021-07-09T03:35:23.017466
| 2021-03-10T15:44:42
| 2021-03-10T15:44:42
| 233,257,891
| 3
| 1
|
MIT
| 2021-03-25T09:06:19
| 2020-01-11T16:02:41
|
R
|
UTF-8
|
R
| false
| false
| 10,248
|
r
|
select_KM.R
|
#' Wrapper of SIMPLE or SIMPLE-B to select optimal K and M based on BIC
#' @param dat scRNASeq data matrix. Each row is a gene, each column is a cell.
#' @param bulk Bulk RNASeq data matrix. Should be log(1 + tpm/fpkm/rpkm). Each row is a gene which must be ordered the same as the scRNASeq data. Each column is a cell type which must be ordered as the cell type label in \emph{celltype}. Default: NULL
#' @param celltype A numeric vector for labels of cells in the scRNASeq. Each cell type has corresponding mean expression in the bulk RNASeq data. The labels must start from 1 to the number of types. If NULL, all cells are treated as a single cell type and the input bulk RNASeq should also have one column that is the mean expression over all the cell types. Default: NULL
#' @param b The scaling factor between scRNASeq and bulk RNASeq. If NULL, will do a weighted linear regression between mean of scRNASeq and bulk RNASeq for each cell type. Default = 1. Only relevant for SIMPLE-B.
#' @param K0 a vector for number of latent gene modules to be selected. Default is 10. Note that the minimum K0 is 2.
#' @param M0 a vector for Number of clusters to be selected. Default is 1. If 1 is not in the sequence of M0, it will add 1 to the sequence; and the imputed matrix when M=1 is used to initialize imputation for other Ms.
#' @param rel when increasing K, the algorithm will stop when the relative decrease of BIC is less than rel; but it will enumerate all given Ms. Default: 0.01.
#' @param clus Initial clustering of scRNASeq data. If NULL, the function will
#' use PCA and Kmeans to do clustering initially.
#' @param K The number of PCs used in the initial clustering. Default is 20.
#' @param iter The number of EM iterations using full data set. See details.
#' @param est_z The iteration starts to update Z.
#' @param impt_it The iteration starts to sample new imputed values in initial phase. See details.
#' @param max_lambda Whether to maximize over lambda. Default is True.
#' @param est_lam The iteration starts to estimate lambda.
#' @param penl L1 penalty for the factor loadings.
#' @param sigma0 The variance of the prior distribution of \eqn{\mu}.
#' @param pi_alpha The hyperparameter of the prior distribution of \eqn{\pi}.
#' See details.
#' @param beta A G by K0 matrix. Initial values for factor loadings (B). If
#' null, beta will be initialized from normal distribution with mean zero and
#' variance M0/K0. See details.
#' @param lambda A M0 by K0 matrix. Initial values for the variances of factors.
#' Each column is for a cell cluster. If null, lambda will initialize to be
#' 1/M0. See details.
#' @param sigma A G by M0 matrix. Initial values for the variance of
#' idiosyncratic noises. Each column is for a cell cluster. If null, sigma
#' will initialize to be 1. See details.
#' @param mu A G by M0 matrix. Initial values for the gene expression mean of
#' each cluster. Each column is for a cell cluster. If NULL, it will take the
#' sample mean of cells weighted by the probability in each cluster. See
#' details.
#' @param p_min Initialize parameters using genes expressed in at least
#' \emph{p_min} proportion of cells. If the number genes selected is less than
#' \emph{min_gene}, select \emph{min_gene} genes with higest proportion of non
#' zeros. Default = 0.4.
#' @param min_gene Minimal number of genes used in the initial phase. See
#' details.
#' @param fix_num If true, always use \emph{min_gene} number of genes with the
#' highest proportion of non zeros in the initial phase. Default = F. See details.
#' @param cutoff The value below cutoff is treated as no expression. Default =
#' 0.1.
#' @param verbose Whether to show some intermediate results. Default = False.
#' @param num_mc The number of Gibbs steps for generating new imputed data after the
#' parameters have been updated during Monte Carlo EM. Default = 3.
#' @param mcmc The number of Gibbs steps to sample imputed data after EM.
#' Default = 50.
#' @param burnin The number of burnin steps before sample imputed data after EM.
#' Default = 2.
#' @return \code{selectKM} returns a matrix of BIC for all Ks and Ms tests,
#' mK, the best K; mM, the best M; result,
#' list for the result with smallest BIC in the following order.
#' \enumerate{
#' \item{loglik} {The log-likelihood of each MCMC sample of imputed gene expression after EM. NULL if mcmc <= 0}
#' \item{loglik_tot} {The log-likelihood of the full imputed gene expression at each iteration and the prior of B matrix.}
#' \item{BIC} {BIC which is -2 *loglik_tot + penalty on the number of parameters. Can be used to select parameters.}
#' \item{pi} {The prior probabilites of cells belong to each cluster.}
#' \item{mu} {Mean expression for each gene in each cluster}
#' \item{sigma} {Variances of idiosyncratic noises for each gene in each cluster.}
#' \item{beta} {Factor loadings.}
#' \item{lambda} {Variances of factors for each cluster.}
#' \item{z} {The posterior probability of each cell belonging to each cluster.}
#' \item{Yimp0} {A matrix contains the expectation of gene
#' expression specified by the model.}
#' \item{pg} {A G by M0 matrix, dropout rate for each gene in each
#' cluster estimated from initial clustering.}
#' \item{initclus} {Output initial cluster results.}
#' \item{impt} {A matrix contains the mean of each imputed
#' entry by sampling multiple imputed values while the parameters are MLE. If mcmc <= 0, output
#' the imputed expression matrix at last step of EM}
#' \item{impt_var} {A matrix
#' contains the variance of each imputed entry by sampling multiple imputed
#' values while the parameters are MLE. NULL if mcmc <= 0.}
#' \item{Ef} {If mcmc >0, output posterior means of factors
#' given observed data (a n by K0 matrix). If mcmc <= 0, output conditional expectation of the factors for each cluster \eqn{E(f_i|z_i= m)}
#' at the last step of EM. A list with length M0,
#' each element in the list is a n by K0 matrix.}
#' \item{Varf} {If mcmc >0, output posterior variances of
#' factors given observed data (a n by K0 matrix). If mcmc <= 0, output conditional covariance matrix of factors for each cluster \eqn{Var(f_i|z_i = m)} at the last step of EM.
#' A list with length M0, each element in the list is a K0 by K0 matrix.}
#' \item{consensus_cluster} {Score for the clustering stability of each cell by multiple imputations.
#' NULL if mcmc <=0. }
#' }
#' @import doParallel
#' @importFrom foreach foreach
#' @importFrom irlba irlba
#' @seealso \code{\link{SIMPLE}} \code{\link{SIMPLE_B}}
#' @examples
#' library(foreach)
#' library(doParallel)
#' library(SIMPLEs)
#'
#' # simulate number of clusters
#' M0 <- 3
#' # number of cells
#' n <- 300
#' # simulation_bulk and getCluster is defined in the util.R under the util directory of the corresponding github repository.
#' source("utils/utils.R")
#' simu_data <- simulation_bulk(n = 300, S0 = 20, K = 6, MC = M0, block_size = 32, indepG = 1000 - 32 * 6, verbose = F, overlap = 0)
#' Y2 <- simu_data$Y2
#' # number of factors
#' K <- c(6,10)
#' M <- c(1, 3)
#' # parallel
#' registerDoParallel(cores = 6)
#' # estimate the parameters and sample imputed values
#' results <- selectKM(Y2, K0=K, M0=M, clus = NULL, K = 20, p_min = 0.5, max_lambda = T, min_gene = 200, cutoff = 0.01)
#' print(sprintf("best M and K: %d, %d", results$mM, results$mK))
#' result = results$result
#' # evaluate cluster performance
#' celltype_true <- simu_data$Z
#' mclust::adjustedRandIndex(apply(result$z, 1, which.max), celltype_true)
#' # or redo clustering based on imputed values (sometimes work better for real data)
#' getCluster(result$impt, celltype_true, Ks = 20, M0 = M0)[[1]]
#' @author Zhirui Hu, \email{zhiruihu@g.harvard.edu}
#' @author Songpeng Zu, \email{songpengzu@g.harvard.edu}
#' @export
selectKM <- function(dat, bulk = NULL, celltype = NULL, b = 1, K0 = 10, M0 = 1, iter = 10, est_lam = 1, impt_it = 5, penl = 1,
                     sigma0 = 100, pi_alpha = 1, beta = NULL, verbose = F, max_lambda = T, lambda = NULL,
                     sigma = NULL, mu = NULL, est_z = 1, clus = NULL, p_min = 0.4, cutoff = 0.1, K = 20,
                     min_gene = 300, num_mc = 3, fix_num = F, mcmc = 50, burnin = 2, rel = 0.01) {
  # Grid-search over (M, K): fit SIMPLE for each combination and keep the
  # fit with the smallest BIC. See the roxygen block above for parameters.
  # NOTE(review): bulk, celltype and b are accepted but never forwarded to
  # SIMPLE below (SIMPLE_B is not called) — confirm whether the bulk-data
  # variant was meant to be dispatched here.
  G <- nrow(dat)  # number of genes
  n <- ncol(dat)  # number of cells
  mK = mM = NULL  # best K and M found so far
  mBIC = Inf      # smallest BIC found so far
  best = list() # for each M, the best fit (indexed by M)
  # M0 must be sorted and contain 1: the M = 1 imputation seeds the others
  M0 = sort(M0)
  if(!(1 %in% M0)) M0 = c(1, M0)
  K0 = sort(K0)
  #record all BIC values, rows = M, columns = K
  BICs = data.frame(matrix(NA, nrow = length(M0), ncol = length(K0)))
  rownames(BICs) = as.character(M0); colnames(BICs) = as.character(K0)
  init_imp = NULL  # imputation used to initialize fits for M > 1
  for(M1 in M0)
  {
    prevBIC = Inf
    print(sprintf("number of clusters: %d", M1))
    for(K1 in K0)
    {
      print(sprintf("number of factors: %d", K1))
      result = SIMPLE(dat, K1, M1, iter = iter, est_lam = est_lam, impt_it = impt_it, penl = penl, init_imp = init_imp,
                      sigma0 = sigma0, pi_alpha = pi_alpha, beta = beta, verbose = verbose, max_lambda = max_lambda, lambda = lambda,
                      sigma = sigma, mu = mu, est_z = est_z, clus = clus, p_min = p_min, cutoff = cutoff, K = K,
                      min_gene = min_gene, num_mc = num_mc, fix_num = fix_num, mcmc = mcmc, burnin = burnin)
      # reuse the p_min chosen by the first fit for all later fits
      if(is.null(p_min)) p_min = result$p_min
      print(sprintf("BIC: %.0f", result$BIC0))
      # small data sets are scored with BIC0, larger ones with BIC
      if(n < 1000)
      {
        if(mBIC > result$BIC0)
        {
          # new overall best fit: remember it and its (K, M)
          mBIC = result$BIC0
          mK = K1
          mM = M1
          best[[M1]] = result
        }
        BICs[as.character(M1), as.character(K1)] = result$BIC0
        # stop increasing K once BIC's relative improvement drops below rel
        if((result$BIC0 - prevBIC) > -abs(result$BIC0)* rel) break;
        prevBIC = result$BIC0
      }else{
        if(mBIC > result$BIC)
        {
          mBIC = result$BIC
          mK = K1
          mM = M1
          best[[M1]] = result
        }
        BICs[as.character(M1), as.character(K1)] = result$BIC
        if((result$BIC - prevBIC) > -abs(result$BIC)* rel) break;
        prevBIC = result$BIC
      }
    }
    # seed subsequent (larger-M) fits with the M = 1 imputed matrix
    if(M1 == 1) init_imp = best[[1]]$impt
  }
  return(list(BICs = BICs, mK = mK, mM = mM, result = best[[mM]]))
}
|
770057e8737404052b8ee8eedf36613bc8513be2
|
a230b553d5bc4235d23e3f87343de8c69c9ca699
|
/man/corr_data.Rd
|
f5ebf9ed850ba9136dc28f3e79c17520c94b8624
|
[] |
no_license
|
acryland/nbastats
|
b184315d895d08602c5084921656d434a91d7d4e
|
ab24b18f4935a82abee182cb9d5ca69fa4276b40
|
refs/heads/master
| 2021-02-16T11:09:57.299591
| 2020-03-04T23:10:12
| 2020-03-04T23:10:12
| 244,999,218
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 470
|
rd
|
corr_data.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/corr_data.R
\name{corr_data}
\alias{corr_data}
\title{This function selects all numeric variables for a given year and creates a correlogram.}
\usage{
corr_data(x, y)
}
\arguments{
\item{x}{defines the dataset}
\item{y}{defines the year}
}
\description{
This function selects all numeric variables for a given year and creates a correlogram.
}
\examples{
corr_data()
}
\keyword{correlogram}
|
4393acc6982662d2c14935aa1b46bba8b966562e
|
90f4133259de3a5990553b49651938765caa9d0e
|
/man/Gaussian2binary.Rd
|
abc0cb4e560961f67333a0b9d6977138b3cdc26f
|
[] |
no_license
|
itsoukal/bBextremes
|
2099fdbf868078171ab756f5876db217363afef0
|
a3783d696bb07c01c4ca4f74591833d4e5dd5219
|
refs/heads/master
| 2023-06-25T08:24:10.153959
| 2021-07-30T12:03:47
| 2021-07-30T12:03:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 559
|
rd
|
Gaussian2binary.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Gaussian2binary.R
\name{Gaussian2binary}
\alias{Gaussian2binary}
\title{Fast estimation of equivalent correlation coefficients for Bernoulli processes}
\usage{
Gaussian2binary(P, ACSn)
}
\arguments{
\item{P}{parameter of Bernoulli process}
\item{ACSn}{A vector, with the equivalent (i.e., Gaussian) autocorrelation structure.}
}
\value{
vector of equivalent correlation coefficients
}
\description{
Fast estimation of equivalent correlation coefficients for Bernoulli processes
}
|
f47addb8b21aaf00a0f917433a36c606b432ba62
|
f450df6d5114f36217e8c3258cd5e77b31eec3db
|
/My_Code/PackageFunctions/natmapr/inat_leaflet_save.Rd
|
8053fe94d51816a04c36521d19278de41bb8c4fe
|
[] |
no_license
|
WendyAnthony/Code_Each_Day
|
3b9c1a83a1102f078c79de9f3b093d3b0ce8e789
|
e122c8e8fd1f59cf12acf5b137e4659b58ad1b32
|
refs/heads/master
| 2023-02-25T00:14:53.298112
| 2023-02-16T22:45:54
| 2023-02-16T22:45:54
| 232,558,449
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,278
|
rd
|
inat_leaflet_save.Rd
|
\name{inat_leaflet_save}
\alias{inat_leaflet_save}
\title{
3. Save leaflet map object as html webpage
}
\description{
Save leaflet map object as html webpage, including the required .css and .js files. To save the finished map, uses inat_leaflet_save() >> inat_leaflet_save(m, filename) >> e.g. inat_leaflet_save(map3, file = "file.html")
}
\usage{
inat_leaflet_save(m, filename)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{m}{
Name of leaflet map object created by inat_leaflet(). Run m <- inat_leaflet(data, long, lat, var1, var2, var3, var4, var5, var6) to get the object m to use to save the leaflet map.
}
\item{filename}{
The "filename.html" used to save the leaflet map object as an html file.
}
}
\author{
Wendy Anthony <o2b.pickin@gmail.com>
}
\examples{
# m <- inat_leaflet(
# inat_esqlag,
# inat_esqlag$longitude,
# inat_esqlag$latitude,
# inat_esqlag$common_name,
# inat_esqlag$scientific_name,
# inat_esqlag$observed_on_string,
# inat_esqlag$place_guess,
# inat_esqlag$url, inat_esqlag$image_url)
# inat_leaflet_save(m, "./inst/extdata/esq_lag.html")
## The function is currently defined as
function (m, filename)
{
htmlwidgets::saveWidget(m, file = filename)
}
}
|
194f9cbf3ac56a18695f3eece0e977b04852492f
|
fd2a324a9505ed29e6136a06216edce999fa97a1
|
/man/generatePermutations.Rd
|
9ef1a1b638a3e3a28b544ce50f13bf2ecb729419
|
[] |
no_license
|
cran/mixAK
|
995c88ac9b1f70ab2dac51b4fc1347b9b1356eed
|
adc4c2229d8ad3573e560fd598158e53e5d1da76
|
refs/heads/master
| 2022-09-27T10:45:02.953514
| 2022-09-19T13:46:13
| 2022-09-19T13:46:13
| 17,697,529
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 623
|
rd
|
generatePermutations.Rd
|
\name{generatePermutations}
\alias{generatePermutations}
\alias{C_generatePermutations}
\title{
Generate all permutations of (1, ..., K)
}
\description{
It generates a matrix containing all permutations of (1, ..., K).
}
\usage{
generatePermutations(K)
}
\arguments{
\item{K}{integer value of \eqn{K}.}
}
\value{
A matrix of dimension \eqn{K! \times K}{K! x K} with generated
permutations in rows.
}
\author{
Arnošt Komárek \email{arnost.komarek@mff.cuni.cz}
}
\examples{
generatePermutations(1)
generatePermutations(2)
generatePermutations(3)
generatePermutations(4)
}
\keyword{utilities}
\keyword{arith}
|
6c3fe1d81ce8fc09fb1a78fe566b0b295ad8cc07
|
ca6d02c14d9cbe93d8460f1d20853203c831eaac
|
/Automation/00_hydra/deprecated/US_California.R
|
63f39ea6a63a0bdfc3b342bf781a5831f971f572
|
[
"CC-BY-4.0"
] |
permissive
|
timriffe/covid_age
|
f654bea1cdf87e9aa9cc660facfdffb9264a9f5a
|
8486772b0bfc0803efab603d8ac751ffe54e4d89
|
refs/heads/master
| 2023-08-18T04:56:18.683797
| 2023-08-11T10:22:19
| 2023-08-11T10:22:19
| 253,315,845
| 58
| 28
|
NOASSERTION
| 2023-08-25T11:03:04
| 2020-04-05T19:31:26
|
R
|
UTF-8
|
R
| false
| false
| 10,519
|
r
|
US_California.R
|
# ---- Setup: project helpers, credentials, and previously stored data ----
#source("https://raw.githubusercontent.com/timriffe/covid_age/master/R/00_Functions.R")
source("https://raw.githubusercontent.com/timriffe/covid_age/master/Automation/00_Functions_automation.R")
library(lubridate)
# assigning Drive credentials in the case the script is verified manually
#Im changing this to not use the change_here function, sourced from the functions script
#which I cant run due to problems installing demotools-JD
#change_here(wd_sched_detect())
#startup::startup()
#setwd(here())
#nz <- read_rds(paste0(dir_n, ctr, ".rds"))
# Fall back to a default address when `email` was not set by the caller.
if (!"email" %in% ls()){
email <- "tim.riffe@gmail.com"
}
# info country and N drive address
ctr <- "US_California" # it's a placeholder
dir_n <- "N:/COVerAGE-DB/Automation/Hydra/"
# Drive credentials
# NOTE(review): the `email` object above is an R variable, but auth reads the
# *environment variable* "email" via Sys.getenv() — confirm this is intended.
drive_auth(email = Sys.getenv("email"))
gs4_auth(email = Sys.getenv("email"))
# Drive urls
# rubric <- get_input_rubric() %>%
# filter(Region == "California")
#
# ss_i <- rubric %>%
# dplyr::pull(Sheet)
#
# ss_db <- rubric %>%
# dplyr::pull(Source)
# Get current data (to keep the tests)
Tests <- read_rds(paste0(dir_n, ctr, ".rds")) %>%
filter(Measure == "Tests")
#saving data before source changed
# Keep rows collected before the source switch (<= 2021-01-24), re-serialize
# Date back to "dd.mm.yyyy" strings, and recode total sex to "b".
Prior_data <- read_rds(paste0(dir_n, ctr, ".rds")) %>%
mutate(Date = dmy(Date))%>%
filter(Date <= "2021-01-24")%>%
mutate(
Date = ymd(Date),
Date = paste(sprintf("%02d",day(Date)),
sprintf("%02d",month(Date)),
year(Date),sep="."),
Sex = case_when(Sex == "TOT" ~ "b",
TRUE ~ Sex))
### data processing
# Cases/deaths by age group from the CHHS demographics CSV, reshaped to the
# long COVerAGE-DB input format (Country/Region/Code/Date/Sex/Age/...).
#url1 <-"https://data.ca.gov/dataset/covid-19-time-series-metrics-by-county-and-state/resource/4d93df07-7c4d-4583-af53-03f950fe4365/download/6e8f6324-172d-4869-8e1f-662b998c576e#"
url1 <- "https://data.chhs.ca.gov/dataset/f333528b-4d38-4814-bebb-12db1f10f535/resource/e2c6a86b-d269-4ce1-b484-570353265183/download/covid19casesdemographics.csv"
CAage_in <-
read_csv(url1)
# TR: from here down needs a redux for the new data format.
# (unless the )
CAage <-
CAage_in %>%
mutate(Date = as_date(report_date)) %>%
filter(demographic_category == "Age Group") %>%
select(-report_date, -percent_cases, -percent_deaths, -percent_of_ca_population,-demographic_category, Cases = total_cases, Deaths = deaths, Age=demographic_value) %>%
pivot_longer(Cases:Deaths, names_to = "Measure", values_to = "Value") %>%
filter(!is.na(Value)) %>%
# Age labels -> lower bound of each open-ended source age bin
mutate(Age = recode(Age,
"0-17" = "0",
"18-49" = "18",
"50-64" = "50",
"65 and Older" = "65",
"65+" = "65",
"Unknown" = "UNK",
"Missing" = "UNK",
"missing" = "UNK",
"Total" = "TOT"),
Sex = "b",
Country = "USA",
Region = "California",
Metric = "Count",
Date = ddmmyyyy(Date),
Code = paste0("US-CA"),
# AgeInt = width of each age bin (65+ coded as 40 years)
AgeInt = case_when(Age == "0" ~ 18L,
Age == "18" ~ 32L,
Age == "50" ~ 15L,
Age == "65" ~ 40L,
Age == "UNK" ~ NA_integer_,
Age == "TOT" ~ NA_integer_)) %>%
select(Country, Region, Code, Date, Sex, Age, AgeInt, Metric, Measure, Value)
# By Sex
###updated data processing for new url
# Cases/deaths by sex from the same CSV; output carries Age = "TOT" only.
CAsex_in <-
read_csv(url1)
CAsex <-
CAsex_in%>%
mutate(Date = as_date(report_date)) %>%
filter(demographic_category== "Gender")%>%
select(-report_date, -percent_cases, -percent_deaths, -percent_of_ca_population,-demographic_category, Cases = total_cases, Deaths = deaths, Sex=demographic_value) %>%
pivot_longer(Cases:Deaths, names_to = "Measure", values_to = "Value") %>%
# drop missing values and the "Unknown" sex category
filter(!is.na(Value),
Sex != "Unknown") %>%
mutate(Sex = case_when(Sex == "Female" ~ "f",
Sex == "Male" ~ "m",
Sex== "Total" ~ "b"),
Country = "USA",
Region = "California",
Metric = "Count",
# Date serialized as "dd.mm.yyyy"
Date = paste(sprintf("%02d",day(Date)),
sprintf("%02d",month(Date)),
year(Date),sep="."),
Code = paste0("US-CA"),
Age = "TOT",
AgeInt = NA_integer_)%>%
select(Country, Region, Code, Date, Sex, Age, AgeInt, Metric, Measure, Value)
#vaccine data
# Cumulative vaccine doses by age group (total, 1st, fully, booster),
# pivoted long so each Measure is Vaccinations/Vaccination1/2/3.
urlvaccine <- "https://data.chhs.ca.gov/dataset/e283ee5a-cf18-4f20-a92c-ee94a2866ccd/resource/faee36da-bd8c-40f7-96d4-d8f283a12b0a/download/covid19vaccinesadministeredbydemographics.csv"
CAvaccine_in <-
read_csv(urlvaccine)
vaccine=CAvaccine_in %>%
mutate(Date = as_date(administered_date)) %>%
filter(demographic_category== "Age Group")%>%
select(Date,
Age=demographic_value,
Vaccinations= cumulative_total_doses,
Vaccination1=cumulative_at_least_one_dose,
Vaccination2=cumulative_fully_vaccinated,
Vaccination3 = cumulative_booster_recip_count) %>%
pivot_longer(!Date &!Age, names_to = "Measure", values_to = "Value") %>%
filter(!is.na(Value)) %>%
# vaccine data uses finer age bins than the case data above
mutate(Age = recode(Age,
"Under 5" = "0",
"5-11" = "5",
"12-17" = "12",
"18-49" = "18",
"50-64" = "50",
"65+" = "65",
"Unknown Agegroup" = "UNK"),
AgeInt = case_when(Age == "0" ~ 5L,
Age == "5" ~ 7L,
Age == "12" ~ 6L,
Age == "18" ~ 32L,
Age == "50" ~ 15L,
Age == "65" ~ 40L,
Age == "UNK" ~ NA_integer_),
Sex = "b",
Country = "USA",
Region = "California",
Metric = "Count",
Date = ddmmyyyy(Date),
Code = paste0("US-CA")) %>%
select(Country, Region, Code, Date, Sex, Age, AgeInt, Metric, Measure, Value) %>%
sort_input_data()
# bind together
# Combine all measures, de-duplicate, apply the deaths cutoff, write the
# .rds back to the N drive, and archive the raw downloads as a zip.
CAout <- bind_rows(CAage, CAsex, Tests,vaccine,Prior_data) %>%
filter(Age != "Total") %>%
sort_input_data()%>%
mutate(Code = "US-CA")
# drop duplicated Date/Sex/Age/Measure/Metric rows, keeping the first
n <- duplicated(CAout[,c("Date", "Sex","Age","Measure","Metric")])
CAout <-
CAout[!n, ]
# Keep deaths only before 2020-09-26; round-trip Date back to "dd.mm.yyyy".
CAout <- CAout %>%
mutate(Date = dmy(Date)) %>%
filter(Measure != "Deaths" | Date < "2020-09-26") %>%
mutate(Date = ymd(Date),
Date = paste(sprintf("%02d",day(Date)),
sprintf("%02d",month(Date)),
year(Date),sep="."))
# push to drive
# write_sheet(ss = ss_i,
# CAout,
# sheet = "database")
write_rds(CAout, paste0(dir_n, ctr, ".rds"))
N <- nrow(CAage) + nrow(CAsex)
#log_update(pp = ctr, N = N)
# store
# Archive today's raw source files alongside the processed output.
data_source_1 <- paste0(dir_n, "Data_sources/", ctr, "/age_sex",today(), ".csv")
data_source_2 <- paste0(dir_n, "Data_sources/", ctr, "/vaccine_",today(), ".csv")
download.file(url1, destfile = data_source_1)
download.file(urlvaccine, destfile = data_source_2)
data_source <- c(data_source_1, data_source_2)
zipname <- paste0(dir_n,
"Data_sources/",
ctr,
"_data_",
today(),
".zip")
zipr(zipname,
data_source,
recurse = TRUE,
compression_level = 9,
include_directories = TRUE)
# clean up file chaff
file.remove(data_source)
###############################################################
#outdated code
#vaccine data gets manually entered into drive sheet
#this can go now, there is an excel file now (03.06)
#Vaccine <-
#get_country_inputDB("US_CA") %>%
#filter(Measure== "Vaccination1"| Measure== "Vaccination2"| Measure== "Vaccinations") %>%
#select(-Short)
#########################data processing prior 23.03.2021###############################################################
# read in data by age
#url1 <- "https://data.ca.gov/dataset/590188d5-8545-4c93-a9a0-e230f0db7290/resource/339d1c4d-77ab-44a2-9b40-745e64e335f2/download/case_demographics_age.csv"
#CAage <-
#CAage_in %>%
#mutate(Date = as_date(date)) %>%
#select(-date, -case_percent, -deaths_percent, -ca_percent, Cases = totalpositive, Deaths = deaths) %>%
#pivot_longer(Cases:Deaths, names_to = "Measure", values_to = "Value") %>%
#filter(!is.na(Value)) %>%
#mutate(Age = recode(age_group,
#"0-17" = "0",
#"18-49" = "18",
# "50-64" = "50",
# "65 and Older" = "65",
# "65+" = "65",
# "Unknown" = "UNK",
# "Missing" = "UNK"),
# Sex = "b",
# Country = "USA",
# Region = "California",
#Metric = "Count",
#Date = ddmmyyyy(Date),
# Code = paste0("US_CA_",Date),
# AgeInt = case_when(Age == "0" ~ 18L,
# Age == "18" ~ 32L,
# Age == "50" ~ 15L,
# Age == "65" ~ 30L,
# Age == "UNK" ~ NA_integer_)) %>%
#select(Country, Region, Code, Date, Sex, Age, AgeInt, Metric, Measure, Value)
##############################################################################################
#########################data processing prior 23.03.2021##########################################################################
#url2 <- "https://data.ca.gov/dataset/590188d5-8545-4c93-a9a0-e230f0db7290/resource/ee01b266-0a04-4494-973e-93497452e85f/download/case_demographics_sex.csv"
#CAsex_in <-
#read_csv(url2)
#CAsex <-
# CAsex_in%>%
#mutate(Date = as_date(date)) %>%
#select(Sex = sex, Cases = totalpositive2, Deaths = deaths, Date) %>%
#pivot_longer(Cases:Deaths, names_to = "Measure", values_to = "Value") %>%
#filter(!is.na(Value)) %>%
#group_by(Date) %>%
#mutate(Value = ifelse(Sex == "Unknown", sum(Value),Value)) %>%
#ungroup() %>%
#mutate(Sex = case_when(Sex == "Unknown"~ "b",
#Sex == "Female" ~ "f",
#Sex == "Male" ~ "m"),
# Country = "USA",
# Region = "California",
# Metric = "Count",
#Date = paste(sprintf("%02d",day(Date)),
#sprintf("%02d",month(Date)),
# year(Date),sep="."),
# Code = paste0("US_CA_",Date),
# Age = "TOT",
#AgeInt = NA_integer_)
# storage_dir <- file.path(dir_n, "Data_sources",ctr)
#
# if (!dir.exists(storage_dir)){
# dir.create(storage_dir)
# }
#
# data_source_1 <- file.path(storage_dir,paste0("age_",today(), ".csv"))
# data_source_2 <- file.path(storage_dir,paste0("sex_",today(), ".csv"))
# write_csv(CAage_in, path = data_source_1)
# write_csv(CAsex_in, path = data_source_2)
##########################################################################################################################################
|
4cadd06a3c941ce28dcedeb9af0083708a54a631
|
8b6103d2a356350d77f52d3cbd15b650b6616afa
|
/2_RProgramming/ProgrammingAssignment3/rankhospital.R
|
795fde23a817e6c6f12af965395ab7b787eaaa21
|
[] |
no_license
|
cesarpbn1/Johns_Hopkins_University
|
95f3a803f3f342d975800309bd0082128fab89f3
|
601f51834e9435f263bfa4b0d44d0af79d99e33e
|
refs/heads/master
| 2021-07-07T04:40:40.659948
| 2017-10-04T04:53:12
| 2017-10-04T04:53:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,004
|
r
|
rankhospital.R
|
# Exploratory scratch work: inspect the outcome data before defining rankhospital().
outcome <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
head(outcome)
ncol(outcome)
nrow(outcome)
names(outcome)
# Column 11 holds the heart-attack 30-day death rate; coerce to numeric
# ("Not Available" becomes NA with a warning) and look at its distribution.
outcome[, 11] <- as.numeric(outcome[, 11])
hist(outcome[, 11])
## rankhospital: name of the hospital in `state` with the `num`-th lowest
## 30-day mortality rate for `outcome`.
##
## Args:
##   state   - two-letter state abbreviation; must appear in the data
##   outcome - one of "heart attack", "heart failure", "pneumonia"
##   num     - rank to return: an integer, "best", or "worst"
## Returns: the hospital name (character); NA when `num` exceeds the number
##   of ranked hospitals (out-of-range row indexing yields NA).
## Reads "outcome-of-care-measures.csv" from the working directory.
rankhospital <- function(state, outcome, num = "best") {
  ## Read outcome data
  data <- read.csv("outcome-of-care-measures.csv")
  ## Check that state and outcome are valid.
  ## as.character() works whether column 7 was read as a factor or as
  ## character (the original levels(x)[x] lookup breaks on character
  ## columns, the default in modern R); %in% replaces the manual loop.
  states <- as.character(data[, 7])
  if (!(state %in% states)) {
    stop("invalid state")
  }
  if (!(outcome %in% c("heart attack", "heart failure", "pneumonia"))) {
    stop("invalid outcome")
  }
  ## Column holding the 30-day death rate for the requested outcome
  col <- switch(outcome,
                "heart attack" = 11,
                "heart failure" = 17,
                "pneumonia" = 23)
  ## Coerce the rate to numeric ("Not Available" -> NA; warning suppressed
  ## deliberately) and the hospital name to character for tie-breaking.
  data[, col] <- suppressWarnings(as.numeric(as.character(data[, col])))
  data[, 2] <- as.character(data[, 2])
  ## Exact state match (the original grep() did substring matching)
  statedata <- data[states == state, ]
  ## Order by rate then alphabetically by hospital name; drop NA rates
  orderdata <- statedata[order(statedata[, col], statedata[, 2], na.last = NA), ]
  ## Return hospital name in that state with the given rank 30-day death rate
  if (num == "best") {
    orderdata[1, 2]
  } else if (num == "worst") {
    orderdata[nrow(orderdata), 2]
  } else {
    orderdata[num, 2]
  }
}
# Spot-checks of rankhospital() against assignment quiz cases.
rankhospital("MD", "heart failure", 5)
# NOTE(review): this re-sources the very file that defines rankhospital()
# above — confirm the self-source is intentional.
source("rankhospital.R")
rankhospital("TX", "heart failure", 4)
rankhospital("MD", "heart attack", "worst")
# num = 5000 exceeds the number of MN hospitals, so this should return NA
rankhospital("MN", "heart attack", 5000)
rankhospital("NC", "heart attack", "worst")
rankhospital("WA", "heart attack", 7)
rankhospital("TX", "pneumonia", 10)
rankhospital("NY", "heart attack", 7)
|
340bbb0e1ad9a44ec8a4da242b1fc35a91b9a3cc
|
5009212e26f354715c83200d579dc0299e134da6
|
/R/nCov2019.R
|
022b0e0640740c96c9213940919bda3e71789321
|
[] |
no_license
|
LiGuangming309/nCov2019
|
63d608cc05bdc6e11f0a102debc0601f84757725
|
9dbcacbbe42ca1e5429b4d6ffce0da8b8e9f2e9a
|
refs/heads/master
| 2021-02-11T18:27:30.302844
| 2020-03-02T10:59:11
| 2020-03-02T10:59:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,679
|
r
|
nCov2019.R
|
#' download statistical numbers of the wuhan 2019-nCov
#'
#' Fetches the current snapshot via the internal helper and, when
#' \code{lang = 'en'}, translates country, province, and city names.
#'
#' @title get_nCov2019
#' @param lang one of 'zh' and 'en', for setting language of province and city names.
#' If lang = "auto" (default), it will be set based on Sys.getlocale("LC_CTYPE")
#' @return nCov2019 object
#' @export
#' @importFrom jsonlite fromJSON
#' @author Guangchuang Yu
get_nCov2019 <- function(lang = 'auto') {
# resolve 'auto' to a concrete language (package helper, defined elsewhere)
lang <- which_lang(lang)
# fetch the raw data snapshot (internal helper; source not visible here)
data <- .get_qq_data()
if (lang == 'en') {
# change countries to English
nn <- readRDS(system.file("country_translate.rds", package="nCov2019"))
data$areaTree$name <- nn[as.character(data$areaTree$name)]
# change provinces to English
# NOTE(review): prov_cities is assigned but never referenced below;
# confirm whether trans_province()/trans_city() depend on it, else remove.
prov_cities <- jsonlite::fromJSON(system.file('provinces_and_cities.json', package="nCov2019"))
data$areaTree[[1,"children"]]$name <- trans_province(data$areaTree[[1, "children"]]$name)
# change cities to English
for (i in 1:nrow(data$areaTree[[1,"children"]])) {
data$areaTree[[1, "children"]][[i, "children"]]$name <- trans_city(data$areaTree[[1, "children"]][[i, "children"]]$name)
}
}
# record the resolved language and tag the S3 class
data$lang <- lang
structure(data, class = 'nCov2019')
}
#' load historical data of wuhan 2019-Cov
#'
#' Downloads a pre-built .rds of historical data from one of two mirrors
#' and, when \code{lang = 'en'}, translates place names to English.
#'
#' @title load_nCov2019
#' @inheritParams get_nCov2019
#' @param source historical data source, one of 'github' or 'dxy'
#' @return nCov2019History object
#' @importFrom downloader download
#' @export
#' @author Guangchuang Yu
load_nCov2019 <- function(lang = 'auto', source="github") {
lang <- which_lang(lang)
source <- match.arg(source, c("github", "dxy"))
# download the serialized history to a temp file, then deserialize
rds <- tempfile(pattern=".rds")
if (source == 'dxy'){
url = 'https://gitee.com/timze/historicaldata/raw/master/dxy_origin_historical_data.rds'
} else {
url = 'https://gitee.com/timze/historicaldata/raw/master/dxy_historical_data.rds'
}
downloader::download(url,destfile = rds, quiet = TRUE)
data <- readRDS(rds)
## data <- readRDS(system.file("nCov2019History.rds", package="nCov2019"))
# NOTE(review): prov_cities appears unused in this function; confirm the
# trans_* helpers don't need it before removing.
prov_cities <- jsonlite::fromJSON(system.file('provinces_and_cities.json', package="nCov2019"))
if (lang == 'en') {
# change provinces and city columns to English; for x$data
data$data$province <- trans_province(data$data$province)
data$data$city <- trans_city(data$data$city)
# change provinces to English; for x$province
data$province$province <- trans_province(data$province$province)
# change countries to English; for github source only
if (source == 'github'){
nn <- readRDS(system.file("country_translate.rds", package="nCov2019"))
data$global$country <- nn[as.character(data$global$country)]
}
}
data$lang <- lang
return(data)
}
|
a0ff8f9b5ce86ee62843277bb2d2455220a07fc1
|
ac098be0111a2a7f6788842dbd665a03e15ae1fd
|
/R source files/sally_vat.R
|
033063e838cca89f0f62b28ea7cecb6bdb415863
|
[] |
no_license
|
sallyshi/NYPH-PROJECT
|
dc67d6e2959dcecacec5c8f65e2e4bb95c8da576
|
15b19b2db6539493dc2a45759f01f60718e78980
|
refs/heads/master
| 2020-05-27T08:21:35.622303
| 2015-09-10T00:04:01
| 2015-09-10T00:04:01
| 41,886,601
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,281
|
r
|
sally_vat.R
|
## MEng Project
## Last Modified: March 3rd 2014
## Fits a square-root-transformed linear model for surgery length and
## compares its error (RMSE/MAE on several scales) against the scheduler's
## own Predicted_Length, on training and held-out testing data.
library(car)
library(hydroGOF)
data <-read.csv("~/Dropbox/MEng Project/R Stuff/vat-training.csv")
test_vat <- read.csv("~/Dropbox/MEng Project/R Stuff/vat-testing.csv")
## Constructing the Model
# NOTE(review): ASA.f is created here but the model formula below uses
# ASA_Level, not ASA.f — confirm which was intended.
data$ASA.f = factor(data$ASA) #convert into categorical
test_vat$ASA.f = factor(test_vat$ASA)
# Response is sqrt(Actual_Length), so predictions must be squared to return
# to the original scale (done in the LOG SCALE blocks below).
model_1 <- lm((Actual_Length^(1/2)) ~ Predicted_Length + Age + ASA_Level + Surgeon_Level + Emergency + Room_Level + Pt_Type, data)
qqPlot(model_1$residuals,distribution="norm",main= "QQ Plot of Residuals for Vat", ylab="Sample Quantiles" )
## Diagnostics
std_res = rstandard(model_1)
plot(model_1$fitted.values, std_res,ylab = 'Std. Residuals',xlab = "Fitted Values", main= " Standardized Residual Plot for Vat")
# SQRT RMSE and MSE of our predictions on training data
actual <- (data$Actual_Length)^(1/2)
predicted <- predict(model_1, data)
print(paste("SQRT RMSE of ours on training: ",rmse(predicted,actual)))
#SQRT RMSE and MSE of our predictions on testing data
actual <- (test_vat$Actual_Length)^(1/2)
predicted <- predict(model_1, test_vat)
print(paste("SQRT RMSE of ours on testing: ",rmse(predicted,actual)))
#LOG SCALE RMSE and MSE of our predictions on training data
actual <- log((data$Actual_Length))
predicted <- log(predict(model_1, data)^2)
print(paste("LOG SCALE RMSE of ours on training: ",rmse(predicted,actual)))
#LOG SCALE RMSE and MSE of our predictions on testing data
actual <- log((test_vat$Actual_Length))
predicted <- log(predict(model_1, test_vat)^2)
print(paste("LOG SCALE RMSE of ours on testing: ",rmse(predicted,actual)))
#LOG SCALE MAE of our predictions on testing data
actual <- log((test_vat$Actual_Length))
predicted <- log(predict(model_1, test_vat)^2)
print(paste("LOG SCALE MAE of ours on testing: ",mae(predicted,actual)))
#LOG SCALE MAE of their predictions
actual <- log(test_vat$Actual_Length)
predicted <- log(test_vat$Predicted_Length)
print(paste("MAE of theirs: ",mae(predicted,actual)))
#MAE of Bias-Corrected Version of their predictions
# NOTE(review): the bias is the mean raw-scale error on *training* data,
# added before taking logs — confirm this mixed-scale correction is intended.
actual <- log(test_vat$Actual_Length)
bias_mean_c = mean(data$Actual_Length - data$Predicted_Length)
predicted <- log(test_vat$Predicted_Length + bias_mean_c)
print(paste("MAE of bias: ",mae(predicted,actual)))
|
2dc31e0fc0ff004002d36756711d384745c3d949
|
9f916fe7828f79b3355bc6ff3509ff4b0a62d0b7
|
/man/check_input.Rd
|
29eb6503d731442de16d725145aa049d52673e9e
|
[
"BSD-3-Clause"
] |
permissive
|
greenelab/ADAGEpath
|
9b5d44465a09fbed025bbdd793eaed89e7ec17a2
|
e556970ef334d86ddfbf533acb8753d4ddb93967
|
refs/heads/master
| 2023-08-01T00:11:19.677503
| 2022-05-20T16:48:18
| 2022-05-20T16:48:18
| 70,191,632
| 5
| 7
|
BSD-3-Clause
| 2022-05-20T16:48:19
| 2016-10-06T20:46:19
|
R
|
UTF-8
|
R
| false
| true
| 471
|
rd
|
check_input.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/load_data.R
\name{check_input}
\alias{check_input}
\title{Checking input format}
\usage{
check_input(input_data)
}
\arguments{
\item{input_data}{the input to check}
}
\value{
TRUE if the input_data.frame meets the requirements, otherwise FALSE.
}
\description{
Checks whether the input is a data.frame (or a tibble) with its first
column being character and the rest columns being numeric.
}
|
12b85ed0e595c86915a588e1fab264863391792b
|
3fdcdc91821391eb627cf198c948a70a852417f7
|
/蒙特卡洛+自助抽样法.R
|
10ab8c83253104d2f0911c71866e6d6438ed6393
|
[] |
no_license
|
huuuuuuuue/-r-
|
feff661772774b758727d67f7bbd45aed6bd2f39
|
ad91882a2614e1925e8b97271e236d6a0b0003fe
|
refs/heads/master
| 2020-05-26T20:42:57.161936
| 2019-05-31T06:46:28
| 2019-05-31T06:46:28
| 188,368,234
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,937
|
r
|
蒙特卡洛+自助抽样法.R
|
# Monte Carlo
# Keep generating fresh random samples and compute the estimate from each.
library(MASS)
n = 100
alpha = c()
# 10,000 replicates, each drawing a new bivariate-normal sample of size 100
for (i in 1:10000) {
mu1 = c(0,0) # mean is 0
sigma1 = matrix(c(1,0.5,0.5,1.25),nrow = 2)
rand1 = mvrnorm(n=100, mu = mu1,Sigma = sigma1) # draw multivariate-normal random numbers
x = rand1[,1]
y = rand1[,2]
# weight estimator alpha = (Var(Y)-Cov)/(Var(X)+Var(Y)-2Cov);
# presumably the variance-minimizing portfolio weight — confirm context
alpha[i] = (var(y)-cov(x,y))/(var(x)+var(y)-2*cov(x,y))
}
# Monte Carlo mean, variance, and standard error of the estimator
mean(alpha)
var(alpha)
sqrt(var(alpha))
# Bootstrap resampling
# Generate ONE random sample, then resample within it with replacement.
mu1 = c(0,0) # mean is 0
sigma1 = matrix(c(1,0.5,0.5,1.25),nrow = 2)
rand1 = mvrnorm(n=100, mu = mu1,Sigma = sigma1) # n = sample size, mu = mean vector, Sigma = covariance matrix
alpha = c()
for (i in 1:10000) {
# resample 100 rows with replacement from the single fixed sample
ran1 = rand1[sample(c(1:100),100,replace = T),]
x = ran1[,1]
y = ran1[,2]
alpha[i] = (var(y)-cov(x,y))/(var(x)+var(y)-2*cov(x,y))
}
# bootstrap mean, variance, and standard error of the estimator
mean(alpha)
var(alpha)
sqrt(var(alpha))
#why 1/3
# Scratch exploration of resampling behavior.
r <- 1:100000
r <- c(1:100000)
r = sample(c(1:100000),size=100000,replace=T)
plot(r)
summary(r)
n <- 100000
# a permutation (no replacement) of 1:100000
x <- sample(1:100000, size = n)
# Bootstrap distribution of the sample median.
# NOTE(review): 100,000 replicates x 100,000 draws each is very expensive —
# consider reducing the replicate count.
Mboot <- replicate(100000, expr = {
y <- sample(x, size = n, replace = TRUE)
median(y)
})
print(var(Mboot))
# ---------------------
# Attribution for the snippet above. This was pasted as bare text (no `#`),
# which made the whole script fail to parse; it is now commented out:
#   Author: yujunbeta
#   Source: CSDN
#   Original: https://blog.csdn.net/yujunbeta/article/details/24142545
#   Copyright notice: original article by the author; include the link when reposting.
print(var(r))
# Probability that a bootstrap resample of 1:100 (size 100, with
# replacement) contains the value 4.
# Fixed: `rep=TRUE` relied on partial argument matching — spelled out
# as `replace = TRUE`, and T -> TRUE.
store <- rep(NA, 10000)
for (i in 1:10000) {
  store[i] <- sum(sample(1:100, replace = TRUE) == 4) > 0
}
mean(store)  # empirical estimate of 1 - (1 - 1/100)^100
View(store)
# Single-draw illustrations of the pieces used above
a <- sample(1:100, replace = TRUE)
View(a)
b <- sum(sample(1:100, replace = TRUE) == 4) > 0
View(b)
# --------------
# For each sample size j, estimate the probability that a bootstrap
# resample of 1:j contains the value 1.
# Fixes vs. original:
#  - renamed `mean` -> `p_contains_one` (the original shadowed base::mean)
#  - `rep=TRUE` partial matching -> `replace = TRUE`
#  - mean(store) moved out of the inner loop (it was recomputed 10,000
#    times per j; only the final value was ever kept)
#  - dropped `j = j + 1`: for() resets the loop variable, so it was a no-op
p_contains_one <- rep(NA, 20)
for (j in 1:20) {
  store <- rep(NA, 10000)
  for (i in 1:10000) {
    store[i] <- sum(sample(1:j, size = j, replace = TRUE) == 1) > 0
  }
  p_contains_one[j] <- mean(store)
}
# Exact probability that a with-replacement resample of size i contains a
# given fixed element: 1 - (1 - 1/i)^i (converges to 1 - 1/e ~ 0.632).
# Fixed: the original allocated the result vector under the name `mean`
# (shadowing base::mean) but then filled `store`, silently growing the
# shorter vector left over from the previous section.
store <- rep(NA_real_, 100000)
for (i in 1:100000) {
  store[i] <- 1 - (1 - 1/i)^i
}
plot(store, type = "b", ylim = c(0, 1))  # original's c(0:1) == c(0, 1)
mean(store)
?plot
|
1b13694dbdb8425d169973ab2b5bfcca2c4a9e12
|
e4eb148f75005834704beb06bd8c966b4542d755
|
/5ii_RICKER_ESTIMATION_LAB_10_8_19.R
|
738942b46c48c5562f8487807717f02102432c4f
|
[] |
no_license
|
chythlook/ESC_GOAL_2019
|
85025d83712d11a258923a4408c5d4e8c0cd3156
|
88a4e2b941d3a248600f31a039258559d0bbb86f
|
refs/heads/master
| 2020-08-07T11:41:03.604327
| 2019-10-04T21:30:19
| 2019-10-04T21:30:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,390
|
r
|
5ii_RICKER_ESTIMATION_LAB_10_8_19.R
|
############### Ricker Estimation Lab I - Simulation Model ###############
# Functions are provided here for the first section.
# Load these functions, then calculations will be
# given more explicitly in further sections.
# Ricker spawner-recruit curve: expected recruits produced by S spawners,
# R = S * exp(lnalpha - beta * S). Vectorized over all arguments.
Ricker <- function(S, lnalpha, beta) {
  density_factor <- exp(lnalpha - beta * S)
  S * density_factor
}
# Fit the linearized Ricker model log(R/S) = lnalpha - beta * S + error
# by ordinary least squares.
# Returns a list with the point estimates, the bias-corrected lnalpha'
# (lnalpha + sigma^2/2), residuals, fitted values, and the residual SD.
fitRicker <- function(S, R) {
  log_rps <- log(R / S)
  fit <- lm(log_rps ~ S)
  coefs <- unname(coef(fit))
  sig <- sigma(fit)
  list(
    lnalpha_fit = coefs[1],
    lnalpha_p_fit = coefs[1] + sig^2 / 2,
    beta_fit = -coefs[2],
    resids = fit$residuals,
    fits = fit$fitted.values,
    sigma = sig
  )
}
# Simulate a spawner-recruit time series under a Ricker model with AR(1)
# ("red") recruitment residuals, lognormal observation error on spawners,
# and a two-tier harvest policy around the escapement goal Sgoal.
# Args:
#   lnalpha, beta - Ricker log-productivity and density dependence
#   sigS  - lognormal observation-error SD on spawners
#   F1,F2 - fishing mortality below / above the run-size trigger Rgoal
#   sigW  - white-noise (process) SD of recruitment
#   N     - number of years returned
#   phi   - AR(1) coefficient applied to last year's red residual
#   Sgoal - escapement goal; sigF - SD of implementation error on F
# Returns: list(S, R) = observed series and (Strue, Rtrue) = true states,
#   aligned so R[i] is the recruitment produced by spawners S[i].
simulateSR <- function(lnalpha, beta, sigS, F1, F2, sigW, N, phi, Sgoal, sigF) {
lnalpha_p <- lnalpha + 0.5*sigW*sigW
Seq <- lnalpha_p/beta
# F1 <- -log(1-hrange[1])
# F2 <- -log(1-hrange[2])
hrange <- 1-exp(-c(F1,F2))
# ----- initial values ----- #
# initial value for S: Seq minus some harvest
# NOTE(review): initial S hard-coded at 900 (random initialization is
# commented out) — confirm this is intended.
S <- 900 #Seq*runif(1, 1-hrange[2], 1-hrange[1])
# initial value for observed S
Shat <- S*rlnorm(1, sdlog=sigS)
# initializing all other values
redresid <- 0
# NOTE(review): fittedR is initialized but never assigned or returned.
E1R <- E2R <- R <- whiteresid <- epsF <- Rgoal <- F1t <- H <- Rhat <- lnRhatShat <- fittedR <- NA
# recursive portion...
for(i in 2:(N+1)) {
# E1R: deterministic Ricker expectation; E2R adds the AR(1) carry-over
E1R[i] <- S[i-1]*exp(lnalpha - beta*S[i-1])
E2R[i] <- E1R[i]*exp(phi*redresid[i-1])
R[i] <- E2R[i]*rlnorm(1,0,sigW)
redresid[i] <- log(R[i]/E1R[i])
whiteresid[i] <- log(R[i]/E2R[i])
# implementation error on fishing mortality this year
epsF[i] <- rnorm(1,0,sigF)
F1t[i] <- F1*exp(epsF[i])
Rgoal[i] <- Sgoal/exp(-F1t[i])
# below the trigger: harvest at F1; above: take Sgoal plus F2 on the surplus
S[i] <- ifelse(R[i]<Rgoal[i], R[i]*exp(-F1t[i]), Sgoal+(R[i]-Rgoal[i])*exp(-F2*exp(epsF[i])))
Shat[i] <- S[i]*rlnorm(1, sdlog=sigS)
H[i] <- R[i]-S[i]
# observed recruitment = observed spawners + (true) harvest
Rhat[i] <- Shat[i]+H[i]
lnRhatShat[i] <- log(Rhat[i]/Shat[i])
}
return(list(S=Shat[1:N],
R=Rhat[2:(N+1)],
Strue=S[1:N],
Rtrue=R[2:(N+1)]))
}
# ----- Biological reference points derived from Ricker parameters -----

# Spawner abundance maximizing recruitment (the Ricker curve peaks at 1/beta).
calc_Smax <- function(beta) {
  1 / beta
}

# Equilibrium spawner abundance (where R = S on the Ricker curve).
calc_Seq <- function(lnalpha, beta) {
  lnalpha / beta
}

# Approximate spawners at MSY (linear-in-lnalpha approximation).
calc_Smsy <- function(lnalpha, beta) {
  calc_Seq(lnalpha, beta) * (0.5 - 0.07 * lnalpha)
}

# Approximate exploitation rate at MSY (same approximation).
calc_Umsy <- function(lnalpha) {
  lnalpha * (0.5 - 0.07 * lnalpha)
}
# Fit the Ricker model to simulated data, tabulate estimates next to their
# relative error vs. the true values, and (optionally) draw diagnostic plots.
# Args:
#   simdata - list from simulateSR() (S, R observed; Strue, Rtrue true)
#   lnalpha, beta, sigma - true simulation parameters; Sgoal - goal line
#   plot - if TRUE, draw plots and return errors formatted as percents;
#          if FALSE, return numeric errors.
# Returns: data.frame with rows lnalpha/beta/sigW/Smax/Seq/Smsy/Umsy.
summary_PlotTable <- function(simdata, lnalpha, beta, sigma, Sgoal, plot=T) {
fits <- fitRicker(S=simdata$S, R=simdata$R)
estimates <- c(fits$lnalpha_fit,
fits$beta_fit,
fits$sigma,
calc_Smax(fits$beta_fit),
calc_Seq(fits$lnalpha_p_fit, fits$beta_fit),
calc_Smsy(fits$lnalpha_p_fit, fits$beta_fit),
calc_Umsy(fits$lnalpha_p_fit))
# true reference points use the bias-corrected lnalpha' = lnalpha + sigma^2/2
truevals <- c(lnalpha,
beta,
sigma,
calc_Smax(beta),
calc_Seq(lnalpha + sigma^2/2, beta),
calc_Smsy(lnalpha + sigma^2/2, beta),
calc_Umsy(lnalpha + sigma^2/2))
error <- (estimates-truevals)/truevals
tbl <- data.frame(Estimates=estimates, Error=error)
tbl1 <- data.frame(Estimates=estimates, Error=paste0(100*round(error,2),"%"))
rownames(tbl) <- c("lnalpha_hat",
"beta_hat",
"sigW_hat",
"Smax_hat",
"Seq_hat",
"Smsy_hat",
"Umsy_hat")
rownames(tbl1) <- rownames(tbl)
# Plotting
if(plot) {
parmfrow <- par("mfrow") # storing graphical parameters before changing them
par(mfrow=c(1,2))
plotlimits <- c(0, 1.1 * max(simdata$S, simdata$R))
# histogram of S
hist(simdata$S, main="Observed Number of Spawners", xlim=plotlimits, breaks=seq(0,plotlimits[2],length.out=20), col=adjustcolor(4, alpha.f=.5), xlab="")
# histogram of R
hist(simdata$R, main="Observed Number of Recruits", xlim=plotlimits, breaks=seq(0,plotlimits[2],length.out=20), col=adjustcolor(2, alpha.f=.5), xlab="")
# SRR plot
plot(simdata$Strue, simdata$Rtrue, xlim=plotlimits, ylim=plotlimits, xlab="Spawners (S)", ylab="Recruits (R)", main="SRR")
abline(0,1)
# NOTE(review): re-fits the model already fit above as `fits` — could reuse.
ests <- fitRicker(S=simdata$S, R=simdata$R)
points(simdata$Strue, Ricker(S=simdata$Strue, lnalpha=lnalpha, beta=beta), col=4, pch=15)
points(simdata$Strue, Ricker(S=simdata$Strue, lnalpha=ests$lnalpha_fit, beta=ests$beta_fit), col=2, pch=18)
legend("topright",legend=c("Simulated","True","Estimated"), pch=c(1,15,18), col=c(1,4,2))
# RSR plot
plot(simdata$Rtrue[-length(simdata$Rtrue)], simdata$Strue[-1], xlim=plotlimits, ylim=plotlimits, ylab="Spawners (S)", xlab="Recruits (R)", main="RSR")
abline(0,1)
abline(h=Sgoal, lty=2, lwd=2, col=2)
legend("topleft", legend=c("Simulated data","R=S","Escapement goal"), pch=c(1,NA,NA), lty=c(NA,1,2), lwd=c(NA,1,2), col=c(1,1,2))
par(mfrow=parmfrow) # resetting graphical parameters
}
if(!plot) return(tbl)
if(plot) return(tbl1)
}
### Parts 1-6 ###
# Entering true parameter values
lnalpha <- 1.5
beta <- 0.001
sigW <- 0.4
phi <- 0
sigS <- 0.1
F1 <- 0.1 # 0.4
Sgoal <- 500
F2 <- 0.4 # 1.6
sigF <- 0.4
N <- 100
# Simulating a dataset
simdata <- simulateSR(lnalpha=lnalpha, beta=beta, sigW=sigW, phi=phi, sigS=sigS, F1=F1, F2=F2, Sgoal=Sgoal, sigF=sigF, N=N)
# Producing a summary plot, and table of parameter estimates & relative error
summary_PlotTable(simdata, lnalpha, beta, sigW, Sgoal)
# R extra: relative error of 1000 iterations of simulation & estimation!
nreps <- 1000
# one row per replicate, one column per parameter/reference point
errormat <- matrix(nrow=nreps, ncol=7)
for(i in 1:nreps) { # this may take a few seconds to run
simdata1 <- simulateSR(lnalpha=lnalpha, beta=beta, sigW=sigW, phi=phi, sigS=sigS, F1=F1, F2=F2, Sgoal=Sgoal, sigF=sigF, N=N)
summarytbl <- summary_PlotTable(simdata1, lnalpha, beta, sigW, Sgoal, plot=F)
errormat[i,] <- summarytbl[,2]
}
colnames(errormat) <- rownames(summarytbl)
# Plotting the relative error of all estimates, for all iterations of simulation & estimation.
# Parameters and reference points that are estimated more precisely will have comparatively
# narrower distributions in the boxplot.
boxplot(errormat, ylim=c(-1.5,1.5))
### Part 7 ###
# Resetting parameters to default values...
lnalpha <- 1.5
beta <- 0.001
sigW <- 0.4
phi <- 0
sigS <- 0.1
F1 <- 0.1 # 0.4
Sgoal <- 500
F2 <- 0.4 # 1.6
sigF <- 0.4
N <- 100
############### Ricker Estimation Lab II - Point Estimates ###############
### Parts 1-3 ###
# Run the following lines to simulate a fresh dataset.
# Note: data can also be read into R from a spreadsheet.
simdata <- simulateSR(lnalpha=lnalpha, beta=beta, sigW=sigW, phi=phi, sigS=sigS, F1=F1, F2=F2, Sgoal=Sgoal, sigF=sigF, N=N)
S <- tail(simdata$S, 20) # this takes the last 20 years of our 100-year dataset
R <- tail(simdata$R, 20)
# plotting will happen at the end of Section II
log_RS <- log(R/S) # this calculates a new vector all at once
### Part 4 ###
# Linear regression is done in R using the lm() function, in the form lm(y~x).
# Storing the results from lm() in lm_fit creates an object that we can extract information from.
lm_fit <- lm(log_RS~S)
summary(lm_fit) # inspect the results
# plotting will happen at the end of Section II
# Extracting the coefficients.
# Note: unname() isn't needed, but some unnecessary information is carried over from lm_fit otherwise.
lnalpha_hat <- unname(lm_fit$coefficients[1])
beta_hat <- unname(-lm_fit$coefficients[2])
# beta_hat <- NA
# Bias-corrected lnalpha_p
sigma_hat <- sigma(lm_fit)
lnalpha_p_hat <- lnalpha_hat + (sigma_hat^2)/2
### Part 5 ###
# Biological reference points from the point estimates
Smax_hat <- 1/beta_hat
Seq_hat <- lnalpha_p_hat/beta_hat
Smsy_hat <- Seq_hat*(0.5-0.07*lnalpha_p_hat)
Umsy_hat <- lnalpha_p_hat*(0.5-0.07*lnalpha_p_hat)
MSY_hat <- Smsy_hat*exp(lnalpha_p_hat-beta_hat*Smsy_hat)-Smsy_hat
# Smax_hat <- NA
# Seq_hat <- NA
# Smsy_hat <- NA
# Umsy_hat <- NA
# MSY_hat <- NA
### Part 6-7 ###
# Fitted values and residuals can be extracted from lm_fit.
fits <- lm_fit$fitted.values
resids <- lm_fit$residuals
# # Fitted values and residuals can be extracted from lm_fit.
# # to find them try: str(lm_fit)
# fits <- NA
# resids <- NA
# The Durbin-Watson test is available in the car package.
# If the line below doesn't work, run install.packages("car") and try again.
library(car)
durbinWatsonTest(lm_fit)
### Part 8 ###
# This uses our Ricker function from above, though it could have been calculated by hand.
# Note: R does vector calculation automatically, so it returns the Rhat vector all at once.
Rhat <- Ricker(S, lnalpha_hat, beta_hat)
# # Calculate estimated recruitment by hand.
# # Note: R does vector calculation automatically, so it returns the Rhat vector all at once.
# Rhat <- NA
### Part 9 & results ###
# Printing point estimates and compare to simulated values
lnalpha_hat; lnalpha;
beta_hat; beta;
sigma_hat; sigW;
lnalpha_p_hat; (lnalpha_p <- lnalpha + sigW^2/2);
Smax_hat; 1/beta;
Seq_hat; (S_eq <- lnalpha_p / beta);
Smsy_hat; (S_msy <- S_eq * (0.5 - 0.07 * lnalpha_p));
Umsy_hat; lnalpha_p * (0.5 - 0.07 * lnalpha_p);
MSY_hat; S_msy * exp(lnalpha_p - beta * S_msy) - S_msy;
# Printing point estimates and compare to simulated values
# Blank, students complete
# Plotting...
par(mfrow=c(2,2)) # plots will now be on a 2x2 matrix
plot(S, R, xlim=c(0,max(S,R)), ylim=c(0,max(S,R)))
abline(0, 1, lty=3) # replacement line - arguments draw a line with y-int=0, slope=1, and dotted
curve(Ricker(x, lnalpha_hat, beta_hat), add=T) # adding a Ricker curve using our Ricker function from above
plot(S, log_RS)
abline(lm_fit) # regression line from lm_fit
abline(h=0, lty=3) # horizontal line at y=0
plot(S, ylim=c(0, max(S,R)), type='l', col="red")
lines(R, col="blue")
legend("topright", legend=c("S","R"), col=c("red","blue"), lty=1)
plot(resids, type='l', main="Residuals")
abline(h=0, lty=3) # horizontal line at y=0
par(mfrow=c(1,1))
############### Ricker Estimation Lab 2 - Quantifying Uncertainty ###############
### Parts 1-5 ###
# Residual (fixed-X) bootstrap of the linearized Ricker regression.
# Other methods for bootstrapping exist in R, but the sample() function works well here.
# compare the original residuals to a single resample below
data.frame(original = resids, resampled = sample(resids, replace=T)) #resampled residuals
# The structure below is called a "for loop".
# The value of i is advanced by one (1, 2, ..., B) and R performs all the calculations
# within the { } braces for each possible value of i.
B <- 10000 # how many bootstrap replicates to do. This is upped from 1000 because computing power is cheap!
lnalpha_boot <- lnalpha_p_boot <- beta_boot <- NA # initializing vectors to fill in within the loop
for(i in 1:B) {
# new response = fitted values + resampled residuals, then refit
y_boot <- fits + sample(resids, replace=T)
lm_boot <- lm(y_boot~S)
lnalpha_boot[i] <- unname(lm_boot$coefficients[1])
lnalpha_p_boot[i] <- lnalpha_boot[i] + 0.5*(sigma(lm_boot))^2
beta_boot[i] <- unname(-lm_boot$coefficients[2])
}
# Censoring the impossible! See "When beta is Poorly Defined"...
# This creates a logical vector with TRUE for impossible values, and then removes them from the bootstrap results.
impossible <- (lnalpha_boot<0) | (beta_boot<0) # "|" = "or"
lnalpha_boot <- lnalpha_boot[!impossible]
lnalpha_p_boot <- lnalpha_p_boot[!impossible]
beta_boot <- beta_boot[!impossible]
### Part 6 ###
# Bootstrap distributions of biological reference points
# Note: these are all automatically calculated as vectors
Smax_boot <- 1/beta_boot
Seq_boot <- lnalpha_p_boot/beta_boot
Smsy_boot <- Seq_boot*(0.5-0.07*lnalpha_p_boot)
Umsy_boot <- lnalpha_p_boot*(0.5-0.07*lnalpha_p_boot)
MSY_boot <- Smsy_boot*exp(lnalpha_p_boot-beta_boot*Smsy_boot)-Smsy_boot
# Plotting as histograms
par(mfrow=c(2,2)) # Plots will now be on a 2x2 matrix
hist(lnalpha_boot)
hist(beta_boot)
hist(Seq_boot)
hist(Smsy_boot)
par(mfrow=c(1,1))
### Part 7 ###
# The quantile function can be used to calculate 10th and 90th percentiles from the bootstap distributions.
# try it for lnalpha_boot and beta_boot
quantile(lnalpha_boot, p=c(0.1, 0.9))
quantile(beta_boot, p=c(0.1, 0.9))
# quantile(lnalpha_boot, p=c(NA, NA))
# quantile(beta_boot, p=c(NA, NA))
# ...or you can do it all at once...
bootmat <- data.frame(lnalpha_boot,
beta_boot,
Smax_boot,
Seq_boot,
Smsy_boot,
Umsy_boot,
MSY_boot) # check out head(bootmat) to see what this is
sapply(bootmat, quantile, p=c(0.1, 0.9))
# Note: to dissect the sapply() function call:
# - bootmat: apply some function to each row element of bootmat
# - quantile: what function? the quantile() function
# - p=c(0.1, 0.9): additional argument for quantile()
### Part 8 ###
# nonparametric CV: 80% interval width / median / 2.56
diff(quantile(lnalpha_boot, p=c(0.1, 0.9))) / median(lnalpha_boot) / 2.56
# Try this yourself for beta_boot
#diff(quantile(beta_boot, p=c(0.1, 0.9)))/median(lnalpha_boot)/2.56
# It seems useful to create a NPCV function, since it looks like we'll do it a few times.
# Normalized Proportional CV: the width of a central `conf` interval of a
# bootstrap distribution, scaled by the distribution's median and by 2.56.
#   x    -- numeric vector (one bootstrap distribution)
#   conf -- overall confidence level for the interval (default 0.8)
# Returns a single numeric value.
NPCV <- function(x, conf = 0.8) {
  tail_prob <- (1 - conf) / 2                              # probability in each tail
  bounds <- quantile(x, p = c(tail_prob, 1 - tail_prob),   # interval endpoints,
                     names = FALSE)                        # without percentile names
  (bounds[2] - bounds[1]) / median(x) / 2.56
}
# NPCV for a single bootstrap distribution, at the default and a wider confidence
NPCV(lnalpha_boot)
NPCV(lnalpha_boot, conf=0.9)
# ...and for every column of bootmat at once
sapply(bootmat, NPCV)
sapply(bootmat, NPCV, conf=0.9)
### Part 9 ###
# Spaghetti plot: overlay the first 50 bootstrap Ricker curves on the data.
par(mfrow=c(1,1))
plot(S, R, xlim=c(0,max(S,R)), ylim=c(0,max(S,R)))
for(i in 1:50) {
  # adding a new curve using the Ricker() function, with each bootstrap rep of lnalpha_boot and beta_boot
  curve(Ricker(x, lnalpha_boot[i], beta_boot[i]), col=adjustcolor(2,alpha.f=.3), add=TRUE)
}
curve(Ricker(x, lnalpha, beta), lwd=2, col=4, add=TRUE) # adding the overall curve again
points(S, R)  # redraw the points on top of the curves
############### Ricker Estimation Lab IV - Graphical Tools for Evaluating Escapement Goals ###############
### Part 1 ###
# We will be working with matrices in this section:
# - each row corresponds to one bootstrap replicate
# - each column corresponds to one prospective escapement
S_max <- 2000 # Max value of prospective escapements
S_star <- seq(1, S_max, length.out=1000) # Prospective escapements
# expanded as a matrix
# one row for every bootstrap and one column for every prospective escapement
S_star_mat <- matrix(S_star, nrow=length(beta_boot), ncol=length(S_star), byrow=TRUE) # expanded as a matrix
# check dimensions
dim(S_star_mat)
# initializing the R_star matrix, then filling it in one column at a time, using bootstrap vectors all at once
R_star <- matrix(nrow=length(beta_boot), ncol=length(S_star))
for(i in seq_along(S_star)) {
  R_star[,i] <- Ricker(S_star[i], lnalpha_p_boot, beta_boot)
}
# Sustained yield SY = R - S; S_star is expanded as a matrix below so the
# subtraction lines up with R_star element-by-element
SY_star <- R_star - matrix(S_star, nrow=length(beta_boot), ncol=length(S_star), byrow=TRUE)
# Also expanding MSY_boot and Smsy as matrix (one bootstrap value per row)
MSY_boot_star <- matrix(MSY_boot, nrow=length(beta_boot), ncol=length(S_star))
Smsy_boot_star <- matrix(Smsy_boot, nrow=length(beta_boot), ncol=length(S_star))
# This is analogous to (c)-(d) and returns a vector of averages.
# colMeans() and rowMeans() return the means of each column or row of a matrix
# Note: we're taking the averages of ones and zeroes, just like the spreadsheet
OYP <- colMeans(SY_star >= 0.9*MSY_boot_star) # Optimal Yield Profile
OFP <- colMeans((SY_star < 0.9*MSY_boot_star) & (S_star_mat < Smsy_boot_star)) # Overfishing Profile
# Starting a plot...
# NOTE(review): make_OYP() reads S_star, OYP, and OFP from the enclosing
# environment rather than taking arguments.
make_OYP <- function() { # this is a shortcut for creating the plot
  plot(S_star, OYP, type='l', col="green", ylim=0:1, ylab="Probability")
  lines(S_star, OFP, col="red")
  grid()
}
par(mfrow=c(2,1))
make_OYP()
### Part 2 ###
# extracting quantiles from expected yield
quants <- c(0.05, 0.1, 0.5, 0.9, 0.95) # which quantiles to extract
# This step pulls all quantiles at once. To dissect the apply() function call:
# - SY_star: the SY_star matrix is what we want to apply a function to
# - 2: we want one result for each column (1=rows, 2=columns) ...this can handle higher-dimension arrays if needed
# - quantile: quantile() is the function to apply
# - p=quants: an additional argument to the quantile() function
SY_quantiles <- apply(SY_star, 2, quantile, p=quants)
# This is the first 5 columns it returned. There's a set of yield quantiles associated with each value of S_star.
SY_quantiles[,1:5]
# NOTE(review): make_EYP() reads S_star and SY_quantiles from the enclosing
# environment rather than taking arguments.
make_EYP <- function() { # this is a shortcut for creating the plot
  # Making a blank plot to add lines to
  plot(NA, xlab="S", ylab="Expected Yield", xlim=range(S_star), ylim=c(0,max(SY_quantiles)))
  ltys <- c(3,2,1,2,3) # the line type for plotting each line
  # one line per quantile row: 5%/10%/50%/90%/95%
  for(i in 1:5) {
    lines(S_star, SY_quantiles[i,], lty=ltys[i])
  }
  grid()
  legend("topright", legend=c("median", "80% intvl", "90% intvl"), lty=1:3)
}
make_EYP()
### Part 3 ###
# look at the OYP and EYP to come up with an escapement goal range
# NOTE: the NA values below are intentional student fill-ins -- replace them
# with your chosen goal range before running the rest of this section.
EG <- c(NA, NA)
par(mfrow=c(2,1))
make_OYP()
abline(v=EG, lwd=3, col="red")
make_EYP()
abline(v=EG, lwd=3, col="red")
# probabilities associated with EG endpoints
OYP[S_star %in% floor(EG)]
# yield associated with EG range
round(c(min(SY_quantiles[1, S_star>=EG[1] & S_star<=EG[2]]),
        max(SY_quantiles[5, S_star>=EG[1] & S_star<=EG[2]])))
############### Ricker Estimation Lab V - Percentile Goals ###############
# Calculate the contrast in S (ratio of largest to smallest observed escapement)
max(S) / min(S)
# Intentional student fill-in: pick the percentile pair for your goal.
percentiles <- c(NA, NA) # enter your chosen percentiles here!
percEG <- round(quantile(S, percentiles))
# return the resulting EG
percEG
# and plot
par(mfrow=c(2,1))
make_OYP()
abline(v=percEG, lwd=3, col="red")
make_EYP()
abline(v=percEG, lwd=3, col="red")
par(mfrow=c(1,1))
# probabilities associated with EG endpoints
OYP[S_star %in% floor(percEG)]
# yield associated with EG range
round(c(min(SY_quantiles[1, S_star>=percEG[1] & S_star<=percEG[2]]),
        max(SY_quantiles[5, S_star>=percEG[1] & S_star<=percEG[2]])))
|
430655e2975d6c69ca9da500eb22de18e09d72c8
|
af72407b36c1ee3182f3a86c3e73071b31456702
|
/tests/testthat/test-misc.R
|
f0f60f392386b636b9cc462edfa691d4007f6dfb
|
[
"MIT"
] |
permissive
|
bcjaeger/ipa
|
f89746d499500e0c632b8ca2a03904054dc12065
|
2e4b80f28931b8ae6334d925ee8bf626b45afe89
|
refs/heads/master
| 2021-07-12T20:52:23.778632
| 2020-04-26T16:44:01
| 2020-04-26T16:44:01
| 207,016,384
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 682
|
r
|
test-misc.R
|
# Unit tests for small utility helpers (list_things, drop_empty, text_pillar,
# df_unique_indx). These call functions from the package under test, so they
# only run inside the package's testthat harness.
test_that(
  "inputs work",
  {
    things <- 1:3
    expect_equal(list_things(things), "1, 2 and 3")
    things <- letters[1:2]
    expect_equal(list_things(things), "a and b")
  }
)
test_that('drop empty works', {
  # use <- for assignment, consistent with the rest of the file
  a <- list(a = 1, b = integer())
  expect_equal(drop_empty(a), list(a = 1))
  b <- list(a = 1, b = 2)
  # actual value first, expected value second
  expect_equal(drop_empty(b), b)
})
test_that('text pillar works', {
  expect_equal(
    text_pillar(lhs = 'a', rhs = 1.2, middle = 'is'),
    '<a> is <1.2>'
  )
})
test_that('df_unique_indx works', {
  vec <- c(0, 1, 1, 2, 4, 4, 4, 5)
  expect_equal(df_unique_indx(vec), c(1, 3, 4, 7, 8))
  vec <- c(1:10)
  # actual value first, expected value second
  expect_equal(df_unique_indx(vec), vec)
})
|
d9e75dfee894aa1c2d461e3e5a6716db5f562494
|
8f46bd450429179c8530783cecf736cd7b88ae3d
|
/plot5.R
|
f56301fe86edd995bf41c9fb44fcf5f63640bc07
|
[] |
no_license
|
reckbo/Coursera-ExploratoryDataAnalysis-project2
|
802cd87a1841235afcc1a2fff465f855aa9489be
|
98be1d92d2e0e7c7af915fbb68d58d2d9c6f0229
|
refs/heads/master
| 2020-12-25T05:27:16.781528
| 2014-08-24T05:37:04
| 2014-08-24T10:31:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 994
|
r
|
plot5.R
|
# plot5: total PM2.5 emissions from motor vehicles in Baltimore City, by year.
source('setup.R')
library(dplyr)
# ggplot2 is needed for ggplot()/ggsave(); loaded explicitly in case setup.R
# does not attach it (setup.R's contents are not visible here).
library(ggplot2)
# Cache the (large) NEI/SCC tables across repeated runs of this script.
if (!exists('NEI')) {
  NEI <- readRDS("summarySCC_PM25.rds")
  SCC <- readRDS("Source_Classification_Code.rds")
}
fips.baltimore <- "24510"
# I confirmed that subsetting by `type=='ON-ROAD'` is equivalent to subsetting with
# `grepl('vehicle', EI.Sector, ignore.case=TRUE)`, and I will interpret motor
# vehicles this way, according to the helpful link provided by a post in the discussion
# forums: http://www.epa.gov/ttn/chief/net/2008neiv3/2008_neiv3_tsd_draft.pdf
emissions <- left_join(NEI, SCC, by='SCC') %>%
  filter(grepl('vehicle', EI.Sector, ignore.case=TRUE) & fips == fips.baltimore) %>%
  group_by(year) %>%
  summarize(tot=sum(Emissions))
g <- ggplot(emissions, aes(x=year, y=tot)) +
  geom_point(shape=16, size=4) +
  geom_line() +
  ylab('Total Emissions (tons)') +
  xlab('Year') +
  ggtitle(expression('Baltimore PM'[2.5]*' Emissions From Motor Vehicles'))
# spell out height/width rather than partially matching h=/w=
ggsave(filename='plot5.png', plot=g, height=7, width=7, dpi=100)
|
24eca3859af9e33d8807cacf687621b849d14388
|
18e1ea7f2a92537ce82660a8ed6376470539829c
|
/R/Load.R
|
eff595d03ea7bea38ad609157d7c5ada6dcb6457
|
[] |
no_license
|
fansi-sifan/Kickstarter_survivor
|
519458ca4875ba71e6fc8a42fade0780bac839f9
|
2027c6914fbb62c5674a58b44d29c541c5db73e7
|
refs/heads/master
| 2020-03-23T02:19:31.628507
| 2018-09-06T01:06:49
| 2018-09-06T01:06:49
| 140,968,841
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,724
|
r
|
Load.R
|
# Author:Sifan Liu
# Date: Fri Aug 31 22:10:53 2018
# --------------
# Attach required packages; require() returns FALSE for any package that is
# not installed, so missing ones are installed and attached on a second pass.
pkgs <- c('dplyr',"RJSONIO","jsonlite",'rvest')
check <- sapply(pkgs,require,warn.conflicts = TRUE,character.only = TRUE)
if(any(!check)){
  pkgs.missing <- pkgs[!check]
  install.packages(pkgs.missing)
  check <- sapply(pkgs.missing,require,warn.conflicts = TRUE,character.only = TRUE)
}
# Left-pad `x` with zeros to width `n` (default: the widest element of x).
# formatC() right-aligns with spaces; gsub() then swaps those spaces for zeros.
padz <- function(x, n = max(nchar(x))) {
  padded <- formatC(x, width = n)
  gsub(" ", "0", padded)
}
# START HERE =======================================================
# grab all of the download links from the site
page <- read_html("https://webrobots.io/kickstarter-datasets/")
links <- page %>%
  html_nodes("a") %>%
  html_attr("href")
# Split the links into gzipped-json dumps and zipped-json dumps.
js_path = grep(".\\.json\\.gz",links, value = TRUE)
jszip_path = grep(".\\.json\\.zip",links, value = TRUE)
# read gz json
# Download one gzipped NDJSON dump and flatten it to the columns of interest,
# dropping live (still-running) campaigns.
# NOTE(review): assumes each streamed record has a `data` element containing
# `location` and `category` sub-lists -- confirm against the current dump schema.
jsdownload <- function(x){
  js <- stream_in(gzcon(url(x)))
  df = js$data %>% select(id,state, goal,pledged,backers_count,location, category,launched_at)
  # flatten the nested location/category lists into plain columns
  df$location_id <- df$location$id
  df$location_name <- df$location$displayable_name
  df$location_state <- df$location$state
  df$location_country <- df$location$country
  df$category_name <- df$category$name
  df$category_broad <- df$category$slug
  df <- df %>% select(-location,-category)
  df %>% filter(state != "live")
}
# loop over all links
# NOTE(review): the 2:28 range is hard-coded -- breaks if the site lists a
# different number of dumps; consider seq_along(js_path)[-1].
unionfile = jsdownload(js_path[1])
for (file in js_path[2:28]){
  unionfile <- union(unionfile, jsdownload(file))
}
# zipped file from url
# tmp <- tempfile()
# download.file(jszip_path[2], tmp)
#
# dat <- jsonlite::fromJSON(sprintf("[%s]", paste(readLines(unzip(tmp,"Kickstarter_2015-06-12.json")), collapse="")))
# Read a locally downloaded JSON dump (hard-coded path) and flatten each
# record, replacing NULLs with NA so unlist() keeps the columns aligned.
json_file <- RJSONIO::fromJSON("../../Downloads/Kickstarter_Kickstarter.json" )
temp <- sapply(json_file, function(x) {
  x[sapply(x, is.null)] <- NA
  unlist(x)
})
# NOTE(review): rbind_list() is deprecated in current dplyr; bind_rows() is
# the modern replacement.
temp <- rbind_list(temp)
names(temp) <- gsub("projects.","",names(temp))
temp <- temp %>% select(id,state, goal,pledged,backers_count,launched_at,
                        location.id,location.name, location.state,location.country,
                        category.name,category.slug)
# unionfile <- union %>% select(-category, -success)
# align the local dump's column names with the scraped data before unioning
names(temp) <- names(unionfile)
temp <- temp %>%
  filter(state!="live") %>%
  mutate(id = as.integer(id),
         goal = as.numeric(goal),
         pledged = as.numeric(pledged),
         backers_count = as.integer(backers_count),
         launched_at = as.integer(launched_at),
         location_id = as.integer(location_id))
# NOTE(review): `union` shadows base::union / dplyr::union as a variable name.
union <- dplyr::union(unionfile, temp)
# broad category = slug text before the first "/"
union$category <- gsub("(\\/.+)","",union$category_broad)
union$success <- ifelse(union$state =="successful", 1,0)
# save the results ======================================================
save(union, file = "union.Rda")
|
2634ce933037258f1707fd752bca06a79daa1963
|
3bfe56a625eadfb08b8ae569595869755cf7f906
|
/analysis_func.R
|
175a4571a6b5419f79bcc4ef20b4a926cb968641
|
[] |
no_license
|
zuowx/IBD_analysis
|
20ae1225bb0402d8bc577b72e7885b7f3ed3fd49
|
68e90a0fbcbfc92312395ee3f585a91e4cf1d5f0
|
refs/heads/main
| 2023-07-18T02:38:15.077861
| 2021-09-02T01:05:26
| 2021-09-02T01:05:26
| 402,243,761
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,728
|
r
|
analysis_func.R
|
library(ggpubr)
library(ggplot2)
# Boxplot + jittered points of Shannon diversity by phenotype, with a
# Wilcoxon test annotation comparing the "Healthy" and "UC" groups.
#   df         -- data frame with columns Phenotype and shannon
#   plot_title -- title string for the plot
# Returns a ggplot object.
# NOTE(review): the name plot.shannon looks like an S3 method for plot() on
# class "shannon"; a dot-free name (plot_shannon) would avoid accidental
# dispatch.
plot.shannon <- function(df, plot_title) {
  p <- ggplot(df, aes(x=Phenotype,y=shannon)) +
    geom_boxplot(aes(colour=Phenotype),width=0.5,size=0.5,outlier.fill="white",outlier.color="white")+
    geom_jitter(aes(colour=Phenotype,fill=Phenotype),width =0.2,shape = 21,size=2)+
    scale_y_continuous(name = "Shannon Index",limits = c(0,max(df$shannon)+0.5))+
    scale_x_discrete(name = "Phenotypes")+
    labs(title=plot_title)+
    stat_compare_means(comparisons=list(c("Healthy","UC")),correct=FALSE,label="p.format",method = "wilcox.test")+
    theme_bw()+
    theme(panel.grid.minor = element_blank())+
    theme(legend.position="none")
  return(p)
}
library(vegan)
library(ape)
# PCoA ordination of Bray-Curtis distances computed from a feature table,
# with a PERMANOVA (adonis) annotation for the SampleType effect.
#   filename   -- CSV of a feature table (rows = features, columns = samples)
#   metadata   -- data frame with SampleID, SampleType, Gender, Age; rownames
#                 must match the distance-matrix sample names
#   plot_title -- title string for the plot
# Returns a ggplot object.
plot.pcoa <- function(filename,metadata,plot_title){
  # feature table
  feature_table <- read.csv(filename,row.names=1, check.names = F)
  # filter the samples
  feature_table <- feature_table[,colnames(feature_table)%in%metadata$SampleID]
  # normalize the feature table (relative abundance per sample/column)
  feature_table <- as.data.frame(apply(feature_table,2,function(x) x/sum(x)))
  # compute the bray curtis distance
  beta_diversity <- as.matrix(vegdist(t(feature_table),method = "bray"))
  # permanova: test SampleType while adjusting for Gender and Age
  metadata <- metadata[rownames(beta_diversity),]
  permanova <- adonis(beta_diversity~SampleType+Gender+Age, data=metadata, permutations=1000)
  r2 <- permanova$aov.tab["SampleType","R2"]
  p.value <- permanova$aov.tab["SampleType","Pr(>F)"]
  # annotate the r2 and p value in the figure
  r2 <- sprintf("italic(R^2) == %.3f",r2)
  p.value <- sprintf("italic(p) == %.3f",p.value)
  permanova_labels <- data.frame(r2=r2,p.value=p.value,stringsAsFactors = FALSE)
  # pcoa plot
  PCOA <- pcoa(as.dist(beta_diversity))
  # data frame for pcoa plot
  pcoa_df <- as.data.frame(PCOA$vectors[,1:2])
  # sign flip on axis 1 for display only; PCoA axis orientation is arbitrary
  pcoa_df$Axis.1 <- -pcoa_df$Axis.1
  pcoa_df$SampleID <- rownames(pcoa_df)
  pcoa_df <- merge(pcoa_df,metadata,by="SampleID")
  # axis labels: percent of variation explained by each of the first two axes
  pro1 <- as.numeric(sprintf("%.3f",PCOA$values[,"Relative_eig"][1]))*100
  pro2 <- as.numeric(sprintf("%.3f",PCOA$values[,"Relative_eig"][2]))*100
  # plot
  # NOTE(review): axis limits and annotation coordinates are hard-coded for a
  # particular dataset -- points outside xlim/ylim will be dropped silently.
  pcoa_plot <- ggplot(pcoa_df,aes(x=Axis.1,y=Axis.2,col=SampleType)) +
    geom_point(size=3) +
    xlim(-0.7,0.6) + ylim(-0.6,0.4) +
    labs(x=paste0("PCOA1(",pro1,"%)"), y=paste0("PCOA2(",pro2,"%)"),title=plot_title) +
    geom_vline(aes(xintercept=0),linetype="dotted") +
    geom_hline(aes(yintercept=0),linetype="dotted") +
    stat_ellipse(aes(group=SampleType,col=SampleType), level = 0.8, show.legend=FALSE)+
    annotate("text",label="Phenotypes:",x=-0.65,y=-0.4,size=4,hjust=0)+
    geom_text(data=permanova_labels,mapping=aes(x=-0.65,y=-0.47,label=r2),parse=TRUE,inherit.aes=FALSE,size=4,hjust=0)+
    geom_text(data=permanova_labels,mapping=aes(x=-0.65,y=-0.54,label=p.value),parse=TRUE,inherit.aes=FALSE,size=4,hjust=0)+
    theme_bw() +
    theme(legend.title=element_blank(), legend.text=element_text(size=12))+
    theme(title = element_text(size = 14))+
    theme(axis.title = element_text(size = 16),axis.text = element_text(size = 12,colour="black"))+
    theme(legend.justification=c(0.02,0.98), legend.position=c(0.02,0.98),legend.background = element_rect(fill = NA))
  # return
  return(pcoa_plot)
}
# Fit a linear model of log Shannon diversity on phenotype, adjusting for age
# and gender; print the model summary and return the fitted model.
#   df -- data frame with columns shannon (positive numeric), Phenotype, Age, Gender
# Returns the fitted "lm" object (printing the summary is a side effect).
lm.analysis <- function(df) {
  lm_fit <- lm(log(shannon) ~ Phenotype + Age + Gender, data = df)
  # dropped the unused `lm_result <-` assignment from the original
  print(summary(lm_fit))
  lm_fit
}
library(RcppCNPy)
library(ecodist)
# Multiple Regression on distance Matrices (ecodist::MRM) of the log distance
# matrix on sample-type, age, and gender design distance matrices, with no
# intercept (-1). All arguments are square matrices coercible by as.dist().
# Returns the MRM result (also printed as a side effect).
mrm.analysis <- function(distance_matrix, sample_type_D1, sample_type_D2, age_D, gender_D1, gender_D2) {
  mrm_result <- MRM(as.dist(log(distance_matrix)) ~ as.dist(sample_type_D1)+as.dist(sample_type_D2)+as.dist(age_D)+as.dist(gender_D1)+as.dist(gender_D2)-1)
  print(mrm_result)
  return(mrm_result)
}
|
83d51dd47e8c562922535be0bd6b75d4fec6e7ff
|
acc82a0f64be1e131967991dea0b848195b58ead
|
/tests/testthat/test-dist_bray.R
|
9932697db6a3387ff5afb1f5b9940c5106018990
|
[] |
no_license
|
dyerlab/gstudio
|
2ecc2220cfceb625395191384401ac86c3f38963
|
d7a207a8d66b4cd0b38faf4f42dc5590aae1a647
|
refs/heads/master
| 2023-06-01T06:35:55.709942
| 2023-05-15T17:52:05
| 2023-05-15T17:52:05
| 8,651,449
| 12
| 8
| null | 2020-03-27T00:09:45
| 2013-03-08T13:27:59
|
R
|
UTF-8
|
R
| false
| false
| 748
|
r
|
test-dist_bray.R
|
context("dist_bray.R")
# Tests for dist_bray(): input validation plus a hand-checked two-population
# Bray-Curtis distance. Rewritten with modern testthat expectations
# (expect_error / expect_equal / expect_true) in place of the deprecated
# expect_that(..., throws_error()/is_a()/is_equivalent_to()/equals()) forms.
test_that("individual",{
  expect_error( dist_bray("Bob") )
  expect_error( dist_bray(data.frame(Pop=1)) )
  expect_error( dist_bray(data.frame(Pop=1), stratum="bob") )
  AA <- locus( c("A","A") )
  AB <- locus( c("A","B") )
  AC <- locus( c("A","C") )
  BB <- locus( c("B","B") )
  BC <- locus( c("B","C") )
  CC <- locus( c("C","C") )
  loci <- c(AA,AA,AB,AA,BB,BC,CC,BB,BB,CC)
  df <- data.frame( Population=c(rep("A",5),rep("B",5) ), TPI=loci )
  D <- dist_bray(df)
  # pairwise matrix: one row/column per population, zero diagonal, symmetric
  expect_true( is.matrix(D) )
  expect_equal( dim(D), c(2,2), ignore_attr = TRUE )
  expect_equal( sum(diag(D)), 0 )
  expect_true( D[1,2] == D[2,1] )
  expect_equal( D[1,2], 0.1 )
})
|
bb770a0aa1c2eea8568ad4e02f3a4427660bc98d
|
1c03917b86f5e47c4bf954afce910ce439fd552b
|
/data_analytic_utilities/Inferential_stats_utils/Correlation_Regression_Classification_ML/Correlation_analysis.R
|
aad21ebff8882e5340feec9846a0b3abca649e58
|
[] |
no_license
|
sameerpadhye/Personal-projects
|
d7da869de225c48ce7c7b3ece8663bc10042c55e
|
d04fc8bbe723d90559f64ea7c8f8ca01eae17c56
|
refs/heads/master
| 2022-07-31T03:30:24.966932
| 2022-07-11T15:25:25
| 2022-07-11T15:25:25
| 179,165,892
| 1
| 1
| null | 2020-03-05T15:54:40
| 2019-04-02T22:06:24
|
R
|
UTF-8
|
R
| false
| false
| 4,009
|
r
|
Correlation_analysis.R
|
#Correlation analysis
#libraries used
library(tidyverse)
library(reshape2)
#Here a sample dataset is used. The numerical data from any dataframe (correlations of which need to be determined) can be substituted as per requirement
#Data for analysis: an increasing series with one NA, a decreasing sequence,
#uniform noise, and a random sample of integers.
correlation_data<-data.frame(
  var_1=c(10,12,17,29,35,NA,56,89,112,156),
  var_2=seq(160,1,length.out = 10),
  var_3=runif(10),
  var_4=sample(1:200,10,replace = TRUE))
#Exploring the data
head(correlation_data,3)
## Correlation test for 2 variables
# cor.test provides the t statistic and p values as well. 'cor' only gives the correlation coefficient. Hence, cor.test is used
# Pearson correlation coefficient (used when data are normal)
cor.test(correlation_data$var_1,
         correlation_data$var_2,
         method=c("pearson"))
# Spearman correlation coefficient (used when data are NOT normal; rank-based)
cor.test(correlation_data$var_1,
         correlation_data$var_2,
         method=c("spearman"))
#Scatterplot for visualization (var_1 and var_2 visualized here)
correlation_data%>%
  ggplot(aes(x=var_1,
             y=var_2))+
  geom_point(size=5,
             color='black',
             pch=21,
             fill='steelblue')+
  theme_bw(base_size = 18)
#Multiple correlation using all variables in the data
# Multiple correlation using Pearson's correlation coefficient. Data needs to be a matrix hence converted within the function itself. Here Pearson's correlation coefficient is used. Spearman can also be used as per requirement
mult_correlation<-Hmisc::rcorr(as.matrix(correlation_data),
                               type=c('pearson'))
#getting the correlation coefficients
mult_correlation$r
#getting the p values (significance) of the coefficients
mult_correlation$P
#Heatmap visualization of the correlations
heatmap(mult_correlation$r)
##Visualization using ggplot. Correlation coefficients are used
#getting the long format of the data
data_for_viz<-reshape2::melt(mult_correlation$r)
#plot (Var1 and Var2 are just the column names that reshape assigns. These can be changed if required)
data_for_viz%>%
  ggplot(aes(x=Var1,
             y=Var2,
             fill=value))+
  geom_tile()+
  theme(axis.title.x=element_blank(),
        axis.title.y = element_blank())
#Correlation of binary variables
#The continuous data have been first converted to binary data using the correlationfunnel package
# NOTE(review): the require()+install.packages() pattern installs at run time;
# fine for an interactive utility script, avoid in packages.
if(!require(correlationfunnel))install.packages('correlationfunnel')
#Obtaining the data (first columns selected at the end)
data_analysis_binary <- correlation_data %>%
  dplyr::select(var_1:var_3)%>%
  na.omit(.)%>%
  binarize(n_bins = 4,
           thresh_infreq = 0.01)%>%
  dplyr::select(1:5)
#View the result (interactive viewer; harmless but a no-op when run non-interactively)
View(data_analysis_binary)
#Correlation of binary variables.
#Rename the new columns (for convenience)
colnames(data_analysis_binary)<-c("var_1_1","var_1_2","var_1_3","var_1_4","var_2")
#Correlation analysis. Here var_2 is selected as a response variable against which correlations of the other 4 variables will be displayed
data_analysis_binary%>%
  na.omit(.)%>%
  correlationfunnel::correlate(target = var_2)
#Visualizing the correlation analysis
data_analysis_binary%>%
  na.omit(.)%>%
  correlationfunnel::correlate(target = var_2)%>%
  plot_correlation_funnel(interactive = FALSE)
#Finding highly correlated variables using caret package
if(!require(caret))install.packages('caret')
#1. Getting the correlation of the dataset
# NOTE(review): var_1 contains an NA, and cor()'s default use="everything"
# propagates NA into the matrix, which can break findCorrelation() below --
# consider use="pairwise.complete.obs"; confirm intent.
correlations_of_data<-correlation_data%>%
  select_if(is.numeric)%>%
  as.matrix(.)%>%
  cor(.,method = "spearman")
#2. Index for selecting high correlated variables
index_selection<-findCorrelation(correlations_of_data,
                                 cutoff = 0.65)%>%
  sort(.)
#3. Finding the highly correlated variables (the columns displayed are the highly correlated variables based on the cutoff given above)
correlations_of_data[,c(index_selection)]
|
ea88075526e60068c6c689f90c64fc83fc92b78a
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/SuperGauss/examples/Toeplitz-class.Rd.R
|
c0392a6ea95c3a7509c39c3b6d4afd8f213f9a0e
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 827
|
r
|
Toeplitz-class.Rd.R
|
library(SuperGauss)
### Name: Toeplitz-class
### Title: Constructor and methods for Toeplitz matrix objects.
### Aliases: Toeplitz-class .Toeplitz setAcf getAcf traceT2 traceT4
###   show.Toeplitz %*% determinant solve %*%,ANY,Toeplitz-method
###   %*%,Toeplitz,ANY-method determinant,Toeplitz-method
###   dim,Toeplitz-method ncol,Toeplitz-method nrow,Toeplitz-method
###   show,Toeplitz-method solve,Toeplitz-method Toeplitz
### ** Examples
# Auto-extracted example code for SuperGauss's Toeplitz class. The Toeplitz
# object is a reference-style object (methods accessed with `$`), so setAcf()
# mutates Toep in place.
# construction from an autocorrelation vector
acf <- exp(-(1:5))
Toep <- Toeplitz(acf = acf)
# alternatively, can allocate space first and fill the acf in afterwards
Toep <- Toeplitz(n = length(acf))
Toep$setAcf(acf = acf)
dim(Toep) # == c(nrow(Toep), ncol(Toep))
Toep # show method
Toep$getAcf() # extract the acf
# linear algebra: matrix product, solve, and log-determinant
X <- matrix(rnorm(10), 5, 2)
Toep %*% X
t(X) %*% Toep
solve(Toep, X)
determinant(Toep) # log-determinant
|
8919d5b4b747b4da3a026424674c77fc7066ab3e
|
f61c1ca2a35c4a4dab86725905a82b34fe2bb912
|
/complete.R
|
29238ad9f1b16651fab9c88b08ad5b7c47539b2d
|
[
"MIT"
] |
permissive
|
ankitprakash89/R--Assignment
|
25b9a37aa3aab1bb13181978338f1d16dd052299
|
4feca865840a8a96c2e39ca45128f8f50b42c2f7
|
refs/heads/master
| 2020-03-17T00:28:20.358045
| 2018-05-12T05:50:21
| 2018-05-12T05:50:21
| 133,117,899
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 911
|
r
|
complete.R
|
library(dplyr)
# Base directory holding the data folders (absolute path, stored as a string).
dir<- "C:/Users/Administrator/Downloads/Assignment 2/OneDrive_2018-01-28"
# complete(): for each monitor id, count the number of fully observed rows
# (no NA in any column) in that monitor's CSV file.
#   directory -- folder (under the global `dir`) containing the CSV files
#   id        -- integer vector of monitor numbers (index into the file list)
# Returns a data frame with columns id and nobs.
complete<- function(directory, id){
  # Build full file paths instead of calling setwd(); the original changed
  # the caller's working directory as a side effect.
  dir.files <- file.path(dir, directory)
  files <- list.files(path = dir.files, pattern = '\\.csv', full.names = TRUE)
  test <- data.frame() # accumulates one (id, nobs) row per monitor
  k <- 1
  for(i in id)
  {
    temp <- read.csv(files[i], header = TRUE)
    test[k,1] <- i
    # complete.cases() flags rows with no missing values; sum() counts them
    # (replaces the dplyr::count()-into-a-cell construction)
    test[k,2] <- sum(complete.cases(temp))
    k <- k+1
  }
  colnames(test) <- c("id","nobs")
  return(test)
}
complete("specdata", 1)
complete("specdata", c(2,4,8,10,12))
complete("specdata", 30:25)
complete("specdata", 3)
|
24130472a532b60f0919ae5df3b5d70cf0c033e3
|
1f82e454a2b0a9f81a0bff4946507543f6ca4527
|
/ProgrammingAssignment2-master/cachematrix.R
|
889931fa83f0f19a6e895dac79b9bffb9ab91215
|
[] |
no_license
|
apriljkissinger/datasciencecourserajohnhopkins
|
1d7985320010b8dca9188f822edad2007823ebc7
|
c12fb15da162ea33ce67ee07a52feb93acdc81f3
|
refs/heads/master
| 2021-05-21T02:46:17.907860
| 2020-11-27T08:18:32
| 2020-11-27T08:18:32
| 252,507,540
| 0
| 0
| null | 2020-11-21T19:51:56
| 2020-04-02T16:23:51
|
HTML
|
UTF-8
|
R
| false
| false
| 943
|
r
|
cachematrix.R
|
## This program will take in a matrix and spit out its inverse.
## NOTE(review): removed `rm(list = ls())` -- wiping the caller's entire
## global environment from a sourced script is a destructive side effect.
## makeCacheMatrix builds a special "matrix" object: a list of getter/setter
## closures around a matrix `x` and a cached inverse. Calling set() with a new
## matrix also invalidates any previously cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  inv_cache <- NULL
  set <- function(y) {
    x <<- y
    inv_cache <<- NULL  # new matrix: drop the stale cached inverse
  }
  get <- function() {
    x
  }
  setinv <- function(solve) {
    inv_cache <<- solve
  }
  getinv <- function() {
    inv_cache
  }
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## cacheSolve returns the inverse of the matrix wrapped by a makeCacheMatrix
## object `x`, computing it with solve() only on the first call and serving
## the cached copy (with a message) on every call after that.
## Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (is.null(cached)) {
    inv <- solve(x$get(), ...)  # first call: compute and store the inverse
    x$setinv(inv)
    return(inv)
  }
  message("getting cached data")
  cached
}
|
0dcf2b19702d8e197d60f87d6168c2060daf20eb
|
1c3394f6720e005b338a03833044cec262808a35
|
/man/checkForAuxiliaryFiles.Rd
|
097f63e78f3ab7b75386450eaba3938d6fc6bbcb
|
[] |
no_license
|
mdsumner/reproducible
|
284e5e2603c6dc394ca8e547069fa4f4bfec4802
|
99cbc13e41d462c1445dfacf69549517c13efe2f
|
refs/heads/master
| 2022-07-30T07:30:33.754921
| 2020-05-18T18:16:45
| 2020-05-18T18:16:45
| 265,203,071
| 1
| 0
| null | 2020-05-19T09:25:30
| 2020-05-19T09:25:29
| null |
UTF-8
|
R
| false
| true
| 596
|
rd
|
checkForAuxiliaryFiles.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prepInputs.R
\name{.checkForAuxiliaryFiles}
\alias{.checkForAuxiliaryFiles}
\title{Check a neededFile for commonly needed auxiliary files}
\usage{
.checkForAuxiliaryFiles(neededFiles)
}
\arguments{
\item{neededFiles}{A character string of file name(s) that will be checked.
Specifically, if the file extension is \code{.shp}, the output will also
include file names with each of these extensions:
c("shx", "dbf", "prj", "sbx", "sbn").}
}
\description{
Currently, this is only used for shapefiles.
}
\keyword{internal}
|
809cb700b4a6a58775f062d29da637a6442aa4eb
|
55484e772eb403bfe98c0e568749a3d6be1c539a
|
/plot2.R
|
f0505f444e5f8068c85ece296347ff15c201ea28
|
[] |
no_license
|
merlandson14/ExData_Plotting1
|
0f46e82f7391c0af791182132f6dcf63930c8d9f
|
d4730c55f108b04b0cccd27283e9fedc2717e890
|
refs/heads/master
| 2021-01-16T22:26:10.355867
| 2016-01-25T01:20:49
| 2016-01-25T01:20:49
| 50,296,101
| 0
| 0
| null | 2016-01-24T16:22:43
| 2016-01-24T16:22:42
| null |
UTF-8
|
R
| false
| false
| 1,287
|
r
|
plot2.R
|
# This program will read in Household Power Consumption data for Feb 1 and 2, 2007.
# It will then create a line plot of the Global Active Power amounts over the two days.
library(dplyr)
zipfileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
temp <- tempfile()
download.file(zipfileUrl, temp)
housepowerData <- read.table(unz(temp, "household_power_consumption.txt"),
                             header = TRUE, sep = ";", na.strings = "?")
# Select the two target dates by value rather than by hard-coded row numbers
# (the original used rows 66637:69516, which breaks if the source file changes).
# Dates in this file are d/m/Y strings, so Feb 1-2 2007 are "1/2/2007", "2/2/2007".
housepowerData <- filter(housepowerData, Date %in% c("1/2/2007", "2/2/2007"))
# Date and Time were separate columns, so we first paste them together in a new var and then convert to POSIX format.
housepowerData <- mutate(housepowerData, DateTime = paste(Date, Time))
housepowerData$DateTime <- strptime(housepowerData$DateTime, format = "%d/%m/%Y %H:%M:%S")
# Below is the line graph of Global Active Power over two days, saved to a PNG file.
plot(housepowerData$DateTime, housepowerData$Global_active_power, type = "l", xlab = "", ylab = "Global Active Power (kilowatts)")
dev.copy(png, file = "plot2.png", width = 480, height = 480)
dev.off()
unlink(temp)  # clean up the downloaded zip
|
7c0bc740e6950c1cb12258661c75879de132c15b
|
c650e9bca1d76deda90309498506159c2906768e
|
/tests/testthat.R
|
29bae8565bad5ccf8f3de9698bdadcd49ca6c1b1
|
[
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-public-domain-disclaimer"
] |
permissive
|
nemochina2008/netcdf.dsg
|
63cd9468ea1290c89fb573188e24a2942ad8287f
|
0600a0c13ea0f51f33d6058ea9a754241bbab711
|
refs/heads/master
| 2021-06-20T11:37:27.699344
| 2017-08-01T16:21:15
| 2017-08-01T16:21:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 43
|
r
|
testthat.R
|
# Standard testthat entry point: runs every test under tests/testthat/ for
# the netcdf.dsg package (invoked by R CMD check).
library(testthat)
test_check("netcdf.dsg")
|
bb96c330415b717b208fe95e5b0e925ed3f0dedf
|
04df0315f2208f0b009d0e1a55953cccfbb84c52
|
/man/geno.afc.Rd
|
3f3a6c6f54852b0decaea8519b6482f5d13d5633
|
[] |
no_license
|
cran/AssocAFC
|
5d4374156a20c7c89a53ae36658587fc3278c1a2
|
82afb6d83d99bc9e2373de41396fe2ca7bf33612
|
refs/heads/master
| 2021-09-13T22:32:24.496241
| 2018-05-05T08:34:22
| 2018-05-05T08:34:22
| 112,373,664
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 521
|
rd
|
geno.afc.Rd
|
\name{geno.afc}
\alias{geno.afc}
\docType{data}
\title{
AFC Genotype Example
}
\description{
An example genotype file that corresponds to the one derived in the first
iteration of the unrun example.
}
\usage{data("geno.afc")}
\format{
The format is:
int [1:1800, 1:75] 0 0 0 0 0 0 0 0 0 0 ...
- attr(*, "dimnames")=List of 2
..$ : NULL
..$ : chr [1:75] "rs3745120_1" "chr19.22817596_1" "chr19.22817734_1" "chr19.22817955_1" ...
}
\source{
This is simulated data.
}
\examples{
data("geno.afc")
}
\keyword{datasets}
|
62da1dbef1a674442f295521d8d6486655242074
|
6de599efffd75ab721fb158c82e84554bcdb3b96
|
/18_t_code.R
|
8d5b9db31a4fbeb0028aa141b660f8a7abd498fa
|
[] |
no_license
|
ybrandvain/datasets
|
26f820192033194d682d0d3a92b2b07244dbb56d
|
a446e472640d8f769104b4c9825f46b3678f7a00
|
refs/heads/master
| 2022-12-05T03:51:04.453190
| 2022-12-02T06:58:11
| 2022-12-02T06:58:11
| 253,668,620
| 0
| 0
| null | 2020-04-07T02:38:33
| 2020-04-07T02:38:33
| null |
UTF-8
|
R
| false
| false
| 1,543
|
r
|
18_t_code.R
|
# One-sample t-test walkthrough: is mean human body temperature 98.6 F?
# Data: Whitlock & Schluter (3e), chapter 11 example 3 (fetched over the network).
library(tidyverse)
library(janitor)
library(broom)
temp_link <- "https://whitlockschluter3e.zoology.ubc.ca/Data/chapter11/chap11e3Temperature.csv"
temp_data <- read_csv(temp_link) %>%  # load in data
  clean_names()                       # standardize column names to snake_case
#glimpse(temp_data)
############################
#### Plot the data -
############################
# Plot: is it normal-ish? let's make a qqplot
ggplot(temp_data, aes(sample = temperature))+
  geom_qq()+
  geom_qq_line()
# How close is it to the null?
h0_temp <- 98.6   # null-hypothesis mean (degrees Fahrenheit)
ggplot(temp_data, aes(x = temperature))+
  geom_histogram(bins = 10, color = "white")+ # make a histogram
  geom_vline(xintercept = h0_temp, color = "red")+ # show null
  annotate(geom = "label",x = h0_temp, y = 5, color = "red",hjust = 0, label = "null")
# Data summaries and hypothesis test, computed by hand inside summarise():
# effect size (Cohen's d), 95% CI from the critical t, and a two-sided
# p-value as twice the upper tail of |t|.
temp_data %>%
  dplyr::summarise(mean_temp = mean(temperature),
                   sd_temp = sd(temperature),
                   cohens_d = (mean_temp - h0_temp) / sd_temp,
                   sample_size = n(),
                   se_temp = sd_temp / sqrt(sample_size),
                   crit_t = qt(p = 0.025,df = sample_size-1, lower.tail=FALSE),
                   lower_95 = mean_temp - crit_t * se_temp,
                   upper_95 = mean_temp + crit_t * se_temp,
                   abs_t_val = abs((mean_temp - h0_temp) / se_temp),
                   p_val = 2*pt(abs_t_val, df = sample_size-1, lower.tail = FALSE))
# Same test via t.test(), printed and then tidied into a one-row data frame.
t.test(pull(temp_data, temperature),mu = 98.6)
t.test(pull(temp_data, temperature),mu = 98.6) %>%
  tidy()
|
81749388975c035bc9d8dce7f8c6e9219f10336a
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/FiRE/examples/Rcpp_score.Rd.R
|
f7f6e4d666dc5a5e62d4c7653c6c29d18a9b6d5f
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 322
|
r
|
Rcpp_score.Rd.R
|
# Auto-extracted example for FiRE::score; the example body is wrapped in
# "Not run" (##D lines) because it needs user-supplied L, M, and data.
library(FiRE)
### Name: score
### Title: Compute score on hashed sample.
### Aliases: score
### ** Examples
## Not run:
##D
##D ## Creating class object with required arguments
##D model <- new(FiRE::FiRE, L, M)
##D model$fit(data)
##D score <- model$score(data)
##D
##D
## End(Not run)
|
acfd5cba0b8386eaa21c60447d9f5a32ab3cd94e
|
d48eec86caf281f065ab2b3943d52070171b2ce4
|
/13-simulando-paretos.R
|
d91c5d60e0f64d1e40f5574f1ef926f270c28da9
|
[] |
no_license
|
djosafat/simulacion
|
05c7168433cf2013225e752b4cb211a72fa8853e
|
881af801138e0e46f0f2fafc97e9c9a8fb8ce161
|
refs/heads/main
| 2023-05-12T03:28:46.827007
| 2021-06-01T02:36:20
| 2021-06-01T02:36:20
| 344,931,452
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 720
|
r
|
13-simulando-paretos.R
|
# Pareto case with parameters a = 3, b = 1: inversion sampling demo.
# CDF: F(x) = 1 - (1 + x)^(-3)
FF <- function(x){1-(1+x)^(-3)}
curve(FF(x),0,3,lwd=4,col='blue')
abline(h=0,v=0,lwd=4)
abline(h=1,lty=2,col=2,lwd=4)
# Inverse CDF (quantile function), used for inversion sampling
FFinv <- function(x){ (1-x)^(-1/3)-1 }
curve(FFinv(x),0,0.99,lwd=4,col='magenta',add=TRUE)
curve(1*(x),add=TRUE)  # identity line: FF and FFinv mirror across it
########################################
## Generate a sample of size 10000    ##
########################################
u <- runif(10000)
Z <- FFinv(u)  # inversion method: FFinv(U) has CDF FF when U ~ Uniform(0,1)
hist(Z,50,xlim=c(0,8),freq = FALSE,col=rainbow(8))
f <- function(x) { # density function f(x) = F'(x)
  3/(1+x)^4
}
curve(f(x),add = TRUE, col = 4, lwd=4)
plot(ecdf(Z),col = 6,lwd=5)
#plot(ecdf(rexp(10000))) # ecdf = empirical cumulative distribution function
curve(FF(x),add=TRUE,col=1,lwd=2,xlim = c(0,40))
|
4ebfffa867b802574aecf60bfac79dcdd40ea30e
|
79abac7cd7496a52f2fd3a9932eecc849dd2c661
|
/R/get_basis_set.R
|
54243ddf9c71d3b2ac3c79bc00922bffcc5d873c
|
[] |
no_license
|
achazhardenberg/piecewiseSEM
|
7758a164c0fcc328266f2cd0a5fb13cdd164a149
|
7f0b2baddd4688fc5fd4663de676726e4a45ed38
|
refs/heads/master
| 2020-12-07T15:21:11.580966
| 2015-06-02T22:09:15
| 2015-06-02T22:09:15
| 34,168,584
| 0
| 0
| null | 2015-04-18T14:21:59
| 2015-04-18T14:21:59
| null |
UTF-8
|
R
| false
| false
| 2,002
|
r
|
get_basis_set.R
|
# Build the basis set of conditional-independence claims for a list of
# fitted models, as used by Shipley's test of directed separation.
#
# Args:
#   modelList:   list of fitted models (lm/glm/negbin/lme/glmmPQL/pgls
#                are used as-is; anything else has random-effect bars
#                stripped via nobars()).
#   corr.errors: optional character vector of "a ~~ b" correlated-error
#                declarations; claims between such pairs are dropped.
#   add.vars:    optional extra variable names, added as "x ~ x" formulae.
# Returns: a list of d-sep claims (character vectors), or stops if the
#          model is saturated (no independence claims remain).
get.basis.set = function(modelList, corr.errors = NULL, add.vars = NULL) {
  # One fixed-effects formula per model.
  dag = lapply(modelList, function(i)
    if(all(class(i) %in% c("lm", "glm", "negbin", "lme", "glmmPQL","pgls"))) formula(i) else
      nobars(formula(i)) )
  if(is.null(add.vars))
    dag = dag else
      dag = append(dag, unname(sapply(add.vars, function(x) as.formula(paste(x, x, sep="~")))))
  # Interactions: rewrite ":" as the placeholder "%*%" so the DAG
  # machinery treats each interaction as a single node; restored below.
  dag = lapply(dag, function(i)
    if(grepl("\\*|\\:", paste(format(formula(i)), collapse = ""))) {
      f = paste(rownames(attr(terms(i), "factors"))[1], "~",paste(colnames(attr(terms(i), "factors")), collapse = "+"))
      f = gsub("\\:", paste("%*%", collapse = ""), f)
      formula(f)
    } else i )
  # NOTE(review): this rewrites the body of DAG() (presumably ggm::DAG —
  # confirm) in the global environment so it accepts a pre-built formula
  # list; the original body is restored at the end of this function.
  body(DAG)[[2]] = substitute(f <- dag)
  basis.set = basiSet(DAG(dag))
  if(length(basis.set) < 1) stop("All endogenous variables are conditionally dependent: model is satured.\n Test of directed separation not possible!")
  # Undo the "%*%" placeholder: restore ":" interaction notation.
  basis.set = lapply(basis.set, function(i) gsub(paste(".\\%\\*\\%.", collapse = ""), "\\:", i))
  # Remove claims whose two endpoints were declared as correlated errors.
  if(!is.null(corr.errors)) {
    basis.set = lapply(1:length(basis.set), function(i) {
      inset = unlist(lapply(corr.errors, function(j) {
        corr.vars = gsub(" ", "", unlist(strsplit(j,"~~")))
        basis.set.sub = c()
        # Strip any transformation wrapper, e.g. "log(x)" -> "x".
        for(k in 1:2) basis.set.sub[k] = gsub(".*\\((.*?)\\)+.*", "\\1", basis.set[[i]][k])
        all(basis.set.sub %in% corr.vars) } ))
      if(any(inset == TRUE)) NULL else basis.set[[i]]
    } )
  }
  # Remove claims that test an interaction against one of its own
  # component main effects.
  basis.set = lapply(1:length(basis.set), function(i) {
    if(is.null(basis.set[[i]])) NULL else {
      if(grepl("\\:",basis.set[[i]][1])) {
        int = strsplit(basis.set[[i]][1],"\\:")[[1]]
        if(any(int %in% basis.set[[i]][2])) NULL else basis.set[[i]] } else basis.set[[i]]
    }
  } )
  # Compact: drop the NULL slots created above.
  basis.set = basis.set[!sapply(basis.set, is.null)]
  # Restore the original DAG() body.
  body(DAG)[[2]] = substitute(f <- list(...))
  return(basis.set)
}
|
6458517cb88ff94040caf9af54678a4998cb55cd
|
d17100a3f8887e627fb33eebd2feac71dbbf5f02
|
/UniHSMM.R
|
490cbf3c42d493a08499800435d59a895335e646
|
[] |
no_license
|
jeffung/coursework-cypersecurity
|
28da95476c2cd94cc139def3caf7a1a58cabe5f7
|
336844efc06022de223ccc3f52232766b845a9b1
|
refs/heads/master
| 2020-03-28T21:55:34.250107
| 2017-08-03T23:29:03
| 2017-08-03T23:29:03
| 149,192,504
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,245
|
r
|
UniHSMM.R
|
###
# Train data: wrap the univariate power series as an mhsmm "hsmm.data"
# object (scaled to zero mean / unit variance).
# NOTE(review): assumes `uni_train` (with $power, $Date) and `test1`
# (with $Global_active_power) already exist in the workspace — confirm.
traintest1 <- list (x= data.frame(uni_train$power), N=length(uni_train$Date))
traintest1$x <- data.matrix(traintest1$x, rownames.force = NA)
traintest1$x <- scale(traintest1$x)
class(traintest1) <- "hsmm.data"
# end
# Test data: same preprocessing; NAs imputed with the column mean.
uni_test<- data.frame(as.numeric(test1$Global_active_power))
uni_test <- setNames(uni_test,c("power"))
uni_test$power[is.na(uni_test$power)] <- mean(na.omit(uni_test$power))
xtest1 <- list (x= data.frame(uni_test$power), N=length(uni_test$power))
xtest1$x <- data.matrix(xtest1$x, rownames.force = NA)
xtest1$x <- scale(xtest1$x)
class(xtest1) <- "hsmm.data"
# Model specification: 2-state hidden semi-Markov model with Gaussian
# emissions and a nonparametric (uniform-initialised) duration density.
library(mhsmm)
J <- 2
init0 <- rep(1/J, J)
# Initial emission parameters (mean/sd per state).
B0 <- list(mu = c(0.5,3), sigma = c(2.5, 1.5))
# Transition matrix: with J = 2 states the chain must alternate.
P <- matrix(c(0, 1/2,
1/2,0
), nrow = J)
M <- length(uni_train$power)
# Initial sojourn (duration) densities, one column per state.
d0 <- cbind(dunif(1:M, 1000, 8000), dunif(1:M, 1000, 8000))
startval <- hsmmspec(init0, P, parms.emis = B0, list(d = d0, type = "nonparametric"), dens.emis = dnorm.hsmm)
nnn <- traintest1$x[,1]
# Fit by EM, capping durations at M = 1000.
UniHSMM<- hsmmfit(nnn, startval, mstep = mstep.norm, M=1000)
#summary(hmv)
# Decode the most likely state sequences on train and test data.
UniHSMMtrain <- predict.hsmm(UniHSMM,traintest1$x)
UniHSMMtest <- predict.hsmm(UniHSMM,xtest1$x)
plot(UniHSMMtrain)
#plot(UniHSMMtest)
UniHSMMtrain$s
UniHSMMtest$s
|
3192db04c089c6e30f015a9b86e4f1a0cdee07e5
|
9735404924fe5eedbb8e2c204504d4b266351e35
|
/global.R
|
63b2022af3fbf64612e0833c1a87372d2a1a775b
|
[] |
no_license
|
aridhia/demo-table-statistics
|
e2e1c31fd48aa03b9643180ad6ec09b1fcc9d151
|
f93513eb7666c042ad78f66fe5d74d430d71b6f4
|
refs/heads/main
| 2023-04-29T02:22:03.845046
| 2021-05-14T15:26:38
| 2021-05-14T15:26:38
| 290,808,862
| 0
| 0
| null | 2021-02-05T12:12:07
| 2020-08-27T15:13:30
|
JavaScript
|
UTF-8
|
R
| false
| false
| 8,142
|
r
|
global.R
|
library(shiny)
library(ggvis)
library(shinyBS)
source("./code/documentation_ui.R")
source("./code/config.R")
# Shiny server module: lets the user pick a workspace table and exposes
# its contents reactively.
#
# Args: the standard Shiny module triple (input, output, session).
# Returns: list(data = reactive data frame read via xap.read_table(),
#               table_name = reactive selected table name).
xap.chooseDataTable <- function(input, output, session) {
  d <- reactive(withProgress(message = "Reading table", value = 0, {
    req(input$table_name)
    xap.read_table(input$table_name)
  }))
  ## Update the table list when refresh is clicked
  observe({
    # Reading input$refresh makes this observer re-run on every click.
    i <- input$refresh
    tables <- xap.list_tables()
    ## isolate the update since we use input$table_name
    isolate({
      updateSelectizeInput(session, "table_name", choices = c("Choose a dataset" = "", tables),
                           selected = input$table_name)
    })
  })
  return(list(data = d, table_name = reactive(input$table_name)))
}
# UI counterpart of xap.chooseDataTable(): a namespaced table picker
# plus a "Refresh table list" button.
#
# Args:
#   id:    Shiny module id (namespace prefix).
#   label: label shown above the select input.
xap.chooseDataTableUI <- function(id, label = "Choose a table") {
  ns <- NS(id)
  # The choices are read once at UI build time; the server module's
  # refresh observer re-populates them afterwards.
  tables <- xap.list_tables()
  tagList(
    selectizeInput(ns("table_name"), label = label, choices = c("Select a Table" = "", tables)),
    actionButton(ns("refresh"), "Refresh table list")
  )
}
# Replace every "." in a string with "_", e.g. to turn a column name
# into a Shiny element id that must not contain dots.
dot_to_underscore <- function(string) {
  chartr(".", "_", string)
}
# S3 generic: build the "details" modal for one column summary.
# Dispatches on the class set by my_summ() ("Real", "Polynominal", ...).
create_modal <- function(x, name) {
  UseMethod("create_modal", x)
}
# Modal for categorical (Polynominal) columns: shows the value-count
# data table rendered elsewhere under output id paste0("dt", name).
create_modal.Polynominal <- function(x, name) {
  bsModal(
    paste0(dot_to_underscore(name), "modal"),
    title = paste("Ordinal Values:", name),
    # Opened by the "Details" actionLink built in create_row.Polynominal().
    trigger = paste0(dot_to_underscore(name), "details"),
    dataTableOutput(paste0("dt", name))
  )
}
# Modal with extra order statistics (quartiles, standard deviation) for
# a Real/Integer column summary produced by my_summ.numeric().
#
# Args:
#   x:    summary list with a $Values element (the raw numeric vector).
#   name: column name; also used (dots -> underscores) to build the
#         modal and trigger element ids expected by create_row.Real().
create_modal.Real <- function(x, name) {
  nm <- dot_to_underscore(name)
  v <- x$Values
  # Five-number summary; NAs dropped so columns with missing values
  # still get a modal. (The previously computed but unused mean was
  # removed — the modal never displayed it.)
  qs <- quantile(v, na.rm = TRUE)
  xmin <- qs[1]
  xmax <- qs[5]
  xmed <- qs[3]
  q25 <- qs[2]
  q75 <- qs[4]
  xsd <- round(sd(v, na.rm = TRUE), 3)
  bsModal(
    paste0(nm, "modal"),
    title = paste("More Stats:", name),
    # Must match the "More stats" actionLink id in create_row.Real().
    trigger = paste0(nm, "more"),
    tags$b("Min:"), xmin, tags$br(),
    tags$b("Q25:"), q25, tags$br(),
    tags$b("Median:"), xmed, tags$br(),
    tags$b("Q75:"), q75, tags$br(),
    tags$b("Max:"), xmax, tags$br(),
    tags$b("Standard Deviation:"), xsd
  )
}
# Integer columns reuse the numeric stats modal; Booleans reuse the
# categorical value-count modal.
create_modal.Integer <- create_modal.Real
#create_modal.Date <- create_modal.Real
create_modal.Boolean <- create_modal.Polynominal
# Fallback: column types with no dedicated modal get an empty row.
create_modal.default <- function(x, name) {
  fluidRow()
}
# Modal hosting the ggvis plot for one column; opened by the
# "View plot" actionLink created in create_row().
create_plot_modal <- function(name, plot_id) {
  bsModal(paste0(dot_to_underscore(name), "plot_modal"), title = paste0("Plot: ", name),
          trigger = paste0(dot_to_underscore(name), "plot"),
          ggvisOutput(plot_id))
}
# S3 generic: per-column summary statistics, dispatched on column type.
my_summ <- function(x) {
  UseMethod("my_summ", x)
}
# Summary statistics for a numeric (double or integer) column.
#
# Args:
#   x: numeric vector, possibly containing NAs.
# Returns:
#   A list of class "Real" (doubles) or "Integer" with elements Type,
#   Missing (NA count), Min, Max, Average (rounded to 3 dp) and Values
#   (the raw vector, reused later by plots and modals).
my_summ.numeric <- function(x) {
  xmin <- min(x, na.rm = TRUE)
  xmax <- max(x, na.rm = TRUE)
  xmean <- round(mean(x, na.rm = TRUE), 3)
  xmissing <- sum(is.na(x))
  # Map the R class to the display type; the same string is also used
  # as the S3 class so create_row()/create_modal() can dispatch on it.
  # class(x)[1] guards against multi-element class attributes, which
  # would make a plain `if (xtype == ...)` error on R >= 4.2.
  xtype <- class(x)[1]
  if (xtype == "numeric") {
    xtype <- "Real"
  } else if (xtype == "integer") {
    xtype <- "Integer"
  }
  out <- list("Type" = xtype, "Missing" = xmissing, "Min" = xmin,
              "Max" = xmax, "Average" = xmean, "Values" = x)
  class(out) <- xtype
  out
}
# Summary for a character column: least/most frequent values plus a
# human-readable "value (count), ..." string truncated to roughly the
# first 40 characters, with a "...[k more]" suffix.
my_summ.character <- function(x) {
  t <- table(x)
  xmissing <- sum(is.na(x))
  xleast <- t[which.min(t)]
  xmost <- t[which.max(t)]
  # Counts sorted most-frequent first.
  xvalues <- t[order(-t)]
  xvalues_string <- paste(paste0(names(xvalues), " (", xvalues, ")"), collapse = ", ")
  # Positions of the "), " separators; entries ending before column 40
  # fit on the summary row.
  matches <- gregexpr("), ", xvalues_string)[[1]]
  show <- matches[matches < 40]
  if(length(show) == 0) {
    # Always show at least the first entry, however long.
    show <- matches[1]
  }
  if(length(show) == length(matches)) {
    string <- xvalues_string
  } else {
    # NOTE(review): rev(show) is a vector; substr() uses only its first
    # element (the last separator that fits), which happens to be the
    # intended cut point — rev(show)[1] + 1 would be clearer.
    to <- rev(show) + 1
    left <- length(matches) - length(show)
    string <- paste0(substr(xvalues_string, 1, to), " ...[", left, " more]")
  }
  out <- list(
    "Type" = "Polynominal",
    "Missing" = xmissing,
    "Least" = paste0(names(xleast), " (", xleast, ")"),
    "Most" = paste0(names(xmost), " (", xmost, ")"),
    "Values" = string,
    # Full count table, rendered in the Details modal.
    "dt" = data.frame("Nominal value" = names(xvalues), "Absolute count" = as.vector(xvalues),
                      "Fraction" = as.vector(xvalues) / sum(xvalues))
  )
  class(out) <- "Polynominal"
  return(out)
}
# Factors are summarised like character columns.
my_summ.factor <- function(x) {
  my_summ(as.character(x))
}
# Dates: reuse the categorical summary but relabel type and class.
my_summ.Date <- function(x) {
  out <- my_summ(as.character(x))
  class(out) <- "Date"
  out$Type <- "Date"
  out
}
# Whole data frame: one summary per column, as a list.
my_summ.data.frame <- function(x) {
  lapply(x, my_summ)
}
# Logicals: categorical summary relabelled as Boolean.
my_summ.logical <- function(x) {
  out <- my_summ(as.character(x))
  class(out) <- "Boolean"
  out$Type <- "Boolean"
  out
}
# Timestamps: categorical summary relabelled as DateTime.
my_summ.POSIXct <- function(x) {
  out <- my_summ(as.character(x))
  class(out) <- "DateTime"
  out$Type <- "DateTime"
  out
}
my_summ.POSIXlt <- my_summ.POSIXct
# Missing columns produce no summary.
my_summ.NULL <- function(x) {
  NULL
}
# S3 generic: small ggvis preview plot for one column. Note dispatch is
# on the SECOND argument (the column vector), not on `data`.
simple_plot <- function(data, x) {
  UseMethod("simple_plot", x)
}
simple_plot.factor <- function(data, x) {
  simple_plot(data, as.character(x))
}
# Bar chart of the 5 most frequent values of a character column.
simple_plot.character <- function(data, x) {
  t <- table(x)
  n <- names(sort(-t))[1:5]
  # Keep only the rows whose value is in the top 5.
  l <- x %in% n
  d <- data[l,]
  x_ <- factor(x[l], levels = n)
  d %>% ggvis(~x_) %>%
    layer_bars(fill := "#2C88A2", strokeWidth := 0.5) %>%
    add_axis("x", title = "") %>%
    add_axis("y", title = "", ticks = 8)
}
# Histogram of a numeric column: 12 bins across the observed range.
simple_plot.numeric <- function(data, x) {
  d <- data
  x_ <- x
  d %>% ggvis(~x_) %>%
    layer_histograms(width = diff(range(x, na.rm = T))/12, fill := "#2C88A2", strokeWidth := 0.5) %>%
    add_axis("x", title = "", ticks = 6) %>%
    add_axis("y", title = "", ticks = 8)
}
# Histogram of an integer column (ggvis), 12 bins across the observed
# range. Mirrors simple_plot.numeric().
#
# Args:
#   data: the data frame the column comes from (ggvis data source).
#   x:    integer vector to plot.
simple_plot.integer <- function(data, x) {
  d <- data
  x_ <- x
  d %>% ggvis(~x_) %>%
    # na.rm = TRUE for consistency with simple_plot.numeric(); without
    # it a single NA makes range() return NA and the bin width is
    # undefined.
    layer_histograms(width = diff(range(x, na.rm = TRUE))/12, fill := "#2C88A2", strokeWidth := 0.5) %>%
    add_axis("x", title = "", ticks = 6) %>%
    add_axis("y", title = "", ticks = 8)
}
simple_plot.logical <- function(data, x) {
  simple_plot(data, as.character(x))
}
# Histogram over dates, same 12-bin strategy as the numeric method.
simple_plot.Date <- function(data, x) {
  d <- data
  x_ <- x
  d %>% ggvis(~x_) %>%
    layer_histograms(width = diff(range(x, na.rm = T))/12, fill := "#2C88A2", strokeWidth := 0.5) %>%
    add_axis("x", title = "", ticks = 6) %>%
    add_axis("y", title = "", ticks = 8)
}
simple_plot.POSIXct <- simple_plot.Date
simple_plot.POSIXlt <- simple_plot.Date
# Static header row of the summary table. `x`/`...` are accepted for
# call-site uniformity with the create_row() family but are unused.
create_header <- function(x, ...) {
  tags$thead(
    tags$th("Name"),
    tags$th("Type"),
    tags$th("Missing"),
    # "Statistics" spans the plot cell plus the three stat cells.
    tags$th("Statistics", colspan = 4)
  )
}
# S3 generic: one <tr> of the summary table per column summary,
# dispatched on the summary's class ("Real", "Polynominal", ...).
create_row <- function(x, ...) {
  UseMethod("create_row", x)
}
# Table row for a Real (double) column: type, missing count, expandable
# plot cell, and Min/Max/Average cells with a "More stats" modal link.
#
# Args:
#   x:       summary list from my_summ.numeric().
#   name:    column name (also used as the row's DOM id).
#   plot_id: ggvis output id rendered inside the expandable cell.
create_row.Real <- function(x, name, plot_id) {
  tags$tr(
    id = name,
    title = "Click to Expand",
    tags$td(class = "left", p(tags$b(name))),
    tags$td(p(x$Type)),
    tags$td(p(x$Missing)),
    tags$td(
      # Only shown after the row is expanded (JS toggles input["rowNAME"]).
      conditionalPanel(
        condition = paste0("input[\"row", name, "\"] === 1"),
        ggvisOutput(plot_id),
        # Trigger id must match create_plot_modal().
        actionLink(paste0(dot_to_underscore(name), "plot"), "View plot")
      )
    ),
    tags$td(
      h6("Min"),
      p(x$Min)
    ),
    tags$td(
      h6("Max"),
      p(x$Max)
    ),
    tags$td(
      class = "right",
      h6("Average"),
      p(x$Average),
      # Trigger id must match create_modal.Real().
      actionLink(paste0(dot_to_underscore(name), "more"), "More stats")
    )
  )
}
# Integer columns render exactly like Real ones.
create_row.Integer <- create_row.Real
# Table row for a categorical column: type, missing count, expandable
# plot cell, and Least/Most/Values cells with a "Details" modal link.
#
# Args:
#   x:       summary list from my_summ.character().
#   name:    column name (also used as the row's DOM id).
#   plot_id: ggvis output id rendered inside the expandable cell.
create_row.Polynominal <- function(x, name, plot_id) {
  tags$tr(id = name,
          title = "Click to Expand",
          tags$td(class = "left", p(tags$b(name))),
          tags$td(p(x$Type)),
          tags$td(p(x$Missing)),
          tags$td(
            # Only shown once the row is expanded.
            conditionalPanel(
              condition = paste0("input[\"row", name, "\"] === 1"),
              ggvisOutput(plot_id),
              # Trigger id must match create_plot_modal().
              actionLink(paste0(dot_to_underscore(name), "plot"), "View plot")
            )
          ),
          tags$td(
            h6("Least"),
            p(x$Least)
          ),
          tags$td(
            h6("Most"),
            p(x$Most)
          ),
          tags$td(class = "right",
                  h6("Values"),
                  p(x$Values),
                  # Trigger id must match create_modal.Polynominal().
                  actionLink(paste0(dot_to_underscore(name), "details"), "Details")
          )
  )
}
# Date/DateTime/Boolean columns render like categorical ones.
create_row.Date <- create_row.Polynominal
create_row.DateTime <- create_row.Polynominal
create_row.Boolean <- create_row.Polynominal
# Assemble the full summary table: the static header plus one
# create_row() per column summary, paired positionally with the
# matching entries of `names` and `plot_ids`.
create_table <- function(summ, names, plot_ids) {
  print("Creating Table...")
  rows <- lapply(seq_along(summ), function(idx) {
    create_row(summ[[idx]], names[idx], plot_ids[idx])
  })
  tags$table(
    id = "myTable", class = "tablesorter table-striped",
    create_header(),
    tags$tbody(rows)
  )
}
|
d591d55cec4d075e8a7495af2db874cbac10ec36
|
9491cadebf15aed8b93a6b8bde60e81febee79ae
|
/modelo1.R
|
b6e5685b108bfc277a02cff372908ecc37ed7795
|
[] |
no_license
|
pyxisdata/sript_personal
|
b01a7cb535832f8501c2cbc1367117ae2d496e73
|
688147512be949951a4541189316e0842e25b4a8
|
refs/heads/master
| 2020-07-18T01:25:10.338963
| 2019-10-06T00:06:39
| 2019-10-06T00:06:39
| 206,143,506
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 23,599
|
r
|
modelo1.R
|
# Script para el modelo de evaluacion de EMA
# Librerias
# Conexion a google drive
library(googledrive)
library(googlesheets)
library(readr)
# Procesamiento de los datos
library(lubridate)
library(dplyr)
library(tidyr)
library(stringr)
library(stringi)
library(purrr)
# Desactivar la notacion cientifica
options(scipen = 999)
# Conectarse a la hoja de calculo de google
hoja_calculo <- gs_key("1iKJj0l3Gix8Zcmg6-2M0CYS4m_ZEJclbxd4F38QU7cg")
# Importar los datos de la hoja de calculo de registros
encuesta <- hoja_calculo %>%
gs_read(ws = "Hoja 1", col_types = cols(.default = "c")) %>%
mutate(NIT = c("888888888", "890110964", "860529657",
"999999999", "860032115"))
# Importar encabezados de la tabla de la encuesta
param_encuesta <- paste0(getwd(), "/nombres_encuesta.csv") %>%
read_csv(col_names = TRUE,
col_types = cols(.default = "c"),
locale = locale(encoding = "ISO-8859-1"))
# Nombres de la encuesta
nombres_encuesta <- param_encuesta %>%
select(campof) %>%
unlist()
names(encuesta) <- nombres_encuesta
# Limpiar memoria y espacio de variables
rm(nombres_encuesta)
# Directorio de empresas
# Codigo CIIU
directorio <- paste0(getwd(), "/Limpio") %>%
list.files(pattern = "dir", full.names = TRUE) %>%
map(read_csv, col_types = cols(.default = "c")) %>%
# Asignar nombres
map2(list(c("nombre", "nit", "munpio", "codclase"),
c("nit", "nombre", "codclase", "depto", "munpio"),
c("nit", "nombre", "codclase", "depto", "munpio"),
c("nombre", "nit", "munpio", "codclase")),
function(x, y) {
names(x) <- y
x
}) %>%
bind_rows()
# Importar informacion de la empresa (codigo CIIU)
informacion <- hoja_calculo %>%
gs_read(ws = "registros", col_types = cols(.default = "c")) %>%
filter(Tipo == 4) %>%
select(NIT, Empresa, CIIU, Municipio, Departamento, Tamano) %>%
left_join(distinct(select(encuesta, nit, reporta)),
by = c("NIT" = "nit")) %>%
left_join(directorio, by = c("NIT" = "nit")) %>%
mutate(directorio = ifelse(is.na(nombre), "NO", "SI")) %>%
select(-munpio, -codclase, -depto, -nombre)
names(informacion) <- c("nit", "empresa", "codclase", "municipio",
"departamento", "tamano", "reporta", "directorio")
# Importar clasificacion CIIU 4
ciiu4 <- paste0(getwd(), "/clasificacion_ciiu4.csv") %>%
read_csv(col_names = TRUE, col_types = cols(.default = "c"),
locale = locale(encoding = "ISO-8859-1"))
# Resultados cualitativos -----------------------------------------------------
# Datos cualitativos a utilizar para el modelo
cualitativos <- encuesta %>%
# Proceso
select(nit, ends_with("1")) %>%
select(-direccion1, -contains("AN")) %>%
mutate_at(vars(-nit), function(x) {
x <- str_match(x, "\\d+")[, 1]
}) %>%
gather("parte", "puntaje_emp", -nit) %>%
mutate(puntaje_emp = as.integer(puntaje_emp)) %>%
# Asignar el codigo CIIU
left_join(select(informacion, nit, tamano, codclase), by = "nit") %>%
left_join(ciiu4, by = c("codclase")) %>%
select(-codea, -nomea, -codgea, -nomgea, -coddea, -nomdea,
-codsea, -nomsea) %>%
select(nit, tamano, parte, puntaje_emp, codclase, everything())
# Puntaje promedio de las preguntas cualitativas a nivel sector
# Funcion para realizar los calculos a los cuatro niveles
# Score each firm's qualitative answers against the sector average at
# one CIIU level ("clase", "grupo", "division" or "seccion").
#
# Args:
#   data:    long-format answers with columns nit, parte, puntaje_emp
#            plus the cod*/nom* CIIU columns.
#   columna: CIIU level suffix; selects which cod*/nom* pair to group by.
#   ciiu:    CIIU lookup table — accepted for signature symmetry with
#            the other calificar_* helpers but unused in this body.
# Returns (invisibly, as the value of the final assignment): per-firm
#   scores with the sector mean, difference and dense rank per question.
calificar_cualitativo <- function(data, columna, ciiu) {
  # Names of the code/name columns for this CIIU level.
  codigo <- paste0("cod", columna)
  nombre <- paste0("nom", columna)
  # Sector measures: mean score and firm count per question x CIIU group.
  nivel <- data %>%
    group_by(parte, !!as.name(codigo)) %>%
    summarise(puntaje_sec = mean(puntaje_emp),
              num_empresas = n()) %>%
    mutate(puntaje_sec = round(puntaje_sec, 2)) %>%
    ungroup()
  # Score: firm value minus sector mean, ranked best-first per question.
  puntaje <- data %>%
    select(nit, parte, puntaje_emp,
           !!as.name(codigo), !!as.name(nombre)) %>%
    left_join(nivel, by = c(codigo, "parte")) %>%
    mutate(diferencia = puntaje_emp - puntaje_sec,
           diferencia = round(diferencia, 2)) %>%
    group_by(parte) %>%
    mutate(posicion = dense_rank(-diferencia)) %>%
    arrange(posicion) %>%
    ungroup()
}
# Ejecutar funcion
posiciones_cualitativo <- list(cualitativos, cualitativos,
cualitativos, cualitativos) %>%
map2(c("clase", "grupo", "division", "seccion"),
calificar_cualitativo, ciiu = ciiu4)
names(posiciones_cualitativo) <- c("clase", "grupo", "division", "seccion")
# Remover tablas intermedias
rm(cualitativos, calificar_cualitativo)
# Resultados cuantitativos ----------------------------------------------------
# Datos cuantitativos procedentes de la encuesta
cuantitativos <- encuesta %>%
# Proceso
select(nit, reporta, contains("201")) %>%
mutate_at(vars(-nit, -reporta), function(x) {
x <- str_replace(x, "[^\\d+]", "") %>%
str_replace_all("\\.", "")
}) %>%
gather("parte", "valor", -nit, -reporta) %>%
separate(parte, c("parte", "fecha"), -4) %>%
mutate(fecha = paste(fecha, "01", "01", sep = "-"),
fecha = ymd(fecha)) %>%
spread(parte, valor) %>%
left_join(select(informacion, nit, codclase, tamano, directorio),
by = "nit") %>%
select(fecha, nit, reporta, directorio, tamano, codclase, everything())
# Estados financieros sector
# Base empresarial
estados_financieros <- paste0(getwd(), "/Limpio") %>%
list.files(pattern = "ef", full.names = TRUE) %>%
map(read_csv, col_types = cols(.default = "c")) %>%
bind_rows() %>%
mutate(fecha = ymd(fecha),
tamano = as.integer(tamano)) %>%
mutate_at(vars(-fecha, -nit, -tamano), as.numeric) %>%
group_by(nit) %>%
# Aplicar el ultimo tamano de la empresa
mutate(tamano = last(tamano, order_by = fecha)) %>%
rename(activo = `1`,
patrimonio = `3`,
ingresos = `41`,
utilidad = `33`,
impuesto = `54`) %>%
select(fecha, nit, tamano, everything()) %>%
left_join(select(directorio, nit, codclase), by = "nit") %>%
select(fecha, nit, tamano, codclase, everything()) %>%
ungroup()
# Estados financieros de las empresas de la encuesta
# Quienes reportan utilizan la base empresarial y quienes no la encuesta
estados_empresa <- cuantitativos %>%
select(fecha, nit, codclase, tamano, reporta, directorio,
activo, patrimonio, ingresos, utilidad, impuesto) %>%
mutate_at(vars(activo, patrimonio, ingresos, utilidad, impuesto),
as.numeric) %>%
mutate(tamano = as.integer(tamano)) %>%
# Anexion de la base empresarial
left_join(estados_financieros, by = c("nit", "fecha")) %>%
mutate(tamano = tamano.x,
# Reemplazos
activo = ifelse(reporta == "SI", activo.y, activo.x),
patrimonio = ifelse(reporta == "SI", patrimonio.y, activo.x),
ingresos = ifelse(reporta == "SI", ingresos.y, activo.x),
utilidad = ifelse(reporta == "SI", utilidad.y, activo.x),
impuesto = ifelse(reporta == "SI", impuesto.y, impuesto.x)) %>%
select(fecha, nit, codclase.x, tamano.x, activo, patrimonio,
ingresos, utilidad, impuesto) %>%
rename(codclase = codclase.x,
tamano = tamano.x) %>%
left_join(ciiu4, by = "codclase") %>%
select(-codea, -nomea, -codgea, -nomgea, -coddea, -nomdea,
-codsea, -nomsea)
# Añadir estados financieros a la base de datos empresarial
# Reemplazar por la encuesta
estados_financieros <- estados_financieros %>%
left_join(ciiu4, by = "codclase") %>%
select(-codea, -nomea, -codgea, -nomgea, -coddea, -nomdea,
-codsea, -nomsea) %>%
filter(!nit %in% informacion[["nit"]]) %>%
bind_rows(estados_empresa)
# Funcion para calcular el puntaje
# Estados financieros agrupados por sector y tamaño
# Funcion para realizar los calculos a los cuatro niveles
calificar_estados <- function(base, columna, data, ciiu) {
# Identificador de loas columnas de nivel ciiu
codigo <- paste0("cod", columna)
nombre <- paste0("nom", columna)
# Calcular las medidas de las empresas
empresas <- data %>%
select(-impuesto) %>%
rename(activo_emp = activo,
patrimonio_emp = patrimonio,
ingresos_emp = ingresos,
utilidad_emp = utilidad) %>%
# Calculo de las medidas
mutate(roa_emp = round(utilidad_emp / activo_emp, 2),
roe_emp = round(utilidad_emp / patrimonio_emp, 2),
margen_emp = round(utilidad_emp / ingresos_emp, 2)) %>%
group_by(nit) %>%
mutate(ingresos_emp_1 = lag(ingresos_emp, n = 1, order_by = fecha)) %>%
ungroup() %>%
mutate(varing_emp = ((ingresos_emp / ingresos_emp_1) - 1),
varing_emp = round(varing_emp * 100, 2)) %>%
group_by(nit, !!as.name(codigo), !!as.name(nombre), tamano) %>%
# Calculo de los promedios entre periodos
summarise(roa_emp_p = round(mean(roa_emp), 2),
roe_emp_p = round(mean(roe_emp), 2),
margen_emp_p = round(mean(margen_emp), 2),
varing_emp_p = round(mean(varing_emp, na.rm = TRUE),
2)) %>%
ungroup() %>%
gather("parte", "valor_emp", -nit, -!!as.name(codigo), -!!as.name(nombre),
-tamano) %>%
mutate(parte = word(parte, 1, sep = "_"))
# Calcular las medidas sectoriales
# Agrupar segun el nivel ciiu
nivel <- base %>%
select(-impuesto) %>%
group_by(fecha, !!as.name(codigo), tamano) %>%
summarise(activo_sec = sum(activo, na.rm = TRUE),
patrimonio_sec = sum(patrimonio, na.rm = TRUE),
ingresos_sec = sum(ingresos, na.rm = TRUE),
utilidad_sec = sum(utilidad, na.rm = TRUE),
num_empresas = n_distinct(nit)) %>%
ungroup() %>%
mutate(roa_sec = round(utilidad_sec / activo_sec, 2),
roe_sec = round(utilidad_sec / patrimonio_sec, 2),
margen_sec = round(utilidad_sec / ingresos_sec, 2)) %>%
group_by(!!as.name(codigo), tamano) %>%
mutate(ingresos_sec_1 = lag(ingresos_sec, n = 1, order_by = fecha)) %>%
ungroup() %>%
mutate(varing_sec = ((ingresos_sec / ingresos_sec_1) - 1),
varing_sec = round(varing_sec * 100, 2)) %>%
group_by(!!as.name(codigo), tamano, num_empresas) %>%
summarise(roa_sec_p = round(mean(roa_sec), 2),
roe_sec_p = round(mean(roe_sec), 2),
margen_sec_p = round(mean(margen_sec), 2),
varing_sec_p = round(mean(varing_sec, na.rm = TRUE), 2)) %>%
ungroup() %>%
gather("parte", "valor_sec", -!!as.name(codigo),
-tamano, -num_empresas) %>%
mutate(parte = word(parte, 1, sep = "_"))
# Puntaje
puntaje <- empresas %>%
left_join(nivel, by = c(codigo, "tamano", "parte")) %>%
mutate(valor_emp = round(valor_emp, 2),
valor_sec = round(valor_sec, 2),
diferencia = round(valor_emp - valor_sec, 2)) %>%
group_by(parte) %>%
mutate(posicion = dense_rank(-diferencia)) %>%
arrange(posicion) %>%
select(nit, tamano, parte, !!as.name(codigo), !!as.name(nombre),
num_empresas, valor_emp, valor_sec, diferencia, posicion)
}
# Ejecutar funcion estados financieros
posiciones_estados <- list(estados_financieros, estados_financieros,
estados_financieros, estados_financieros) %>%
map2(c("clase", "grupo", "division", "seccion"),
calificar_estados, data = estados_empresa, ciiu = ciiu4)
names(posiciones_estados) <- c("clase", "grupo", "division", "seccion")
# Productividad
# Datos de productividad de la encuesta
productividad <- cuantitativos %>%
select(fecha, nit, codclase,
perper, pertemp, presper, suelper, gastemp) %>%
mutate_at(vars(perper, pertemp, presper, suelper, gastemp),
as.numeric) %>%
left_join(select(ciiu4, codclase, codea), by = "codclase") %>%
filter(fecha != ymd("2018-01-01")) %>%
left_join(select(estados_empresa, fecha, nit, ingresos),
by = c("fecha", "nit")) %>%
rename(valor = ingresos)
# Filtrar empresas que no tienen codigos de encuesta anual
productividad_adjunto <- productividad %>%
filter(is.na(codea)) %>%
mutate(codea = codclase) %>%
select(-codclase) %>%
group_by(fecha, codea) %>%
summarise_at(vars(-nit), sum, na.rm = TRUE) %>%
left_join(select(ciiu4, codclase, nomclase, codgrupo, nomgrupo,
coddivision, nomdivision, codseccion, nomseccion),
by = c("codea" = "codclase")) %>%
rename(nomea = nomclase,
codgea = codgrupo,
nomgea = nomgrupo,
coddea = coddivision,
nomdea = nomdivision,
codsea = codseccion,
nomsea = nomseccion)
# Importar datos de las encuestas anuales
encuestas_anuales <- paste0(getwd(), "/Encuestas Anuales") %>%
list.files(full.names = TRUE) %>%
map(read_csv, col_types = cols(.default = "c"), col_names = TRUE) %>%
map2(list(c("fecha", "codea", "perper", "pertemp", "gastemp",
"suelper", "presper", "valor", "venta"),
c("fecha", "codea", "valor", "suelper", "presper",
"perper", "pertemp", "gastemp"),
c("fecha", "codea", "valor", "suelper", "sueltem",
"presper", "prestem", "perper", "pertemp")),
function (x, y) {
names(x) <- y
x
})
names(encuestas_anuales) <- c("eac", "eam", "eas")
# Proceso de union de columnas
encuestas_anuales <- encuestas_anuales %>%
map(mutate_at, vars(-fecha, -codea), as.numeric) %>%
map_at(vars(1), select, -venta) %>%
map_at(vars(3), mutate, gastemp = sueltem + prestem) %>%
map_at(vars(3), select, -sueltem, -prestem) %>%
bind_rows() %>%
mutate(fecha = ymd(fecha)) %>%
# Añador la jerarquia de las encuestas anuales bajo CIIU
left_join(distinct(select(ciiu4, codea, nomea)), by = "codea") %>%
left_join(distinct(select(ciiu4, codea, codgea, nomgea)),
by = "codea") %>%
left_join(distinct(select(ciiu4, codea, coddea, nomdea)),
by = "codea") %>%
left_join(distinct(select(ciiu4, codea, codsea, nomsea)),
by = "codea") %>%
# Añadir los sectores ajenos a las encuestas
bind_rows(productividad_adjunto)
# Remover limpiar memoria
rm(productividad_adjunto)
# Funcion para calcular el puntaje
# Medidas de productividad
# Agrupar por distintos niveles de CIIU
calificar_productividad <- function(base, columna, data, ciiu) {
# Identificador de las columnas de nivel ciiu
codigo <- paste0("cod", columna)
nombre <- paste0("nom", columna)
# Medidas de las empresas
empresas <- data %>%
mutate(codea = ifelse(is.na(codea), codclase, codea)) %>%
select(-codclase) %>%
select(fecha, nit, codea, everything()) %>%
rename(perper_emp = perper,
pertemp_emp = pertemp,
presper_emp = presper,
suelper_emp = suelper,
gastemp_emp = gastemp,
valor_emp = valor) %>%
# Calcular las medidas
mutate(salprom_emp = suelper_emp / perper_emp,
propor_emp = pertemp_emp / (perper_emp + pertemp_emp),
product_emp = valor_emp / (perper_emp + pertemp_emp)) %>%
# Formato de las medidas
mutate(salprom_emp = round(salprom_emp, 2),
propor_emp = round(propor_emp * 100, 2),
product_emp = round(product_emp, 2)) %>%
# Calculo de los promedios entre periodos
group_by(nit, codea) %>%
summarise(salprom_emp_p = mean(salprom_emp, na.rm = TRUE),
propor_emp_p = mean(propor_emp, na.rm = TRUE),
product_emp_p = mean(product_emp, na.rm = TRUE)) %>%
gather("parte", "valor_emp", -nit, -codea) %>%
mutate(parte = word(parte, 1, sep = "_")) %>%
# Añadir el sector de las encuestas anuales
left_join(ciiu4, by = c("codea")) %>%
select(nit, codea, parte, valor_emp, nomea, codgea, nomgea,
coddea, nomdea, codsea, nomsea) %>%
left_join(ciiu4, by = c("codea" = "codclase")) %>%
mutate(nomea = ifelse(is.na(nomea.x), nomclase, nomea.x),
codgea = ifelse(is.na(codgea.x), codgrupo, codgea.x),
nomgea = ifelse(is.na(nomgea.x), nomgrupo, nomgea.x),
coddea = ifelse(is.na(coddea.x), coddivision, coddea.x),
nomdea = ifelse(is.na(nomdea.x), nomdivision, nomdea.x),
codsea = ifelse(is.na(codsea.x), codseccion, codsea.x),
nomsea = ifelse(is.na(nomsea.x), nomseccion, nomsea.x)) %>%
select(nit, parte, valor_emp, codea, nomea, codgea, nomgea,
coddea, nomdea, codsea, nomsea)
# Medidas de los sectores
nivel <- encuestas_anuales %>%
# Agrupar por el nivel ciiu
group_by(fecha, !!as.name(codigo)) %>%
summarise(perper_sec = sum(perper),
pertemp_sec = sum(pertemp),
gastemp_sec = sum(gastemp),
suelper_sec = sum(suelper),
presper_sec = sum(presper),
valor_sec = sum(valor)) %>%
ungroup() %>%
# Calculo de las medidas
mutate(salprom_sec = suelper_sec / perper_sec,
propor_sec = pertemp_sec / (perper_sec + pertemp_sec),
product_sec = valor_sec / (perper_sec + pertemp_sec)) %>%
# Formato de las medidas
mutate(salprom_sec = round(salprom_sec, 2),
propor_sec = round(propor_sec * 100, 2),
product_sec = round(product_sec, 2)) %>%
group_by(!!as.name(codigo)) %>%
summarise(salprom_sec_p = mean(salprom_sec),
propor_sec_p = mean(propor_sec),
product_sec_p = mean(product_sec)) %>%
ungroup() %>%
# Formato largo
gather("parte", "valor_sec", -!!as.name(codigo)) %>%
mutate(parte = word(parte, 1, sep = "_"))
# Puntaje
puntaje <- empresas %>%
select(nit, parte, !!as.name(codigo), !!as.name(nombre), valor_emp) %>%
left_join(nivel, by = c(codigo, "parte")) %>%
ungroup() %>%
mutate(valor_emp = round(valor_emp, 2),
valor_sec = round(valor_sec, 2),
diferencia = round(valor_emp - valor_sec, 2)) %>%
group_by(parte) %>%
mutate(posicion = dense_rank(-diferencia)) %>%
ungroup() %>%
arrange(posicion)
}
# Ejecutar funcion estados financieros
posiciones_productividad <- list(encuestas_anuales, encuestas_anuales,
encuestas_anuales, encuestas_anuales) %>%
map2(c("ea", "gea", "dea", "sea"),
calificar_productividad,
data = productividad,
ciiu = ciiu4)
names(posiciones_productividad) <- c("clase", "grupo", "division", "seccion")
# Relacion Sector Publico
# Parte de Impuestos
impuestos <- cuantitativos %>%
select(fecha, nit, codclase, tamano, iva, exenciones,
licitaciones, subsidios, subvenciones, credito) %>%
mutate_at(vars(-fecha, -nit, -codclase, -tamano),
as.numeric) %>%
mutate(tamano = as.integer(tamano)) %>%
left_join(select(estados_empresa, -activo, -patrimonio),
by = c("nit", "fecha", "codclase", "tamano"))
# Estados financieros de impuestos
estados_impuesto <- estados_financieros %>%
select(-activo, -patrimonio, -ingresos)
# Funcion para calcular el puntaje
# Medidas de impuestos
# Agrupar por distintos niveles de CIIU
calificar_impuestos <- function(base, columna, data, ciiu) {
# Identificador de las columnas de nivel ciiu
codigo <- paste0("cod", columna)
nombre <- paste0("nom", columna)
empresas <- data %>%
select(fecha, nit, !!as.name(codigo), !!as.name(nombre),
tamano, ingresos, utilidad, impuesto,
iva, exenciones, licitaciones, subsidios,
subvenciones, credito) %>%
# Calcular las medidas
mutate(margenimp = impuesto / utilidad,
margeniva = iva / ingresos,
margenexe = exenciones / ingresos,
margenlic = licitaciones / ingresos,
margensub = subsidios / ingresos,
margensuv = subvenciones / ingresos,
margencre = credito / ingresos) %>%
# Multiuplicar porcentajes por 100
mutate_at(vars(margenimp, margeniva, margenexe, margenlic,
margensub, margensuv, margencre),
function(x) {x <- round(x * 100, 2)}) %>%
group_by(nit, !!as.name(codigo), !!as.name(nombre), tamano) %>%
# Calcular los promedios de los periodos
summarise_at(vars(margenimp, margeniva, margenexe, margenlic,
margensub, margensuv, margencre),
function(x) {x <- round(mean(x, na.rm = TRUE), 2)}) %>%
ungroup() %>%
gather("parte", "valor_emp", -nit, -!!as.name(codigo),
-!!as.name(nombre), -tamano)
# Medidas de los sectores con los datos de la encuesta
nivel1 <- data %>%
select(-utilidad, -impuesto) %>%
group_by(fecha, !!as.name(codigo), tamano) %>%
summarise_at(vars(ingresos, iva, exenciones, licitaciones,
subsidios, subvenciones, credito),
function(x) {x <- sum(x, na.rm = TRUE)}) %>%
ungroup() %>%
# Calculo de las medidas
mutate(margeniva = iva / ingresos,
margenexe = exenciones / ingresos,
margenlic = licitaciones / ingresos,
margensub = subsidios / ingresos,
margensuv = subvenciones / ingresos,
margencre = credito / ingresos) %>%
# Multiuplicar porcentajes por 100
mutate_at(vars(margeniva, margenexe, margenlic, margensub,
margensuv, margencre),
function(x) {x <- round(x * 100, 2)}) %>%
group_by(!!as.name(codigo), tamano) %>%
# Calcular los promedios de los periodos
summarise_at(vars(margeniva, margenexe, margenlic,
margensub, margensuv, margencre),
function(x) {x <- round(mean(x, na.rm = TRUE), 2)}) %>%
ungroup() %>%
gather("parte", "valor_sec", -!!as.name(codigo), -tamano)
# Medicas calculadas del sector base empresarial
nivel <- base %>%
group_by(fecha, !!as.name(codigo), tamano) %>%
summarise_at(vars(utilidad, impuesto),
function(x) {x <- sum(x, na.ram = TRUE)}) %>%
ungroup() %>%
# Calcular medida y multiplicar por 100
mutate(margenimp = round((impuesto / utilidad) * 100, 2)) %>%
group_by(!!as.name(codigo), tamano) %>%
# Calcular los promedios de los periodos
summarise(margenimp = round(mean(margenimp, na.rm = TRUE), 2)) %>%
ungroup() %>%
gather("parte", "valor_sec", -!!as.name(codigo), -tamano) %>%
bind_rows(nivel1)
# Puntaje
puntaje <- empresas %>%
left_join(nivel, by = c(codigo, "parte", "tamano")) %>%
mutate(valor_emp = round(valor_emp, 2),
valor_sec = round(valor_sec, 2),
diferencia = round(valor_emp - valor_sec, 2)) %>%
group_by(parte) %>%
mutate(posicion = dense_rank(-diferencia))
}
# Ejecutar funcion impuestos
posiciones_impuestos <- list(estados_impuesto, estados_impuesto,
estados_impuesto, estados_impuesto) %>%
map2(c("clase", "grupo", "division", "seccion"),
calificar_impuestos,
data = impuestos,
ciiu = ciiu4)
names(posiciones_impuestos) <- c("clase", "grupo", "division", "seccion")
|
6ca8967995490324a5aa9a2b21076c4c5f28c14c
|
7ce35c255fe7506795ff7abc15b5222e582451bb
|
/2-descriptive-outcomes/stunting/13_stunt_calc_outcomes_birth_strat.R
|
a40070860a4578c6d590ebd93382723114cc54ae
|
[] |
no_license
|
child-growth/ki-longitudinal-growth
|
e464d11756c950e759dd3eea90b94b2d25fbae70
|
d8806bf14c2fa11cdaf94677175c18b86314fd21
|
refs/heads/master
| 2023-05-25T03:45:23.848005
| 2023-05-15T14:58:06
| 2023-05-15T14:58:06
| 269,440,448
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,166
|
r
|
13_stunt_calc_outcomes_birth_strat.R
|
##########################################
# ki longitudinal manuscripts
# stunting analysis
# Calculate mean LAZ, prevalence, incidence,
# and recovery, repeated for fixed effects models
# and sensitivity analysis in monthly cohorts
# with measurements up to 24 months
# Inputs:
# 0-config.R : configuration file
# 0_descriptive_epi_shared_functions.R
# 0_descriptive_epi_stunt_functions.R
# stunting_data.RData
# Outputs:
# meanlaz_velocity.RDS
# meanlaz_velocity_monthly.RDS
# meanlaz_velocity_fe.RDS
# quantile_data_stunting.RDS
# quantile_data_stunting_monthly.RDS
# quantile_data_stunting_fe.RDS
# shiny_desc_data_stunting_objects.RDS
# shiny_desc_data_stunting_objects_monthly.RDS
# shiny_desc_data_stunting_objects_fe.RDS
##########################################
# --- Setup and data preparation -------------------------------------------
# Load project configuration and the shared/stunting-specific descriptive
# epi helper functions, then build the analysis dataset `d` with a
# birth-LAZ stratification column.
rm(list=ls())
source(paste0(here::here(), "/0-config.R"))
# reloading because some overlap with stunting
source(paste0(here::here(), "/0-project-functions/0_descriptive_epi_shared_functions.R"))
source(paste0(here::here(), "/0-project-functions/0_descriptive_epi_stunt_functions.R"))
# ghapdata_dir is defined in 0-config.R
d <- readRDS(paste0(ghapdata_dir, "stunting_data.rds"))
# check included cohorts
assert_that(setequal(unique(d$studyid), monthly_and_quarterly_cohorts),
msg = "Check data. Included cohorts do not match.")
head(d)
# Drop the treatment-arm column; it is not used in this analysis.
d <- d %>% subset(., select = -c(tr))
#Get birth size:
# Birth LAZ = first observed haz per child, but only if that first
# measurement was taken within 31 days of birth; children without a valid
# birth LAZ are dropped.
d <- d %>% group_by(studyid, subjid) %>% arrange(agedays) %>%
mutate(birth_laz =first(haz), first_age=first(agedays), birth_laz=ifelse(first_age>31, NA, birth_laz)) %>%
filter(!is.na(birth_laz))
# Quick sanity checks on the derived birth LAZ (printed, not saved).
table(is.na(d$birth_laz))
table(d$studyid)
summary(d$birth_laz)
# Four birth-LAZ strata: [-6,-2], (-2,-1], (-1,0], (0,6]
d$birth_laz_cat <- cut(d$birth_laz, breaks=c(-6, -2, -1, 0 ,6), include.lowest = T, right=T)
table(d$birth_laz_cat)
prop.table(table(d$birth_laz_cat))*100
#percent ever stunted with birth LAZ <0
# (minimum haz before 24 months below -2 defines "ever stunted")
df <- d %>% filter(agedays < 730) %>% group_by(studyid, subjid, birth_laz_cat) %>% summarize(min_laz=min(haz,na.rm=T)) %>%
filter(min_laz < (-2))
prop.table(table(df$birth_laz_cat))
#Set age categories
# Labels must match the agecat levels produced by calc.ci.agecat() in the
# sourced helper functions.
# 3-month intervals, birth included in the first interval ("0-3 months")
agelst3 = list(
"0-3 months",
"3-6 months",
"6-9 months",
"9-12 months",
"12-15 months",
"15-18 months",
"18-21 months",
"21-24 months"
)
# 6-month intervals, birth included in the first interval
agelst6 = list(
"0-6 months",
"6-12 months",
"12-18 months",
"18-24 months"
)
# 3-month intervals with birth (first week) broken out as its own category
agelst3_birthstrat = list(
"Birth",
"8 days-3 months",
"3-6 months",
"6-9 months",
"9-12 months",
"12-15 months",
"15-18 months",
"18-21 months",
"21-24 months"
)
# 6-month intervals with birth broken out as its own category
agelst6_birthstrat = list(
"Birth",
"8 days-6 months",
"6-12 months",
"12-18 months",
"18-24 months"
)
# Compute all descriptive stunting outcomes -- prevalence, mean LAZ,
# incidence proportion and cumulative incidence -- pooled overall, by the
# `pooling_variable` stratum, and by cohort.
#
# data:               subject-level stunting data (needs haz, agedays,
#                     studyid, subjid and the column named by
#                     `pooling_variable`)
# calc_method:        pooling method passed through to the summary.*
#                     helpers (e.g. "REML"; "FE" presumably for fixed
#                     effects -- confirm against the helper definitions)
# output_file_suffix: only referenced by the commented-out velocity
#                     section below; unused by the active code
# pooling_variable:   column copied into `country_cat`, so the shared
#                     helpers (which group by country_cat) stratify by it
#
# Returns a long data.frame with one row per estimate (cohort x stratum x
# age category x measure).
#
# NOTE(review): the calc.*/summary.* helpers come from the sourced
# 0_descriptive_epi_* project files, and the `<<-` assignments below write
# dprev/dmon/d3/... into the global environment as a side effect -- confirm
# nothing downstream relies on those globals before refactoring.
calc_outcomes = function(data, calc_method, output_file_suffix, pooling_variable){
data$country_cat <- data[[pooling_variable]]
# Age-categorized copies of the data for each outcome type (global via <<-)
dprev <<- calc.prev.agecat(data)
dmon <<- calc.monthly.agecat(data)
d3 <<- calc.ci.agecat(data, range = 3, birth="yes")
d6 <<- calc.ci.agecat(data, range = 6, birth="yes")
d3_birthstrat <<- calc.ci.agecat(data, range = 3, birth="no")
d6_birthstrat <<- calc.ci.agecat(data, range = 6, birth="no")
######################################################################
# Prevalence
######################################################################
# Pooled + stratum-level + cohort-level prevalence of (severe) stunting.
calc_prevalence = function(severe){
prev.data <- summary.prev.haz(dprev, severe.stunted = severe, method = calc_method)
prev.country_cat <- dprev %>% group_by(country_cat) %>% do(summary.prev.haz(., severe.stunted = severe, method = calc_method)$prev.res)
prev.cohort <-
prev.data$prev.cohort %>% subset(., select = c(cohort, agecat, nmeas, prev, ci.lb, ci.ub)) %>%
rename(est = prev, lb = ci.lb, ub = ci.ub)
# Stack pooled-overall, pooled-by-stratum, and cohort-specific rows
prev <- bind_rows(
data.frame(cohort = "pooled", country_cat = "Overall", prev.data$prev.res),
data.frame(cohort = "pooled", prev.country_cat),
prev.cohort
)
return(prev)
}
#----------------------------------------
# Prevalence and WHZ - not including yearly studies
#----------------------------------------
prev = calc_prevalence(severe = FALSE)
#----------------------------------------
# Severe stunting prevalence
#----------------------------------------
sev.prev = calc_prevalence(severe = TRUE)
######################################################################
# Mean HAZ
######################################################################
#----------------------------------------
# mean haz
#----------------------------------------
haz.data <- summary.haz(dprev, method = calc_method)
haz.country_cat <- dprev %>% group_by(country_cat) %>% do(summary.haz(., method = calc_method)$haz.res)
haz.cohort <-
haz.data$haz.cohort %>% subset(., select = c(cohort, agecat, nmeas, meanhaz, ci.lb, ci.ub)) %>%
rename(est = meanhaz, lb = ci.lb, ub = ci.ub)
haz <- bind_rows(
data.frame(cohort = "pooled", country_cat = "Overall", haz.data$haz.res),
data.frame(cohort = "pooled", haz.country_cat),
haz.cohort
)
# #----------------------------------------
# # mean haz for growth velocity age categories
# #----------------------------------------
# d_vel = data %>%
# mutate(agecat=ifelse(agedays<3*30.4167,"0-3",
# ifelse(agedays>=3*30.4167 & agedays<6*30.4167,"3-6",
# ifelse(agedays>=6*30.4167 & agedays<9*30.4167,"6-9",
# ifelse(agedays>=9*30.4167 & agedays<12*30.4167,"9-12",
# ifelse(agedays>=12*30.4167 & agedays<15*30.4167,"12-15",
# ifelse(agedays>=15*30.4167 & agedays<18*30.4167,"15-18",
# ifelse(agedays>=18*30.4167 & agedays<21*30.4167,"18-21",
# ifelse(agedays>=21*30.4167& agedays<24*30.4167,"21-24",""))))))))) %>%
# mutate(agecat=factor(agecat,levels=c("0-3","3-6","6-9","9-12",
# "12-15","15-18","18-21","21-24")))
#
# haz.data.vel <- summary.haz.age.sex(d_vel, method = calc_method)
# haz.country_cat.vel <- d_vel %>% group_by(country_cat) %>% do(summary.haz.age.sex(., method = calc_method)$haz.res)
# haz.cohort.vel <-
# haz.data.vel$haz.cohort %>%
# subset(., select = c(cohort, agecat, sex, nmeas, meanhaz,
# ci.lb, ci.ub)) %>%
# rename(est = meanhaz, lb = ci.lb, ub = ci.ub)
#
# haz.vel <- bind_rows(
# data.frame(cohort = "pooled", country_cat = "Overall", haz.data.vel$haz.res),
# data.frame(cohort = "pooled", haz.country_cat.vel),
# haz.cohort.vel
# )
#
# saveRDS(haz.vel, file = paste0(res_dir, "stunting/meanlaz_velocity",
# calc_method, output_file_suffix, ".RDS"))
# #----------------------------------------
# # monthly mean haz
# #----------------------------------------
# dmon <- calc.monthly.agecat(data)
# monthly.haz.data <- summary.haz(dmon, method = calc_method)
# monthly.haz.country_cat <- dmon %>% group_by(country_cat) %>% do(summary.haz(., method = calc_method)$haz.res)
# monthly.haz.country <- dmon %>% group_by(country) %>% do(summary.haz(., method = calc_method)$haz.res)
# monthly.haz.cohort <-
# monthly.haz.data$haz.cohort %>% subset(., select = c(cohort, agecat, nmeas, meanhaz, ci.lb, ci.ub)) %>%
# rename(est = meanhaz, lb = ci.lb, ub = ci.ub)
#
# monthly.haz <- bind_rows(
# data.frame(cohort = "pooled", country_cat = "Overall", monthly.haz.data$haz.res),
# data.frame(cohort = "pooled", monthly.haz.country_cat),
# data.frame(cohort = "pooled-country", monthly.haz.country),
# monthly.haz.cohort
# )
######################################################################
# Incidence proportion
######################################################################
# Pooled + stratum-level + cohort-level incidence proportion of (severe)
# stunting for the age categories in `age_list`.
calc_ip = function(datatable, age_list, severe){
ip.data <- summary.stunt.incprop(datatable, agelist = age_list, severe.stunted = severe, method = calc_method)
ip.country_cat <- datatable %>% group_by(country_cat) %>% do(summary.stunt.incprop(., agelist = age_list, severe.stunted = severe, method = calc_method)$ip.res)
ip.cohort <-
ip.data$ip.cohort %>%
subset(., select = c(cohort, agecat, nchild, yi, ci.lb, ci.ub)) %>%
rename(est = yi, lb = ci.lb, ub = ci.ub, nmeas=nchild)
ip <- bind_rows(
data.frame(cohort = "pooled", country_cat = "Overall", ip.data$ip.res),
data.frame(cohort = "pooled", ip.country_cat),
ip.cohort
)
return(ip)
}
#----------------------------------------
# Incidence proportion 3 month intervals
#----------------------------------------
ip_3 = calc_ip(d3, agelst3, severe = FALSE)
#----------------------------------------
# Incidence proportion 3 month intervals
# stratify by birth
#----------------------------------------
ip_3.birthstrat = calc_ip(d3_birthstrat, agelst3_birthstrat, severe = FALSE)
# #----------------------------------------
# # Incidence proportion 6 month intervals
# #----------------------------------------
# ip_6 = calc_ip(d6, agelst6, severe = FALSE)
#----------------------------------------
# Incidence proportion of severe stunting
# 3 month interval
#----------------------------------------
sev.ip3 = calc_ip(d3, agelst3, severe = TRUE)
# #----------------------------------------
# # Incidence proportion of severe stunting
# # 6 month interval
# #----------------------------------------
# sev.ip6 = calc_ip(d6, agelst6, severe = TRUE)
######################################################################
# Cumulative incidence
######################################################################
# Pooled + stratum-level + cohort-level cumulative incidence of (severe)
# stunting, optionally treating birth as its own stratum.
calc_ci = function(datatable, age_list, birth_strat, severe){
ci.data <- summary.ci(datatable, birthstrat = birth_strat, agelist = age_list, severe.stunted = severe, method = calc_method)
ci.country_cat <- datatable %>% group_by(country_cat) %>% do(summary.ci(., agelist = age_list, birthstrat = birth_strat, severe.stunted = severe, method = calc_method)$ci.res)
ci.cohort <-
ci.data$ci.cohort %>% subset(., select = c(cohort, agecat, nchild, yi, ci.lb, ci.ub)) %>%
rename(est = yi, lb = ci.lb, ub = ci.ub, nmeas=nchild)
cuminc <- bind_rows(
data.frame(cohort = "pooled", country_cat = "Overall", ci.data$ci.res),
data.frame(cohort = "pooled", ci.country_cat),
ci.cohort
)
return(cuminc)
}
#----------------------------------------
# Cumulative Incidence - 3 month intervals
#----------------------------------------
cuminc3 = calc_ci(d3, agelst3, birth_strat = FALSE, severe = FALSE)
#----------------------------------------
# Cumulative Incidence - 3 month intervals
# stratify by birth
#----------------------------------------
cuminc3.birthstrat = calc_ci(d3_birthstrat, agelst3_birthstrat, birth_strat = TRUE, severe = FALSE)
# #----------------------------------------
# # Cumulative Incidence - 6 month intervals
# #----------------------------------------
# cuminc6 <- calc_ci(d6, agelst6, birth_strat = FALSE, severe = FALSE)
#
#----------------------------------------
# Cumulative Incidence - 3 month intervals
# severe
#----------------------------------------
sev.cuminc3 <- calc_ci(d3, agelst3, birth_strat = FALSE, severe = TRUE)
# #----------------------------------------
# # Cumulative Incidence - 6 month intervals
# # severe
# #----------------------------------------
# sev.cuminc6 <- calc_ci(d6, agelst6, birth_strat = FALSE, severe = TRUE)
# Combine every measure into one long table, labelling each with disease,
# age-range, birth handling and severity.
shiny_desc_data <- bind_rows(
data.frame(disease = "Stunting", age_range="3 months", birth="yes", severe="no", measure= "Prevalence", prev),
data.frame(disease = "Stunting", age_range="3 months", birth="yes", severe="yes", measure= "Prevalence", sev.prev),
data.frame(disease = "Stunting", age_range="3 months", birth="yes", severe="no", measure= "Mean LAZ", haz),
#data.frame(disease = "Stunting", age_range="1 month", birth="yes", severe="no", measure= "Mean LAZ", monthly.haz),
data.frame(disease = "Stunting", age_range="3 months", birth="yes", severe="no", measure= "Cumulative incidence", cuminc3),
data.frame(disease = "Stunting", age_range="3 months", birth="strat", severe="no", measure= "Cumulative incidence", cuminc3.birthstrat),
#data.frame(disease = "Stunting", age_range="6 months", birth="yes", severe="no", measure= "Cumulative incidence", cuminc6),
data.frame(disease = "Stunting", age_range="3 months", birth="yes", severe="yes", measure= "Cumulative incidence", sev.cuminc3),
#data.frame(disease = "Stunting", age_range="6 months", birth="yes", severe="yes", measure= "Cumulative incidence", sev.cuminc6),
data.frame(disease = "Stunting", age_range="3 months", birth="yes", severe="no", measure= "Incidence_proportion", ip_3),
data.frame(disease = "Stunting", age_range="3 months", birth="strat", severe="no", measure= "Incidence_proportion", ip_3.birthstrat),
#data.frame(disease = "Stunting", age_range="6 months", birth="yes", severe="no", measure= "Incidence_proportion", ip_6),
data.frame(disease = "Stunting", age_range="3 months", birth="yes", severe="yes", measure= "Incidence_proportion", sev.ip3),
#data.frame(disease = "Stunting", age_range="6 months", birth="yes", severe="yes", measure= "Incidence_proportion", sev.ip6)
# NOTE(review): the trailing comma after the sev.ip3 row appears tolerated
# because bind_rows() captures dots with rlang-style dots that ignore empty
# trailing arguments -- verify if dplyr is ever downgraded.
)
#assert_that(names(table(shiny_desc_data$method.used)) == calc_method)
# Drop helper columns not needed downstream
shiny_desc_data <- shiny_desc_data %>% subset(., select = -c(se, nmeas.f, ptest.f))
shiny_desc_data$agecat <- as.factor(shiny_desc_data$agecat)
return(shiny_desc_data)
}
# --- Run the analysis pooled by birth-LAZ category and save ---------------
# (The three assignments below mirror the call arguments; they make the
# function easy to step through interactively.)
data = d
calc_method = "REML"
output_file_suffix = ""
pooling_variable="birth_laz_cat"
res = calc_outcomes(data = d, calc_method = "REML", pooling_variable="birth_laz_cat", output_file_suffix = "")
table(res$agecat)
# res_dir is defined in 0-config.R
saveRDS(res, file = paste0(res_dir,"stunting/stunt_birth_laz_pool.RDS"))
|
9e4d77fea0bd3bc6dbc98c5afa4f45b6d33c9793
|
68f8217845056df195a2d78356ddd5a2f9a9e44e
|
/R/statistics_with_R/04_Exploring_Data_with_Graphs/Script_Files/01_graph_intro.R
|
35389db31760d8c6d2f571a873131aaa1f3d35de
|
[
"MIT"
] |
permissive
|
snehilk1312/AppliedStatistics
|
dbc4a4f2565cf0877776eee88b640abb08d2feb5
|
0e2b9ca45b004f38f796fa6506270382ca3c95a0
|
refs/heads/master
| 2023-01-07T15:11:18.405082
| 2020-11-07T21:06:37
| 2020-11-07T21:06:37
| 289,775,345
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 472
|
r
|
01_graph_intro.R
|
# Exploratory jittered scatterplots of the Facebook narcissism dataset.
# loading ggplot2
library(ggplot2)
# NOTE(review): absolute, machine-specific path -- consider a relative path.
facebookData <- read.delim('/home/atrides/Desktop/Applied-Statistics-with-R-master/statistics_with_R/04_Exploring_Data_with_Graphs/FacebookNarcissism.dat')
head(facebookData)
# initiating the plot object: NPQC_R_Total on x, Rating on y
graph <- ggplot(facebookData, aes(NPQC_R_Total,Rating))
# adding geoms: jittered points colored by rating type (fixed triangle shape),
# then a second version mapping both shape and color to rating type
graph+geom_point(shape=17, size=2,aes(color=Rating_Type),position = 'jitter')
graph + geom_point(aes(shape = Rating_Type,color=Rating_Type), position = "jitter")
|
3bd27b7f571c0fa4a155e593e92c804c8277deac
|
398cb934488b6ebed4f350ef4ab7b6b52df50e7f
|
/plot4.R
|
34f393a3d7bf5df02043370b79f4f1b8f4fb0ce3
|
[] |
no_license
|
sankusaha11/ExData_Plotting1
|
1596a68f09cdbfb0ad1ceb6fb7934f3b2362ee97
|
9fd212992cebd92018ed9d2e5d9636fd6baf103f
|
refs/heads/master
| 2021-01-16T23:03:39.786925
| 2016-06-17T02:50:38
| 2016-06-17T02:50:38
| 61,340,723
| 0
| 0
| null | 2016-06-17T02:46:17
| 2016-06-17T02:46:17
| null |
UTF-8
|
R
| false
| false
| 1,233
|
r
|
plot4.R
|
#Generating Plot#4
# 2x2 panel of household power-consumption plots for 2007-02-01/02,
# written to plot4.png.
# NOTE(review): setwd() with a machine-specific path and rm(list=ls()) are
# script-local conveniences; they make the script non-portable.
setwd("C:/Users/ssaha/Desktop/Personal/coursera/ExploratoryAnalysis")
rm(list=ls())
mydf <- read.csv("household_power_consumption.txt", sep = ";", stringsAsFactors = FALSE)
# Parse dates and keep only the two target days
mydf$Date <- as.Date(mydf$Date,"%d/%m/%Y")
mydf2days <- subset(mydf, Date == as.Date("2007-02-01")| Date == as.Date("2007-02-02"))
# Coerce to numeric ("?" placeholders in the raw file become NA)
mydf2days$Global_active_power <- as.numeric(mydf2days$Global_active_power)
# Build a POSIXlt datetime from the separate Date and Time columns
mydf2days$dt <- paste(mydf2days$Date, mydf2days$Time)
mydf2days$dt <- strptime(mydf2days$dt,"%Y-%m-%d %H:%M:%S")
png("plot4.png")
par(mfrow = c(2,2))
# Top-left: global active power; top-right: voltage
with(mydf2days, plot(dt,Global_active_power,type="l", ylab = "Global Active Power",xlab=""))
with(mydf2days, plot(dt,Voltage,type="l", ylab = "Voltage",xlab="datetime"))
# Bottom-left: three sub-metering series overlaid with a legend
with(mydf2days,plot(dt,Sub_metering_1, col= "black", type="l",ylab= "Energy sub metering"))
lines(mydf2days$dt, mydf2days$Sub_metering_2, col="red", type="l")
lines(mydf2days$dt, mydf2days$Sub_metering_3, col="blue", type="l")
legend("topright", c("Sub_metering_1", "Sub_metering_2","Sub_metering_3"), lty=c(1,1,1),col=c("black","red","blue"))
# Bottom-right: global reactive power
with(mydf2days, plot(dt,Global_reactive_power,type="l", ylab = "Global_reactive_power",xlab="datetime"))
dev.off()
|
3dd1231e80f0c16a352d116e48bc64a7211522eb
|
bcd3edb2557cf3ca80d541c6b9ed2ac4e217bb8e
|
/man/cr_survreg.Rd
|
8761b75c384b715b3f7f502d3c926af0b62ff834
|
[] |
no_license
|
jwdink/tidysurv
|
a0bf281bdea6e345d8b3357e37cc9f55226ec928
|
899e26731580f1fda4586b81f4ead0660c590fcd
|
refs/heads/master
| 2021-01-09T05:48:09.016298
| 2017-08-09T03:16:56
| 2017-08-09T03:16:56
| 80,838,538
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,912
|
rd
|
cr_survreg.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tidysurv-package.R
\name{cr_survreg}
\alias{cr_survreg}
\title{Easy interface for parametric competing-risks survival-regression}
\usage{
cr_survreg(time_col_name, event_col_name, data, list_of_list_of_args,
time_lb_col_name = NULL, time_dependent_config = list(time_start_col_name
= NULL, id_col_name = NULL, time_dependent_col_names = NULL),
method = "flexsurvreg")
}
\arguments{
\item{time_col_name}{A character indicating the column-name of the 'time' column.}
\item{event_col_name}{A character indicating the column-name for events. The values in this
column (either factor or character), should match the names of the \code{list_of_list_of_args},
except for the value indicating censoring.}
\item{data}{A dataframe.}
\item{list_of_list_of_args}{A list, whose names correspond to each possible type of event. These
should correspond to the values in the \code{event_col_name} column. The elements of this list
are themselves lists- arguments to be passed to the survival-regression modelling function (as
specified by \code{method}- currently only \code{flexsurvreg} is supported).}
\item{time_lb_col_name}{Optional. A character indicating the column-name of the time
'lower-bound'. See the vignette for details.}
\item{time_dependent_config}{If your data is in a 'counting process' format (i.e., there are
multiple rows per 'person', with a 'start' column specifying right-truncation), you should
supply this. A list with three entries: `time_start_col_name`, `id_col_name`, and
`time_dependent_col_names`.}
\item{method}{A character string naming the function to be called for survival-regression
modelling. Currently only supports \code{flexsurvreg}.}
}
\value{
An object of type \code{cr_survreg}, with plot and summary methods.
}
\description{
Easy interface for parametric competing-risks survival-regression
}
|
71b44fa5f53d2f59309dfa50ab21228990780cb9
|
ae54e750598e79dbe8b16ce02c2a14dbd5b38f1a
|
/cachematrix.R
|
3d679fb8b11308acc2b1c11509aafbcb12013cd5
|
[] |
no_license
|
bravedream/ProgrammingAssignment2
|
3b5e06a55d9c360ceda1dd3e879c23c251b9e615
|
095ebfc05d4d5ad1bf80445cb58fa6999e553013
|
refs/heads/master
| 2021-01-21T05:55:40.676811
| 2016-04-06T20:23:07
| 2016-04-06T20:23:07
| 46,301,167
| 0
| 0
| null | 2016-04-06T20:21:01
| 2015-11-16T20:38:54
|
R
|
UTF-8
|
R
| false
| false
| 1,908
|
r
|
cachematrix.R
|
#This file takes an input matrix (assumed to be an invertible square matrix) and checks
#whether its inverse has already been computed. If not, it computes and returns the
#inverse; otherwise, it retrieves the cached inverse matrix.
#the function below will create a matrix object and set the Inv to null as a marker that
#the new object has not been inverted. Set() method will first check if the new input matrix
#is the same as existing (if any) matrix and if yes, don't change Inv, don't reassign values to
#x, which is the input matrix. If a different matrix input, then reset Inv and x to create
#a new object. The function returns the four sub-functions (set, get, setInv and getInv).
#setInv() is used to cache the inverse matrix from cacheSolve() function.
# Create a cache-aware matrix wrapper.
#
# Returns a list of four accessor closures around the stored matrix `x` and
# its (lazily computed) inverse `Inv`:
#   set(y)    -- replace the stored matrix; invalidates the cached inverse
#                only when `y` actually differs from the current matrix
#   get()     -- return the stored matrix
#   setInv(i) -- store a computed inverse (called by cacheSolve())
#   getInv()  -- return the cached inverse, or NULL if not yet computed
#
# Fix: the original guard `all(y == x) == FALSE` raised a "non-conformable
# arrays" error when the new matrix had different dimensions, and produced
# NA (so `if()` errored) when either matrix contained NA. identical()
# handles both cases safely.
makeCacheMatrix <- function(x = matrix()) {
  Inv <- NULL
  set <- function(y) {
    if (!identical(x, y)) {
      x <<- y
      Inv <<- NULL
    }
  }
  get <- function() x
  setInv <- function(cacheInv) Inv <<- cacheInv
  getInv <- function() Inv
  list(
    set = set,
    get = get,
    setInv = setInv,
    getInv = getInv
  )
}
#The cacheSolve function below actually does the inversion and caching. It first invokes the
#getInv() method of the makeCacheMatrix() object to check whether an inverse has already
#been cached. If so, it prints a message and returns the cached inverse. If not, a new or
#different matrix is present, so get() retrieves the data and solve() computes the
#inverse, assigned to the variable cacheInv. The setInv() method of the matrix object x
#is then invoked to store cacheInv, so that on the next call Inv inside the
#makeCacheMatrix() object will no longer be NULL.
# Return the inverse of the special "matrix" object created by
# makeCacheMatrix(). A cached inverse is returned directly (after a
# message); otherwise the inverse is computed with solve(), stored back
# into the object's cache, and returned. `...` is forwarded to solve().
cacheSolve <- function(x, ...) {
  inv <- x$getInv()
  if (is.null(inv)) {
    inv <- solve(x$get(), ...)
    x$setInv(inv)
  } else {
    message("getting cached inverse matrix")
  }
  inv
}
|
e5953149cf2a05b19f97753147d97a599f655a23
|
b9d4fdc5b544ffb6158705d1c8e81f670a2931f1
|
/inst/shiny/server.R
|
c2dbd9fc19e751040546c77e906c3302c5ac5803
|
[] |
no_license
|
cran/nph
|
284e6c2ad631edfd62f46a3540c499d26c287476
|
e2fa54d4e719b85ad2c40e9ce8b6b0010bea4f1c
|
refs/heads/master
| 2022-06-17T07:47:58.897005
| 2022-05-16T22:20:05
| 2022-05-16T22:20:05
| 236,633,124
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 814
|
r
|
server.R
|
# Shiny server for the nph application: each tab's server logic lives in its
# own file under server/ and is sourced into the server function's scope.
# Dedicated environment for app state shared across sourced files
nph.env<-new.env()
library(ggplot2)
library(dplyr)
library(formatR)
#library(nph)
server <- shinyServer(function(input, output, session) {
# NOTE(review): stringsAsFactors is a global option; setting it inside the
# server affects the whole session -- verify this is intentional.
options(stringsAsFactors = F)
# # include logic for each tab
# local = TRUE sources into this function's environment so the files can
# see input/output/session; $value returns each script's last expression.
source(file.path("server", "srv-tab-home.R"), local = TRUE)$value
source(file.path("server", "srv-tab-simulate.R"), local = TRUE)$value
source(file.path("server", "srv-tab-power.R"),local = TRUE)$value
source(file.path("server", "srv-tab-condpower.R"),local = TRUE)$value
source(file.path("server", "helper_functions.R"),local = TRUE)$value
source(file.path("server", "paket_nph_1-9.R"),local = TRUE)$value
source(file.path("server", "additional_functions.R"),local = TRUE)$value
source(file.path("server", "continuous_functions.R"),local = TRUE)$value
})
|
4c28b900ff0c2b8fd54784c2399429dc9b7f7b43
|
a6f7a6a708ac52be533d18be5e64aa513725c35e
|
/notebooks-2021-joule/2021_06_xx_end_of_life_variation_analysis.R
|
da5244781caada4a17a8a1c96c94e122ed88cf62
|
[] |
no_license
|
wengandrew/fast-formation
|
24ef0b9ff9e3e69cab6804ac77aec97d018bdabc
|
78735c7255cd5c3db66c53a86e981dba67129f01
|
refs/heads/main
| 2023-04-10T00:16:49.636322
| 2023-03-04T15:39:08
| 2023-03-04T15:39:56
| 408,813,945
| 6
| 1
| null | 2022-08-31T02:24:00
| 2021-09-21T12:36:56
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 4,835
|
r
|
2021_06_xx_end_of_life_variation_analysis.R
|
library(knitr)
library(ggplot2)
library(ggbeeswarm)
library(data.table)
library(magrittr)
library(dplyr)
library(cvequality)
library(outliers)
"
Determine the statistical significance of aging variability due to fast
formation. Use some statistical tests to determine if the coefficients of
variation from the two sample sets (baseline vs fast formation) are
statistically different. We need to look at the coefficient of variation
(standard deviation over mean) since the variation in aging naturally increases
as the mean increases.
"
# Remove per-group extreme outliers from the end-of-life cycle data.
#
# For each formation group ("Baseline Formation", "Fast Formation"), runs
# Dixon's Q test (outliers::dixon.test) on each extreme of `cycles`; an
# extreme with p < 0.05 is dropped (at most one min and one max per group).
# A message is printed for every removal.
#
# Fixes vs. original:
#  * the row to drop is located within the tested group -- the original
#    matched on `cycles` alone, so a tie with the other group's value could
#    delete rows from both groups (and all duplicates at once);
#  * the p-value is read via `$p.value` instead of positional `out[3]`.
#
# NOTE(review): `opposite = FALSE` tests whichever extreme dixon.test
# selects by default (the value farthest from the mean), which is not
# guaranteed to be the maximum -- the max/min labelling mirrors the
# original code; confirm against the outliers package docs.
#
# @param kable_in data.frame with columns `formation_type` and `cycles`.
# @return `kable_in` with significant extreme rows removed.
remove_outliers <- function(kable_in) {
  kable_out <- kable_in
  p_value_target <- 0.05

  x <- subset(kable_in, formation_type == 'Baseline Formation')$cycles
  y <- subset(kable_in, formation_type == 'Fast Formation')$cycles

  # Row indices (in kable_out) scheduled for removal
  idx <- c()

  # First row holding `value` within `group`
  row_in_group <- function(group, value) {
    which(kable_out$formation_type == group & kable_out$cycles == value)[1]
  }

  # Dixon test for one extreme of one group; returns the row to drop or NULL
  check_extreme <- function(values, group, use_min) {
    res <- dixon.test(values, type = 0, opposite = use_min, two.sided = TRUE)
    if (res$p.value < p_value_target) {
      extreme <- if (use_min) min(values) else max(values)
      word <- if (use_min) "min" else "max"
      cat(sprintf('Removing %s from %s (%g)...\n', word, group, extreme))
      return(row_in_group(group, extreme))
    }
    NULL
  }

  idx <- c(idx, check_extreme(x, 'Baseline Formation', FALSE))
  idx <- c(idx, check_extreme(x, 'Baseline Formation', TRUE))
  idx <- c(idx, check_extreme(y, 'Fast Formation', FALSE))
  idx <- c(idx, check_extreme(y, 'Fast Formation', TRUE))

  if (is.null(idx) == FALSE) {
    kable_out <- kable_out[-idx, ]
  }
  return(kable_out)
}
# Two-sided F test comparing the variance of end-of-life cycles between the
# baseline-formation and fast-formation groups.
#
# Fix: the original evaluated `res.ftest$p.value` and then discarded it,
# implicitly returning only `res.ftest$estimate`; both quantities are now
# returned explicitly.
#
# @param kable_in data.frame with columns `formation_type` and `cycles`.
# @return list with `p.value` and `estimate` (the ratio of variances) from
#   stats::var.test().
run_f_test <- function(kable_in) {
  x <- subset(kable_in, formation_type == 'Baseline Formation')$cycles
  y <- subset(kable_in, formation_type == 'Fast Formation')$cycles
  res.ftest <- var.test(x, y, alternative = 'two.sided')
  list(p.value = res.ftest$p.value, estimate = res.ftest$estimate)
}
# Test whether the coefficients of variation (CV) of `cycles` differ between
# formation groups, then return a box/beeswarm plot annotated with the
# asymptotic test result. Requires cvequality, ggplot2 and ggbeeswarm.
# NOTE(review): cv_test_MSLRT is computed but never used; the label reads
# cv_test_asymptotic by position ([1] = statistic, [2] = p-value,
# presumably -- confirm against cvequality's return value).
compute_statistics <- function(kable_in) {
"
Compute the statistics of variability; are the variabilities between the two
groups statistically significant after accounting for differences in the mean?
References:
https://cran.r-project.org/web/packages/cvequality/vignettes/how_to_test_CVs.html
https://link.springer.com/article/10.1007/s00180-013-0445-2
"
# Asymptotic (Feltz & Miller) test for equality of CVs
cv_test_asymptotic <- with(kable_in,
asymptotic_test(cycles,
formation_type)
)
# Modified signed-likelihood-ratio test (Monte Carlo, nr resamples)
cv_test_MSLRT <- with(kable_in,
mslr_test(nr = 1e4,
cycles,
formation_type))
label = sprintf("D'AD Statistic: %.4f, p = %.4f \n", cv_test_asymptotic[1],
cv_test_asymptotic[2])
cat(label)
# Visualize the result for a sec as a sanity check
# (the ggplot object is the function's return value; callers must print it)
ggplot(kable_in,
aes(formation_type, cycles)) +
geom_boxplot() +
geom_quasirandom(alpha = 0.5) +
theme_bw() +
theme(text = element_text(size=20)) +
labs(subtitle = label,
y = "Cycles",
x = NULL)
}
## BEGIN MAIN
# For each retention target, split the data by temperature condition,
# remove per-group Dixon outliers, and run the CV-equality tests.
# Load the data
setwd('~/Documents/project-formation/output')
EndOfLife = read.csv('end_of_life_retention.csv')
kable(head(EndOfLife), caption = "Preview of first few rows of EndOfLife data")
# Define a loop that computes the statistics for each retention target
retention_target_list = list(50, 60, 70, 80)
for (retention_target in retention_target_list) {
cat(sprintf("\n\n ----------%g%% Retention --------\n", retention_target))
# Define subsets of the table
# (is_room_temp is stored as the strings 'True'/'False' in the CSV)
room_temp = subset(EndOfLife, is_room_temp == 'True' &
retention == retention_target)
high_temp = subset(EndOfLife, is_room_temp == 'False' &
retention == retention_target)
# Remove outliers
print('|outlier -> room temp')
room_temp = remove_outliers(room_temp)
print('|outlier -> high temp')
high_temp = remove_outliers(high_temp)
# # Duplicate the dataset to understand the impact of sample size on the p-value
# high_temp = rbind(high_temp, high_temp)
# NOTE(review): these two format strings have no %-spec for
# retention_target, and the ggplot returned by compute_statistics() is not
# explicitly print()ed, so inside this for loop it will not render --
# verify both are intended.
cat(sprintf("--> ROOM TEMP..\n.", retention_target))
compute_statistics(room_temp)
cat(sprintf("--> HIGH TEMP..\n.", retention_target))
compute_statistics(high_temp)
}
|
aafdd418c8e4dc2f1efb2bf1b9b4e35e4a9847bc
|
91e79fa553199db814e7e49022666ae1f4bb9b14
|
/input/input_base.R
|
f8f2670e5abf3b64ef1b1a5b31852281243ea9e5
|
[] |
no_license
|
gloriakang/economic-influenza
|
2ae2a6b20ebc05ec120fdf581f3cf614f5282403
|
d8fa9a93624425ce2284e9d3c33313a1ba09b359
|
refs/heads/master
| 2021-06-12T08:27:59.786460
| 2019-10-27T19:39:20
| 2019-10-27T19:39:20
| 126,077,561
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 597
|
r
|
input_base.R
|
# inputs = base case
# Scenario inputs for the base (no added vaccination) case.
## base vaccine compliance = 0%
#vax_comp_b <- 0
# Added vaccination by age group (0-4, 5-19, 20-64, 65+) -- none in base case
bc_04 <- 0
bc_519 <- 0
bc_2064 <- 0
bc_65 <- 0
## total = 1248308.16 (36.64%)
# Baseline vaccinated counts by age group, scaled by 0.67 (presumably vaccine
# effectiveness -- TODO confirm); trailing %s are each group's coverage.
base_04 <- 74205.56 * 0.67 #33.19%
base_519 <- 419788.08 * 0.67 #65.63%
base_2064 <- 685774.72 * 0.67 #30.68%
base_65 <- 68539.8 * 0.67 #22.21%
# base case DALYs (high, low, all)
#daly_b_04 <- c(2396.2599, 797.6505, 3193.9104)
#daly_b_519 <- c(11847.968 , 3987.216, 15835.183)
#daly_b_2064 <- c(49711.938, 7485.782, 57197.719)
#daly_b_65 <- c(13243.099, 1686.065, 14929.164)
#total_dalys_base <- daly_b_04 + daly_b_519 + daly_b_2064 + daly_b_65
|
c7b4602ed85ff1d6a14a5ca39aa3b6633233d725
|
12cfa29386d6241c8305927c2238cacff860f7bc
|
/starter_rpackages.R
|
0cc615aa46f55b522684a59388279033dd7d35fe
|
[] |
no_license
|
thomas-keller/slurm_caret_tutor
|
365115d4fdb7403d49f1199a62bd937627cee16a
|
6d70f231e0374e24663c39e4a57360ec80b2e932
|
refs/heads/master
| 2021-09-09T09:04:34.700662
| 2018-03-14T14:35:31
| 2018-03-14T14:35:31
| 125,131,501
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 431
|
r
|
starter_rpackages.R
|
# Install a package from GitHub into a project-local library.
#
# The target library path was hard-coded; it is now the `lib` argument,
# defaulting to the original path, so other library locations can be used
# without editing the function. Requires the withr and devtools packages.
#
# @param gitstr GitHub "user/repo" spec passed to devtools::install_github().
# @param lib    Library path used (temporarily) via withr::with_libpaths().
lgit <- function(gitstr, lib = 'R/x86_64-pc-linux-gnu-library/3.4/') {
  withr::with_libpaths(new = lib, devtools::install_github(gitstr))
}
# Create the project-local library directory (no-op if it already exists)
dir.create('R/x86_64-pc-linux-gnu-library/3.4/',recursive=TRUE,showWarnings=FALSE)
# GitHub installs: viridisLite first (presumably a dependency of viridis --
# confirm), then viridis
lgit('sjmgarnier/viridisLite')
lgit('sjmgarnier/viridis')
# CRAN packages. NOTE(review): these install into the default library, not
# the local directory created above -- verify that is intended.
install.packages(c("tidyverse","caret","corrr","doMC","rsample","recipes","glmnet","rpart","ranger","yardstick"),dep=TRUE,repos='http://cran.r-project.org')
|
53f4c62323b6f6ee180929f35bdabf82e0e8f9b3
|
63a30097fd22c13170f950b314950fa99182f3d8
|
/Chignik_exec_ADFG2.R
|
c2698b7deeb7361d79f3ba79e77d45f49ca22962
|
[] |
no_license
|
Sages11/Chig_UW_transition
|
19483d875d8c4c9f055904d70529d8f8684c77e7
|
f04bd932ee45156771c08e940c707c76bba1e4fa
|
refs/heads/master
| 2020-04-21T19:05:48.771922
| 2019-02-12T19:24:17
| 2019-02-12T19:24:17
| 169,794,174
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,842
|
r
|
Chignik_exec_ADFG2.R
|
#LUKAS DEFILIPPO 1/28/18
#Script for automated execution of hierarchical logistic model and inference
#load required packages
library(lubridate)
library(viridis)
library(rstan)
library(shinystan)
library(RColorBrewer)
#parallelize
rstan_options(auto_write = TRUE)
options(mc.cores = parallel::detectCores())
#Useful functions for plotting and summarizing posteriors
#Function to calculate 95% quantiles
cred <- function(x){
return(quantile(x, c(0.025, 0.975)))
#Function to calculate 50% quantiles
}
cred.50 <- function(x){
return(quantile(x, c(0.25, 0.75)))
}
#calculate 95% quantiles for columns
cred.2 <- function(x){
return(apply( x, 2, cred))
}
#calculate 50% quantiles for columns
cred.3 <- function(x){
return(apply( x, 2, cred.50))
}
#Function to calculate the medians of a column
colMedian <- function(x){
return(apply(x, 2, median))
}
#Read in daily catch and escapement data
C_E_read.func <- function(file_name='2010-2017ChignikCEbyday_Schindler7-3-18.csv'){
C_E_dat <- read.csv(file=file_name)
C_E_dat$Date <- as.Date(C_E_dat$Date, format='%m/%d/%y')
C_E_dat$Year <- year(C_E_dat$Date)
#Convert date to 'day of year' format
C_E_dat$DOY <- yday(C_E_dat$Date)
min(C_E_dat$DOY)
#C_E_dat <- subset(C_E_dat, C_E_dat$DOY < C_E_dat$DOY[C_E_dat$Date=='2010-8-30'] &
# C_E_dat$DOY > C_E_dat$DOY[C_E_dat$Date=='2010-6-01'])
#Subtract the minimum day of the year so the index begins at 1
C_E_dat$DOY_index <- C_E_dat$DOY - (min(C_E_dat$DOY)-1)
#min(C_E_dat$DOY_index)
#max(C_E_dat$DOY_index)
#Pad each year's escapement data to be the same length
for (i in 1:length(unique(C_E_dat$Year))){
#print(length(C_E_dat$Escapement[C_E_dat$Year==unique(C_E_dat$Year)[i]]))
if(min(C_E_dat$DOY_index[C_E_dat$Year==unique(C_E_dat$Year)[i]]) > 1){
x <- as.data.frame(matrix(ncol=ncol(C_E_dat), nrow=min(C_E_dat$DOY_index[C_E_dat$Year==unique(C_E_dat$Year)[i]])-1))
colnames(x) <- colnames(C_E_dat)
x[,1] <- min(C_E_dat$Date[C_E_dat$Year==unique(C_E_dat$Year)[i]]) - rev(c(1:nrow(x)))
x[,2] <- 0
x[,3] <- 0
x[,4] <- unique(C_E_dat$Year)[i]
x[,5] <- c(1:nrow(x)) + (min(C_E_dat$DOY)-1)
x[,6] <- c(1:nrow(x))
#print(x)
C_E_dat <- rbind(x, C_E_dat)
C_E_dat <- C_E_dat[order(C_E_dat$Year),]
}
if(max(C_E_dat$DOY_index[C_E_dat$Year==unique(C_E_dat$Year)[i]]) < 135){
x <- as.data.frame(matrix(ncol=ncol(C_E_dat), nrow= (135- max(C_E_dat$DOY_index[C_E_dat$Year==unique(C_E_dat$Year)[i]]))))
colnames(x) <- colnames(C_E_dat)
x[,1] <- max(C_E_dat$Date[C_E_dat$Year==unique(C_E_dat$Year)[i]]) + (c(1:nrow(x)))
x[,2] <- 0
x[,3] <- 0
x[,4] <- unique(C_E_dat$Year)[i]
x[,5] <- c(1:nrow(x)) + max(C_E_dat$DOY[C_E_dat$Year==unique(C_E_dat$Year)[i]])
x[,6] <- c(1:nrow(x)) + max(C_E_dat$DOY[C_E_dat$Year==unique(C_E_dat$Year)[i]]) - (min(C_E_dat$DOY)-1)
#print(x)
C_E_dat <- rbind(x, C_E_dat)
C_E_dat <- C_E_dat[order(C_E_dat$Year, C_E_dat$DOY),]
}
}
return(C_E_dat)
}
C_E_dat <- C_E_read.func()
#Read in escapement compositional data --> new genetic samples input via the excel file
#Notes in the excel file template, I have added 3 columns to the original:
#'Mixed' denotes whether a sampling stratum was spread across multiple days
#'Stratum' is the first date of a multi-day sampling event
#'#'Stratum_2' is the last date of a multi-day sampling event
#The two columns will be the same for single day sampling strata
#Experiment with how the model treats new data by adding rows to the spreadsheet in the same format
#as the others
#Read and index the genetic (GSI) compositional samples from a CSV file.
#Returns the CSV contents with added columns:
# DOY / DOY_2              - day-of-year of the first/last day of each sampling stratum
# DOY_index / DOY_index_2  - the same, shifted so day 1 aligns with the first day of
#                            the catch-and-escapement series (C_E_dat)
# DOY_index_3              - one representative day per stratum (midpoint for strata
#                            spanning more than two days)
# yr_index                 - 1-based year index
# DOY_index_II / _II_2 / _III - season-stacked versions of the day indices (each year
#                            offset by the season length) for the Stan model
#Requires C_E_read.func() and lubridate's yday() to be available.
Comp_dat_read.func <- function(file_name='2017 Chignik Estimates Summary.csv'){
Comp_dat <- read.csv(file= file_name)
#Parse stratum start/end dates; these are equal for single-day strata
Comp_dat$Stratum <- as.Date(Comp_dat$Stratum, format= '%m/%d/%y')
Comp_dat$Stratum_2 <- as.Date(Comp_dat$Stratum_2, format= '%m/%d/%y')
C_E_dat <- C_E_read.func()
#Convert date to 'day of year' format
Comp_dat$DOY <- yday(Comp_dat$Stratum)
Comp_dat$DOY_2 <- yday(Comp_dat$Stratum_2)
#Subtract the minimum day of the year from the catch and escapement data (which begins earlier)
#So that this index begins respective to the first day for the catch and escapement data (day 22 in this case)
#min(Comp_dat$DOY)
Comp_dat$DOY_index <- Comp_dat$DOY - (min(C_E_dat$DOY)-1)
Comp_dat$DOY_index_2 <- Comp_dat$DOY_2 - (min(C_E_dat$DOY)-1)
#Collapse each stratum to a single representative day (DOY_index_3):
#same day or next-day strata use the start day; longer strata use the rounded midpoint.
#NOTE(review): if a row ever had DOY_index_2 < DOY_index (end before start) none of
#these branches fires and DOY_index_3 is left unset for that row - confirm the input
#dates are always ordered.
for (i in 1:nrow(Comp_dat)){
if(Comp_dat$DOY_index[i]==Comp_dat$DOY_index_2[i]){
Comp_dat$DOY_index_3[i] <- Comp_dat$DOY_index[i]
}
if(Comp_dat$DOY_index_2[i]==(Comp_dat$DOY_index[i]+1)){
Comp_dat$DOY_index_3[i] <- Comp_dat$DOY_index[i]
}
if(Comp_dat$DOY_index_2[i] > (Comp_dat$DOY_index[i]+1)){
Comp_dat$DOY_index_3[i] <- round((Comp_dat$DOY_index[i] + Comp_dat$DOY_index_2[i])/2)
}
}
#min(Comp_dat$DOY_index)
Comp_dat$yr_index <- as.numeric(Comp_dat$Year) - min(as.numeric(Comp_dat$Year)-1)
#Create a total index of sample dates
#(each year's indices are offset by the full season length, max(C_E_dat$DOY_index))
for (i in 1:nrow(Comp_dat)){
Comp_dat$DOY_index_II[i] <- Comp_dat$DOY_index[i] + (Comp_dat$Year-(min(Comp_dat$Year)))[i]*max(C_E_dat$DOY_index)
Comp_dat$DOY_index_II_2[i] <- Comp_dat$DOY_index_2[i] + (Comp_dat$Year-(min(Comp_dat$Year)))[i]*max(C_E_dat$DOY_index)
Comp_dat$DOY_index_III[i] <- Comp_dat$DOY_index_3[i] + (Comp_dat$Year-(min(Comp_dat$Year)))[i]*max(C_E_dat$DOY_index)
}
#Check that indices from the daily catch and escapement data align with those from the compositional data
#Note: 2012, 2016 are leap years so the doy indices won't be the same for these years
#print(C_E_dat$Date[C_E_dat$DOY_index==22])
#print(Comp_dat$Stratum[Comp_dat$DOY_index==22])
return(Comp_dat)
}
#Load the compositional (GSI) data once for the session
Comp_dat <- Comp_dat_read.func(file_name='2017 Chignik Estimates Summary.csv')
#number of years
n_year <- length(unique(Comp_dat$Year))
#season length (days); must agree with the padded season built by C_E_read.func
n_day <- 135
#get the number of genetic samples for each year
#length.vec[i] = number of sampling strata in year i (years in order of appearance)
length.vec <- c()
for (i in 1:n_year){
length.vec[i] <- length(Comp_dat$DOY_index[Comp_dat$Year==unique(Comp_dat$Year)[i]])
}
#Fit the Stan transition model and optionally launch shinystan diagnostics.
#
#Arguments:
# script    - path to the .stan model file
# exec      - if TRUE, fit the model and save the stanfit to '<file_name> .rds'
# shiny     - if TRUE, open the fitted (or previously saved) model in shinystan
# n_chain, n_iter, adapt, tree, thin - MCMC settings forwarded to rstan::stan()
# file_name - basename used when saving/loading the fitted model
# dat       - data list for the Stan model; the default references the global
#             Comp_dat, C_E_dat and length.vec objects built above
# init.list - one set of initial values, replicated once per chain
#Returns the fitted stanfit invisibly (NULL when neither fitting nor loading happened).
exec_func <- function(script, exec=F, shiny=F, n_chain=4, n_iter=60000, adapt=0.999, tree=10, thin=5, file_name,
dat = list(s = length.vec, n_samples = sum(length.vec), comp_dat_x = round(Comp_dat$Proportion_Chignik*Comp_dat$n),comp_dat_index = Comp_dat$DOY_index_3,
n_day = max(C_E_dat$DOY_index), comp_dat_index_2 = Comp_dat$DOY_index_III, comp_dat_N = Comp_dat$n),
init.list = list(x0_ind = rep(52,8), steep_ind=rep(0.17,8), mu_steep=0.17, mu_x0=52, k=20, sigma_x0=5, sigma_steep=0.04, steep_Z = rep(0.01,8))){
#One identical initial-value list per chain (same as the original explicit loop)
init <- replicate(n_chain, init.list, simplify = FALSE)
mod <- NULL
if(exec==T){
mod <- stan(file = script, data = dat,
iter = n_iter, chains = n_chain, control=list(adapt_delta=adapt, max_treedepth=tree), thin=thin, seed=666,
init = init)
#NB: paste() inserts a space, so files are named '<file_name> .rds'; kept as-is
#for backward compatibility with fits already saved on disk.
saveRDS(mod, paste(file_name, '.rds'))
}
#Shinystan diagnostics
if (shiny==T){
#BUG FIX: previously `mod` was referenced unconditionally here, which raised
#"object 'mod' not found" whenever shiny=TRUE was requested without exec=TRUE.
#Fall back to the model saved by a previous run instead.
if (is.null(mod)){
mod <- readRDS(paste(file_name, '.rds'))
}
launch_shinystan(as.shinystan(mod))
}
invisible(mod)
}
#Run Beta-Binomial model (select exec=T to run model, fit to all years of data - not for in-season management)
#Overrides the defaults so the initial values match the actual number of years (n_year)
#rather than the hard-coded 8 in exec_func's signature.
exec_func(script='Chignik_hierarchical_transition_beta_bin_A.stan', exec=T, shiny=F, n_chain=5, n_iter=5000, adapt=0.999, tree=10, thin=5,
dat = list(s = length.vec, n_samples = sum(length.vec), comp_dat_x = round(Comp_dat$Proportion_Chignik*Comp_dat$n),comp_dat_index = Comp_dat$DOY_index_3,
n_day = max(C_E_dat$DOY_index), comp_dat_index_2 = Comp_dat$DOY_index_III, comp_dat_N = Comp_dat$n), file_name='Chignik_est_beta_bin',
init.list = list(x0_ind = rep(52,n_year), steep_ind=rep(0.17,n_year), mu_steep=0.17, mu_x0=52, k=20, sigma_x0=5, sigma_steep=0.04, steep_Z = rep(0.01,n_year)))
#Execute and plot --> select preseason=T if forecasting for a given season before any samples have been collected
#otherwise preseason=F and the function will run the model iteratively adding each sample for the most recent year
#and plot. This function is meant to be the guts of what a manager would run
#
#plot_func(): the in-season management workflow.
# 1. Fits a "preseason" model excluding the most recent year (or using all years when
#    preseason=TRUE) and plots its hyper-curve as the prior expectation.
# 2. When preseason=FALSE, refits the model once per genetic sample of the most recent
#    year, adding one sample at a time, and overlays each resulting transition curve.
# 3. Writes the figure to 'loo_iter_curve_in_season.pdf' and returns pred_list (posterior
#    draws of each iterative curve; only the most recent year's slots are filled).
#NOTE(review): several containers (in_esc_mat_*, in_esc_error_*, and the cumulative
#escapement computed below) are created/filled but never used later in this function -
#presumably left over from a longer analysis; confirm before removing.
plot_func <- function(exec=T, iter=1000, preseason=F){
#Read in genetic sampling data
Comp_dat <- Comp_dat_read.func(file_name='2017 Chignik Estimates Summary.csv')
#Read in escapement data
C_E_dat <- C_E_read.func()
#Total number of years evaluated
n_year_fixed <- length(unique(Comp_dat$Year))
#Number of days within a season
n_day = max(C_E_dat$DOY_index)
#Empty list for storing the pre-season curves
hyper_list <- replicate(n_year_fixed, list())
#Empty list for storing the pre-season midpoints (x0) terms
x0_list <- replicate(n_year_fixed, list())
#Empty list for storing the iterative curves
pred_list <- replicate(n_year_fixed, list())
#Empty lists for storing the assigned escapement to the early and late runs under the iterative curve
in_esc_mat_late <- replicate(n_year_fixed,list())
in_esc_mat_early <- replicate(n_year_fixed,list())
#Empty lists for storing the assignment errors to the early and late runs under the iterative curve
in_esc_error_late <- replicate(n_year_fixed,list())
in_esc_error_early <- replicate(n_year_fixed,list())
#Colors for plotting --> unique color for each GSI sample
#(viridisLite/viridis magma() palette, one color per sample in the most recent year)
col.vec <- magma(table(Comp_dat$Year)[n_year_fixed], alpha = 1, begin = 0, end = 1, direction = -1)
#Read in escapement goals (based on 2018 values)
#esc_goals <- read.csv(file='Esc_goals.csv')
#Reformat dates to be in DOY format
#esc_goals$Date <- as.Date(esc_goals$Date, format='%m/%d/%y')
#esc_goals$DOY <- yday(esc_goals$Date)
#Remove last row
#esc_goals <- esc_goals[1:(nrow(esc_goals)-1),]
#Create DOY index compatible with the genetic samples
#esc_goals$DOY_index <- esc_goals$DOY - (min(C_E_dat$DOY)-1)
#Add entry for day 135 for plotting purposes
#esc_goals$DOY_index[length(esc_goals$DOY_index)] <- n_day
#Standardize the recorded escapement data to a fixed season length (n_day), filling in zeros for any given year's missing data
#(running within-year cumulative sum of daily escapement)
for (i in 1:length(unique(C_E_dat$Year))){
C_E_dat$Escapement_cumulative_late[C_E_dat$Year==unique(C_E_dat$Year)[i]][1] <- C_E_dat$Escapement[C_E_dat$Year==unique(C_E_dat$Year)[i]][1]
for(j in 2:length(C_E_dat$Escapement[C_E_dat$Year==unique(C_E_dat$Year)[i]])){
C_E_dat$Escapement_cumulative_late[C_E_dat$Year==unique(C_E_dat$Year)[i]][j] <- C_E_dat$Escapement[C_E_dat$Year==unique(C_E_dat$Year)[i]][j] + C_E_dat$Escapement_cumulative_late[C_E_dat$Year==unique(C_E_dat$Year)[i]][j-1]
}
}
#Create plot showing the evolution of the transition curves
pdf(file='loo_iter_curve_in_season.pdf')
#Create lists (curve, cumulative assignments, and assignment errors) for each year with dimensions equal to the total number of samples for that year
#plus 1 (to include the early period informed by the hyper curve, only for the assignment based lists)
for(i in 1:n_year_fixed){
pred_list[[i]] <- replicate(as.vector(table(Comp_dat$Year))[i], list())
in_esc_mat_early[[i]] <- replicate(as.vector(table(Comp_dat$Year))[i]+1, list())
in_esc_mat_late[[i]] <- replicate(as.vector(table(Comp_dat$Year))[i]+1, list())
in_esc_error_early[[i]] <- replicate(as.vector(table(Comp_dat$Year))[i]+1, list())
in_esc_error_late[[i]] <- replicate(as.vector(table(Comp_dat$Year))[i]+1, list())
}
#Open loop to iteratively exclude one year at a time from the compositional data --> the hyper-curves
#from models fitted to these data wil be used as the preseason curve for each year before genetic samples
#are collected
#(only the most recent year, y = n_year_fixed, is processed here)
y <- n_year_fixed
#exclude given year from data
if(preseason==F){
Comp_dat_2 <- Comp_dat[-which(Comp_dat$Year==unique(Comp_dat$Year)[y]),]
}else{
Comp_dat_2 <- Comp_dat
}
#redefine number of years
n_year <- length(unique(Comp_dat_2$Year))
#re-calculate the sample length vector
length.vec <- as.vector(table(Comp_dat_2$Year))
#Redefine cumulative indices based on years that were dropped
for (i in 1:nrow(Comp_dat_2)){
Comp_dat_2$DOY_index_II[i] <- Comp_dat_2$DOY_index[i] + (as.numeric(as.factor(Comp_dat_2$Year))-1)[i]*max(C_E_dat$DOY_index)
Comp_dat_2$DOY_index_II_2[i] <- Comp_dat_2$DOY_index_2[i] + (as.numeric(as.factor(Comp_dat_2$Year))-1)[i]*max(C_E_dat$DOY_index)
Comp_dat_2$DOY_index_III[i] <- Comp_dat_2$DOY_index_3[i] + (as.numeric(as.factor(Comp_dat_2$Year))-1)[i]*max(C_E_dat$DOY_index)
}
#Run the model if specified (preseason fit, most recent year excluded)
if(exec==T){
exec_func(script='Chignik_hierarchical_transition_beta_bin_A.stan', exec=T, shiny=F, n_chain=5, n_iter=iter, adapt=0.999, tree=10, thin=5,
dat = list(s = length.vec, n_samples = sum(length.vec), comp_dat_x = round(Comp_dat_2$Proportion_Chignik*Comp_dat_2$n),comp_dat_index = Comp_dat_2$DOY_index_3,
n_day = max(C_E_dat$DOY_index), comp_dat_index_2 = Comp_dat_2$DOY_index_III, comp_dat_N = Comp_dat_2$n), file_name=paste('Chignik_beta_bin_year=',unique(Comp_dat$Year)[y]),
init.list = list(x0_ind = rep(52,n_year), steep_ind=rep(0.17,n_year), mu_steep=0.17, mu_x0=52, k=20, sigma_x0=5, sigma_steep=0.04, steep_Z = rep(0.01, n_year)))
}
#otherwise load in preexisting RDA object
df <- as.data.frame(readRDS(paste('Chignik_beta_bin_year=',unique(Comp_dat$Year)[y],".rds")))
#Save the hyper-curve from the model fit to a list
hyper_list[[y]] <- df[grep('hyper_curve', colnames(df))]
#Save the estimated midpoint term from the model fit to a list
x0_list[[y]] <- df[grep('mu_x0', colnames(df))]
if(preseason==F){
#Loop through the number of samples for each year
for(j in 1:length(which(Comp_dat$Year==unique(Comp_dat$Year)[y]))){
#Incrementally add the rows containing each sample into the data frame, re-estimating the model with
#the new data
Comp_dat_3 <- rbind(Comp_dat_2, Comp_dat[which(Comp_dat$Year==unique(Comp_dat$Year)[y])[1:j],])
#re-define number of years based on new data
n_year <- length(unique(Comp_dat_3$Year))
#redefine sample length vector based on new data
length.vec <- as.vector(table(Comp_dat_3$Year))
#Redefine cumulative indices based on years that were dropped
for (i in 1:nrow(Comp_dat_3)){
Comp_dat_3$DOY_index_II[i] <- Comp_dat_3$DOY_index[i] + (as.numeric(as.factor(Comp_dat_3$Year))-1)[i]*max(C_E_dat$DOY_index)
Comp_dat_3$DOY_index_II_2[i] <- Comp_dat_3$DOY_index_2[i] + (as.numeric(as.factor(Comp_dat_3$Year))-1)[i]*max(C_E_dat$DOY_index)
Comp_dat_3$DOY_index_III[i] <- Comp_dat_3$DOY_index_3[i] + (as.numeric(as.factor(Comp_dat_3$Year))-1)[i]*max(C_E_dat$DOY_index)
}
#Sort the new data frame by year
Comp_dat_3 <- Comp_dat_3[order(Comp_dat_3$Year),]
#Run the model if specified (refit with the first j samples of the current year added)
if(exec==T){
exec_func(script='Chignik_hierarchical_transition_beta_bin_A.stan', exec=T, shiny=F, n_chain=5, n_iter=iter, adapt=0.999, tree=10, thin=5,
dat = list(s = length.vec, n_samples = sum(length.vec), comp_dat_x = round(Comp_dat_3$Proportion_Chignik*Comp_dat_3$n),comp_dat_index = Comp_dat_3$DOY_index_3,
n_day = max(C_E_dat$DOY_index), comp_dat_index_2 = Comp_dat_3$DOY_index_III, comp_dat_N = Comp_dat_3$n), file_name=paste('Chignik_beta_bin_year=',unique(Comp_dat$Year)[y],'sample',j),
init.list = list(x0_ind = rep(52,n_year), steep_ind=rep(0.17,n_year), mu_steep=0.17, mu_x0=52, k=20, sigma_x0=5, sigma_steep=0.04, steep_Z = rep(0.01, n_year)))
}
#otherwise load in the saved model object
df <- as.data.frame(readRDS(paste('Chignik_beta_bin_year=',unique(Comp_dat$Year)[y],'sample',j,".rds")))
pred <- df[grep('pred_comp', colnames(df))]
#Slice up the predictions according to season length and year
cut_start <- seq(from=1, to=ncol(pred), by=n_day)
cut_end <- seq(from=n_day, to=ncol(pred), by=n_day)
#Save the selected curve to a list (columns belonging to year y only)
pred_list[[y]][[j]] <- pred[,c(cut_start[y]:cut_end[y])]
}
}
#Plot the hyper-curves
par(oma=c(5,5,5,5))
plot(c(1:n_day), colMedian(hyper_list[[y]]), type='l', lty=3, xaxt='n', yaxt='n', xlim=c(20,90), lwd=2, axes=F, ylab=NA, xlab=NA)
#vertical line for the midpoint term
abline(v=colMedian(x0_list[[y]]), lty=3)
#Polygon for shading the background
#polygon(c(15:95, rev(15:95)), c(rep(-0.1, length(15:95)), rep(1.1, length(15:95))), col=rgb(1,1,1, max=255, alpha=0))
if(preseason==F){
#Plot the iterative fits --> loop through the total number of samples for a given year
for(j in 1:length(which(Comp_dat$Year==unique(Comp_dat$Year)[y]))){
#Allow plotting the final curve a different style from all previous curves
if(j < length(which(Comp_dat$Year==unique(Comp_dat$Year)[y]))){
points(colMedian(pred_list[[y]][[j]]),type='l', lty=1, col=rgb(col2rgb(col.vec)[1,j], col2rgb(col.vec)[2,j], col2rgb(col.vec)[3,j], max=255, alpha=255), lwd=1.75)
}
if(j==length(which(Comp_dat$Year==unique(Comp_dat$Year)[y]))){
points(colMedian(pred_list[[y]][[j]]),type='l', lty=1, col=col.vec[j], lwd=2.5)
}
}
#Plot the actual data points and the confidence bounds
points(Comp_dat$DOY_index_3[Comp_dat$Year==unique(Comp_dat$Year)[y]], Comp_dat$Proportion_Chignik[Comp_dat$Year==unique(Comp_dat$Year)[y]], col='black', pch=21, bg=col.vec[1:length(Comp_dat$DOY_index_3[Comp_dat$Year==unique(Comp_dat$Year)[y]])], cex=1.75, lwd=0.25)
arrows(Comp_dat$DOY_index_3[Comp_dat$Year==unique(Comp_dat$Year)[y]], Comp_dat$Lower_Chignik[Comp_dat$Year==unique(Comp_dat$Year)[y]], Comp_dat$DOY_index_3[Comp_dat$Year==unique(Comp_dat$Year)[y]], Comp_dat$Upper_Chignik[Comp_dat$Year==unique(Comp_dat$Year)[y]], angle=90, code=3, length=0.01, lwd=1.25, lty=1, col=col.vec[1:length(Comp_dat$DOY_index_3[Comp_dat$Year==unique(Comp_dat$Year)[y]])])
legend(x=17.5, y=1.065, legend= c('Preseason', paste('Sample', 1:length(col.vec))),col=c('black', col.vec), pch=c(NA, rep(16, length(col.vec))), bty='n', cex=0.95, lty=c(3, rep(1, length(col.vec))), y.intersp=0.875, x.intersp = 0.75, pt.cex=1.25)
legend(x=17.5, y=1.065, legend= c('Preseason', paste('Sample', 1:length(col.vec))),col='black', pch=c(NA, rep(1, length(col.vec))), bty='n', lwd=0.25, lty=rep(NA, length(col.vec)), cex=0.95, y.intersp=0.875, x.intersp = 0.75, pt.cex=1.25)
text(x=85, y=0.01, unique(Comp_dat$Year)[y])
}else{
text(x=85, y=0.01, unique(Comp_dat$Year)[y]+1)
}
#add axes, legends and text
axis(side=1, at=seq(15, 85, 10), labels= (seq(15, 85, 10) + min(C_E_dat$DOY)) , cex.axis=1)
axis(side=2, cex.axis=1)
mtext(side=2, 'Proportion late run (Chignik)', line=2.5)
mtext(side=1, 'Day of year', line=2.5)
abline(h=0.5, lty=2)
dev.off()
return(pred_list)
}
#Run the full iterative workflow for the most recent year and keep the fitted curves
x <- plot_func(exec=T, iter=1000, preseason=F)
#Isolate the median estimates of the current year's transition and write to a csv
#(one column per in-season genetic sample; rows are days 1..n_day)
mat_store <- matrix(nrow=n_day, ncol=length.vec[length(length.vec)])
for(i in 1:length.vec[length(length.vec)]){
mat_store[,i] <- unlist(colMedian(x[[n_year]][[i]]))
}
write.csv(mat_store, file='current_year_curves.csv')
#plot(mat_store[,1])
#points(mat_store[,2])
#points(mat_store[,3])
#points(mat_store[,4])
#points(mat_store[,5])
|
fdf3b6cc4ad75b4815d2ceb04de79cc09d8e1e00
|
55686d2928596baa6bbde6b972d191f8a035f573
|
/Week_12_Discussion/Discussion _Week12_V2.R
|
d3e8971817eeb1a1ae129c7a3c85fe3304c08337
|
[] |
no_license
|
DarioUrbina/Teacher-A-Statististical-Methods-BME-423
|
6556688a414c1b3ee404aacdbf4401324f0b2645
|
1572301100c96583da46209d08ceac4efa570024
|
refs/heads/master
| 2023-01-06T23:57:37.652149
| 2020-11-06T02:45:19
| 2020-11-06T02:45:19
| 288,513,280
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 818
|
r
|
Discussion _Week12_V2.R
|
#Discussion week 12
## 1. Mandatory lines
# Similar thing as clc;clear all;close all in MATLAB
rm(list = ls()); # clear workspace variables
cat("\014") # it means ctrl+L. clear window
graphics.off() # close all plots
# In-class exercise
# Example from Navarro - chico.Rdata (paired grades for two tests)
load("~/Desktop/Week12/chico.Rdata")
View(chico)
head(chico)
library(psych)
describe(chico)
# Scatter of test 2 (x-axis) against test 1 (y-axis).
# BUG FIX: the axis labels were swapped relative to the plotted variables
# (grade_test2 is on the x-axis, grade_test1 on the y-axis).
plot(chico$grade_test2, chico$grade_test1,ylab="Grade for Test 1",xlab="Grade for Test 2")
# Per-student improvement between the two tests
chico$improvement <- chico$grade_test2 - chico$grade_test1
head(chico)
hist(x=chico$improvement, xlab="Improvement in Grade")
library(lsr)
# 95% confidence interval for the mean improvement
ciMean(x=chico$improvement)
#In-class exercise 2
rm(list = ls()); # clear workspace variables
cat("\014") # it means ctrl+L. clear window
graphics.off() # close all plots
mypath <- "~/Desktop/Week12/ReactionTime.csv"
|
fca74ccb417286003be59ee7fb55c3a370657e76
|
301d8a72cd06b4678b3b391715c2290c4a0de3f7
|
/tests/benchmarks/benchmark_join/tokens.R
|
cee7c3a5dc78753cb447911105fe0ff95f3f1306
|
[] |
no_license
|
XiaoyueZhang/quanteda
|
8adb1c702b5067ac88e4d4baee3233066e3990db
|
257d9462df3fb100bb255109497b26da3c0709d5
|
refs/heads/master
| 2021-01-12T09:16:53.421715
| 2016-12-14T19:05:16
| 2016-12-14T19:05:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,029
|
r
|
tokens.R
|
# Benchmarks and profiling for quanteda's joinTokens() on the inaugural corpus.
toks <- tokens(inaugCorpus)
# Fixed two-word sequence
seqs <- list(c('united', 'states'))
microbenchmark::microbenchmark(
joinTokens(toks, seqs, valuetype='fixed', verbose=TRUE),
times=10
)
# Glob patterns: "not <anything>" and "will <anything>"
seqs_not <- list(c('not', '*'))
microbenchmark::microbenchmark(
joinTokens(toks, seqs_not, valuetype='glob', verbose=TRUE),
times=10
)
seqs_will <- list(c('will', '*'))
microbenchmark::microbenchmark(
joinTokens(toks, seqs_will, valuetype='glob', verbose=TRUE),
times=10
)
# Lexicoder negative-sentiment dictionary as a large set of sequences
dict_lex <- dictionary(file='/home/kohei/Documents/Dictonary/Lexicoder/LSDaug2015/LSD2015_NEG.lc3')
seq_lex <- tokens(unlist(dict_lex, use.names = FALSE), hash=FALSE, what='fastest')
# microbenchmark::microbenchmark(
#   tokens(phrasetotoken(texts(inaugCorpus), dict_lex, valuetype='glob')),
#   joinTokens(tokens(inaugCorpus), seq_lex, valuetype='glob', verbose=TRUE),
#   times=1
# )
profvis::profvis(joinTokens(tokens(inaugCorpus), seq_lex, valuetype='glob', verbose=TRUE))
# BUG FIX: this line referenced `seqs_neg`, which is never defined in this script;
# the glob pattern list created above is named `seqs_not`.
profvis::profvis(joinTokens(toks, seqs_not, valuetype='glob', verbose=FALSE))
|
1c6cf76095e9aa40d3ec2089552d8aac8796e145
|
e03a758498cac958f162d2f4ad1df886f84c8d72
|
/crossover_exp.R
|
310e029e74d886915386d3b142ab04d85a7d9410
|
[
"MIT"
] |
permissive
|
sealionkat/wdae-differential-evolution
|
1c2407999c28048bbf7aa61f4fed0a5b2acdfa32
|
f6dd4434e64d6b42ab1fe69ca87ea18173cd26a6
|
refs/heads/master
| 2016-08-08T07:16:14.543188
| 2015-10-12T21:58:46
| 2015-10-12T21:58:46
| 33,892,805
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 340
|
r
|
crossover_exp.R
|
# Exponential crossover for differential evolution.
# Copies a leading run of coordinates from Pgi (the run length is geometric,
# governed by the global crossover rate CR), then fills the remaining
# coordinates from Mgi. Relies on the globals `point()` (empty point
# constructor), `dim` (problem dimensionality) and `CR` (crossover rate).
# One runif(1) draw is consumed per copied-or-rejected position, exactly as
# in the original formulation, so the RNG stream is unchanged.
crossover_exp <- function(Pgi, Mgi) {
  child <- point()
  pos <- 1
  # Take coordinates from Pgi while successive uniform draws stay below CR.
  while (pos <= dim && runif(1) < CR) {
    child$coordinates[[pos]] <- Pgi$coordinates[[pos]]
    pos <- pos + 1
  }
  # Everything from the first failed draw onward comes from Mgi.
  while (pos <= dim) {
    child$coordinates[[pos]] <- Mgi$coordinates[[pos]]
    pos <- pos + 1
  }
  child
}
|
b9f185faa9ab4195258253de8ccfb3606b400f4d
|
1a68555cdacd6d8ccc567bd2c00975f5eba3682e
|
/R/rec_tree3.R
|
343da58d6e9662adbfa7be0b7591d679d0a1b410
|
[] |
no_license
|
franciscorichter/dmea
|
8708c1a2a861bc90c42e02c3d22a0e316779d1ef
|
e4b1af2c3ea102554f97b4a79ed1c30e56428653
|
refs/heads/master
| 2020-06-28T22:35:35.962945
| 2017-07-13T17:05:32
| 2017-07-13T17:05:32
| 74,467,150
| 0
| 1
| null | 2017-02-23T09:06:58
| 2016-11-22T11:45:21
|
R
|
UTF-8
|
R
| false
| false
| 1,270
|
r
|
rec_tree3.R
|
# Quantile function (inverse CDF) of an Exponential(rate = m) distribution
# truncated to [0, t]. For u in [0, 1]: itexp(0, m, t) = 0 and itexp(1, m, t) = t.
itexp <- function(u, m, t) {
  truncated_mass <- 1 - exp(-t * m)
  -log(1 - u * truncated_mass) / m
}
# Draw n samples from the truncated exponential via inverse-transform sampling.
rtexp <- function(n, m, t) {
  itexp(runif(n), m, t)
}
#Simulate one reconstructed-tree augmentation under a birth-death model and
#compute its importance-sampling weight.
# wt    - vector of waiting times between branching events
# model - 'dd' (diversity-dependent speciation) or 'cr' (constant-rate)
# pars  - c(lambda0, mu0, K)
#Requires the externally defined update_tree() (inserts a speciation/extinction
#pair into the tree representation) and llik() (negative log-likelihood).
#Returns a list with the augmented tree (wt, E, n), the target density f_n,
#the proposal density prob, and the importance weight f_n/prob.
rec_tree3 <- function(wt, model='dd',pars){
lambda0 = pars[1]
mu0 = pars[2]
K = pars[3]
#n[i] = number of lineages during interval i; E marks event types
n = 1:length(wt)
i = 1
E = rep(1,(length(wt)-1))
#ct = total (crown) time of the tree; prob accumulates the proposal density
ct = sum(wt)
prob = 1
p=list(wt=wt,E=E,n=n)
while(i < length(wt)){
N = n[i]
if(model == "dd"){ # diversity-dependence model
lambda = max(0,lambda0 - (lambda0-mu0)*N/K)
mu = mu0
lambda = rep(lambda,N)
mu = rep(mu,N)
}
if(model == 'cr'){ # constant-rate model
lambda = rep(lambda0,N)
mu = rep(mu0,N)
}
#Total speciation rate across current lineages; zero rate means no more events possible
s = sum(lambda)
if(s==0){
break
}
#cwt = current waiting time; cbt = branching time at the start of this interval
cwt = wt[i]
cbt = cumsum(wt)[i] - cwt
#Propose a hidden speciation time within this interval
t_spe = rexp(1,s)
if (t_spe < cwt){
#Accept: draw a matching extinction time from a truncated exponential so the
#hidden lineage dies before the present, then splice both events into the tree
prob = prob*dexp(x = t_spe,rate = s)
t_ext = rtexp(1, mu0, ct-(cbt + t_spe))
t_ext = cbt + t_spe + t_ext
up = update_tree(p=p, t_spe = (cbt + t_spe), t_ext = t_ext)
E = up$E
n = up$n
wt = up$wt
p=list(wt=wt,E=E,n=n)
}else{
#No hidden event in this interval: multiply in the survival probability
prob = prob*pexp(q = cwt,rate = s,lower.tail = F)
}
i = i+1
}
#L = create_L(wt,E)
#Importance weight: target density over proposal density
f_n = exp(-llik(pars=pars,n=n,E=E,t=wt))
weight = f_n/prob
return(list(wt=wt,E=E,n=n,f_n=f_n,weight=weight,prob=prob))
}
|
3c8c900381c99f13d3e59e04c0c6c01c2bb9bc35
|
1a87d39148d5b6957e8fbb41a75cd726d85d69af
|
/R/plotForest.R
|
44d2b9e76a5655bb7c0ea839cb5ca9a4fbd24ce2
|
[] |
no_license
|
mknoll/dataAnalysisMisc
|
61f218f42ba03bc3905416068ea72be1de839004
|
1c720c8e35ae18ca03aca15ff1a9485e920e8832
|
refs/heads/master
| 2023-01-12T16:49:39.807006
| 2022-12-22T10:21:41
| 2022-12-22T10:21:41
| 91,482,748
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,678
|
r
|
plotForest.R
|
#' @title Calculated univariate analysis and creates a forest plot
#'
#' @description The function creates a forest plot for a given
#' number of variables, expects a Surv object and a data.frame containing
#' the selected variables as columns. Univariate Cox PH models
#' are fitted. A subject vector can be specified to allow for the
#' analysis of multiple observations per patient (e.g. paired samples),
#' by using marginal model [cluster(subject)]. Errors might occur if the graphic
#' devices dimension is too small (foresplot() fails).
#'
#' @param srv Survival object as created by survival::Surv() function,
#' each observation is linked to one row of the data parameter
#' @param data data.frame containing all variables which will be analyzed.
#' The class of each column determined the type of analysis: numeric cols
#' will be treated as continous variable, factor and character as factors.
#' @param subject vector identifying independent subjects
#' @param title Plot title
#' @param col Color vector as expected by the forestplot() function
#' @param invalCut Cutoff to set HR, CI and p-values to empty values
#' if HR exceeds the provided cutoff (e.g. if models do not converge)
#' @param removeInval Retain as invalid identified levels (invalCut)
#' @param MDPI adhere to MDPI requirements
#' @param singleLine variable name and measruemnet in one line
#'
#' @import forestplot
#' @import survival
#' @import grid
#'
#' @export
plotForest <- function(srv, data, subject=NULL, title="", col=c("royalblue", "darkblue", "royalblue"),
invalCut=100, removeInval=F, MDPI=F, singleLine=F, plotNCol=T) {
#Fit one univariate Cox PH model per column of `data` and collect HR, CI, p-value
#and n/nevent into `uv` (one header row per variable, one row per level/coefficient).
uv <- list()
for (i in 1:length(data[1,])) {
# Add variable
# factors
if (class(data[,i]) %in% c("factor", "character")) {
uv [[length(uv)+1]] <- data.frame(name1=colnames(data)[i],
name2=NA,
HR=NA,
LOW=NA,
UP=NA,
PVAL=NA,
N=NA,
NEVENT=NA)
#No subject vector: ordinary Cox model; otherwise marginal model with robust SEs
if (is.null(subject)) {
w <- which(!is.na(data[,i]) & !is.na(srv))
fit <- coxph(srv[w]~factor(data[w,i]))
tbl <- cbind(summary(fit)$coef, summary(fit)$conf.int, fit$n, fit$nevent)
} else {
w <- which(!is.na(data[,i]) & !is.na(srv))
fit <- coxph(srv[w]~factor(data[w,i])+cluster(subject[w]))
tbl <- cbind(summary(fit)$coef[], summary(fit)$conf.int, fit$n, fit$nevent)
#drop the robust-SE column so the column layout matches the no-cluster case
tbl <- tbl[,-4,drop=F]
}
#strip the leading "factor(data[w, i])" prefix from coefficient names
rownames(tbl) <- substr(rownames(tbl), 19, nchar(rownames(tbl)))
for (j in 1:length(tbl[,1])) {
uv [[length(uv)+1]] <- data.frame(name1=NA,
name2=rownames(tbl)[j],
HR=tbl[j,2],
LOW=tbl[j, 8],
UP=tbl[j, 9],
PVAL=tbl[j, 5],
N=tbl[j,10],
NEVENT=tbl[j,11])
}
} else if (class(data[,i]) %in% c("numeric", "integer")) {
#Continuous variable: a single coefficient row
uv [[length(uv)+1]] <- data.frame(name1=colnames(data)[i],
name2=NA,
HR=NA,
LOW=NA,
UP=NA,
PVAL=NA,
N=NA,
NEVENT=NA)
if (is.null(subject)) {
w <- which(!is.na(data[,i]) & !is.na(srv))
fit <- coxph(srv[w]~data[w,i])
tbl <- cbind(summary(fit)$coef, summary(fit)$conf.int, fit$n, fit$nevent)
} else {
w <- which(!is.na(data[,i]) & !is.na(srv))
fit <- coxph(srv[w]~data[w,i]+cluster(subject[w]))
tbl <- cbind(summary(fit)$coef, summary(fit)$conf.int, fit$n, fit$nevent)
tbl <- tbl[,-4,drop=F]
}
j<-1
uv [[length(uv)+1]] <- data.frame(name1=NA,
name2=NA,
HR=tbl[j,2],
LOW=tbl[j, 8],
UP=tbl[j, 9],
PVAL=tbl[j, 5],
N=tbl[j,10],
NEVENT=tbl[j,11])
} else {
warning(paste("Could not process ", colnames(data)[i]))
}
}
uv <- do.call(rbind, uv)
## set invalid data to NA (e.g. non-converged models with absurd HRs)
w <- which(uv[,3] > invalCut)
if (length(w) >0) {
uv[w,c(3:6)] <- NA
if (removeInval) {
uv <- uv[-w,,drop=F]
}
}
#en dash in CIs for MDPI journals, plain hyphen otherwise
dash <- ifelse(MDPI, "–", "-")
#Assemble the text table shown next to the forest plot
tabletext<-cbind(c("", as.character(uv[,1])),
c("", as.character(uv[,2])),
c("Hazard Ratio", round(uv[,3],2)),
c("95% CI", ifelse(uv[,4] == "", "",
paste(format(round(uv[,4],2), nsmall=2), dash,
format(round(uv[,5],2), nsmall=2), sep=""))),
c("p-value", ifelse(round(uv[,6],3) == 0, "<0.001", round(uv[,6], 3))),
c("n/nevent", paste(uv[,7], "/", uv[,8], sep=""))
)
## n/nevent
#shift the n/nevent entries up one row so they sit on the variable-name rows
tabletext[2:(length(tabletext[,1])-1),6] <- tabletext[3:length(tabletext[,1]),6]
#merge the variable-name and level columns into column 1 and clean NA artifacts
for (i in 1:length(tabletext[,1])) {
tabletext[i,1] <- paste(tabletext[i,1], tabletext[i,2], collapse=" ")
tabletext[i,1] <- gsub(" NA", "", tabletext[i,1])
#tabletext[i,1] <- gsub("NA", "", tabletext[i,1])
if (substr(tabletext[i,1], 1, 3) == "NA ") {
tabletext[i,1] <- substr(tabletext[i,1], 3, nchar(tabletext[i,1]))
}
tabletext[i,1] <- ifelse(tabletext[i,1] == "NA", "", tabletext[i,1])
tabletext[i,6] <- gsub("NA/NA", "", tabletext[i,6])
if (!is.na(tabletext[i,5]) && i > 1) { tabletext[i,6] <- "" }
}
tabletext <- tabletext[,-2]
tabletext[,3] <- gsub("NA-NA", "", tabletext[,3])
#### same height
#singleLine: fold each (name row, measurement row) pair into a single table row
if (singleLine) {
tbt <- list()
tbt[[length(tbt)+1]] <- tabletext[1,,drop=F]
#NOTE(review): the `i <- i+1` below has no effect in R - `for` resets the index
#each iteration - so every adjacent row pair is built; the pairwise result only
#emerges from the `sel` filter below. Confirm this is the intended behavior.
for (i in 3:(length(tabletext[,1]))) {
ln <- tabletext[(i-1),,drop=F]
ln[,2:4] <- tabletext[i,2:4]
tbt[[length(tbt)+1]] <- ln
i <- i+1
}
tbt <- do.call(rbind, tbt)
sel <- which(!is.na(tbt[,2]))
#### adjust
tabletext <- tbt[sel,,drop=F]
uv <-uv[seq(from=2, to=length(uv[,1]), by=2),]
}
## remove column with n
if (!plotNCol) {
tabletext <- tabletext[,1:(length(tabletext[1,])-1)]
}
### boldprint
#bold the p-value cell when significant (< 0.05 or rendered as "<0.001")
bp <- list()
for (i in 1:length(tabletext[,1])) {
bp[[i]] <-list()
for (j in 1:length(tabletext[1,])) {
if (j == 4) {
if (!is.na(as.numeric(tabletext[i,j])) && (as.numeric(tabletext[i,j]) < 0.05)) {
bp[[i]][[j]] <- gpar(fontface="bold")
} else if (!is.na(tabletext[i,j]) && tabletext[i,j] == "<0.001") {
bp[[i]][[j]] <- gpar(fontface="bold")
} else {
bp[[i]][[j]] <- gpar(fontface="plain")
}
} else {
bp[[i]][[j]] <- gpar(fontface="plain")
#bp[[i]][[j]] <- gpar(fontface="bold")
}
}
}
#Render the forest plot and return the underlying table of estimates
fp <- forestplot(tabletext,
txt_gp=fpTxtGp(label=bp),
mean = c(NA, as.numeric(as.character(uv[,3]))),
lower = c(NA, as.numeric(as.character(uv[,4]))),
upper = c(NA, as.numeric(as.character(uv[,5]))),
new_page = TRUE,
title=title,
is.summary=c(rep(FALSE,length(tabletext[,1]))),
clip=c(0.1,3.2),
xlog=F,
col=fpColors(box=col[1],line=col[2], summary=col[3]),
#align=1,
zero=1)
print(fp)
return(uv)
}
|
36f365c109b4c35c233cdf8e482cf1f7c9e0f9f6
|
77157987168fc6a0827df2ecdd55104813be77b1
|
/palm/inst/testfiles/euc_distances/libFuzzer_euc_distances/euc_distances_valgrind_files/1612968695-test.R
|
306583ad18d94bca84930c4ec79121b3943d5457
|
[] |
no_license
|
akhikolla/updatedatatype-list2
|
e8758b374f9a18fd3ef07664f1150e14a2e4c3d8
|
a3a519440e02d89640c75207c73c1456cf86487d
|
refs/heads/master
| 2023-03-21T13:17:13.762823
| 2021-03-20T15:46:49
| 2021-03-20T15:46:49
| 349,766,184
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,067
|
r
|
1612968695-test.R
|
#Auto-generated libFuzzer/valgrind regression input for palm:::euc_distances.
#x2/y2 are deliberately empty; x1/y1 mix NaN, denormals and extreme magnitudes.
testlist <- list(x1 = c(NaN, NaN, 1.39065814070327e-309, 0, 2.39021688577355e-310, 1.11897728190947e+87, 2.77478592360575e+180, 2.77448002212291e+180, 2.77448001762435e+180, 2.77448001762435e+180, 2.72320665544079e+180, 2.77448001762435e+180, 2.72312157072141e+180, 2.77448001762435e+180, 2.77448001762435e+180, 2.77447923698777e+180, 1.75249501453415e+183, 2.77448001762433e+180, 2.81779820907505e-202, -2.6234033551085e+193, -1.5146004451401e+304, -0.000153938424773514, 0, NaN, NaN, 6.68887130434681e-198, 4.83049477408634e-299, 2.77448001762435e+180, -4.27197407184175e+96, 3.2418105845692e+178, 2.83196538222417e+238, 2.8396262443943e+238, 2.8396262443943e+238, 2.8396262443943e+238, 2.8396262443943e+238, 2.8396262443943e+238), x2 = numeric(0), y1 = c(2.77448001764258e+180, 6.22522713759971e-322, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), y2 = numeric(0))
#Invoke the internal C++ routine with the fuzzed arguments and inspect the result
result <- do.call(palm:::euc_distances,testlist)
str(result)
|
cfccae0f097beb93666ec93c451bd3151d7ced4a
|
d4b0f480b0816ee82b31503052134a5a158e74dc
|
/cachematrix.R
|
98405c879bb017110791c0a30eb9065350b4cdee
|
[] |
no_license
|
WanluZhang/ProgrammingAssignment2
|
8357830b927cc8a1c8124f42c8f43416496c8f7c
|
dd8e2485025d089bb89c0cba6ba47963248b1f3b
|
refs/heads/master
| 2021-05-08T07:54:06.596836
| 2017-10-14T17:16:42
| 2017-10-14T17:16:42
| 106,939,972
| 0
| 0
| null | 2017-10-14T15:40:12
| 2017-10-14T15:40:12
| null |
UTF-8
|
R
| false
| false
| 1,300
|
r
|
cachematrix.R
|
## This pair of functions inverts a matrix and caches its result.
## makeCacheMatrix() - creates a cache object for a matrix and its inverse
# Build a caching wrapper around a matrix: stores the matrix plus a
# memoized slot for its inverse, exposed through getter/setter closures.
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; NULL means "not computed yet"
  cached_inverse <- NULL
  # Replace the stored matrix and invalidate any cached inverse
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() x
  # Store / retrieve the cached inverse
  setsolve <- function(solve) cached_inverse <<- solve
  getsolve <- function() cached_inverse
  # Expose the four accessors by name so callers can use the $ operator
  list(set = set, get = get,
       setsolve = setsolve,
       getsolve = getsolve)
}
## cacheSolve() - computes the inverse of the special "matrix"
## returned by makeCacheMatrix above. If the inverse has already
## been calculated (and the matrix has not changed),
## then cacheSolve() retrieves the inverse from the cache.
# Return the inverse of the cache-wrapped matrix `x` (as built by
# makeCacheMatrix), computing it at most once. Extra arguments are
# forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getsolve()
  # Cache hit: report and return without recomputing
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  # Cache miss: invert, memoize, and return the fresh inverse
  inverse <- solve(x$get(), ...)
  x$setsolve(inverse)
  inverse
}
|
f1a5bd08051df82ecaac12c1adda9daa488bb20c
|
17599442579623cb1ef00358322170787a8ecc41
|
/tests/testthat/test-lang.R
|
df5964fa4c3560faf55e4302bc48763f22d15301
|
[] |
no_license
|
rlugojr/rlang
|
792c40f51bfe527810e7e2acfd83193cd14cc669
|
73164435cc3b46069c8f451e78edb28cda1d0c83
|
refs/heads/master
| 2021-01-13T01:07:59.722058
| 2017-02-22T14:44:45
| 2017-02-22T14:44:45
| 81,410,996
| 0
| 0
| null | 2017-02-09T05:01:54
| 2017-02-09T05:01:54
| null |
UTF-8
|
R
| false
| false
| 1,808
|
r
|
test-lang.R
|
# testthat suite for rlang's language-object predicates and coercions.
context("language")
# NULL counts as a language object in rlang's type hierarchy
test_that("NULL is a valid language object", {
expect_true(is_lang(NULL))
})
# is_call() should match on callee name (string or symbol), argument count (n),
# and full qualified-call patterns.
test_that("is_call() pattern-matches", {
expect_true(is_call(quote(foo(bar)), "foo"))
expect_false(is_call(quote(foo(bar)), "bar"))
expect_true(is_call(quote(foo(bar)), quote(foo)))
expect_true(is_call(~foo(bar), "foo", n = 1))
expect_false(is_call(~foo(bar), "foo", n = 2))
# BUG FIX: the closing paren was misplaced, so quote(foo::bar()) was passed to
# expect_true() as its `info` argument instead of to is_call() as the pattern;
# the qualified-call match was never actually exercised.
expect_true(is_call(~foo::bar(), quote(foo::bar())))
expect_false(is_call(~1))
expect_false(is_call(~NULL))
expect_true(is_unary_call(~ +3))
expect_true(is_binary_call(~ 3 + 3))
})
# The name argument may be a vector/list of candidates; a match on any succeeds
test_that("is_call() vectorises name", {
expect_false(is_call(~foo::bar, c("fn", "fn2")))
expect_true(is_call(~foo::bar, c("fn", "::")))
expect_true(is_call(~foo::bar, quote(`::`)))
expect_true(is_call(~foo::bar, list(quote(`@`), quote(`::`))))
expect_false(is_call(~foo::bar, list(quote(`@`), quote(`:::`))))
})
# coercion ----------------------------------------------------------------
# as_name() should strip calls/formulas/strings down to a bare symbol
test_that("as_name() produces names", {
expect_equal(as_name("a"), quote(a))
expect_equal(as_name(quote(a)), quote(a))
expect_equal(as_name(quote(a())), quote(a))
expect_equal(as_name(~ a), quote(a))
expect_equal(as_name(~ a()), quote(a))
expect_error(as_name(c("a", "b")), "Cannot parse character vector of length > 1")
})
# as_call() should promote symbols/strings/formulas to zero-argument calls
test_that("as_call() produces calls", {
expect_equal(as_call(quote(a)), quote(a()))
expect_equal(as_call(quote(a())), quote(a()))
expect_equal(as_call("a()"), quote(a()))
expect_equal(as_call(~ a()), quote(a()))
expect_error(as_call(c("a", "b")), "Cannot parse character vector of length > 1")
})
# Namespaced (::) and slot (@) callees are preserved as the full prefixed name
test_that("as_name() handles prefixed call names", {
expect_identical(as_name(quote(foo::bar())), quote(foo::bar))
expect_identical(as_name(~foo@bar()), quote(foo@bar))
})
|
ec2db22f17172427e8a67bb9757b0e361094fafe
|
2fa33aeef712fa0a1b8043b40261d218a37cafa2
|
/R/truncnorm.R
|
3bf4a6352f58e56752dd0442478f370c46d4cc93
|
[] |
no_license
|
cran/bayess
|
778e3cd961acecec0ccbf0de66048543af82c98c
|
30208f8c4b61bc73e5885875b8134f05a963719c
|
refs/heads/master
| 2022-09-03T00:53:47.483683
| 2022-08-11T09:30:08
| 2022-08-11T09:30:08
| 17,694,647
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 139
|
r
|
truncnorm.R
|
# Draw n samples from a Normal(mu, tau2) distribution truncated to [a, b]
# via inverse-CDF sampling: map uniforms into the CDF mass between a and b,
# then invert with qnorm().
#   n    - number of draws
#   mu   - mean of the untruncated normal
#   tau2 - variance (NOT standard deviation) of the untruncated normal
#   a, b - lower/upper truncation bounds
# The sd and the CDF values at the bounds were previously recomputed up to
# three times each; they are hoisted here (same result, one runif(n) draw,
# so the RNG stream is unchanged).
truncnorm=function(n,mu,tau2,a,b)
{
  sd <- sqrt(tau2)
  p_lower <- pnorm(a, mu, sd)
  p_upper <- pnorm(b, mu, sd)
  qnorm(p_upper - runif(n) * (p_upper - p_lower), mu, sd)
}
|
dd996051a5cf6ae5dc8458a5dec5b879e9fd9ca8
|
53e5567b96e5e20f556784d7173dd90cc18eb7e5
|
/R/SVI_state_functions.R
|
80b06b9005993991241b1047c15c864a0cbcf9ed
|
[] |
no_license
|
edroxas/svi-tools
|
7deec1c8df6aecc2e199ee2347b7ed419a2a1552
|
fe37e6659da0c4f1c1d458ba0e5de9d093df4c97
|
refs/heads/master
| 2023-03-15T15:21:34.069423
| 2018-05-11T20:33:06
| 2018-05-11T20:33:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,085
|
r
|
SVI_state_functions.R
|
library(RJSONIO)
library(rgdal)
library(ggplot2)
library(ggmap)
library(scales)
library(maptools)
library(rgeos)
library(dplyr)
library(data.table)
# A script to generate functions that allow us to construct the 16 separate
# pieces of the SVI for all census tracts.
# Each function has 2 inputs:
# 1. A state
# 2. a year
# 3. an API key
#################################################################################
#################################################################################
#################################################################################
#################################################################################
#################################################################################
# Build the Census Bureau ACS 5-year API URL for a statewide query.
# The endpoint layout changed over time, so three URL shapes are needed:
#   2009-2010:  /data/<yr>/acs5?key=...&get=<series>,NAME&for=state:*
#   2011-2014:  /data/<yr>/acs5?get=NAME,<series>&...&key=...
#   2015+:      /data/<yr>/acs/acs5?get=NAME,<series>&...&key=...
# Returns the URL as a single character string.
api_call.fn <- function(year, series, key) {
  base_url <- paste0('https://api.census.gov/data/', year)
  if (year %in% c(2009, 2010)) {
    url <- paste0(base_url, '/acs5?key=', key, '&get=', series, ',NAME&for=state:*')
  } else if (year %in% c(2011, 2012, 2013, 2014)) {
    url <- paste0(base_url, '/acs5?get=NAME,', series, '&for=state:*&key=', key)
  } else {
    url <- paste0(base_url, '/acs/acs5?get=NAME,', series, '&for=state:*&key=', key)
  }
  url
}
#################################################################################
#################################################################################
#################################################################################
#################################################################################
#################################################################################
#Fetch one ACS data series for all states and return it as a tidy data.frame.
#Performs a network request against the Census API (via api_call.fn), parses the
#JSON array-of-arrays response, and returns columns:
# name (state name), value (series value), state (FIPS code),
# data_series (the queried series id), data_name (human-readable label).
#Requires RJSONIO::fromJSON, data.table::rbindlist and dplyr to be attached.
data.function <- function(year,series,series.name,key){
call <- api_call.fn(year=year,key=key,series=series)
df <- fromJSON(call)
#Each JSON row becomes one data.frame row; positions 1-3 are name/value/state
df <- data.frame(rbindlist(lapply(df,function(x){
x<-unlist(x)
return(data.frame(name=x[1],value=x[2],state=x[3],data_series=series,data_name=series.name))
})
#Drop the first row, which holds the API's header labels rather than data
)) %>% filter(row_number() > 1)
return(df)
}
|
77974c90b32ca06a3359691c8aa65ca79a316de6
|
690c3c3e583094011d339d20a819b0fbe11a2bf8
|
/conf_matrix.R
|
242a4a421b96958b7d2aaf9a6f081e7751fa8a63
|
[] |
no_license
|
AllisonVincent/StarFM-code
|
a0f907e2931460b7867600bd1566cb39a600338b
|
eac755b6ef61af5d1925b3b65d02269c846e79e1
|
refs/heads/master
| 2021-06-17T15:02:43.013841
| 2021-04-20T17:19:42
| 2021-04-20T17:19:42
| 194,706,294
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,064
|
r
|
conf_matrix.R
|
#### This script is for the purpose of viewing individual rasters of STARM and Landsat NDSI data, as well as creating a confusion matrix to measure the performance of STARFM against the Landsat for model validation.
library(sp)
library(sf)
library(ggplot2)
library(rgdal)
library(raster)
library(dplyr)
library(caret)
library(data.table)
library(stats)
library(fields)
library(SpatialTools)
library(foreign)
library(oceanmap)
library(tidyr)
library(plotly)
library(rasterVis)
########## Isolating and plotting the NDSI for the test dates
## Load the data files needed
# Multi-layer rasters (one layer per date); paths are relative to the
# current working directory.
landsat<- brick("./landsat.tif")
modis<- brick("./mod.tif")
starfm<- brick("./starfm.tif")
ER<- readOGR("./EastRiver_Project.shp") #shapefile of the study watershed for reference
## Project the shapefile to match the raster files
data_proj<- crs(starfm)
ER_proj<- spTransform(ER, data_proj)
# Print the projection string to visually confirm the reprojection.
proj4string(ER_proj)
###### This section contains code to isolate the raster layers for a specific date and write them to their own raster for faster computing. This is helpful when comparing dates where a single Landsat date was excluded for analysis.
#landsat_test<-landsat[[n]] ## The layer of interest in the landsat raster brick
#writeRaster(landsat_test, filename="./landsat_testday.tif", bandorder='BSQ', datatype='INT2S',format='GTiff', overwrite=TRUE)
#starfm_test<- brick(starfm[[n]]) ## The layer of interest in the starfm raster brick
#writeRaster(starfm_test, filename="./starfm_testday.tif", bandorder='BSQ', datatype='INT2S',format='GTiff', overwrite=TRUE)
### Set the landsat pixels with no data (value here set to -11111) to NA
#landsat<- landsat_test
# NOTE(review): cells equal to exactly 0 are treated as "no data" here; a
# genuine NDSI value of 0 would also be masked -- confirm this is intended.
landsat_masked<- Which(landsat == 0, cells = TRUE)
landsat[landsat_masked] <- -11111
landsat_plot<- reclassify(landsat,cbind(-Inf, -11111, NA))
landsat_plot<- landsat_plot/10000 ## Divide by 10000 to get NDSI data into the range of -1 to 1
### Here modis no data values are already set to NA. Only need to divide to get NDSI data into range.
# NOTE(review): layer index 8 is hard-coded as the MODIS test date.
modis_test<- modis[[8]]
modis_plot<- modis_test/10000
### Set the starfm pixels with no data (value here set to -11111) to NA
#starfm<- starfm_test
starfm_plot<- reclassify(starfm,cbind(-Inf, -11111, NA))
starfm_plot<- starfm_plot/10000
#### Plot the landsat NDSI values
#par(mfrow=c(1,2))
# Window size is scaled to the raster dimensions.
dev.new(height=0.91*nrow(landsat_plot)/50, width=1.09*ncol(landsat_plot)/50)
par(mar = c(5,5,5,3.5))
plot(landsat_plot, col = rev(cm.colors(40)), main = "", xlab = "Longitude", ylab = "Latitude", cex.lab = 2, cex.axis = 2, legend = FALSE)
title(main = list("NDSI from Landsat", cex = 2.5), line = 1.0)
#mtext("3 December 2015", line = 0.15, cex = 1.75)
# Second plot() call draws only the legend, with larger axis labels.
plot(landsat_plot, legend.only = TRUE, col = rev(cm.colors(40)), axis.args = list(cex.axis = 1.9))
plot(ER_proj, border = "black", add = TRUE)
### Plot the STARFM NDSI values
#par(mar = c(3,3,4,3.5))
dev.new(height=0.91*nrow(starfm_plot)/50, width=1.09*ncol(starfm_plot)/50)
par(mar = c(5,5,5,3.5))
plot(starfm_plot, col = rev(cm.colors(40)), main = "", xlab = "Longitude", ylab = "Latitude", cex.lab = 2, cex.axis = 2, legend = FALSE)
title(main = list("NDSI from STARFM", cex = 2.5), line = 1.0)
#mtext("3 December 2015", line = 0.15, cex = 1.75)
plot(starfm_plot, legend.only = TRUE, col = rev(cm.colors(40)), axis.args = list(cex.axis = 1.9))
plot(ER_proj, border = "black", add = TRUE)
### Plot the modis NDSI values
par(mar = c(3,3,4,3.5))
plot(modis_plot, col = rev(cm.colors(40)), main = "NDSI from MODIS ")
mtext("Dec 3, 2015", line = 0.5)
plot(ER_proj, border = "black", add = TRUE)
## Find the fraction of data for each raster
## For landsat
area_L8<- landsat_plot@nrows*landsat_plot@ncols # total number of pixels for Landsat data
area_starfm<- starfm_plot@nrows*starfm_plot@ncols # total number of pixels for starfm data
# Build a 0/1 "has data" mask: NA -> 0, then any remaining non-zero -> 1.
landsat_data<- landsat_plot
landsat_data[is.na(landsat_data)]<- 0 # set all NA values to zero
landsat_true<- Which(landsat_data != 0, cells = TRUE)
landsat_data[landsat_true]<- 1 ## set all non-NA values to 1
landsat_valid<- as.vector(landsat_data, mode = 'numeric') # turn above data into vector for easy analysis
L8_all_sum<- sum(landsat_valid) ## find the sum of all pixels with a value of 1
L8_data<- L8_all_sum/area_L8 ## find the fraction of pixels in the landsat raster that have data
## Same as above, but for STARFM raster
starfm_data<- starfm_plot
starfm_data[is.na(starfm_data)]<- 0
starfm_true<- Which(starfm_data != 0, cells = TRUE)
starfm_data[starfm_true]<- 1
starfm_valid<- as.vector(starfm_data, mode = 'numeric')
starfm_all_sum<- sum(starfm_valid)
model_data<- starfm_all_sum/area_starfm ## find the fraction of pixels in the starfm raster that have data
## Creating binary snow rasters (snow = 1, no snow = 0) using an NDSI
## threshold of 0.4.
# Values up to 0.4 are reclassified to 0 ...
landsat_binary<- reclassify(landsat_plot,cbind(-Inf, 0.4, 0))
starfm_binary<- reclassify(starfm_plot,cbind(-Inf, 0.4, 0))
# ... and the remaining values (> 0.4) to 1.  (reclassify() intervals are
# right-closed by default, so a value of exactly 0.4 lands in the
# "no snow" class.)
landsat_binary<- reclassify(landsat_binary,cbind(0.4, Inf, 1))
starfm_binary<- reclassify(starfm_binary,cbind(0.4, Inf, 1))
### Save raster file of landsat snow status
# Fix: the original passed formatt='GTiff' (typo).  writeRaster() accepts
# '...', so the misspelled argument was silently ignored and the intended
# format specification never took effect.
writeRaster(landsat_binary, filename = "./landsat_snow_status.tif", bandorder='BSQ', datatype='INT2S', format='GTiff', overwrite=TRUE)
### Save raster file of starfm snow status
writeRaster(starfm_binary, filename = "./starfm_snow_status.tif", bandorder='BSQ', datatype='INT2S', format='GTiff', overwrite=TRUE)
## create levelplots to display the snow status. Requires the rasterVis library
# Convert the 0/1 raster to a factor and attach a "status" label to each
# level so the legend reads "no snow" / "snow".
landsat_bi_fac<- as.factor(landsat_binary)
rat<- levels(landsat_bi_fac)[[1]]
rat[["status"]] <- c("no snow", "snow")
levels(landsat_bi_fac)<- rat
dev.new()
levelplot(landsat_bi_fac, att = "status", col.regions = rev(cm.colors(40)), scales = list(x = list(cex = 1.5), y = list(cex = 1.5)), xlab = list(label = "Longitude", cex = 2), ylab = list(label = "Latitude", cex = 2), main = list(label = "Landsat Snow Status", cex = 2), colorkey = list(labels = list(height = 2, cex = 1.9))) + layer(sp.polygons(ER_proj))
# NOTE(review): the STARFM raster goes through ratify() first while the
# Landsat raster above does not -- confirm both paths are equivalent.
starfm_ratify<- ratify(starfm_binary)
starfm_bi_fac<- as.factor(starfm_ratify)
rat<- levels(starfm_bi_fac)[[1]]
rat[["status"]] <- c("no snow", "snow")
levels(starfm_bi_fac)<- rat
dev.new()
levelplot(starfm_bi_fac, att = "status", col.regions = rev(cm.colors(40)), scales = list(x = list(cex = 1.5), y = list(cex = 1.5)), xlab = list(label = "Longitude", cex = 2), ylab = list(label = "Latitude", cex = 2), main = list(label = "STARFM Snow Status", cex = 2), colorkey = list(labels = list(height = 1, cex = 1.9))) + layer(sp.polygons(ER_proj))
## Find the fractional snow-covered area (fSCA) for each raster
# NOTE(review): the denominator (area_*) counts every pixel in the raster
# extent, including NA pixels -- confirm NA cells should count as "no snow".
L8_snow<- as.vector(landsat_binary, mode = 'numeric')
L8_snow_sum<- sum(L8_snow, na.rm = TRUE) ## Find the sum all pixels that have a value equal to 1 (snow status)
L8_fsca<- L8_snow_sum/area_L8 #Landsat fSCA
starfm_snow<- as.vector(starfm_binary, mode = 'numeric') ## Find the sum all pixels that have a value equal to 1 (snow status)
starfm_snow_sum<- sum(starfm_snow, na.rm = TRUE)
starfm_fsca<- starfm_snow_sum/area_starfm #STARFM fSCA
## Create a confusion matrix to evaluate the performance of the model against the Landsat data
# First argument (STARFM) is the prediction, second (Landsat) the
# reference; the "positive" class is 1, i.e. snow.
landsat_factor<- as.factor(L8_snow)
starfm_factor<- as.factor(starfm_snow)
results<- confusionMatrix(starfm_factor,landsat_factor, positive = "1")
# Save the results in matrices that can easily be viewed for reference
overall<- as.matrix(results, what = "overall")
classes<- as.matrix(results, what = "classes")
# Want to know the individual values, so save these values as their own variable
acc_model<- overall[[1]]
spec_model<- classes[[2]]
prec_model<- classes[[5]]
recall_model<- classes[[6]]
f1_model<- classes[[7]]
date<- "12.3.2015" ## for table below
# Assemble a two-column summary table (metric label, value) for export.
results_end<- c(date, acc_model, spec_model, recall_model, prec_model, f1_model, L8_fsca, starfm_fsca, L8_data, model_data)
labels<- c("Date", "Accuracy", "Specificity", "Recall", "Precision", "F-Score", "L8 fSCA", "STARFM fSCA", "L8 Data", "STARFM Data")
all_data<- data.frame(labels, results_end)
|
aa226496011a2b3bd4e5ddb4a3f17e1f1a3a1174
|
3a22a1b42404a006f4dad390d3190650bdc94ba7
|
/man/list_stations.Rd
|
f73ec3aaa9c2dfd15ea1fae9f476d10f41e857cc
|
[
"MIT"
] |
permissive
|
spadarian/USydneyRainfall
|
282932136c8249081af968b2c65b75b5a27fd764
|
87dd623a8a2b79cc2a9701ffe8ce9c89c7cfada5
|
refs/heads/master
| 2021-01-22T05:05:38.193767
| 2015-07-01T17:47:07
| 2015-07-02T00:57:14
| 38,396,592
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 228
|
rd
|
list_stations.Rd
|
\name{list_stations}
\alias{list_stations}
\title{List stations from the Better Rainfall Forecast for Grain Growers API}
\usage{
list_stations()
}
\value{
\code{data.frame}
}
\description{
This function gets list of user stations.
}
|
06e21c91cc376ec62140d1b73aa3e61d3a11494b
|
c2e833feb1c738737ed468b3d0da503439faa199
|
/save-scripts/ttrees-write.R
|
025df3735151592572e716c8853815e4036590a9
|
[] |
no_license
|
privefl/paper2-PRS
|
4ea4d5d6aa4d0b422c57f4e20cedc8925f4b6497
|
3487d0d0d77e27956788ddb9ef8840c46676a7bf
|
refs/heads/master
| 2021-11-25T23:51:21.652297
| 2021-10-27T08:14:25
| 2021-10-27T08:14:25
| 106,011,226
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 751
|
r
|
ttrees-write.R
|
library(bigsnpr)
library(ggplot2)
# Number of physical cores available for bigsnpr's parallel routines.
NCORES <- nb_cores()
# data
# Attach the file-backed genotype matrix and its metadata.
celiac2 <- snp_attach("backingfiles/celiacQC_sub4.rds")
G <- celiac2$genotypes
n <- nrow(G)   # number of individuals
m <- ncol(G)   # number of variants
CHR <- celiac2$map$chromosome
POS <- celiac2$map$physical.pos
# NOTE(review): n, m, CHR and POS are assigned but not used below.
# jdb
# Write the genotypes to a ".jdb" text file: 5 header lines of ";", then
# one row per individual ("ind_<i>") whose first column is a random 0/1
# value (presumably a placeholder phenotype -- confirm) followed by that
# individual's genotype calls.  Rows are written in blocks of 1000.
# NOTE(review): sample() is not seeded, so the 0/1 column differs
# between runs.
file.jdb <- "ttrees.jdb"
cat(rep(";", 5), sep = "\n", file = file.jdb)
big_apply(G, a.FUN = function(X, ind) {
  mat <- cbind(sample(0:1, length(ind), replace = TRUE), X[ind, ])
  write.table(mat, file = file.jdb, append = TRUE, quote = FALSE,
              row.names = paste0("ind_", ind), col.names = FALSE)
  NULL
}, a.combine = "c", ind = rows_along(G), block.size = 1e3)
# bloc
# Companion ".bloc" file: 0-based start/end indices of contiguous
# 10-variant blocks (the "- 1" shifts 1-based indices to 0-based).
bigsnpr:::write.table2(bigstatsr:::CutBySize(ncol(G), block.size = 10)[, 1:2] - 1,
                       file = "ttrees.bloc")
|
b7e58f3e114632b919518af7dccd773d64bc51cf
|
011ee506f52512d7245cf87382ded4e42d51bbd9
|
/R/ir_calc.R
|
3cc5f4907daa75659c7b43fbef0e3b4cf926a6a1
|
[
"MIT"
] |
permissive
|
emilelatour/lamisc
|
ff5e4e2cc76968787e96746735dbadf1dd864238
|
e120074f8be401dc7c5e7bb53d2f2cc9a06dd34a
|
refs/heads/master
| 2023-08-28T02:15:00.312168
| 2023-07-27T23:39:58
| 2023-07-27T23:39:58
| 123,007,972
| 7
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,229
|
r
|
ir_calc.R
|
#### Packages --------------------------------
library(dplyr)
#' @title
#' Calculate confidence interval for crude incidence rate
#'
#' @description
#' 7 different methods that I found online for calculating the confidence
#' interval for a crude incidence rate. Note that these are all two sided. Most
#' of these come from https://www.openepi.com and their documentation and formulas.
#'
#' Mid-P exact test seems to be the preferred method
#'
#' + "Mid-P exact test" using Miettinen's (1974d) modification, as described in Epidemiologic Analysis with a Programmable Calculator, 1979.
#' + "Fisher's exact test" based on the formula (Armitage,1971; Snedecor & Cochran,1965) as described in Epidemiologic Analysis with a Programmable Calculator, 1979.
#' + "Exact Poisson is the same as the Fisher's exact but calculated differently.
#' + "Normal approximation" to the Poisson distribution as described by Rosner, Fundamentals of Biostatistics (5th Ed).
#' + "Byar approx. Poisson" as described in Rothman and Boice, Epidemiologic Analysis with a Programmable Calculator, 1979.
#' + "Rothman/Greenland" as described in Rothman and Greenland, Modern Epidemiology (2nd Ed).
#' + "CCRB" comes from the follwing website and they do not offer documentation for their methods: http://www2.ccrb.cuhk.edu.hk/stat/confidence%20interval/CI%20for%20single%20rate.htm
#'
#' @param a Number of clinical events
#' @param N Person-time at risk
#' @param pt_units Factor to multiply rate by to get Person-time; default is 100.
#' @param alpha Significance level for two-sided confidence interval
#' @param interval a vector containing the end-points of the interval to be
#' searched for the root. The function `base::uniroot()` is used to solve for
#' some confidence intervals iteratively.
#'
#' @references
#' http://epid.blogspot.com/2012/08/how-to-calculate-confidence-interval-of.html
#' https://www.openepi.com/PDFDocs/PersonTime1Doc.pdf
#' https://seer.cancer.gov/seerstat/WebHelp/Rate_Algorithms.htm
#'
#' @importFrom dplyr bind_rows
#' @importFrom dplyr mutate_at
#' @importFrom dplyr vars
#' @importFrom stats qchisq
#' @importFrom stats uniroot
#' @importFrom tibble tibble
#'
#'
#' @return
#' A tbl_df
#'
#' @export
#'
#' @examples
#' # options(pillar.sigfig = 3)
#' ir_calc(a = 18,
#' N = 352 + 10.5,
#' alpha = 0.05)
#'
#' #### From https://www.openepi.com --------------------------------
#'
#' # Person-Time Rate and 95% Confidence Intervals
#' # Per 100 Person-Time Units
#' # Number of cases: 18
#' # Person-Time: 362.5
#' #
#' # Lower CL Rate Upper CL
#' # Mid-P exact test 3.035 4.966 7.696
#' # Fisher's exact test 2.943 7.848
#' # Normal approximation 2.672 7.259
#' # Byar approx. Poisson 2.941 7.848
#' # Rothman/Greenland 3.129 7.881
ir_calc <- function(a,
                    N,
                    pt_units = 100,
                    alpha = 0.05,
                    interval = c(0, 10000000)) {

  # Stack one row per method; bind_rows() fills any column a method does
  # not provide (e.g. 'se') with NA.
  # Fix: the original call had a trailing comma after the last argument,
  # which only parses thanks to rlang's tolerant dots and can error under
  # base-R call semantics / older dplyr versions; it has been removed.
  dplyr::bind_rows(
    ir_calc_mid_p(a, N, pt_units, alpha, interval),
    ir_calc_fisher(a, N, pt_units, alpha, interval),
    ir_calc_exact_poisson(a, N, pt_units, alpha),
    ir_calc_normal(a, N, pt_units, alpha),
    ir_calc_byar(a, N, pt_units, alpha),
    ir_calc_roth_green(a, N, pt_units, alpha),
    ir_calc_ccrb(a, N, pt_units, alpha)
  )
}
#### Helper functions --------------------------------
## Mid-P exact test ----------------
ir_calc_mid_p <- function(a,
                          N,
                          pt_units = 100,
                          alpha = 0.05,
                          interval = c(0, 10000000)) {
  # Mid-P exact CI for a Poisson count: half the probability mass at the
  # observed count 'a' plus the full mass below it is matched against the
  # nominal tail areas, and uniroot() solves for the Poisson mean.
  idx <- 0:(a - 1)

  # Factory producing the root equation for a given target tail area.
  mid_p_root <- function(target) {
    function(mu) {
      (1 / 2) * exp(-mu) * (mu ^ a) / factorial(a) +
        sum(exp(-mu) * (mu ^ idx) / factorial(idx)) - target
    }
  }

  # Solve for the Poisson mean at each tail, then convert to a rate.
  lo <- uniroot(mid_p_root(1 - alpha / 2), interval = interval)$root / N
  hi <- uniroot(mid_p_root(alpha / 2), interval = interval)$root / N

  out <- tibble::tibble(
    method = "Mid-P exact test",
    number_of_cases = a,
    person_time = N,
    rate = a / N,
    se = NA_real_,          # no closed-form SE for the mid-P method
    lower_ci = lo,
    upper_ci = hi
  )

  # Express the rate and limits per 'pt_units' of person-time.
  out %>%
    mutate_at(.vars = dplyr::vars(rate, lower_ci, upper_ci),
              .funs = list(~ pt_units * .))
}
# ir_calc_mid_p(a = 18,
# N = 352 + 10.5,
# alpha = 0.05)
## Fisher's exact test ----------------
# Turns out to be the same as the Exact Poisson...
ir_calc_fisher <- function(a,
                           N,
                           pt_units = 100,
                           alpha = 0.05,
                           interval = c(0, 10000000)) {
  # Fisher exact CI for a Poisson count, found by inverting the
  # cumulative Poisson tail probabilities with uniroot().

  # Cumulative Poisson probability P(X <= k_max) at mean 'mu'.
  pois_tail <- function(mu, k_max) {
    support <- 0:k_max
    sum(exp(-mu) * (mu ^ support) / factorial(support))
  }

  # Lower limit: upper tail at a-1 equals 1 - alpha/2;
  # upper limit: tail at a equals alpha/2.  Both divided by person-time.
  lower_limit <- uniroot(function(mu) pois_tail(mu, a - 1) - (1 - alpha / 2),
                         interval = interval)$root / N
  upper_limit <- uniroot(function(mu) pois_tail(mu, a) - (alpha / 2),
                         interval = interval)$root / N

  tibble::tibble(
    method = "Fisher's exact test",
    number_of_cases = a,
    person_time = N,
    rate = a / N,
    se = NA_real_,          # no closed-form SE for the exact method
    lower_ci = lower_limit,
    upper_ci = upper_limit
  ) %>%
    mutate_at(.vars = dplyr::vars(rate, lower_ci, upper_ci),
              .funs = list(~ pt_units * .))
}
# ir_calc_fisher(a = 18,
# N = 352 + 10.5,
# alpha = 0.05)
## Exact Poisson ----------------
ir_calc_exact_poisson <- function(a,
                                  N,
                                  pt_units = 100,
                                  alpha = 0.05) {

  # Exact Poisson CI via the chi-square relationship:
  # lower = qchisq(alpha/2, 2a)/2, upper = qchisq(1 - alpha/2, 2(a+1))/2,
  # then divided by person-time.  Per the package docs, this is the same
  # interval as Fisher's exact test, just calculated differently.
  #
  # References (from the original author):
  # http://www2.ccrb.cuhk.edu.hk/stat/confidence%20interval/CI%20for%20single%20rate.htm
  # https://www.statsdirect.com/help/rates/poisson_rate_ci.htm
  # https://www.openepi.com/PersonTime1/PersonTime1.htm
  # https://www.openepi.com/PDFDocs/ProportionDoc.pdf
  # http://epid.blogspot.com/2012/08/how-to-calculate-confidence-interval-of.html
  # Ulm, 1990

  tibble::tibble(
    method = "Exact Poisson",
    number_of_cases = a,
    person_time = N,
    rate = a / N,
    # 'se' column added for a consistent schema with the other ir_calc_*
    # helpers (there is no closed-form SE for the exact method).
    se = NA_real_,
    lower_ci = qchisq(p = alpha / 2,
                      df = 2 * a) / 2,
    upper_ci = qchisq(p = 1 - alpha / 2,
                      df = 2 * (a + 1)) / 2
  ) %>%
    # Convert the chi-square quantiles into rates per unit person-time.
    mutate(lower_ci = lower_ci / N,
           upper_ci = upper_ci / N) %>%
    # Express the rate and limits per 'pt_units' of person-time.
    mutate_at(.vars = dplyr::vars(rate,
                                  lower_ci,
                                  upper_ci),
              .funs = list(~ pt_units * .))
}
# ir_calc_exact_poisson(a = 18,
# N = 352 + 10.5,
# alpha = 0.05)
## Normal Approximation ----------------
ir_calc_normal <- function(a,
                           N,
                           pt_units = 100,
                           alpha = 0.05) {
  # Wald-type CI from the normal approximation to the Poisson count,
  # appropriate when 'a' is large enough (Rosner, 5th ed.).
  crit <- qnorm(p = 1 - alpha / 2,
                mean = 0,
                sd = 1,
                lower.tail = TRUE)
  crude_rate <- a / N
  std_err <- sqrt(a / N ^ 2)
  half_width <- crit * std_err

  # Rate and limits are scaled to 'pt_units' of person-time up front;
  # the SE is reported on the unscaled rate, as in the other helpers.
  tibble::tibble(
    method = "Normal approximation",
    number_of_cases = a,
    person_time = N,
    rate = pt_units * crude_rate,
    se = std_err,
    lower_ci = pt_units * (crude_rate - half_width),
    upper_ci = pt_units * (crude_rate + half_width)
  )
}
# ir_calc_normal(a = 18,
# N = 352 + 10.5,
# alpha = 0.05)
## Byar approx. Poisson ----------------
ir_calc_byar <- function(a,
                         N,
                         pt_units = 100,
                         alpha = 0.05) {
  # Byar's approximation to the exact Poisson limits
  # (Rothman & Boice, 1979).
  z <- qnorm(p = 1 - alpha / 2,
             mean = 0,
             sd = 1,
             lower.tail = TRUE)

  # Cube-root-transformed bound for a count 'k'; 'sign' is -1 for the
  # lower limit and +1 for the upper limit.
  byar_bound <- function(k, sign) {
    k * (1 - 1 / (9 * k) + sign * (z / 3) * sqrt(1 / k)) ^ 3
  }

  # Lower bound uses the observed count a; upper bound uses a + 1.
  tibble::tibble(
    method = "Byar approx. Poisson",
    number_of_cases = a,
    person_time = N,
    rate = pt_units * (a / N),
    se = NA_real_,          # no closed-form SE for this approximation
    lower_ci = pt_units * (byar_bound(a, -1) / N),
    upper_ci = pt_units * (byar_bound(a + 1, +1) / N)
  )
}
# ir_calc_byar(a = 18,
# N = 352 + 10.5,
# alpha = 0.05)
## Rothman-Greenland ----------------
ir_calc_roth_green <- function(a,
                               N,
                               pt_units = 100,
                               alpha = 0.05) {
  # Log-transformed CI: log(rate) is treated as approximately normal with
  # standard error sqrt(1 / a)
  # (Rothman & Greenland, Modern Epidemiology, 2nd ed.).
  z <- qnorm(p = 1 - alpha / 2,
             mean = 0,
             sd = 1,
             lower.tail = TRUE)
  crude_rate <- a / N
  # SE on the log scale; the original notes (and comments out) the
  # alternative sqrt((1 - rate) / a) used by the CCRB method.
  log_se <- sqrt(1 / a)

  # Symmetric interval on the log scale, back-transformed with exp().
  ci_log <- log(crude_rate) + c(-1, 1) * z * log_se

  tibble::tibble(
    method = "Rothman-Greenland",
    number_of_cases = a,
    person_time = N,
    rate = pt_units * crude_rate,
    se = log_se,
    lower_ci = pt_units * exp(ci_log[1]),
    upper_ci = pt_units * exp(ci_log[2])
  )
}
# ir_calc_roth_green(a = 18,
# N = 352 + 10.5,
# alpha = 0.05)
## http://www2.ccrb.cuhk.edu.hk/stat/confidence%20interval/CI%20for%20single%20rate.htm ----------------
# http://epid.blogspot.com/2012/08/how-to-calculate-confidence-interval-of.html
# https://stats.stackexchange.com/questions/301777/how-to-calculate-confidence-interval-of-incidence-rate-under-the-poisson-distrib
ir_calc_ccrb <- function(a,
                         N,
                         pt_units = 100,
                         alpha = 0.05) {
  # CCRB (CUHK) method: like Rothman-Greenland but with the standard
  # error of the log rate taken as sqrt((1 - rate) / a).  The source
  # website offers no derivation for this SE:
  # http://www2.ccrb.cuhk.edu.hk/stat/confidence%20interval/CI%20for%20single%20rate.htm
  z <- qnorm(p = 1 - alpha / 2,
             mean = 0,
             sd = 1,
             lower.tail = TRUE)
  crude_rate <- a / N
  log_se <- sqrt((1 - crude_rate) / a)

  # Symmetric interval on the log scale, back-transformed with exp().
  bounds_log <- log(crude_rate) + c(-1, 1) * z * log_se

  tibble::tibble(
    method = "CCRB",
    number_of_cases = a,
    person_time = N,
    rate = pt_units * crude_rate,
    se = log_se,
    lower_ci = pt_units * exp(bounds_log[1]),
    upper_ci = pt_units * exp(bounds_log[2])
  )
}
|
4603fe190d3dfb77f710d20c8ef18ccdd53fc1d9
|
13c225cd942a60601c6dd9c9d35e174e2112f73d
|
/01-relational-data.R
|
0c2f8372654a73cc68bbf9b9d4fc527371b5810c
|
[] |
no_license
|
deblnia/data-wrangling-relational-data-and-factors
|
59fc1e9ae384def0d9ccc60d9d26c05f171ad090
|
e53ac044dda1efe207f8fe1bf58a2e4799abe7e3
|
refs/heads/master
| 2022-12-08T08:44:41.449989
| 2020-08-31T15:48:44
| 2020-08-31T15:48:44
| 289,555,817
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 498
|
r
|
01-relational-data.R
|
# load required packages
library(tidyverse)
# # uncomment and run if nycflights13 is not already installed
# install.packages("nycflights13")
library(nycflights13)
# Is there a relationship between the age of a plane and its departure delays?
## consider the data frames you need to use to solve the problem
# glimpse() lists each table's columns and types, which helps spot the
# shared key between flights and planes (likely `tailnum` -- confirm
# from the printed output) before joining.
glimpse(flights)
glimpse(planes)
## solution
# Add the location of the origin and destination (i.e. the `lat` and `lon`) to `flights`
# Inspect both tables to identify which flights columns match the
# airports key before writing the joins.
glimpse(flights)
glimpse(airports)
## solution
|
22a6bab6f3d4d7343cf4ff746f8e54439012b193
|
366a094282df7ec63aff057a1c415991cd5603f4
|
/Problem 9.R
|
cd5adc5bc86afde9ff7f5d6570847db9e346cfaf
|
[] |
no_license
|
spyroso/Project-Euler
|
ed66c89304a12de4b6524138490d36efcb98af83
|
f6b48f0a9b9fbee040d306a984e8fd6ebd2f9570
|
refs/heads/master
| 2020-06-08T20:56:45.721009
| 2019-07-11T03:05:13
| 2019-07-11T03:05:13
| 193,305,733
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 199
|
r
|
Problem 9.R
|
# Project Euler, Problem 9: there is exactly one Pythagorean triplet
# (a < b < c, a^2 + b^2 = c^2) with a + b + c = 1000; print a * b * c.
#
# Improvements over the original triple loop:
#  * c is fully determined by a and b (c = 1000 - a - b), so an O(n^2)
#    double loop replaces the O(n^3) search;
#  * the original 'break' only left the innermost loop, so the outer
#    loops kept running after the answer was printed -- a flag now stops
#    the whole search at the first (unique) solution;
#  * descending 1:(c-1)-style sequences at the range edges are avoided.
found <- FALSE
for (a in 1:332) {            # a < b < c and a + b + c = 1000 imply a <= 332
  if (found) break
  for (b in (a + 1):998) {
    c <- 1000 - a - b
    if (c <= b) break         # b only grows, so c only shrinks further
    if (a^2 + b^2 == c^2) {
      print(a * b * c)        # same output as the original: [1] 31875000
      found <- TRUE
      break
    }
  }
}
|
f6705d8f235a243f5e6ec771266786d0b75452a3
|
41364e42f222803c4741c60210c3d0f5b85e83a3
|
/Quantitative Primer/samples/ch9-exercises.r
|
1115bc9ec4cefd4f5aea1d223e58aa2ec42e4f72
|
[
"MIT"
] |
permissive
|
bmoretz/Quantitative-Investments
|
2a052e612e06afe1027fa168df54b5b9212b556f
|
25d9a7199f212787dd9ae05f7af9e7407591c5bc
|
refs/heads/master
| 2021-06-16T18:10:11.203155
| 2021-04-05T03:13:57
| 2021-04-05T03:13:57
| 186,317,831
| 7
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,093
|
r
|
ch9-exercises.r
|
## Dale W.R. Rosenthal, 2018
## You are free to distribute and use this code so long as you attribute
## it to me or cite the text.
## The legal disclaimer in _A Quantitative Primer on Investments with R_
## applies to this code. Use or distribution without these comment lines
## is forbidden.
library(MASS)
library(xts)
library(quantmod)
library(Quandl)
library(PortfolioAnalytics)
library(ROI)
library(ROI.plugin.glpk)
library(ROI.plugin.quadprog)
## risk-free rate (for example purposes)
rf <- 0.01
# Download adjusted-close prices for the six ETFs from Yahoo, column-bind
# them into one xts object, and compute daily log returns from 2012 on.
etf.symbols <- c("SPY", "IWM", "AGG", "FEZ", "ACWI", "IYR")
adj.close <- 6 # 6th field is adjusted close
etf.prices <- getSymbols(etf.symbols[1], source="yahoo",
                         auto.assign=FALSE, return.class="xts")[,adj.close]
for (i in 2:length(etf.symbols)) {
  etf.tmp <- getSymbols(etf.symbols[i], source="yahoo",
                        auto.assign=FALSE, return.class="xts")[,adj.close]
  etf.prices <- cbind(etf.prices, etf.tmp)
}
colnames(etf.prices) <- etf.symbols
# NOTE(review): etf.rets is computed but not referenced again below;
# all.returns is rebuilt from prices directly.
etf.rets <- diff(log(etf.prices))["2012/"]
# Same pattern for front-month commodity futures settlement prices
# pulled from Quandl's CHRIS continuous-contract series.
commodity.symbols <- c("WTI", "Natgas", "AU", "CU", "Corn")
settle <- "Settle" # settle field is labeled
commodity.tickers <- c("CHRIS/CME_CL1", "CHRIS/CME_NG1", "CHRIS/CME_GC1",
                       "CHRIS/CME_HG1", "CHRIS/CME_C1")
commodity.prices <- Quandl(commodity.tickers[1], type="xts")[,settle]
for (i in 2:length(commodity.symbols)) {
  commodity.tmp <- Quandl(commodity.tickers[i], type="xts")[,settle]
  commodity.prices <- cbind(commodity.prices, commodity.tmp)
}
colnames(commodity.prices) <- commodity.symbols
# Combined daily log returns since 2012; na.omit() keeps only dates on
# which every asset has a price.
all.returns.tmp <- diff(log(cbind(etf.prices,commodity.prices)))["2012/"]
all.returns <- na.omit(all.returns.tmp)
## set up portfolio with objective and constraints
# Mean-StdDev objective, fully invested, with per-asset weights boxed
# to [-1, 1] (shorting allowed).
n.assets <- length(colnames(all.returns))
port.spec <- portfolio.spec(assets = colnames(all.returns))
port.spec <- add.objective(portfolio=port.spec, type="risk", name="StdDev")
port.spec <- add.objective(portfolio=port.spec, type="return", name="mean")
port.spec <- add.constraint(portfolio=port.spec, type="full_investment")
port.spec <- add.constraint(portfolio=port.spec, type="box", min=-1, max=1)
## map out the efficient frontier (for variance risk)
eff.frontier <- create.EfficientFrontier(R=all.returns, portfolio=port.spec,
                                         n.portfolios=100, type="mean-StdDev")
## daily Sharpe ratio
# NOTE(review): the daily risk-free rate uses 250 trading days here while
# the annualization below uses 252 -- confirm which count is intended.
sharpe.ratios <- (eff.frontier$frontier[,"mean"]-rf/250)/eff.frontier$frontier[,"StdDev"]
max.sharpe.ratio <- sharpe.ratios[sharpe.ratios == max(sharpe.ratios)]
optimal.port.name <- names(max.sharpe.ratio)
optimal.mean <- eff.frontier$frontier[optimal.port.name,"mean"]
optimal.sd <- eff.frontier$frontier[optimal.port.name,"StdDev"]
n.trading.days.per.year <- 252
# Report the annualized Sharpe ratio, expected return and volatility of
# the tangency portfolio, plus its asset weights.
print(sprintf("Optimal Sharpe Ratio: %f", max.sharpe.ratio*sqrt(n.trading.days.per.year)))
print(sprintf("Optimal E(port return): %f", optimal.mean*n.trading.days.per.year))
print(sprintf("Optimal sd(port return): %f", optimal.sd*sqrt(n.trading.days.per.year)))
print("Optimal weights")
print(eff.frontier$frontier[optimal.port.name,(1:n.assets)+3])
|
16fd4aae0cc78d23b7dd0ebf28a68a6d41c89331
|
d9736711c9c01c91218f9bb06b5a81498014cf0b
|
/R/read_vcfs_as_granges.R
|
e2d3c8a48a5d8be6eae448928f44c942c575d647
|
[
"MIT"
] |
permissive
|
Biocodings/MutationalPatterns
|
6c3819ecca88b1e51f6d41510504c3960e60f217
|
5698fb9abb7c61e54c05b37df4e7f131c1ba5c28
|
refs/heads/master
| 2021-01-20T07:13:39.640056
| 2017-05-01T12:08:32
| 2017-05-01T12:08:42
| 89,982,167
| 2
| 0
| null | 2017-05-02T02:16:44
| 2017-05-02T02:16:44
| null |
UTF-8
|
R
| false
| false
| 9,453
|
r
|
read_vcfs_as_granges.R
|
#' Read VCF files into a GRangesList
#'
#' This function reads Variant Call Format (VCF) files into a GRanges object
#' and combines them in a GRangesList. In addition to loading the files, this
#' function applies the same seqlevel style to the GRanges objects as the
#' reference genome passed in the 'genome' parameter.
#'
#' @param vcf_files Character vector of VCF file names
#' @param sample_names Character vector of sample names
#' @param genome A string matching the name of a BSgenome library
#' corresponding to the reference genome of your VCFs
#' @param group Selector for a seqlevel group. All seqlevels outside
#' of this group will be removed. Possible values:
#' * 'all' for all chromosomes;
#' * 'auto' for autosomal chromosomes;
#' * 'sex' for sex chromosomes;
#' * 'auto+sex' for autosomal + sex chromosomes (default);
#' * 'circular' for circular chromosomes;
#' * 'none' for no filtering, which results in keeping all
#' seqlevels from the VCF file.
#' @param check_alleles logical. If TRUE (default) positions with insertions,
#' deletions and/or multiple alternative alleles are excluded
#' from the vcf object, since these positions cannot be analysed
#' with this package. This setting can be set to FALSE to speed
#' up processing time only if the input vcf does not contain any
#' of such positions, as these will cause obscure errors.
#'
#' @return A GRangesList containing the GRanges obtained from 'vcf_files'
#'
#' @importFrom BiocGenerics match
#' @importFrom VariantAnnotation readVcf
#' @importFrom SummarizedExperiment rowRanges
#' @importFrom GenomeInfoDb "seqlevelsStyle<-"
#' @importFrom GenomeInfoDb "organism"
#' @importFrom GenomeInfoDb keepSeqlevels
#' @importFrom GenomeInfoDb extractSeqlevelsByGroup
#' @importFrom parallel detectCores
#' @importFrom parallel mclapply
#' @importFrom plyr llply
#'
#' @examples
#' # The example data set consists of three colon samples, three intestine
#' # samples and three liver samples. So, to map each file to its appropriate
#' # sample name, we create a vector containing the sample names:
#' sample_names <- c ( "colon1", "colon2", "colon3",
#' "intestine1", "intestine2", "intestine3",
#' "liver1", "liver2", "liver3" )
#'
#' # We assemble a list of files we want to load. These files match the
#' # sample names defined above.
#' vcf_files <- list.files(system.file("extdata",
#' package="MutationalPatterns"),
#' pattern = ".vcf", full.names = TRUE)
#'
#' # Get a reference genome BSgenome object.
#' ref_genome <- "BSgenome.Hsapiens.UCSC.hg19"
#' library("BSgenome")
#' library(ref_genome, character.only = TRUE)
#'
#' # This function loads the files as GRanges objects
#' vcfs <- read_vcfs_as_granges(vcf_files, sample_names, ref_genome)
#'
#' @export
read_vcfs_as_granges <- function(vcf_files, sample_names, genome = "-",
                                    group = "auto+sex", check_alleles = TRUE)
{
    # Each VCF file needs exactly one sample name.
    if (length(vcf_files) != length(sample_names))
        stop("Please provide the same number of sample names as VCF files")

    # Check whether the user has adapted to the new behavior of the function.
    if (genome == "-")
        stop(paste("Please pass a reference genome string in the 'genome'",
                    "parameter. This string can be obtained using",
                    "available.genomes() from the BSgenome package."))

    ref_genome <- base::get(genome)

    # Validate the reference genome *before* using it.  (The original
    # only checked the class after already calling organism() on the
    # object, which would have failed first for a non-BSgenome input.)
    # inherits() is preferred over comparing class(x) to a string.
    if (!inherits(ref_genome, "BSgenome"))
        stop("Please provide the name of a BSgenome object.")

    ref_organism <- GenomeInfoDb::organism(ref_genome)
    ref_style <- seqlevelsStyle(ref_genome)

    # Name the VCF's genome as the name of the genome build instead of
    # the BSgenome package name.
    genome_name <- genome(ref_genome)[[1]]

    # Detect the number of available cores.  Windows does not support
    # forking (only threading), and on confined OS environments
    # detectCores() can return NA; fall back to one core in both cases.
    # (The original called detectCores() twice and mixed '=' and '<-'.)
    num_cores <- detectCores()
    if (.Platform$OS.type == "windows" || is.na(num_cores))
        num_cores <- 1

    # To be able to print warnings from within the mclapply call,
    # we need to explicitly set this option. See:
    # https://bugs.r-project.org/bugzilla3/show_bug.cgi?id=17122
    # on.exit() guarantees the option is restored even if loading fails
    # part-way through (the original only restored it on success).
    original_warn_state <- getOption("warn")
    on.exit(options(warn = original_warn_state), add = TRUE)
    options(warn = 1)

    # Show the warning once for all VCF files that are loaded with this
    # call to read_vcfs_as_granges.
    if (!check_alleles)
    {
        warning(paste("check_alleles is set to FALSE. Make sure your input",
                      "VCF does not contain any positions with insertions,",
                      "deletions or multiple alternative alleles, as these",
                      "positions cannot be analysed with MutationalPatterns",
                      "and cause obscure errors."))
    }

    vcf_list <- GRangesList(mclapply (vcf_files, function (file)
    {
        # Use VariantAnnotation's readVcf, but only store the
        # GRanges information in memory. This speeds up the
        # loading significantly.
        vcf <- rowRanges(readVcf (file, genome_name))

        # Convert to a single naming standard.
        seqlevelsStyle(vcf) <- ref_style[1]

        groups <- c()
        if (group != "none")
        {
            if (group == "auto+sex")
            {
                groups <- c(extractSeqlevelsByGroup(species = ref_organism,
                                                    style = ref_style,
                                                    group = "auto"),
                            extractSeqlevelsByGroup(species = ref_organism,
                                                    style = ref_style,
                                                    group = "sex"))

                # In some cases, the seqlevelsStyle returns multiple styles.
                # In this case, we need to do a little more work to extract
                # a vector of seqlevels from it.
                groups_names <- names(groups)
                if (! is.null(groups_names))
                {
                    # The seqlevels in the groups are now duplicated.
                    # The following code deduplicates the list items, so that
                    # creating a data frame will work as expected.
                    unique_names <- unique(groups_names)
                    groups <- llply(unique_names, function(x) groups[groups_names == x])
                    groups <- llply(groups, unlist, recursive = F)

                    # In case there are multiple styles applied, we only use the first.
                    groups <- unique(as.vector(groups[[1]]))
                }
            }
            else
            {
                groups <- extractSeqlevelsByGroup ( species = ref_organism,
                                                    style = ref_style,
                                                    group = group )
                groups <- unique(as.vector(t(groups)))
            }

            # The provided VCF files may not contain all chromosomes that are
            # available in the reference genome. Therefore, we only take the
            # chromosomes that are actually available in the VCF file,
            # belonging to the filter group.
            groups <- intersect(groups, seqlevels(vcf))
            vcf <- keepSeqlevels(vcf, groups)
        }

        if (check_alleles)
        {
            # Find and exclude positions with indels or multiple
            # alternative alleles.
            # NOTE(review): 'all' here appears to rely on Bioconductor's
            # list-aware method dispatch on vcf$ALT -- confirm it reduces
            # per position rather than collapsing to a single logical.
            rem <- which(all(!( !is.na(match(vcf$ALT, DNA_BASES)) &
                                !is.na(match(vcf$REF, DNA_BASES)) &
                                (lengths(vcf$ALT) == 1) )))

            if (length(rem) > 0)
            {
                vcf <- vcf[-rem]
                warning(length(rem),
                        " position(s) with indels and multiple",
                        " alternative alleles are removed.")
            }
        }

        return(vcf)
    }, mc.cores = num_cores, mc.silent = FALSE))

    # Set the provided names for the samples.
    names(vcf_list) <- sample_names
    return(vcf_list)
}
##
## Deprecated variants
##
# Defunct stub: kept so callers of the removed API get a helpful error
# pointing at read_vcfs_as_granges() instead of "could not find function".
read_vcf <- function(vcf_files, sample_names, genome="-", style="UCSC")
{
    .Defunct("read_vcfs_as_granges", package="MutationalPatterns",
             msg=paste("This function has been removed. Use",
                       "'read_vcfs_as_granges' instead. The new function",
                       "automatically renames the seqlevel style for you,",
                       "so you no longer need to run 'rename_chrom' either."))
}
# Defunct stub: delegates to read_vcf() purely for its error message.
vcf_to_granges <- function(vcf_files, sample_names, genome="-", style="UCSC")
{
    # Show the same error message as 'read_vcf()'.
    # (Calling read_vcf() with no arguments is safe: its first statement
    # is .Defunct(), which raises before any argument is evaluated.)
    read_vcf()
}
# Defunct stub: seqlevel-style renaming is now performed automatically by
# read_vcfs_as_granges(), per the message in read_vcf() above.
rename_chrom <- function(granges, style = "UCSC")
{
    .Defunct("rename_chrom", package="MutationalPatterns",
             msg = paste("This function has been removed."))
}
|
41d3eda160e85a7b594c790369438221b273fc2b
|
5ceb1928a72ce2e1e3249cf6af607637c51f06d2
|
/R/R03.R
|
537121d4c26b65eba45ed2ae2cd43398b36fb80c
|
[] |
no_license
|
doyun0916/BIG_DATA
|
a9f9df40a51bbf763716c8d907842f510c528bc4
|
0c17a0e7f86a7dcab69a99a3bea68f1740f3d4ef
|
refs/heads/main
| 2023-02-26T01:35:58.121065
| 2021-01-28T00:55:53
| 2021-01-28T00:55:53
| 328,689,545
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 495
|
r
|
R03.R
|
# R03.R #
# Matrix #
# Tutorial script: creating matrices with matrix() (nrow/ncol/byrow) and
# subsetting them with [row, column] indices, including negative indices.
c(1,2,3,4)
m01 <- matrix(c(1,2,3,4)) # default shape: a single column (4 x 1)
m01
m02 <- matrix(c(1,2,3,4), nrow =2) # 2 x 2, filled column by column
m02
# NOTE(review): byrow = T uses the reassignable shortcut T; prefer TRUE.
m03 <- matrix(c(1,2,3,4), nrow = 2, byrow = T) # 2 x 2, filled row by row
m03
seq(1,9)
m04 <- matrix(c(1:9), nrow=3, ncol = 3, byrow=T) # 3 x 3, row-wise
m04
# 10 values into 3 rows: 10 is not a multiple of 3, so R recycles the
# vector and emits a warning.
m04 <- matrix(c(1:10), nrow=3, byrow=T)
m04
m04 <- matrix(seq(1,9),3,3,T) # same 3 x 3 matrix via positional args
m04
v05 <- seq(1,9)
m05 <- matrix(data = v05*10, nrow=3, byrow=T) # values 10..90, row-wise
m05
# Single-element lookups: [row, column].
m05[1,3]
m05[2,1]
m05[3,2]
# Sub-matrix selections with row/column ranges.
m05[2:3, 2:3]
m05[2:3, 1:2]
# Empty row index selects all rows of the given column(s).
m05[,2]
m05[,1:2]
# Negative indices drop the given row/column.
m05[-2,-2]
|
38274dfe16a1494f215c679a30b00372d0cad512
|
41a7fe696b9339ae11bcbb8fb3d49dbef9daa186
|
/Broad-Rush/mergeROSMAPMethylationData.R
|
789732d4652570b236f596fe0cdac168a3759bcd
|
[] |
no_license
|
alma2moon434/ampAdScripts
|
88f8386fa66adaaf0772ea492ac68fa162079294
|
04e3f388b5c552283865064f2f57fe28494fb0a3
|
refs/heads/master
| 2020-04-06T13:45:07.562444
| 2016-04-19T04:19:46
| 2016-04-19T04:19:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,570
|
r
|
mergeROSMAPMethylationData.R
|
library(synapseClient)
library(plyr)
library(dplyr)
library(reshape2)
library(rGithubClient)
synapseLogin()
## Get this script (stored as provenance on the uploaded file below)
thisRepo <- getRepo("Sage-Bionetworks/ampAdScripts")
thisScript <- getPermlink(thisRepo, "Rush-Broad/mergeROSMAPMethylationData.R")
# Function to get all files from Synapse: downloads the entity for one query
# row and reads its tab-delimited contents into a data frame.
readData <- function(x) {
  o <- synGet(x$id)
  read.delim(o@filePath)
}
## Data in this folder
folderId <- "syn2701448"
q <- sprintf("select id,name from file where parentId=='%s'", folderId)
res <- tbl_df(synapseQuery(q))
# Strip the "file." prefix that synapseQuery puts on column names.
colnames(res) <- gsub(".*\\.", "", colnames(res))
## Get list of annotation files (names starting with "ill450kAnno")
resAnnot <- filter(res, grepl("^ill450kAnno", name))
## Make merged annotation: download + read each file, stack the results
mergedAnnot <- ddply(resAnnot, .(id), readData)
mergedAnnot$id <- NULL
## sort annotations on chromosome and probe TargetID
mergedAnnot <- arrange(mergedAnnot, CHR, TargetID)
## Write merged annotation
consortium <- "AMP-AD"
study <- "ROSMAP"
center <- "Rush-Broad"
platform <- "IlluminaHumanMethylation450"
other <- "metaData"
extension <- "tsv"   # NOTE(review): assigned but not used below ("tsv" is inlined)
organism <- "human"
newannotfilename <- paste(paste(consortium, study, center, platform, other, sep="_"),
                          "tsv", sep=".")
write.table(mergedAnnot, file=newannotfilename, sep="\t", row.names=FALSE, quote=FALSE)
# Upload the merged table with provenance (source files + this script).
synannotfile <- File(newannotfilename, parentId="syn3157275",
                     name=paste(consortium, study, center, platform, other, sep="_"))
act <- Activity(name="Merge files", used=as.list(resAnnot$id), executed=thisScript)
generatedBy(synannotfile) <- act
synSetAnnotations(synannotfile) <- list(consortium=consortium, study=study, center=center, platform=platform,
                                        dataType="metaData", organism=organism)
synannotobj <- synStore(synannotfile)
## Update the master table
# masterTable <- synGet("syn3163713")
# parentId <- "syn2701448"
# q <- paste("select * from ", masterTable@properties$id,
# " where originalParentId='", parentId, "' AND oldFileName LIKE 'ill450kAnno%'", sep="")
# res <- synTableQuery(q)
#
# res@values$newSynapseId <- "syn3168775"
# res@values$isMigrated <- TRUE
# res@values$hasAnnotation <- TRUE
# res@values$hasProvenance <- TRUE
# synStore(res)
#
# q <- paste("select * from ", masterTable@properties$id,
# " where originalParentId='", parentId, "' AND oldFileName LIKE 'ill450kMeth%'", sep="")
#
# res <- synTableQuery(q)
#
# res@values$newSynapseId <- "syn3168763"
# res@values$isMigrated <- TRUE
# res@values$hasAnnotation <- TRUE
# res@values$hasProvenance <- TRUE
# synStore(res)
|
da0ea9dc6b03ffe143277f9d6745195be7ba8e8a
|
9eab973e373e12f170dbbba8dffbb18f2038bd54
|
/papers/Rpaper1/figures/plot.chi2.R
|
00b987752588455766548a4468e44ea41a82ec5f
|
[
"CC-BY-4.0"
] |
permissive
|
richarddmorey/nullHistoryAMPPS
|
f1c36cb40d862ec1bd473cfdb31b882c7de68e64
|
2256b9fe547e957e636b0fb7a1b2b70c36a8e836
|
refs/heads/master
| 2021-09-13T16:24:44.933397
| 2018-05-02T07:35:01
| 2018-05-02T07:35:07
| 112,319,821
| 4
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 429
|
r
|
plot.chi2.R
|
# Draw a chi-square(1) density, shade its central 95% region (between the
# 2.5% and 97.5% quantiles), and mark an observed value with a red line.
plot(0,0,ty='n',ylim = c(0,1.2), xlim=c(0,10), xaxs='i',yaxs='i',
     main = expression(paste(chi^2,"(1) distribution",sep="")),
     ylab = "Density", xlab="Squared deviation from expectation", axes=FALSE)
# 200 evenly spaced x-values spanning the central 95% of the distribution.
xx = seq(qchisq(.025,1), qchisq(.975,1), len = 200)
# Shade under the density: top edge is the density, bottom edge is y = 0.
polygon(c(xx,rev(xx)), c(dchisq(xx,1), xx*0), border = NA, col="gray")
curve( dchisq(x, df=1), 0, 10, add=TRUE, lwd=2)
axis(1)
box()
# Observed squared deviation highlighted in red.
abline(v = 0.263, col = "red")
|
d8517f2b9631768b680bc529d54b3a0c0d74b735
|
4acd939710367338d5e1dc08a3b70c8b948f303a
|
/day1.R
|
f7a5797529ab795290d7ebced6645a962eb5f865
|
[] |
no_license
|
amittiwari18/R-Language
|
e9298f669e4c6e81f3aab4de8d73fff649a1f9b4
|
30de78027f32d3c93adc9412fa39d351a553e157
|
refs/heads/master
| 2020-04-28T20:48:05.639119
| 2019-03-14T07:06:21
| 2019-03-14T07:06:21
| 164,835,086
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 492
|
r
|
day1.R
|
# Introductory exercises: arithmetic, sequences, and string helpers.
x<- 3
y<- 4
x*y
(8/2)-(x*y)
8/(2-x)*y
# Integer sequences created with the : operator (ascending and descending)
z<- 1:20
w<- 20:1
# Character vector created with paste(): "label 1" ... "label 30"
a<- "label"
k<- 1:30
paste(a,k)
# Word "brown" replaced with word "red" using sub()
s<- "The quick brown fox jumps over the lazy dog"
sub("brown","red",s)
# Word "fox" extracted using substr() (characters 17-19 of s)
substr(s, start=17, stop=19)
# Perimeter of a rectangle calculated
b=2
h=3
p<- 2*(b+h)
# rep() with length.out=10 truncates the repetition to ten 4s
rep(c(4,6,3), each=10, length.out=10)
|
43fbb87264320ccfced9f8d5f1f8fe083deef111
|
3411a6cb316c664d37af1640b2b8e929aa607aa8
|
/cell_annotation.R
|
11464651c5230a5b7049d9a0253c7ce5979f2c39
|
[] |
no_license
|
rongfan8/DBiT-seq_FFPE
|
dfcc3aad43f82c53de761e6f5302e02238b6d17c
|
47992a89dc7f2d1f978651f09b4fc731ef0f7765
|
refs/heads/master
| 2022-12-06T10:58:12.606385
| 2020-08-26T13:31:48
| 2020-08-26T13:31:48
| 290,505,811
| 1
| 0
| null | 2020-08-26T13:34:49
| 2020-08-26T13:34:48
| null |
UTF-8
|
R
| false
| false
| 20,579
|
r
|
cell_annotation.R
|
library(Seurat)
library(SeuratData)
library(ggplot2)
library(patchwork)
library(dplyr)
library(rhdf5)
library(Matrix)
library(sctransform)
library(plyr)
library(gridExtra)
library(magrittr)
library(tidyr)
library(raster)
library(OpenImageR)
library(ggpubr)
library(grid)
library(wesanderson)
dir <- "C:/Users/EDIC-THINKPAD/Desktop/FFPEs/FFPE-2/cell_annotation"
setwd("C:/Users/EDIC-THINKPAD/Desktop/FFPEs/FFPE-2/cell_annotation")
# Change the file name below to the annotation table you want to load.
data1 <- read.table("annotation.tsv", header = TRUE, sep = "\t", row.names = 1)
levels(data1$Name)
# Strip the "<=" marker some exported measurements carry so the values can be
# parsed as numbers.  Note: this lapply/gsub pass coerces every column to
# character, hence the explicit numeric conversions below.
data1[] <- lapply(data1, gsub, pattern='<=', replacement='')
# BUG FIX: the original line was `data1$Sox2 <- as.numeric(data1[])`, which
# errors at runtime because a whole data.frame (a list) cannot be coerced
# with as.numeric().  Only the Sox2 column itself should be converted here.
data1$Sox2 <- as.numeric(data1$Sox2)
# Convert and z-score every column except the first two (assumed to be the
# identifier columns -- TODO confirm against the input file's layout).
cols.num = -c(1,2)
data1[cols.num] <- sapply(data1[cols.num],as.numeric)
data1[cols.num] <- sapply(data1[cols.num],scale)
# Reshape to long format: one row per (cell, gene) pair for ggplot.
longdata10 <- gather(data = data1, key = Class, value = Abundance, -c(1:2))
levels(longdata10$Name)
longdata10$Abundance <- as.numeric(longdata10$Abundance)
# Fix the plotting order of the cell-type (Name) factor so related clusters
# (all TEGLU*, MSN*, TEINH*, ... groups) appear as contiguous blocks in the
# heatmap.  The level set and its order are unchanged from the original.
name_levels <- c(
  "TEGLU1", "TEGLU3", "TEGLU2", "TEGLU20", "TEGLU11", "TEGLU12", "TEGLU10",
  "TEGLU9", "TEGLU8", "TEGLU7", "TEGLU6", "TEGLU13", "TEGLU14", "TEGLU5",
  "TEGLU16", "TEGLU15", "TEGLU17", "TEGLU18", "TEGLU19", "TEGLU22", "TEGLU21",
  "TEGLU4", "TEGLU24", "TEGLU23", "DGGRC1", "DGGRC2", "MSN1", "MSN2", "MSN3",
  "MSN4", "MSN5", "MSN6", "DETPH", "DGNBL2", "DGNBL1", "SZNBL", "OBNBL3",
  "OBINH1", "OBINH5", "OBINH2", "OBINH3", "OBINH4", "OBNBL4", "OBNBL5",
  "OBDOP", "OBINH6", "DEINH1", "DEINH2", "TEINH17", "TEINH18", "TEINH19",
  "TEINH21", "TEINH16", "TEINH15", "TEINH14", "TEINH20", "TEINH13", "TEINH12",
  "TEINH9", "TEINH10", "TEINH11", "TEINH4", "TEINH5", "TEINH8", "TEINH7",
  "TEINH6", "TECHO", "DECHO1", "HBCHO4", "HBCHO3", "HBADR", "HBNOR", "HYPEP7",
  "HYPEP6", "MEGLU14", "MBDOP1", "MBDOP2", "HBSER1", "HBSER2", "HBSER3",
  "HBSER5", "HBSER4", "TEINH3", "TEINH2", "DEINH4", "DEINH5", "HYPEP3",
  "HYPEP1", "HYPEP2", "MEINH14", "DEINH6", "DEINH8", "DEINH7", "HYPEP5",
  "HYPEP4", "HYPEP8", "SCINH11", "SCINH10", "SCINH9", "SCINH8", "SCINH7",
  "SCINH6", "SCINH5", "SCINH4", "SCINH3", "HBINH9", "SCINH2", "SCGLU1",
  "SCGLU2", "SCGLU3", "SCGLU4", "SCGLU5", "SCGLU6", "SCGLU7", "SCGLU8",
  "SCGLU9", "SCGLU10", "HBGLU10", "HBGLU3", "HBGLU2", "MEGLU2", "MEGLU3",
  "DEGLU5", "MEGLU1", "MEGLU7", "MEGLU8", "MEGLU9", "MEGLU10", "MEGLU11",
  "MBCHO1", "MEGLU6", "MEGLU5", "MEGLU4", "CR", "DECHO2", "HBGLU1", "DEGLU1",
  "DEGLU2", "DEGLU3", "DEGLU4", "MEINH12", "MEINH11", "MEINH10", "MEINH9",
  "MEINH5", "MEINH6", "MEINH7", "MEINH4", "MEINH3", "HBINH5", "MEINH2",
  "DEINH3", "TEINH1", "MEINH13", "MEINH8", "HBINH1", "HBINH3", "HBINH4",
  "HBINH6", "HBINH2", "HBCHO1", "HBCHO2", "HBGLU4", "HBGLU5", "HBGLU6",
  "HBGLU7", "HBGLU8", "HBGLU9", "HBINH7", "HBINH8", "SCINH1", "CBINH2",
  "MEINH1", "CBINH1", "CBPC", "CBGRC", "CBNBL2", "CBNBL1", "SEPNBL", "OBNBL1",
  "OBNBL2", "ENT1", "ENT2", "ENT3", "ENT4", "ENT5", "ENT6", "ENT7", "ENT8",
  "ENT9", "SYNOR1", "SYNOR2", "SYNOR3", "SYNOR4", "SYNOR5", "SYCHO2",
  "SYCHO1", "PSPEP8", "PSPEP7", "PSPEP6", "PSPEP5", "PSPEP2", "PSPEP4",
  "PSPEP3", "PSPEP1", "PSNF3", "PSNF2", "PSNF1", "PSNP1", "PSNP2", "PSNP3",
  "PSNP4", "PSNP5", "PSNP6", "COP1", "COP2", "NFOL2", "NFOL1", "MFOL2",
  "MFOL1", "MOL1", "MOL2", "MOL3", "CHOR", "HYPEN", "EPSC", "EPEN", "EPMB",
  "RGDG", "RGSZ", "ACTE1", "ACTE2", "ACOB", "ACNT1", "ACNT2", "ACMB", "ACBG",
  "OEC", "OPC", "SCHW", "SATG2", "SATG1", "ENTG1", "ENTG2", "ENTG3", "ENTG4",
  "ENTG5", "ENTG6", "ENTG7", "ENMFB", "ABC", "VLMC2", "VLMC1", "VECA", "PER3",
  "VSMCA", "PER1", "PER2", "VECC", "VECV", "PVM1", "PVM2", "MGL3", "MGL2",
  "MGL1"
)
longdata10$Name <- factor(longdata10$Name, levels = name_levels)
# Fix the row order of the gene (Class) factor for the heatmap; the level
# set and its order are unchanged from the original script.
class_levels <- c(
  "Nnat", "Map1b", "Tubb3", "Stmn2", "Dpysl3", "Tuba1a", "Ina", "Elavl3",
  "Mapt", "Malat1", "Col3a1", "Dkk2", "Col1a2", "Col1a1", "Meg3", "Nfib",
  "Cdkn1c", "Igfbp5", "Hmga2", "Prrx1", "Mest", "Igf2", "H19", "Hist1h2ao",
  "Hist1h2ap", "Chd7", "Pea15a", "Zic1", "Hes5", "Ttyh1", "Sox9", "Fabp7",
  "Sox2"
)
longdata10$Class <- factor(longdata10$Class, levels = class_levels)
# Sanity check: print the reordered gene levels.
levels(longdata10$Class)
# Heatmap of scaled marker abundance: one tile per (cluster, gene) pair,
# white -> dark blue with increasing abundance; axis labels suppressed.
ggplot(longdata10, aes(Name, Class)) +
  geom_tile(aes(fill = Abundance), color = "white") + scale_fill_gradientn(colors= c("white", "darkblue")) +
  theme(axis.title=element_blank(),
        legend.position = "none",
        axis.text.x = element_blank())
|
c1ff90f2da7a9ab0559aebac98e6fadf527363f6
|
c8b76c289224a86d20c8a9e60d45777867946c26
|
/PredictionFuncs.R
|
4a9eec8ce5e961fe94e093e951e913ac1a9a2347
|
[] |
no_license
|
drewCo2/DS-Capstone
|
0a35a830c5e7d0bc2c31445aadb013130a46a147
|
e891cd5e08afe008796e3a5a4d84f94a270332ee
|
refs/heads/master
| 2020-04-02T05:45:11.969390
| 2016-06-23T21:47:48
| 2016-06-23T21:47:48
| 60,376,073
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,095
|
r
|
PredictionFuncs.R
|
library(quanteda)
library(dplyr)
# Build a condition string usable with dplyr's 'filter_' function that
# matches an n-gram row whose token columns t1..tn equal the given tokens,
# e.g. c("a", "b") -> "t1=='a' & t2=='b'".
# Returns "" for an empty token vector (the original 1:n loop misbehaved
# there, iterating over c(1, 0)).
makeFilter<-function(tokens)
{
  # sprintf() is vectorized, so one call formats every clause; seq_along()
  # is safe for zero-length input where 1:length(tokens) is not.
  clauses <- sprintf("t%d=='%s'", seq_along(tokens), tokens)
  paste(clauses, collapse=" & ")
}
# Split free text into word tokens: punctuation is turned into spaces, the
# result is split on single spaces, and empty fragments are dropped.
getTokens<-function(input)
{
  cleaned <- gsub("[[:punct:]]", " ", input)
  pieces <- strsplit(cleaned, split=" ")
  # Flatten and keep only non-empty fragments (double spaces produce "").
  unlist(lapply(pieces, function(p) p[!p == ""]))
}
# Return up to the last n tokens of the stream; the whole vector when it has
# n or fewer elements.
getLastTokens<-function(tokens, n)
{
  total <- length(tokens)
  if (total <= n) {
    return(tokens)
  }
  tokens[(total - n + 1):total]
}
# Look up the best next-word candidate in the n-gram table `df` using the
# last `n` tokens of `tokens` as the context.  Returns a one-row data frame
# (token, p); the token is "" with p = 0 when nothing matches.
# Layout assumption: df's last two columns are the predicted token and its
# probability, preceded by context columns t1..tn -- TODO confirm upstream.
# NOTE(review): dplyr::filter_() is deprecated; consider migrating to
# filter() with parsed expressions when this is next touched.
getMatchGroups<-function(tokens, n, df)
{
  if(n == 0)
  {
    # No context: fall back to the single most frequent entry.
    match<-head(df,1)
  }
  else
  {
    # we actually need a better windowing function here. Getting the last few doesn't really make sense.
    t<-getLastTokens(tokens, n)
    filter<-makeFilter(t)
    match<-filter_(df, .dots=filter)
  }
  if(nrow(match) > 0)
  {
    # Take the top listed item, assuming this is our best guess.
    # Later we could determine tie breakers by looking at our distributions of unigrams.
    best <- head(match, 1)
    nc<-ncol(best)
    # Last two columns of the row are (token, probability).
    res<-data.frame(token=best[1,nc-1], p=best[1,nc], stringsAsFactors = FALSE)
    res
  }
  else
  {
    # Sentinel "no match" result so callers can filter on token == "".
    res<-data.frame(token="", p=0, stringsAsFactors = FALSE)
    res
  }
}
# Predict the next word for `input` using a backoff over the n-gram tables in
# `model` (model[[k]] holds the k-gram table; model[[1]] are unigrams).
# Returns list(bestWord = <top guess>, nextBest = <up to 5 alternatives>).
guessWord<-function(model, input)
{
  srcTokens<-getTokens(input)
  # Determine the max number of context tokens we can predict on: limited by
  # both the input length and the largest n-gram table available.
  maxPredict<- min(length(srcTokens), length(model) - 1)
  # Back off from the longest context down to 0 (unigram fallback).
  useLens<-maxPredict:0
  allMatches<-lapply(useLens, function(x) getMatchGroups(srcTokens, x, model[[x+1]]))
  # Collect every non-empty match, longest context first, so earlier rows are
  # the higher-order (preferred) guesses.
  withWord = data.frame(token=character(), p=numeric())
  for (i in 1:length(allMatches)) {
    # A "" token is getMatchGroups' no-match sentinel; skip those.
    word<-allMatches[[i]][1,1]
    if(word != "")
    {
      withWord<-rbind(withWord, allMatches[[i]])
    }
  }
  # Always tack on the top unigrams so there is a list of next-best guesses
  # even when no higher-order n-gram matched.
  bestN<-5
  toMerge<-head(model[[1]], bestN)
  names(toMerge)<-c("token", "p")
  withWord<-rbind(withWord, toMerge)
  # Deduplicate while keeping first (highest-order) occurrence of each token.
  # NOTE(review): with modern dplyr, distinct(withWord, token) keeps ONLY the
  # token column (p is dropped unless .keep_all = TRUE); withWord[1,1] and
  # $token below still work, but confirm dropping p is intended.
  withWord<-distinct(withWord, token)
  maxPicks = min(bestN, nrow(withWord)-1)
  res<-list(bestWord = withWord[1,1], nextBest=tail(withWord, maxPicks)$token)
  res
}
#
# g<-guessWord(textModel, "this is a case of")
# g
#
|
38c5676984b8a2e88233715f41060258309a6415
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/GSE/man/TSGS-class.Rd
|
9a212dccdc9b9a310a3e52b22336f11f08662f33
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,270
|
rd
|
TSGS-class.Rd
|
\name{TSGS-class}
\docType{class}
\alias{TSGS-class}
\alias{getFiltDat,TSGS-method}
\title{Two-Step Generalized S-Estimator for cell- and case-wise outliers}
\description{Class of the Two-Step Generalized S-Estimator. Its superclass is \code{GSE}. }
\section{Objects from the Class}{
Objects can be created by calls of the form \code{new("TSGS", ...)},
but the best way of creating \code{TSGS} objects is a call to the function
\code{TSGS} which serves as a constructor.
}
\section{Slots}{
\describe{
\item{\code{mu} }{ Estimated location. Can be accessed via \code{\link{getLocation}}. }
\item{\code{S} }{ Estimated scatter matrix. Can be accessed via \code{\link{getScatter}}. }
\item{\code{xf} }{ Filtered data matrix from the first step of 2SGS. Can be accessed via \code{\link{getFiltDat}}. }
}
}
\section{Extends}{ Class \code{"\linkS4class{GSE}"}, directly. }
\section{Methods}{
In addition to the methods defined in the superclass "GSE", the following methods are also defined:
\describe{
\item{getFiltDat}{\code{signature(object = "TSGS")}: return the filtered data matrix.}
}
}
\author{ Andy Leung \email{andy.leung@stat.ubc.ca} }
\seealso{
\code{\link{TSGS}}, \code{\link{GSE}}, \code{\link{GSE-class}}
}
\keyword{classes}
|
42335dd8b5097ca66c09b139408f844e83cf99f2
|
05a095367c9970e09044f1dd36e9a45321d66c64
|
/DMC2/dmc2.R
|
359e42fdfb30376027d74c7865359d6eebe4ae14
|
[] |
no_license
|
taimir/DMC2015-2016
|
712581957dee90dfc0ce856ecedf5c6d813ee98e
|
71084f81b7a34c82725ab929975f5f891880c2c2
|
refs/heads/master
| 2021-01-10T03:15:06.755473
| 2016-01-19T20:55:52
| 2016-01-19T20:55:52
| 49,517,681
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,514
|
r
|
dmc2.R
|
# Business Analytics
# Data Mining Cup Introduction
#
# Please note, that this script only has the nature of proposal. It provides useful functions for the steps of data mining but does not cover all possibilities.
# The caret package is used (http://topepo.github.io/caret/index.html)
#install.packages("caret")
library(caret)
library(plyr)
# For reasons of traceability you must use a fixed seed
set.seed(42) # do NOT CHANGE this seed
######################################################
# 2. Load
training_data = read.csv("training.csv", sep=",")
test_data = read.csv("test.csv", sep=",")
######################################################
# 3. Data Preparation
# (using both training and test data)
# do NOT DELETE any instances in the test data
# Rename columns: fix typos in the source data's headers
# ("ProperyDamage" -> "PropertyDamage", "WorkZOne" -> "WorkZone").
names(training_data)[names(training_data) == 'ProperyDamage'] <- 'PropertyDamage'
names(test_data)[names(test_data) == 'ProperyDamage'] <- 'PropertyDamage'
names(training_data)[names(training_data) == 'WorkZOne'] <- 'WorkZone'
names(test_data)[names(test_data) == 'WorkZOne'] <- 'WorkZone'
# Nominal attributes
# Re-factor one column of `data` so it carries exactly the supplied levels;
# values outside `levels` become NA (standard factor() behavior).
setLevels <- function (data, attr, levels) {
  data[[attr]] <- factor(data[[attr]], levels = levels)
  data
}
# Reduce charge (disabled experiment, kept for reference)
# training_data$Charge <- as.factor(sub("-.*", "", training_data$Charge))
# test_data$Charge <- as.factor(sub("-.*", "", test_data$Charge))
# Unify Yes/No fields: give every boolean-like column the same two-level
# factor encoding in both data sets.
# --------------------------------------------------
levels <- c("No", "Yes")
yes_no_attrs <- c("Accident", "Belts", "PersonalInjury", "PropertyDamage",
                  "Fatal", "CommercialLicense", "HAZMAT", "CommercialVehicle",
                  "Alcohol", "WorkZone", "ContributedToAccident")
# training data
for (col in yes_no_attrs) {
  training_data <- setLevels(training_data, col, levels)
}
# test data
for (col in yes_no_attrs) {
  test_data <- setLevels(test_data, col, levels)
}
# Re-factor one column of `data` using the union of the levels found in the
# GLOBAL training_data and test_data for that column, so both data sets end
# up with an identical level set (required for scoring test on a model fit
# to training).
# NOTE(review): the level union is computed from the globals, not from the
# `data` argument -- call this only after both data sets are loaded.
unifyLevels <- function(data, attr) {
  levels <- union(levels(training_data[, attr]), levels(test_data[, attr]))
  data[,attr] <- factor(data[,attr], levels=levels)
  return(data)
}
# Align factor levels between training and test for every shared nominal
# attribute (same calls and order as the original per-column statements).
nominal_attrs <- c("State", "VehicleType", "Make", "Model", "Color", "Charge",
                   "Race", "Gender", "DriverCity", "DriverState", "DLState",
                   "ArrestType")
for (nm in nominal_attrs) {
  training_data <- unifyLevels(training_data, nm)
  test_data <- unifyLevels(test_data, nm)
}
# Remove missing features
# ----------------------------------------------------
# Drop free-text / identifier columns that are not used for modelling.
drop <- c("TimeOfStop", "Agency", "SubAgency", "Geolocation", "Article")
training_data <- training_data[,!(names(training_data) %in% drop)]
test_data <- test_data[,!(names(test_data) %in% drop)]
# Fix errors
# ---------------------------------------------------
# 2077 is treated as an entry typo for 2007; Year 0 is treated as missing.
# NOTE(review): the median on the next line is computed while the 0s are
# still present in the column -- confirm that is intended.
training_data$Year[training_data$Year == 2077] = 2007
training_data$Year[training_data$Year == 0] = median(training_data$Year)
training_data$Year <- as.integer(training_data$Year)
# New fields
# ----------------------------------------------------
# Scan the free-text Description column for rows matching ALL words in
# `searches` (case-insensitive) and flag them with "Yes" in the factor
# column `fieldName` (created as all-"No" if absent).  Repeated calls with
# the same fieldName OR-in additional keyword sets: a later call only adds
# "Yes" rows, never clears existing ones.
extractField <- function(data, searches, fieldName) {
  # seq_len() is safe for an empty data frame, where the original
  # 1:length(data$Description) would yield c(1, 0).  (Also renamed the local
  # from `filter` to avoid shadowing dplyr::filter.)
  matched <- seq_len(nrow(data))
  # Intersect down to the rows matching every searched word.
  for (word in searches) {
    matched <- intersect(matched, grep(word, data$Description, ignore.case = TRUE))
  }
  if(!fieldName %in% colnames(data)) {
    data[,fieldName] <- factor("No", levels = c("No", "Yes"))
  }
  data[matched,fieldName] <- factor("Yes", levels = c("No", "Yes"))
  return(data)
}
# Derive keyword indicator columns from the free-text Description in both
# data sets.  Each spec is c(<field name>, <search words...>); repeating a
# field name ORs-in an additional keyword set (extractField only adds "Yes"
# rows on later passes).  Order of application is identical to the original
# per-call sequence.
field_specs <- list(
  c("Alcohol", "ALCOHOL"),
  c("Speed", "SPEED"),
  c("Speed", "EXCEEDING"),
  c("Accident", "ACCIDENT"),
  c("Belts", "SEATBELT"),
  c("PersonalInjury", "INJUR"),
  c("PropertyDamage", "DAMAGE"),
  c("Fatal", "FATAL"),
  c("Fatal", "DEATH"),
  c("Fatal", "DEAD"),
  c("CommercialLicense", "COMMERCIAL LICENSE"),
  c("License", "LICENSE"),
  c("HAZMAT", "HAZARD"),
  c("CommercialVehicle", "COMMERCIAL VEHICLE"),
  c("WorkZone", "WORKZONE"),
  c("ContributedToAccident", "ACCIDENT"),
  c("Life", "LIFE"),
  c("Danger", "DANGER"),
  c("Drug", "DRUG"),
  c("Drug", "SUBSTANCE"),
  c("Crosswalk", "CROSSWALK"),
  c("Registration", "REG."),
  c("Registration", "REGIST"),
  c("Lights", "LIGHTS"),
  c("Lights", "LAMP"),
  c("Phone", "PHONE"),
  c("RedSignal", "RED", "SIGNAL"),
  c("MedCert", "MEDICAL", "CERTIFICATE"),
  c("RightOfWay", "RIGHT OF WAY"),
  c("RightOfWay", "RIGHT-OF-WAY"),
  c("Highway", "HIGHWAY"),
  c("NoPassing", "NO PASSING"),
  c("Insurence", "INSURE"),
  c("Turn", "TURN"),
  c("Pedestrian", "PEDESTRIAN"),
  c("Child", "CHILD"),
  c("Passenger", "PASSENGER"),
  c("Passenger", "OCCUPANT"),
  c("Stop", "STOP"),
  c("Tire", "TIRE"),
  c("Signs", "SIGNS"),
  c("Police", "POLICE"),
  c("Police", "OFFICER"),
  c("Lane", "LANE")
)
for (spec in field_specs) {
  training_data <- extractField(training_data, spec[-1], spec[1])
  test_data <- extractField(test_data, spec[-1], spec[1])
}
# TODO: add the WOEID field (geolocation) by using the twitteR package
# library(twitteR)
# # For a given lat and long:
# closestTrendLocations(lat, long, ...)[1, "woeid"]
# Handle NA values
# ----------------------------------------------------
# Only for longitude and latitude, replace with median
# remove NA Year and NA color
# NOTE(review): despite the comment above, the code imputes with caret's
# knnImpute (k = 5), which also centers/scales the coordinates -- confirm
# that the transformed values are acceptable downstream.
naCols = c("Longitude", "Latitude")
pp<- preProcess(training_data[naCols], method = c("knnImpute"), k = 5)
preprocessed <- predict(pp, newdata = training_data[naCols])
training_data$Latitude = preprocessed$Latitude
training_data$Longitude = preprocessed$Longitude
# Test data gets its own imputation model fit on the test coordinates.
pp <- preProcess(test_data[naCols], method = c("knnImpute"), k = 5)
preprocessed <- predict(pp, newdata = test_data[naCols])
test_data$Latitude = preprocessed$Latitude
test_data$Longitude = preprocessed$Longitude
# remove other NA values (training only; test instances must be kept)
training_data <- training_data[complete.cases(training_data),]
# colSums(is.na(training_data))
#
# colSums(is.na(test_data))
# # Discretize Longitude and Latitude
# # ----------------------------------------------------
# # TODO: do this by considering longitude and lat. at the same time (squares in 2D space)
# # So far I've seen no obvious "separation" in the locations, so
# # discretization with a single rule seems meaningless
# # install.packages("arules")
# library(arules)
# # Equal frequency binning
# equiFreqLatitude = discretize(training_data$Latitude, categories=18, method="cluster", onlycuts=TRUE)
# training_data$LatitudeDiscr = cut(training_data$Latitude, breaks=equiFreqLatitude, ordered_result=TRUE, right=FALSE)
# test_data$LatitudeDiscr = cut(test_data$Latitude, breaks=equiFreqLatitude, ordered_result=TRUE, right=FALSE)
# # table(training_data$LatitudeDiscr, useNA="ifany")
# # str(training_data)
#
# equiFreqLongitude = discretize(training_data$Longitude, categories=18, method="cluster", onlycuts=TRUE)
# training_data$LongitudeDiscr = cut(training_data$Longitude, breaks=equiFreqLongitude, ordered_result=TRUE, right=FALSE)
# test_data$LongitudeDiscr = cut(test_data$Longitude, breaks=equiFreqLongitude, ordered_result=TRUE, right=FALSE)
# # table(training_data$LongitudeDiscr, useNA="ifany")
# str(training_data)
# Multicolinearity
# ----------------------------------------------------
# * Check columns
# * remove ones that are collinear
# Feature selection
# ----------------------------------------------------
#install.packages("FSelector")
library(FSelector)
# Calculate weights for the attributes using Info Gain and Gain Ratio
weights_info_gain = information.gain(Citation ~ ., data=training_data)
# Print the attributes sorted by importance (result not stored).
weights_info_gain[order(-weights_info_gain$attr_importance), , drop = FALSE]
# Keep the highest-weighted 90% of attributes and build the reduced formula
# used for model training below.
most_important_attributes <- cutoff.k.percent(weights_info_gain, 0.9)
reduced_formula <- as.simple.formula(most_important_attributes, "Citation")
# # Remove some excess fields
# training_data$Latitude <- NULL
# test_data$Latitude <- NULL
# training_data$Longitude <- NULL
# test_data$Longitude <- NULL
# training_data$Description <- NULL
# test_data$Description <- NULL
# test_data$Color <- NULL
# training_data$Color <- NULL
# test_data$Year <- NULL
# training_data$Year <- NULL
#
# # TODO: decide whether to remove description
#
# # Drop the id column
# training_data$id <- NULL
# # Drop driverCity and model
# training_data$DriverCity <- NULL
# training_data$Model <- NULL
# test_data$DriverCity <- NULL
# test_data$Model <- NULL
# # COMMERCIAL VEHICLE and YEAR seem to be of no importance, remove them
# training_data$Year <- NULL
# test_data$Year <- NULL
# training_data$CommercialVehicle <- NULL
# test_data$CommercialVehicle <- NULL
######################################################
# 4. Training & Evaluation
# List of classifiers in Caret: http://topepo.github.io/caret/modelList.html
# Caret tutorial for model training and tuning
#http://topepo.github.io/caret/training.html
# Partition the training set (30% for fitting) for faster model training.
#
# Fixes relative to the original:
#  * caret is attached BEFORE createDataPartition() is called (the original
#    loaded it only afterwards, so the first call worked only if caret
#    happened to be attached already);
#  * a seed is set so the random partition and the CV folds are reproducible.
library(caret)
set.seed(42)
InTrain <- createDataPartition(y = training_data$Citation, p = 0.3, list = FALSE)
training_small <- training_data[InTrain, ]   # 30% used to fit the model
test_small <- training_data[-InTrain, ]      # remaining 70%, held out
# http://bigcomputing.blogspot.de/2014/10/an-example-of-using-random-forest-in.html
# check the proportions of the labels
prop.table(table(training_data$Citation)) # close enough
# Hand-built formula kept for reference; the model below uses reduced_formula
# produced by the FSelector step above.
manual_formula <- Citation ~ LatitudeDiscr + LongitudeDiscr + VehicleType + Charge + Race + ArrestType + Alcohol + Speed + Accident + Belts + PersonalInjury + PropertyDamage + Fatal + License + HAZMAT + CommercialLicense + WorkZone + Accident + Life + Danger + Drug + Crosswalk + Registration + Lights + Phone + RedSignal + MedCert + RightOfWay + Highway + NoPassing + Insurence + Turn + Pedestrian + Child + Passenger + Stop + Tire + Signs + Police + Lane
# Random forest with 5-fold cross-validation; NAs are excluded row-wise.
rf_model <- train(reduced_formula,
                  data = training_small,
                  method = "rf",
                  trControl = trainControl(method = "cv", number = 5),
                  prox = TRUE, allowParallel = TRUE, na.action = na.exclude)
print(rf_model)
print(rf_model$finalModel)
######################################################
# 5. Predict class labels for the held-out test set and export them as CSV.
predicted_labels <- predict.train(object = rf_model, newdata = test_data)
predictions <- data.frame(id = test_data$id, prediction = predicted_labels)
print(predictions)
write.csv(predictions, file = "predictions_dmc2.csv", row.names = FALSE)
|
1c7b134ef9008fc049029d36f99178ecf97f9f40
|
4f0d8fff4a5910661a1e0650e1e288322fe94eae
|
/app.R
|
f0d3ca5e8c50185af62454e729b8fa90d13a129b
|
[] |
no_license
|
josuejv/tripto
|
bb58d15dc68db8d02e98c64da501ebce9529be5e
|
3b6e2b821a7c11952140e30830719abf35c5a83b
|
refs/heads/main
| 2023-05-02T11:34:38.291771
| 2021-05-25T02:32:37
| 2021-05-25T02:32:37
| 370,523,500
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 43,945
|
r
|
app.R
|
## Application developed by Josué Jiménez Vázquez
# Load packages ----

# Install a package from CRAN if it is missing, then attach it.
# The original `if (!require(p)) install.packages(p)` pattern installed a
# missing package but never attached it afterwards, so a fresh machine
# still failed on first run; this helper installs AND loads.
ensure_package <- function(pkg) {
  if (!require(pkg, character.only = TRUE, quietly = TRUE)) {
    install.packages(pkg, repos = "http://cran.us.r-project.org")
    library(pkg, character.only = TRUE)
  }
}

invisible(lapply(
  c("shiny", "shinythemes", "shinydashboard", "tidyverse", "ggthemes",
    "shinyWidgets", "DT", "plotly", "lubridate", "scales", "xlsx"),
  ensure_package
))
# Source helper functions ----
# functions.R / values.R / helpers.R presumably define the UIcreator*,
# BtnsEvents and plotting helpers plus the `var_names` lookup used by the
# server below -- TODO confirm against those files.
source("functions.R")
source("values.R")
source("helpers.R")
# ----- Create the data files if they don't exist yet ----
createDF0()
# ------ Load persisted data ----
# `Finanzas` (captured records) and `nCapturas` (capture counters) are
# loaded into the global environment when previous sessions saved them.
if(file.exists("rdas/Finanzas.rda")){
  load(file = "rdas/Finanzas.rda")
}
if(file.exists("rdas/nCapturas.rda")){
  load(file = "rdas/nCapturas.rda")
}
# -------------_______UI______-------
# Top-level UI: a navbar with one tab per financial view (main dashboard,
# income, expenses, assets, debts, balance sheet). shinydashboard widgets
# (box, valueBox) are enabled inside a plain navbarPage via
# useShinydashboard() from shinyWidgets.
ui <- navbarPage(theme = shinytheme("flatly"), collapsible = TRUE, "Tripto", id = "nav",
                 ###### Here : insert shinydashboard dependencies ######
                 header = tagList(
                   useShinydashboard()
                 ),
                 #---- Main panel: month selector, summary value boxes,
                 #---- overall balance plot, and collapsible capture forms.
                 tabPanel("Principal",
                          fluidRow(
                            box(width = 4,
                                fluidRow(
                                  column(1),
                                  column(6, uiOutput("dateUI")
                                  ),
                                  column(2,
                                         br(),
                                         actionButton("MesAnterior", "Anterior", width = 90)
                                  ),
                                  column(2,
                                         br(),
                                         actionButton("MesSiguiente", "Siguiente", width = 90)),
                                  column(1)
                                ),
                                br(),
                                valueBoxOutput("boxIngresosMes", width = 12),
                                valueBoxOutput("boxEgresosMes", width = 12),
                                valueBoxOutput("boxDeudas", width = 12),
                                valueBoxOutput("boxActivos", width = 12)
                            ),
                            box(width = 8,
                                uiOutput("dateRangeUI"),
                                h4("BALANCE GENERAL", align = "center"),
                                plotlyOutput("BalanceGeneralPlot", height = '500px')
                            )
                          ),
                          #--------- INCOME capture forms (fixed / variable) --------
                          box(width = 6,
                              title = h3("INGRESOS", align = "center"),
                              collapsible = TRUE,
                              collapsed = TRUE,
                              box(width = 12,
                                  title = "INGRESOS FIJOS",
                                  collapsible = TRUE,
                                  collapsed = TRUE,
                                  uiOutput("N_IF_Ingresos"),
                                  uiOutput("IF_Ingresos"),
                                  uiOutput("Btn_IF_Ingresos")
                              ),
                              box(width = 12,
                                  title = "INGRESOS VARIABLES",
                                  collapsible = TRUE,
                                  collapsed = TRUE,
                                  uiOutput("N_IV_Ingresos"),
                                  uiOutput("IV_Ingresos"),
                                  uiOutput("Btn_IV_Ingresos")
                              )
                          ),
                          #--------- EXPENSE capture forms, one sub-box per budget
                          #--------- category; each title shows the remaining budget.
                          box(width = 6,
                              title = h3("EGRESOS", align = "center"),
                              collapsible = TRUE,
                              collapsed = TRUE,
                              box(width = 12,
                                  #title = "MANTENCIÓN",
                                  title = htmlOutput("PresMantencion"),
                                  collapsible = TRUE,
                                  collapsed = TRUE,
                                  box(width = 12,
                                      title = "GASTOS FIJOS",
                                      collapsible = TRUE,
                                      collapsed = TRUE,
                                      uiOutput("N_GF_Mantencion"),
                                      uiOutput("GF_Mantencion"),
                                      uiOutput("Btn_GF_Mantencion")
                                  ),
                                  box(width = 12,
                                      title = "GASTOS VARIABLES",
                                      collapsible = TRUE,
                                      collapsed = TRUE,
                                      uiOutput("N_GV_Mantencion"),
                                      uiOutput("GV_Mantencion"),
                                      uiOutput("Btn_GV_Mantencion")
                                  )
                              ),
                              box(width = 12,
                                  #title = "DIVERSIÓN",
                                  title = htmlOutput("PresDiversion"),
                                  collapsible = TRUE,
                                  collapsed = TRUE,
                                  box(width = 12,
                                      title = "GASTOS FIJOS",
                                      collapsible = TRUE,
                                      collapsed = TRUE,
                                      uiOutput("N_GF_Diversion"),
                                      uiOutput("GF_Diversion"),
                                      uiOutput("Btn_GF_Diversion")
                                  ),
                                  box(width = 12,
                                      title = "GASTOS VARIABLES",
                                      collapsible = TRUE,
                                      collapsed = TRUE,
                                      uiOutput("N_GV_Diversion"),
                                      uiOutput("GV_Diversion"),
                                      uiOutput("Btn_GV_Diversion")
                                  )
                              ),
                              box(width = 12,
                                  #title = "APRENDER",
                                  title = htmlOutput("PresAprender"),
                                  collapsible = TRUE,
                                  collapsed = TRUE,
                                  box(width = 12,
                                      title = "GASTOS FIJOS",
                                      collapsible = TRUE,
                                      collapsed = TRUE,
                                      uiOutput("N_GF_Aprender"),
                                      uiOutput("GF_Aprender"),
                                      uiOutput("Btn_GF_Aprender")
                                  ),
                                  box(width = 12,
                                      title = "GASTOS VARIABLES",
                                      collapsible = TRUE,
                                      collapsed = TRUE,
                                      uiOutput("N_GV_Aprender"),
                                      uiOutput("GV_Aprender"),
                                      uiOutput("Btn_GV_Aprender")
                                  )
                              ),
                              box(width = 12,
                                  #title = "VIAJAR",
                                  title = htmlOutput("PresViajar"),
                                  collapsible = TRUE,
                                  collapsed = TRUE,
                                  box(width = 12,
                                      title = "GASTOS FIJOS",
                                      collapsible = TRUE,
                                      collapsed = TRUE,
                                      uiOutput("N_GF_Viajar"),
                                      uiOutput("GF_Viajar"),
                                      uiOutput("Btn_GF_Viajar")
                                  ),
                                  box(width = 12,
                                      title = "GASTOS VARIABLES",
                                      collapsible = TRUE,
                                      collapsed = TRUE,
                                      uiOutput("N_GV_Viajar"),
                                      uiOutput("GV_Viajar"),
                                      uiOutput("Btn_GV_Viajar")
                                  )
                              ),
                              box(width = 12,
                                  #title = "INVERTIR",
                                  title = htmlOutput("PresInvertir"),
                                  collapsible = TRUE,
                                  collapsed = TRUE,
                                  box(width = 12,
                                      title = "GASTOS FIJOS",
                                      collapsible = TRUE,
                                      collapsed = TRUE,
                                      uiOutput("N_GF_Invertir"),
                                      uiOutput("GF_Invertir"),
                                      uiOutput("Btn_GF_Invertir")
                                  ),
                                  box(width = 12,
                                      title = "GASTOS VARIABLES",
                                      collapsible = TRUE,
                                      collapsed = TRUE,
                                      uiOutput("N_GV_Invertir"),
                                      uiOutput("GV_Invertir"),
                                      uiOutput("Btn_GV_Invertir"))
                              ),
                              box(width = 12,
                                  #title = "OTROS",
                                  title = htmlOutput("PresOtros"),
                                  collapsible = TRUE,
                                  collapsed = TRUE,
                                  box(width = 12,
                                      title = "GASTOS FIJOS",
                                      collapsible = TRUE,
                                      collapsed = TRUE,
                                      uiOutput("N_GF_Otros"),
                                      uiOutput("GF_Otros"),
                                      uiOutput("Btn_GF_Otros")
                                  ),
                                  box(width = 12,
                                      title = "GASTOS VARIABLES",
                                      collapsible = TRUE,
                                      collapsed = TRUE,
                                      uiOutput("N_GV_Otros"),
                                      uiOutput("GV_Otros"),
                                      uiOutput("Btn_GV_Otros")
                                  ),
                                  box(width = 12,
                                      title = "GASTOS DEUDAS",
                                      collapsible = TRUE,
                                      collapsed = TRUE,
                                      uiOutput("N_GD_Otros"),
                                      uiOutput("GD_Otros")
                                  )
                              ),
                          ),
                          #--------- ASSET capture forms --------
                          box(width = 6,
                              title = h3("ACTIVOS", align = "center"),
                              collapsible = TRUE,
                              collapsed = TRUE,
                              box(width = 12,
                                  title = "EFECTIVO",
                                  collapsible = TRUE,
                                  collapsed = TRUE,
                                  box(width = 12,
                                      title = "INICIAL",
                                      collapsible = TRUE,
                                      collapsed = TRUE,
                                      uiOutput("Act_EF_Inicial"),
                                      uiOutput("Btn_Act_EF_Inicial")
                                  ),
                                  box(width = 12,
                                      title = "ACUMULADO",
                                      collapsible = TRUE,
                                      collapsed = TRUE,
                                      h3(textOutput("EfecAcumulado"))
                                  )
                              ),
                              box(width = 12,
                                  title = "INVERSIÓN",
                                  collapsible = TRUE,
                                  collapsed = TRUE,
                                  box(width = 12,
                                      title = "AGREGAR",
                                      collapsible = TRUE,
                                      collapsed = TRUE,
                                      uiOutput("N_Inversion"),
                                      uiOutput("Inversion"),
                                      uiOutput("Btn_Inversion")
                                  )
                              ),
                              box(width = 12,
                                  title = "BIENES RAÍCES",
                                  collapsible = TRUE,
                                  collapsed = TRUE,
                                  box(width = 12,
                                      title = "AGREGAR",
                                      collapsible = TRUE,
                                      collapsed = TRUE,
                                      uiOutput("N_Bienes_raices"),
                                      uiOutput("Bienes_raices"),
                                      uiOutput("Btn_Bienes_raices")
                                  )
                              ),
                              box(width = 12,
                                  title = "PROPIEDAD PERSONAL",
                                  collapsible = TRUE,
                                  collapsed = TRUE,
                                  box(width = 12,
                                      title = "AGREGAR",
                                      collapsible = TRUE,
                                      collapsed = TRUE,
                                      uiOutput("N_Propiedad_P"),
                                      uiOutput("Propiedad_P"),
                                      uiOutput("Btn_Propiedad_P")
                                  )
                              ),
                          ),
                          #--------- DEBT capture form --------
                          box(width = 6,
                              title = h3("DEUDAS", align = "center"),
                              collapsible = TRUE,
                              collapsed = TRUE,
                              box(width = 12,
                                  title = "AGREGAR",
                                  collapsible = TRUE,
                                  collapsed = TRUE,
                                  uiOutput("N_Deudas"),
                                  uiOutput("Deudas"),
                                  uiOutput("Btn_Deudas")
                              )
                          )
                 ),
                 #---- Income panel: tables by concept/category plus plots ----
                 tabPanel("Ingresos",
                          fluidRow(
                            column(6,
                                   tabsetPanel(
                                     tabPanel("Concepto",
                                              DT::dataTableOutput("IngresosTable")
                                     ),
                                     tabPanel("Categoria", DT::dataTableOutput("IngresosTable2"))
                                   )
                            ),
                            column(6,
                                   box(width = 12,
                                       uiOutput("dateRangeIngresosUI1"),
                                       h4("INGRESOS", align = "center"),
                                       plotlyOutput("IngresosPlot", height = '350px')
                                   )#,
                                   # box(width = 12,
                                   #     h4("", align = "center"),
                                   #     plotlyOutput("IngresosPlot2", height = '350px')
                                   # )
                            ),
                            box(width = 12,
                                h4("", align = "center"),
                                plotlyOutput("IngresosPlot2", height = '350px')
                            ),
                            box(width = 12,
                                h4("", align = "center"),
                                plotlyOutput("IngresosPlot3", height = '350px')
                            )
                          )
                 ),
                 #---- Expenses panel ----
                 tabPanel("Egresos",
                          fluidRow(
                            column(6,
                                   tabsetPanel(
                                     tabPanel("Concepto",
                                              DT::dataTableOutput("EgresosTable")
                                     ),
                                     tabPanel("Categoria", DT::dataTableOutput("EgresosTable2"))
                                   )
                            ),
                            column(6,
                                   box(width = 12,
                                       uiOutput("dateRangeEgresosUI1"),
                                       h4("EGRESOS", align = "center"),
                                       plotlyOutput("EgresosPlot", height = '350px')
                                   ),
                                   box(width = 12,
                                       h4("EGRESOS", align = "center"),
                                       plotlyOutput("EgresosPlot2", height = '350px')
                                   ) #,
                                   # box(width = 12,
                                   #     h4("", align = "center"),
                                   #     plotlyOutput("EgresosPlot3", height = '350px')
                                   # )
                            ),
                            box(width = 12,
                                h4("", align = "center"),
                                plotlyOutput("EgresosPlot3", height = '350px')
                            )
                          )
                 ),
                 #---- Assets panel ----
                 tabPanel("Activos",
                          fluidRow(
                            column(6,
                                   DT::dataTableOutput("ActivosTable")
                            ),
                            column(6,
                                   box(width = 12,
                                       uiOutput("dateRangeActivosUI1"),
                                       h4("ACTIVOS", align = "center"),
                                       plotlyOutput("ActivosPlot", height = '350px')
                                   ),
                            )
                          )
                 ),
                 #---- Debts panel ----
                 tabPanel("Deudas",
                          fluidRow(
                            column(6,
                                   DT::dataTableOutput("DeudasTable")
                            ),
                            column(6,
                                   box(width = 12,
                                       uiOutput("dateRangeDeudasUI1"),
                                       h4("DEUDAS", align = "center"),
                                       plotlyOutput("DeudasPlot", height = '350px')
                                   ),
                            )
                          )
                 ),
                 #---- Balance sheet panel ----
                 tabPanel("Balance",
                          column(2),
                          column(8,
                                 DT::dataTableOutput("BalanceTable")
                          ),
                          column(2)
                 )
)
### ----_____SERVER_____----
# Server: reactive month navigation, dynamically generated capture forms
# (via the sourced UIcreator*/BtnsEvents helpers), summary value boxes,
# per-section tables/plots and the accumulated balance sheet.
server <- function(input, output, session) {
  #---- UI to make the selected month reactive ----
  # `mes` holds the currently shown month, the previously shown month, and
  # (when the user picked a date directly) the manual selection.
  mes <- reactiveValues(mes = Sys.Date(),
                        anterior = NULL,
                        seleccion = NULL)
  # Advance to the next month, rolling the year over in December.
  observeEvent(input$MesSiguiente,
               {
                 if(!is.null(mes$seleccion)){
                   d <- day(mes$seleccion)
                   y <- year(mes$seleccion)
                   m <- month(mes$seleccion)
                   mes$seleccion <- NULL
                 } else {
                   d <- day(mes$mes)
                   y <- year(mes$mes)
                   m <- month(mes$mes)
                 }
                 if(m == 12){
                   y <- y+1
                   m <- 1
                 } else {
                   m <- m+1
                 }
                 mes$anterior <- mes$mes
                 # NOTE(review): a day-of-month of 29-31 can produce an
                 # invalid date (NA) for shorter months -- confirm.
                 mes$mes <- as.Date(paste0(y, "-", m, "-", d))
               })
  # Step back to the previous month, rolling the year back in January.
  observeEvent(input$MesAnterior,
               {
                 if(!is.null(mes$seleccion)){
                   d <- day(mes$seleccion)
                   y <- year(mes$seleccion)
                   m <- month(mes$seleccion)
                   mes$seleccion <- NULL
                 } else {
                   d <- day(mes$mes)
                   y <- year(mes$mes)
                   m <- month(mes$mes)
                 }
                 if(m == 1){
                   y <- y-1
                   m <- 12
                 } else {
                   m <- m-1
                 }
                 mes$anterior <- mes$mes
                 mes$mes <- as.Date(paste0(y, "-", m, "-", d))
               })
  # Month picker shown on the main panel.
  output$dateUI <- renderUI({
    mes <- mes$mes
    dateInput("date", "MES", value = mes, format = "MM-yyyy", language = "es")
  })
  # Date range for the balance plot, defaulting to the full data span.
  output$dateRangeUI <- renderUI({
    dat <- datBalance()
    dat <- dat$Balance
    if(!is.null(dat)){
      minDate <- min(dat$Fecha)
      maxDate <- max(dat$Fecha)
    } else {
      minDate <- mes$mes
      maxDate <- mes$mes
    }
    dateRangeInput("dateRange", "PERIODO", start = minDate, end = maxDate, format = "MM-yyyy", language = "es")
  })
  # Rebuild every dynamic capture form whenever the month (picked directly
  # or via the navigation buttons) or the underlying data changes.
  observe({
    if(!is.null(input$date)){
      mesS <- input$date
      mes$seleccion <- mesS
    }else{
      mesS <- mes$mes
    }
    datF <- dataFiltered()
    datnCF <- datanCapturasFiltered()
    lapply(1:length(var_names$nombre), UIcreatorCaptures, output, input, mesS, datF, datnCF)
    lapply(1:length(var_names$nombre), UIcreatorNo, output, mesS, datnCF)
  })
  observeEvent(input$MesSiguiente,{
    mes <- mes$mes
    datF <- dataFiltered()
    datnCF <- datanCapturasFiltered()
    lapply(1:length(var_names$nombre), UIcreatorCaptures, output, input, mes, datF, datnCF)
    lapply(1:length(var_names$nombre), UIcreatorNo, output, mes, datnCF)
  })
  observeEvent(input$MesAnterior,{
    mes <- mes$mes
    datF <- dataFiltered()
    datnCF <- datanCapturasFiltered()
    lapply(1:length(var_names$nombre), UIcreatorCaptures, output, input, mes, datF, datnCF)
    lapply(1:length(var_names$nombre), UIcreatorNo, output, mes, datnCF)
  })
  # Wire up one save button per capture variable.
  lapply(1:length(var_names$nombre), UIcreatorBtns, output, input)
  ##---- Summary value boxes on the main panel ----
  # Each box shows the selected month's total for its section; a missing
  # month renders as 0.
  output$boxIngresosMes <- renderValueBox({
    if(!is.null(mes$seleccion)){
      mes <- mes$seleccion
    } else {
      mes <- mes$mes
    }
    y <- year(mes)
    m <- month(mes)
    dat <- datBalance()
    dat <- dat$Ingresos
    if(!is.null(dat)){
      Ingreso <- dat %>% filter(year == y, month == m) #%>% pull(Egresos)
      if(length(Ingreso$Ingresos) == 0) {
        Ingreso <- 0
      } else {
        Ingreso <- Ingreso %>% pull(Ingresos)
      }
    } else {
      Ingreso <-0
    }
    valueBox(
      prettyNum(round(Ingreso, digits = 2), big.mark = ","), "Ingreso",
      icon = icon("money-bill", lib = "font-awesome"),
      color = "green", width = 12
    )
  })
  output$boxEgresosMes <- renderValueBox({
    if(!is.null(mes$seleccion)){
      mes <- mes$seleccion
    } else {
      mes <- mes$mes
    }
    y <- year(mes)
    m <- month(mes)
    dat <- datBalance()
    dat <- dat$Egresos
    if(!is.null(dat)){
      Egreso <- dat %>% filter(year == y, month == m) #%>% pull(Egresos)
      if(length(Egreso$Egresos) == 0) {
        Egreso <- 0
      } else {
        Egreso <- Egreso %>% pull(Egresos)
      }
    } else {
      Egreso <- 0
    }
    valueBox(
      prettyNum(round(Egreso, digits = 2), big.mark = ","), "Egreso",
      icon = icon("balance-scale-left", lib = "font-awesome"),
      color = "orange", width = 12
    )
  })
  output$boxDeudas <- renderValueBox({
    if(!is.null(mes$seleccion)){
      mes <- mes$seleccion
    } else {
      mes <- mes$mes
    }
    y <- year(mes)
    m <- month(mes)
    dat <- datBalance()
    dat <- dat$Deudas
    if(!is.null(dat)){
      Deudas <- dat %>% filter(year == y, month == m) #%>% pull(Egresos)
      if(length(Deudas$Deudas) == 0) {
        Deudas <- 0
      } else {
        Deudas <- Deudas %>% pull(Deudas)
      }
    } else {
      Deudas <- 0
    }
    valueBox(
      prettyNum(round(Deudas, digits = 2), big.mark = ","), "Deudas",
      icon = icon("credit-card", lib = "font-awesome"),
      color = "red", width = 12
    )
  })
  output$boxActivos <- renderValueBox({
    if(!is.null(mes$seleccion)){
      mes <- mes$seleccion
    } else {
      mes <- mes$mes
    }
    y <- year(mes)
    m <- month(mes)
    dat <- datBalance()
    dat <- dat$Activos
    if(!is.null(dat)){
      Activos <- dat %>% filter(year == y, month == m) #%>% pull(Egresos)
      if(length(Activos$Activos) == 0) {
        Activos <- 0
      } else {
        Activos <- Activos %>% pull(Activos)
      }
    } else {
      Activos <- 0
    }
    valueBox(
      prettyNum(round(Activos, digits = 2), big.mark = ","), "Activos",
      icon = icon("landmark", lib = "font-awesome"),
      color = "blue", width = 12
    )
  })
  ##---- Load the saved data and keep it updated ----
  #----__finanzas_totales__
  # Mutable store updated by the capture buttons (see BtnsEvents).
  finanzas_totales <- reactiveValues(datosAgregados = NULL,
                                     nCapturas = NULL,
                                     datosCapturados = NULL,
                                     datosCapturadosN = NULL,
                                     indicesAgregados = NULL)
  # NOTE(review): dataFiltered()/datanCapturasFiltered() are invoked here
  # outside a reactive context, which Shiny normally rejects -- verify that
  # BtnsEvents defers their evaluation.
  lapply(1:length(var_names$nombre), BtnsEvents, input, output, finanzas_totales, input$date, dataFiltered(), datanCapturasFiltered(),
         Finanzas, nCapturas)
  ##---- Per-month views of the data ----
  ##----___dataFiltered___----
  # Records for the selected month, one element per capture variable.
  dataFiltered <- reactive({
    if(!is.null(input$date)){
      mes <- input$date
    }else{
      mes <- mes$mes
    }
    if (is.null(finanzas_totales$datosAgregados)){
      dat <- Finanzas
    } else {
      dat <- finanzas_totales$datosAgregados
    }
    dataF <- lapply(1:length(var_names$nombre), dataFilter, mes, dat)
    dataF
  })
  ##------ Saved capture counters, filtered to the selected month -----
  datanCapturasFiltered <- reactive({
    if(!is.null(input$date)){
      mes <- input$date
    }else{
      mes <- mes$mes
    }
    if (is.null(finanzas_totales$nCapturas)){
      dat <- nCapturas
    } else {
      dat <- finanzas_totales$nCapturas
    }
    dataF <- lapply(1:length(var_names$nombre), dataFilter, mes, dat)
    dataF
  })
  ##------- Data backing the section tables ---------
  # Splits the full record set into Ingresos / Egresos / Activos / Deudas
  # plus per-concept and per-category aggregates.
  datTables <- reactive({
    if (is.null(finanzas_totales$datosAgregados)){
      dat <- Finanzas
    } else {
      dat <- finanzas_totales$datosAgregados
    }
    Ingresos <- dat %>% filter(Seccion == "Ingresos") %>% select(date, Seccion, Type, categoria, concepto, captura)
    Egresos <- dat %>% filter(str_detect(Type, "Gasto")) %>% select(date, Seccion, Type, categoria, concepto, captura)
    Activos <- dat %>% filter(Type %in% c("Activo Efectivo Inicial",
                                          "Inversion",
                                          "Bienes Raices",
                                          "Propiedad Personal")) %>%
      select(date, Seccion, Type, categoria, concepto, captura, rendimiento)
    Deudas <- dat %>% filter(Seccion == "Deudas") %>% select(date, Seccion, Type, categoria, concepto, captura, interes)
    # NOTE(review): the aggregates below are only assigned inside the NULL
    # guards; if any subset were NULL the list() at the end would error on
    # an undefined name -- confirm the subsets can never be NULL here.
    if(!is.null(Egresos)){
      EgresosConcepto <- Egresos %>% group_by(concepto) %>%  summarise(cantidad = sum(captura), .groups = 'drop')
    }
    if(!is.null(Ingresos)){
      IngresosConcepto <- Ingresos %>% group_by(concepto) %>%  summarise(cantidad = sum(captura), .groups = 'drop')
    }
    if(!is.null(Ingresos)){
      IngresosCategoria <- Ingresos %>% group_by(categoria) %>%  summarise(cantidad = sum(captura), .groups = 'drop')
    }
    if(!is.null(Deudas)){
      DeudasConcepto <- Deudas %>% group_by(concepto) %>%  summarise(cantidad = sum(captura), .groups = 'drop')
    }
    if(!is.null(Activos)){
      ActivosConcepto <- Activos %>% group_by(concepto) %>%  summarise(cantidad = sum(captura), .groups = 'drop')
    }
    dat <- list(
      Ingresos = Ingresos,
      Egresos = Egresos,
      Activos = Activos,
      Deudas = Deudas,
      EgresosConcepto = EgresosConcepto,
      IngresosConcepto = IngresosConcepto,
      IngresosCategoria = IngresosCategoria,
      DeudasConcepto = DeudasConcepto,
      ActivosConcepto = ActivosConcepto
    )
    dat
  })
  ##---- Tables ----
  output$IngresosTable <- DT::renderDataTable({
    dat <- datTables()
    dat <- dat$Ingresos
    dat <- dat %>%
      rename(Fecha = date, "Sección" = Seccion, Tipo = Type, Categoria = categoria, Concepto = concepto, Monto = captura) %>%
      arrange(desc(Fecha))
    if(!is.null(dat)){
      DT::datatable(dat, options = list(
        scrollY = '400px', paging = FALSE),
        rownames = FALSE) %>% formatRound(columns = "Monto", digits = 2)
    }
  })
  # Income aggregated by year/month/category.
  output$IngresosTable2 <- DT::renderDataTable({
    dat <- datTables()
    dat <- dat$Ingresos
    dat <- dat %>% rename(Fecha = date, "Sección" = Seccion, Tipo = Type, Categoria = categoria, Concepto = concepto, Monto = captura) %>%
      mutate(Año = year(Fecha), Mes = month(Fecha))
    dat <- dat %>% group_by(Año, Mes, `Sección`, Tipo, Categoria) %>%
      summarise(Monto = sum(Monto), .groups = 'drop') %>%
      arrange(desc(Año), desc(Mes))%>%
      mutate(Mes = str_to_title(month(Mes, label = TRUE, abbr = FALSE)))
    if(!is.null(dat)){
      DT::datatable(dat, options = list(
        scrollY = '400px', paging = FALSE),
        rownames = FALSE) %>% formatRound(columns = "Monto", digits = 2)
    }
  })
  output$EgresosTable <- DT::renderDataTable({
    dat <- datTables()
    dat <- dat$Egresos
    dat <- dat %>% rename(Fecha = date, "Sección" = Seccion, Tipo = Type, Categoria = categoria, Concepto = concepto, Monto = captura) %>%
      arrange(desc(Fecha))
    if(!is.null(dat)){
      DT::datatable(dat, options = list(
        scrollY = '700px', paging = FALSE),
        rownames = FALSE) %>% formatRound(columns = "Monto", digits = 2)
    }
  })
  output$EgresosTable2 <- DT::renderDataTable({
    dat <- datTables()
    dat <- dat$Egresos
    dat <- dat %>% rename(Fecha = date, "Sección" = Seccion, Tipo = Type, Categoria = categoria, Concepto = concepto, Monto = captura) %>%
      mutate(Año = year(Fecha), Mes = month(Fecha))
    dat <- dat %>% group_by(Año, Mes, `Sección`, Tipo, Categoria) %>%
      summarise(Monto = sum(Monto), .groups = 'drop') %>%
      arrange(desc(Año), desc(Mes))%>%
      mutate(Mes = str_to_title(month(Mes, label = TRUE, abbr = FALSE)))
    if(!is.null(dat)){
      DT::datatable(dat, options = list(
        scrollY = '700px', paging = FALSE),
        rownames = FALSE) %>% formatRound(columns = "Monto", digits = 2)
    }
  })
  output$ActivosTable <- DT::renderDataTable({
    dat <- datTables()
    dat <- dat$Activos
    dat <- dat %>% rename(Fecha = date, "Sección" = Seccion, Tipo = Type, Categoria = categoria, Concepto = concepto, Monto = captura, Rendimiento = rendimiento) %>%
      arrange(desc(Fecha))
    if(!is.null(dat)){
      DT::datatable(dat, options = list(
        scrollY = '700px', paging = FALSE),
        rownames = FALSE) %>% formatRound(columns = "Monto", digits = 2)
    }
  })
  output$DeudasTable <- DT::renderDataTable({
    dat <- datTables()
    dat <- dat$Deudas
    dat <- dat %>% rename(Fecha = date, "Sección" = Seccion, Tipo = Type, Categoria = categoria, Concepto = concepto, Monto = captura, "Interés" = interes) %>%
      arrange(desc(Fecha))
    if(!is.null(dat)){
      DT::datatable(dat, options = list(
        scrollY = '700px', paging = FALSE),
        rownames = FALSE) %>% formatRound(columns = "Monto", digits = 2)
    }
  })
  output$BalanceTable <- DT::renderDataTable({
    dat <- datBalance()
    dat <- dat$BalanceTable
    if(!is.null(dat)){
      dat <- dat %>% select(Fecha, Ingresos, Egresos, "Ingresos-Egresos", Deudas, Efectivo, Activos, "Patrimonio Neto") %>%
        arrange(desc(Fecha))
      DT::datatable(dat, options = list(
        scrollY = '700px', paging = FALSE),
        rownames = FALSE) %>% formatRound(columns = c(2:length(dat)), digits = 2)
    }
  })
  ##------- datBalance ---------
  # Builds the month-by-month balance sheet: income, expenses, their
  # difference, cumulative cash (seeded from "Efectivo Inicial"), assets
  # (non-cash assets + cumulative cash), debts and net worth.
  datBalance <- reactive({
    if (is.null(finanzas_totales$datosAgregados)){
      dat <- Finanzas
    } else {
      dat <- finanzas_totales$datosAgregados
    }
    if(!is.na(dat[1,1]) | is.null(dat)){
      Ingresos <- dat %>% filter(Seccion == "Ingresos") %>%
        select(year, month, captura) %>%
        group_by(year, month) %>%
        summarise(Ingresos = sum(captura), .groups = 'drop')
      Egresos <- dat %>% filter(str_detect(Type, "Gasto")) %>%
        group_by(year, month) %>%
        summarise(Egresos = sum(captura), .groups = 'drop')
      EfectivoInicial <- dat %>% filter(Seccion == "Efectivo Inicial")
      if(length(EfectivoInicial$Seccion) == 0) {
        EfectivoInicial <- 0
      } else {
        EfectivoInicial <- EfectivoInicial %>% pull(captura)
      }
      Activos <- dat %>% filter(Type %in% c("Inversion",
                                            "Bienes Raices",
                                            "Propiedad Personal")) %>%
        group_by(year, month) %>%
        summarise(Activos = sum(captura), .groups = 'drop')
      Deudas <- dat %>% filter(Seccion == "Deudas") %>%
        group_by(year, month) %>%
        summarise(Deudas = sum(captura), .groups = 'drop')
      # Outer-merge the four series, fill missing months with 0.
      Balance <- merge(Ingresos, Egresos, all = TRUE)
      Balance <- merge(Balance, Activos, all = TRUE)
      Balance <- merge(Balance, Deudas, all = TRUE)
      Balance <- Balance %>% gather(Feature, Value, -year, -month)
      Balance <- Balance %>% mutate(Value = ifelse(is.na(Value), 0, Value))
      Balance <- Balance %>% spread(Feature, Value)
      if(!is.na(Balance$year[1])){
        # `Fecha` is shifted to the first day of the FOLLOWING month so each
        # month's totals plot at its close.
        Balance <- Balance %>% mutate(ImenosE = Ingresos - Egresos,
                                      Efectivo = cumsum(ImenosE) + EfectivoInicial,
                                      Activos = Efectivo + Activos,
                                      "Patrimonio Neto" = Activos - Deudas,
                                      Fecha = as.Date(paste0(ifelse(month+1 <= 12, year, year+1), "-", ifelse(month+1 <= 12, month+1, 1) , "-", 1)))
        Balance <- Balance %>% rename("Ingresos-Egresos" = ImenosE)
        Efectivo <- Balance %>% select(year, month, Efectivo)
        Efectivo <- Efectivo %>% rename(Activos = Efectivo)
        Activos <- rbind(Activos, Efectivo)
        Activos <- Activos %>%
          group_by(year, month) %>%
          summarise(Activos = sum(Activos), .groups = 'drop')
        Balance <- Balance %>% gather(Feature, Value, -year, -month, -Fecha)
        Balance <- Balance %>% mutate(Value = ifelse(is.na(Value), 0, Value))
        BalanceTable <- Balance %>% spread(Feature, Value) #%>%
        #select()
        Balance <- list(
          Balance = Balance,
          BalanceTable = BalanceTable,
          Ingresos = Ingresos,
          Egresos = Egresos,
          Activos = Activos,
          Deudas = Deudas
        )
      } else {
        Balance <- NULL
      }
    } else {
      Balance <- NULL
    }
    Balance
  })
  # Main dashboard plot, restricted to the chosen date range.
  output$BalanceGeneralPlot <- renderPlotly({
    inicio <- input$dateRange[1]
    fin <- input$dateRange[2]
    dat <- datBalance()
    dat <- dat$Balance
    if(!is.null(dat)){
      if(!is.null(inicio) & !is.null(fin)){
        if(fin > inicio){
          dat <- dat %>% filter(Fecha <= fin & Fecha >= inicio)
        }
      }
      BalancePlot(dat)
    }
  })
  #------ Panel plots ------
  output$dateRangeIngresosUI1 <- renderUI({
    dat <- datTables()
    dat <- dat$Ingresos
    if(is.Date(dat$date[1]) & !is.na(dat$date[1])){
      minDate <- min(dat$date)
      maxDate <- max(dat$date)
    } else {
      minDate <- mes$mes
      maxDate <- mes$mes
    }
    dateRangeInput("dateRangeIngresos", "PERIODO", start = minDate, end = maxDate, format = "MM-yyyy", language = "es")
  })
  output$IngresosPlot <- renderPlotly({
    inicio <- input$dateRangeIngresos[1]
    fin <- input$dateRangeIngresos[2]
    dat <- datTables()
    dat <- dat$Ingresos
    if(!is.null(dat) & !is.na(dat$captura[1])){
      if(!is.null(inicio) & !is.null(fin)){
        if(fin > inicio){
          dat <- dat %>% filter(date <= fin & date >= inicio)
        }
      }
      ConceptPlot(dat, "Ingresos")
    }
  })
  output$IngresosPlot2 <- renderPlotly({
    dat <- datTables()
    dat <- dat$IngresosConcepto
    if(!is.null(dat)){
      BarPlot(dat)
    }
  })
  output$IngresosPlot3 <- renderPlotly({
    dat <- datTables()
    dat <- dat$IngresosCategoria
    if(!is.null(dat)){
      BarPlot2(dat)
    }
  })
  output$dateRangeEgresosUI1 <- renderUI({
    dat <- datTables()
    dat <- dat$Egresos
    if(is.Date(dat$date[1]) & !is.na(dat$date[1])){
      minDate <- min(dat$date)
      maxDate <- max(dat$date)
    } else {
      minDate <- mes$mes
      maxDate <- mes$mes
    }
    dateRangeInput("dateRangeEgresos", "PERIODO", start = minDate, end = maxDate, format = "MM-yyyy", language = "es")
  })
  output$EgresosPlot <- renderPlotly({
    inicio <- input$dateRangeEgresos[1]
    fin <- input$dateRangeEgresos[2]
    dat <- datTables()
    dat <- dat$Egresos
    if(!is.null(dat) & !is.na(dat$captura[1])){
      if(!is.null(inicio) & !is.null(fin)){
        if(fin > inicio){
          dat <- dat %>% filter(date <= fin & date >= inicio)
        }
      }
      TypePlot(dat)
    }
  })
  output$EgresosPlot2 <- renderPlotly({
    inicio <- input$dateRangeEgresos[1]
    fin <- input$dateRangeEgresos[2]
    dat <- datTables()
    dat <- dat$Egresos
    if(!is.null(dat) & !is.na(dat$captura[1])){
      if(!is.null(inicio) & !is.null(fin)){
        if(fin > inicio){
          dat <- dat %>% filter(date <= fin & date >= inicio)
        }
      }
      ConceptPlot(dat, "Egresos")
    }
  })
  output$EgresosPlot3 <- renderPlotly({
    dat <- datTables()
    dat <- dat$EgresosConcepto
    if(!is.null(dat)){
      BarPlot(dat)
    }
  })
  output$dateRangeDeudasUI1 <- renderUI({
    dat <- datTables()
    dat <- dat$Deudas
    if(is.Date(dat$date[1]) & !is.na(dat$date[1])){
      minDate <- min(dat$date)
      maxDate <- max(dat$date)
    } else {
      minDate <- mes$mes
      maxDate <- mes$mes
    }
    dateRangeInput("dateRangeDeudas", "PERIODO", start = minDate, end = maxDate, format = "MM-yyyy", language = "es")
  })
  output$DeudasPlot <- renderPlotly({
    inicio <- input$dateRangeDeudas[1]
    fin <- input$dateRangeDeudas[2]
    dat <- datTables()
    dat <- dat$Deudas
    if(!is.null(dat) & !is.na(dat$captura[1]) ){
      if(!is.null(inicio) & !is.null(fin)){
        if(fin > inicio){
          dat <- dat %>% filter(date <= fin & date >= inicio)
        }
      }
      ConceptPlot(dat, "Deudas")
    }
  })
  output$dateRangeActivosUI1 <- renderUI({
    dat <- datTables()
    dat <- dat$Activos
    if(is.Date(dat$date[1]) & !is.na(dat$date[1])){
      minDate <- min(dat$date)
      maxDate <- max(dat$date)
    } else {
      minDate <- mes$mes
      maxDate <- mes$mes
    }
    dateRangeInput("dateRangeActivos", "PERIODO", start = minDate, end = maxDate, format = "MM-yyyy", language = "es")
  })
  output$ActivosPlot <- renderPlotly({
    inicio <- input$dateRangeActivos[1]
    fin <- input$dateRangeActivos[2]
    dat <- datTables()
    dat <- dat$Activos
    if(!is.null(dat) & !is.na(dat$captura[1])){
      if(!is.null(inicio) & !is.null(fin)){
        if(fin > inicio){
          dat <- dat %>% filter(date <= fin & date >= inicio)
        }
      }
      AreaPlot(dat)
    }
  })
  ##---- Budget ----
  # Splits the selected month's income into fixed budget shares per
  # expense category (30/20/15/10/25%) and collects that month's expenses.
  datPresupuesto <- reactive({
    if(!is.null(mes$seleccion)){
      mes <- mes$seleccion
    } else {
      mes <- mes$mes
    }
    y <- year(mes)
    m <- month(mes)
    dat <- datBalance()
    dat <- dat$Ingresos
    dat2 <- datTables()
    dat2 <- dat2$Egresos
    if(!is.null(dat)){
      Ingreso <- dat %>% filter(year == y, month == m) #%>% pull(Egresos)
      if(length(Ingreso$Ingresos) == 0) {
        Ingreso <- 0
      } else {
        Ingreso <- Ingreso %>% pull(Ingresos)
      }
    } else {
      Ingreso <-0
    }
    if(!is.null(dat2)){
      Egreso <- dat2 %>% mutate(year = year(date), month = month(date))
      Egreso <- Egreso %>% filter(year == y, month == m) #%>% pull(Egresos)
      if(length(Egreso$captura) == 0) {
        Egreso <- 0
      }
    } else {
      Egreso <-0
    }
    if(is.data.frame(Egreso)){
      Egreso <- Egreso %>% group_by(Seccion) %>% summarise(Egreso = sum(captura), .groups = 'drop' )
    }
    Presupuesto <- list(
      Mantencion = Ingreso*0.3,
      Diversion = Ingreso*0.2,
      Aprender = Ingreso*0.15,
      Viajar = Ingreso*0.1,
      Invertir = Ingreso*0.25,
      Otros = 0,
      Egreso = Egreso
    )
    Presupuesto
  })
  ##---- Budget labels shown on the expense boxes ----
  observe({
    datPres <- datPresupuesto()
    lapply(1:6, presupuesto, datPres, output)
  })
  # Accumulated cash for the selected month (shown under ACTIVOS/EFECTIVO).
  output$EfecAcumulado <- renderText({
    if(!is.null(mes$seleccion)){
      mes <- mes$seleccion
    } else {
      mes <- mes$mes
    }
    y <- year(mes)
    m <- month(mes)
    dat <- datBalance()
    dat <- dat$Balance
    if(!is.null(dat)){
      Efectivo <- dat %>% filter(year == y, month == m)
      if(length(Efectivo$Efectivo) == 0) {
        Efectivo <- 0
      } else {
        Efectivo <- Efectivo %>% pull(Efectivo)
      }
    } else {
      Efectivo <- 0
    }
    prettyNum(round(Efectivo, digits = 2), big.mark = ",")
  })
}

# Run the application
shinyApp(ui = ui, server = server)
|
5b0f4273892d12c96d4166ef02e811850c64a0ee
|
a2da3f8ed1f91e1792e9f47a9f801bdd41aaa371
|
/02-Learn-R.R
|
cecd91547ce8c37980fc61b175749db4fb5aa9b2
|
[] |
no_license
|
fatihilhan42/DATA-SCIENCE-WITH-R
|
fe2d25afbd7dfb07533ac3559928c460f31e32b0
|
5bfc0025edff853eedc8f56852dcad98e4f55f7a
|
refs/heads/main
| 2023-05-12T06:37:42.207102
| 2021-06-07T10:49:16
| 2021-06-07T10:49:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 975
|
r
|
02-Learn-R.R
|
# LET'S LEARN R -- a short tour of basic R syntax: assignment, help,
# working directories, vectors, lists, NULL/NA, data frames and
# namespaced package calls.
"Merhaba Dünya!"
# `<-` is the assignment operator
x <- 5
# help() opens the documentation page of a function
help(getwd)
# Print the current working directory
getwd()
# Change the working directory
# NOTE(review): this machine-specific path errors anywhere else; scripts
# should generally avoid setwd() altogether.
setwd("C:/Users/ASUS/Desktop/demo")
# Vector data type
v <- c(1, 2, 3)
# List data type
l <- list(x = c("elma", "erik"), y = 1:10)
# Access the elements of a vector
v[1]
# Access the components of a list (`[` keeps the list wrapper)
l[1]
# Access the elements inside list components (`[[` drops the wrapper)
l[[1]][2]
l[[2]][5]
# Logical vectors can be used for subsetting
v[c(TRUE, FALSE, TRUE)]
# The $ symbol can be used for named components
l$y
# c() returns NULL
c()
# NULL is dropped when combined, so it never prints
c(c(), 1, NULL)
# NA is used for missing data (note the coercion to character here)
c(1, NA, "a")
# data frame data structure
d <- data.frame(x = c(1, NA, 3))
print(d)
# Replace missing values
d$x[is.na(d$x)] <- 0
# Package usage
library()
stats::var(1:10)
# Data frame usage
df <- data.frame(col1 = c(1, 2, 3), col2 = c(4, 5, 6))
df
# Fixed: the original `df$.col3` created a column literally named ".col3".
df$col3 <- df$col1 + df$col2
df
|
5b7a60b0c43bbe5fb2e833e772960d4ef895a0dc
|
d62b3bbe79263098532870905d5e8e7442421dbf
|
/setup.R
|
e024d982ed812beb41f5843bea2231c60f1f16a8
|
[] |
no_license
|
doberstein/CVK-MOOC-Analysis
|
b051dff96392477fa6f3dae8c77145d1e5a724a4
|
e9a75b868b859b6ca85010e9a9b396d3079e1011
|
refs/heads/master
| 2020-04-05T11:56:08.659074
| 2019-12-18T13:51:43
| 2019-12-18T13:51:43
| 156,850,802
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 264
|
r
|
setup.R
|
# Attach every package required by the CVK MOOC analysis scripts.
# All loads are unconditional, so a missing package fails fast here
# rather than midway through an analysis.
library(purrr)
library(tidyr)
library(jsonlite)
library(dplyr)
library(ngram)
library(anytime)
library(ggplot2)
library(compare)
library(reldist)
library(rmarkdown)
library(dmm)
library(TraMineR)
library(fpc)
library(stringr)
library(apcluster)
library(data.table)
|
fb725fc3f4c2eab2d44419db6cdf6a58c385d9b8
|
58b63f843ddba75a567d921f736554d978caa33d
|
/Rename_Files.R
|
ad0ad6119763974d533386694058a52ed1adf458
|
[] |
no_license
|
Broccolito/HRLR300_Tissue_Dataset
|
d41208453c9ed15ddd1dc48f99b422e679673533
|
2e4d8061bc8ca80df25b02836ad4cf57ba4fca5b
|
refs/heads/master
| 2020-03-22T15:19:30.857993
| 2018-07-10T00:57:49
| 2018-07-10T00:57:49
| 140,244,144
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 230
|
r
|
Rename_Files.R
|
# Rename every file matching "png" in the working directory to a plain
# numeric sequence: 1.png, 2.png, ..., n.png.
#
# The file list is captured once so `from` and `to` are guaranteed to be
# the same length and in the same order. seq_along() also handles the
# zero-file case gracefully: the original seq(1:length(list.files(...)))
# evaluates to c(1, 2) when nothing matches (because seq(c(1, 0)) is 1:2),
# which made file.rename() abort with a from/to length mismatch.
png_files <- list.files(pattern = "png")
file.rename(from = png_files,
            to = paste0(seq_along(png_files), ".png"))
|
a95e1f4187d8cae651ddd23246b28426ce95e27e
|
98cdc943ac1f7bee868b2a485f74955abd38ae57
|
/tests/testthat.R
|
cf8bd1cc4536f03006ae886c7d4bf80b745e3b13
|
[
"MIT"
] |
permissive
|
FoRTExperiment/PestED
|
f31e4aff1eb3096c9b6703d772d85f7139d58ae9
|
62f023d8fd8098649d8f671068b1409f0cd20300
|
refs/heads/master
| 2021-05-17T04:41:56.114938
| 2020-11-18T02:12:17
| 2020-11-18T02:12:17
| 250,627,623
| 2
| 2
|
NOASSERTION
| 2020-11-18T02:12:18
| 2020-03-27T19:31:21
|
R
|
UTF-8
|
R
| false
| false
| 56
|
r
|
testthat.R
|
# Standard testthat entry point: discovers and runs every test under
# tests/testthat/ for the PestED package (invoked by R CMD check).
library(testthat)
library(PestED)
test_check("PestED")
|
bedae03aef5a98bbc6eb1f65dd74c590d452c554
|
65061263ab8ea942345edc0b1a7b5771b06406c9
|
/man/IV_PILE.Rd
|
85be21d4f97ee2fcba329b10a1aed48b453695c8
|
[] |
no_license
|
cran/ivitr
|
ae438aeb16bc498713a2bd01445467a08c5b5afc
|
4404053c3195c948ac9463b7d00ae36d7a4af670
|
refs/heads/master
| 2022-12-08T16:46:34.854825
| 2020-09-11T07:40:03
| 2020-09-11T07:40:03
| 295,361,081
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,316
|
rd
|
IV_PILE.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/IV_PILE.R
\name{IV_PILE}
\alias{IV_PILE}
\title{Estimate an IV-optimal individualized treatment rule}
\usage{
IV_PILE(dt, kernel = "linear", C = 1, sig = 1/(ncol(dt) - 5))
}
\arguments{
\item{dt}{A dataframe whose first column is a binary IV 'Z', followed
by q columns of observed covariates, a binary
treatment indicator 'A', a binary outcome 'Y',
lower endpoint of the partial identification interval 'L',
and upper endpoint of the partial identification interval 'U'.
The dataset has q+5 columns in total.}
\item{kernel}{The kernel used in the weighted SVM algorithm. The user
may choose between 'linear' (linear kernel) and
'radial' (Gaussian RBF kernel).}
\item{C}{Cost of violating the constraint. This is the parameter C in
the Lagrange formulation.}
\item{sig}{Sigma in the Gaussian RBF kernel. Default is set to
1/dimension of covariates, i.e., 1/q. This parameter
is not relevant for linear kernel.}
}
\value{
An object of the type \code{wsvm}, inheriting from \code{svm}.
}
\description{
\code{IV_PILE} estimates an IV-optimal individualized treatment
rule given a dataset with estimated partial identification intervals
for each instance.
}
\examples{
\dontrun{
# It is necessary to install the package locClass in order
# to run the following code.
attach(dt_Rouse)
# Construct an IV out of differential distance to two-year versus
# four-year college. Z = 1 if the subject lives not farther from
# a 4-year college compared to a 2-year college.
Z = (dist4yr <= dist2yr) + 0
# Treatment A = 1 if the subject attends a 4-year college and 0
# otherwise.
A = 1 - twoyr
# Outcome Y = 1 if the subject obtained a bachelor's degree
Y = (educ86 >= 16) + 0
# Prepare the dataset
dt = data.frame(Z, female, black, hispanic, bytest, dadsome,
dadcoll, momsome, momcoll, fincome, fincmiss, A, Y)
# Estimate the Balke-Pearl bound by estimating each constituent
# conditional probability p(Y = y, A = a | Z, X) with a multinomial
# regression.
dt_with_BP_bound_multinom = estimate_BP_bound(dt, method = 'multinom')
# Estimate the IV-optimal individualized treatment rule using a
# linear kernel, under the putative IV and the Balke-Pearl bound.
iv_itr_BP_linear = IV_PILE(dt_with_BP_bound_multinom, kernel = 'linear')
}
}
|
7ee9d63d6d7086cdf58f1c84b178e2e8a1a3c05e
|
08377005c504cad79e453e702725fd0cfc5ae360
|
/R/Exiqon_2Colour_miRNA_Pipeline.r
|
09d8a669dd75d8e19120552d3bac7e6a67addb57
|
[] |
no_license
|
AndrewSkelton/BSU_Scripts
|
a7c73960fa9aba754b82c2aca100e4e812790ca9
|
8a1a81fd2112ef51a9f8d30f920bbde46b95a939
|
refs/heads/master
| 2016-09-10T10:48:52.905124
| 2014-05-28T16:13:59
| 2014-05-28T16:13:59
| 16,342,295
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,657
|
r
|
Exiqon_2Colour_miRNA_Pipeline.r
|
##' Exiqon two-colour miRNA array pipeline: load raw ImaGene data and a
##' GAL annotation file, QC-plot the raw channels, background-correct,
##' normalise (loess/median/scale), and run a limma differential
##' expression analysis contrasting IL-17, IFNy, and IL-17/IFNy
##' treatments. Each figure is written to its own PDF in the working
##' directory.
##'
##'LOAD IN GAL FILE (ANNOTATION) AND RAW DATA
# NOTE(review): biocLite() requires source("https://bioconductor.org/biocLite.R")
# first (or BiocManager::install() on modern R) -- confirm the intended setup.
biocLite("ExiMiR")
library(ExiMiR)
install.packages("miRada")
make.gal.env(galname="gal_208500,208501,208502,208510_lot35003-35003_hsa-and-related-vira_from_mb180,miRPlus.gal" , gal.path="./" )
ebatch <- ReadExi(galname="gal_208500,208501,208502,208510_lot35003-35003_hsa-and-related-vira_from_mb180,miRPlus.gal" , txtfile.path="./" )
##
##'LIMMA RAW OBJECT CREATION
library(limma)
targets <- readTargets(path = "./" )
RGList <- read.maimages(targets[,c("Cy3", "Cy5")], source = "imagene", path = "./" )
RGList$genes <- readGAL(path = "./")
RGList$printer <- getLayout(RGList$genes)
###AFFY NORMALISATION (spike-in based, via ExiMiR)
obatch <- createAB(RGList)
spikein.all <- grep( '^spike', featureNames(obatch), value=TRUE)
spikein.subset <- setdiff(spikein.all, 'spike_control_f')
# NOTE(review): figures.output=file passes the base R function `file`;
# this was probably meant to be a file name/path -- confirm.
spikein.params <- list(probeset.list=spikein.subset,
                       loess.span=0.6,
                       force.zero=TRUE,
                       figures.show=TRUE, figures.output=file)
# NOTE(review): eset.spike is computed but never used downstream;
# the limma branch below is the one carried forward.
eset.spike <- NormiR(ebatch, bgcorrect.method= 'normexp',
                     bgcorrect.param=list(offset=50),
                     normalize.method= 'spikein',
                     normalize.param= list(spikein.params),
                     summary.method= 'medianpolish')
###
##
##'BOXPLOT OF INTENSITIES FOR EACH PAIRED ARRAY
##'FROM PROVIDED TARGETS TEXT FILE
pdf("Boxplot_raw_intensities.pdf")
labs <- colnames(data.frame(cbind(log2(RGList$Gb),log2(RGList$Rb))))
lab_test <- c("IL-17", "IL-17/IFNy", "IL-17/IFNy", "IL-17", "IL-17",
              "IFNy", "IFNy", "IL-17/IFNy", "IFNy", "IL-17", "IFNy", "IL-17/IFNy")
lab_test_Cy3 <- paste(lab_test, "_Cy3", sep="")
lab_test_Cy5 <- paste(lab_test, "_Cy5", sep="")
lab_test_final <- c(lab_test_Cy3, lab_test_Cy5)
boxplot(data.frame(cbind(log2(RGList$Gb),log2(RGList$Rb))),
        main="Boxplot of Channel Intensity Values", xaxt="n",
        col=rep(c("green","red"),each=12))
axis(1, at=seq(1, length(lab_test_final), by=1), labels = FALSE)
text(seq(1+.75, length(lab_test_final)+.75, by=1), par("usr")[3]-.1,
     labels = lab_test_final, srt = -45, pos = 1, xpd = TRUE, cex=0.7)
dev.off()
##
##'R' CAN BE CHANGED FOR OTHER ATTRIBUTES
##'R - RED CHANNEL
##'G - GREEN CHANNEL
##'Rb - RED BACKGROUND CHANNEL
##'Gb - GREEN BACKGROUND CHANNEL
pdf("Pseudo_Array_Images.pdf")
for(i in 1:length(colnames(RGList$R)))
{
  imageplot(log2(RGList$R[,i]),RGList$printer, main=paste("Array",i ,"Pseudo-Image - Red Channel", sep=" "))
}
dev.off()
##
##'SCATTER PLOT OF SIGNAL VS BACKGROUND
pdf("Scatter_plots_primary_channel_vs_background_channel.pdf")
for(i in 1:length(colnames(RGList$G)))
{
  plot(log2(RGList$Gb[,i]),log2(RGList$G[,i]),
       main=paste("Scatter Plot of Array", i, "- Green Channel Signal Vs Background"))
  lines(c(-9,99),c(-9,99),col=2)
}
for(i in 1:length(colnames(RGList$R)))
{
  plot(log2(RGList$Rb[,i]),log2(RGList$R[,i]),
       main=paste("Scatter Plot of Array", i, "- Red Channel Signal Vs Background"))
  lines(c(-9,99),c(-9,99),col=2)
}
dev.off()
##'BACKGROUND CORRECTION
RGList <- backgroundCorrect(RGList, method = "normexp")
labs <- colnames(data.frame(cbind(log2(RGList$Gb),log2(RGList$Rb))))
lab_test <- c("IL-17", "IL-17/IFNy", "IL-17/IFNy", "IL-17", "IL-17",
              "IFNy", "IFNy", "IL-17/IFNy", "IFNy", "IL-17", "IFNy", "IL-17/IFNy")
lab_test_Cy3 <- paste(lab_test, "_Cy3", sep="")
lab_test_Cy5 <- paste(lab_test, "_Cy5", sep="")
# FIX: lab_test_final_1 was appended to below without ever being
# initialised, which aborted the script with "object not found".
# Start from an empty vector; the loop interleaves Cy5/Cy3 labels.
lab_test_final_1 <- c()
for(i in 1:12)
{
  lab_test_final_1 <- c(lab_test_final_1, lab_test_Cy5[i], lab_test_Cy3[i])
}
pdf("Boxplot_Background_Corrected.pdf")
boxplot(log2(data.frame(
  RGList$R,RGList$G))[,as.vector(rbind(1:12,13:24))],
  main="Boxplot of Background Corrected RG Channel Intensity Values", xaxt="n",
  col=rep(c("red","green"),24))
axis(1, at=seq(1, length(lab_test_final_1), by=1), labels = FALSE)
text(seq(1+.75, length(lab_test_final_1)+.75, by=1), par("usr")[3]-1.5,
     labels = lab_test_final_1, srt = -45, pos = 1, xpd = TRUE, cex=0.7)
dev.off()
##
##'BACKGROUND CORRECTED DENSITY
pdf("Background_Corrected_Densities.pdf")
plotDensities(RGList)
dev.off()
##
##'MA PLOTS OF THE DIFFERENCE VS AVERAGE LOG BACKGROUND
##'CORRECTED SIGNAL
pdf("MA_Plots_Background_Corrected.pdf")
for(i in 1:length(lab_test))
{
  plotMA(RGList[,i], main=paste(lab_test[i], "- MA Plot"))
}
dev.off()
##
##'LOESS AND MEDIAN NORMALISATION
##'SPIKE NOT AVAILABLE AS METHOD
MAList <- normalizeWithinArrays(RGList, method = "loess")
MAList <- normalizeWithinArrays(MAList, method = "median")
##
##'POST NORMALISATION PLOTS
pdf("Normalised_Plots_Comparison.pdf")
for(i in 1:12)
{
  plotMA(RGList[,i])
  plotMA(MAList[,i])
}
plotDensities(RGList, main="Unnormalised Densities")
plotDensities(MAList, main="Normalised Densities")
boxplot(data.frame(MAList$M),
        main="Boxplot of Normalised Intensity Values", xaxt="n")
axis(1, at=seq(1, length(lab_test), by=1), labels = FALSE)
text(seq(1+.25, length(lab_test)+.25, by=1), par("usr")[3]-1.2,
     labels = lab_test, srt = -45, pos = 1, xpd = TRUE, cex=0.7)
dev.off()
##
##'SCALE NORMALISATION
MAList <- normalizeBetweenArrays(MAList, method = "scale")
pdf("Boxplot_post_scale_normalisation.pdf")
boxplot(data.frame(MAList$M),
        main="Boxplot of Normalised (Post Scale Normalisation) Intensity Values", xaxt="n")
axis(1, at=seq(1, length(lab_test), by=1), labels = FALSE)
text(seq(1+.25, length(lab_test)+.25, by=1), par("usr")[3]-1.2,
     labels = lab_test, srt = -45, pos = 1, xpd = TRUE, cex=0.7)
dev.off()
pdf("Fully_Normalised_MA_Plots_Density.pdf")
for(i in 1:12)
{
  plotMA(MAList[,i])
}
plotDensities(RGList, main="Unnormalised Densities")
plotDensities(MAList, main="Normalised Densities")
dev.off()
##
##'DIFFERENTIAL EXPRESSION
# Build a no-intercept design matrix over the three treatments, fit per-gene
# linear models, and test the three pairwise contrasts with empirical-Bayes
# moderation. Gene lists are filtered at BH-adjusted p < 0.05 and |FC| > 1.5.
treatments <- c("IL17", "IL17_IFNy", "IFNy")
array_names <- c("IL17", "IL17_IFNy", "IL17_IFNy", "IL17", "IL17", "IFNy",
                 "IFNy", "IL17_IFNy", "IFNy", "IL17", "IFNy", "IL17_IFNy")
design <- model.matrix(~0 + factor(array_names, levels = treatments))
colnames(design) <- treatments
num_parameters <- ncol(design)
fit <- lmFit(MAList, design)
cont_mat <- makeContrasts(IFNy-IL17, IL17_IFNy-IL17, IL17_IFNy-IFNy, levels=treatments)
fit2 <- contrasts.fit(fit, contrasts=cont_mat)
fit2 <- eBayes(fit2)
gene_list_IFNy_IL17 <- topTable(fit2, coef="IFNy - IL17", p.value=0.05, lfc=log2(1.5), adjust.method="BH")
gene_list_IL17IFNy_IL17 <- topTable(fit2, coef="IL17_IFNy - IL17", p.value=0.05, lfc=log2(1.5), adjust.method="BH")
gene_list_IL17IFNy_IFNy <- topTable(fit2, coef="IL17_IFNy - IFNy", p.value=0.05, lfc=log2(1.5), adjust.method="BH")
|
1a80fb0353a84b559f9b06fcc684d19c5235c5f5
|
b33611762071f9277bf18d712d3beaddb1683788
|
/man/upsamplePitchContour.Rd
|
c18a9f1f57eda08c5ec089eadb949b88a6d524cc
|
[] |
no_license
|
fxcebx/soundgen
|
abc6bb7d7aded02e11fe6bd88cb058ca0947f75f
|
2d8ae67893509bd29d132aaa04c0e9385879ddd9
|
refs/heads/master
| 2020-09-06T21:51:23.464374
| 2019-10-31T17:43:11
| 2019-10-31T17:43:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,334
|
rd
|
upsamplePitchContour.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities_analyze.R
\name{upsamplePitchContour}
\alias{upsamplePitchContour}
\title{Upsample pitch contour}
\usage{
upsamplePitchContour(pitch, len, plot = FALSE)
}
\arguments{
\item{pitch}{numeric vector of pitch values, including NAs (as returned by
pitch_app)}
\item{len}{required length}
\item{plot}{if TRUE, plots the old and new pitch contours}
}
\description{
Internal soundgen function
}
\details{
Intended to up- or downsample pitch contours containing NA values using
linear interpolation ("approx"). The problem is that NA segments should also
be expanded when upsampling, and approx() doesn't do that. Algorithm: when
upsampling, first interpolates NAs (constant at beg/end, linear in the
middle), then runs approx(), and finally puts NAs back in where they belong.
}
\examples{
pitchManual = c(130, 150, 250, 290, 320, 300, 280, 270, 220)
soundgen:::upsamplePitchContour(pitchManual, len = 5, plot = TRUE)
soundgen:::upsamplePitchContour(pitchManual, len = 25, plot = TRUE)
pitchManual = c(NA, 150, 250, NA, NA, 300, 280, 270, NA)
soundgen:::upsamplePitchContour(pitchManual, len = 5, plot = TRUE)
soundgen:::upsamplePitchContour(pitchManual, len = 25, plot = TRUE)
soundgen:::upsamplePitchContour(c(NA, NA), len = 5)
}
\keyword{internal}
|
0de774875df50356e3ed7d4ffa024a116eeff800
|
eca810397cfa067c4c7f8ced66c4b748b8a1e8c9
|
/temp/DataPrep.R
|
c1ee039569a961ddcdd8fe54f8a86ca578e9c7fa
|
[] |
no_license
|
PennBBL/pncPreterm
|
c149319dfdbb801eabf0e0acf15f9db5dc138cec
|
936cb62f63f652b2adb393dbafe6bf31891c313b
|
refs/heads/master
| 2022-06-20T18:22:41.423267
| 2020-05-06T13:12:30
| 2020-05-06T13:12:30
| 116,976,795
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,406
|
r
|
DataPrep.R
|
# DataPrep.R: assemble the n=1601 PNC data freeze, merge in gestational-age
# data, apply health/imaging exclusions, and save the final prematurity
# sample (n=278) plus a no-psych-meds sensitivity sample (n=243).
#################
### LOAD DATA ###
#################
##Demographic data (n=1629)
data.demo <- read.csv("/data/jux/BBL/studies/pnc/pncDataFreeze/n1601_dataFreeze/demographics/n1601_demographics_go1_20161212.csv", header=TRUE)
##Environment data (n=1601)
data.environ <- read.csv("/data/jux/BBL/studies/pnc/pncDataFreeze/n1601_dataFreeze/environment/n1601_go1_environment_factor_scores_tymoore_20150909.csv", header=TRUE)
##Clinical data
#Screening diagnoses (n=1601)
data.diag <- read.csv("/data/jux/BBL/studies/pnc/pncDataFreeze/n1601_dataFreeze/clinical/n1601_goassess_psych_summary_vars_20131014.csv", header=TRUE, na.strings=".")
#Psychosis (n=1601)
data.psy <- read.csv("/data/jux/BBL/studies/pnc/pncDataFreeze/n1601_dataFreeze/clinical/n1601_diagnosis_dxpmr_20170509.csv", header=TRUE, na.strings=".")
#Suicidal ideation (n=1601)
data.suicidal <- read.csv("/data/jux/BBL/studies/pnc/pncDataFreeze/n1601_dataFreeze/clinical/n1601_goassess_itemwise_smryvars_suicide_20170209.csv", header=TRUE, na.strings=".")
#Item level psychiatric interview (n=1601)
data.items <- read.csv("/data/jux/BBL/studies/pnc/pncDataFreeze/n1601_dataFreeze/clinical/n1601_goassess_112_itemwise_vars_20161214.csv", header=TRUE, na.strings=".")
#Bifactors (n=1601)
data.bifactors <- read.csv("/data/jux/BBL/studies/pnc/pncDataFreeze/n1601_dataFreeze/clinical/n1601_goassess_itemwise_bifactor_scores_20161219.csv", header=TRUE, na.strings=".")
#Correlated traits (n=1601)
data.corrTraits <- read.csv("/data/jux/BBL/studies/pnc/pncDataFreeze/n1601_dataFreeze/clinical/n1601_goassess_itemwise_corrtraits_scores_20161219.csv", header=TRUE, na.strings=".")
#State trait anxiety data (n=1391)
data.stai <- read.csv("/data/jux/BBL/studies/pnc/pncDataFreeze/n1601_dataFreeze/clinical/n1601_stai_pre_post_itemwise_smry_factors_20170131.csv", header=TRUE, na.strings=".")
##Cognitive scores
#Cognitive factor scores (n=1601)
data.cogFactors <- read.csv("/data/jux/BBL/studies/pnc/pncDataFreeze/n1601_dataFreeze/cnb/n1601_cnb_factor_scores_tymoore_20151006.csv", header=TRUE, na.strings=".")
#14 cog tests (n=1601)
data.cogTests <- read.csv("/data/jux/BBL/studies/pnc/pncDataFreeze/n1601_dataFreeze/cnb/n1601_cnb_zscores_all_fr_20161215.csv", header=TRUE, na.strings=".")
#WRAT scores (n=1601)
data.wrat <- read.csv("/data/jux/BBL/studies/pnc/pncDataFreeze/n1601_dataFreeze/cnb/n1601_cnb_wrat_scores_20161215.csv", header=TRUE, na.strings=".")
##Exclusion data (n=1601)
#Health exclusion (use the new healthExcludev2 variable)
data.healthExclude <- read.csv("/data/jux/BBL/studies/pnc/pncDataFreeze/n1601_dataFreeze/health/n1601_health_20170421.csv", header=TRUE, na.strings=".")
#T1 QA exclusion (n=1601)
data.t1QA <- read.csv("/data/jux/BBL/studies/pnc/pncDataFreeze/n1601_dataFreeze/neuroimaging/t1struct/n1601_t1QaData_20170306.csv", header=TRUE, na.strings=".")
##Brain data
#Ravens NMF (n=1396)
data.ravensNMF <- read.csv("/data/jux/BBL/projects/pncPreterm/subjectData/NmfResults26Bases_bblids.csv", header=TRUE)
#JLF volume ROIs (n=1601)
data.volROIs <- read.csv("/data/jux/BBL/studies/pnc/pncDataFreeze/n1601_dataFreeze/neuroimaging/t1struct/n1601_jlfAntsCTIntersectionVol_20170412.csv", header=TRUE)
##Gestational age (n=)
data.ga <- read.csv("/data/jux/BBL/projects/pncPreterm/subjectData/gaData_final.csv", header=TRUE, na.strings="NA")
############################
#### TRANSFORM VARIABLES ###
############################
#Transform the age variable from months to years
data.demo$age <- (data.demo$ageAtScan1)/12
#Make age squared (demeaned, so the linear and quadratic terms are less collinear)
data.demo$ageSq <- I(scale(data.demo$age, scale=FALSE, center=TRUE)^2)
#Recode male as 0 and female as 1 (original coding: 1=male, 2=female)
data.demo$sex[which(data.demo$sex==1)] <- 0
data.demo$sex[which(data.demo$sex==2)] <- 1
data.demo$sex <- as.factor(data.demo$sex)
#Make White (1) vs Non-white (0)
data.demo$white <- 0
data.demo$white[which(data.demo$race==1)] <- 1
data.demo$white <- as.factor(data.demo$white)
#Make preterm variable (standard clinical cutoff: gestational age < 37 weeks)
data.ga$preterm <- NA
data.ga$preterm[data.ga$ga < 37] <- 1
data.ga$preterm[data.ga$ga >= 37] <- 0
#Make preterm bins (assignments cascade from least to most severe,
#so each subject ends up in the narrowest bin that applies)
data.ga$pretermBins <- "fullterm"
data.ga$pretermBins[data.ga$ga < 39] <- "earlyterm"
data.ga$pretermBins[data.ga$ga < 37] <- "latePreterm"
data.ga$pretermBins[data.ga$ga < 34] <- "moderatelyPreterm"
data.ga$pretermBins[data.ga$ga < 32] <- "veryPreterm"
data.ga$pretermBins[data.ga$ga < 28] <- "extremeleyPreterm"
#Remove sui001 and sui002 from data.items because they are redundant with data.suicidal
data.items$sui001 <- NULL
data.items$sui002 <- NULL
##################
### MERGE DATA ###
##################
#Outer joins on (bblid, scanid); data.ga is merged on bblid only since it
#has no scanid column.
dataMerge1 <-merge(data.demo,data.environ, by=c("bblid","scanid"), all=TRUE)
dataMerge2 <-merge(dataMerge1,data.diag, by=c("bblid","scanid"), all=TRUE)
dataMerge3 <-merge(dataMerge2,data.psy, by=c("bblid","scanid"), all=TRUE)
dataMerge4 <-merge(dataMerge3,data.suicidal, by=c("bblid","scanid"), all=TRUE)
dataMerge5 <-merge(dataMerge4,data.items, by=c("bblid","scanid"), all=TRUE)
dataMerge6 <-merge(dataMerge5,data.bifactors, by=c("bblid","scanid"), all=TRUE)
dataMerge7 <-merge(dataMerge6,data.corrTraits, by=c("bblid","scanid"), all=TRUE)
dataMerge8 <-merge(dataMerge7,data.stai, by=c("bblid","scanid"), all=TRUE)
dataMerge9 <-merge(dataMerge8,data.cogFactors, by=c("bblid","scanid"), all=TRUE)
dataMerge10 <-merge(dataMerge9,data.cogTests, by=c("bblid","scanid"), all=TRUE)
dataMerge11 <-merge(dataMerge10,data.wrat, by=c("bblid","scanid"), all=TRUE)
dataMerge12 <-merge(dataMerge11,data.healthExclude, by=c("bblid","scanid"), all=TRUE)
dataMerge13 <-merge(dataMerge12,data.t1QA, by=c("bblid","scanid"), all=TRUE)
dataMerge14 <-merge(dataMerge13,data.ravensNMF, by=c("bblid","scanid"), all=TRUE)
dataMerge15 <-merge(dataMerge14,data.volROIs, by=c("bblid","scanid"), all=TRUE)
dataMerge16 <-merge(dataMerge15,data.ga, by="bblid", all=TRUE)
#Retain only the 1601 bblids (demographics has 1629)
data.n1601 <- dataMerge16[match(data.t1QA$bblid, dataMerge16$bblid, nomatch=0),]
#Put bblids in ascending order
data.ordered <- data.n1601[order(data.n1601$bblid),]
#Count the number of subjects (should be 1601)
n <- nrow(data.ordered)
###########################
### SUBSET TO GA SAMPLE ###
###########################
#Remove those who are missing ga data
data.preterm <- data.ordered[!is.na(data.ordered$preterm),]
#Count the number of subjects (should be 345)
n_preterm <- nrow(data.preterm)
#################################
### APPLY EXCLUSIONS AND SAVE ###
#################################
##Count the total number excluded for healthExcludev2=1 (1=Excludes those with medical rating 3/4, major incidental findings that distort anatomy, psychoactive medical medications)
#Included: n=303; Excluded: n=42, but medical.exclude (n=21) + incidental.exclude (n=8) + medicalMed.exclude (n=17) = 46, so 4 people were excluded on the basis of two or more of these criteria
#NOTE(review): the subset-assignments below index with flag==1; if an
#exclusion flag contains NA, the corresponding ACROSS.INCLUDE entry is set
#to NA rather than left at 1, which would silently deflate sum() -- confirm
#the flag columns are NA-free. The literals 345 and 278 are the expected
#sample sizes at each stage, hard-coded rather than derived.
data.final <- data.preterm
data.final$ACROSS.INCLUDE.health <- 1
data.final$ACROSS.INCLUDE.health[data.final$healthExcludev2==1] <- 0
health.include<-sum(data.final$ACROSS.INCLUDE.health)
health.exclude<-345-health.include
#Count the number excluded just medical rating 3/4 (GOAssess Medial History and CHOP EMR were used to define one summary rating for overall medical problems) (n=21)
data.final$ACROSS.INCLUDE.medical <- 1
data.final$ACROSS.INCLUDE.medical[data.final$medicalratingExclude==1] <- 0
medical.include<-sum(data.final$ACROSS.INCLUDE.medical)
medical.exclude<-345-medical.include
#Count the number excluded for just major incidental findings that distort anatomy (n=8)
data.final$ACROSS.INCLUDE.incidental <- 1
data.final$ACROSS.INCLUDE.incidental[data.final$incidentalFindingExclude==1] <- 0
incidental.include<-sum(data.final$ACROSS.INCLUDE.incidental)
incidental.exclude<-345-incidental.include
#Count the number excluded for just psychoactive medical medications (n=17)
data.final$ACROSS.INCLUDE.medicalMed <- 1
data.final$ACROSS.INCLUDE.medicalMed[data.final$psychoactiveMedMedicalv2==1] <- 0
medicalMed.include<-sum(data.final$ACROSS.INCLUDE.medicalMed)
medicalMed.exclude<-345-medicalMed.include
#Subset the data to just the that pass healthExcludev2 (n=303)
data.subset <-data.final[which(data.final$ACROSS.INCLUDE.health == 1), ]
##Count the number excluded for failing to meet structural image quality assurance protocols
#Included: n=282; Excluded: n=63
data.subset$ACROSS.INCLUDE.QA <- 1
data.subset$ACROSS.INCLUDE.QA[data.subset$t1Exclude==1] <- 0
QA.include<-sum(data.subset$ACROSS.INCLUDE.QA)
QA.exclude<-345-QA.include
###Exclude those with ALL problems (health problems and problems with their t1 data) (included n=282)
data.exclude <- data.subset[which(data.subset$healthExcludev2==0 & data.subset$t1Exclude == 0 ),]
##################################
### Remove those missing medu1 ###
##################################
#Remove those who are missing maternal level of education data
data.final <- data.exclude[!is.na(data.exclude$medu1),]
#Count the number of subjects (should be 278)
n_final <- nrow(data.final)
####################
### Demographics ###
####################
#Demographics for the paper
meanAge<-mean(data.final$age)
sdAge<-sd(data.final$age)
rangeAge<-range(data.final$age)
genderTable<-table(data.final$sex)
#################
### Save Data ###
#################
#Save final dataset
saveRDS(data.final,"/data/jux/BBL/projects/pncPreterm/subjectData/n278_Prematurity_allData.rds")
#Save the bblids and scanids for the final sample (n=)
IDs <- c("bblid", "scanid")
bblidsScanids <- data.final[IDs]
#Remove header
names(bblidsScanids) <- NULL
#Save list
write.csv(bblidsScanids, file="/data/jux/BBL/projects/pncPreterm/subjectData/n278_Prematurity_bblids_scanids.csv", row.names=FALSE)
############################
### SENSITIVITY ANALYSES ###
############################
#Count the number taking psychotropic psychiatric medications
#Included: n=243; Excluded: n=35
data.final$ACROSS.INCLUDE.psychMeds <- 1
data.final$ACROSS.INCLUDE.psychMeds[data.final$psychoactiveMedPsychv2==1] <- 0
psychMeds.include<-sum(data.final$ACROSS.INCLUDE.psychMeds)
psychMeds.exclude<-278-psychMeds.include
#Exclude those who were on psychiatric medications (included n=243)
data.sensitivity <- data.final[which(data.final$ACROSS.INCLUDE.psychMeds==1),]
#Save sensitivity dataset
saveRDS(data.sensitivity,"/data/jux/BBL/projects/pncPreterm/subjectData/n243_Prematurity_NoPsyMeds.rds")
|
320bf2e73f602095f74225495c0f89c394ff1e84
|
be5f7e66344c9f2e0ab2ed2af54605d7d99dd790
|
/Chemostat_simulation_recovery/Optimization_algorithm_chemostat_model.R
|
b02539bb7b17368e0abcb722ee2200b571a29775
|
[] |
no_license
|
claycressler/deb_fitting
|
f4db2f78da5a9f92746f6cea0cc2d4f5e4d5e18d
|
cb4b4559e64f7e9f772b34e6b76dd162500cd667
|
refs/heads/master
| 2020-12-25T16:57:25.668226
| 2017-09-28T19:51:04
| 2017-09-28T19:51:04
| 35,557,425
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,278
|
r
|
Optimization_algorithm_chemostat_model.R
|
# Setup for the chemostat DEB model recovery study: build a pomp object
# from simulated data, then define (and cache to disk) the four "true"
# parameter sets used to generate synthetic observations.
# NOTE(review): library() is preferred over require() for hard
# dependencies, since require() only warns on failure.
require(pomp)
require(plyr)
## I want to use an algorithm that allows me to hone in on the best
## parameter combinations, starting from near complete ignorance. The
## algorithms of pomp, liked iterated filtering and trajectory
## matching, are really only intended to work locally. Run by
## themselves, they do not guarantee finding the global optimum. So it
## is necessary to go through a many-step process.
## Begin by loading a pomp object with data.
source('DEB_chemostat_constantK_C-code.R')
load('LH_chemostat_constantK_fastE_F05.rda')
# Subsample the simulated trajectory (every 20th step starting at row 11)
# to mimic sparse observation times; y2 = length, y4 = reproduction.
simdata <- out11.fast[seq(11,751,20),c('time','y2','y4')]
colnames(simdata) <- c('Age','Lobs','Robs')
obsdata <- simdata
L_sd <- 0.05
# Add measurement noise: Gaussian on length, Poisson on offspring counts.
obsdata$Lobs <- rnorm(length(simdata$Lobs),simdata$Lobs,L_sd)
obsdata$Robs <- rpois(length(simdata$Robs),simdata$Robs)
# Assemble the pomp object; stepfn/dmeas/rmeas/skel/trans/untrans come
# from the sourced C-code file above.
pompBuilder(
    name='DEB_chemostat_constantK',
    data=obsdata,
    times='Age',
    t0=0,
    step.fn=stepfn,
    step.fn.delta.t=0.1,
    dmeasure=dmeas,
    rmeasure=rmeas,
    skeleton.type='vectorfield',
    skeleton=skel,
    parameter.transform=trans,
    parameter.inv.transform=untrans,
    statenames=c('E','L','Re','R'),
    paramnames=c('K','km','eG','eR','v','Rmbar','f','E.0','L.0','Re.0','R.0','L.sd','PA_sd','PC_sd')) -> deb
## True parameter values: four scenarios differing in food level F and
## DEB rates; computed once and cached to true_parameters.rda.
if(!file.exists('true_parameters.rda')) {
    true.parameters <- vector(mode='list',length=4)
    F <- 0.05
    true.parameters[[1]] <- c(K=0.6, km=0.33, eG=0.0017, eR=0.00868, v=18.1, Rmbar=0.0189, f=(0.005187*F/(3.09e-5+F)), E.0=0.005187/18.1*0.85^3, L.0=0.85, Re.0=0, R.0=0, L.sd=0.02, PA_sd=0.1, PC_sd=0.1)
    F <- 0.025
    true.parameters[[2]] <- c(K=0.7, km=0.23, eG=0.0017, eR=0.00868, v=3.1, Rmbar=0.0189, f=(0.005187*F/(3.09e-5+F)), E.0=0.005187/3.1*0.75^3, L.0=0.75, Re.0=0, R.0=0, L.sd=0.02, PA_sd=0.1, PC_sd=0.1)
    F <- 0.05
    true.parameters[[3]] <- c(K=0.4, km=0.1, eG=0.001, eR=0.0048, v=10, Rmbar=0.0189, f=(0.005187*F/(3.09e-5+F)), E.0=0.005187/10*0.5^3, L.0=0.5, Re.0=0, R.0=0, L.sd=0.02, PA_sd=0.1, PC_sd=0.1)
    F <- 0.025
    true.parameters[[4]] <- c(K=0.5, km=0.15, eG=0.001, eR=0.005, v=5, Rmbar=0.0189, f=(0.005187*F/(3.09e-5+F)), E.0=0.005187/5*1^3, L.0=1, Re.0=0, R.0=0, L.sd=0.02, PA_sd=0.1, PC_sd=0.1)
    save(true.parameters, file='true_parameters.rda')
} else load('true_parameters.rda')
cummin <- function(x) sapply(2:length(x), function(y) x[y]-x[y-1])
## Generate data (to overwrite the data currently in the pomp object deb).
## For each of the four true parameter sets, integrate the deterministic
## skeleton and add measurement noise (Gaussian on length, Poisson on
## reproduction) at every-other-day observation times.
## FIX: the original guarded this block with file.exists('observed_data.rda')
## but never saved the file and had no else-branch to load it -- so the
## cache could never exist, and if it somehow did, observed.data would be
## undefined and the estimation loop below would error. Mirror the
## save/else-load pattern used for true_parameters.rda above.
if (!file.exists('observed_data.rda')) {
    observed.data <- vector(mode='list',length=4)
    for (i in 1:4) {
        x <- trajectory(deb, params=true.parameters[[i]], as.data.frame=TRUE, times=seq(0,75,1))
        ## simulate measurements, assuming data observation every 2 days
        Lobs <- rnorm(length(x[seq(1,75,2)+1,2]),
                      mean=x[seq(1,75,2)+1,2],
                      sd=unname(true.parameters[[i]]['L.sd']))
        Robs <- rpois(length(x[seq(1,75,2)+1,4]),
                      x[seq(1,75,2)+1,4])
        observed.data[[i]] <- as.data.frame(cbind(time=seq(1,75,2),
                                                  cbind(Lobs,Robs)))
    }
    save(observed.data, file='observed_data.rda')
} else load('observed_data.rda')
# Three-stage global search per dataset: (1) evaluate the likelihood on a
# 10000-point Sobol design, (2) refine the best 100 points with subplex
# trajectory matching, (3) jitter the best 50 refined fits and refine
# again. Results are checkpointed to .rda files after each stage.
estimated.pars <- c('K','km','eG','eR','v','Rmbar','f','E.0','L.0')
for (i in 1:4) {
    # Rebuild the pomp object with dataset i's simulated observations.
    pompBuilder(
        name='DEB_chemostat_constantK',
        data=observed.data[[i]],
        times='time',
        t0=0,
        step.fn=stepfn,
        step.fn.delta.t=0.1,
        dmeasure=dmeas,
        rmeasure=rmeas,
        skeleton.type='vectorfield',
        skeleton=skel,
        parameter.transform=trans,
        parameter.inv.transform=untrans,
        statenames=c('E','L','Re','R'),
        paramnames=c('K','km','eG','eR','v','Rmbar','f','E.0','L.0','Re.0','R.0','L.sd','PA_sd','PC_sd')) -> deb
    ## Encompass our initial ignorance of the estimated parameter values by a box
    # (initial conditions Re.0/R.0 and the observation SDs are held fixed).
    box <- cbind(
        lower=c(K=0.1, km=0.01, eG=0.0001, eR=0.0001, v=1, Rmbar=0.001, f=0.0001, E.0=0.00001, L.0=0.5, Re.0=0, R.0=0, L.sd=0.02, PA_sd=0.1, PC_sd=0.1),
        upper=c(K=0.9, km=1, eG=0.01, eR=0.01, v=25, Rmbar=0.1, f=0.01, E.0=0.0001, L.0=1.2, Re.0=0, R.0=0, L.sd=0.02, PA_sd=0.1, PC_sd=0.1)
        )
    ## Create a Sobol low discrepancy sequence of length 10000
    guesses <- sobolDesign(lower=box[,'lower'],upper=box[,'upper'],nseq=10000)
    guesses$id <- seq_len(nrow(guesses))
    joblist <- dlply(guesses, ~id, unlist)
    # Likelihood evaluation only (eval.only=TRUE): no optimisation yet.
    workfn <- function(start, po) {
        tm <- traj.match(po, start=start, transform=TRUE, est=estimated.pars,
                         method='subplex',maxit=2000, eval.only=TRUE)
        list(par=start,
             lik=tm$value,
             conv=tm$convergence)
    }
    ## calculate the logliklihood of each parameter combination
    lik <- lapply(joblist, workfn, po=deb)
    results = ldply(lik, function(x) c(x$par,loglik=x$lik))
    ## sort by log-likelihood
    results = results[order(results$loglik,decreasing=TRUE),]
    save(results, file=paste0('estimation_results_',i,'_stage_one.rda'))
    ## run subplex on the top 100 parameter sets to hone in more
    tm.results <- vector(mode='list', length=100)
    jvec <- vector()
    for (j in 1:100) {
        # Poor-man's progress bar: rewrite progress.txt each iteration.
        jvec <- c(jvec, as.character(j))
        write(jvec, file='progress.txt')
        # Columns 2:15 hold the 14 parameters (column 1 is the id).
        pars <- as.numeric(results[j,2:15])
        names(pars) <- names(results[j,2:15])
        y <- traj.match(deb, start=pars, transform=TRUE,
                        est=estimated.pars,
                        method='subplex',maxit=2000)
        tm.results[[j]] <- y
    }
    ## extract the parameter values
    tm.results <- cbind(t(sapply(tm.results, function(x) x$params)),
                        sapply(tm.results, function(x) logLik(x)))
    colnames(tm.results)[ncol(tm.results)] <- 'loglik'
    tm.results <- tm.results[order(tm.results[,'loglik'],decreasing=TRUE),]
    save(tm.results, file=paste0('estimation_results_',i,'_stage_two.rda'))
    ## I am going to take the top 50, create novel variation around each
    ## putative optimal parameter set, and then run subplex again
    # 10 jittered copies per fit (multiplicative Gaussian noise, sd = 10%
    # of each estimated parameter's value; fixed parameters untouched).
    tm.vary <- array(NA, dim=c(500,14))
    for (q in 1:50) {
        tm.vary[((q-1)*10+1):(q*10),] <-
            t(sapply(1:10, function(x) {
                n <- names(tm.results[q,1:14])
                w <- which(n%in%estimated.pars)
                p <- as.numeric(tm.results[q,1:14])
                p[w] <- rnorm(length(p[w]),mean=p[w],sd=p[w]/10)
                p
            }))
    }
    colnames(tm.vary) <- colnames(tm.results)[1:14]
    # Clamp K back inside its admissible range after jittering.
    tm.vary[which(tm.vary[,'K'] > 1),'K'] <- 0.99
    tm.vary.results <- vector(mode='list', length=500)
    jvec <- vector()
    for (j in 1:500) {
        jvec <- c(jvec,as.character(j))
        write(jvec, file='progress.txt')
        pars <- as.numeric(tm.vary[j,])
        names(pars) <- names(tm.vary[j,])
        y <- traj.match(deb, start=pars, transform=TRUE,
                        est=estimated.pars,
                        method='subplex',maxit=5000)
        tm.vary.results[[j]] <- y
        # Saved inside the loop deliberately: checkpoints a long-running
        # job so partial results survive a crash.
        save(tm.vary.results, file=paste0('estimation_results_',i,'_stage_three.rda'))
    }
}
|
b6f6fe2c14ae6ef43b40a576c3a38af13c6a8978
|
4b5d1178e3fbc94223c974926bbdd417182c314d
|
/R/linreg_estimators.R
|
27eb6453f0a260fbeede4ee811267fa0ff2f815b
|
[] |
no_license
|
rossklin/dynpan
|
6686d891af9834eaa94a4190cfb4c325b18c3b2f
|
2f655de1f350e54375621edf3010b67eff37a4ce
|
refs/heads/master
| 2021-01-10T22:01:39.875849
| 2016-08-28T15:39:39
| 2016-08-28T15:39:39
| 36,667,036
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 24,196
|
r
|
linreg_estimators.R
|
## Software License Agreement (BSD License)
##
## Copyright (c) 2014, Tilo Wiklund (tilo@wiklund.co)
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
##
## Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
##
## Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
##
## The names of its contributors may not be used to endorse or promote products
## derived from this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
## Build the (data, response, design) matrices for a regression between two
## time.tables, aligned on shared index/time values.
##
## Arguments:
##   x, y          time.tables of predictors and responses; if y is NULL,
##                 both input and output columns are taken from x.
##   idxs          index/time rows to use; defaults to all complete cases
##                 (intersected across x and y when both are given).
##   use.auxiliary whether auxiliary columns are added to the defaults.
##   input.cols,
##   output.cols   explicit column selections; required when y is NULL
##                 would make the defaults ambiguous.
##   modelfun      maps the raw data matrix to the design matrix
##                 (default: raw degree-2 polynomial expansion).
##   ...           forwarded to modelfun.
##   has.no.na     if TRUE, skip the complete.cases() filtering.
##
## Returns a list: data (raw inputs), response, design (modelfun output),
## plus the resolved input.cols/output.cols.
##
## NOTE(review): the stopifnot uses elementwise `&`; both operands appear
## scalar here so behavior matches `&&`, but `&&` would be the idiomatic
## choice. index_names/time_name/measurement_names/auxiliary_names/index
## are time.table accessors defined elsewhere in this package.
regression_matrices <- function( x, y=NULL, idxs=NULL
                               , use.auxiliary=FALSE
                               , input.cols=NULL, output.cols=NULL
                               , modelfun=function(x) poly(raw=TRUE, x, degree=2)
                               , ...
                               , has.no.na=FALSE ) {
    # When y is supplied, it must share x's index structure and time column.
    if(!is.null(y)) stopifnot( setequal(index_names(x), index_names(y)) &
                               time_name(x) == time_name(y) )
    # Default input columns: all measurements (plus auxiliaries on request).
    if(is.null(input.cols)) {
        if(is.null(y)) stop("Need to specify input columns if y==NULL")
        input.cols <- c( measurement_names(x)
                       , if(use.auxiliary) auxiliary_names(x) else c() )
    }
    # Default output columns, analogously from y.
    if(is.null(output.cols)) {
        if(is.null(y)) stop("Need to specify output columns if y==NULL")
        output.cols <- c( measurement_names(y)
                        , if(use.auxiliary) auxiliary_names(y) else c() )
    }
    # Default row selection: complete cases, intersected across x and y
    # when both tables are given (merge with all=FALSE = inner join).
    if(is.null(idxs)) {
        if(is.null(y)) {
            idxs <- index(x, with.time=TRUE)
            if(!has.no.na) {
                cc <- complete.cases( x[, c( index_names(x)
                                           , time_name(x)
                                           , input.cols
                                           , output.cols )
                                       , with=F ])
                idxs <- idxs[cc]
            }
        } else {
            idxx <- index(x, with.time=TRUE)
            idxy <- index(y, with.time=TRUE)
            if(!has.no.na) {
                ccx <- complete.cases( x[, c( index_names(x)
                                            , time_name(x)
                                            , input.cols )
                                        , with=F ] )
                # index_names(x) is valid for y too: the stopifnot above
                # guarantees the two tables share the same index names.
                ccy <- complete.cases( y[, c( index_names(x)
                                            , time_name(x)
                                            , output.cols )
                                        , with=F ] )
                idxx <- idxx[ccx]
                idxy <- idxy[ccy]
            }
            idxs <- merge(idxx, idxy, all=FALSE)
        }
    }
    # Single-table case: read responses from x as well.
    if(is.null(y)) y <- x
    # Extract aligned matrices and expand the inputs through modelfun.
    data.matrix <- as.matrix(x[idxs, input.cols, with=F])
    resp.matrix <- as.matrix(y[idxs, output.cols, with=F])
    design.matrix <- modelfun(data.matrix, ...)
    #
    list( data=data.matrix, response=resp.matrix, design=design.matrix
        , input.cols = input.cols, output.cols = output.cols )
}
#' Best subset (linear) regression on a time.table
#'
#' Perform a best subsets regression procedure on data stored in
#' time.table(s). Produces one linear regression subset (the one with the lowest
#' residual sum of squares) for each subset size of the covariates and each
#' component of the dependent variable.
#'
#' @param x \code{time.table} that contains predictors and, optionally, dependent variable(s).
#' @param y \code{time.table} containing dependent variable(s).
#' @param idxs index/time values to include, defaults to all complete cases
#' @param use.auxiliary whether to include auxiliary values
#' @param input.cols column(s) of \code{x} to use for computing covariate(s)
#' @param output.cols column(s) of \code{x} or \code{y} to use as dependent varaiable(s)
#' @param ... additional arguments to pass to \code{modelfun}
#' @param modelfun function that produces the actual covariates used in the linear regression
#' @param has.no.na whether user guarantees \code{x}/\code{y} contian no \code{NA} values
#'
#' @export
time_table_leaps <- function( x, y=NULL, idxs=NULL
                            , use.auxiliary=FALSE
                            , input.cols=NULL, output.cols=NULL
                            , ...
                            , modelfun=function(x) polySane(raw=TRUE, x, degree=2)
                            , has.no.na=FALSE ) {
  if(nrow(x) < 2) stop("For some reason leaps breaks with only one observation")
  # Build data/response/design matrices for the selected rows and columns.
  matrices <- regression_matrices( x=x, y=y, idxs=idxs
                                 , use.auxiliary=use.auxiliary
                                 , input.cols=input.cols, output.cols=output.cols
                                 , modelfun=modelfun
                                 , ...
                                 , has.no.na=has.no.na)
  #
  # One exhaustive best-subsets search (leaps::regsubsets) per response
  # column, keeping the single best model of every subset size.
  estimations <- apply(matrices$response, 2, function(resp) {
    regsubsets( matrices$design, resp
              , nbest = 1
              , nvmax=ncol(matrices$design)
              , method="exhaustive"
                # The response hasn't been normalised!
              , intercept=TRUE
              , id=seq_len(ncol(matrices$design))
              , matrix=TRUE
              , matrix.logical=TRUE )
  })
  #
  # Expand the sparse per-model coefficients into a dense matrix with one
  # column per model (rows: intercept followed by every design term).
  all.coef <- lapply(setNames(nm=names(estimations)), function(fac) {
    nmodel <- estimations[[fac]]$nvmax - estimations[[fac]]$intercept
    cfm <- matrix(0, nrow=nmodel, ncol=ncol(matrices$design)+1)
    colnames(cfm) <- c("(Intercept)", colnames(matrices$design))
    for(id in seq_len(nmodel)) {
      cf <- coef(estimations[[fac]], id)
      cfm[id, names(cf)] <- cf
    }
    t(cfm)
  })
  #
  # Package everything up as a "dynpan_leaps" fit object.
  result <-
    list( modelfun = modelfun
        , input.cols = matrices$input.cols
        , output.cols = matrices$output.cols
        , matrices = matrices
        , nmodel = sapply(estimations, function(xs) xs$nvmax-1)
        , coef = all.coef
        , estimations = estimations )
  class(result) <- "dynpan_leaps"
  result
}
## Summarise a best-subsets fit: one row per (response factor, subset size)
## with Cp, BIC, R2, adjusted R2, RSS, the number of selected terms, and the
## coefficient estimates as Term.* columns.
summary.dynpan_leaps <- function(dp) {
  estimations <- dp$estimations
  extr.stats <- function(fac) {
    stats <- summary(estimations[[fac]])
    # model-selection statistics straight from leaps::regsubsets
    basic <- as.data.table(stats[c("cp", "bic", "rsq", "adjr2", "rss")])
    # dense coefficient estimates, one column per design term
    terms <- as.data.table(t(dp$coef[[fac]]))
    # number of included terms per model ("*" marks inclusion in outmat)
    nterm <- rowSums(stats$outmat == "*")
    data.table(basic, nterm=nterm, Term=terms)
  }
  stats <-
    data.table(factor=names(estimations))[,extr.stats(factor), by="factor"]
  # tag rows so results from different selection methods can be combined
  stats[,method:="exhaustive.leaps"]
  setnames( stats
          , c("cp", "bic", "rsq", "adjr2", "rss")
          , c("Cp", "BIC", "R2", "R2adj", "RSS") )
  stats
}
## Extract coefficient vectors from a best-subsets fit.
##
## ids: which subset sizes to extract. NULL means every size; an unnamed
##   list/vector is used for every response column; a named list maps
##   response names to sizes.
## Returns, per response factor, a list of dense coefficient vectors
## (intercept first, terms not in the subset set to 0).
coef.dynpan_leaps <- function(dp, ids=NULL) {
  # Ugh, I'm trying to be "convienient", I'm sure this will bite
  # me in the ass soon enough.
  picks <- if(is.null(ids)) {
    lapply(dp$nmodel, seq_len)
  } else if(is.null(names(ids))) {
    setNames(rep(list(unlist(ids)), length(dp$output.cols)), dp$output.cols)
  } else {
    stopifnot(all(names(ids) %in% dp$output.cols))
    ids
  }
  ## NOTE: Just an ugly hack, change time_table_leaps to make this nice :P
  # Recover design-column names: from the cached matrices when present,
  # otherwise by running modelfun on a dummy all-zero two-row input.
  design.matrix <- if(!is.null(dp$matrices$design)) {
    dp$matrices$design
  } else {
    tmp <- matrix(ncol=length(dp$input.cols), nrow=2, 0)
    colnames(tmp) <- dp$input.cols
    dp$modelfun(tmp)
  }
  lapply(setNames(nm=names(picks)), function(fac) {
    estimation <- dp$estimations[[fac]]
    lapply(picks[[fac]], function(id) {
      # expand the sparse leaps coefficients into a dense named vector
      tmpcf <- coef(estimation, id)
      tmp <- setNames( numeric(ncol(design.matrix)+1)
                     , c("(Intercept)", colnames(design.matrix)) )
      tmp[names(tmpcf)] <- tmpcf
    })
  })
}
## Predict from a best-subsets fit.
##
## newdata: predictor values; either unnamed with exactly the input columns
##   (in order), or named and containing them (extra columns are carried
##   through into the result).
## ids: subset sizes per response; NULL means every size, an unnamed vector
##   must supply one size per response column, a named list maps response
##   names to sizes.
## Returns, per response factor and model id, a data.table of predictions
## together with any extra (non-input) columns of newdata.
predict.dynpan_leaps <- function(dp, newdata, ids=NULL) {
  picks <- if(is.null(ids)) {
    lapply(dp$nmodel, seq_len)
  } else if(is.null(names(ids))) {
    stopifnot(length(ids) == length(dp$output.cols))
    setNames(ids, dp$output.cols)
  } else {
    ids
  }
  # TODO: Make this work with a larger class of things
  data.matrix <- if(is.null(names(newdata))) {
    stopifnot(ncol(newdata) == length(dp$input.cols))
    as.matrix(newdata)
  } else {
    stopifnot(all(dp$input.cols %in% names(newdata)))
    as.matrix(newdata[,dp$input.cols,with=F])
  }
  colnames(data.matrix) <- dp$input.cols
  #
  # columns of newdata that are not predictors; copied into the output
  other.data.vars <- if(is.null(names(newdata))) {
    character()
  } else {
    setdiff(colnames(newdata), dp$input.cols)
  }
  #
  # design matrix with an explicit intercept column prepended
  design.matrix <- cbind(1, dp$modelfun(data.matrix))
  colnames(design.matrix)[1] <- "(Intercept)"
  #
  lapply(setNames(nm=names(picks)), function(fac) {
    ids <- picks[[fac]]
    cfs <- dp$coef[[fac]]
    # TODO: Remove this once sure it works
    # NOTE: This could be optimised by removing non-zero columns or using
    # sparse matrices, though I doubt it'd be worth it (this routine is
    # pretty slow anyway).
    stopifnot(all(rownames(cfs) == colnames(design.matrix)))
    lapply(setNames(nm=picks[[fac]]), function(id) {
      # prediction is a plain matrix product with the dense coefficients
      cbind( newdata[,other.data.vars,with=FALSE]
           , as.data.table(design.matrix %*% cfs[,id]) )
    })
  })
}
#' Perform lasso regression on a time.table
#'
#' @param x \code{time.table} that contains predictors and, optionally, dependent variable(s).
#' @param y \code{time.table} containing dependent variable(s).
#' @param idxs index/time values to include, defaults to all complete cases
#' @param adaptive exponent used for the adaptive weights set to \code{NULL} or \code{0} to disable (defaults to 0.5)
#' @param use.auxiliary whether to include auxiliary values
#' @param input.cols column(s) of \code{x} to use for computing covariate(s)
#' @param output.cols column(s) of \code{x} or \code{y} to use as dependent varaiable(s)
#' @param ... additional arguments to pass to \code{modelfun}
#' @param modelfun function that produces the actual covariates used in the linear regression
#' @param has.no.na whether user guarantees \code{x}/\code{y} contian no \code{NA} values
#' @param adaptive.lambda ridge regression shrinkage parameter to use when calculating adaptive lasso weights
#'
#' @export
time_table_lars <- function( x, y=NULL, idxs=NULL
                           , adaptive=0.5
                           , normalise=TRUE
                           , use.auxiliary=FALSE
                           , input.cols=NULL, output.cols=NULL
                           , ...
                           , modelfun=function(x) polySane(x, raw=TRUE, degree=2)
                           , has.no.na=FALSE
                           , adaptive.lambda=0.1 ) {
  # TODO: Check that there are sufficient input/output columns
  if(nrow(x) < 2) stop("For some reason lars breaks with only one observation")
  ## Build data/response/design matrices for the selected rows and columns.
  matrices <- regression_matrices( x=x, y=y, idxs=idxs
                                 , use.auxiliary=use.auxiliary
                                 , input.cols=input.cols, output.cols=output.cols
                                 , modelfun=modelfun
                                 , ...
                                 , has.no.na=has.no.na )
  ## Centre/scale the design matrix. When normalise=FALSE, attach unit
  ## scale / zero centre attributes so the back-transformation code below
  ## can read them uniformly.
  scaled.design <- if(normalise) {
    scale(matrices$design)
  } else {
    m <- matrices$design
    attr(m, "scaled:scale") <- rep(1, ncol(m))
    setattr(m, "scaled:center", rep(0, ncol(m)))
    m
  }
  ## Adaptive-lasso weights per design column: |ridge coefficient|^adaptive
  ## (all-ones weights when the adaptive step is disabled).
  ## NOTE(review): maybe() is assumed to substitute 0 for a NULL 'adaptive'
  ## -- confirm against its definition.
  adaptive.weights <- if(maybe(adaptive, 0)) {
    ## BUGFIX: use requireNamespace() + MASS:: instead of require(MASS).
    ## require() inside a function attaches MASS to the search path as a
    ## side effect and merely returns FALSE when MASS is unavailable,
    ## which would surface later as a confusing "could not find function"
    ## error instead of a clear message.
    if(!requireNamespace("MASS", quietly=TRUE))
      stop("Package 'MASS' is required to compute adaptive lasso weights")
    apply(matrices$response, 2, function(resp) {
      #abs(lm.fit(x=matrices$design, y=scale(resp,scale=F))$coefficients)^adaptive
      abs(coef(MASS::lm.ridge(resp ~ scaled.design, lambda=adaptive.lambda))[-1])^adaptive
    })
  } else {
    matrix(rep(1, ncol(matrices$response)*ncol(scaled.design)), ncol(scaled.design))
  }
  colnames(adaptive.weights) <- colnames(matrices$response)
  rownames(adaptive.weights) <- colnames(matrices$design)
  ## Record the affine transform needed to map fitted coefficients back to
  ## the original (uncentred, unscaled, unweighted) design space.
  design.translations <- attr(scaled.design, "scaled:center")
  names(design.translations) <- colnames(matrices$design)
  design.scalings <- adaptive.weights/attr(scaled.design, "scaled:scale")
  colnames(design.scalings) <- colnames(matrices$response)
  rownames(design.scalings) <- colnames(matrices$design)
  ## One lasso path per response column, fitted on the weighted design.
  estimations <- lapply(setNames(nm=colnames(matrices$response)), function(respn) {
    resp <- matrices$response[,respn]
    ws <- adaptive.weights[,respn]
    lars( sweep(scaled.design, 2, ws, `*`)
        , resp, type="lasso", intercept=TRUE, normalize=FALSE )
  })
  rm(scaled.design)
  ##
  ## Recover coefficients on the original scale for every step of each
  ## path: predicting at the zero vector yields the intercepts, predicting
  ## at the unit vectors yields the slopes, which are then rescaled and the
  ## intercepts corrected for the centring.
  ## TODO: All this should proably be removed, we juse use the LARS build in extraction
  ## thing instead...
  all.coef <- lapply(names(estimations), function(fac) {
    estimation <- estimations[[fac]]
    intercepts <- predict.lars( estimation
                              , newx=as.data.table(rep(list(0)
                                                      , ncol(matrices$design))))$fit
    non.intercepts <-
      predict.lars( estimation
                  , as.data.table(diag(ncol(matrices$design))))$fit -
      rep(intercepts, each=ncol(matrices$design))
    ##
    non.intercepts <- non.intercepts * design.scalings[,fac]
    intercepts <- intercepts - as.numeric(design.translations %*% non.intercepts)
    ##
    coef <- rbind(intercepts, non.intercepts)
    rownames(coef) <- c("(Intercept)", colnames(matrices$design))
    coef
  })
  #Don't ask me why it suddenly needs this...
  names(all.coef) <- matrices$output.cols
  #
  ## Package everything up as a "dynpan_lars" fit object.
  result <-
    list( modelfun = modelfun
        , input.cols = matrices$input.cols
        , output.cols = matrices$output.cols
        , matrices = matrices
        , nmodel = sapply(estimations, function(xs) length(xs$df))
        , nobs = nrow(matrices$design)
        , coef = all.coef
        , estimations = estimations
        , adaptive.weights = adaptive.weights
        , design.translations = design.translations
        , design.scalings = design.scalings )
  class(result) <- "dynpan_lars"
  result
}
#' Number of observations used in the regression
#'
#' @param dp results from time_table_lars
#'
#' @export
## Return the row count of the design matrix the lasso paths were fitted on.
nobs.dynpan_lars <- function(dp) {
  dp[["nobs"]]
}
## uses translation/scaling info from dp to correct a matrix/data.frame of
## parameter estimates, assumes everything is in the expected order!
## correct.estimate <- function(dp, coefs, factor) {
## trans <- dp$design.translations
## trans[is.na(trans)] <- 0
## ##
## scalefac <- dp$design.scalings[colnames(coefs),factor]
## scalefac[is.na(scalefac)] <- 1
## ##
## coefs[,1] <- c(, rep(0, length(dp$input.cols))
## coefs*c(1, dp$design.scalings)
## }
#' time.table LASSO regression summary
#'
#' Gives table containing coefficient estimates and diagonistic data
#'
#' @param dp result of \code{time_table_lars} call
#'
#' @export
summary.dynpan_lars <- function(dp) {
  estimations <- dp$estimations
  # One row per (response factor, path step): Cp, R2, RSS and lambda from
  # lars, the number of non-zero terms, the coefficients as Term.* columns,
  # and a Gaussian-likelihood BIC computed below.
  extr.stats <- function(fac) {
    stats <- estimations[[fac]]
    basic <- as.data.table(stats[c("Cp", "R2", "RSS")])
    # lars reports one lambda per step; the final (least-squares) step
    # corresponds to lambda = 0
    basic[,lambda:=c(stats[["lambda"]], 0)]
    terms <- t(dp$coef[[fac]])
    nterm <- rowSums(abs(terms) > .Machine$double.eps)
    ## This isn't quite the same BIC as in leaps, but I think it's correctish
    ## See the lasso/lars papers on the EDF of the LASSO, though I'm not sure
    ## if this is affected by the adaptive correction (seems unlikely)
    ## n <- nrow(dp$matrices$design)
    n <- nobs.dynpan_lars(dp)
    BICf <- function(RSS) n + n*log(2*pi) + n*log(RSS/n) + log(n)*nterm
    data.table(basic, nterm=nterm, Term=terms)[,BIC:=BICf(RSS)]
  }
  stats <-
    data.table(factor=names(estimations))[,extr.stats(factor), by="factor"]
  # tag rows so results from different selection methods can be combined
  stats[,method:="lasso.lars"]
  stats
}
#' Remove matrices from lars result to reduce memory footprint
#'
#' @param dp result from time_table_lars
#'
#' @export
## Drop the cached data/response/design matrices from a fit object to
## reduce its memory footprint; everything else is left untouched.
remove_matrices <- function(dp) {
  modifyList(dp, list(matrices = NULL))
}
#' time.table LASSO coefficients
#'
#' Matrix of parameter estimates from LASSO fit
#'
#' @param dp result of \code{time_table_lars} call
#' @param ids named list containing which models to get coefficients for, should map output.col names from dp to list of model numbers for that column
#' @param lambda (exact/absolute) shrinkage values for which to extract coefficients, should map output.col names from dp to values
#' @param fraction fractions of minimal shrinkage at which to extract coefficients, should map output.col names from dp to values
#' @param include.intercept whether to include the intercept parameter (defaults to FALSE for legacy reasons)
#'
#' @details If none of \code{ids}, \code{lamda}, or \code{fraction} are
#' specified the fits corresponding to lambda values at which the set of active
#' terms changes are returned.
#'
#' @export
coef.dynpan_lars <- function(dp, ids=NULL, lambda=NULL, fraction=NULL, include.intercept=FALSE) {
  # NOTE: Hack this in for now
  # Branch 1: extract at explicit shrinkage values (lambda or fraction),
  # interpolated by lars; mutually exclusive with 'ids'.
  if(!is.null(lambda) | !is.null(fraction)) {
    if((!is.null(lambda) & !is.null(fraction)) | !is.null(ids))
      stop("Specify only one of 'ids', 'lambda', and 'fraction'")
    mode <- if(is.null(lambda)) "fraction" else "lambda"
    valuess <- if(is.null(lambda)) fraction else lambda
    lapply(setNames(nm=names(valuess)), function(outp) {
      lapply(setNames(nm=valuess[[outp]]), function(value) {
        # undo the adaptive/normalisation scaling applied before fitting
        non.intercepts <- coef(dp$estimations[[outp]], mode=mode, s=value)*dp$design.scalings[,outp]
        intercept <- if(include.intercept) {
          # intercept = prediction at the zero vector, corrected for the
          # centring of the design matrix
          nulldata <- as.data.frame(matrix(0, nrow=1, ncol=length(dp$input.cols)))
          colnames(nulldata) <- dp$input.cols
          correction <- as.numeric(dp$design.translations %*% non.intercepts)
          intercept <- predict(dp$estimations[[outp]], newx=nulldata, mode=mode, s=value)$fit
          setNames(intercept - correction, "(Intercept)")
        } else numeric()
        c(intercept, non.intercepts)
      })
    })
  } else {
    # Branch 2: extract precomputed coefficients by path-step id.
    # NULL ids -> every step; unnamed ids -> same steps for every response;
    # named ids -> per-response steps.
    picks <- if(is.null(ids)) {
      lapply(dp$nmodel, seq_len)
    } else if(is.null(names(ids))) {
      setNames(rep(list(unlist(ids)), length(dp$output.cols)), dp$output.cols)
    } else {
      if(!(all(names(ids) %in% dp$output.cols)))
        stop(paste0( "coef.dynpan_lars: '"
                   , setdiff(names(ids), dp$output.cols)
                   , "' is not an output of the regression."
                   , collapse="\n" ))
      ids
    }
    #
    lapply(setNames(nm=names(picks)), function(fac) {
      cf <- dp$coef[[fac]]
      lapply(setNames(nm=picks[[fac]]), function(i) {
        # row 1 of the cached coefficient matrix is the intercept
        if(include.intercept)
          cf[,i]
        else
          cf[-1,i]
      })
    })
  }
}
#' time.table LASSO prediction
#'
#' Use LASSO fit to predict values for new data
#'
#' @param dp result of \code{time_table_lars} call
#' @param newdata time.table containing (at least) the columns used when fitting \code{dp}
#' @param ids see coef.dynpan_lars
#' @param lambda see coef.dynpan_lars
#' @param fraction see coef.dynpan_lars
#' @param keep.all.cols whether results should contain copies of all columns from dp
#'
#' @export
predict.dynpan_lars <- function(dp, newdata=NULL, ids=NULL, lambda=NULL, fraction=NULL, keep.all.cols=FALSE) {
  # Predictor values: either unnamed with exactly the input columns in
  # order, or named and containing them (extra columns carried through).
  data.matrix <- if(is.null(names(newdata))) {
    stopifnot(ncol(newdata) == length(dp$input.cols))
    as.matrix(newdata)
  } else {
    stopifnot(all(dp$input.cols %in% names(newdata)))
    as.matrix(as.data.table(newdata)[,dp$input.cols,with=F])
  }
  colnames(data.matrix) <- dp$input.cols
  ##
  # columns of newdata that are not predictors; copied into the output
  other.data.vars <- if(is.null(names(newdata))) {
    character()
  } else {
    setdiff(colnames(newdata), dp$input.cols)
  }
  estimations <- dp$estimations
  ##
  # Exactly one of the three selection mechanisms must be supplied; each
  # must be a named list mapping every chosen output column to the same
  # number of values.
  if(sum(c(!is.null(lambda), !is.null(fraction), !is.null(ids))) != 1)
    stop("Specify exactly one of 'ids', 'lambda', and 'fraction'")
  mode <- if(!is.null(lambda)) {
    if(!local({ n <- sapply(lambda, length); min(n) == max(n)}))
      stop("different number of lambdas supplied")
    if(!all(names(lambda) %in% dp$output.cols))
      stop("lambda must be given as a list mapping output columns to lambda values")
    "lambda"
  } else if(!is.null(fraction)) {
    if(!local({ n <- sapply(fraction, length); min(n) == max(n)}))
      stop("different number of fractions supplied")
    if(!all(names(fraction) %in% dp$output.cols))
      stop("fractions must be given as a list mapping output columns to fractions")
    "fraction"
  } else {
    if(!local({ n <- sapply(ids, length); min(n) == max(n)}))
      stop("different number of fractions supplied")
    if(!all(names(ids) %in% dp$output.cols))
      stop("fractions must be given as a list mapping output columns to fractions")
    "step"
  }
  values <- if(!is.null(lambda)) { lambda } else if(!is.null(fraction)) { fraction } else ids
  # Per response: rebuild the design in the fitted (centred, scaled,
  # adaptively weighted) space, then let lars produce the fits.
  predicted <- lapply(setNames(nm=names(values)), function(fac) {
    design.matrix <-
      sweep( sweep( dp$modelfun(data.matrix)
                  , 2, dp$design.translations, `-` )
           , 2, dp$design.scalings[,fac], `*` )
    predict(dp$estimations[[fac]], newx=design.matrix, type="fit", mode=mode, s=values[[fac]])$fit
  })
  ##
  # Reassemble: one data.frame per selection value, combining all responses
  # (and, optionally, the original/extra columns of newdata).
  lapply(seq_along(values[[1]]), function(i) {
    if(keep.all.cols)
      cbind( newdata
           , do.call(data.frame, lapply(predicted, function(xs) xs[,i])) )
    else if(length(other.data.vars) > 0)
      cbind( as.data.table(newdata)[,other.data.vars,with=FALSE]
           , do.call(data.frame, lapply(predicted, function(xs) xs[,i])) )
    else do.call(data.frame, lapply(predicted, function(xs) xs[,i]))
  })
  ##
  ## lapply(setNames(nm=names(ids)), function(fac) {
  ##   estimation <- estimations[[fac]]
  ##   lapply(setNames(nm=ids[[fac]]), function(id) {
  ##     predicted <- do.call( data.table
  ##                         , setNames(nm=fac,
  ##                             list(predict.lars( estimation
  ##                                              , design.matrix
  ##                                              , s=id )$fit)) )
  ##     if(length(other.data.vars) > 0)
  ##       cbind(as.data.table(newdata)[,other.data.vars,with=FALSE], predicted)
  ##     else
  ##       predicted
  ##   })
  ## })
}
|
884d0dd0cb4d6f81269ec4aa44b1e9af35bcf0ef
|
8c3ce85957132b59fab7a0489b6c90d38290ed34
|
/R/genesOfInterest.R
|
b37f5c5f5fead15a87aac29298cf127f2f2e9b67
|
[
"MIT"
] |
permissive
|
ChristofferFlensburg/superFreq
|
a8e60ef211d461c1b5a148f573d550634621651e
|
99584742099b33310f96a4bfb3a9fd179274d5cb
|
refs/heads/master
| 2023-04-08T12:58:53.463534
| 2023-03-29T03:28:09
| 2023-03-29T03:28:09
| 51,901,575
| 118
| 38
|
MIT
| 2023-02-08T19:28:54
| 2016-02-17T06:53:45
|
R
|
UTF-8
|
R
| false
| false
| 14,319
|
r
|
genesOfInterest.R
|
#' plots a heatmap of copy numbers, highlighting genes of interest
#'
#' @param GoI character vector. Genes of interest.
#' @param Rdirectory character The Rdirectory where the superFreq run was run.
#' @param genome character. The genome the sample is aligned to. 'hg19', 'hg38' or 'mm10'
#'
#' @details This function plots a heatmap of copy numbers across the samples and genome. Genes of Interest are highlighted. Can be run after the superFreq run is completed.
#'
#'
#' @export
#'
plotCNAheatmapWithGoI = function(GoI, Rdirectory, genome, metaDataFile, excludeIndividuals=c(), excludeSamples=c(), GoIakas=c(), cpus=1) {
  superFreq:::resetMargins()
  # genome-wide CNA heatmap over all (non-excluded) samples in the batch
  superFreq:::plotCNAbatchHeatmap(Rdirectory=Rdirectory, metaDataFile=metaDataFile, genome=genome,
                                  excludeIndividuals=excludeIndividuals, excludeSamples=excludeSamples, cpus=cpus)
  captureRegions = loadAnyCaptureRegions(Rdirectory, metaDataFile, excludeIndividuals=excludeIndividuals, excludeSamples=excludeSamples)
  ymax = par("usr")[4]
  # Mark each gene of interest: a guide line from the label up into the
  # heatmap at the gene's genome-wide x coordinate.
  for ( gene in GoI ) {
    crs = captureRegions[captureRegions$region == gene]
    # map chromosome coordinates to the heatmap's linear genome x-axis
    maxx = chrToX(seqnames(crs[1]), max(end(crs)), genome=genome)
    minx = chrToX(seqnames(crs[1]), min(start(crs)), genome=genome)
    midx = (minx+maxx)/2
    segments(midx, 0.5, minx, ymax-0.75, col=mcri('cyan', 0.5), lwd=0.5)
    # label below the heatmap, using the alias from GoIakas when provided
    text(midx, 0.25, renameGoIs(gene, GoIakas), cex=0.8, col=mcri('cyan'))
  }
}
#' plots a heatmap of copy numbers, focusing on a gene of interest
#'
#' @param GoI character vector. Genes of interest.
#' @param Rdirectory character The Rdirectory where the superFreq run was run.
#' @param genome character. The genome the sample is aligned to. 'hg19', 'hg38' or 'mm10'
#' @param padding numeric. How many basepairs next to the gene to show. default 3e6.
#'
#' @details This function plots a heatmap of copy numbers across the samples, focused at a gene of interest. Can be run after the superFreq run is completed.
#'
#'
#' @export
#'
# NOTE(review): despite the roxygen above saying "GoI", this function takes
# a single gene name.
plotCNAheatmapOverGoI = function(gene, Rdirectory, genome, metaDataFile, excludeIndividuals=c(), excludeSamples=c(), GoIakas=c(), cpus=1, padding=3e6) {
  captureRegions = loadAnyCaptureRegions(Rdirectory, metaDataFile, excludeIndividuals=excludeIndividuals, excludeSamples=excludeSamples)
  crs = captureRegions[captureRegions$region == gene]
  # map the gene's extent onto the heatmap's linear genome x-axis
  maxx = chrToX(seqnames(crs[1]), max(end(crs)), genome=genome)
  minx = chrToX(seqnames(crs[1]), min(start(crs)), genome=genome)
  midx = (minx+maxx)/2
  superFreq:::resetMargins()
  # zoomed heatmap: the gene plus 'padding' basepairs on each side
  superFreq:::plotCNAbatchHeatmap(Rdirectory=Rdirectory, metaDataFile=metaDataFile, genome=genome, xlim=c(minx-padding, maxx+padding),
                                  excludeIndividuals=excludeIndividuals, excludeSamples=excludeSamples, cpus=cpus)
  ymax = par("usr")[4]
  # vertical guides at the gene boundaries, label (or alias) underneath
  segments(minx, 0.5, minx, ymax-0.75, col=mcri('cyan'))
  segments(maxx, 0.5, maxx, ymax-0.75, col=mcri('cyan'))
  text(midx, 0.25, renameGoIs(gene, GoIakas), cex=0.8, col=mcri('cyan'))
}
#' plots mutations in samples in Genes of Interest
#'
#' @param GoI character vector. Genes of interest for the analysis
#' @param Rdirectory character The Rdirectory where the superFreq run was run.
#'
#' @details This function plots a mutation matrix showing CNAs and point mutations in the Genes of Interest across the analysed samples in the Rdirectory. Should be run after the superFreq() run has completed.
#'
#' @export
#'
plotCohortMutationHeatmap = function(GoI, Rdirectory, metaDataFile, excludeIndividuals=c(), excludeSamples=c(), GoIakas=c(), cpus=1) {
  # saved CNA segmentations and variant tables for the whole cohort
  clusterList = superFreq:::loadClusterList(Rdirectory=Rdirectory, metaDataFile=metaDataFile, excludeIndividuals=excludeIndividuals,
                                            excludeSamples=excludeSamples, cpus=cpus)
  # sample names per individual, in the plotting order used throughout
  sampleList = lapply(clusterList, names)
  qsList = superFreq:::loadQsList(Rdirectory=Rdirectory, metaDataFile=metaDataFile,
                                  excludeIndividuals=excludeIndividuals, excludeSamples=excludeSamples, cpus=cpus)
  #set up plot and sample/gene labels
  superFreq:::setupCohortPlot(GoI, sampleList, GoIakas)
  #fill in CNAs as background colour for each box
  colMx = superFreq:::addCNAs(GoI, clusterList, sampleList)
  #add dots for point mutations
  superFreq:::addPointMutations(GoI, qsList, sampleList, colMx=colMx)
}
#helper function
#helper function
## Draw the empty mutation-matrix canvas: grid, per-individual separators,
## sample labels on the left, gene labels on top, and the legend.
## Genes run along x (one column each), samples along y (one row each).
setupCohortPlot = function(GoI, sampleList, GoIakas=c()) {
  #set up scale and grid
  xScale = length(GoI)
  yScale = length(unlist(sampleList))
  xTicks = 1:xScale
  yTicks = 1:yScale
  #title
  main = 'mutation matrix'
  #empty plot
  plot(0, type='n', xaxt='n', xlim=c(-0.15, 1)*(xScale+0.5), ylim=c(0, 1.3)*yScale, frame=F, yaxt='n', xlab='', ylab='', main=main)
  #grid
  segments(0:xScale + 0.5, 0.5, 0:xScale + 0.5, yScale*1.15, lwd=0.5, col='grey')
  # horizontal grid lines only while still readable (< 100 samples)
  if ( yScale < 100 )
    segments(-0.03*xScale, 0:yScale + 0.5, xScale + 0.5, 0:yScale + 0.5, lwd=0.3*min(1, 30/yScale), col=rgb(0.8, 0.8, 0.8))
  # separating lines between individuals (each may have several samples)
  if ( length(sampleList) > 1 ) {
    indSeparators = cumsum(sapply(sampleList, length))
    indSeparators = indSeparators[-length(indSeparators)]
    #if many samples, thinner separating lines or no lines at all.
    if ( length(sampleList) < 100 )
      segments(-0.15*xScale, indSeparators + 0.5, xScale + 0.5, indSeparators + 0.5, lwd=min(1, 30/length(sampleList)), col='grey')
  }
  textCol = rgb(0.5,0.5,0.5)
  # sample names down the left margin, stacked by individual
  for ( ind in names(sampleList) ) {
    sys = cumsum(sapply(sampleList, length))[ind] - length(sampleList[[ind]]) + 1:length(sampleList[[ind]])
    text(-0.03*xScale, sys, sampleList[[ind]], col=textCol, adj=c(1, 0.5), cex=0.8, font=2)
  }
  #topbar
  text(xTicks, yScale+1, superFreq:::renameGoIs(GoI, GoIakas), adj=c(0, 0.5), srt=90, cex=0.8, font=2, col=textCol)
  #legend
  legend('topleft', c('ampli', 'gain', 'LOH', 'loss', 'SNV', 'biall', 'trunc'), bg='white', pch=c(15, 15, 15, 15, 16, 16, 17), col=mcri(c('darkred', 'red', 'green', 'blue', 'black', 'black', 'black')), pt.cex=c(2,2,2,2,1,1.4,1.2), pt.lwd=c(1,1,1,1,1,1,2))
}
#helper function
#helper function
## Paint per-gene CNA calls as coloured background boxes on the cohort
## mutation heatmap and return the colour matrix for reuse.
##
## GoI: genes of interest (must be rownames of the CR table).
## clusterList: per-individual, per-sample list with $CR (gene coordinates,
##   columns x1/x2) and $clusters (CNA segments with x1/x2/call/clonality).
## sampleList: per-individual character vectors of sample names.
##
## Returns (invisibly) a samples x genes matrix of colours; entries stay
## 'black' for genes with no (non-AB) call.
addCNAs = function(GoI, clusterList, sampleList) {
  # genome x-coordinate of each gene's midpoint, from any one CR table
  cr = clusterList[[1]][[1]]$CR
  XoI = (cr[GoI,]$x1+cr[GoI,]$x2)/2
  names(XoI) = GoI
  # x position (column) of each gene in the plot
  xs = 1:length(GoI)
  names(xs) = GoI
  colMx = matrix('black', ncol=length(GoI), nrow=length(unlist(sampleList)))
  rownames(colMx) = unlist(sampleList)
  colnames(colMx) = GoI
  for ( ind in names(sampleList) ) {
    # y positions (rows) of this individual's samples in the stacked layout
    sys = cumsum(sapply(sampleList, length))[ind] - length(sampleList[[ind]]) + 1:length(sampleList[[ind]])
    samples = sampleList[[ind]]
    names(sys) = samples
    for ( sample in samples ) {
      clusters = clusterList[[ind]][[sample]]$clusters
      y = sys[sample]
      for ( gene in GoI ) {
        # the CNA segment covering the gene midpoint, if any
        call = clusters$call[clusters$x2 > XoI[gene] & clusters$x1 < XoI[gene]]
        # BUGFIX: use short-circuiting || so that an uncovered gene
        # (length(call) == 0) skips ahead instead of crashing with a
        # zero-length if() condition; AB (normal diploid) calls are skipped.
        if ( length(call) == 0 || call %in% c('AB', 'AB?', 'AB??') ) next
        clonality = clusters$clonality[clusters$x2 > XoI[gene] & clusters$x1 < XoI[gene]]
        # colour encodes the call type, opacity the clonality
        col = superFreq:::callToColHeatmap(call, sqrt(clonality))
        colMx[sample, gene] = col
        x = xs[gene]
        rect(x-0.45, y-0.5, x+0.45, y+0.5, border=F, col=col)
      }
    }
  }
  return(invisible(colMx))
}
#helper function
#helper function
## Map a CNA call string (e.g. 'A', 'AAB', 'CL?') and its clonality to the
## heatmap background colour. Trailing '?' marks lower-confidence calls.
## Colour key (matches the legend in setupCohortPlot): blue = loss,
## red = gain, green = LOH, darkblue = complete loss, darkred = amplification.
callToColHeatmap = function(call, clonality) {
  if ( call %in% c('A', 'A?', 'A??', 'B', 'B?', 'B??') ) return(mcri('blue', clonality))
  if ( call %in% c('AAB', 'AAB?', 'AAB??', 'BBA', 'BBA?', 'BBA??') ) return(mcri('red', clonality))
  if ( call %in% c('AA', 'AA?', 'AA??', 'BB', 'BB?', 'BB??', 'AAA', 'AAA?', 'AAA??') ) return(mcri('green', clonality))
  if ( call %in% c('CL', 'CL?', 'CL??') ) return(mcri('darkblue', clonality))
  # High-level amplification: opacity scales with the effective copy number.
  # BUGFIX: '?' is a regex repetition operator, so gsub('?', ...) without
  # fixed=TRUE throws "invalid regular expression" the moment this branch
  # is reached; strip the '?' characters literally instead.
  if ( nchar(gsub('?', '', call, fixed=TRUE)) > 3 ) {
    effectiveN = nchar(gsub('?', '', call, fixed=TRUE))*clonality
    return(mcri('darkred', pmin(1, effectiveN/5)))
  }
  stop('Couldnt find colour.')
}
#helper function
#helper function
## Overlay point-mutation markers on the cohort mutation heatmap.
##
## GoI: genes of interest (x-axis order).
## qsList: per-individual, per-sample variant data frames.
## sampleList: per-individual character vectors of sample names (y order).
## colMx: samples x genes colour matrix from addCNAs(); reused as marker
##   fill colour when the cohort is too large for black-on-box markers.
addPointMutations = function(GoI, qsList, sampleList, colMx) {
  #set up scale and grid
  xScale = length(GoI)
  yScale = length(unlist(sampleList))
  xTicks = 1:xScale
  yTicks = 1:yScale
  xs = 1:length(GoI)
  names(xs) = GoI
  for ( ind in names(sampleList) ) {
    # y positions of this individual's samples in the stacked layout
    sys = cumsum(sapply(sampleList, length))[ind] - length(sampleList[[ind]]) + 1:length(sampleList[[ind]])
    samples = sampleList[[ind]]
    names(sys) = samples
    for ( sample in samples ) {
      q = qsList[[ind]][[sample]]
      # keep somatic, coding, non-subclonal (VAF > 15%) variants in the GoI
      q = q[q$somaticP > 0.5 & q$severity < 11 & q$inGene %in% GoI & q$var > 0.15*q$cov,]
      y = sys[sample]
      for ( gene in GoI ) {
        qg = q[q$inGene == gene,]
        if ( nrow(qg) == 0 ) next
        # marker size: double for multiple hits or a significantly >50% VAF
        # (biallelic-looking) variant; shrunk for low-VAF (< 25%) variants
        cex=1
        if ( nrow(qg) > 1 ) cex=2
        else if ( qg$var > qg$cov*0.6 & pbinom(qg$var, qg$cov, 0.5, lower.tail=F) < 0.05 ) cex=2
        else if ( qg$var < qg$cov/4 ) cex = 4*qg$var/qg$cov
        pointcol = 'black'
        # large cohorts: colour the marker like the CNA box so it stays visible
        if ( yScale >= 100 ) pointcol = colMx[sample, gene]
        bordercol = 'white'
        x = xs[gene]
        # scale markers and line width down as the cohort grows
        cex=sqrt(cex*1.2)*min(1, max(0.6, sqrt(50/yScale)))
        lwd = 1.5*min(1, max(0.6, sqrt(50/yScale)))
        # triangles (pch 24) for severity < 10 ('trunc' in the legend),
        # circles (pch 21) otherwise
        points(x, y, col=bordercol, cex=cex, pch=ifelse(qg$severity < 10, 24, 21), lwd=lwd, bg=pointcol)
      }
    }
  }
}
## Replace gene SYMBOLs with their display aliases.
##
## genes: character vector of gene names (or labels containing them).
## GoIakas: named character vector mapping SYMBOL -> alias; empty/NULL
##   leaves 'genes' unchanged.
##
## BUGFIX: the substitution is now literal (fixed=TRUE), so SYMBOLs
## containing regex metacharacters can no longer corrupt the match or
## error out. Note the replacement remains substring-based, as before:
## an alias key that occurs inside a longer label is still replaced there.
renameGoIs = function(genes, GoIakas) {
  for ( SYMBOLname in names(GoIakas) ) {
    genes = gsub(SYMBOLname, GoIakas[SYMBOLname], genes, fixed=TRUE)
  }
  return(genes)
}
## Select recurrently mutated genes of interest across the cohort.
##
## A gene qualifies in a sample when it carries a somatic, coding,
## non-subclonal variant that passes at least one of the enabled
## annotation filters (COSMIC census, ClinVar pathogenic, any ClinVar
## annotation). Genes are ranked by the number of individuals hit and
## truncated to maxNumberOfGenes.
findGoI = function(Rdirectory, metaDataFile, cosmicCensus=TRUE, clinvarPathogenic=TRUE, clinvarAny=FALSE, excludeIndividuals=c(), excludeSamples=c(), maxNumberOfGenes=50, cpus=1, genome='hg19') {
  qsList = superFreq:::loadQsList(Rdirectory=Rdirectory, metaDataFile=metaDataFile, excludeIndividuals=excludeIndividuals, excludeSamples=excludeSamples, cpus=cpus)
  qs = do.call(c, qsList)
  # per-sample vectors of qualifying genes
  GoIlist = lapply(qs, function(q) {
    isSomatic = q$somaticP > 0.5
    isCoding = q$severity < 11
    isntSubclonal = q$var > 0.15*q$cov
    isCosmicCensus = q$isCosmicCensus
    isClinvarAny = q$ClinVar_ClinicalSignificance != ''
    isClinvarPathogenic =
      grepl('[pP]athogenic,', paste0(q$ClinVar_ClinicalSignificance)) |
      grepl('[pP]athogenic$', paste0(q$ClinVar_ClinicalSignificance))
    use =
      isSomatic & isCoding & isntSubclonal &
      ((isCosmicCensus&cosmicCensus) | (isClinvarPathogenic&clinvarPathogenic) | (isClinvarAny&clinvarAny))
    # for mm10 the ClinVar filters are skipped (presumably unannotated);
    # only the COSMIC census flag applies
    if ( genome == 'mm10' )
      use = isSomatic & isCoding & isntSubclonal & (isCosmicCensus | !cosmicCensus)
    return(unique(q$inGene[use]))
  })
  GoI = unique(unlist(GoIlist))
  # number of individuals (not samples) in which each gene qualifies
  hits = sapply(GoI, function(gene) {
    sum(sapply(names(qsList), function(ind) {
      qs = qsList[[ind]]
      GoIsublist = GoIlist[paste0(ind, '.', names(qs))]
      genesInInd = unique(unlist(GoIsublist))
      hit = gene %in% genesInInd
      return(hit)
    }))
  })
  GoI = GoI[order(-hits)]
  if ( length(GoI) > maxNumberOfGenes ) GoI = GoI[1:maxNumberOfGenes]
  # BUGFIX: the function previously ended on the if() above, which returns
  # invisible NULL whenever no truncation was needed. Always return the genes.
  return(GoI)
}
#' returns relevant mutations in samples in Genes of Interest
#'
#' @param GoI character vector. Genes of interest for the analysis
#' @param Rdirectory character The Rdirectory where the superFreq run was run.
#'
#' @details This function plots a mutation matrix showing CNAs and point mutations in the Genes of Interest across the analysed samples in the Rdirectory. Should be run after the superFreq() run has completed.
#'
#' @export
#'
## GoIakas is currently unused here; kept for interface compatibility.
getCohortMutationMatrix = function(GoI, Rdirectory, metaDataFile, excludeIndividuals=c(), excludeSamples=c(), GoIakas=c(), cpus=1, includeGermlineLike=TRUE) {
  # saved CNA segmentations and variant tables for the whole cohort
  clusterList = superFreq:::loadClusterList(Rdirectory=Rdirectory, metaDataFile=metaDataFile, excludeIndividuals=excludeIndividuals,
                                            excludeSamples=excludeSamples, cpus=cpus)
  sampleList = lapply(clusterList, names)
  qsList = superFreq:::loadQsList(Rdirectory=Rdirectory, metaDataFile=metaDataFile,
                                  excludeIndividuals=excludeIndividuals, excludeSamples=excludeSamples, cpus=cpus)
  #get CNA calls and clonality
  cnaMx = superFreq:::getCohortCNAmatrix(GoI, clusterList, sampleList)
  #get point mutation summaries
  # BUGFIX: the original forwarded an undefined variable 'colMx' (harmless
  # only thanks to lazy evaluation, since the argument is never used by the
  # callee); pass NULL explicitly instead.
  snvMx = getCohortSNVmatrix(GoI, qsList, sampleList, colMx=NULL, includeGermlineLike=includeGermlineLike)
  return(list(cnaMx=cnaMx, snvMx=snvMx))
}
#helper function
#helper function
## Summarise coding somatic SNVs per gene and sample.
##
## GoI: genes of interest.
## qsList: per-individual, per-sample variant data frames with (at least)
##   columns somaticP, severity, inGene, var, cov, type and optionally germline.
## sampleList: per-individual character vectors of sample names.
## colMx: unused; retained (now with a default) for backward compatibility.
## includeGermlineLike: keep variants flagged as germline-like?
##   BUGFIX: the default used to be the self-referential
##   'includeGermlineLike=includeGermlineLike', which errors with
##   "promise already under evaluation" whenever the argument was omitted.
##
## Returns list(biallelicMx, mostSevereMx), both genes x samples:
##   biallelicMx: TRUE for >1 qualifying variant, or a single variant whose
##     VAF is significantly above 50% (one-sided binomial test at 0.05).
##   mostSevereMx: mutation type of the lowest-severity (most severe) variant.
getCohortSNVmatrix = function(GoI, qsList, sampleList, colMx=NULL, includeGermlineLike=TRUE) {
  biallelicMx = matrix(FALSE, ncol=length(unlist(sampleList)), nrow=length(GoI))
  mostSevereMx = matrix('', ncol=length(unlist(sampleList)), nrow=length(GoI))
  rownames(biallelicMx) = rownames(mostSevereMx) = GoI
  colnames(biallelicMx) = colnames(mostSevereMx) = unlist(sampleList)
  for ( ind in names(sampleList) ) {
    samples = sampleList[[ind]]
    for ( sample in samples ) {
      q = qsList[[ind]][[sample]]
      # keep somatic, coding, non-subclonal (VAF > 15%) variants in the GoI
      q = q[q$somaticP > 0.5 & q$severity < 11 & q$inGene %in% GoI & q$var > 0.15*q$cov,]
      if ( !includeGermlineLike ) q = q[!q$germline | is.na(q$germline),]
      for ( gene in GoI ) {
        qg = q[q$inGene == gene,]
        if ( nrow(qg) == 0 ) next
        if ( nrow(qg) > 1 ) biallelicMx[gene, sample] = TRUE
        else biallelicMx[gene, sample] = qg$var > qg$cov*0.6 & pbinom(qg$var, qg$cov, 0.5, lower.tail=F) < 0.05
        mostSevereMx[gene, sample] = qg$type[which.min(qg$severity)]
      }
    }
  }
  return(list(biallelicMx=biallelicMx, mostSevereMx=mostSevereMx))
}
#helper function
#helper function
## Collect, per gene and sample, the CNA call and clonality of the segment
## covering the gene's midpoint. Returns list(callMx, clonalityMx), both
## genes x samples; entries stay '' / 0 for genes no segment covers.
getCohortCNAmatrix = function(GoI, clusterList, sampleList) {
  allSamples = unlist(sampleList)
  # gene midpoints on the genome x-axis, read from any one CR table
  geneCoords = clusterList[[1]][[1]]$CR
  geneMid = (geneCoords[GoI,]$x1 + geneCoords[GoI,]$x2)/2
  names(geneMid) = GoI
  callMx = matrix('', nrow=length(GoI), ncol=length(allSamples),
                  dimnames=list(GoI, allSamples))
  clonalityMx = matrix(0, nrow=length(GoI), ncol=length(allSamples),
                       dimnames=list(GoI, allSamples))
  for ( ind in names(sampleList) ) {
    for ( sampleName in sampleList[[ind]] ) {
      segs = clusterList[[ind]][[sampleName]]$clusters
      for ( gene in GoI ) {
        # segment(s) whose [x1, x2) span contains the gene midpoint
        covering = segs$x2 > geneMid[gene] & segs$x1 < geneMid[gene]
        geneCall = segs$call[covering]
        if ( length(geneCall) == 0 ) next
        clonalityMx[gene, sampleName] = segs$clonality[covering]
        callMx[gene, sampleName] = geneCall
      }
    }
  }
  return(list(callMx=callMx, clonalityMx=clonalityMx))
}
|
03b661941f973487e38e9690ba23a8d2a171e1a2
|
cd2e6a05bbf1196bf447f7b447b01f0790f81e04
|
/ch4-classification/4.3-multivariate-logistic-regression/quiz17.R
|
fcd76292a7753f449c5d89cebbfb4b7e299aaefb
|
[] |
no_license
|
AntonioPelayo/stanford-statistical-learning
|
d534a54b19f06bc5c69a3981ffa6b57ea418418f
|
c4e83b25b79a474426bb24a29110a881174c5634
|
refs/heads/master
| 2022-04-21T07:33:00.988232
| 2020-04-22T05:44:46
| 2020-04-22T05:44:46
| 242,658,021
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 414
|
r
|
quiz17.R
|
# Quiz 17
# 4.3.R1
# Probability of receiving an A in the class, from a logistic regression
# model with intercept B0 and coefficients B1 (hours studied, X1) and
# B2 (undergrad GPA, X2).
#
# Fix: the previous direct formulation exp(eta) / (1 + exp(eta)) overflows
# to Inf/Inf = NaN for large linear predictors; plogis() computes the same
# value in a numerically stable way.
ProbAinClass = function(B0, B1, B2, X1, X2){
  # X_1 = hours studied
  # X_2 = undergrad GPA
  # linear predictor of the logistic model
  eta = B0 + (B1 * X1) + (B2 * X2)
  # Return probability the student receives an A
  return(plogis(eta))
}
# Hours of study needed to reach probability Px of receiving an A, obtained
# by inverting the logistic model: solve B0 + B1*X1 + B2*X2 = logit(Px)
# for X1, where X2 is the undergrad GPA.
hoursToStudy = function(Px,B0, B1, B2, X2){
  # log-odds corresponding to the target probability
  logOdds = log(Px / (1-Px))
  return((logOdds - B0 - (B2 * X2)) / B1)
}
# Quiz answers:
# P(A) for 40 hours of study and a 3.5 GPA:
# exp(-0.5) / (1 + exp(-0.5)) ~= 0.3775
ProbAinClass(-6, 0.05, 1, 40, 3.5)
# Hours needed for a 50% chance of an A at GPA 3.5: 50
hoursToStudy(.5, -6, 0.05, 1, 3.5)
|
e60ddddab17f45d0b8b248a2184dc1825c5c4f2f
|
d473a271deb529ed2199d2b7f1c4c07b8625a4aa
|
/zSnips_R/Columns.R
|
98f7fcc809a872e0723f8697c40b0d07c9d889e3
|
[] |
no_license
|
yangboyubyron/DS_Recipes
|
e674820b9af45bc71852ac0acdeb5199b76c8533
|
5436e42597b26adc2ae2381e2180c9488627f94d
|
refs/heads/master
| 2023-03-06T05:20:26.676369
| 2021-02-19T18:56:52
| 2021-02-19T18:56:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,634
|
r
|
Columns.R
|
# IN THIS SCRIPT:
# Add columns
# Remove columns
# Name columns
# Rename columns
# Perform operations across columns
# Test for column present
# Dynamically reference columns
#
# NOTE: this is a snippet/cheat-sheet file. The objects referenced below
# (data, df, college, cc_data, x, y, YourTBL, ...) are placeholders that
# are not defined here; the snippets are not meant to run top to bottom.
#---------------------------------------------------------------------------
# ADD COLUMNS
# in this example, the data frame has 3 rows
#---------------------------------------------------------------------------
data$size <- c("small", "large", "medium")
data[["size"]] <- c("small", "large", "medium")
data[,"size"] <- c("small", "large", "medium")
data$size <- 0 # Use the same value (0) for all rows
# create an Elite column initialised to "No" for each row of college
Elite=rep("No",nrow(college))
# set the new column to 'Yes' wherever Top10perc > 50
Elite[college$Top10perc>50]="Yes"
#---------------------------------------------------------------------------
# REMOVE COLUMNS
#---------------------------------------------------------------------------
df = df[,-1] # creates new df with first column removed
# Other examples
data$size <- NULL
data[["size"]] <- NULL
data[,"size"] <- NULL
data[[3]] <- NULL
data[,3] <- NULL
data <- subset(data, select=-size)
#---------------------------------------------------------------------------
# NAME COLUMNS
#---------------------------------------------------------------------------
overview <- cbind(xs, xi[,3], xl[,3], xr[,3], count)
colnames(overview) <- c("location","region","Spending","Income","Count")
#---------------------------------------------------------------------------
# RENAME COLUMNS
# note: dplyr has a rename function
#---------------------------------------------------------------------------
# Basic rename example using base R
colnames(cc_data)[colnames(cc_data)=="PAY_0"] = "PAY_1"
# Replace column names by concatenating "q_" with the original column names,
# where x is the data frame containing the columns
colnames(y) <- paste("q_", colnames(x), sep = "")
# Rename columns by appending the specified text ("diff" in this case), separated by _
colnames(var_diff_both) = paste(colnames(var_diff_both), "diff", sep="_")
#----------------------------------------------------------
#----------------------------------------------------------
# OPERATIONS ACROSS COLUMNS
#----------------------------------------------------------
#----------------------------------------------------------
# NOTE(review): 'dft' vs 'df' on the next line looks like a typo —
# presumably both should refer to the same data frame; confirm before reuse.
dft$Avg_Bill_Amt = rowMeans(df[,13:18])
df$Max_Bill_Amt = apply(df[13:18],1, FUN=max)
df$var = rowSums(df[,c('colA', 'colB', 'colC')])
#---------------------------------------------------------------------------
# Identify if a desired column is present, and if not, create it
# YourTBL is the data frame/tibble containing one or more of
# the columns shown below.
#---------------------------------------------------------------------------
# specify the columns expected
colNameList <- c( "ColA",
                  "ColB",
                  "ColC",
                  "ColD")
# look for each column and if not present, create it and set values to NA
YourTBL[colNameList[!(colNameList %in% colnames(YourTBL))]] = NA
#---------------------------------------------------------------------------
## Dynamically Reference columns from a list of columns of interest
#---------------------------------------------------------------------------
df = data.frame(Col1=rnorm(10),
                Col2=rnorm(10),
                Col3=rnorm(10))
InterestCol = c("Col1",
                "Col3")
GetIndex = match(InterestCol,names(df))
df1 = df[,GetIndex] # create df based on InterestCol
df$calc = rowSums(df[, GetIndex]) # calculate row sums using InterestCol
|
a649ef9c4ca5b14f1babd0b2399a102f398337e4
|
4c20974447b0b474410266f3d73fec02ad47404b
|
/PRACTICA.R
|
de00c9e0e9b41d0dcd2ccd429300063a351c1c10
|
[] |
no_license
|
josebummer/ugr_estadistica
|
d9583f4013727d3750e7b3732d00f1a0e12e01c9
|
993b70240202c88371e0d0350c9430c7aeb032bd
|
refs/heads/master
| 2021-12-26T15:11:43.302307
| 2021-12-17T16:06:23
| 2021-12-17T16:06:23
| 75,067,680
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
R
| false
| false
| 919
|
r
|
PRACTICA.R
|
# R Commander (Rcmdr) generated analysis script.
# Load the survey responses from the "Respuestas" sheet of an Excel file.
# Columns used below: Peso (weight), Altura (height), Sexo (sex),
# n_herm (number of siblings).
Respuestas <-
  readXL("C:/Users/Jose/Google Drive/Universidad/SEGUNDO CUATRIMESTRE/Estadistica/Practicas/Practica 2/respuestas.xls",
   rownames=FALSE, header=TRUE, na="", sheet="Respuestas", stringsAsFactors=TRUE)
# Display the loaded data in an Rcmdr viewer window
library(relimp, pos=14)
showData(Respuestas, placement='-20+200', font=getRcmdr('logFont'), maxwidth=80, maxheight=30)
# Scatter plot of weight against height with marginal boxplots
scatterplot(Peso~Altura, reg.line=FALSE, smooth=FALSE, spread=FALSE, boxplots='xy', span=0.5,
  ellipse=FALSE, levels=c(.5, .9), data=Respuestas)
# Simple linear regression of weight on height
RegLineal <- lm(Peso~Altura, data=Respuestas)
summary(RegLineal)
# Predicted weight for a height of 173, using hard-coded coefficients —
# presumably taken from the fit above; confirm they match the current data
peso_estimado <- -66.0046 + 0.7915 * 173
peso_estimado
# Same regression restricted to male respondents (Sexo == "Varón")
RegLinealHombres <- lm(Peso~Altura, data=Respuestas, subset=Sexo == "Varón")
summary(RegLinealHombres)
# Multiple regression: weight on height and sex
RegLinealMultiples <- lm(Peso ~ Altura +Sexo, data=Respuestas)
summary(RegLinealMultiples)
# Multiple regression: weight on height, sex and number of siblings
RegLinealMultiple2 <- lm(Peso ~ Altura + Sexo +n_herm, data=Respuestas)
summary(RegLinealMultiple2)
|
7d16b0ccca067eb983dfde00a615014aee067f59
|
403dc51dcd89aa11ed7079f865bbd159e2ec35b6
|
/Classiffda/R/hselect.R
|
fa3f79bb291d768973b9f4c1d805a16934d89a9a
|
[] |
no_license
|
dapr12/Classifficationfda
|
c49f79ba8581c9beca12869b8245473ba1309b9f
|
bb9b930f0eecb48de663b6a050fb704b60e61977
|
refs/heads/master
| 2020-04-15T18:13:23.383393
| 2014-08-05T10:58:56
| 2014-08-05T10:58:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,084
|
r
|
hselect.R
|
#' Select a cross-validation bandwidth for local polynomial smoothing
#'
#' Chooses the bandwidth that minimizes the median predictive squared error
#' (median PSE) criterion for a set of curves observed on a common grid, as
#' described in Peter Foster's PhD thesis.
#'
#' @param x Location of the discretization points. These discretization points
#'   must be uniform and missing values are not accepted.
#' @param y Typically a matrix or data frame which contains a set of curves
#'   stored in rows. Missing values are not accepted.
#' @param degree Degree of the local polynomial to be used. Defaults to 1
#'   (local linear smoothing), matching the behaviour documented previously.
#' @param interval Numeric vector of length two giving the bandwidth search
#'   range. If \code{NULL}, a range is derived from the mesh of \code{x}.
#' @param ... Further arguments passed on to \code{medianPSE}.
#' @return The bandwidth that minimizes the median PSE.
#' @references Peter Foster PhD Thesis. University of Manchester
#' @examples \dontrun{
#' Mat<- fdaobjMale$data
#' h<- hselect(1:31, t(Mat), 1)
#' }
#' @export
hselect <- function(x, y, degree = 1, interval = NULL, ...)
{
  if (is.null(interval)) {
    #default search range: from (half) the grid mesh up to half the range of x;
    #local polynomials of degree >= 2 need at least a full mesh as lower bound
    rangex <- diff(range(x))
    meshx <- rangex / (length(x) - 1)
    interval <- c( ifelse(degree < 2, meshx / 2, meshx), rangex / 2)
  }
  #minimize the median PSE criterion over the bandwidth interval
  optimize(medianPSE, interval, x = x, y = y, degree = degree, ...)$minimum
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.