blob_id
stringlengths
40
40
directory_id
stringlengths
40
40
path
stringlengths
2
327
content_id
stringlengths
40
40
detected_licenses
listlengths
0
91
license_type
stringclasses
2 values
repo_name
stringlengths
5
134
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
branch_name
stringclasses
46 values
visit_date
timestamp[us]date
2016-08-02 22:44:29
2023-09-06 08:39:28
revision_date
timestamp[us]date
1977-08-08 00:00:00
2023-09-05 12:13:49
committer_date
timestamp[us]date
1977-08-08 00:00:00
2023-09-05 12:13:49
github_id
int64
19.4k
671M
star_events_count
int64
0
40k
fork_events_count
int64
0
32.4k
gha_license_id
stringclasses
14 values
gha_event_created_at
timestamp[us]date
2012-06-21 16:39:19
2023-09-14 21:52:42
gha_created_at
timestamp[us]date
2008-05-25 01:21:32
2023-06-28 13:19:12
gha_language
stringclasses
60 values
src_encoding
stringclasses
24 values
language
stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
7
9.18M
extension
stringclasses
20 values
filename
stringlengths
1
141
content
stringlengths
7
9.18M
d7de3a53185ff14667c9410ffb7bdc57ef9ee94d
bbfcc35c6394e5cb99b3164c0c5b16ad6821ddec
/man/bt.fisher.Rd
de138f8d027f10240b1b45a52eb08347312dbb91
[ "MIT" ]
permissive
PhanstielLab/bedtoolsr
41ec321d3c16a2f893a56182c2d01254bf386de9
cce152f1ce653771d8d41431e0d12e4ef9c42193
refs/heads/master
2022-11-15T13:02:56.285838
2022-11-08T22:20:19
2022-11-08T22:20:19
151,143,796
31
5
NOASSERTION
2019-12-05T18:11:09
2018-10-01T19:08:56
R
UTF-8
R
false
true
2,628
rd
bt.fisher.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bt.fisher.R \name{bt.fisher} \alias{bt.fisher} \title{Calculate Fisher statistic b/w two feature files.} \usage{ bt.fisher( a, b, g, m = NULL, s = NULL, S = NULL, f = NULL, F = NULL, r = NULL, e = NULL, split = NULL, nonamecheck = NULL, bed = NULL, header = NULL, nobuf = NULL, iobuf = NULL, output = NULL ) } \arguments{ \item{a}{<bed/gff/vcf>} \item{b}{<bed/gff/vcf>} \item{g}{<genome file>} \item{m}{Merge overlapping intervals before - looking at overlap.} \item{s}{Require same strandedness. That is, only report hits in B that overlap A on the _same_ strand. - By default, overlaps are reported without respect to strand.} \item{S}{Require different strandedness. That is, only report hits in B that overlap A on the _opposite_ strand. - By default, overlaps are reported without respect to strand.} \item{f}{Minimum overlap required as a fraction of A. - Default is 1E-9 (i.e., 1bp). - FLOAT (e.g. 0.50)} \item{F}{Minimum overlap required as a fraction of B. - Default is 1E-9 (i.e., 1bp). - FLOAT (e.g. 0.50)} \item{r}{Require that the fraction overlap be reciprocal for A AND B. - In other words, if -f is 0.90 and -r is used, this requires that B overlap 90 percent of A and A _also_ overlaps 90 percent of B.} \item{e}{Require that the minimum fraction be satisfied for A OR B. - In other words, if -e is used with -f 0.90 and -F 0.10 this requires that either 90 percent of A is covered OR 10 percent of B is covered. Without -e, both fractions would have to be satisfied.} \item{split}{Treat "split" BAM or BED12 entries as distinct BED intervals.} \item{nonamecheck}{For sorted data, don't throw an error if the file has different naming conventions for the same chromosome. ex. "chr1" vs "chr01".} \item{bed}{If using BAM input, write output as BED.} \item{header}{Print the header from the A file prior to results.} \item{nobuf}{Disable buffered output. 
Using this option will cause each line of output to be printed as it is generated, rather than saved in a buffer. This will make printing large output files noticeably slower, but can be useful in conjunction with other software tools and scripts that need to process one line of bedtools output at a time.} \item{iobuf}{Specify amount of memory to use for input buffer. Takes an integer argument. Optional suffixes K/M/G supported. Note: currently has no effect with compressed files.} \item{output}{Output filepath instead of returning output in R.} } \description{ Calculate Fisher statistic b/w two feature files. }
c66cba24a0393487219d7bef29a5aa4d0b15001d
6c5ed0d438fe8170ae8a98f0e56b3f843c5131b1
/man/soft.thre.Rd
8f4cabd4f25071db6670ab800d590ca190ba74ad
[]
no_license
mariedueker/lwglasso
87580465658a1f340906b37ba2dbfca67c17ea21
ba0228f07218c8d39602f41d573924cebdc6ba31
refs/heads/master
2022-08-01T09:19:26.747570
2020-05-27T04:42:50
2020-05-27T04:42:50
null
0
0
null
null
null
null
UTF-8
R
false
true
580
rd
soft.thre.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/auxfunctions.R \name{soft.thre} \alias{soft.thre} \title{softthresholding complex matrix G} \usage{ soft.thre(G, phi, diagTF = TRUE) } \arguments{ \item{G}{Input complex matrix} \item{phi}{Threshold level parameter} \item{diagTF}{If TRUE, apply thresholding on the diagonal entries of G} } \description{ Soft-thresholding of comlex matrix G with threshould phi. } \details{ soft.thre } \examples{ soft.thre(G) } \keyword{function} \keyword{soft} \keyword{thresholding}
e88dd0daa7a06d9965e4c94f295c0dea2f47f5e0
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/h2o/examples/dim.H2OFrame.Rd.R
527b0646a47d71ef30ea41cd8204de66732c51e9
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
211
r
dim.H2OFrame.Rd.R
library(h2o) ### Name: dim.H2OFrame ### Title: Returns the Dimensions of an H2OFrame ### Aliases: dim.H2OFrame ### ** Examples ## No test: h2o.init() iris_hf <- as.h2o(iris) dim(iris_hf) ## End(No test)
ae9e6fb7755602dfad430199a7b89b8aa302e4ef
27df58994df6d1d57ed07fdd4d21ba9fd77af799
/Functional Data Analysis/gam_part1.R
ffb67568564c4fd357fab493eb32d9c5d2449582
[]
no_license
timothyvigers/School
ded3960fbba42aa6b6e7a2d6e7a97ac2f6f359a5
7a14890884db1f23f0d7e2ad72758a04fa17880c
refs/heads/master
2023-04-06T18:41:54.309469
2023-03-17T21:05:11
2023-03-17T21:05:11
145,224,185
0
0
null
null
null
null
UTF-8
R
false
false
3,059
r
gam_part1.R
library("mgcv") set.seed(80487) N <- 200 P <- 4 X <- matrix(rnorm(N*P), N, P) ## set up the association structures f1 <- function(x) sin(pi*x) f2 <- function(x) 2*x f3 <- function(x) 0.25*x^3 f4 <- function(x) cos(pi*x)*x ## get the linear predictor eta <- 1 + f1(X[,1]) + f2(X[,2]) + f3(X[,3]) + f4(X[,4]) ## simulate gaussian outcomes y_g <- eta + rnorm(N, sd=1) ## simulate binary outcomes pr_y <- 1/(1+exp(-eta)) y_b <- vapply(pr_y, function(x) sample(c(0,1), size=1, prob=c(1-x,x)), numeric(1)) ## combine data into a dataframe df_fit <- data.frame(y_g=y_g, y_b=y_b, X) ## set up basis type for all smooth terms bs <- "cr" ## number of basis functions for all smooth terms K <- 20 ## fit the models on the Gaussian data fit_g_GCV <- gam(y_g ~ s(X1, bs=bs, k=K) + s(X2, bs=bs, k=K) + s(X3, bs=bs, k=K) + s(X4,bs=bs, k=K), family=gaussian(), method="GCV.Cp", data=df_fit) fit_g_REML <- gam(y_g ~ s(X1, bs=bs, k=K) + s(X2, bs=bs, k=K) + s(X3, bs=bs, k=K) + s(X4,bs=bs, k=K), family=gaussian(), method="REML", data=df_fit) ## fit the models on the binary data fit_b_GCV <- gam(y_b ~ s(X1, bs=bs, k=K) + s(X2, bs=bs, k=K) + s(X3, bs=bs, k=K) + s(X4,bs=bs, k=K), family=binomial(), method="GCV.Cp", data=df_fit) fit_b_REML <- gam(y_b ~ s(X1, bs=bs, k=K) + s(X2, bs=bs, k=K) + s(X3, bs=bs, k=K) + s(X4,bs=bs, k=K), family=binomial(), method="REML", data=df_fit) # Plot par(mfrow=c(2,2)) nx_pred <- 1000 xind_pred <- lapply(1:P, function(x){ rn_x <- range(X[,x]) seq(rn_x[1], rn_x[2], len=nx_pred) }) fn_ls <- list(f1,f2,f3,f4) for(p in 1:P){ plot(fit_g_GCV, select=p, shade=TRUE) lines(xind_pred[[p]], fn_ls[[p]](xind_pred[[p]]),col='red',lwd=2,lty=2) } ## set up a new data frame with all "X" predictors at a new range of values xind_pred <- seq(-3,3,len=1000) df_pred <- data.frame(X1=xind_pred, X2=xind_pred, X3=xind_pred, X4=xind_pred) head(df_pred) # Predict yhat_g_REML <- predict(fit_g_REML, newdata=df_pred, type="response", se.fit=T) etahat_g_REML <- predict(fit_g_REML, newdata=df_pred, 
type="link", se.fit=T) smhat_g_REML <- predict(fit_g_REML, newdata=df_pred, type="terms", se.fit=T) Phi_g_REML <- predict(fit_g_REML, newdata=df_pred, type="lpmatrix", se.fit=T) # Class exercise N <- 2000 P <- 2 X <- matrix(rnorm(N*P), N, P) ## set up the association structures f1 <- function(x) exp(x) f2 <- function(x) x^2 eta <- 1 + f1(X[,1]) + f2(X[,2]) y_p <- rpois(N,lambda = eta) # DF df_fit <- data.frame(y_p=y_p, X) # Fit fit_p_GCV <- gam(y_p ~ s(X1, bs=bs, k=K) + s(X2, bs=bs, k=K), family=poisson(), method="GCV.Cp", data=df_fit) # Plot par(mfrow=c(1,2)) plot(fit_p_GCV) # predict xind_pred <- seq(-3,3,len=N) df_pred <- data.frame(X1=xind_pred, X2=xind_pred) smhat_p_REML <- predict(fit_p_GCV, newdata=df_pred, type="terms") ## get the design matrix for our initial fit Phi = predict(fit_p_GCV, newdata=df_pred, type="lpmatrix") ## get the estimated function at our x-values for predicting yhat = Phi %*% fit_p_GCV$coefficients
e88e63ac4bebd6e216aa3874523a8cfa86e008ca
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/SpadeR/examples/ChaoSpecies.Rd.R
246c5732e9ab561d5217fee0136336e1a4c1899c
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
697
r
ChaoSpecies.Rd.R
library(SpadeR) ### Name: ChaoSpecies ### Title: Estimation of species richness in a community ### Aliases: ChaoSpecies ### ** Examples data(ChaoSpeciesData) # Type (1) abundance data ChaoSpecies(ChaoSpeciesData$Abu,"abundance",k=10,conf=0.95) # Type (1A) abundance-frequency counts data ChaoSpecies(ChaoSpeciesData$Abu_count,"abundance_freq_count",k=10,conf=0.95) # Type (2) incidence-frequency data ChaoSpecies(ChaoSpeciesData$Inci,"incidence_freq",k=10,conf=0.95) # Type (2A) incidence-frequency counts data ChaoSpecies(ChaoSpeciesData$Inci_count,"incidence_freq_count",k=10,conf=0.95) # Type (2B) incidence-raw data ChaoSpecies(ChaoSpeciesData$Inci_raw,"incidence_raw",k=10,conf=0.95)
bb35d73d670c233a3a4fd172e1d7442cbe7284df
6dec0f6aec45dff66fcc2f480b03a5ffe3b9408c
/R/gx.eb.R
6b0f8d9b658dd44079ef5d60956d7b51acecb3d6
[]
no_license
cran/rgr
9645638952aa019902d3308e7e6cf04c1112bab7
87383fabc4cb3529c4c97493c596f7fd347cf302
refs/heads/master
2021-01-21T21:55:06.570408
2018-03-05T22:42:52
2018-03-05T22:42:52
17,699,219
0
0
null
null
null
null
UTF-8
R
false
false
1,645
r
gx.eb.R
gx.eb <- function(r, s, xx, ...) { # NOTE: Prior to using this function the data frame/matrix containing the # parts, xx, must be run through ltdl.fix.df to convert any <dl -ve # values to positive half that value, and set zero2na = TRUE if it is # required to convert any zero values or other numeric codes representing # blanks to NAs. Parts in the data frame/matrix, xx, must be in the # same units, any compositions including NAs will be removed. # if (is.data.frame(xx)) xx <- as.matrix(xx) if (any(xx < 0, na.rm = TRUE)) stop("Negative values not allowed\n") cat(" ** Are the data/parts all in the same measurement units? **\n") temp.x <- remove.na(xx, iftell = FALSE) x <- temp.x$x; nna <- temp.x$nna if (nna >= 1) cat(" ", nna, "composition(s) with NA(s) removed\n") # ListOfParts <- list(...) kk <- length(ListOfParts) if(kk != (r + s)) stop("Sum of parts in numerator and denominator must ", "equal length of List of Parts\n") # num.parts <- unlist(ListOfParts[1 : r]) num.names <- colnames(x)[num.parts] cat(" Parts in numerator:", num.names, "\t[", num.parts, "]\n") num <- x[, num.parts] num <- log(num) num.mean <- rowMeans(num) # den.parts <- unlist(ListOfParts[(r+1) : (r+s)]) den.names <- colnames(x)[den.parts] cat(" Parts in denomintor:", den.names, "\t[", den.parts, "]\n") den <- x[, den.parts] den <- log(den) den.mean <- rowMeans(den) # temp <- sqrt(r * s / (r + s)) z <- temp * exp(num.mean - den.mean) # return(z = z) }
706825c5725ad373b6e5e09b2603b941946687e6
0aa63f99a9ebe79e55cc09338d5bb4ce2377fd83
/R/redcapConnection.R
1fb585a849833d62d2838600d3e04dadf1880ebd
[]
no_license
nutterb/redcapAPI
c13b890b5d33b40c134833155861ee42d44b06c7
9b7287106198581c352fc91492d83fc7806d2bd7
refs/heads/main
2023-09-01T07:41:41.326514
2023-08-28T14:02:23
2023-08-28T14:02:23
11,691,011
47
31
null
2022-11-03T22:49:05
2013-07-26T17:31:51
R
UTF-8
R
false
false
40,474
r
redcapConnection.R
#' @name redcapConnection #' #' @title Connect to a REDCap Database #' @description Creates an object of class \code{redcapApiConnection} for #' using the REDCap API #' #' @param url URL for a REDCap database API. Check your institution's REDCap #' documentation for this address. Either \code{url} or \code{conn} must #' be specified. #' @param token REDCap API token #' @param config A list to be passed to \code{httr::POST}. This allows the #' user to set additional configurations for the API calls, such as #' certificates, ssl version, etc. For the majority of users, this does #' not need to be altered. See Details for more about this argument's #' purpose and the \code{redcapAPI} wiki for specifics on its use. #' @param retries \code{integerish(1)}. Sets the number of attempts to make to the #' API if a timeout error is encountered. Must be a positive value. #' @param retry_interval \code{numeric}. Sets the intervals (in seconds) at #' which retries are attempted. By default, set at a \code{2^r} where #' \code{r} is the \code{r}th retry (ie, 2, 4, 8, 16, ...). For fixed #' intervals, provide a single value. Values will be recycled to match #' the number of retries. #' @param retry_quietly \code{logical(1)}. When \code{FALSE}, messages will #' be shown giving the status of the API calls. Defaults to \code{TRUE}. #' @param x \code{redcapConnection} object to be printed #' @param ... arguments to pass to other methods #' #' @details #' \code{redcapConnection} objects will retrieve and cache various forms of #' project information. This can make metadata, arms, dags, events, instruments, fieldnames, #' arm-event mappings, users, version, project information, fileRepository, #' and repeating instruments available #' directly from the \code{redcapConnection} object. Take note that #' the retrieval of these objects uses the default values of the respective #' export functions (excepting the file repository, #' which uses \code{recursive = TRUE}). 
#' #' For each of these objects, there are four methods that can be called from #' the \code{redcapConnection} object: the get method (called via #' \code{rcon$metadata()}, for example); the has method (\code{rcon$has_metadata}), #' which returns a logical indicating if the metadata has been cached; #' the flush method (\code{rcon$flush_metadata}), which removes the cached value; #' and the refresh method (\code{rcon$refresh_metadata}), which replaces the #' current value with a new call to the API. There is also a \code{flush_all} #' and \code{refresh_all} method. #' #' The \code{redcapConnection} object also stores the user preferences for #' handling repeated attempts to call the API. In the event of a timeout #' error or server unavailability, these settings allow a system pause before #' attempting another API call. In the event all of the retries fail, the #' error message of the last attempt will be returned. These settings may #' be altered at any time using the methods \code{rcon$set_retries(r)}, #' \code{rcon$set_retry_interval(ri)}, and \code{rcon$set_retry_quietly(rq)}. #' The argument to these functions have the same requirements as the #' corresponding arguments to \code{redcapConnection}. #' #' For convenience, you may consider using #' \code{options(redcap_api_url=[your URL here])} in your RProfile. #' To obtain an API token for a project, do the following:\cr #' Enter the 'User Rights' section of a project\cr #' Select a user\cr #' Check the box for 'API Data Export' or 'API Data Import,' as appropriate. A full tutorial on #' configuring REDCap to use the API can be found at \url{https://github.com/vubiostat/redcapAPI/wiki} #' #' Tokens are specific to a project, and a token must be created for each #' project for which you wish to use the API. #' #' The \code{config} argument is passed to the \code{httr::POST} argument of #' the same name. 
The most likely reason for using this argument is that the #' certificate files bundled in \code{httr} have fallen out of date. #' Hadley Wickham is pretty good about keeping those certificates up #' to date, so most of the time this problem can be resolved by updating #' \code{httr} to the most recent version. If that doesn't work, a #' certificate file can be manually passed via the \code{config} argument. #' The \code{redcapAPI} wiki has a more detailed tutorial on how to #' find and pass an SSL certificate to the API call #' (\url{https://github.com/vubiostat/redcapAPI/wiki/Manually-Setting-an-SSL-Certificate-File}). #' #' Additional Curl option can be set in the \code{config} argument. See the documentation #' for \code{httr::config} and \code{httr:httr_options} for more Curl options. #' #' @author Jeffrey Horner #' #' @examples #' \dontrun{ #' rcon <- redcapConnection(url=[YOUR_REDCAP_URL], token=[API_TOKEN]) #' #' options(redcap_api_url=[YOUR_REDCAP_URL]) #' rcon <- redcapConnection(token=[API_TOKEN]) #' #' exportRecords(rcon) #' #' # Get the complete metadata for the project #' rcon$metadata() #' #' # Get the fieldnames for a project #' rcon$fieldnames() #' #' # remove a cached value for fieldnames #' rcon$flush_fieldnames() #' rcon$has_fieldnames() #' } #' #' @export redcapConnection <- function(url = getOption('redcap_api_url'), token, config = httr::config(), retries = 5, retry_interval = 2^(seq_len(retries)), retry_quietly = TRUE) { coll <- checkmate::makeAssertCollection() checkmate::assert_character(x = url, len = 1, add = coll) checkmate::assert_character(x = token, len = 1, add = coll) checkmate::assert_integerish(x = retries, len = 1, lower = 1, any.missing = FALSE, add = coll) checkmate::assert_numeric(x = retry_interval, lower = 0, any.missing = FALSE, add = coll) checkmate::assert_logical(x = retry_quietly, len = 1, any.missing = FALSE, add = coll) checkmate::reportAssertions(coll) u <- url t <- token this_metadata <- NULL this_arm <- NULL 
this_event <- NULL this_fieldname <- NULL this_mapping <- NULL this_user <- NULL this_version <- NULL this_project <- NULL this_instrument <- NULL this_fileRepository <- NULL this_repeat <- NULL this_dag <- NULL this_dag_assign <- NULL this_user_role <- NULL this_user_role_assign <- NULL rtry <- retries rtry_int <- rep(retry_interval, length.out = rtry) rtry_q <- retry_quietly getter <- function(export){ switch(export, "metadata" = exportMetaData(rc), "arm" = exportArms(rc), "event" = exportEvents(rc), "fieldname" = exportFieldNames(rc), "mapping" = exportMappings(rc), "user" = exportUsers(rc), "version" = exportVersion(rc), "project" = exportProjectInformation(rc), "instrument" = exportInstruments(rc), "fileRepo" = exportFileRepositoryListing(rc, recursive = TRUE), "repeat" = exportRepeatingInstrumentsEvents(rc), "dags" = exportDags(rc), "dagAssign" = exportUserDagAssignments(rc), "userRole" = exportUserRoles(rc), "userRoleAssign" = exportUserRoleAssignments(rc), NULL) } rc <- list( url = u, token = t, config = config, metadata = function(){ if (is.null(this_metadata)) this_metadata <<- getter("metadata"); this_metadata }, has_metadata = function() !is.null(this_metadata), flush_metadata = function() this_metadata <<- NULL, refresh_metadata = function() this_metadata <<- getter("metadata"), arms = function(){ if (is.null(this_arm)) this_arm <<- getter("arm"); this_arm }, has_arms = function() !is.null(this_arm), flush_arms = function() this_arm <<- NULL, refresh_arms = function() this_arm <<- getter("arm"), events = function(){ if (is.null(this_event)) this_event <<- getter("event"); this_event}, has_events = function() !is.null(this_event), flush_events = function() this_event <<- NULL, refresh_events = function() this_event <<- getter("event"), fieldnames = function(){ if (is.null(this_fieldname)) this_fieldname <<- getter("fieldname"); this_fieldname }, has_fieldnames = function() !is.null(this_fieldname), flush_fieldnames = function() this_fieldname <<- NULL, 
refresh_fieldnames = function() this_fieldname <<- getter("fieldname"), mapping = function(){ if (is.null(this_mapping)) this_mapping <<- getter("mapping"); this_mapping }, has_mapping = function() !is.null(this_mapping), flush_mapping = function() this_mapping <<- NULL, refresh_mapping = function() this_mapping <<- getter("mapping"), users = function(){ if (is.null(this_user)) this_user <<- getter("user"); this_user }, has_users = function() !is.null(this_user), flush_users = function() this_user <<- NULL, refresh_users = function() this_user <<- getter("user"), user_roles = function(){ if (is.null(this_user_role)) this_user_role <<- getter("userRole"); this_user_role }, has_user_roles = function() !is.null(this_user_role), flush_user_roles = function() this_user_role <<- NULL, refresh_user_roles = function() this_user_role <<- getter("userRole"), user_role_assignment = function(){ if (is.null(this_user_role_assign)) this_user_role_assign <<- getter("userRoleAssign"); this_user_role_assign }, has_user_role_assignment = function() !is.null(this_user_role_assign), flush_user_role_assignment = function() this_user_role_assign <<- NULL, refresh_user_role_assignment = function() this_user_role_assign <<- getter("userRoleAssign"), version = function(){ if (is.null(this_version)) this_version <<- getter("version"); this_version }, has_version = function() !is.null(this_version), flush_version = function() this_version <<- NULL, refresh_version = function() this_version <<- getter("version"), projectInformation = function(){ if (is.null(this_project)) this_project <<- getter("project"); this_project }, has_projectInformation = function() !is.null(this_project), flush_projectInformation = function() this_project <<- NULL, refresh_projectInformation = function() this_project <<- getter("project"), instruments = function(){ if (is.null(this_instrument)) this_instrument <<- getter("instrument"); this_instrument }, has_instruments = function() !is.null(this_instrument), 
flush_instruments = function() this_instrument <<- NULL, refresh_instruments = function() this_instrument <<- getter("instrument"), fileRepository = function(){ if (is.null(this_fileRepository)) this_fileRepository <<- getter("fileRepo"); this_fileRepository }, has_fileRepository = function() !is.null(this_fileRepository), flush_fileRepository = function() this_fileRepository <<- NULL, refresh_fileRepository = function() this_fileRepository <<- getter("fileRepo"), repeatInstrumentEvent = function(){ if (is.null(this_repeat)) this_repeat <<- getter("repeat"); this_repeat }, has_repeatInstrumentEvent = function() !is.null(this_repeat), flush_repeatInstrumentEvent = function() this_repeat <<- NULL, refresh_repeatInstrumentEvent = function() this_repeat <<- getter("repeat"), dags = function() {if (is.null(this_dag)) this_dag <<- getter("dags"); this_dag }, has_dags = function() !is.null(this_dag), flush_dags = function() this_dag <<- NULL, refresh_dags = function() this_dag <<- getter("dags"), dag_assignment = function() {if (is.null(this_dag_assign)) this_dag_assign <<- getter("dagAssign"); this_dag_assign }, has_dag_assignment = function() !is.null(this_dag_assign), flush_dag_assignment = function() this_dag_assign <<- NULL, refresh_dag_assignment = function() this_dag_assign <<- getter("dagAssign"), flush_all = function(){ this_metadata <<- this_arm <<- this_event <<- this_instrument <<- this_fieldname <<- this_mapping <<- this_repeat <<- this_user <<- this_user_role <<- this_user_role_assign <<- this_dag <<- this_dag_assign <<- this_project <<- this_version <<- this_fileRepository <<- NULL}, refresh_all = function(){ this_metadata <<- getter("metadata") this_arm <<- getter("arm") this_event <<- getter("event") this_instrument <<- getter("instrument") this_fieldname <<- getter("fieldname") this_mapping <<- getter("mapping") this_repeat <<- getter("repeat") this_user_role <<- getter("userRole") this_user_role_assign <<- getter("userRoleAssign") this_dag <<- 
getter("dag") this_dag_assign <<- getter("dagAssign") this_project <<- getter("project") this_version <<- getter("version") this_fileRepository <<- getter("fileRepo") }, retries = function() rtry, set_retries = function(r){ checkmate::assert_integerish(x = r, len = 1, lower = 1, any.missing = FALSE) rtry <<- r }, retry_interval = function() rtry_int, set_retry_interval = function(ri){ checkmate::assert_numeric(x = ri, lower = 0, any.missing = FALSE) rtry_int <<- rep(ri, length.out = rtry) }, retry_quietly = function() rtry_q, set_retry_quietly = function(rq){ checkmate::assert_logical(x = rq, len = 1, any.missing = FALSE) rtry_q <<- rq } ) class(rc) <- c("redcapApiConnection", "redcapConnection") rc } #' @rdname redcapConnection #' @export print.redcapApiConnection <- function(x, ...){ is_cached <- function(l) if (l) "Cached" else "Not Cached" output <- c("REDCap API Connection Object", sprintf("Meta Data : %s", is_cached(x$has_metadata())), sprintf("Arms : %s", is_cached(x$has_arms())), sprintf("Events : %s", is_cached(x$has_events())), sprintf("Instruments : %s", is_cached(x$has_instruments())), sprintf("Field Names : %s", is_cached(x$has_fieldnames())), sprintf("Mapping : %s", is_cached(x$has_mapping())), sprintf("Repeat Inst. : %s", is_cached(x$has_repeatInstrumentEvent())), sprintf("Users : %s", is_cached(x$has_users())), sprintf("User Roles : %s", is_cached(x$has_user_roles())), sprintf("User Role Assignment : %s", is_cached(x$has_user_role_assignment())), sprintf("DAGs : %s", is_cached(x$has_dags())), sprintf("DAG Assignment : %s", is_cached(x$has_dag_assignment())), sprintf("Project Info : %s", is_cached(x$has_projectInformation())), sprintf("Version : %s", is_cached(x$has_version())), sprintf("File Repo : %s", is_cached(x$has_fileRepository()))) cat(output, sep = "\n") } #' @rdname redcapConnection #' @param meta_data Either a \code{character} giving the file from which the #' metadata can be read, or a \code{data.frame}. 
#' @param arms Either a \code{character} giving the file from which the #' arms can be read, or a \code{data.frame}. #' @param events Either a \code{character} giving the file from which the #' events can be read, or a \code{data.frame}. #' @param instruments Either a \code{character} giving the file from which the #' instruments can be read, or a \code{data.frame}. #' @param field_names Either a \code{character} giving the file from which the #' field names can be read, or a \code{data.frame}. #' @param mapping Either a \code{character} giving the file from which the #' Event Instrument mappings can be read, or a \code{data.frame}. #' @param repeat_instrument Either a \code{character} giving the file from which the #' Repeating Instruments and Events settings can be read, or a \code{data.frame}. #' Note: The REDCap GUI doesn't offer a download file of these settings #' (at the time of this writing). #' @param users Either a \code{character} giving the file from which the #' User settings can be read, or a \code{data.frame}. #' @param user_roles Either a \code{character} giving the file from which the #' User Roles can be read, or a \code{data.frame}. #' @param user_role_assignment Either a \code{character} giving the file from which the #' User Role Assigments can be read, or a \code{data.frame}. #' @param dags Either a \code{character} giving the file from which the #' Data Access Groups can be read, or a \code{data.frame}. #' @param dag_assignment Either a \code{character} giving the file from which the #' Data Access Group Assigments can be read, or a \code{data.frame}. #' @param project_info Either a \code{character} giving the file from which the #' Project Information can be read, or a \code{data.frame}. #' @param version Either a \code{character} giving the file from which the #' version can be read, or a \code{data.frame}. 
#' @param file_repo Either a \code{character} giving the file from which the #' File Repository Listing can be read, or a \code{data.frame}. #' @param records Either a \code{character} giving the file from which the #' Records can be read, or a \code{data.frame}. This should be the raw #' data as downloaded from the API, for instance. Using labelled or formatted #' data is likely to result in errors when passed to other functions. #' @export offlineConnection <- function(meta_data = NULL, arms = NULL, events = NULL, instruments = NULL, field_names = NULL, mapping = NULL, repeat_instrument = NULL, users = NULL, user_roles = NULL, user_role_assignment = NULL, dags = NULL, dag_assignment = NULL, project_info = NULL, version = NULL, file_repo = NULL, records = NULL){ ################################################################### # Argument Validation #### coll <- checkmate::makeAssertCollection() checkmate::assert( checkmate::check_character(x = meta_data, len = 1, null.ok = TRUE), checkmate::check_data_frame(x = meta_data, null.ok = TRUE), combine = "or", .var.name = "meta_data", add = coll ) checkmate::assert( checkmate::check_character(x = arms, len = 1, null.ok = TRUE), checkmate::check_data_frame(x = arms, null.ok = TRUE), .var.name = "arms", add = coll ) checkmate::assert( checkmate::check_character(x = events, len = 1, null.ok = TRUE), checkmate::check_data_frame(x = events, null.ok = TRUE), .var.name = "events", add = coll ) checkmate::assert( checkmate::check_character(x = instruments, len = 1, null.ok = TRUE), checkmate::check_data_frame(x = instruments, null.ok = TRUE), .var.name = "instruments", add = coll ) checkmate::assert( checkmate::check_character(x = field_names, len = 1, null.ok = TRUE), checkmate::check_data_frame(x = field_names, null.ok = TRUE), .var.name = "field_names", add = coll ) checkmate::assert( checkmate::check_character(x = mapping, len = 1, null.ok = TRUE), checkmate::check_data_frame(x = mapping, null.ok = TRUE), .var.name = 
"mapping", add = coll ) checkmate::assert( checkmate::check_character(x = repeat_instrument, len = 1, null.ok = TRUE), checkmate::check_data_frame(x = repeat_instrument, null.ok = TRUE), .var.name = "repeat_instrument", add = coll ) checkmate::assert( checkmate::check_character(x = users, len = 1, null.ok = TRUE), checkmate::check_data_frame(x = users, null.ok = TRUE), .var.name = "users", add = coll ) checkmate::assert( checkmate::check_character(x = user_roles, len = 1, null.ok = TRUE), checkmate::check_data_frame(x = user_roles, null.ok = TRUE), .var.name = "user_roles", add = coll ) checkmate::assert( checkmate::check_character(x = user_role_assignment, len = 1, null.ok = TRUE), checkmate::check_data_frame(x = user_role_assignment, null.ok = TRUE), .var.name = "user_role_assignment", add = coll ) checkmate::assert( checkmate::check_character(x = dags, len = 1, null.ok = TRUE), checkmate::check_data_frame(x = dags, null.ok = TRUE), .var.name = "dags", add = coll ) checkmate::assert( checkmate::check_character(x = dag_assignment, len = 1, null.ok = TRUE), checkmate::check_data_frame(x = dag_assignment, null.ok = TRUE), .var.name = "dag_assignment", add = coll ) checkmate::assert( checkmate::check_character(x = project_info, len = 1, null.ok = TRUE), checkmate::check_data_frame(x = project_info, null.ok = TRUE), .var.name = "project_info", add = coll ) checkmate::assert( checkmate::check_character(x = version, len = 1, null.ok = TRUE), checkmate::check_data_frame(x = version, null.ok = TRUE), .var.name = "version", add = coll ) checkmate::assert( checkmate::check_character(x = file_repo, len = 1, null.ok = TRUE), checkmate::check_data_frame(x = file_repo, null.ok = TRUE), .var.name = "file_repo", add = coll ) checkmate::assert( checkmate::check_character(x = records, len = 1, null.ok = TRUE), checkmate::check_data_frame(x = records, null.ok = TRUE), .var.name = "records", add = coll ) checkmate::reportAssertions(coll) 
################################################################### # Argument Validation - Part Two #### if (is.character(meta_data)){ checkmate::assert_file_exists(x = meta_data, add = coll) } if (is.character(arms)){ checkmate::assert_file_exists(x = arms, add = coll) } if (is.character(events)){ checkmate::assert_file_exists(x = events, add = coll) } if (is.character(instruments)){ checkmate::assert_file_exists(x = instruments, add = coll) } if (is.character(field_names)){ checkmate::assert_file_exists(x = field_names, add = coll) } if (is.character(mapping)){ checkmate::assert_file_exists(x = mapping, add = coll) } if (is.character(repeat_instrument)){ checkmate::assert_file_exists(x = repeat_instrument, add = coll) } if (is.character(users)){ checkmate::assert_file_exists(x = users, add = coll) } if (is.character(user_roles)){ checkmate::assert_file_exists(x = user_roles, add = coll) } if (is.character(user_role_assignment)){ checkmate::assert_file_exists(x = user_role_assignment, add = coll) } if (is.character(dags)){ checkmate::assert_file_exists(x = dags, add = coll) } if (is.character(dag_assignment)){ checkmate::assert_file_exists(x = dag_assignment, add = coll) } if (is.character(project_info)){ checkmate::assert_file_exists(x = project_info, add = coll) } if (is.character(file_repo)){ checkmate::assert_file_exists(x = file_repo, add = coll) } checkmate::reportAssertions(coll) ################################################################### # Read files #### this_metadata <- validateRedcapData(data = .offlineConnection_readMetaData(meta_data), redcap_data = REDCAP_METADATA_STRUCTURE) this_arm <- validateRedcapData(data = .offlineConnection_readFile(arms), redcap_data = REDCAP_ARMS_STRUCTURE) this_event <- validateRedcapData(data = .offlineConnection_readFile(events), redcap_data = REDCAP_EVENT_STRUCTURE) this_fieldname <- if (is.null(field_names) & !is.null(this_metadata)){ .fieldNamesFromMetaData(this_metadata) } else { validateRedcapData(data = 
.offlineConnection_readFile(field_names), redcap_data = REDCAP_FIELDNAME_STRUCTURE) } this_mapping <- validateRedcapData(data = .offlineConnection_readFile(mapping), redcap_data = REDCAP_INSTRUMENT_MAPPING_STRUCTURE) this_repeat <- .offlineConnection_readFile(repeat_instrument) this_user <- validateRedcapData(data = .offlineConnection_readFile(users), redcap_data = REDCAP_USER_STRUCTURE) this_user_roles <- validateRedcapData(data = .offlineConnection_readFile(user_roles), redcap_data = REDCAP_USER_ROLE_STRUCTURE) this_user_role_assignment <- validateRedcapData(data = .offlineConnection_readFile(user_role_assignment), redcap_data = REDCAP_USER_ROLE_ASSIGNMENT_STRUCTURE) this_dags <- validateRedcapData(data = .offlineConnection_readFile(dags), redcap_data = REDCAP_DAG_STRUCTURE) this_dag_assignment <- validateRedcapData(data = .offlineConnection_readFile(dag_assignment), redcap_data = REDCAP_DAG_ASSIGNMENT_STRUCTURE) this_project <- validateRedcapData(data = .offlineConnection_readFile(project_info), redcap_data = REDCAP_PROJECT_INFORMATION_STRUCTURE) this_version <- version this_fileRepository <- .offlineConnection_readFile(file_repo) this_instrument <- if (is.null(instruments) & !is.null(this_metadata)){ data.frame(instrument_name = unique(this_metadata$form_name), instrument_label = unique(this_metadata$form_name), stringsAsFactors = FALSE) } else { validateRedcapData(data = .offlineConnection_readFile(instruments), redcap_data = REDCAP_INSTRUMENT_STRUCTURE) } this_record <- .offlineConnection_readFile(records) ################################################################### # Redcap Connection object #### rc <- list( url = NULL, token = NULL, config = NULL, metadata = function(){ this_metadata }, has_metadata = function() !is.null(this_metadata), flush_metadata = function() this_metadata <<- NULL, refresh_metadata = function(x) {this_metadata <<- validateRedcapData(data = .offlineConnection_readFile(x), redcap_data = REDCAP_METADATA_STRUCTURE)}, arms = 
function(){ this_arm }, has_arms = function() !is.null(this_arm), flush_arms = function() this_arm <<- NULL, refresh_arms = function(x) {this_arm <<- validateRedcapData(data = .offlineConnection_readFile(x), redcap_data = REDCAP_ARMS_STRUCTURE)}, events = function(){ this_event}, has_events = function() !is.null(this_event), flush_events = function() this_event <<- NULL, refresh_events = function(x) {this_event <<- validateRedcapData(data = .offlineConnection_readFile(x), redcap_data = REDCAP_EVENT_STRUCTURE)}, instruments = function(){ this_instrument }, has_instruments = function() !is.null(this_instrument), flush_instruments = function() this_instrument <<- NULL, refresh_instruments = function(x) { this_instrument <<- if (is.null(x) & !is.null(this_metadata)){ data.frame(instrument_name = unique(this_metadata$form_name), instrument_label = unique(this_metadata$form_name), stringsAsFactors = FALSE) } else { validateRedcapData(data = .offlineConnection_readFile(x), redcap_data = REDCAP_INSTRUMENT_STRUCTURE) } }, fieldnames = function(){ this_fieldname }, has_fieldnames = function() !is.null(this_fieldname), flush_fieldnames = function() this_fieldname <<- NULL, refresh_fieldnames = function(x = NULL) { this_fieldname <<- if (is.null(x) & !is.null(this_metadata)){ .fieldNamesFromMetaData(this_metadata) } else { validateRedcapData(data = .offlineConnection_readFile(x), redcap_data = REDCAP_FIELDNAME_STRUCTURE) } }, mapping = function(){ this_mapping }, has_mapping = function() !is.null(this_mapping), flush_mapping = function() this_mapping <<- NULL, refresh_mapping = function(x) { this_mapping <<- validateRedcapData(data = .offlineConnection_readFile(x), redcap_data = REDCAP_INSTRUMENT_MAPPING_STRUCTURE)}, repeatInstrumentEvent = function(){ this_repeat }, has_repeatInstrumentEvent = function() !is.null(this_repeat), flush_repeatInstrumentEvent = function() this_project <<- NULL, refresh_repeatInstrumentEvent = function(x) {this_project <<- validateRedcapData(data = 
.offlineConnection_readFile(x), redcap_data = REDCAP_REPEAT_INSTRUMENT_STRUCTURE)}, users = function(){ this_user }, has_users = function() !is.null(this_user), flush_users = function() this_user <<- NULL, refresh_users = function(x) {this_user <<- validateRedcapData(data = .offlineConnection_readFile(x), redcap_data = REDCAP_USER_STRUCTURE)}, user_roles = function(){ this_user_roles }, has_user_roles = function() !is.null(this_user_roles), flush_user_roles = function() this_user_roles <<- NULL, refresh_user_roles = function(x) {this_user_roles <<- validateRedcapData(data = .offlineConnection_readFile(x), redcap_data = REDCAP_USER_ROLE_STRUCTURE)}, users_role_assignment = function(){ this_user_role_assignment }, has_user_role_assignment = function() !is.null(this_user_role_assignment), flush_user_role_assignment = function() this_user_role_assignment <<- NULL, refresh_user_role_assignment = function(x) {this_user_role_assignment <<- validateRedcapData(data = .offlineConnection_readFile(x), redcap_data = REDCAP_USER_ROLE_ASSIGNMENT_STRUCTURE)}, dags = function(){ this_dags }, has_dags = function() !is.null(this_dags), flush_dags = function() this_dags <<- NULL, refresh_dags = function(x) {this_dags <<- validateRedcapData(data = .offlineConnection_readFile(x), redcap_data = REDCAP_DAG_STRUCTURE)}, dag_assignment = function(){ this_dag_assignment }, has_dag_assignment = function() !is.null(this_dag_assignment), flush_dag_assignment = function() this_dag_assignment <<- NULL, refresh_dag_assignment = function(x) {this_dag_assignment <<- validateRedcapData(data = .offlineConnection_readFile(x), redcap_data = REDCAP_DAG_ASSIGNMENT_STRUCTURE)}, projectInformation = function(){ this_project }, has_projectInformation = function() !is.null(this_project), flush_projectInformation = function() this_project <<- NULL, refresh_projectInformation = function(x) {this_project <<- validateRedcapData(data = .offlineConnection_readFile(x), redcap_data = 
REDCAP_PROJECT_INFORMATION_STRUCTURE)}, version = function(){ this_version }, has_version = function() !is.null(this_version), flush_version = function() this_version <<- NULL, refresh_version = function(x) {this_version <<- x}, fileRepository = function(){ this_fileRepository }, has_fileRepository = function() !is.null(this_fileRepository), flush_fileRepository = function() this_fileRepository <<- NULL, refresh_fileRepository = function(x) {this_fileRepository <<- .offlineConnection_readFile(x)}, records = function(){ this_record }, has_records = function() !is.null(this_record), flush_records = function() this_record <<- NULL, refresh_records = function(x) {this_record <<- .offlineConnection_readFile(records)}, flush_all = function(){ this_metadata <<- this_arm <<- this_event <<- this_fieldname <<- this_mapping <<- this_user <<- this_version <<- this_project <<- this_instrument <<- this_fileRepository <<- this_repeat <<- NULL}, refresh_all = function(){} # provided only to match the redcapApiConnection. Has no effect ) class(rc) <- c("redcapOfflineConnection", "redcapConnection") rc } #' @rdname redcapConnection #' @export print.redcapOfflineConnection <- function(x, ...){ is_cached <- function(l) if (l) "Cached" else "Not Cached" output <- c("REDCap Offline Connection Object", sprintf("Records : %s", is_cached(x$has_records())), sprintf("Meta Data : %s", is_cached(x$has_metadata())), sprintf("Arms : %s", is_cached(x$has_arms())), sprintf("Events : %s", is_cached(x$has_events())), sprintf("Instruments : %s", is_cached(x$has_instruments())), sprintf("Field Names : %s", is_cached(x$has_fieldnames())), sprintf("Mapping : %s", is_cached(x$has_mapping())), sprintf("Repeat Inst. 
: %s", is_cached(x$has_repeatInstrumentEvent())), sprintf("Users : %s", is_cached(x$has_users())), sprintf("User Roles : %s", is_cached(x$has_user_roles())), sprintf("Users Role Assignment : %s", is_cached(x$has_user_role_assignment())), sprintf("DAGs : %s", is_cached(x$has_dags())), sprintf("DAG Assigment : %s", is_cached(x$has_dag_assignment())), sprintf("Project Info : %s", is_cached(x$has_projectInformation())), sprintf("Version : %s", is_cached(x$has_version())), sprintf("File Repo : %s", is_cached(x$has_fileRepository()))) cat(output, sep = "\n") } ##################################################################### # Unexported .offlineConnection_readFile <- function(file){ if (is.character(file)){ read.csv(file, na.strings = "", stringsAsFactors = FALSE, colClasses = "character") } else { file } } .fieldNamesFromMetaData <- function(meta_data){ FieldNameFrame <- mapply( function(field_name, field_type, choices){ if (field_type == "checkbox"){ mapping <- fieldChoiceMapping(choices, field_name) data.frame(original_field_name = rep(field_name, nrow(mapping)), choice_value = mapping[, 1], export_field_name = sprintf("%s___%s", field_name, tolower(mapping[, 1])), stringsAsFactors = FALSE) } else { data.frame(original_field_name = field_name, choice_value = NA_character_, export_field_name = field_name, stringsAsFactors = FALSE) } }, field_name = meta_data$field_name, field_type = meta_data$field_type, choices = meta_data$select_choices_or_calculations, SIMPLIFY = FALSE) forms <- unique(meta_data$form_name) forms <- sprintf("%s_complete", forms) FormFieldName <- data.frame(original_field_name = forms, choice_value = NA_character_, export_field_name = forms, stringsAsFactors = FALSE) FieldNames <- do.call("rbind", FieldNameFrame) FieldNames <- rbind(FieldNames, FormFieldName) rownames(FieldNames) <- NULL FieldNames } .offlineConnection_readMetaData <- function(file){ if (is.character(file)){ MetaData <- read.csv(file, na.strings = "", stringsAsFactors = FALSE) 
names(MetaData) <- ifelse(names(MetaData) %in% names(REDCAP_METADATA_API_UI_MAPPING), REDCAP_METADATA_API_UI_MAPPING[names(MetaData)], names(MetaData)) return(MetaData) } else { file } }
b6d8e0c167bc9ab27e87de8382c1964f021f559f
d4fb7b0bf2830af9f44f95f1ffdbc906c11379ab
/man/carto_epci.Rd
fa897b6781ff43dc008785dd0470f9e6b37402d0
[ "CC-BY-4.0", "etalab-2.0", "CC-BY-3.0", "LicenseRef-scancode-etalab-2.0-en" ]
permissive
jengelaere/enr.reseaux
b973be63f793652717c340d0fbf0785f960d1aff
72c4b44aa13c5fbb68ad18a2281d9a19478a8917
refs/heads/main
2023-04-10T19:43:47.856755
2021-04-06T18:58:27
2021-04-06T18:58:27
355,301,058
0
0
null
null
null
null
UTF-8
R
false
true
1,135
rd
carto_epci.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data_carto.R \docType{data} \name{carto_epci} \alias{carto_epci} \title{carto_epci} \format{ A data frame with 72 rows and 9 variables: \describe{ \item{ EPCI }{ code de l'EPCI, factor } \item{ AREA }{ surface de l'EPCI, units } \item{ geometry }{ contours geographiques de l'EPCI, sfc_GEOMETRY,sfc } \item{ Echelle }{ echelle du dataset, factor } \item{ Zone }{ libelle de l'EPCI, factor } \item{ pourcent_enrr }{ part de la consommation electrique par la production renouvelable, numeric } \item{ cat_prct_enrr }{ classe de l'indicateur part de la consommation electrique par la production renouvelable, factor } \item{ pourcent_bioch4 }{ part de la consommation de gaz couverte par la production renouvelable numeric } \item{ cat_prct_bioch4 }{ classe de l'indicateur part de la consommation de gaz couverte par la production renouvelable, factor } } } \source{ DREAL PdL - TEO } \usage{ carto_epci } \description{ Un dataset comprenant la geographie des EPCI de la region et quelques indicateurs à cartographier. } \keyword{datasets}
8e982559603a58b7669ad5bd5f7532a1ff1d1479
1a4b653701ea2cbee79bf52e8e4b1bdb4b6d9d45
/CVS case/0_clean_data_nonsmk.r
6a6cb7d96635ca64e8acde72ce35b3085e6d4167
[]
no_license
Superet/Tobacco
c5fbab709d2ea7be74be2f0cc7466d279ebcd551
7fe51f480cd3d0db812987c3cf73c7086978980b
refs/heads/master
2021-05-04T10:29:17.916670
2017-07-15T19:13:54
2017-07-15T19:13:54
44,255,630
0
0
null
null
null
null
UTF-8
R
false
false
16,854
r
0_clean_data_nonsmk.r
library(reshape2) library(data.table) library(lubridate) library(xlsx) # setwd("~/Documents/Research/Tobacco/processed_data") # plot.wd <- "~/Desktop" setwd("U:/Users/ccv103/Documents/Research/tobacco/processed_data") # setwd("/sscc/home/c/ccv103/Tobacco") ################################### # Data of smokers and non-smokers # ################################### panelists <- read.csv("tob_CVS_pan.csv", header = T) purchases <- read.csv("tob_CVS_purchases.csv", header = T) trips <- read.csv("tob_CVS_trips.csv", header = T) nsmk.pan <- read.csv("tob_CVS_nonsmk_pan.csv", header = T) nsmk.trips <- read.csv("tob_CVS_nonsmk_trips.csv", header = T) ma.policy <- read.xlsx("MA_policy.xlsx", 1) panelists$smk <- 1 nsmk.pan$smk <- 0 trips$smk <- 1 nsmk.trips$smk <- 0 panelists <- rbind(panelists, nsmk.pan) trips <- rbind(trips, nsmk.trips) names(panelists) <- tolower(names(panelists)) ######################## # Clean household data # ######################## # Add week event.date <- as.Date("2014-09-01", format = "%Y-%m-%d") # Placebo event event.month <- month(event.date) + 12 cvs.ret <- 4914 # retailer_code for CVS qunit <- 20 # 20 cigaretts per pack firstw <- as.Date("2012-12-31", format = "%Y-%m-%d") # The first week in 2013 purchases$purchase_date <- as.Date(as.character(purchases$purchase_date), format = "%Y-%m-%d") purchases$week <- ((as.numeric(purchases$purchase_date - firstw)) %/%7 + 1)* 7 + firstw - 1 purchases$year <- year(purchases$purchase_date) purchases$month <- month(purchases$purchase_date) purchases$month <- (purchases$year - 2013)*12 + purchases$month trips$purchase_date <- as.Date(as.character(trips$purchase_date), format = "%Y-%m-%d") trips$week <- ((as.numeric(trips$purchase_date - firstw)) %/%7 + 1)* 7 + firstw - 1 trips$year <- year(trips$purchase_date) trips <- subset(trips, year > 2012) trips$month <- month(trips$purchase_date) trips$month <- (trips$year - 2013)*12 + trips$month endweek <- c(min(purchases$week), max(purchases$week)) # Mark CVS 
trips$cvs <- ifelse(trips$retailer_code == cvs.ret, 1, 0) purchases <- merge(purchases, trips[,c("trip_code_uc", "cvs", "channel_type")], by = "trip_code_uc", all.x=T) # Mark the places that already implement tobacco ban sort(unique(panelists[panelists$statecode == "MA","city"])) cnty <- c("Berkeley","Daly City","Healdsburg","Hollister","Marin","Richmond","San Francisco","Santa Clara", "Sonoma" ) panelists$ban_ard <- with(panelists, 1*((statecode=="MA" & city %in% ma.policy$MUNICIPALITY)| (statecode=="CA" & countynm %in% cnty))) sort(unique(panelists[panelists$ban_ard==1,"countynm"])) table(panelists$ban_ard) # Use 2014 panelist profile tmp <- data.table(panelists) tmp <- tmp[,list(nzip = length(unique(panelist_zip_code)), nd = length(unique(distance_cvs))), by = list(household_code)] summary(tmp) mean(tmp$nzip>1) nonsmk.pan <- panelists[panelists$panel_year == 2014,] # Collapse demographic levels new.col <- list(setNames(c(2500,6500, 9000, 11000, 13500, 17500, 22500, 27500, 32500, 37500, 42500, 47500, 55000, 65000, 75000, 100000), c(3, 4, 6, 8, 10, 11, 13, 15, 16, 17, 18, 19, 21, 23, 26, 27)), # Income code # Income code setNames(c(NA, 23, 27, 32, 37, 42, 47, 52, 60, 65), 0:9), # Age code setNames(c(rep(1,8), 0), 1:9), # Kids code setNames(c(rep(c("House","Condo"), 3), "Mobile"), 1:7), # Residence code setNames(c("White", "African American", "Asian", "Other"), 1:4), # Race code setNames(c(NA, rep("Employed", 3), "Unemployed", rep(c("Employed", "Both employed", "Both employed", "Both employed", "Only one employed"), 3), "Unemployed", "Only one employed", "Only one employed", "Only one employed", "Both unemployed"), do.call(paste, expand.grid(c(0,1:3,9), c(0, 1:3, 9))) ) ) names(new.col) <- c("household_income", "age", "age_and_presence_of_children", "residence", "race", "employment") new.col nonsmk.pan$income <- new.col[["household_income"]][as.character(nonsmk.pan$household_income)] nonsmk.pan$male_head_age <- 
new.col[["age"]][as.character(nonsmk.pan$male_head_age)] nonsmk.pan$female_head_age <- new.col[["age"]][as.character(nonsmk.pan$female_head_age)] nonsmk.pan$age <- rowMeans(nonsmk.pan[,c("female_head_age", "male_head_age")], na.rm=T) nonsmk.pan$have_kids <- new.col[["age_and_presence_of_children"]][as.character(nonsmk.pan$age_and_presence_of_children)] nonsmk.pan$employment <- paste(nonsmk.pan$male_head_employment, nonsmk.pan$female_head_employment) nonsmk.pan$employment <- new.col[["employment"]][as.character(nonsmk.pan$employment)] nonsmk.pan$employment <- factor(nonsmk.pan$employment, levels = c("Unemployed", "Employed", "Only one employed", "Both employed", "Both unemployed")) nonsmk.pan$race <- factor(new.col[["race"]][as.character(nonsmk.pan$race)], levels = new.col[["race"]]) demo.col <- c("income", "age", "have_kids", "employment", "race", "distance_cvs") sel <- sapply(demo.col, function(i) is.numeric(nonsmk.pan[,i])) summary(nonsmk.pan[,demo.col[sel]]) lapply(demo.col[!sel], function(i) table(nonsmk.pan[,i])) sel <- apply(nonsmk.pan[,demo.col], 1, function(x) any(is.na(x))) cat(sum(sel), "Households have missing demogrpahics.\n") drop.hh <- nonsmk.pan[sel,"household_code"] # --------------------# # Household selection # # Household shopping profiles are constructed using data prior to the event, consumption are on the monthly basis # Selection criterion: # 1. Stay in the data before and after the event, # 2. No missing demographics # 3. Monthly cigarette consumption is less than 50 packs. # 4. Monthly cigarette spending at CVS is less than $300. # 5. If Live in the cities that passed bans of tobacco sales at pharmacies, then they cannot have cigarette purchases at CVS. # 6. 
Distance to CVS is within 100 miles if they are CVS shoppers max.q <- 50 max.cvs <- 300 tmppan <- data.table(trips) tmppan <- tmppan[,list(start = min(month), end = max(month), dol_cvs = sum(total_spent*cvs*1*(month < event.month))), by = list(household_code,smk)] tmppan <- tmppan[, stay2 := 1*(start < event.month & end > event.month)] tmp <- data.table(subset(purchases, month < event.month)) tmp <- tmp[,list(q = sum(quantity*size/qunit, na.rm=T), cigdol = sum(total_price_paid - coupon_value, na.rm=T), cigdol_cvs = sum(cvs*(total_price_paid - coupon_value), na.rm=T)), by = list(household_code)] tmppan <- merge(tmppan, tmp, by = "household_code", all.x = T) tmppan <- tmppan[,':='(q = q/(event.month - start), cigdol = cigdol/(event.month - start), cigdol_cvs = cigdol_cvs/(event.month - start), dol_cvs = dol_cvs/(event.month- start))] tmppan <- merge(tmppan, nonsmk.pan[,c("household_code", "ban_ard", "distance_cvs")], by = "household_code", all.x=T) summary(tmppan) tmppan[is.na(tmppan)] <- 0 cat("We start with", nrow(tmppan), "households \n") cat(sum(tmppan$stay2==0), "households did not stay before and after the event.\n") cat(sum(tmppan$q > max.q, na.rm=T), "households have monthly cigarette consumption greater than", max.q, "packs.\n") cat(sum(tmppan$cigdol_cvs > max.cvs, na.rm=T), "households have monthly cigarette spending at CVS greater than", max.cvs, ".\n") cat(sum(tmppan$ban_ard == 1 & tmppan$cigdol_cvs > 0), "households live in the cities that passed tobacco ban but still have puchased cigar at CVS.\n") cat(sum(tmppan$dol_cvs > 0 & tmppan$distance_cvs > 100),"CVS shoppers live beyond 100 miles from CVS.\n") tmppan <- tmppan[,drop := 1*(stay2 == 0 | q>max.q | cigdol_cvs > max.cvs | household_code %in% drop.hh | (ban_ard == 1 & cigdol_cvs > 0) | (dol_cvs > 0 & distance_cvs > 100))] table(tmppan$drop) tmppan <- subset(tmppan, drop == 0) cat("The table of smokers/non-smokers:\n"); print(table(tmppan$smk)); cat("\n") cat("The table of smoker and CVS shopper:\n"); 
table(tmppan$smk, tmppan$dol_cvs > 0) # For this analysis, we only focus on CVS shoppers. cat(sum(tmppan$dol_cvs==0),"households out of ", nrow(tmppan), "never shopped at CVS, so drop them for this current analysis.\n") tmppan <- subset(tmppan, dol_cvs > 0) # Subset panelist data, purchase data and trip data nonsmk.pan <- subset(nonsmk.pan, household_code %in% tmppan$household_code) purchases <- subset(purchases, household_code %in% tmppan$household_code) trips <- subset(trips, household_code %in% tmppan$household_code) nonsmk.pan <- nonsmk.pan[order(nonsmk.pan$household_code),] max(abs(nonsmk.pan$household_code - tmppan$household_code)) dim(nonsmk.pan) # ------------------ # # Household segments # # Classify households distance to CVS median(nonsmk.pan$distance_cvs) nonsmk.pan$cvs_in2 <- ifelse(nonsmk.pan$distance_cvs <=2, 1, 0) nonsmk.pan$wgr_in2 <- ifelse(nonsmk.pan$distance_walgreens <=2, 1, 0) # Classify light vs heavy smokers median(tmppan[smk==1,q]) nonsmk.pan$heavy <- ifelse(tmppan$q > 10, 1, 0) # Distribution of the fraction of cigarette spending conditional on CVS visit tmppan <- tmppan[,cig_frac_cvs := cigdol_cvs/dol_cvs] summary(tmppan$cig_frac_cvs) summary(tmppan[cig_frac_cvs>0,cig_frac_cvs]) nonsmk.pan$frac_seg <- ifelse(tmppan$dol_cvs == 0, "Never", ifelse(tmppan$cig_frac_cvs ==0, "Zero", ifelse(tmppan$cig_frac <= .2, "S1", "S2"))) nonsmk.pan$frac_seg <- factor(nonsmk.pan$frac_seg, levels = c("Zero", "S1", "S2")) cat("Table of CVS shopper segment:\n"); print(table(nonsmk.pan$frac_seg)); cat("\n") cat(sum(nonsmk.pan$ban_ard==1 & nonsmk.pan$frac_seg %in% c("S1", "S2")), "households in SF or MA have bought cigarettes in CVS.\n") # Define the construction of treatment and control # 1: Control (Non-smokers and cvs shoppers), Treament (smokers who are also CVS shoppers) # 2: Control (Non-smokers and cvs shoppers), Treament (smokers who also purchased cigarettes at CVS) # 3: Control (Non-smokers and cvs shoppers, Treament (smokers who spent more than 17% 
CVS spending on cigarettes) # Construct treatment and control nonsmk.pan$treat1 <- with(nonsmk.pan, 1*(smk == 1 & ban_ard == 0)) cat("Table of frist construction:\n") table(nonsmk.pan$treat1) table(nonsmk.pan$treat1, nonsmk.pan$frac_seg) nonsmk.pan$treat2 <- NA sel <- !(nonsmk.pan$frac_seg == "Zero" & nonsmk.pan$smk == 1 & nonsmk.pan$treat1 == 1) nonsmk.pan[sel,"treat2"] <- nonsmk.pan[sel,"treat1"] cat("Table of second construction:\n") table(nonsmk.pan$treat2) table(nonsmk.pan$treat2, nonsmk.pan$frac_seg) nonsmk.pan$treat3 <- NA sel <- !(nonsmk.pan$frac_seg %in% c("Zero", "S1") & nonsmk.pan$smk == 1 & nonsmk.pan$treat1 == 1) nonsmk.pan[sel,"treat3"] <- nonsmk.pan[sel,"treat1"] cat("Table of third construction:\n") table(nonsmk.pan$treat3) table(nonsmk.pan$treat3, nonsmk.pan$frac_seg) ############################ # Organize regression data # ############################ # -------------------------- # # Fill in non-puchases months # # Complete month for each household tmp <- data.table(trips) tmp <- tmp[,list(start = min(month), end = max(month)), by = list(household_code)] tmp <- tmp[, n:= end-start] tmp1 <- lapply(1:nrow(tmp), function(i) tmp[i,start] + c(0:tmp[i,n])) names(tmp1) <- tmp$household_code tmp1 <- melt(tmp1) names(tmp1) <- c("month", "household_code") tmp1$household_code <- as.numeric(tmp1$household_code) # Trips and spending tmp2 <- data.table(trips) tmp2 <- tmp2[,list(total_spent = sum(total_spent)), by = list(household_code, month, purchase_date, channel_type, retailer_code, cvs)] tmp2 <- tmp2[,list( trip_cvs = length(purchase_date[cvs==1]), trip_othdrug = length(purchase_date[channel_type == "Drug Store" & cvs ==0] ), trip_othchannel = length(purchase_date[channel_type != "Drug Store"]), trip_grocery = length(purchase_date[channel_type == "Grocery"]), trip_discount = length(purchase_date[channel_type == "Discount Store"]), trip_convenience= length(purchase_date[channel_type == "Convenience Store"]), trip_service = length(purchase_date[channel_type 
== "Service Station"]), trip_gas = length(purchase_date[channel_type == "Gas Mini Mart"]), dol_cvs = sum(total_spent*cvs, na.rm = T), dol_othdrug = sum(total_spent*(1-cvs)*1*(channel_type == "Drug Store"), na.rm = T), dol_othchannel = sum(total_spent*1*(channel_type != "Drug Store"), na.rm = T), dol_grocery = sum(total_spent*1*(channel_type == "Grocery"), na.rm = T), dol_discount = sum(total_spent*1*(channel_type == "Discount Store"),na.rm=T), dol_convenience = sum(total_spent*1*(channel_type == "Convenience Store"),na.rm=T), dol_service = sum(total_spent*1*(channel_type == "Service Station"), na.rm=T), dol_gas = sum(total_spent*1*(channel_type == "Gas Mini Mart"),na.rm=T), dol_total = sum(total_spent) ), by = list(household_code, month)] dim(tmp1); dim(tmp2) sum(is.na(tmp2)) summary(tmp2[,list(trip_cvs, trip_othdrug, trip_othchannel)]) nonsmk.trips <- merge(tmp1, tmp2, by = c("household_code", "month"), all.x = T) dim(nonsmk.trips) # Cigarette spending # Actual cigarette purchases tmp3 <- data.table(purchases) tmp3 <- tmp3[,list( q = sum(quantity*size/qunit, na.rm=T), cigdol = sum(total_price_paid - coupon_value, na.rm=T), cigdol_cvs = sum((total_price_paid - coupon_value)*cvs, na.rm=T), cigdol_othdrug = sum((total_price_paid - coupon_value)*(1-cvs)*1*(channel_type == "Drug Store"), na.rm=T), cigdol_othchannel= sum((total_price_paid - coupon_value)*1*(channel_type != "Drug Store"), na.rm=T)), by = list(household_code, month)] nonsmk.trips <- merge(nonsmk.trips, tmp3, by = c("household_code", "month"), all.x = T) sel <- is.na(nonsmk.trips) nonsmk.trips[sel] <- 0 nonsmk.trips$netdol <- with(nonsmk.trips, dol_total - cigdol) nonsmk.trips$netdol_cvs <- with(nonsmk.trips, dol_cvs - cigdol_cvs) nonsmk.trips$netdol_othdrug <- with(nonsmk.trips, dol_othdrug - cigdol_othdrug) nonsmk.trips$netdol_othchannel<- with(nonsmk.trips, dol_othchannel - cigdol_othchannel) cat("Summary stats:\n"); print(summary(nonsmk.trips[, -c(1:2)])); cat("\n") # Calculate pre-event shopping 
behavior for each household # NOTE: we have NAs for some trend measurement; nonsmk.trips <- nonsmk.trips[order(nonsmk.trips$household_code),] for(i in 0:7){ if(i == 0){ tmpp <- data.table(subset(nonsmk.trips, month < event.month - 3*i)) }else{ tmpp <- data.table(subset(nonsmk.trips, month < event.month - 3*(i-1) & month >= event.month - 3*i)) } print(unique(tmpp$month)) tmpp <- tmpp[,list( pre_q = mean(q), pre_trip_cvs = mean(trip_cvs), pre_trip_othdrug = mean(trip_othdrug), pre_trip_othchannel = mean(trip_othchannel), pre_dol_cvs = mean(dol_cvs), pre_dol_othdrug = mean(dol_othdrug), pre_dol_othchannel = mean(dol_othchannel) ), by = list(household_code)] if(i == 0){ predat <- tmpp cat("dim(predat) = ", dim(predat), "\n") }else{ names(tmpp)[-1] <- paste(names(tmpp)[-1], i, sep="") print(identical(predat$household, tmpp$household_code)) predat <- merge(predat, tmpp, by = "household_code", all.x = T) } } cat("dim(predat) = ", dim(predat), "\n") dim(nonsmk.pan) nonsmk.pan <- merge(nonsmk.pan, predat, by = "household_code", all.x = T) dim(nonsmk.pan) # Check any missing values in the panelist data demo.col bhv.col <- c("pre_trip_cvs", "pre_trip_othdrug", "pre_trip_othchannel", "pre_dol_cvs", "pre_dol_othdrug", "pre_dol_othchannel") # (bhv.col <- paste(rep(bhv.col, 4), "_H", rep(1:4, each = 6), sep="")) sapply(bhv.col, function(i) sum(is.na(nonsmk.pan[,i]))) sel <- apply(nonsmk.pan[,c(demo.col,bhv.col)], 1, function(x) any(is.na(x))) if(sum(sel) > 0){ cat(sum(sel), "households have missing values in their behavioral metrics, so we drop them for this analysis. 
\n") nonsmk.pan <- nonsmk.pan[!sel,] nonsmk.trips <- subset(nonsmk.trips, household_code %in% nonsmk.pan$household_code) trips <- subset(trips, household_code %in% nonsmk.pan$household_code) purchases <- subset(purchases, household_code %in% nonsmk.pan$household_code) } # Create other control variables: month nonsmk.trips$year <- ifelse(nonsmk.trips$month > 12, 2014, 2013) nonsmk.trips$month1 <- nonsmk.trips$month %% 12 nonsmk.trips$month1 <- ifelse(nonsmk.trips$month1 == 0, 12, nonsmk.trips$month1) nonsmk.trips$month1 <- factor(nonsmk.trips$month1) dim(nonsmk.trips) nonsmk.trips <- merge(nonsmk.trips, nonsmk.pan[,c("household_code", "treat1", "treat2", "treat3", "panelist_zip_code", "distance_cvs","cvs_in2", "wgr_in2", "heavy", "smk", "ban_ard", "frac_seg")], by = "household_code", all.x=T) dim(nonsmk.trips) nonsmk.trips$after <- 1*(nonsmk.trips$month >= event.month) length(unique(nonsmk.trips$household_code)) dim(nonsmk.pan) table(nonsmk.trips$month) save(nonsmk.pan, nonsmk.trips, file = "cvs_nonsmk.rdata")
c8d923df243dc081ae0b0a1ecd25e9c510506287
c35fbc233528e1920a9ec2ba13232828fae401b9
/man/nmECx.Rd
8e79074f42d6194bb7fe9569153a5cac0a2ec267
[]
no_license
cran/mixtox
03d9af3b74dfc1286008352119971aed14ea79ab
996fec422d5a36a42cbf9fa6483111c93d773cb9
refs/heads/master
2022-07-05T18:41:34.137823
2022-06-20T15:40:02
2022-06-20T15:40:02
36,813,233
6
4
null
null
null
null
UTF-8
R
false
false
2,349
rd
nmECx.Rd
\name{nmECx} \alias{nmECx} %- Also NEED an '\alias' for EACH other topic documented here. \title{Effect Concentration Calculation for J-shaped Models} \description{ Effect concentrations are calculated at particular effects based on the fitting coefficients of J-shaped Models. } \usage{nmECx(model, param, effv, minx, gap = -1e-6, sav = FALSE)} %- maybe also 'usage' for other objects documented here. \arguments{ \item{model}{a character vector of equations:("Brain_Consens", "BCV", "Biphasic", "Hill_five").} \item{param}{a numeric matrix of fitting coefficients with rownames (models) and colnames (ALpha, Beta, Gamma, Delta, and Epsilon).} \item{effv}{a numeric value (vector) with single or multiple effect values (miny ~ 1).} \item{minx}{a numeric value (vector) with single or multiple concentrations that induce maximun stimulation.} \item{gap}{ theoritical response at the extreme low concentration predicted by a fitted model.} \item{sav}{TRUE: save output to a default file; FALSE: output will not be saved; a custom file directory: save output to the custom file directory.} } \details{effect concentrations will be calculated with provided equations(model), associated fitting parameters (param), and effects (effv). Effect (effv) should be a value(s) between miny ~ 1. For example, \eqn{effv} should be 0.5 if we want to calculate a concentration causes 50\% effect. \eqn{minx} should be calculated by curveFit or tuneFit. } \value{ \item{ecx }{a numeric vector of effect concentration.} } \references{ Zhu X-W, Liu S-S, Qin L-T, Chen F, Liu H-L. 2013. Modeling non-monotonic dose-response relationships: Model evaluation and hormetic quantities exploration. 
Ecotoxicology and Environmental Safety 89:130-136.\cr } \seealso{ %% ~~objects to See Also as \code{\link{help}}, ~~~ \code{\link{CEx}} \code{\link{curveFit}} } \examples{ ## example 1 # calculate ECL-10, ECR-10, EC5, and EC50 of the four hormetic curves model <- hormesis$sgl$model param <- hormesis$sgl$param minx <- hormesis$sgl$minx nmECx(model, param, effv = c(-0.10, 0.05, 0.50), minx) } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{effect concentration} \keyword{ non-monotonic curve}
ec4dd14db9337200531f266fa6e0619aa0b6156b
849d11b2b53a0c9eb60719b52279fd2548a16fea
/origin_code/bootstrap.R
f844378d50f0426cdc43850ba858db6219c2cd17
[]
no_license
jingwang134/financial-engineering
7f292e564818609f6fa8116d1ce4f196568c4e34
78ae279c5d4ed5b60df8db65b284a2b0d48c7fa1
refs/heads/master
2021-04-26T22:15:53.478307
2018-03-06T09:49:28
2018-03-06T09:49:28
124,055,942
1
0
null
null
null
null
UTF-8
R
false
false
7,029
r
bootstrap.R
########### R script for Chapter 6 #################################################### ########### of Statistics and Data Analysis for Financial Engineering, 2nd Edition ###### ########### by Ruppert and Matteson #################################################### ########################################################### ############ Code for Examples 6.1 and 6.2 ############## ########################################################### library(bootstrap) library(MASS) # For fitdistr set.seed("3857") data(CRSPday,package="Ecdat") ge = CRSPday[,4] ge_250 = ge[1:250] nboot = 1000 options(digits=3) t_mle = function(x){as.vector(fitdistr(x,"t")$estimate)} t1=proc.time() results = bootstrap(ge,nboot,t_mle) t2=proc.time() t2-t1 results_250 = bootstrap(ge_250,nboot,t_mle) rowMeans(results$thetastar[,]) ## For Table 6.1, row 2 apply(results$thetastar[,],1,sd) ## For Table 6.1, row 4 fitdistr(ge,"t") ## For table 6.1, rows 1 and 3 apply(results_250$thetastar,1,mean) ## For Table 6.2, row 2 apply(results_250$thetastar,1,sd) ## For Table 6.2, row 4 fitdistr(ge_250,"t") ## For table 6.2, rows 1 and 3 quantile( results_250$thetastar[3,] , c(.95,.98,.99,.999)) pdf("MLE_t_BS_250.pdf",width=7,height=3.5) ## Figure 6.1 par(mfrow=c(1,2)) plot(density(results_250$thetastar[3,]),xlab="df", xlim=c(2,21),main="(a) n = 250") plot(density(results$thetastar[3,]),xlab="df", xlim=c(2,21),main="(b) n = 2528") graphics.off() ################################################## ############ Code for Example 6.3 ############# ################################################## bmw = read.csv("bmw.csv") library("bootstrap") quKurt = function(y,p1=0.025,p2=0.25) { Q = quantile(y,c(p1,p2,1-p2,1-p1)) (Q[4]-Q[1]) / (Q[3]-Q[2]) } set.seed("5640") t1 = proc.time() bca_kurt= bcanon(bmw$bmw,5000,quKurt) t2 = proc.time() t2-t1 bca_kurt$confpoints ################################################################## ############ Code for Figure 6.2 and example 6.4 ############### 
################################################################## pdf("LSCC_CSGSQQ.pdf",width=6,height=5) ## Figure 6.2 par(mfrow = c(1,1)) midcapD.ts = read.csv("midcapD.ts.csv") attach(midcapD.ts) qqplot(LSCC,CSGS) lmfit = lm(quantile(CSGS,c(.25,.75)) ~ quantile(LSCC,c(.25,.75)) ) abline(lmfit,col="red", lwd=2) graphics.off() n = dim(midcapD.ts)[1] quKurt = function(y,p1=0.025,p2=0.25) { Q = quantile(y,c(p1,p2,1-p2,1-p1)) as.numeric((Q[4]-Q[1]) / (Q[3]-Q[2])) } compareQuKurt = function(x,p1=0.025,p2=0.25,xdata) { quKurt(xdata[x,1],p1,p2)/quKurt(xdata[x,2],p1,p2) } quKurt(LSCC) quKurt(CSGS) xdata=cbind(LSCC,CSGS) compareQuKurt(1:n,xdata=xdata) library("bootstrap") set.seed("5640") bca_kurt= bcanon((1:n),5000,compareQuKurt,xdata=xdata) bca_kurt$confpoints ################## R lab #################### ############ Problems 1 - 5 ############## library("fGarch") bmwRet = read.csv("bmwRet.csv") n = dim(bmwRet)[1] kurt = kurtosis(bmwRet[,2],method="moment") skew = skewness(bmwRet[,2],method="moment") fit_skewt = sstdFit(bmwRet[,2]) q.grid = (1:n)/(n+1) qqplot(bmwRet[,2], qsstd(q.grid,fit_skewt$estimate[1], fit_skewt$estimate[2], fit_skewt$estimate[3],fit_skewt$estimate[4]), ylab="skewed-t quantiles" ) quKurt = function(y, p1 = 0.025, p2 = 0.25) { Q = quantile(y, c(p1, p2, 1 - p2, 1 - p1)) k = (Q[4] - Q[1]) / (Q[3] - Q[2]) k } nboot = 5000 ModelFree_kurt = rep(0, nboot) ModelBased_kurt = rep(0, nboot) set.seed("5640") for (i in 1:nboot) { samp_ModelFree = sample(bmwRet[,2], n, replace = TRUE) samp_ModelBased = rsstd(n, fit_skewt$estimate[1], fit_skewt$estimate[2], fit_skewt$estimate[3], fit_skewt$estimate[4]) ModelFree_kurt[i] = quKurt(samp_ModelFree) ModelBased_kurt[i] = quKurt(samp_ModelBased) } ############ Problems 6 - 14 ############## library(bootstrap) Kurtosis = function(x) mean( ((x-mean(x))/sd(x))^4 ) set.seed(3751) niter = 500 nboot = 400 n = 50 nu = 10 trueKurtosis = 3 + 6/(nu-4) correct = matrix(nrow=niter,ncol=5) width = matrix(nrow=niter,ncol=5) error = 
matrix(nrow=niter,ncol=1) t1 = proc.time() for (i in 1:niter){ y = rt(n,nu) int1 = boott(y,Kurtosis,nboott=nboot,nbootsd=50)$confpoints[c(3,9)] width[i,1] = int1[2]-int1[1] correct[i,1] = as.numeric((int1[1]<trueKurtosis)&(trueKurtosis<int1[2])) int2 = bcanon(y,nboot,Kurtosis)$confpoints[c(1,8),2] width[i,2] = int2[2]-int2[1] correct[i,2] = as.numeric((int2[1]<trueKurtosis)&(trueKurtosis<int2[2])) boot = bootstrap(y,nboot,Kurtosis)$thetastar int3 = Kurtosis(y)+1.96*c(-1,1)*sd(boot) width[i,3] = int3[2]-int3[1] correct[i,3] = as.numeric((int3[1]<trueKurtosis)&(trueKurtosis<int3[2])) int4 = quantile(boot,c(.025,.975)) width[i,4] = int4[2]-int4[1] correct[i,4] = as.numeric((int4[1]<trueKurtosis)&(trueKurtosis<int4[2])) int5 = 2*Kurtosis(y) - quantile(boot,c(.975,.025)) width[i,5] = int5[2]-int5[1] correct[i,5] = as.numeric((int5[1]<trueKurtosis)&(trueKurtosis<int5[2])) error[i] = mean(boot)-Kurtosis(y) } t2 = proc.time() (t2-t1)/60 colMeans(width) colMeans(correct) options(digits=3) mean(error) mean(error^2) 0.874 + c(-1,1)* 1.96 *sqrt(0.874*(1-0.874)/500) ############ Problem 15 #################### ### Warning: this will take a long time to run ### ### It took 42 minutes on an Intel core 2 quad processor running at 3 GHz ### ### Reduce niter and nboot if you are impatient ### library(bootstrap) quKurt = function(y,p1=0.025,p2=0.25) { Q = quantile(y,c(p1,p2,1-p2,1-p1)) k = (Q[4]-Q[1]) / (Q[3]-Q[2]) k } set.seed(3751) niter = 500 nboot = 400 n = 50 nu = 10 p1 = 0.025 p2 = 0.25 trueQuantiles = qt(c(p1,p2,1-p2,1-p1),df=nu) truequKurt = (trueQuantiles[4]-trueQuantiles[1]) / (trueQuantiles[3]-trueQuantiles[2]) correct = matrix(nrow=niter,ncol=5) width = matrix(nrow=niter,ncol=5) error = matrix(nrow=niter,ncol=1) t1 = proc.time() for (i in 1:niter){ y = rt(n,nu) int1 = boott(y,quKurt,nboott=nboot,nbootsd=50)$confpoints[c(3,9)] width[i,1] = int1[2]-int1[1] correct[i,1] = as.numeric((int1[1]<truequKurt)&(truequKurt<int1[2])) int2 = bcanon(y,nboot,quKurt)$confpoints[c(1,8),2] 
width[i,2] = int2[2]-int2[1] correct[i,2] = as.numeric((int2[1]<truequKurt)&(truequKurt<int2[2])) boot = bootstrap(y,nboot,quKurt)$thetastar int3 = quKurt(y)+1.96*c(-1,1)*sd(boot) width[i,3] = int3[2]-int3[1] correct[i,3] = as.numeric((int3[1]<truequKurt)&(truequKurt<int3[2])) int4 = quantile(boot,c(.025,.975)) width[i,4] = int4[2]-int4[1] correct[i,4] = as.numeric((int4[1]<truequKurt)&(truequKurt<int4[2])) int5 = 2*quKurt(y) - quantile(boot,c(.975,.025)) width[i,5] = int5[2]-int5[1] correct[i,5] = as.numeric((int5[1]<truequKurt)&(truequKurt<int5[2])) error[i] = mean(boot)-quKurt(y) } t2 = proc.time() (t2-t1)/60 colMeans(width) colMeans(correct) options(digits=3) mean(error) mean(error^2) p = 0.926 p + c(-1,1)* 1.96 *sqrt(p*(1-p)/500) p = 0.932 p + c(-1,1)* 1.96 *sqrt(p*(1-p)/500)
0c9e81dd1fcf20a01fab47400bd06c385d15cc22
86727f33e262a33acc6714944b25d1b965ef5c4e
/plot6.R
9e1c7203f49c03cbb4e4c9080af27f1ca643b92b
[]
no_license
terencelimzhengwei/ExData_Plotting2
3d87a5de8921c3033c9e4f2f3ad2d9fabf932f72
c7c7cc7d15fb5770abbc6ac56ff7e70393003a23
refs/heads/master
2021-01-10T03:00:38.589937
2016-01-15T03:45:16
2016-01-15T03:45:16
49,658,210
0
0
null
null
null
null
UTF-8
R
false
false
829
r
plot6.R
# Check whether data has already been loaded if(sum(c("NEI","SCC") %in% ls())!=2) source("./load_data.R") # Coal Related Source motor_scc <- SCC[grep("Vehicle", SCC$EI.Sector ,ignore.case = TRUE),] # Group by County/Year motor_data <- NEI %>% filter(SCC %in%motor_scc$SCC) %>% filter(fips=="24510"|fips=="06037") %>% group_by(fips,year) %>% summarize(total_pm25=sum(Emissions)) motor_data$fips[motor_data$fips=="24510"]="Baltimore City" motor_data$fips[motor_data$fips=="06037"]="Los Angeles County" png(filename = "./figures/plot6.png",bg="transparent") x<- ggplot(data = motor_data, aes(x=year,y=total_pm25,col=fips))+ geom_line()+ geom_point()+ ggtitle("Total PM2.5 Emissions by Motor Vehicles")+ ylab("Emissions")+ facet_grid(fips~.,scales="free") print(x) dev.off()
b67a64b3ae949b77ac158deb1ac81c393dd42520
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/hitandrun/examples/eliminateRedundant.Rd.R
af9022f567563a2b60d31473a827780b81c6dcec
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
441
r
eliminateRedundant.Rd.R
library(hitandrun) ### Name: eliminateRedundant ### Title: Eliminate redundant linear constraints ### Aliases: eliminateRedundant ### Keywords: constraint ### ** Examples constr <- list( constr = rbind( c(-1 , 0), c( 0 , -1), c( 1 , 1), c( 0.5, -1)), dir = c('<=', '<=', '=', '<='), rhs = c(0, 0, 1, 0)) constr <- eliminateRedundant(constr) stopifnot(nrow(constr$constr) == 3) # eliminates one constraint
be61ba65b9b84ef3ec6e127ddeae70667bf513eb
eef234939eeebc2e5dcf2ad9cfd1888ce36259df
/ident_regression/data/preproc_ridge.R
708f62db602b076c55dba6f7f192128fe0214b88
[]
no_license
snarles/misc
5d4e138cbb17bfd08143fc4c097fb84417446990
246f9fac0130340e44837b528a2f59e9256f2711
refs/heads/master
2023-06-26T06:53:53.933752
2023-06-12T13:29:17
2023-06-12T13:29:17
18,860,939
2
2
null
null
null
null
UTF-8
R
false
false
5,909
r
preproc_ridge.R
############################################################# ## DATA PREPROCESSING ## ############################################################# savelist <- c() isqrtm <- function(m) { res <- eigen(m) d <- res$values if (min(d) < -1e-5) warning("Negative eigenvalues in isqrtm") d[d < 0] <- 0 d[d > 0] <- 1/sqrt(d[d > 0]) v <- res$vectors return (v %*% diag(d) %*% t(v)) } library(Rcpp) sourceCpp('pdist.cpp') # code from http://blog.felixriedel.com/2013/05/pairwise-distances-in-r/ #ddir <- "/home/snarles/stat312data" #ddir <- "/home/ubuntu/stat312data" if ("rstudio" %in% lf) { (ddir <- "/home/rstudio/stat312data") setwd("/home/rstudio/misc/ident_regression/data/") } list.files(ddir) ## get indices of V1 from larger matrix load(paste0(ddir, "/forCharlesSNR.Rdata")) best_v <- order(-snr_ests)[1:100] load(paste0(ddir, "/all_voxel_locations.RData")) dim(voxel.loc) # 25915 3 load(paste0(ddir, "/v1_locations.RData")) v1_locations <- v1_locations[best_v, ] dim(v1_locations) # 100 3 library(prodlim) v1_inds <- row.match(data.frame(v1_locations), data.frame(voxel.loc)) ## extract V1 voxels in training data temp <- read.csv(paste0(ddir, "/allVoxTrain.csv"), header = FALSE, stringsAsFactors = FALSE) train_v1 <- temp[v1_inds, ] train_v1[,1] <- as.numeric(train_v1[, 1]) load(paste0(ddir, "/valid_index.RData")) train_index <- read.csv(paste0(ddir, "/indexTrain.csv"), header = FALSE) train_index <- as.numeric(train_index) load(paste0(ddir, "/train_resp.RData")) load(paste0(ddir, "/feature_valid.RData")) load(paste0(ddir, "/feature_train.RData")) dim(feature_train) # 1750 10921 dim(feature_valid) # 120 10921 load(paste0(ddir, "/valid_v1.RData")) valid_v1 <- valid_v1[best_v, ] dim(train_v1) dim(valid_v1) train_resp <- read.csv(paste0(ddir, "/train_resp_all.csv"), header = FALSE) dim(train_resp) #25915 1750 train_resp <- train_resp[v1_inds, ] feat_attr <- read.csv(paste0(ddir, "/featAttr.csv"), header = TRUE, stringsAsFactors = FALSE) feat_lv <- feat_attr[2, ] ##### ## PROCESSING 
IMAGE FEATURES ##### inds_train <- 1:1750 inds_valid <- 1750 + 1:120 features_all <- rbind(feature_train, feature_valid) vars <- apply(features_all, 2, var) lvars <- log(apply(features_all, 2, var)) plot(sort(lvars), type ="l") var_filt <- (lvars > -10) sum(var_filt) dim(feat_attr) comp_var <- sapply( 1:4, function(i) { temp_filt <- var_filt & (feat_lv == i) median(vars[temp_filt]) }) comp_var for (i in 1:4) { features_all[, feat_lv == i] <- features_all[, feat_lv == i]/sqrt(comp_var[i]) } features_all <- features_all[, var_filt] features_train <- features_all[inds_train, ] features_valid <- features_all[inds_valid, ] feat_attr <- feat_attr[, var_filt] dim(features_train) train_index #x_train <- features_train[train_index, ] dim(train_v1) length(train_index) length(unique(train_index)) max(train_index) max(valid_index) #### ## COVARIANCE OF ERROR #### dim(features_train) dim(train_v1) train_index[1:10] dm_train_v1 <- train_v1 i <- train_index[1] for (i in unique(train_index)) { filt <- train_index == i dm_train_v1[, filt] <- t(apply(train_v1[, filt], 1, function(v) v - mean(v))) } dim(dm_train_v1) sigma_e <-cov(t(dm_train_v1)) eye <- mean(diag(sigma_e)) * diag(rep(1, 100)) sigma_e <- 0.5 * sigma_e + 0.5 * eye omega_e <- isqrtm(sigma_e) #### ## REGRESSION #### library(parallel) library(glmnet) #cl <- makeCluster(5) dim(features_train) dim(train_resp) lambdas <- 0:10/10 nlambdas <- length(lambdas) prfunc <- function(i) { as.numeric(train_resp[1,]) res <- glmnet(features_train, as.numeric(train_resp[i, ]), standardize = FALSE, alpha = 0) pr <- predict(res, features_valid, s=lambdas) pr } res <- lapply(1:100, prfunc) pr_error <- numeric(nlambdas) misc_error <- numeric(nlambdas) for (i in 1:nlambdas) { pvalid <- matrix(0, 120, 100) for (j in 1:100) { pvalid[, j] <- res[[j]][, i] } yhat <- pvalid[valid_index, ] ys <- t(valid_v1) pr_error[i] <- sum((yhat - ys)^2) for (z in 1:120) { y <- apply(valid_v1[, valid_index == z], 1, mean) diff <- t(pvalid) - y # 100 120 cdiff <- 
omega_e %*% diff ds <- apply(cdiff^2, 2, sum) zhat <- order(ds)[1] misc_error[i] <- misc_error[i] + (zhat != z) } } plot(lambdas, misc_error) plot(lambdas, pr_error) #### ## REGRESSION : USING TRAIN_RESP #### lambdas <- 0:1000/40000 nlambdas <- length(lambdas) ntrials <- 200 misc_errors <- matrix(0, ntrials, nlambdas) pr_errors <- matrix(0, ntrials, nlambdas) library(class) proc.time() for (ii in 1:ntrials) { tr_inds <- sample(1750, 1725, FALSE) te_inds <- setdiff(1:1750, tr_inds) nte <- length(te_inds) prfunc <- function(i) { res <- glmnet(features_train[tr_inds, ], as.numeric(train_resp[i, tr_inds]), standardize = FALSE) pr <- predict(res, features_train[te_inds, ], s=lambdas) pr } res <- mclapply(1:100, prfunc, mc.cores = 30) #res <- lapply(1:100, prfunc) pr_error <- numeric(nlambdas) misc_error <- numeric(nlambdas) for (i in 1:nlambdas) { pvalid <- matrix(0, nte, 100) for (j in 1:100) { pvalid[, j] <- res[[j]][, i] } pr_error[i] <- sum((t(pvalid) - train_resp[, te_inds])^2) te_cl <- knn(pvalid %*% omega_e, t(train_resp[, te_inds]) %*% omega_e, 1:nte, k=1) misc_error[i] <- misc_error[i] + sum(te_cl != 1:nte) } misc_errors[ii, ] <- misc_error pr_errors[ii, ] <- pr_error print(ii) } proc.time() misc_error <- apply(misc_errors, 2, mean) pr_error <- apply(pr_errors, 2, mean) lambdas[order(misc_error)[1]] lambdas[order(pr_error)[1]] saveRDS(misc_errors, "misc_error_ridge.rds") saveRDS(pr_errors, "pr_error_ridge.rds") #plot(lambdas, misc_error) #plot(lambdas, pr_error)
a3af7e285363bf86734e96e0ae613c793e2f0ebd
15e6ac3031acc9b771cef4d26e3d5d03e8e3c936
/day2/change-detection/mle-rouder08-indiv.R
963fdec47f462468dcb30995a5c235b6d8ff5773
[]
no_license
anhnguyendepocen/CognitiveModelingWorkshop
de60a5a1910560a1ea8cf5c14abf992bbc2de6c9
080db90e6745f28247a7829222982912e6f9b6dd
refs/heads/master
2021-09-20T09:40:23.408058
2018-08-07T22:23:53
2018-08-07T22:23:53
null
0
0
null
null
null
null
UTF-8
R
false
false
5,872
r
mle-rouder08-indiv.R
# MLE Rouder et al (2008) PNAS # get the MLE functions from the group script source("day1/change-detection/mle-rouder08-group.R") # the data is also read in under cd head(cd) # function to calculate fit statistics from -LL fit_stats <- function(nLL, n, p){ # nLL = negative log liklihood # n = number of observations # p = number of parameters deviance = 2*nLL aic = deviance + 2*p bic = deviance + p*log(n) return(list("D" = deviance, "AIC" = aic, "BIC" = bic)) } sdt_fit = fit_stats(nLL = sdt_res$value, n = sum(group_data), p = 4) k_fit = fit_stats(nLL = k_res$value, n = sum(group_data), p = 3) vary_k_fit = fit_stats(nLL = vary_k_res$value, n = sum(group_data), p = 5) sdt_fit$AIC k_fit$AIC vary_k_fit$AIC sdt_fit$BIC k_fit$BIC vary_k_fit$BIC #### FIT TO INDIVIDUALS ---- S = nrow(cd) # number of participants # create matrices to hold the resulting parameter estimates # 1 row per participant, 1 column per parameter estimates_fix_k <- matrix(NA, nrow = S, ncol = 3) colnames(estimates_fix_k) <- c("k", "a", "g") estimates_vary_k <- matrix(NA, nrow = S, ncol = 5) colnames(estimates_vary_k) <- c("k1", "k2", "k3", "a", "g") estimates_sdt <- matrix(NA, nrow = S, ncol = 4) colnames(estimates_sdt) <- c("d1", "d2", "d3", "c") # create a matrix to hold the -log likelihood for each individual (row) # and each model (col) fit_statistics <- matrix(NA, nrow = S, ncol = 5) colnames(fit_statistics) <- c("LL_vac", "LL_fix_k", "LL_vary_k", "LL_sdt", "N_obs") # this loop takes the data from each row (participant) and fits the three models for (s in 1:S){ # get the data for this subject tmp.dat = as.integer(cd[s,]) # model that freely estimates response frequencies fit_statistics[s,1] <- ll.vacuous(y = tmp.dat) # fixed k par = runif(n = 3, min = 0, max = c(max(N), 1, 1)) k_res_s = optim(par, ll.fixed_k, y = tmp.dat) fit_statistics[s,2] <- k_res_s$value # add estimates and LL to matrices estimates_fix_k[s,] <- k_res_s$par # variable k par = runif(n = 5, min = 0, max = c(rep(max(N),3), 1, 1)) 
vary_k_res_s = optim(par, ll.vary_k, y = tmp.dat) fit_statistics[s,3] <- vary_k_res_s$value estimates_vary_k[s,] <- vary_k_res_s$par ## sdt model par = runif(n = 4, min = 0, max = c(5, 5, 5, 5)) sdt_res_s = optim(par, ll.sdt.ev, y = tmp.dat) fit_statistics[s,4] <- sdt_res_s$value estimates_sdt[s,] <- sdt_res_s$par fit_statistics[s,5] = sum(tmp.dat) } # remove stuff we no longer need... rm(list = c("tmp.dat", "k_res_s", "vary_k_res_s", "sdt_res_s")) # look at resulting parameter estimates hist(estimates_fix_k[,'k'], main="Fixed k", xlab="k estimate") #################### Model Comparison ####################### ##Let's do AIC first AIC.ind <- fit_statistics for(s in 1:S){ for(m in 1:M){ AIC.ind[s, m] <- fit_stats(nLL = fit_statistics[s, m], n = fit_statistics[s, 5], p = npar[m])$AIC } AIC.ind[s, 5] <- order(AIC.ind[s, 1:4])[1] } colnames(AIC.ind) <- c("vac", "fix_k", "vary_k", "sdt", "winner") AIC.ind <- as.data.frame(AIC.ind) AIC.ind$winner <- factor(AIC.ind$winner , labels = c("fix_k", "vary_k", "sdt")) table(AIC.ind$winner) ##BIC BIC.ind <- fit_statistics M <- ncol(BIC.ind) npar <- c(12, 3, 5, 4) for(s in 1:S){ for(m in 1:M){ BIC.ind[s, m] <- fit_stats(nLL = fit_statistics[s, m], n = fit_statistics[s, 5], p = npar[m])$BIC } BIC.ind[s, 5] <- order(BIC.ind[s, 1:4])[1] } colnames(BIC.ind) <- c("vac", "fix_k", "vary_k", "sdt", "winner") BIC.ind <- as.data.frame(BIC.ind) BIC.ind$winner <- factor(BIC.ind$winner , labels = c("fix_k")) table(BIC.ind$winner) ##################### More Stuff ##################################### #### Unequal Variance Signal Detection Model ll.sdt.uv <- function(par, y){ # length(par) == 7 (d1, d2, d3, c, s1, s2, s3) ll=0 for(i in 1:length(N)){ # for each set size p = sdt(d = par[i], c = par[length(N) + 1], s = par[length(N) + 1 + i]) ll = ll + negLL(y[N_i==i], p) } if(any(par[5:7] < rep(0,3))){ ll = ll + 10000} # penalty for going out of range return(ll) } ## fit sdt model par = runif(n = 7, min = 0, max = 3) sdt_res_uv = optim(par, 
ll.sdt.uv, y = group_data) sdt_res_uv$par ## fit sdt model par = runif(n = 4, min = 0, max = 3) sdt_res = optim(par, ll.sdt.ev, y = group_data) sdt_res$par c(sdt_res_uv$value, sdt_res$value) ## Try with differen random seeds set.seed(123) ##### Dealing with zero counts # create a matrix to hold the -log likelihood for each individual (row) # and each model (col) fit_statistics <- matrix(NA, nrow = S, ncol = 5) colnames(fit_statistics) <- c("LL_vac", "LL_fix_k", "LL_vary_k", "LL_sdt", "N_obs") # this loop takes the data from each row (participant) and fits the three models for (s in 1:S){ # get the data for this subject tmp.dat = as.integer(cd[s,]) + .5 # model that freely estimates response frequencies fit_statistics[s,1] <- ll.vacuous(y = tmp.dat) # fixed k par = runif(n = 3, min = 0, max = c(max(N), 1, 1)) k_res_s = optim(par, ll.fixed_k, y = tmp.dat) fit_statistics[s,2] <- k_res_s$value # add estimates and LL to matrices estimates_fix_k[s,] <- k_res_s$par # variable k par = runif(n = 5, min = 0, max = c(rep(max(N),3), 1, 1)) vary_k_res_s = optim(par, ll.vary_k, y = tmp.dat) fit_statistics[s,3] <- vary_k_res_s$value estimates_vary_k[s,] <- vary_k_res_s$par ## sdt model par = runif(n = 4, min = 0, max = c(5, 5, 5, 5)) sdt_res_s = optim(par, ll.sdt.ev, y = tmp.dat) fit_statistics[s,4] <- sdt_res_s$value estimates_sdt[s,] <- sdt_res_s$par fit_statistics[s,5] = sum(tmp.dat) } # remove stuff we no longer need... rm(list = c("tmp.dat", "k_res_s", "vary_k_res_s", "sdt_res_s")) # look at resulting parameter estimates hist(estimates_fix_k[,'k'], main="Fixed k", xlab="k estimate")
f18a6fd2eff78820d255ecd6f617eeecb5e0f93e
b2180767977549f293f4b7492284863aeb2cc6ab
/VS-ABC-GKTS.R
b47b46af9397eeda4935349c3ad9ef18eab89058
[]
no_license
NTomasetti/Variational-Updating
04b02653112cc479b5c2fadd60f542f57e4f393d
80098be791ee9f19de874fe1a1a7210d36a0e8a2
refs/heads/master
2020-04-06T12:43:44.340180
2018-11-14T00:56:14
2018-11-14T00:56:14
157,468,007
0
0
null
null
null
null
UTF-8
R
false
false
19,847
r
VS-ABC-GKTS.R
rm(list = ls()) repenv <- Sys.getenv("SLURM_ARRAY_TASK_ID") id <- as.numeric(repenv) library(plyr, lib.loc = 'packages') library(ks, lib.loc = 'packages') library(tibble, lib.loc = 'packages') library(mvtnorm, lib.loc = 'packages') eucdist <- function(y,z, w) { t(y-z) %*% w %*% (y-z) } drawGK <-function(theta, T, e0 = 0){ a <- theta[1] b <- theta[2] g <- theta[3] k <- theta[4] xy <- rnorm(T) eps <- b*(1+.8*((1-exp(-g*xy))/(1+exp(-g*xy))))*((1+xy^2)^k)*xy y <- a * e0 + eps[1] if(T > 1){ for(t in 2:T){ y[t] <- a * eps[t-1] + eps[t] } } data.frame(y = y, eps = eps) } abcVB <- function(lambda, z, draws, post, dim = 4, mix = ncol(lambda)){ draws <- as.matrix(draws) Jacobian <- apply(draws, 1, function(x){ (1 / x[1] + 1 / (1 - x[1])) }) draws[, 1] <- -log((1 - draws[,1]) / draws[,1]) maxIter <- 1000 threshold <- 0.0001 alpha <- 0.01 beta1 <- 0.9 beta2 <- 0.99 e <- 1e-8 LB <- rep(0, maxIter) meanLB <- 5 iter <- 1 diff <- threshold + 1 MZ <- VZ <- rep(0, mix) M <- V <- matrix(0, nrow(lambda), mix) while(diff > threshold & iter <= maxIter){ mean <- lambda[1:dim, ] Sigma <- array(0, dim = c(dim, dim, mix)) SigInv <- Sigma for(m in 1:mix){ U <- matrix(lambda[dim + 1:dim^2, m], dim) Sigma[,,m] <- t(U) %*% U if(det(Sigma[,,m]) > 1e-8){ SigInv[,,m] <- solve(Sigma[,,m]) } else { SigInv[,,m] <- diag(1/diag(Sigma[,,m])) } } pi <- exp(z) / sum(exp(z)) qComp <- matrix(0, nrow(draws), mix) for(m in 1:mix){ qComp[,m] <- mvtnorm::dmvnorm(draws, mean[,m], Sigma[,,m]) } qDens <- c(qComp %*% pi) qDensTransf <- qDens * Jacobian scorePi <- qComp / qDens scoreZ <- matrix(0, nrow(draws), mix) denom <- sum(exp(z))^2 for(i in 1:mix){ for(j in 1:mix){ if(i == j){ scoreZ[, i] <- scoreZ[, i] + scorePi[, j] * sum(exp(z[i] + z[-i])) / denom } else { scoreZ[, i] <- scoreZ[, i] - scorePi[, j] * exp(z[i] + z[j]) / denom } } } score <- array(0, dim = c(nrow(lambda), mix, nrow(draws))) for(m in 1:mix){ for(n in 1:nrow(draws)){ meandiff <- (draws[n, ] - mean[,m]) score[1:dim, m, n] <- SigInv[,,m] %*% 
meandiff product <- SigInv[,,m] %*% meandiff %*% t(meandiff) %*% SigInv[,,m] scoreSig <- -SigInv[,,m] + diag(diag(SigInv[,,m]) )/ 2 + product - diag(diag(product))/2 score[5, m, n] <- lambda[c(5, 9, 13, 17), m] %*% scoreSig[1, 1:4] + lambda[5, m] * scoreSig[1, 1] # U_11 score[9, m, n] <- lambda[c(5, 9, 13, 17), m] %*% scoreSig[2, 1:4] + lambda[9, m] * scoreSig[2, 2] # U_21 score[10, m, n] <- lambda[c(10, 14, 18), m] %*% scoreSig[2, 2:4] + lambda[10, m] * scoreSig[2, 2] # U_22 score[13, m, n] <- lambda[c(5, 9, 13, 17), m] %*% scoreSig[3, 1:4] + lambda[13, m] * scoreSig[3, 3] # U_31 score[14, m, n] <- lambda[c(10, 14, 18), m] %*% scoreSig[3, 2:4] + lambda[14, m] * scoreSig[3, 3] # U_32 score[15, m, n] <- lambda[c(15, 19), m] %*% scoreSig[3, 3:4] + lambda[15, m] * scoreSig[3, 3] # U_33 score[17, m, n] <- lambda[c(5, 9, 13, 17), m] %*% scoreSig[4, 1:4] + lambda[17, m] * scoreSig[4, 4] # U_41 score[18, m, n] <- lambda[c(10, 14, 18), m] %*% scoreSig[4, 2:4] + lambda[18, m] * scoreSig[4, 4] # U_42 score[19, m, n] <- lambda[c(15, 19), m] %*% scoreSig[4, 3:4] + lambda[19, m] * scoreSig[4, 4] # U_43 score[20, m, n] <- 2 * lambda[20, m] * scoreSig[4, 4] # U_44 score[, m, n] <- score[, m, n] * pi[m] * scorePi[n, m] # Convert from dlog N_i / dlam to dlog N_sum /dlam } } w <- qDensTransf / post LB[iter] <- mean(w * (log(post) - log(qDensTransf))) gradient <- w * score * (log(post) - log(qDensTransf)) gradient <- apply(gradient, 1:2, mean) gradientSq <- gradient^2 gradZ <- w * scoreZ * (log(post) - log(qDensTransf)) gradZ <- apply(gradZ, 2, mean) gradZSq <- gradZ^2 M <- beta1 * M + (1 - beta1) * gradient V <- beta2 * V + (1 - beta2) * gradientSq Mstar <- M / (1 - beta1^iter) Vstar <- V / (1 - beta2^iter) update <- alpha * Mstar / (sqrt(Vstar) + e) if(any(is.na(update))){ print('Break Lambda') break } lambda <- lambda + update MZ <- beta1 * MZ + (1 - beta1) * gradZ VZ <- beta2 * VZ + (1 - beta2) * gradZSq Mstar <- MZ / (1 - beta1^iter) Vstar <- VZ / (1 - beta2^iter) update <- 
alpha * Mstar / (sqrt(Vstar) + e) if(any(is.na(update))){ print('Break Z') break } z <- z + update if(iter %% 5 == 0){ oldMeanLB <- meanLB meanLB <- mean(LB[iter:(iter- 4)]) diff <- abs(meanLB - oldMeanLB) } #if(iter %% 100 == 0){ # print(paste0('Iteration: ', iter, ' ELBO: ', meanLB)) #} iter <- iter + 1 } # print(paste0('Iteration: ', iter, ' ELBO: ', meanLB)) list(lambda = lambda, z = z, iter = iter-1, LB = LB[1:(iter-1)]) } set.seed(id) a <- 0.5 #AR parameter b <- 1 #scale g <- 2 #skewness k <- 0.5 # kurtosis T <- 500 Tseq <- c(0, seq(50, T, 10)) reps <- 50000 keep <- 0.01 data <- drawGK(c(a, b, g, k), T) y <- data$y eps <- data$eps s0 <- function(x, qr) {T <- length(x); 1/(T-1) * sum(x[1:(T-1)] * x[2:T])} s1 <- function(x, qr) {T <- length(x); (1/T*sum(x))} s3 <- function(x, qr) {T <- length(x); 1/T*sum((x-s1(x))^3)/((1/T*sum((x-s1(x))^2))^1.5)} s4 <- function(x, qr) {T <- length(x); (1/T*sum((x-s1(x))^4)/((1/T*sum((x-s1(x))^2))^2))^0.5} s1r <- function(x, qr) qr[4] s2r <- function(x, qr) qr[6]-qr[2] s3r <- function(x, qr) (qr[6]+qr[2]-2*qr[4])/(qr[6]-qr[2]) s4r <- function(x, qr) (qr[7]-qr[5]+qr[3]-qr[1])/(qr[6]-qr[2]) sumStats <- plyr::each(s0 = s0, s1 = s1r, s2 = s2r, s3 = s3, s4 = s4, s5 = s3r, s6 = s4r) results <- tibble() diverge <- FALSE for(t in 2:8){#(length(Tseq)-1)){ print(paste(t, Sys.time())) ySub <- y[(Tseq[t-1]+1):Tseq[t]] yFull <- y[1:Tseq[t]] # Full ABC Fit qrY <- quantile(yFull, c(.08,.25,.36,.5,.6,.75,.875)) yss <- sumStats(yFull, qrY) reps <- 50000 keep <- 0.01 theta <- cbind(runif(reps, 0, 1), runif(reps, 0, 10), runif(reps, 0, 10), runif(reps, 0, 10)) zss <- matrix(0, reps, 7) #loop through simulation of data for(iter in 1:reps){ z <- drawGK(theta[iter, ], Tseq[t])$y qrZ <- quantile(z, c(.08,.25,.36,.5,.6,.75,.875)) zss[iter, ] <- sumStats(z, qrZ) } varZ <- apply(zss, 2, var) distance <- rep(0, reps) for(iter in 1:reps){ distance[iter] <- eucdist(yss, zss[iter, ], diag(1/varZ)) } ABC <- as.tibble(cbind(theta, distance)) names(ABC) = 
c('a', 'b', 'g', 'k', 'dist') accept <- ABC[ABC$dist < quantile(ABC$dist, keep), 1:4] if(t == 2){ acceptVB <- accept acceptSABC <- accept acceptSABC2 <- accept } else { # Sequential ABC qrY <- quantile(ySub, c(.08,.25,.36,.5,.6,.75,.875)) yss <- sumStats(ySub, qrY) # Draw Theta reps <- 5000 keep <- 0.1 subset <- sample(1:nrow(acceptSABC), reps, replace = T) theta <- as.matrix(acceptSABC)[subset, ] zss <- matrix(0, reps, 7) #loop through simulation of data for(iter in 1:reps){ z <- drawGK(theta[iter, ], Tseq[t] - Tseq[t-1], eps[Tseq[t-1]])$y qrZ <- quantile(z, c(.08,.25,.36,.5,.6,.75,.875)) zss[iter, ] <- sumStats(z, qrZ) } varZ <- apply(zss, 2, var) distance <- rep(0, reps) for(iter in 1:reps){ distance[iter] <- eucdist(yss, zss[iter, ], diag(1/varZ)) } SABC <- as.tibble(cbind(theta, distance)) names(SABC) = c('a', 'b', 'g', 'k', 'dist') acceptSABC <- SABC[SABC$dist < quantile(SABC$dist, keep), 1:4] # Sequential ABC with intermittent variatonal updates distinct <- length(unique(acceptSABC2$a)) if(distinct >= nrow(acceptSABC) / 10){ if(!diverge){ acceptSABC2 <- acceptSABC } else { # Normal S-ABC Iteration reps <- 5000 keep <- 0.1 subset <- sample(1:nrow(acceptSABC2), reps, replace = T) theta <- as.matrix(acceptSABC2)[subset, ] zss <- matrix(0, reps, 7) #loop through simulation of data for(iter in 1:reps){ z <- drawGK(theta[iter, ], Tseq[t] - Tseq[t-1], eps[Tseq[t-1]])$y qrZ <- quantile(z, c(.08,.25,.36,.5,.6,.75,.875)) zss[iter, ] <- sumStats(z, qrZ) } varZ <- apply(zss, 2, var) distance <- rep(0, reps) for(iter in 1:reps){ distance[iter] <- eucdist(yss, zss[iter, ], diag(1/varZ)) } SABC2 <- as.tibble(cbind(theta, distance)) names(SABC2) = c('a', 'b', 'g', 'k', 'dist') acceptSABC2 <- SABC2[SABC2$dist < quantile(SABC2$dist, keep), 1:4] } } else { diverge <- TRUE # VS-ABC Iteration # Fit KDE grid <- do.call(expand.grid, lapply(acceptSABC2, quantile, prob=seq(0.01, 0.99, length.out = 14))) postDensity <- ks::kde(as.matrix(acceptSABC2), eval.points = grid) grid$post <- 
c(postDensity$estimate) # Subset from KDE for VB algo normProb <- grid$post normProb[normProb <= 0] <- 0 normProb <- normProb / sum(normProb) subset <- sample(1:nrow(grid), 50, prob = normProb) draws <- grid[subset, 1:4] postDens <- grid[subset, ]$post # Fit VB mix <- 5 z <- rep(0, mix) lambda <- matrix(0, 4 * 5, mix) lambda[,1] <- c(colMeans(acceptSABC2), chol(cov(acceptSABC2))) for(m in 2:mix){ lambda[,m] <- c(colMeans(acceptSABC2) + c(0.5, 0.3, 0.5, 0.3) * (-1)^m + rnorm(4, 0, 0.1), chol(cov(acceptSABC2))) } vb2 <- abcVB(lambda, z, draws, postDens) # Set up ABC qrY <- quantile(ySub, c(.08,.25,.36,.5,.6,.75,.875)) yss <- sumStats(ySub, qrY) # Draw Theta weights <- vb2$z pi <- exp(weights) / sum(exp(weights)) mean <- vb2$lambda[1:4, ] Sig <- array(0, dim = c(4, 4, mix)) for(m in 1:mix){ U <- matrix(vb2$lambda[5:20, m], 4) Sig[,,m]<- t(U) %*% U } reps <- 5000 keep <- 0.1 theta <- matrix(0, reps, 4) for(iter in 1:reps){ okay <- FALSE # Truncate distribution to [0, 1], [0, 10]^3 while(!okay){ u <- runif(1) component <- min(which(cumsum(pi) > u)) draw <- mvtnorm::rmvnorm(1, mean[,component], Sig[,,component]) if(all(draw[2:4] > 0) & all(draw[2:4] < 10) & draw[1] > 0 & draw[1] < 1){ okay <- TRUE } } theta[iter, ] <- draw } zss <- matrix(0, reps, 7) #loop through simulation of data for(iter in 1:reps){ z <- drawGK(theta[iter, ], Tseq[t] - Tseq[t-1], eps[Tseq[t-1]])$y qrZ <- quantile(z, c(.08,.25,.36,.5,.6,.75,.875)) zss[iter, ] <- sumStats(z, qrZ) } varZ <- apply(zss, 2, var) distance <- rep(0, reps) for(iter in 1:reps){ distance[iter] <- eucdist(yss, zss[iter, ], diag(1/varZ)) } SABC2 <- as.tibble(cbind(theta, distance)) names(SABC2) = c('a', 'b', 'g', 'k', 'dist') acceptSABC2 <- SABC2[SABC2$dist < quantile(SABC2$dist, keep), 1:4] } # Fit KDE grid <- do.call(expand.grid, lapply(acceptVB, quantile, prob=seq(0.01, 0.99, length.out = 14))) postDensity <- ks::kde(as.matrix(acceptVB), eval.points = grid) grid$post <- c(postDensity$estimate) # Subset from KDE for VB algo 
normProb <- grid$post normProb[normProb <= 0] <- 0 normProb <- normProb / sum(normProb) subset <- sample(1:nrow(grid), 100, prob = normProb) draws <- grid[subset, 1:4] postDens <- grid[subset, ]$post # Fit VB mix <- 5 drawsTransf <- matrix(0, nrow(acceptVB), 4) acceptVB <- as.matrix(acceptVB) drawsTransf[,2:4] <- acceptVB[,2:4] drawsTransf[,1] <- -log((1 - acceptVB[,1]) / acceptVB[,1]) z <- rep(0, mix) lambda <- matrix(0, 4 * 5, mix) lambda[,1] <- c(colMeans(drawsTransf), chol(cov(drawsTransf))) for(m in 2:mix){ lambda[,m] <- c(colMeans(drawsTransf) + 1 * (-1)^m + rnorm(4, 0, 0.1), chol(cov(drawsTransf))) } vb <- abcVB(lambda, z, draws, postDens) # Set up ABC qrY <- quantile(ySub, c(.08,.25,.36,.5,.6,.75,.875)) yss <- sumStats(ySub, qrY) # Draw Theta weights <- vb$z pi <- exp(weights) / sum(exp(weights)) mean <- vb$lambda[1:4, ] Sig <- array(0, dim = c(4, 4, mix)) for(m in 1:mix){ U <- matrix(vb$lambda[5:20, m], 4) Sig[,,m]<- t(U) %*% U } reps <- 5000 keep <- 0.1 theta <- matrix(0, reps, 4) for(iter in 1:reps){ okay <- FALSE u <- runif(1) component <- min(which(cumsum(pi) > u)) while(!okay){ theta[iter, ] <- mvtnorm::rmvnorm(1, mean[,component], Sig[,,component]) if(all(theta[iter, 2:4] > 0) & all(theta[iter, 2:4] < 0)){ okay <- TRUE } } } theta[,1] <- 1 / (1 + exp(-theta[,1])) zss <- matrix(0, reps, 7) #loop through simulation of data for(iter in 1:reps){ z <- drawGK(theta[iter, ], Tseq[t] - Tseq[t-1], eps[Tseq[t-1]])$y qrZ <- quantile(z, c(.08,.25,.36,.5,.6,.75,.875)) zss[iter, ] <- sumStats(z, qrZ) } varZ <- apply(zss, 2, var) distance <- rep(0, reps) for(iter in 1:reps){ distance[iter] <- eucdist(yss, zss[iter, ], diag(1/varZ)) } ABCVB <- as.tibble(cbind(theta, distance)) names(ABCVB) = c('a', 'b', 'g', 'k', 'dist') acceptVB <- ABCVB[ABCVB$dist < quantile(ABCVB$dist, keep), 1:4] } # Forecast ABC results fcSims <- matrix(0, nrow(accept), 10) for(i in 1:nrow(accept)){ fcSims[i, ] <- drawGK(unlist(accept[i, ]), 10, eps[Tseq[t]])$y } densFuns <- apply(fcSims, 2, 
function(x){ xSub <- x[x > quantile(x, 0.01) & x < quantile(x, 0.99)] dens <- density(xSub) approxfun(dens) }) ls <- vapply(1:10, function(x) log(densFuns[[x]](y[Tseq[t]+x])), runif(1)) results <- rbind(results, tibble(ls = ls, t = Tseq[t], h = 1:10, method = 'ABC', unique = 500)) if(t > 2){ fcSims <- matrix(0, nrow(acceptSABC), 10) for(i in 1:nrow(acceptSABC)){ fcSims[i, ] <- drawGK(unlist(acceptSABC[i, ]), 10, eps[Tseq[t]])$y } densFuns <- apply(fcSims, 2, function(x){ xSub <- x[x > quantile(x, 0.01) & x < quantile(x, 0.99)] dens <- density(xSub) approxfun(dens) }) ls <- vapply(1:10, function(x) log(densFuns[[x]](y[Tseq[t]+x])), runif(1)) results <- rbind(results, tibble(ls = ls, t = Tseq[t], h = 1:10, method = 'S-ABC', unique = length(unique(acceptSABC$a)))) fcSims <- matrix(0, nrow(acceptSABC2), 10) for(i in 1:nrow(acceptSABC2)){ fcSims[i, ] <- drawGK(unlist(acceptSABC2[i, ]), 10, eps[Tseq[t]])$y } densFuns <- apply(fcSims, 2, function(x){ xSub <- x[x > quantile(x, 0.01) & x < quantile(x, 0.99)] dens <- density(xSub) approxfun(dens) }) ls <- vapply(1:10, function(x) log(densFuns[[x]](y[Tseq[t]+x])), runif(1)) results <- rbind(results, tibble(ls = ls, t = Tseq[t], h = 1:10, method = 'VS-ABC-2', unique = length(unique(acceptSABC2$a)))) fcSims <- matrix(0, nrow(acceptVB), 10) for(i in 1:nrow(acceptVB)){ fcSims[i, ] <- drawGK(unlist(acceptVB[i, ]), 10, eps[Tseq[t]])$y } densFuns <- apply(fcSims, 2, function(x){ xSub <- x[x > quantile(x, 0.01) & x < quantile(x, 0.99)] dens <- density(xSub) approxfun(dens) }) ls <- vapply(1:10, function(x) log(densFuns[[x]](y[Tseq[t]+x])), runif(1)) results <- rbind(results, tibble(ls = ls, t = Tseq[t], h = 1:10, method = 'VS-ABC', unique = 500)) } } write.csv(results, paste0('VSABC/rep', id, '.csv'), row.names = FALSE) library(tidyverse) results <- tibble() for(i in 1:200){ file <- paste0('ma1gk/rep', i, '.csv') if(file.exists(file)){ temp <- read_csv(file, col_types = cols()) temp$id <- i results <- rbind(results, temp) } } 
length(unique(results$id)) results %>% group_by(t, h, method) %>% summarise(meanls = mean(ls, na.rm = TRUE)) %>% ggplot() + geom_line(aes(t, meanls, colour = method)) + facet_wrap(~h, scales = 'free', ncol = 5) + theme_bw() + theme(legend.position = 'bottom') + labs(colour = 'Method', x = 'T', y = 'Mean Forecast Logscore') results %>% filter(method %in% c('S-ABC', 'VS-ABC-2') & h == 1) %>% group_by(t, method) %>% summarise(mid = mean(unique), lower = min(unique), upper = max(unique)) %>% ggplot() + geom_line(aes(t, mid, colour = method)) + geom_ribbon(aes(t, ymin = lower, ymax = upper, colour = method), alpha = 0.2) results %>% filter(method %in% c('S-ABC', 'VS-ABC-2') & h == 1) %>% select(-ls) %>% spread(method, unique) %>% mutate(ratio = `VS-ABC-2` / `S-ABC`) %>% select(t, ratio, id) %>% inner_join( results %>% filter(method %in% c('S-ABC', 'VS-ABC-2') & h == 1) %>% select(-unique) %>% spread(method, ls) %>% mutate(diff = `VS-ABC-2` - `S-ABC`) %>% select(t, diff, id) ) %>% group_by(t) %>% summarise(diff = mean(diff, na.rm = TRUE), ratio = mean(ratio)) %>% gather(var, value, -t) %>% ggplot() + geom_line(aes(t, value)) + facet_wrap(~var, scales = 'free') accept %>% as.data.frame() %>% mutate(method = 'ABC') %>% rbind(acceptVB %>% as.data.frame() %>% mutate(method = 'VB'), acceptSABC %>% as.data.frame() %>% mutate(method = 'SABC'), acceptSABC2 %>% as.data.frame() %>% mutate(method = 'SABC2')) %>% gather(var, value, -method) %>% ggplot() + geom_density(aes(value, colour =method)) + facet_wrap(~var, scales = 'free') # Forecast ABC results fcSims <- matrix(0, nrow(accept), 10) for(i in 1:nrow(accept)){ fcSims[i, ] <- drawGK(unlist(accept[i, ]), 10, eps[Tseq[t]])$y } fcSimsSABC <- matrix(0, nrow(acceptSABC), 10) for(i in 1:nrow(acceptSABC)){ fcSimsSABC[i, ] <- drawGK(unlist(acceptSABC[i, ]), 10, eps[Tseq[t]])$y } fcSimsSABC2 <- matrix(0, nrow(acceptSABC2), 10) for(i in 1:nrow(acceptSABC2)){ fcSimsSABC2[i, ] <- drawGK(unlist(acceptSABC2[i, ]), 10, eps[Tseq[t]])$y } 
fcSimsVB <- matrix(0, nrow(acceptVB), 10) for(i in 1:nrow(acceptVB)){ fcSimsVB[i, ] <- drawGK(unlist(acceptVB[i, ]), 10, eps[Tseq[t]])$y } acceptT <- data.frame(a = rep(a, 500), b = rep(b, 500), g = rep(g, 500), k = rep(k, 500)) fcSimsT <- matrix(0, nrow(acceptT), 10) for(i in 1:nrow(accept)){ fcSimsT[i, ] <- drawGK(unlist(acceptT[i, ]), 10, eps[Tseq[t]])$y } acceptVB2 <- acceptVB acceptVB2$b <- b acceptVB2$a <- a fcSimsVB2 <- matrix(0, nrow(acceptVB2), 10) for(i in 1:nrow(acceptVB)){ fcSimsVB2[i, ] <- drawGK(unlist(acceptVB2[i, ]), 10, eps[Tseq[t]])$y } fc <- data.frame(ABC = fcSims[,10], SABC = fcSimsSABC[,10], SABC2 = fcSimsSABC2[,10], VB = fcSimsVB[,10], True = fcSimsT[,10], VB2 = fcSimsVB2[,10]) fc %>% gather(var, value) %>% ggplot() + geom_density(aes(value, colour = var)) + geom_vline(aes(xintercept = y[190])) + xlim(-10, 30)
5ff4738204315c91b05eedfa51b0b5a654bd04d6
64c4bf0bfb4e4d9ea19782db65defd7bdb2542df
/man/gendist.Rd
f2a83867272979e6dbd39a77d88166fe170a61d3
[]
no_license
martinezsebastian/ri
f199bcd8cd2194f731c1f8e61483be57443d8400
31cdc4616d77cc9ea224d9315a3431e2ba861275
refs/heads/master
2020-04-04T14:28:50.191285
2012-05-10T00:00:00
2012-05-10T00:00:00
null
0
0
null
null
null
null
UTF-8
R
false
false
2,990
rd
gendist.Rd
\name{gendist} \alias{gendist} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Generates randomization distribution of estimated ATEs } \description{ Takes hypothesized potential outcomes, a permutation matrix, and arguments for \code{estate()} to produce a randomization distribution of estimated average treatment effects (ATEs). } \usage{ gendist(Ys, perms, X = NULL, Ypre = NULL, prob = NULL, HT = FALSE) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{Ys}{ list consisting of two N-length numeric vectors labeled \code{Y0} and \code{Y1}, as output by \code{genouts()} } \item{perms}{ N-by-r permutation matrix, as output by \code{genperms} or \code{genperms.custom} } \item{X}{ N-by-k numeric matrix of covariates for regression adjustment } \item{Ypre}{ numeric vector of length N, pretreatment measure of the outcome variable for difference estimation } \item{prob}{ numeric vector within the (0,1) interval of length N, probability of treatment assignment, as output by \code{genprob()} or \code{genprobexact()}. When \code{prob=NULL} (by default), assumes probability of assignment to treatment implied by the permutation matrix} \item{HT}{ when \code{HT=TRUE}, invokes the Horvitz-Thompson (difference-in-totals) estimator. When \code{HT=FALSE}, invokes the inverse-probability-weighted regression estimator } } \value{ An r-length vector of estimated ATEs } \references{ Gerber, Alan S. and Donald P. Green. 2012. \emph{Field Experiments: Design, Analysis, and Interpretation}. New York: W.W. Norton. } \author{ Peter M. 
Aronow <peter.aronow@yale.edu>; Cyrus Samii <cds2083@nyu.edu> } \seealso{ \code{\link{estate}}, \code{\link{genouts}}, \code{\link{genprob}}, \code{\link{genperms}}, \code{\link{genperms.custom}}} \examples{ y <- c(8,6,2,0,3,1,1,1,2,2,0,1,0,2,2,4,1,1) Z <- c(1,1,0,0,1,1,0,0,1,1,1,1,0,0,1,1,0,0) cluster <- c(1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9) block <- c(rep(1,4),rep(2,6),rep(3,8)) perms <- genperms(Z,blockvar=block, clustvar=cluster) # all possible permutations probs <- genprobexact(Z,blockvar=block, clustvar=cluster) # probability of treatment ate <- estate(y,Z,prob=probs) # estimate the ATE ## Conduct Sharp Null Hypothesis Test of Zero Effect for Each Unit Ys <- genouts(y,Z,ate=0) # generate potential outcomes under sharp null of no effect distout <- gendist(Ys,perms, prob=probs) # generate sampling dist. under sharp null dispdist(distout, ate) # display characteristics of sampling dist. for inference ## Generate Sampling Distribution Around Estimated ATE Ys <- genouts(y,Z,ate=ate) ## generate potential outcomes under tau = ATE distout <- gendist(Ys,perms, prob=probs) # generate sampling dist. under tau = ATE dispdist(distout, ate) ## display characteristics of sampling dist. for inference } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{randomization} %\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
8dfb3497a65f8d281c83e6c138d8fb2b8216f12a
82b8d7b02e49e98613cb92346baf1e114c084275
/tests/testthat/test-prompt-git.R
2e468b40f446c768a91b3e7c13b86a756685f699
[ "MIT" ]
permissive
gwd999/prompt
ceaae598b7f3ee2a25247f8f5fb8235a1e451329
9f2ee37c1c07aecab335cd676e8fbd03a4a84bc7
refs/heads/x
2023-03-23T16:36:01.130623
2021-03-22T21:47:33
2021-03-22T21:47:33
319,088,522
0
0
NOASSERTION
2021-03-22T21:47:33
2020-12-06T17:17:34
null
UTF-8
R
false
false
3,614
r
test-prompt-git.R
# Unit tests for the git-aware prompt helpers (prompt_git, is_git_dir,
# git_branch, git_arrows, git_remote_status, git_dirty, git, git_path).
# External git state is faked with mockery::stub(); tests that need a real
# repository are skipped when no git binary is found on the PATH.

test_that("prompt_git", {
  withr::local_options(cli.unicode = FALSE)
  # Outside a git repo the prompt is the bare "> ".
  mockery::stub(prompt_git, "is_git_dir", FALSE)
  expect_equal(prompt_git(), "> ")
  # Inside a repo the prompt shows branch, dirty marker and sync arrows.
  mockery::stub(prompt_git, "is_git_dir", TRUE)
  mockery::stub(prompt_git, "git_branch", "main")
  mockery::stub(prompt_git, "git_dirty", "*")
  mockery::stub(prompt_git, "git_arrows", " ^v")
  expect_equal(prompt_git(), "main* ^v > ")
})

test_that("is_git_dir", {
  # is_git_dir() keys off the "status" attribute of the underlying git call:
  # 0 means we are in a work tree, nonzero means we are not.
  mockery::stub(is_git_dir, "git", structure(1, status = 0))
  expect_true(is_git_dir())
  mockery::stub(is_git_dir, "git", structure(1, status = 1))
  expect_false(is_git_dir())
})

test_that("git_branch", {
  if (Sys.which("git") == "") skip("no git")
  withr::local_dir(withr::local_tempdir())
  # "main" is reported both outside a repo and in a fresh repo with no commits.
  expect_equal(git_branch(), "main")
  gert::git_init()
  expect_equal(git_branch(), "main")
  cat("foo\n", file = "foo")
  gert::git_add("foo")
  gert::git_commit("Initial commit", author = "gcs <gcs@gmail.com>")
  # After the first commit the branch must match gert's view of the repo.
  expect_equal(git_branch(), gert::git_info()$shorthand)
})

test_that("git_arrows", {
  withr::local_options(cli.unicode = FALSE)
  # Snapshot the arrow indicator for every (ahead, behind) combination,
  # including the unknown (NA) state when there is no upstream.
  mockery::stub(git_arrows, "git_remote_status", c(0, 0))
  expect_snapshot(git_arrows())
  mockery::stub(git_arrows, "git_remote_status", c(0, 1))
  expect_snapshot(git_arrows())
  mockery::stub(git_arrows, "git_remote_status", c(1, 0))
  expect_snapshot(git_arrows())
  mockery::stub(git_arrows, "git_remote_status", c(1, 1))
  expect_snapshot(git_arrows())
  mockery::stub(git_arrows, "git_remote_status", c(NA_integer_, NA_integer_))
  expect_snapshot(git_arrows())
})

test_that("git_remote_status", {
  skip_on_cran()
  skip_on_os("windows") # The file remote does not work...
  if (Sys.which("git") == "") skip("no git")
  # Bare repository that will act as the remote.
  withr::local_dir(remote <- withr::local_tempdir())
  git("init --bare")
  # Working repository with one commit, initially without an upstream.
  withr::local_dir(withr::local_tempdir())
  gert::git_init()
  expect_equal(git_branch(), "main")
  cat("foo\n", file = "foo")
  gert::git_add("foo")
  gert::git_commit("Initial commit", author = "gcs <gcs@gmail.com>")
  # No upstream configured yet -> status is unknown.
  expect_equal(git_remote_status(), c(NA_integer_, NA_integer_))
  gert::git_remote_add(remote)
  gert::git_push(set_upstream = TRUE, verbose = FALSE)
  expect_equal(git_remote_status(), c(0, 0)) # in sync
  cat("foobar\n", append = TRUE, file = "foo")
  gert::git_add("foo")
  gert::git_commit("Second commit", author = "gcs <gcs@gmail.com>")
  expect_equal(git_remote_status(), c(1, 0)) # one commit ahead
  gert::git_push(verbose = FALSE)
  gert::git_reset_soft("HEAD^")
  expect_equal(git_remote_status(), c(0, 1)) # one commit behind
  cat("qwerty\n", append = TRUE, file = "foo")
  gert::git_add("foo")
  gert::git_commit("Third commit", author = "gcs <gcs@gmail.com>")
  expect_equal(git_remote_status(), c(1, 1)) # diverged
})

test_that("git_dirty", {
  if (Sys.which("git") == "") skip("no git")
  withr::local_dir(withr::local_tempdir())
  gert::git_init()
  cat("foo\n", file = "foo")
  gert::git_add("foo")
  gert::git_commit("Initial commit", author = "gcs <gcs@gmail.com>")
  # Clean tree -> empty marker.
  expect_equal(git_dirty(), "")
  # Both unstaged and staged changes count as dirty.
  cat("foobar\n", append = TRUE, file = "foo")
  expect_equal(git_dirty(), "*")
  gert::git_add("foo")
  expect_equal(git_dirty(), "*")
  gert::git_commit("second", author = "gcs <gcs@gmail.com>")
  expect_equal(git_dirty(), "")
})

test_that("git", {
  skip_on_cran()
  if (Sys.which("git") == "") skip("no git")
  # quiet = FALSE echoes git's output as a message.
  expect_message(git("status", quiet = FALSE))
  # An unknown subcommand yields a nonzero "status" attribute, not an R error.
  ret <- git("dsdfsdfsdf")
  expect_true(attr(ret, "status") > 0)
})

test_that("git_path", {
  # Errors for a nonexistent path, returns existing paths unchanged.
  expect_error(git_path(tempfile()))
  expect_equal(git_path(tempdir()), tempdir())
})

test_that("check_git_path", {
  # check_git_path() must error when git_path() cannot resolve a path.
  mockery::stub(check_git_path, "git_path", NULL)
  expect_error(check_git_path())
})
bc4d6662c638598d6492a778a7953da784a1bcb0
b3478761ab8118aead4760e0658b40e804ff1e4a
/BRT_00_Setup.R
5d19ecf15aabc3247eeb6388bff08cbcd8deaee8
[]
no_license
CaitLittlef/DroughtSens-BRTs
0dda6eee65f2417a12ef8398ffd5257c4a92a653
c83af06843422054ea4872a558fec6114c4edf0d
refs/heads/master
2020-04-17T09:09:07.832529
2019-01-18T17:28:50
2019-01-18T17:28:50
166,446,771
0
0
null
null
null
null
UTF-8
R
false
false
4,435
r
BRT_00_Setup.R
### PACKAGES
# One-time installs for the BRT (boosted regression tree) workflow.
install.packages("readr")
install.packages("tidyverse")
install.packages("dplyr")
install.packages("dismo")
install.packages("gbm")
install.packages("usdm")
install.packages("pdp")

### LIBRARIES
library(readr)
library(tidyverse)
library(dplyr)
library(dismo)
library(gbm)
library(usdm)
library(pdp)

### GET SITUATED
# UNC network paths to the goshawk server; each commented alternative is the
# local-drive equivalent for when the script runs directly on goshawk.
setwd("//goshawk.sefs.uw.edu/Space_Lawler/Shared/BackedUp/Caitlin/Drought Sensitivity")
# setwd("D:/Shared/BackedUp/Caitlin/Drought Sensitivity") # If on goshawk
scratchdir <- ("//goshawk.sefs.uw.edu/Space_Lawler/Shared/Scratch/Workspace/Littlefield/DroughtSensitivity_mods")
# scratchdir <- ("D:/Shared/Scratch/Workspace/Littlefield/DroughtSensitivity_mods")
datadir <- ("//goshawk.sefs.uw.edu/Space_Lawler/Shared/BackedUp/Caitlin/Drought Sensitivity/data_for_modeling")
# datadir <- ("D:/Shared/BackedUp/Caitlin/Drought Sensitivity/data_for_modeling")
outdir <- ("//goshawk.sefs.uw.edu/Space_Lawler/Shared/BackedUp/Caitlin/Drought Sensitivity/BRT_modeling/BRT_outputs")
# outdir <- ("D:/Shared/BackedUp/Caitlin/Drought Sensitivity/BRT_modeling/BRT_outputs")

# Read in data
D <- read.csv(paste0(datadir,"/","data_for_modeling.csv"))

# # Separate into forest and steppe datasets
# Forest <- D[which(D$forest.use==1),]
# Steppe <- D[which(D$steppe.use==1),]

# The blocks below are alternative candidate predictor sets that were tried;
# only the final (uncommented) explan.vars assignment is active.

# # Choose explanatory variables to model -- INITIAL
# explan.vars <- c("EAD", # Exposure to ANY drought, but could instead use EMD and/or ESD
#                  "AET","Deficit", # Climate normals
#                  "base_EVI","AGC", # Biomass variables
#                  "soil_AWC","soil_BD", # Soil variables
#                  "elev","CTI","HLI","shade_dens", # Topo variables
#                  "WTD") # Water table depth

# # Choose explanatory variables to model -- THIS PICKS LAND COVER, TOO!
# explan.vars <- c("NVC_LC", # Land cover type (factor)
#                  "EAD", # Exposure to ANY drought, but could instead use EMD and/or ESD
#                  "AET","Deficit", # Climate normals
#                  "base_EVI","AGC", # Biomass variables
#                  "soil_AWC","soil_BD", # Soil variables
#                  "elev","CTI","HLI","shade_dens", # Topo variables
#                  "WTD") # Water table depth

# # Choose explanatory variables to model -- DROPS LC, HLI, SHADE_DENS
# explan.vars <- c("EAD", # Exposure to ANY drought, but could instead use EMD and/or ESD
#                  "AET","Deficit", # Climate normals
#                  "base_EVI","AGC", # Biomass variables
#                  "soil_AWC","soil_BD", # Soil variables
#                  "elev","CTI", # Topo variables
#                  "WTD") # Water table depth

# # Choose explanatory variables to model -- DROPS LC, HLI, SHADE_DENS, C
# explan.vars <- c("EAD", # Exposure to ANY drought, but could instead use EMD and/or ESD
#                  "AET","Deficit", # Climate normals
#                  "base_EVI", # Biomass variables
#                  "soil_AWC","soil_BD", # Soil variables
#                  "elev","CTI", # Topo variables
#                  "WTD") # Water table depth

# # Choose explanatory variables to model -- DROPS LC, HLI, SHADE_DENS, EVI
# explan.vars <- c("EAD", # Exposure to ANY drought, but could instead use EMD and/or ESD
#                  "AET","Deficit", # Climate normals
#                  "AGC", # Biomass variables
#                  "soil_AWC","soil_BD", # Soil variables
#                  "elev","CTI", # Topo variables
#                  "WTD") # Water table depth

# Choose explanatory variables to model -- DROPS LC, HLI, SHADE_DENS, EVI, C
explan.vars <- c("EAD", # Exposure to ANY drought, but could instead use EMD and/or ESD
                 "AET","Deficit", # Climate normals
                 "soil_AWC","soil_BD", # Soil variables
                 "elev","CTI", # Topo variables
                 "WTD") # Water table depth

# # Choose explanatory variables to model -- DROPS base_EVI and AGC
# explan.vars <- c("NVC_LC", # Land cover type (factor)
#                  "EAD", # Exposure to ANY drought, but could instead use EMD and/or ESD
#                  "AET","Deficit", # Climate normals
#                  "soil_AWC","soil_BD", # Soil variables
#                  "elev","CTI","HLI","shade_dens", # Topo variables
#                  "WTD") # Water table depth
2bd3e58b9696aa560709bc22e1960959d3bc890d
7e852c30d994c115468c8ec7cd8a2b924949e4c4
/binder/install.R
a92aa7edc1a4d1fd6f8df28f0b5e1f91bd991b08
[]
no_license
larssp/mybinder-J-Py-R-oct
f1c952467156d265d152d7b5f06b01802223b6b0
2d4aca3b308937f4fc73ba3a80ba0fc7963cac88
refs/heads/master
2021-06-06T02:53:19.281357
2020-03-25T14:33:37
2020-03-25T14:33:37
146,731,421
0
1
null
2018-09-05T13:18:50
2018-08-30T10:06:17
Jupyter Notebook
UTF-8
R
false
false
907
r
install.R
.libPaths( c( "~/binder/R/packages", .libPaths()) ) install.packages("tidyverse", lib="~/binder/R/packages") install.packages("rmarkdown", lib="~/binder/R/packages") install.packages("httr", lib="~/binder/R/packages") install.packages("shinydashboard", lib="~/binder/R/packages") install.packages("leaflet", lib="~/binder/R/packages") install.packages("repr", lib="~/binder/R/packages") install.packages("IRdisplay", lib="~/binder/R/packages") install.packages("evaluate", lib="~/binder/R/packages") install.packages("crayon", lib="~/binder/R/packages") install.packages("pbdZMQ", lib="~/binder/R/packages") install.packages("devtools", lib="~/binder/R/packages") install.packages("uuid", lib="~/binder/R/packages") install.packages("digest", lib="~/binder/R/packages") devtools::install_github('IRkernel/IRkernel', lib="~/binder/R/packages") IRkernel::installspec(user=TRUE,rprofile="~/binder/.Rprofile")
097ef051905063da4ee25d3a2b0860507f99469e
8a925980d46654ef70a7fee57178503d50627ce9
/Job_Satisfaction.R
8f1c21238d2ac49eba4b5f752f3d2cf17521eb0e
[]
no_license
time-consume/R-project
5130d7eeddafb92acfa01af5c25b6ad273df5f08
017f9a717830f15ad38f52d0a62fdd8d150b50d7
refs/heads/master
2020-05-07T01:21:02.978364
2019-04-23T17:16:41
2019-04-23T17:16:41
180,270,962
0
0
null
null
null
null
UTF-8
R
false
false
721
r
Job_Satisfaction.R
#Load packages require(car) require(ggplot2) require(jtools) require(pequod) #Read data dat<-read.csv("EmployeeSat.csv") names(dat) #lm w/o interactions lmbase<-lm(dat$Job_Satisfaction~dat$ValueCompensation+dat$age) summary(lmbase) #center dat$MCcompensation<-scale(dat$ValueCompensation,center = TRUE,scale = F) dat$MCage<-scale(dat$age,center = TRUE,scale = F) #linear moderation lmint<-lmres(dat$Job_Satisfaction~dat$ValueCompensation*dat$age,data = dat) summary(lmint) anova(lmbase,lmint) #interaction plot lmint1<-lmres(Job_Satisfaction~ValueCompensation+age,data = dat) lmint2<-lmres(Job_Satisfaction~ValueCompensation*age,data = dat) slopes<-simpleSlope(lmint2,pred="Job_Satisfaction",mod1 = "ValueCompensation")
9d64193f0ae3ba431a074ab2dc606dec1b2ede95
23b125bbe7b94578e7696550ff367333400a3c26
/encode_names.R
363d227f1e7998ea52198d16ddf357f54fcb2afc
[]
no_license
tambu85/staribacher_diaries
80d6daefb9a11d69e8d7d57aefd6c4bd64fce603
80c18b9f558c1358000a1ff0648e8f3959fb589d
refs/heads/main
2023-05-09T11:49:21.211773
2021-05-26T14:59:23
2021-05-26T14:59:23
371,061,327
0
0
null
null
null
null
UTF-8
R
false
false
524
r
encode_names.R
library(stringi) library(XML) html_txt <- function(str) { xpathApply(htmlParse(str, asText=TRUE), "//body//text()", xmlValue)[[1]] } ##The html_txt can parse the &#227 etc chars to their respective UTF values which can further be taken by stringi functions to convert into english alphabets x <- names txt <- html_txt(x) Encoding(txt) <- "UTF-8" #encoding to utf-8, It is optional you may avoid it splt_txt <-strsplit(txt,split="\n")[[1]] stringi::stri_trans_general(splt_txt, "latin-ascii")
ad16ad00df3bfb971de2c1e5c9afe4ccac308c3f
011b4028e257972160e712a8f7b414b31b992328
/Scripts/dimReduceCreLines.R
988bf1ba7851a866a7bea37ea743c945c16de863
[]
no_license
flw88/deepseq_awesome
320818d9f32bfe84c59ddca012c46b1db774d61d
fb27d8dc872176d5a428f4ace7b3ebe1294ac1f4
refs/heads/master
2020-05-29T09:16:14.419573
2017-03-23T01:59:43
2017-03-23T01:59:43
70,201,225
1
3
null
null
null
null
UTF-8
R
false
false
2,557
r
dimReduceCreLines.R
#!/usr/bin/Rscript
# Run PCA separately on single-cell RNA-seq FPKM profiles for each of four
# Cre driver lines and write the rotation (loadings) matrices to disk.
# The original script repeated the subset/PCA/write stanza four times;
# it is factored into helpers here with identical messages and outputs.

## setwd and load files
print("Loading files...")
setwd("/home/local/users/eflynn/deepseq/")
fpkm <- read.csv("datasets/scRNASeq/fpkm_table.csv")
pheno <- read.csv("datasets/scRNASeq/columns-cells.csv")
#genes <- read.csv("datasets/scRNASeq/rows-genes.csv")

## reformat fpkm df
row.names(fpkm) <- fpkm[, 1]                 ## rename rows
fpkm <- fpkm[, -1]                           ## remove gene id column
names(fpkm) <- gsub("^X", "", names(fpkm))   ## remove leading X from cell IDs
fpkm_t <- data.frame(t(fpkm))                ## rows are cells, columns are genes
names(fpkm_t) <- gsub("^X", "", names(fpkm_t))
fpkm_t$rnaseq_profile_id <- rownames(fpkm_t)

## Return the rows of fpkm_t belonging to one Cre driver genotype.
subset_cre_line <- function(fpkm_t, pheno, driver) {
  ids <- subset(pheno, pheno$genotype_driver == driver)$rnaseq_profile_id
  subset(fpkm_t, rnaseq_profile_id %in% ids)
}

## Run centered (unscaled) PCA on one Cre-line subset, echo the top corner of
## the rotation matrix, and write it to results/batch_effect/fpkm_pr_<label>.txt.
run_pca <- function(fpkm_sub, label) {
  print(paste("Computing principal components on", label))
  # Drop the trailing rnaseq_profile_id column before decomposing.
  fpkm_pr <- prcomp(t(fpkm_sub[, -ncol(fpkm_sub)]), center = TRUE, scale. = FALSE)
  fpkm_pr_rot <- unclass(fpkm_pr$rotation)
  print(fpkm_pr_rot[1:5, 1:5])
  write.table(fpkm_pr_rot,
              file = paste0("results/batch_effect/fpkm_pr_", label, ".txt"),
              quote = FALSE, sep = "\t", row.names = TRUE, col.names = TRUE)
}

## One PCA per Cre driver line (same order and labels as before).
cre_lines <- c(Snap25 = "Snap25-IRES2-Cre",
               Slc32  = "Slc32a1-IRES-Cre",
               Slc17  = "Slc17a6-IRES-Cre",
               Gad2   = "Gad2-IRES-Cre")
for (label in names(cre_lines)) {
  fpkm_sub <- subset_cre_line(fpkm_t, pheno, cre_lines[[label]])
  run_pca(fpkm_sub, label)
}
7fe23a664e4d99f149f67a1a6563f60d99f02d7f
1fe9c4fc4f4b3a193ee042c414bcd87c22fec4af
/Data_management_and_frequent_tasks/initLesson.R
f15387413c2d700e202ac8897672fd9fa49e38a7
[ "MIT" ]
permissive
oucru-biostats/Data_management_and_basic_summaries_in_R
949e6efd6e2dbe0c5d381aab2a6cfe087e4786a3
8153ee732eff1a3bc227cd5211ff30c357871e1f
refs/heads/main
2023-03-19T04:17:51.909131
2021-03-03T02:47:39
2021-03-03T02:47:39
343,683,158
0
0
null
null
null
null
UTF-8
R
false
false
1,711
r
initLesson.R
# Code placed in this file fill be executed every time the # lesson is started. Any variables created here will show up in # the user's working directory and thus be accessible to them # throughout the lesson. default.par <- par() check.package <- function(package) requireNamespace(package, quietly = TRUE) # setwd(file.path(rstudioapi::getActiveProject(), "Data_management_and_basic_summaries_in_R","Data_management_and_frequent_tasks")) .get_course_path <- function(){ tryCatch(swirl:::swirl_courses_dir(), error = function(c) {file.path(find.package("swirl"),"Courses")} ) } setwd(file.path(.get_course_path(),"Data_management_and_basic_summaries_in_R","Data_management_and_frequent_tasks")) if(!check.package('imager')) install.packages('imager') if(!check.package('tidyverse')) install.packages('tidyverse') library(tidyverse) # if(!check.package('RCurl')) install.packages('RCurl') # if(!check.package('png')) install.packages('png') # if(!check.package('jpeg')) install.packages('jpeg') # getImg <- function(txt) { # raw <- base64Decode(txt, mode="raw") # if (all(as.raw(c(0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a))==raw[1:8])) { # it's a png... # img <- png::readPNG(raw) # transparent <- img[,,4] == 0 # img <- as.raster(img[,,1:3]) # img[transparent] <- NA # } else if (all(as.raw(c(0xff, 0xd8, 0xff, 0xd9))==raw[c(1:2, length(raw)-(1:0))])) { # it's a jpeg... # img <- jpeg::readJPEG(raw) # } else stop("No Image!") # return(img) # } load('pivot_data.Rdata') drug <- read.csv('drug.csv') base <- read.csv('base.csv') drug_9NA <- read.csv('drug_9NA.csv') ae <- read.csv('ae.csv') installed <- function(){ invisible(TRUE) }
0fad934cb2841e7911ca7a1e7cd4d538126bece0
a71b7fe35d652d86f136823cd1801eb51d902839
/abort.R
ae473dbb54df87a2541eb230f1cee81f6c1f5b4c
[]
no_license
StaThin/data
9efd602022db768b927c3338e5ce7483f57e3469
d7f6c6b5d4df140527c269b032bb3b0be45ceeeb
refs/heads/master
2023-03-29T18:40:09.694794
2023-03-15T09:32:42
2023-03-15T09:32:42
29,299,462
0
0
null
null
null
null
UTF-8
R
false
false
2,013
r
abort.R
# Christensen "abort" <- structure(list(Age = c(20, 20, 20, 30, 30, 30, 40, 40, 40, 50, 50, 50, 60, 60, 60, 70, 70, 70, 20, 30, 40, 50, 60, 70, 20, 30, 40, 50, 60, 70, 20, 30, 40, 50, 60, 70, 20, 30, 40, 50, 60, 70, 20, 30, 40, 50, 60, 70, 20, 30, 40, 50, 60, 70, 20, 30, 40, 50, 60, 70, 20, 30, 40, 50, 60, 70, 20, 30, 40, 50, 60, 70), Freq = c(96, 44, 1, 138, 64, 2, 117, 56, 6, 75, 48, 5, 72, 49, 6, 83, 60, 8, 140, 171, 152, 101, 102, 111, 43, 65, 58, 51, 58, 67, 1, 4, 9, 9, 10, 16, 24, 18, 16, 12, 6, 4, 5, 7, 7, 6, 8, 10, 2, 1, 3, 4, 3, 4, 21, 25, 20, 17, 14, 13, 4, 6, 5, 5, 5, 5, 1, 2, 1, 1, 1, 1), Opin = structure(c(1, 2, 2, 1, 2, 2, 1, 2, 2, 1, 2, 2, 1, 2, 2, 1, 2, 2, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2), .Label = c("yes", "no"), class = "factor"), Race = structure(c(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2), .Label = c("white", "nonwhite"), class = "factor"), Sex = structure(c(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2), .Label = c("m", "f"), class = "factor")), .Names = c("Age", "Freq", "Opin", "Race", "Sex"), class = "data.frame", row.names = c("1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", "30", "31", "32", "33", "34", "35", "36", "37", "38", "39", "40", "41", "42", "43", "44", "45", "46", "47", "48", "49", "50", "51", "52", "53", "54", "55", "56", "57", "58", "59", "60", "61", "62", "63", "64", "65", "66", "67", "68", "69", "70", "71", "72"))
1b87c2ff36222270abd1e5a57e5fe028c4f95632
46827846ffbc3eb7b420f4f44dceac6af511aad6
/practice9/R/funcs1.R
5806719d567e1ce06b17a6f436fbe6a5d2624c30
[]
no_license
wibbn/r-homeworks
89db3056844c7aa8cf111822a3dfcb6ad0a4ae19
aba89f07f8fef5390321d61c1b3c8ede28c7bd42
refs/heads/master
2023-05-30T07:42:47.174019
2021-06-07T12:21:08
2021-06-07T12:21:08
362,555,400
0
0
null
null
null
null
UTF-8
R
false
false
922
r
funcs1.R
id <- 1:3 country <- as.factor(c("Flatland", "Wonderland", "Sphereland")) craziness <- c(20, 15, 18) region_type <- c("A", "B", "A") author <- as.factor(c("Abbot", "Carroll", "Burger")) size <- c(10, 100, 30) df <- data.frame(id, country, craziness, region_type, author, size) s <- sapply(df, class) first <- function(df) { classes <- c() for (i in 1:length(df[1,])) { classes <- cbind(classes, class(df[,i])) } return (classes) } #2 s <- df[sapply(df, is.numeric)] second <- function(df) { result <- data.frame(temp = rep(c(0), length(df[,1]))) for (i in 1:length(df[1,])) { if (class(df[,i]) == "numeric" || class(df[,i]) == "integer") { result[colnames(df[i])] <- df[i] } } result <- result[-1] return (result) } #3 median <- function(vec) { if (is.numeric(vec)) { return(median(vec)) } else { stop("Vector is not numeric, cannot compute the median") } }
855e09858ac66fab70c1f69b79cf82db0b94e3ce
b25a2e2831e65331c04ea4f130eb821ce8c7615b
/plot1.R
116790f6b7bf0cf6d39e9b1441c063a38d0268b1
[]
no_license
joselle4/RepRes_OpAssign1
6a62dc4f66295dea47459e0f4d8fa08152eb9045
b62c58efdf04feae2b4563d9bb2059a9d92e725b
refs/heads/master
2023-06-23T21:24:03.789720
2021-07-12T22:46:13
2021-07-12T22:46:13
385,403,315
0
0
null
null
null
null
UTF-8
R
false
false
770
r
plot1.R
# Make a plot that answers the question: what is the relationship # between mean covered charges (Average.Covered.Charges) and mean # total payments (Average.Total.Payments) in New York? # load library library(ggplot2) library(cowplot) # set directories filePayments <- paste0(getwd(), "/coursera/payments.csv") # read data payments <- read.csv(filePayments, header = TRUE) # subset NY data ny <- subset(payments, Provider.State == "NY") # create plot and save as pdf filePath <- paste0(getwd(), "/coursera/RepRes_OpAssign1/plot1.pdf") gg <- ggplot(data = ny, aes(x = Average.Covered.Charges, y = Average.Total.Payments)) + geom_point(alpha = 1/3) + ggtitle("Mean Covered Charges vs Mean Total Payments in New York") ggsave(filename = filePath, plot = gg)
f3a7c1d03193acc15b005bf4e11adbecf4ccc91f
17d582790e37f4a1fa3cfcfc531fdf5c4f4086d4
/packrat/lib/x86_64-redhat-linux-gnu/3.5.1/rhdf5/tests/testthat/test_h5read.R
bfea81f9e2b83ef70c81ac364e71b2ae69dd3879
[]
no_license
teyden/asthma-research
bcd02733aeb893074bb71fd58c5c99de03888640
09c1fb98d09e897e652620dcab1482a19743110f
refs/heads/master
2021-01-26T08:20:58.263136
2020-02-27T04:12:56
2020-02-27T04:12:56
243,374,255
0
1
null
null
null
null
UTF-8
R
false
false
4,950
r
test_h5read.R
library(rhdf5) ############################################################ context("h5read") ############################################################ A = 1L:7L; B = matrix(1:18, ncol = 2); D = seq(0, 1, by=0.1) attr(D, "scale") <- "centimeters" ## output file name h5File <- tempfile(pattern = "ex_read", fileext = ".h5") if(file.exists(h5File)) file.remove(h5File) # create file with group heirachy h5createFile(h5File) h5createGroup(file = h5File, group = "foo") h5createDataset(file = h5File, dataset = "foo/A", dims = c(1, length(A) ), storage.mode = "integer") h5write(obj = A, file = h5File, name = "foo/A") h5createDataset(file = h5File, dataset = "foo/B", dims = c(2, length(B)/2 )) h5write(obj = B, file = h5File, name = "foo/B") h5createDataset(file = h5File, dataset = "baa", dims = c(1, length(D) )) h5write(obj = D, file = h5File, name = "baa", write.attributes = TRUE) test_that("Reading a dataset", { baa <- h5read(h5File, name = "baa") expect_is( baa, "matrix" ) expect_equal( dim(baa), c(1, length(D)) ) }) test_that("Reading a group", { foo <- h5read(h5File, name = "foo") expect_is( foo, "list" ) expect_equal( length(foo), 2 ) expect_true( all(c("A", "B") %in% names(foo)) ) }) test_that("Reading a nested dataset", { fooA <- h5read(h5File, name = "foo/A") expect_is( fooA, "matrix" ) expect_equal( dim(fooA), c(1, length(A)) ) }) test_that("Dropping dimensions", { fooA <- h5read(h5File, name = "foo/A", drop = TRUE) expect_is( fooA, "integer" ) expect_null( dim(fooA) ) expect_equal( fooA, A ) ## this drops for matrices too fooB <- h5read(h5File, name = "foo/B", drop = TRUE) expect_is( fooB, "numeric" ) expect_null( dim(fooB) ) expect_equal( fooB, as.numeric(B) ) }) test_that("Reading attributes too", { baa <- h5read(h5File, name = "baa", read.attributes = TRUE) expect_equal( as.character(attributes(baa)$scale), attributes(D)$scale ) }) test_that("Error if file doesn't exist", { expect_error( h5read(file = "/foo/baa.h5", name = "missing"), regexp = "does not 
exist.$") }) test_that("Error if asking for something that isn't there", { expect_error( h5read(file = h5File, name = "missing"), regexp = "does not exist in this HDF5 file.$") }) ############################################################ context("64-bit conversion") ############################################################ ## output file name h5File <- tempfile(pattern = "ex_read", fileext = ".h5") if(file.exists(h5File)) file.remove(h5File) # create file with integers of different types h5createFile(h5File) h5createDataset(h5File, "int32", dims=50, storage.mode="integer") h5createDataset(h5File, "int64", dims=50, storage.mode="integer64") h5createDataset(h5File, "uint32", dims=50, H5type = "H5T_NATIVE_UINT32") h5write(obj = 1:50, file = h5File, name = "int32") h5write(obj = 1:50, file = h5File, name = "int64") h5write(obj = 2^31 + 1:50, file = h5File, name = "uint32") test_that("Signed 32bit integers are unchanged for all conversion arguments", { expect_is(x1 <- h5read(h5File, name = "int32", bit64conversion = "int"), "array") expect_equal(storage.mode(x1), "integer") expect_is(x2 <- h5read(h5File, name = "int32", bit64conversion = "double"), "array") expect_equal(storage.mode(x2), "integer") expect_is(x3 <- h5read(h5File, name = "int32", bit64conversion = "bit64"), "array") expect_equal(storage.mode(x3), "integer") expect_identical(x1, x2) expect_identical(x1, x3) }) test_that("signed 64-bit integers are converted", { expect_is(x1 <- h5read(h5File, name = "int64", bit64conversion = "int"), "array") expect_equal(storage.mode(x1), "integer") expect_is(x2 <- h5read(h5File, name = "int64", bit64conversion = "double"), "array") expect_equal(storage.mode(x2), "double") expect_is(x3 <- h5read(h5File, name = "int64", bit64conversion = "bit64"), "integer64") expect_equal(storage.mode(x2), "double") }) test_that("Unsigned 32bit integers are converted to NA out of range", { expect_warning(x1 <- h5read(h5File, name = "uint32", bit64conversion = "int")) 
expect_true(all(is.na(x1))) }) test_that("Unsigned 32bit integers are converted properly to double/bit64", { expect_is(x2 <- h5read(h5File, name = "uint32", bit64conversion = "double"), "array") expect_equal(storage.mode(x2), "double") expect_equivalent(x2, 2^31 + 1:50) expect_is(x3 <- h5read(h5File, name = "uint32", bit64conversion = "bit64"), "integer64") expect_equal(storage.mode(x3), "double") expect_equal(class(x3), "integer64") expect_true(all(x3 > 2^31)) }) ############################################################ test_that("No open HDF5 objects are left", { expect_equal( length(h5validObjects()), 0 ) })
b7cdcca55acfb5a8c83fa924f8aff7b34dfeabbd
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/RISmed/examples/MinuteReceived.Rd.R
7db03203818a98c8d1ee35e28921ca76bb1ca09b
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
190
r
MinuteReceived.Rd.R
library(RISmed) ### Name: MinuteReceived ### Title: Extracts 'MinuteReceived' from 'Medline' object. ### Aliases: MinuteReceived ### ** Examples data(myeloma) MinuteReceived(myeloma)
88a4a37646dbf426af07655130a0aa94742fcec9
cd322790b759a022f357366a9a7beaa68681fff9
/Titanic.R
00641fd91d5877a197840e9580347e6b3574259b
[]
no_license
amardeepm9/Amar_R_project
2588a97ea59b5417ce8d29fc7ae6d1c5bfca988f
72c566ba0972c8923714cb00ec00c5a69b1cf53a
HEAD
2016-09-06T19:28:43.058887
2014-09-23T02:58:58
2014-09-23T02:58:58
null
0
0
null
null
null
null
WINDOWS-1250
R
false
false
2,214
r
Titanic.R
Tit<-read.csv("C:/Users/Anil/Desktop/titanic/train.csv", stringsAsFactors=FALSE) Tit str(Tit) summary(Tit) table(Tit$Survived) #We see that in the training set, 342 passengers survived, while 549 died. # How about a proportion? Well, we can send the output of one function into another. #So now give prop.table() the output of the table function as input: prop.table(table(Tit$Survived)) #now we want to assume no one survived in the test data Test<-read.csv("C:/Users/Anil/Desktop/titanic/test.csv", stringsAsFactors=FALSE) #Since there was no ‘Survived’ column in the dataframe, # it will create one for us and repeat our ‘0’ prediction 418 times #the number of rows we have. If this column already existed, it would overwrite it with the new values, #so be careful! While not entirely necessary for this simple model, # putting the prediction next to the existing data will help keep things in order later, so it’s a good habit to get into for more complicated predictions. Test$Survived <- rep(0, 418) str(Test) submit <- data.frame(PassengerId = Test$PassengerId, Survived = Test$Survived) submit str(submit) write.csv(submit, file = "C:/Users/Anil/Desktop/titanic/allperish.csv", row.names = FALSE) #gender of the passengers Tit<-read.csv("C:/Users/Anil/Desktop/titanic/train.csv") table(Tit$Sex) prop.table(table(Tit$Sex)) str(Tit$Sex) #two-way comparison on the number of males and FEMALES that survived prop.table(table(Tit$Sex, Tit$Survived)) #proportion of each sex that survived, as separate groups prop.table(table(Tit$Sex, Tit$Survived),1) #as we know majority of the female got saved at least 75% of them so we predict Test$Survived <- 0 Test$Survived[Test$Sex == 'female'] <- 1 Test$Survived == 1 submit <- data.frame(PassengerId = Test$PassengerId, Survived = Test$Survived) submit str(submit) write.csv(submit, file = "C:/Users/Anil/Desktop/titanic/allperish.csv", row.names = FALSE) #now digging the age variable summary(Tit$Age) #as we can see there are 177 NA's available in the 
data and the min age is .42 so we will create another variable Child Tit$child <- 0 Tit$child[Tit$Age < 18]<-1 summary(Tit) aggregate(Survived ~ child + Sex, data=Tit, FUN=sum)
e4353b23b011d31ca29b262cde17733fc8400428
d316f17a1a53f3ed3e0ab6c9d195d51b16754c63
/NBA_Analysis.R
d87be148c5d3c10768406bf4b65fc7e4883b0744
[]
no_license
DaxFree/NBA-Championship-R-Analysis
0a5851bd6042423c1dfaab281b880a619086295c
d43ce300cfa8d0767c712f187ee57f2e7de35195
refs/heads/master
2021-01-20T07:24:19.090909
2017-08-27T07:28:30
2017-08-27T07:28:30
101,536,636
0
0
null
null
null
null
UTF-8
R
false
false
15,896
r
NBA_Analysis.R
# Dax Freeman # May 2017 # Senior Capstone # NBA Championship Teams 80'-16' Basic R Analysis library(readr) install.packages("ggplot2") library(ggplot2) # Bulls Bulls1993 <- read_csv("~/Desktop/CapstoneCSV/Bulls1993.csv") Bulls91 <- read_csv("~/Desktop/CapstoneCSV/Bulls91.csv") Bulls92 <- read_csv("~/Desktop/CapstoneCSV/Bulls92.csv") Bulls96 <- read_csv("~/Desktop/CapstoneCSV/Bulls96.csv") Bulls97 <- read_csv("~/Desktop/CapstoneCSV/Bulls97.csv") Bulls98 <- read_csv("~/Desktop/CapstoneCSV/Bulls98.csv") # Cavaliers Cavaliers16 <- read_csv("~/Desktop/CapstoneCSV/Cavaliers16.csv") # Celtics Celtics81 <- read_csv("~/Desktop/CapstoneCSV/Celtics81.csv") Celtics84 <- read_csv("~/Desktop/CapstoneCSV/Celtics84.csv") Celtics86 <- read_csv("~/Desktop/CapstoneCSV/Celtics86.csv") Celtics08 <- read_csv("~/Desktop/CapstoneCSV/Celtics08.csv") # Golden State Goldenstate15 <- read_csv("~/Desktop/CapstoneCSV/Goldenstate15.csv") # Miami Heat Heat06 <- read_csv("~/Desktop/CapstoneCSV/Heat06.csv") Heat12 <- read_csv("~/Desktop/CapstoneCSV/Heat12.csv") Heat13 <- read_csv("~/Desktop/CapstoneCSV/Heat13.csv") # LA Lakers Lakers_80 <- read_csv("~/Desktop/CapstoneCSV/Lakers-80.csv") Lakers82 <- read_csv("~/Desktop/CapstoneCSV/Lakers82.csv") Lakers85 <- read_csv("~/Desktop/CapstoneCSV/Lakers85.csv") Lakers87 <- read_csv("~/Desktop/CapstoneCSV/Lakers87.csv") Lakers88 <- read_csv("~/Desktop/CapstoneCSV/Lakers88.csv") Lakers00 <- read_csv("~/Desktop/CapstoneCSV/Lakers00.csv") Lakers01 <- read_csv("~/Desktop/CapstoneCSV/Lakers01.csv") Lakers02 <- read_csv("~/Desktop/CapstoneCSV/Lakers02.csv") Lakers09 <- read_csv("~/Desktop/CapstoneCSV/Lakers09.csv") Lakers10 <- read_csv("~/Desktop/CapstoneCSV/Lakers10.csv") # Mavericks Mavericks11 <- read_csv("~/Desktop/CapstoneCSV/Mavericks11.csv") # Detroit Pistons Pistons89 <- read_csv("~/Desktop/CapstoneCSV/Pistons89.csv") Pistons90 <- read_csv("~/Desktop/CapstoneCSV/Pistons90.csv") Pistons04 <- read_csv("~/Desktop/CapstoneCSV/Pistons04.csv") # Houston Rockets 
Rockets94 <- read_csv("~/Desktop/CapstoneCSV/Rockets94.csv") # San Antonio Spurs Spurs99 <- read_csv("~/Desktop/CapstoneCSV/Spurs99.csv") Spurs03 <- read_csv("~/Desktop/CapstoneCSV/Spurs03.csv") Spurs05 <- read_csv("~/Desktop/CapstoneCSV/Spurs05.csv") Spurs07 <- read_csv("~/Desktop/CapstoneCSV/Spurs07.csv") Spurs14 <- read_csv("~/Desktop/CapstoneCSV/Spurs14.csv") # 80'-16' Team totals AllTeams <- read_csv("~/Desktop/CapstoneCSV/AllTeams.csv") AllTeams.na <- na.omit(AllTeams) plot(Age ~ PTS, data = AllTeams) plot(Age ~ PTS, pch=19, col=c(rep("#fa0003", 75), rep("yellow", 93), rep("green", 145), rep("#00d2fa", 160) , rep("#9b081d", 211), rep("#919900", 356), rep("black", 374), rep("orange", 422), rep("black", 437), rep("black", 518)), data = AllTeams) Bulls1991 <- na.omit(Bulls91) mean(Bulls1991$Age) mean(Bulls1991$'3P%') cor(Bulls1991$'3P%', Bulls1991$'2P%' ) Bulls1991.lm = lm(Bulls1991$'3P%' ~ Bulls1991$'2P%', data = Bulls1991) Bulls1991.lm.2 = lm(Bulls1991$'3P%' ~ Bulls1991$'2P%' + Bulls1991$'FT%', data = Bulls1991) summary(Bulls1991.lm.2) summary(Bulls1991.lm) summary(Bulls1991.lm.2) plot(PTS ~ Age, data=Bulls1991) Bulls1992 <- na.omit(Bulls92) mean(Bulls1992$Age) mean(Bulls1992$'3P%') cor(Bulls1992$'3P%', Bulls1992$'2P%' ) Bulls1992.lm = lm(Bulls1992$'3P%' ~ Bulls1992$'2P%', data = Bulls1992) summary(Bulls1992.lm) Bulls1992.lm.2 = lm(Bulls1992$'3P%' ~ Bulls1992$'2P%' + Bulls1992$'FT%', data = Bulls1992) summary(Bulls1992.lm.2) plot(PTS ~ Age, data=Bulls1992) Bulls1993 <- na.omit(Bulls1993) mean(Bulls1993$Age) mean(Bulls1993$'3P%') cor(Bulls1993$'3P%', Bulls1993$'2P%' ) Bulls1993.lm = lm(Bulls1993$'3P%' ~ Bulls1993$'2P%', data = Bulls1993) summary(Bulls1993.lm) Bulls1993.lm.2 = lm(Bulls1993$'3P%' ~ Bulls1993$'2P%' + Bulls1993$'FT%', data = Bulls1993) summary(Bulls1993.lm.2) plot(PTS ~ Age, data=Bulls1993) Bulls1996 <- na.omit(Bulls96) mean(Bulls1996$Age) mean(Bulls1996$'3P%') cor(Bulls1996$'3P%', Bulls1996$'2P%' ) Bulls1996.lm = lm(Bulls1996$'3P%' ~ 
Bulls1996$'2P%', data = Bulls1996) summary(Bulls1996.lm) Bulls1996.lm.2 = lm(Bulls1996$'3P%' ~ Bulls1996$'2P%' + Bulls1996$'FT%', data = Bulls1996) summary(Bulls1996.lm.2) plot(PTS ~ Age, data=Bulls1996) Bulls1997 <- na.omit(Bulls97) mean(Bulls1997$Age) mean(Bulls1997$'3P%') cor(Bulls1997$'3P%', Bulls1997$'2P%' ) Bulls1997.lm = lm(Bulls1997$'3P%' ~ Bulls1997$'2P%', data = Bulls1997) summary(Bulls1997.lm) Bulls1997.lm.2 = lm(Bulls1997$'3P%' ~ Bulls1997$'2P%' + Bulls1997$'FT%', data = Bulls1997) summary(Bulls1997.lm.2) plot(PTS ~ Age, data=Bulls1997, pch = 19) Bulls1998 <- na.omit(Bulls98) mean(Bulls1998$Age) mean(Bulls1998$'3P%') cor(Bulls1998$'3P%', Bulls1998$'2P%' ) Bulls1998.lm = lm(Bulls1998$'3P%' ~ Bulls1998$'2P%', data = Bulls1998) summary(Bulls1998.lm) Bulls1998.lm.2 = lm(Bulls1998$'3P%' ~ Bulls1998$'2P%' + Bulls1998$'FT%', data = Bulls1998) summary(Bulls1998.lm.2) plot(PTS ~ Age, data=Bulls1998) Cavaliers2016 <- na.omit(Cavaliers16) mean(Cavaliers2016$Age) mean(Cavaliers2016$'3P%') cor(Cavaliers2016$'3P%', Cavaliers2016$'2P%' ) Cavaliers2016.lm = lm(Cavaliers2016$'3P%' ~ Cavaliers2016$'2P%', data = Cavaliers2016) summary(Cavaliers2016.lm) Cavaliers2016.lm.2 = lm(Cavaliers2016$'3P%' ~ Cavaliers2016$'2P%' + Cavaliers2016$'FT%', data = Cavaliers2016) summary(Cavaliers2016.lm.2) plot(PTS ~ Age, data=Cavaliers2016) Celtics1981 <- na.omit(Celtics81) mean(Celtics1981$Age) mean(Celtics1981$'3P%') cor(Celtics1981$'3P%', Celtics1981$'2P%' ) Celtics1981.lm = lm(Celtics1981$'3P%' ~ Celtics1981$'2P%', data = Celtics1981) summary(Celtics1981.lm) Celtics1981.lm.2 = lm(Celtics1981$'3P%' ~ Celtics1981$'2P%' + Celtics1981$'FT%', data = Celtics1981) summary(Celtics1981.lm.2) plot(PTS ~ Age, data=Celtics1981) Celtics1984 <- na.omit(Celtics84) mean(Celtics1984$Age) mean(Celtics1984$'3P%') cor(Celtics1984$'3P%', Celtics1984$'2P%' ) Celtics1984.lm = lm(Celtics1984$'3P%' ~ Celtics1984$'2P%', data = Celtics1984) summary(Celtics1984.lm) Celtics1984.lm.2 = lm(Celtics1984$'3P%' ~ 
Celtics1984$'2P%' + Celtics1984$'FT%', data = Celtics1984) summary(Celtics1984.lm.2) plot(PTS ~ Age, data=Celtics1984) Celtics1986 <- na.omit(Celtics86) mean(Celtics1986$Age) mean(Celtics1986$'3P%') cor(Celtics1986$'3P%', Celtics1986$'2P%' ) Celtics1986.lm = lm(Celtics1986$'3P%' ~ Celtics1986$'2P%', data = Celtics1986) summary(Celtics1986.lm) Celtics1986.lm.2 = lm(Celtics1986$'3P%' ~ Celtics1986$'2P%' + Celtics1986$'FT%', data = Celtics1986) summary(Celtics1986.lm.2) plot(PTS ~ Age, data=Celtics1986) Celtics2008 <- na.omit(Celtics08) mean(Celtics2008$Age) mean(Celtics2008$'3P%') cor(Celtics2008$'3P%', Celtics2008$'2P%' ) Celtics2008.lm = lm(Celtics2008$'3P%' ~ Celtics2008$'2P%', data = Celtics2008) summary(Celtics2008.lm) Celtics2008.lm.2 = lm(Celtics2008$'3P%' ~ Celtics2008$'2P%' + Celtics2008$'FT%', data = Celtics2008) summary(Celtics2008.lm.2) plot(PTS ~ Age, data=Celtics2008) Goldenstate2015 <- na.omit(Goldenstate15) mean(Goldenstate2015$Age) mean(Goldenstate2015$'3P%') cor(Goldenstate2015$'3P%', Goldenstate2015$'2P%' ) Goldenstate2015.lm = lm(Goldenstate2015$'3P%' ~ Goldenstate2015$'2P%', data = Goldenstate2015) summary(Goldenstate2015.lm) Goldenstate2015.lm.2 = lm(Goldenstate2015$'3P%' ~ Goldenstate2015$'2P%' + Goldenstate2015$'FT%', data = Goldenstate2015) summary(Goldenstate2015.lm.2) plot(PTS ~ Age, data=Goldenstate2015) Heat2006 <- na.omit(Heat06) mean(Heat2006$Age) mean(Heat2006$'3P%') cor(Heat2006$'3P%', Heat2006$'2P%' ) Heat2006.lm = lm(Heat2006$'3P%' ~ Heat2006$'2P%', data = Heat2006) summary(Heat2006.lm) Heat2006.lm.2 = lm(Heat2006$'3P%' ~ Heat2006$'2P%' + Heat2006$'FT%', data = Heat2006) summary(Heat2006.lm.2) plot(PTS ~ Age, data=Heat2006) Heat2012 <- na.omit(Heat12) mean(Heat2012$Age) mean(Heat2012$'3P%') cor(Heat2012$'3P%', Heat2012$'2P%' ) Heat2012.lm = lm(Heat2012$'3P%' ~ Heat2012$'2P%', data = Heat2012) summary(Heat2012.lm) Heat2012.lm.2 = lm(Heat2012$'3P%' ~ Heat2012$'2P%' + Heat2012$'FT%', data = Heat2012) summary(Heat2012.lm.2) plot(PTS ~ 
Age, data=Heat2012) Heat2013 <- na.omit(Heat13) mean(Heat2013$Age) mean(Heat2013$'3P%') cor(Heat2013$'3P%', Heat2013$'2P%' ) Heat2013.lm = lm(Heat2013$'3P%' ~ Heat2013$'2P%', data = Heat2013) summary(Heat2013.lm) Heat2013.lm.2 = lm(Heat2013$'3P%' ~ Heat2013$'2P%' + Heat2013$'FT%', data = Heat2013) summary(Heat2013.lm.2) plot(PTS ~ Age, data=Heat2013) Lakers1980 <- na.omit(Lakers_80) mean(Lakers1980$Age) mean(Lakers1980$'3P%') cor(Lakers1980$'3P%', Lakers1980$'2P%' ) Lakers1980.lm = lm(Lakers1980$'3P%' ~ Lakers1980$'2P%', data = Lakers1980) summary(Lakers1980.lm) Lakers1980.lm.2 = lm(Lakers1980$'3P%' ~ Lakers1980$'2P%' + Lakers1980$'FT%', data = Lakers1980) summary(Lakers1980.lm.2) plot(PTS ~ Age, data=Lakers1980) Lakers1982 <- na.omit(Lakers82) mean(Lakers1982$Age) mean(Lakers1982$'3P%') cor(Lakers1982$'3P%', Lakers1982$'2P%' ) Lakers1982.lm = lm(Lakers1982$'3P%' ~ Lakers1982$'2P%', data = Lakers1982) summary(Lakers1982.lm) Lakers1982.lm.2 = lm(Lakers1982$'3P%' ~ Lakers1982$'2P%' + Lakers1982$'FT%', data = Lakers1982) summary(Lakers1982.lm.2) plot(PTS ~ Age, data=Lakers1982) Lakers1985 <- na.omit(Lakers85) mean(Lakers1985$Age) mean(Lakers1985$'3P%') cor(Lakers1985$'3P%', Lakers1985$'2P%' ) Lakers1985.lm = lm(Lakers1985$'3P%' ~ Lakers1985$'2P%', data = Lakers1985) summary(Lakers1985.lm) Lakers1985.lm.2 = lm(Lakers1985$'3P%' ~ Lakers1985$'2P%' + Lakers1985$'FT%', data = Lakers1985) summary(Lakers1985.lm.2) plot(PTS ~ Age, data=Lakers1985) Lakers1987 <- na.omit(Lakers87) mean(Lakers1987$Age) mean(Lakers1987$'3P%') cor(Lakers1987$'3P%', Lakers1987$'2P%' ) Lakers1987.lm = lm(Lakers1987$'3P%' ~ Lakers1987$'2P%', data = Lakers1987) summary(Lakers1987.lm) Lakers1987.lm.2 = lm(Lakers1987$'3P%' ~ Lakers1987$'2P%' + Lakers1987$'FT%', data = Lakers1987) summary(Lakers1987.lm.2) plot(PTS ~ Age, data=Lakers1987) Lakers1988 <- na.omit(Lakers88) mean(Lakers1988$Age) mean(Lakers1988$'3P%') cor(Lakers1988$'3P%', Lakers1988$'2P%' ) Lakers1988.lm = lm(Lakers1988$'3P%' ~ 
Lakers1988$'2P%', data = Lakers1988) summary(Lakers1988.lm) Lakers1988.lm.2 = lm(Lakers1988$'3P%' ~ Lakers1988$'2P%' + Lakers1988$'FT%', data = Lakers1988) summary(Lakers1988.lm.2) plot(PTS ~ Age, data=Lakers1988) Lakers2000 <- na.omit(Lakers00) mean(Lakers2000$Age) mean(Lakers2000$'3P%') cor(Lakers2000$'3P%', Lakers2000$'2P%' ) Lakers2000.lm = lm(Lakers2000$'3P%' ~ Lakers2000$'2P%', data = Lakers2000) summary(Lakers2000.lm) Lakers2000.lm.2 = lm(Lakers2000$'3P%' ~ Lakers2000$'2P%' + Lakers2000$'FT%', data = Lakers2000) summary(Lakers2000.lm.2) plot(PTS ~ Age, data=Lakers2000) Lakers2001 <- na.omit(Lakers01) mean(Lakers2001$Age) mean(Lakers2001$'3P%') cor(Lakers2001$'3P%', Lakers2001$'2P%' ) Lakers2001.lm = lm(Lakers2001$'3P%' ~ Lakers2001$'2P%', data = Lakers2001) summary(Lakers2001.lm) Lakers2001.lm.2 = lm(Lakers2001$'3P%' ~ Lakers2001$'2P%' + Lakers2001$'FT%', data = Lakers2001) summary(Lakers2001.lm.2) plot(PTS ~ Age, data=Lakers2001) Lakers2002 <- na.omit(Lakers02) mean(Lakers2002$Age) mean(Lakers2002$'3P%') cor(Lakers2002$'3P%', Lakers2002$'2P%' ) Lakers2002.lm = lm(Lakers2002$'3P%' ~ Lakers2002$'2P%', data = Lakers2002) summary(Lakers2002.lm) Lakers2002.lm.2 = lm(Lakers2002$'3P%' ~ Lakers2002$'2P%' + Lakers2002$'FT%', data = Lakers2002) summary(Lakers2002.lm.2) plot(PTS ~ Age, data=Lakers2002) Lakers2009 <- na.omit(Lakers09) mean(Lakers2009$Age) mean(Lakers2009$'3P%') cor(Lakers2009$'3P%', Lakers2009$'2P%' ) Lakers2009.lm = lm(Lakers2009$'3P%' ~ Lakers2009$'2P%', data = Lakers2009) summary(Lakers2009.lm) Lakers2009.lm.2 = lm(Lakers2009$'3P%' ~ Lakers2009$'2P%' + Lakers2009$'FT%', data = Lakers2009) summary(Lakers2009.lm.2) plot(PTS ~ Age, data=Lakers2009) Lakers2010 <- na.omit(Lakers10) mean(Lakers2010$Age) mean(Lakers2010$'3P%') cor(Lakers2010 $'3P%', Lakers2010 $'2P%' ) Lakers2010.lm = lm(Lakers2010$'3P%' ~ Lakers2010 $'2P%', data = Lakers2010 ) summary(Lakers2010.lm) Lakers2010.lm.2 = lm(Lakers2010$'3P%' ~ Lakers2010$'2P%' + Lakers2010$'FT%', data = 
Lakers2010) summary(Lakers2010.lm.2) plot(PTS ~ Age, data=Lakers2010) Mavericks2011 <- na.omit(Mavericks11) mean(Mavericks2011$Age) mean(Mavericks2011$'3P%') cor(Mavericks2011$'3P%', Mavericks2011$'2P%' ) Mavericks2011.lm = lm(Mavericks2011$'3P%' ~ Mavericks2011$'2P%', data = Mavericks2011) summary(Mavericks2011.lm) Mavericks2011.lm.2 = lm(Mavericks2011$'3P%' ~ Mavericks2011$'2P%' + Mavericks2011$'FT%', data = Mavericks2011) summary(Mavericks2011.lm.2) plot(PTS ~ Age, data=Mavericks2011) Pistons1989 <- na.omit(Pistons89) mean(Pistons1989$Age) mean(Pistons1989$'3P%') cor(Pistons1989$'3P%', Pistons1989$'2P%' ) Pistons1989.lm = lm(Pistons1989$'3P%' ~ Pistons1989$'2P%', data = Pistons1989) summary(Pistons1989.lm) Pistons1989.lm.2 = lm(Pistons1989$'3P%' ~ Pistons1989$'2P%' + Pistons1989$'FT%', data = Pistons1989) summary(Pistons1989.lm.2) plot(PTS ~ Age, data=Pistons1989) Pistons1990 <- na.omit(Pistons90) mean(Pistons1990$Age) mean(Pistons1990$'3P%') cor(Pistons1990$'3P%', Pistons1990$'2P%' ) Pistons1990.lm = lm(Pistons1990$'3P%' ~ Pistons1990 $'2P%', data = Pistons1990 ) summary(Pistons1990.lm) Pistons1990.lm.2 = lm(Pistons1990$'3P%' ~ Pistons1990$'2P%' + Pistons1990$'FT%', data = Pistons1990) summary(Pistons1990.lm.2) plot(PTS ~ Age, data=Pistons1990 ) Pistons2004 <- na.omit(Pistons04) mean(Pistons2004$Age) mean(Pistons2004$'3P%') cor(Pistons2004$'3P%', Pistons2004$'2P%' ) Pistons2004.lm = lm(Pistons2004$'3P%' ~ Pistons2004$'2P%', data = Pistons2004) summary(Pistons2004.lm) Pistons2004.lm.2 = lm(Pistons2004$'3P%' ~ Pistons2004$'2P%' + Pistons2004$'FT%', data = Pistons2004) summary(Pistons2004.lm.2) plot(PTS ~ Age, data=Pistons2004) Rockets1994 <- na.omit(Rockets94) mean(Rockets1994$Age) mean(Rockets1994$'3P%') cor(Rockets1994$'3P%', Rockets1994$'2P%' ) Rockets1994.lm = lm(Rockets1994$'3P%' ~ Rockets1994$'2P%', data = Rockets1994) summary(Rockets1994.lm) Rockets1994.lm.2 = lm(Rockets1994$'3P%' ~ Rockets1994$'2P%' + Rockets1994$'FT%', data = Rockets1994) 
summary(Rockets1994.lm.2) plot(PTS ~ Age, data=Rockets1994) Spurs1999 <- na.omit(Spurs99) mean(Spurs1999$Age) mean(Spurs1999$'3P%') cor(Spurs1999$'3P%', Spurs1999$'2P%' ) Spurs1999.lm = lm(Spurs1999$'3P%' ~ Spurs1999$'2P%', data = Spurs1999) summary(Spurs1999.lm) Spurs1999.lm.2 = lm(Spurs1999$'3P%' ~ Spurs1999$'2P%' + Spurs1999$'FT%', data = Spurs1999) summary(Spurs1999.lm.2) plot(PTS ~ Age, data=Spurs1999) Spurs2003 <- na.omit(Spurs03) mean(Spurs2003$Age) mean(Spurs2003$'3P%') cor(Spurs2003$'3P%', Spurs2003$'2P%' ) Spurs2003.lm = lm(Spurs2003$'3P%' ~ Spurs2003$'2P%', data = Spurs2003) summary(Spurs2003.lm) Spurs2003.lm.2 = lm(Spurs2003$'3P%' ~ Spurs2003$'2P%' + Spurs2003$'FT%', data = Spurs2003) summary(Spurs2003.lm.2) plot(PTS ~ Age, data=Spurs2003) Spurs2005 <- na.omit(Spurs05) mean(Spurs2005$Age) mean(Spurs2005$'3P%') cor(Spurs2005$'3P%', Spurs2005$'2P%' ) Spurs2005.lm = lm(Spurs2005$'3P%' ~ Spurs2005$'2P%', data = Spurs2005) summary(Spurs2005.lm) Spurs2005.lm.2 = lm(Spurs2005$'3P%' ~ Spurs2005$'2P%' + Spurs2005$'FT%', data = Spurs2005) summary(Spurs2005.lm.2) plot(PTS ~ Age, data=Spurs2005) Spurs2007 <- na.omit(Spurs07) mean(Spurs2007$Age) mean(Spurs2007$'3P%') cor(Spurs2007$'3P%', Spurs2007$'2P%' ) Spurs2007.lm = lm(Spurs2007$'3P%' ~ Spurs2007$'2P%', data = Spurs2007) summary(Spurs2007.lm) Spurs2007.lm.2 = lm(Spurs2007$'3P%' ~ Spurs2007$'2P%' + Spurs2007$'FT%', data = Spurs2007) summary(Spurs2007.lm.2) plot(PTS ~ Age, data=Spurs2007) Spurs2014 <- na.omit(Spurs14) mean(Spurs2014$Age) cor(Spurs2014$'3P%', Spurs2014$'2P%' ) Spurs2014.lm = lm(Spurs2014$'3P%' ~ Spurs2014$'2P%', data = Spurs2014) summary(Spurs2014.lm) Spurs2014.lm.2 = lm(Spurs2014$'3P%' ~ Spurs2014$'2P%' + Spurs2014$'FT%', data = Spurs2014) summary(Spurs2014.lm.2) plot(PTS ~ Age, data=Spurs2014)
611b8007a8146f64ddbdcab8480c4181abf21791
d2c892e59bb876e2205ad6ca9acb3e904aaeab5b
/code/functions/Code_5term_opt.R
d47054d5f49d7e847b95080b5505ef0295ab0ed4
[]
no_license
yuqimiao/multiomics-SIMLR
088738b77a7e0441a41e0d6b14137c2e1aefa8a7
bedd32e5e5ddafad2844803d901e5075c716203a
refs/heads/master
2023-03-09T10:18:04.289011
2021-02-20T04:40:39
2021-02-20T04:40:39
293,400,493
0
0
null
null
null
null
UTF-8
R
false
false
3,633
r
Code_5term_opt.R
library(quadprog) case_control_mat = function(response) { # response: a vector of binary outcomes (0s and 1s) # Construct a matrix to show the case/control status between responses: 1 (same) -1 (different) return(2*as.matrix(stats::dist(response, method = 'manhattan'))-1) } Crazy_5term_opt = function(Kmat_ls, # a list of kernel matrices for all samples response, # binary outcomes rho, # tuning parameter for entropy penalty alpha, # tuning parameter for <S, A/||A||> beta, # tuning parameter for ||S|| gamma, # tuning parameter for laplacian stopping = 10^-3, # stopping rule n_ite = 50, # max number of iterations print_details = F # whether to print stepwise details ) { s_Kmat_ls = Kmat_ls # scaled kernel matrices for (ll in 1:length(s_Kmat_ls)) { s_Kmat_ls[[ll]] = s_Kmat_ls[[ll]] / norm(s_Kmat_ls[[ll]], type = 'F') } n_sam = nrow(s_Kmat_ls[[1]]) # number of samples n_ker = length(s_Kmat_ls) # number of kernels I_n = diag(n_sam) # diagonal matrix old_w = rep(1/n_ker, n_ker) # initialize weights old_S = matrix(NA, nrow = n_sam, ncol = n_sam) # initialize S old_L = matrix(0, nrow = n_sam, ncol = 2) # initialize L old_L[which(response == 1), 1] = old_L[which(response == 0), 2] = 1 old_L = t(t(old_L)/sqrt(colSums(old_L))) # scale old_L A = case_control_mat(response) # case control status matrix s_A = A / norm(A, type = 'F') # scaled A ### Start iteration for (k in 1:n_ite) { w_s_Kmat = matrix(0, n_sam, n_sam) # weighted average of scaled kernel matrices for (ll in 1:n_ker) {w_s_Kmat = w_s_Kmat + old_w[ll] * s_Kmat_ls[[ll]]} ### Initialization new_w = old_w new_L = old_L new_S = old_S linear_terms_S = w_s_Kmat + alpha * s_A + gamma * (old_L %*% t(old_L)) ### Update S for (i in 1:n_sam) { QP_results = solve.QP(Dmat = I_n * beta * 2, # quadratic programming dvec = linear_terms_S[i,], Amat = t(rbind(rep(1,n_sam), I_n)), bvec = c(1, rep(0,n_sam)), meq = 1) new_S[,i] = QP_results$solution } new_S = (new_S+t(new_S))/2 # make sure S is symmetric ### Update L Laplacian = I_n - 
new_S eg_results = eigen(Laplacian) # eigen-decompositions new_L = eg_results$vectors[, c(n_sam-1, n_sam)] # extract the two eigenvectors ### Update w first_term = vector() # 1st term in optimization for (ll in 1:n_ker) { first_term[ll] = sum(s_Kmat_ls[[ll]] * new_S) new_w[ll] = exp(first_term[ll] / rho) # the new weights for kernels } new_w = new_w/sum(new_w) # scale the kernels ### Print details if (print_details) { cat(paste0('Iteration ',k, ':\n Optimal weights: ')) cat(new_w) cat('\n') opt_value = - sum(first_term) + rho * sum(new_w * log(new_w)) - alpha * sum(new_S * s_A) + beta * norm(new_S, type = 'F')^2 + gamma * sum(diag(t(new_L) %*% (I_n - new_S) %*% new_L)) cat(paste0(' Criterion: ', opt_value, '\n')) } ### Whether to stop if (k>=3 & max(abs(new_w-old_w))<= stopping) {break} else { old_w = new_w old_S = new_S old_L = new_L } } names(new_w) = names(s_Kmat_ls) return(list(w = new_w, S = new_S, L = new_L)) } rho = 10^-2 alpha = 1 beta = 10^0 gamma = 10^0
4484d2785addf203199f39ec83b191184ff24036
c6d3fba0648f0ad48557a6614b42e1943a13e1b8
/cachematrix.R
138db1d987beec14adf3903ef2a8e378127f1772
[]
no_license
tprashanth85/ProgrammingAssignment2
fb7e02e19d487adb814f4b6fd07f2a527a540649
adb488830f09c36faf06094a4138a2b8107d27cf
refs/heads/master
2020-12-25T11:20:26.919850
2014-07-25T04:13:53
2014-07-25T04:13:53
null
0
0
null
null
null
null
UTF-8
R
false
false
2,587
r
cachematrix.R
## The two below functions are developed to simulate caching for calculating the inverse of a ## matrix. makeCacheMatrix creates a special object that can be used to set and get matrix and ## to set and get inverse of matrix. cacheSolve function computes the inverse of the ## special "matrix" returned by makeCacheMatrix above. If the inverse has already been calculated ## and if the matrix is unchanged, then the cachesolve will retrieve the inverse from the cache. ## This function creates a special matrix object ## which is really a list containing a function to ## set the value of the matrix ## get the value of the matrix ## set the value of the inverse ## get the value of the inverse ## To run the functions below commands can be tested: ## a<-makeCacheMatrix() ## x<-matrix(1:4,nrow=2,ncol=2) ## y<-matrix(5:8,nrow=2,ncol=2) ## a$setMatrix(x) ## cacheSolve(a) ## cacheSolve(a,x) ## cacheSolve(a,y) makeCacheMatrix <- function(matx = matrix()) { inver <- NULL setMatrix <- function(maty) { ##This function sets the value of the matrix matx <<- maty inver <<- NULL } getMatrix <- function() { ## This function gets the value of the matrix matx } setInverse <- function(inv) { ## This function sets the inverse of the matrix inver <<- inv } getInverse <- function() { ## This function gets the inverse of the matrix inver } ## list of all the functions is returned by makeCacheMatrix to an object list(setMatrix= setMatrix, getMatrix = getMatrix, setInverse = setInverse, getInverse = getInverse) } cacheSolve <- function(x,...) { inv <- x$getInverse() ##Get the inverse if already calculated classlist <- list(...) 
##Get the list of arguments passed to cacheSolve if(length(classlist) >=1) ##If arguments are passed, then new matrix is provided { mata <- classlist[[1]] } else { ## No arguments passed means that the existing matrix needs to be used mata <- x$getMatrix() } matb <- x$getMatrix() if(is.matrix(mata) && dim(mata)==dim(matb) && all(mata==matb) && (!is.null(inv))) { ## Compare the existing matrix with the new matrix, if they are same print cached inverse message("No Change in Matrix") message("Getting cached inverse") return(inv) } else { ##If existing matrix and new matrix are different, calculate inverse for new matrix message("New Matrix") message("Calculating Inverse") inv <- solve(mata) x$setInverse(inv) inv } }
62c481c7a4e8dbe573d688117bb98574ed347e12
1182588893ed8a91009078deb8b65620033f32d5
/plot4.R
f31f979d54e69df1da110721dbfc6e19df0531c9
[]
no_license
mwatchorn/ExData_Plotting1
d3405a0e75947b4697f2c486426864d0a391beaa
3ee3c2aaf2ef8302442ca0692c5ab7a5a70d66f1
refs/heads/master
2020-05-29T11:08:05.622717
2016-05-27T01:02:18
2016-05-27T01:02:18
59,589,881
0
0
null
2016-05-24T16:28:21
2016-05-24T16:28:20
null
UTF-8
R
false
false
1,693
r
plot4.R
setwd ("G://ed_proj1") pwr_consume_agg <- read.table ( file = "household_power_consumption.txt" , header = TRUE , sep = ";" , na.strings = "?" , stringsAsFactors = FALSE ) pwr_consume_agg$Date <- as.Date (pwr_consume_agg$Date, "%d/%m/%Y") pwr_consume_eval <- subset (pwr_consume_agg , Date %in% c(as.Date("2007-02-01", "%Y-%m-%d"), as.Date("2007-02-02", "%Y-%m-%d")) ) par(mfrow = c(2, 2)) plot ( pwr_consume_eval$Global_active_power , type = "l" , col = "black" , xlab = "" , ylab = "Global Active Power" , xaxt = "n" ) axis (1, at = seq (1, 2881, by = 1440), labels = c("Thu", "Fri", "Sat")) plot ( pwr_consume_eval$Voltage , type = "l" , col = "black" , xlab = "datetime" , ylab = "Voltage" , xaxt = "n" ) axis (1, at = seq (1, 2881, by = 1440), labels = c("Thu", "Fri", "Sat")) plot ( pwr_consume_eval$Sub_metering_1 , type = "l" , col = "black" , xlab = "" , ylab = "Energy sub metering" , xaxt = "n" ) axis (1, at = seq (1, 2881, by = 1440), labels = c("Thu", "Fri", "Sat")) lines (pwr_consume_eval$Sub_metering_2, type = "l", col = "red") lines (pwr_consume_eval$Sub_metering_3, type = "l", col = "blue") legend ('topright' , c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3") , lty = 1 , col = c('black', 'red') , cex = 0.55 ) plot ( pwr_consume_eval$Global_reactive_power , type = "l" , col = "black" , xlab = "datetime" , ylab = "Global_reactive_power" , xaxt = "n" , yaxt = "n" ) axis (1, at = seq (1, 2881, by = 1440), labels = c("Thu", "Fri", "Sat")) axis (2, at = seq (0.0, 0.5, by = 0.1), cex.axis = 0.75) dev.copy (png, file = "plot4.png", width = 480, height = 480) dev.off ()
586ef78452a8899e39e4f4a255e7db253536cca0
b950da2e7c75b35ed22e7b0f02936f3b022e9c71
/old/WTPCM_r/Eval_WTPwC_IS2a.R
7316c1b356353b3f953c3bea3d3fd72b53314683
[]
no_license
YvesMSaintDrenan/WT_PowerCurveModel
f5c6c2568371aeb303f80988ad175dbf2e69b88e
e82d989a6115651b8ae58c546993783e2bd2f10d
refs/heads/master
2023-06-22T08:37:07.624032
2023-06-12T12:30:30
2023-06-12T12:30:30
246,547,657
9
5
null
null
null
null
UTF-8
R
false
false
7,108
r
Eval_WTPwC_IS2a.R
# library needed to use function polymul # there also is a function conv, which should do the same but for some reason doesn't # library(pracma) Eval_WTPwC <- function(WT_param) { # Define the 11 parameters for the Cp model based on values found in the literature # This assigns the values from Table A.2 of the paper for use in equation A.1 # # Args: # WT_param: [list] list of the following variables: # Drotor: rotor diameter in metres # Pnom: nominal power in kW # and optionally: # Vws: ??? # Vwin: ??? # Vcutin: ??? # Vcutoff: ??? # rMin: ??? # rMax: ??? # TI: ??? # iModel: ??? # CpMAX: ??? # rhoair: ??? # # Returns: # [??] ??? # # Example: # WT_param <- list() # WT_param$rMin <- 4 # WT_param$rMax <- 13 # WT_param$Drotor <- 120 # WT_param$Pnom <- 4000 # WTPwC <- Eval_WTPwC(WT_param) # Exit if required model parameters are not provided if (length(WT_param$Drotor)==0) { stop("Eval_WTPwC: Drotor [rotor diameter in metres] not supplied in your parameters, with no default available.\n") } if (length(WT_param$Pnom)==0) { stop("Eval_WTPwC: Pnom [generator power in kW] not supplied in your parameters, with no default available.\n") } # Assign default model parameters where needed if (length(WT_param$Vws)==0) { WT_param$Vws <- seq(0,30,0.01) } if (length(WT_param$Vcutin)==0) { WT_param$Vcutin <- 0 } if (length(WT_param$Vcutoff)==0) { WT_param$Vcutoff <- 25 } if (length(WT_param$rMin)==0) { WT_param$rMin <- 188.8 * WT_param$Drotor^(-0.7081) # angular speed in rpm # source: http://publications.lib.chalmers.se/records/fulltext/179591/179591.pdf } if (length(WT_param$rMax)==0) { WT_param$rMax <- 793.7 * WT_param$Drotor^(-0.8504) # source: http://publications.lib.chalmers.se/records/fulltext/179591/179591.pdf } if (length(WT_param$TI)==0) { WT_param$TI <- 0.1 } if (length(WT_param$iModel)==0) { WT_param$iModel <- 6 } if (length(WT_param$CpMAX)==0) { WT_param$CpMAX <- NaN } if (length(WT_param$rhoair)==0) { WT_param$rhoair <- 1.225 } #### Calculate the TSR (lambda) value to 
maximize Cp #### lambda <- seq(0, 12, 0.001) Cpfct <- function(lambda) {CpLambdaModels(WT_param$iModel, lambda)} lambdaOpt <- lambda[which(Cpfct(lambda)$Cp==max(Cpfct(lambda)$Cp))] #### Calculate the rotor rotational speed [rpm] corresponding to the maximum output #### RotorSpeed <- (lambdaOpt*WT_param$Vws)/(WT_param$Drotor/2)/(2*pi/60) RotorSpeed <- mapply(max, WT_param$rMin, mapply(min, WT_param$rMax, RotorSpeed)) #### Calculate the tip speed ratio and Cp value from the rotor speed #### lambda <- (2*pi/60)*RotorSpeed*(WT_param$Drotor/2)/WT_param$Vws Cp <- Cpfct(lambda)$Cp if (is.na(WT_param$CpMAX)) { Cp <- Cp/max(Cp)*WT_param$CpMAX } #### Calculate the power output @ TI=0 #### Pin <- 0.5*WT_param$rhoair*(pi*(WT_param$Drotor/2)^2)*WT_param$Vws^3/1000 Pout_ti0 <- mapply(min, WT_param$Pnom, Pin*Cp) ####IAIN: NINJA CODE START Pout <- Pout_ti0 #### Consider the effect of the TI on the power #### if (WT_param$TI>0) { resWS <- min(diff(WT_param$Vws)) iiWS <- which(WT_param$Vws > 0 & WT_param$Vws < WT_param$Vcutoff) # run through all wind speeds between zero and our cutout speed for (i in iiWS) { xMid <- WT_param$Vws[i] # this wind speed Kstd <- WT_param$TI * xMid # the standard deivation based on the chosen TI xK <- round(3*Kstd/resWS) * resWS # 3 standard deviations, rounded to our nearest wind speeds xK <- seq(-xK, xK, resWS) # our window of wind speeds wK <- exp(-0.5 * (xK/Kstd)^2) # our gaussian kernel # select the power outputs within our window yK <- Pout_ti0[ match(round((xK+xMid)/resWS), round(WT_param$Vws/resWS)) ] # calculate the smoothed power output at this wind speed Pout[i] <- weighted.mean(yK, wK, na.rm=TRUE) } } ####IAIN: NINJA CODE END #### Consider the effect of the cut-in & cut-off wind speed #### Pout[which(WT_param$Vws < WT_param$Vcutin)] <- 0 Pout[which(WT_param$Vws > WT_param$Vcutoff)] <- 0 #### output structure #### WTPwC <- WT_param CpModels <- c('Slootweg et al. 2003', 'Heier 2009', 'Thongam et al. 2009', 'De Kooning et al. 
2010', 'Ochieng et Manyonge 2014', 'Dai et al. 2016') WTPwC$CpModel <- CpModels[WT_param$iModel] WTPwC$Pin <- Pin WTPwC$lambdaOpt <- lambdaOpt WTPwC$RotorSpeed <- RotorSpeed WTPwC$lambda <- lambda WTPwC$Cp <- Cp WTPwC$Pout_ti0 <- Pout_ti0 WTPwC$Pout <- Pout return(WTPwC) } ####IAIN: I have swapped size() for length() in the function arguments #### so that we remove need for the pracma library CpLambdaModels <- function(iiMdl, TSR, Beta=array(0, dim=length(TSR))) { # Define the 11 parameters for the Cp model based on values found in the literature # This assigns the values from Table A.2 of the paper for use in equation A.1 # # Args: # iiMdl: [number] model number # TSR: [number] tip speed ratio # Beta: [array of numbers] ??? # # Returns: # [list] ??? c1 <- c(0.73, 0.5, 0.5176, 0.77, 0.5, 0.22) c2 <- c(151, 116, 116, 151, 116, 120) c3 <- c(0.58, 0.4, 0.4, 0, 0, 0.4) c4 <- c(0, 0, 0, 0, 0.4, 0) c5 <- c(0.002, 0, 0, 0, 0, 0) x <- c(2.14, 0, 0, 0, 0, 0) c6 <- c(13.2, 5, 5, 13.65, 5, 5) c7 <- c(18.4, 21, 21, 18.4, 21, 12.5) c8 <- c(0, 0, 0.006795, 0, 0, 0) c9 <- c(-0.02, 0.089, 0.089, 0, 0.08, 0.08) c10 <- c(0.003, 0.035, 0.035, 0, 0.035, 0.035) CellSources <- c('Slootweg et al. 2003', 'Heier 2009', 'Thongam et al. 2009', 'De Kooning et al. 2010', 'Ochieng et Manyonge 2014', 'Dai et al. 2016') Li <- 1/(1/(TSR+c9[iiMdl]*Beta)-c10[iiMdl]/(Beta^3+1)) Cp <- mapply(max, c1[iiMdl]*(c2[iiMdl]/Li-c3[iiMdl]*Beta-c4[iiMdl]*Li*Beta-c5[iiMdl]*Beta^x[iiMdl]-c6[iiMdl])*exp(-c7[iiMdl]/Li)+c8[iiMdl]*TSR, 0) ModelName <- CellSources[iiMdl] Cp[1] <- 0 # set first element 0, because it is NaN (caused by Inf*0) and this causes problems ret = list(Cp,ModelName) names(ret) <- c("Cp","ModelName") return(ret) } # # there is no conv function with the parameter 'same', so implement it here: # conv_same <- function(u, v) { # # Description # # # # Args: # # u: [??] ??? # # v: [??] ??? # # # # Returns: # # [??] ??? 
# # because polymul cuts off 0s at the beginning, add them artifically # if (u[1]==0) { # u0 <- rle(u)$lengths[1] # } else { # u0 <- 0 # } # if (v[1]==0) { # v0 <- rle(v)$lengths[1] # } else { # v0 <- 0 # } # uv <- c(rep(0, u0+v0), polymul(u, v)) # start <- length(v) / 2 + 1 # end <- length(v) / 2 + length(u) # return(uv[start:end]) # }
be31948ac308c511b1c37b37ca2054fcceecfe34
a114996ecfdd212ea60e54238a24b3bf51112b13
/Functions/PrimeSieve.R
eb4cdf2ea3b52a7304979839a458af26fd43494d
[]
no_license
Adam-Hoelscher/ProjectEuler.R
35610697d5bc4c61e9adfaec6d923a424df20486
e060484365cafcfcd400c3cecacc189293dfc682
refs/heads/master
2021-01-23T01:29:54.585429
2017-09-11T18:28:46
2017-09-11T18:28:46
102,432,553
0
0
null
null
null
null
UTF-8
R
false
false
445
r
PrimeSieve.R
PrimeSieve<-function(to, from = 1){ if (from > to){ warning('Upper bound of sieve is below lower bound integer(0) returned') return(integer(0)) } n <- to a <- c(F, rep(T, times = (n-1))) p <- 2 while (p^2 <= n){ j <- p^2 while (j <= n){ a[j] <- F j <- (j+p) } p <- p+1 while(!a[p]){p <- p+1} } temp <- (1:n)[a] temp <- temp[which(temp >= from)] return(temp) }
32686d471ef4a97641303fdb8e37404d056f17e5
274f1381f22346a58e5635763c924b4872d013f1
/R/callWithoutArgs.R
ca0c87f0babd5be0306b18396a9dbdb2e7fbd97f
[]
no_license
cran/maxLik
c9eeb1d95b3aa4a174cad54f67936cf75ed4b908
3e23589ca66ae3e1d3ddc6dba6c3a88dced57ca9
refs/heads/master
2021-08-07T07:13:49.328723
2021-07-26T16:30:02
2021-07-26T16:30:02
17,697,315
0
4
null
2021-04-03T12:39:53
2014-03-13T05:16:36
R
UTF-8
R
false
false
330
r
callWithoutArgs.R
## strip arguments "args" and call the function with name "fName" thereafter callWithoutArgs <- function(theta, fName, args, ...) { f <- match.call() f[ args ] <- NULL f[[1]] <- as.name(fName) names(f)[2] <- "" f[["fName"]] <- NULL f[["args"]] <- NULL f1 <- eval(f, sys.frame(sys.parent())) return( f1 ) }
8b9516ea88006592d618450d7a16e83b8aa927c1
c1bb17204521840a31199cd14fd54bdecd43f990
/analysis_notebooks/chromHMM_maps_all_tissues.R
c3c8d345f71005a774f348b6a92cad9a8eed75bd
[]
no_license
pjshort/de_novo_noncoding
761593c77e8226a4dad28c41173c274578205c43
a8d1328549910656d01b6b44928a87d71bdabe57
refs/heads/master
2020-02-26T17:26:44.932949
2016-06-19T16:50:47
2016-06-19T16:50:47
58,368,422
1
0
null
null
null
null
UTF-8
R
false
false
3,448
r
chromHMM_maps_all_tissues.R
# test the MAPS difference between quiescent and all other states in each tissue type source("~/software/dddMAPS/dddMAPS/MAPS.R") load("~/software/dddMAPS/data/DDD_4k_parents_synonymous_maps_lm.RData") library(stringr) mu_snp <- read.table("~/reference_data/forSanger_1KG_mutation_rate_table.txt", header=TRUE) gencode = read.table("~/reference_data/gencode.v19.CDS.probe_overlap.min10_coverage.txt", header = TRUE, sep = "\t", stringsAsFactors = FALSE) noncoding_intervals = read.table("~/reference_data/noncoding_control_and_functional.min10_coverage.txt", header = TRUE, sep = "\t", stringsAsFactors = FALSE) # only needed for noncoding analysis sequences = rbind(gencode[,c("chr", "start", "stop", "seq")], noncoding_intervals[,c("chr", "start", "stop", "seq")]) get_MAPS_for_tissue_chromHMM = function(unaff_parent_variants, chromHMM_bed , maps_lm) { # load the BED file chromHMM_15state = read.table(gzfile(chromHMM_bed), header = FALSE, sep = "\t") colnames(chromHMM_15state) = c("chr", "start", "stop", "chromHMM") unaff_parent_variants$chromHMM_fetal_brain = get_chromHMM(unaff_parent_variants, chromHMM_15state) print(sprintf("Working on chromHMM MAPS for: %s", chromHMM_bed)) m = maps_adjust(unaff_parent_variants, unaff_parent_variants$chromHMM_fetal_brain, maps_lm = maps_lm) counts = table(unaff_parent_variants$chromHMM_fetal_brain) counts = counts[names(m$ps_adjusted)] ps_quiescent = m$ps_adjusted["15_Quies"] se_quiescent = m$standard_error["15_Quies"] counts_quiescent = counts["15_Quies"] tissue = str_match(chromHMM_bed, "E[0-1][0-9][0-9]")[1] pval_df = data.frame(tissue = str_match(chromHMM_bed, "E[0-1][0-9][0-9]")[1]) # to do - use grepl or sub to get E### for tissue maps_points_df = data.frame(tissue = str_match(chromHMM_bed, "E[0-1][0-9][0-9]")[1]) # to do - use grepl or sub to get E### for tissue for (i in seq_along(names(m$ps_adjusted))) { q_count = counts test_statistic = (ps_quiescent - m$ps_adjusted[i])/sqrt(se_quiescent^2 + m$standard_error[i]^2) pval = 
-log10(as.numeric(pt(-abs(test_statistic), counts_quiescent + counts[i] - 2))) maps_points = (m$ps_adjusted[i] - ps_quiescent) if (is.na(pval)) { pval = 0 } pval_df[,names(m$ps_adjusted)[i]] = pval pval_df$score = "pval" maps_points_df[,names(m$ps_adjusted)[i]] = maps_points maps_points_df$score = "MAPS_points" df = rbind(pval_df, maps_points_df) } return(df) } file_list = list.files(pattern = "/lustre/scratch113/projects/ddd/users/ps14/REP/chromHMM/E[0-1][0-9][0-9]_15_coreMarks_mnemonics.bed.gz") noncoding_functional_elements = read.table("~/reference_data/noncoding_elements.probe_overlap.min10_coverage.txt", header = TRUE, sep = "\t") conserved_elements = subset(noncoding_functional_elements, annotation == "Conserved") unaff_parent_variants = read.table("/lustre/scratch113/projects/ddd/users/ps14/parental_unaffected/unaffected_parent_alleles_all_chromosomes.txt", header = TRUE, sep = "\t") unaff_parent_variants_conserved = filter_with_bed(unaff_parent_variants, conserved_elements) all_tissues = lapply(file_list, function(f) get_MAPS_for_tissue_chromHMM(unaff_parent_variants_conserved, f, maps_lm)) df = do.call(rbind, all_tissues) write.table(df, file = "/lustre/scratch113/projects/ddd/users/ps14/REP/MAPS_all_tissues_chromHMM_15_state.txt", col.names = TRUE, quote = FALSE, sep = "\t", row.names = FALSE)
c50603959210c134484c34b1096493bd77353de4
966ad8c38d12e6365338d3dbfe03e7389a08860a
/archive/analysis/elo_check.R
2613a89e89bc890759cc5a518dadd9d78a7d285e
[ "MIT" ]
permissive
turneralex/afl_elo
cd9522e482d73b60139360b9fba034650ff8ccc9
025a5723adf911c4d9a01f69cd13efd7a17c04d9
refs/heads/master
2023-08-16T21:01:06.210181
2023-08-08T04:57:35
2023-08-08T04:57:35
169,377,895
1
0
MIT
2023-08-08T04:57:36
2019-02-06T08:57:05
HTML
UTF-8
R
false
false
5,494
r
elo_check.R
source(here::here("fixture scripts/fixture_all.R")) source(here::here("elo update/functions_general.R")) library(tidyverse) library(ggpubr) elo_par <- read_csv("./new elo/elo_par.csv") %>% deframe() elo_par[3:10] %>% enframe(name = "location", value = "hga") %>% mutate( location = str_replace(location, "hga_", "") %>% toupper() ) %>% ggplot(aes(fct_reorder(location, hga), hga, label = round(hga, 1))) + geom_col() + geom_text(vjust = -1) + scale_y_continuous(limits = c(0, max(elo_par[3:10]) + 5)) + labs(title = "Home Ground Advantage by Location", subtitle = "Season 2010 - 2019 Inclusive", x = "Location", y = "Home Ground Advantage Value") afl_elo <- afl_fixture_all %>% convert_elo_df() %>% elo_run(k = elo_par["k"], hga = elo_par[3:10], regress = elo_par["regress"]) afl_elo afl_elo %>% mutate( win_loss = if_else(score_adjusted > 0.5, 1, 0) %>% factor() ) %>% filter(win_loss == 1) %>% group_by(season) %>% summarise(score_adjusted = mean(score_adjusted), score_expected = mean(score_expected)) %>% pivot_longer( cols = c("score_adjusted", "score_expected"), names_to = "score_type", values_to = "mean" ) %>% mutate( score_type = if_else(score_type == "score_expected", "Expected Score", "Actual Adjusted Score") ) %>% ggplot(aes(season, mean, colour = score_type, group = score_type)) + geom_point(size = 6) + scale_colour_brewer(palette = "Set2") + scale_y_continuous(limits = c(0.4, 0.7)) + labs(title = "Mean Expected & Actual Adjusted Scores", subtitle = "Winning Teams", x = "Season", y = "Mean", colour = "", caption = "Adjusted Score: Winner Share of Total Points") + theme(legend.position = "bottom") mean_sd_score <- afl_elo %>% group_by(match_id) %>% slice(1) %>% pivot_longer( cols = c("score_adjusted", "score_expected"), names_to = "score_type", values_to = "value" ) %>% mutate( score_type = if_else(score_type == "score_expected", "Expected Score", "Actual Adjusted Score") ) %>% group_by(season, score_type) %>% summarise(mean = mean(value), sd = sd(value)) %>% ungroup() 
mean_sd_score afl_elo %>% group_by(match_id) %>% slice(1) %>% pivot_longer( cols = c("score_adjusted", "score_expected"), names_to = "score_type", values_to = "value" ) %>% mutate( score_type = if_else(score_type == "score_expected", "Expected Score", "Actual Adjusted Score") ) %>% ggplot(aes(value)) + geom_histogram(binwidth = 0.01) + geom_vline(data = mean_sd_score, aes(xintercept = mean), colour = "firebrick1", size = 1, alpha = 0.6) + facet_grid(season ~ score_type) + labs(title = "Mean Expected & Actual Adjusted Scores Distributions", subtitle = "Home Teams", x = "Value", y = "Count") afl_elo %>% group_by(match_id) %>% slice(1) %>% pivot_longer( cols = c("score_adjusted", "score_expected"), names_to = "score_type", values_to = "value" ) %>% mutate( score_type = if_else(score_type == "score_expected", "Expected Score", "Actual Adjusted Score") ) %>% ggplot(aes(season, value)) + geom_jitter(aes(colour = season), size = 4, alpha = 0.6, width = 0.25) + geom_point(data = mean_sd_score, aes(season, mean), size = 6) + geom_errorbar(data = mean_sd_score, aes(season, mean, ymin = mean - sd, ymax = mean + sd), width = 0.2, size = 2) + facet_grid(score_type ~ ., scales = "free_y") + scale_colour_manual(values = randomcoloR::distinctColorPalette(10)) + labs(title = "Mean Expected & Actual Adjusted Scores Distributions inc. Error Bars", subtitle = "Home Teams", x = "Season", y = "Value", caption = "Error Bars: 1 Standard Deviation") + theme(legend.position = "none") afl_elo %>% group_by(match_id) %>% slice(1) %>% ggplot(aes(score_expected, score_adjusted)) + geom_point(size = 3, alpha = 0.5) + facet_wrap(. ~ season) + stat_cor(label.x = 0.6, label.y = 0.25, p.accuracy = 0.01) + labs(title = "Mean Expected & Actual Adjusted Scores Distributions inc. 
Error Bars", subtitle = "Home Teams", x = "Expected Score", y = "Actual Adjusted Score", caption = "Error Bars: 1 Standard Deviation") afl_elo %>% group_by(match_id) %>% slice(1) %>% mutate( tip_correct = if_else( (score_expected - 0.5) * (score_adjusted - 0.5) > 0, "Correct", "Incorrect" ) %>% factor() ) %>% ggplot(aes(score_expected, score_adjusted, colour = tip_correct)) + geom_point(size = 3, alpha = 0.5) + facet_wrap(. ~ season) + scale_colour_brewer(palette = "Dark2") + labs(title = "Mean Expected & Actual Adjusted Scores Distributions inc. Error Bars", subtitle = "Home Teams", x = "Expected Score", y = "Actual Adjusted Score", colour = "Tip Result", caption = "Error Bars: 1 Standard Deviation") + theme(legend.position = "bottom")
5df9e9b75f5420addf65838bcfd96b6ee3d82732
42a4ac640afa40dfae8a7e6bdbb7af59a22aa755
/non-app-funs/R/scraping/ts_word_frequency.R
c94b8fd27386e56a07ed31668956dc88efc14777
[]
no_license
Npaffen/Advanced_R_Project
9946ab949351fb5ac8ca49ac9040339ffc55008f
ca89cdd4a6ce0f2abc147a3a94203323f5e0b14a
refs/heads/master
2022-07-15T04:44:58.608442
2022-07-08T07:17:57
2022-07-08T07:17:57
240,293,809
0
0
null
null
null
null
UTF-8
R
false
false
4,430
r
ts_word_frequency.R
library(tidytext) library(stopwords) library(lubridate) library(quanteda) library(Quandl) library(fredr) library(tidyverse) # helper function ---------------------------------------- # this function tokenizes the articles i.e. converts them into # tidy text format--a table with **one-token-per-row**, token == a word. tidy_text <- function(data) { data %>% select(page_num, date, content) %>% unnest_tokens(word, content) %>% anti_join(stop_words, by = "word") %>% dplyr::filter(!str_detect(word, "\\d+")) %>% # remove any digit select(page_num, date, everything()) } # the main function ------------------------------------------------------- # page_num is the number of newspaper page either 01 or 02, start and end_date # define the time-span for the analysis, eng_word is the english word a user is looking for, # econ_data can either be "NASDAQ_CNY" or "dollar_yuan_exch" to plot the eng_word frequency against # this economic indicator ts_word_frequency <- function(page_num = "01", start_date = ymd("2019-01-01"), end_date = today() - 1, eng_word = "outbreak", econ_data = "NASDAQ_CNY") { source(here::here("Chinese.TM.App/R/Scraping/ts_economic_data.R")) economic_data <- ts_economic_data(start_date, end_date, econ_data) # load and process the articles -------------- page_01_files <- list.files("Chinese.TM.App/output", "_page_01_EN.rds$", full.names = TRUE) page_02_files <- list.files("Chinese.TM.App/output", "_page_02_EN.rds$", full.names = TRUE) # check which page should be used for analysis if (page_num == "01") { database <- map_df(page_01_files, read_rds) %>% tidy_text() } else if (page_num == "02") { database <- map_df(page_02_files, read_rds) %>% tidy_text() } # look for the eng_word frequency in the specific time-span, check for lower/upper case of first letter db_filter <- database %>% dplyr::filter(between(date, start_date, end_date)) %>% dplyr::filter(word %in% eng_word | word %in% str_to_title(eng_word) | word %in% str_to_lower(eng_word)) # most common words 
within particular newspaper of page 1 words_by_newspaper_date_page <- db_filter %>% count(date, word, sort = TRUE) %>% ungroup() ##### Finding tf-idf within newspaper of page_num tf_idf <- words_by_newspaper_date_page %>% bind_tf_idf(word, date, n) %>% arrange(date) #### plotting if (econ_data == "dollar_yuan_exch") { colnames(tf_idf)[[3]] <- eng_word tf_idf_yuan <- tf_idf %>% right_join(economic_data, by = "date") %>% select(date, eng_word, value) %>% gather(key = "variable", value = "value", -date) %>% mutate(value = ifelse(is.na(value), 0, value)) tf_idf_yuan %>% ggplot(aes(date, value)) + geom_line(aes(color = variable), size = 1) + ggtitle(str_c("Time Series Word Frequency for", eng_word, "against Dollar/Yuan Exchange Rate", start_date, "-", end_date, sep = " " )) + scale_x_date(date_labels = "%b/%Y", date_breaks = "3 month") + theme_minimal() } else if (econ_data == "NASDAQ_CNY") { tf_idf_NAS <- tf_idf %>% mutate(n = normalize_to_x(n, 100)) colnames(tf_idf_NAS)[[3]] <- eng_word tf_idf_NAS <- tf_idf_NAS %>% right_join(economic_data, by = "date") %>% select(date, NASDAQ_norm, eng_word) %>% gather(key = "variable", value = "value", -date) %>% mutate(value = ifelse(is.na(value), 100, value)) tf_idf_NAS %>% ggplot(aes(date, value)) + geom_line(aes(color = variable), size = 1) + ggtitle(str_c("Time Series Word Frequency for", eng_word, "against NASDAQ_CNY", start_date, "-", end_date, sep = " " )) + scale_x_date(date_labels = "%b/%Y", date_breaks = "3 month") + theme_minimal() + theme(legend.position = "bottom") } else { tf_idf %>% ggplot(aes(date, n)) + geom_line(color = "#00AFBB", size = 1) + stat_smooth(color = "#FC4E07", fill = "#FC4E07", method = "loess") + ggtitle(str_c("Time Series Word Frequency for", eng_word, start_date, "-", end_date, sep = " " )) + scale_x_date(date_labels = "%b/%Y", date_breaks = "3 month") + theme_minimal() + theme(legend.position = "bottom") } }
351ced45d9dcc0af47a4908d2296ebf9fb09c6a1
3570b80c30c4935f4e19169dece31a3bd33d70d0
/R/discounting.model.simulate.R
c30f3e87cfb9f9c9d7fd920bcfac1e8bcd5369bd
[]
no_license
chuanchang/BehavioralEconomics
71d79b27e0cc389ace8a03c8786719109d589141
99252c64f530cb40b5d13242c545de70879f7476
refs/heads/master
2020-12-02T16:55:48.609199
2011-03-08T01:30:04
2011-03-08T01:30:04
null
0
0
null
null
null
null
UTF-8
R
false
false
360
r
discounting.model.simulate.R
discounting.model.simulate <- function(choices, model, parameters) { for (i in 1:nrow(choices)) { x1 <- choices[i, 'X1'] t1 <- choices[i, 'T1'] x2 <- choices[i, 'X2'] t2 <- choices[i, 'T2'] p <- discounting.model.choice.probability(x1, t1, x2, t2, 1, model, parameters) choices[i, 'C'] <- rbinom(1, 1, p) } return(choices) }
0384f75b36a617a730dd96deade84b5eaba1916a
bf98913ace9f5de43f50c3521774a88494c656b0
/calib_indep.R
6b7a6783e1b8a25ca897f38e8eb837f855bd84dd
[]
no_license
baeyc/floral-coverage
19e9e26adebcd51468638f7e37d603fb31401991
ae938144eeb1a38cf11d5c8b0179e99e4334a9f6
refs/heads/master
2020-09-25T03:22:43.951769
2019-12-04T16:40:53
2019-12-04T16:40:53
225,906,110
0
0
null
null
null
null
UTF-8
R
false
false
4,596
r
calib_indep.R
# Floral coverage model #options(echo=FALSE) #args<-commandArgs(trailingOnly = TRUE) #print(args) args <- c("pasture","4","0","0.7","0","0.7","10","1") landuseCat <-args[1] nbrep <- as.integer(args[2]) m1l <- as.numeric(args[3]) m1u <- as.numeric(args[4]) m2l <- as.numeric(args[5]) m2u <- as.numeric(args[6]) nb_mcmc <- as.numeric(args[7]) nb_burn <- as.numeric(args[8]) rm(args) setwd("/home/baeyc/Code/es2cropBB/pollination/calibration/floralCoverage/") # load libraries library(R.oo) library(gridExtra) library(reshape) source("helpers.R") source("multiplots.R") # load data -> we have one matrix of observations per landscape category source("loadObsData.R") data<-eval(parse(text=landuseCat)) data <- na.omit(data) ## Priors # 1. Hyperparameters (to be changed for each landscape category) # prior on the mode and sample size of the beta law for the first period hypMu1<-c(m1l,m1u) hypS1<-c(2,200) # prior on the mode and sample size of the beta law for the second period hypMu2<-c(m2l,m2u) hypS2<-c(2,200) hyper<-list(mu1=hypMu1,mu2=hypMu2,s1=hypS1,s2=hypS2) ptm <- proc.time() # ------------------------ ## MCMC settings # initialization of the algorithm (to be changed if no uniform priors are used) prodLikInit = 0 while(prodLikInit == 0 | is.na(prodLikInit) | is.infinite(prodLikInit)) { meanAdapt<-c(runif(1,hypMu1[1],hypMu1[2]), runif(1,hypMu2[1],hypMu2[2]), runif(1,hypS1[1],hypS1[2]), runif(1,hypS2[1],hypS2[2])) varAdapt<-(2.38/sqrt(4))*rep(1,4) lambda<-rep(1,length(meanAdapt)) state_algo<-new("stateAlgo",probaAcc=numeric(0), lambda=lambda, adaptMean=meanAdapt, adaptVar=varAdapt, accRate=numeric(0)) chain<-data.frame(mu1=numeric(0),mu2=numeric(0),s1=numeric(0),s2=numeric(0)) ar<-data.frame(ar1=numeric(0),ar2=numeric(0),ar3=numeric(0),ar4=numeric(0), l1=numeric(0),l2=numeric(0),l3=numeric(0),l4=numeric(0), p1=numeric(0),p2=numeric(0),p3=numeric(0),p4=numeric(0)) ar[1,]<-c(rep(0,4),lambda,rep(0,4)) chain[1,]<-meanAdapt # making sure the starting value corresponds to a finite 
likelihood value size_data = nrow(data) # define a and b param for beta distriutions according to mode and sample size # for the candidate a1<-chain[1,"mu1"]*(chain[1,"s1"]-2)+1 b1<-(chain[1,"s1"]-2)*(1-chain[1,"mu1"])+1 a2<-chain[1,"mu2"]*(chain[1,"s2"]-2)+1 b2<-(chain[1,"s2"]-2)*(1-chain[1,"mu2"])+1 # we define the two copulas, one for the candidate param and one for the current param mat<-as.matrix(data) likInit <- dbeta(mat[,1],a1,b1)*dbeta(mat[,2],a1,b1)*dbeta(mat[,3],a2,b2)*dbeta(mat[,4],a2,b2) prodLikInit <- prod(likInit) print(prodLikInit) } # Chain settings # Type of algorithm : either "hgs" or "mh" algo<-"hgs" # Type of proposal : either "Gl" or "CW" prop<-"CW" # Type of distribution for the proposal : either "unif", "norm" or "beta" dist<-"norm" # run the MCMC algorithm pb<-txtProgressBar(min=1,max=nb_burn+nb_mcmc,style = 3) plotInterv<-500 mixProp<-TRUE tolConv<-0.005 conv<-FALSE m<-2 # Vector of deviances dev<-rep(0,nb_mcmc) var<-rep(NA,4) bsup<-rep(NA,4) system.time({ while(m < (nb_burn+nb_mcmc)) { if(mixProp){ algo <- "hgs" prop <- sample(c("CW","Gl"), size = 1) dist <- sample(c("unif","norm"), size = 1) } result<-generateMCMC_indep(chain[m-1,1:4],state_algo,algo,prop,dist,hyper,data) chain<-rbind(chain,result$chain) state_algo<-adaptAlgo(state_algo,m,unlist(chain[m,]),result$lambda,result$accRate,algo,prop,stochStep = 0.7) a<-updateAccRate(as.matrix(chain),m,ar[(m-1),]) l<-state_algo@lambda pr<-state_algo@probaAcc ar<-rbind(ar,unlist(c(a,l,pr))) if (m > nb_burn) { a1<-chain$mu1[m]*(chain$s1[m]-2)+1 a2<-chain$mu2[m]*(chain$s2[m]-2)+1 b1<-(chain$s1[m]-2)*(1-chain$mu1[m])+1 b2<-(chain$s2[m]-2)*(1-chain$mu2[m])+1 mat<-as.matrix(data) num<-dbeta(mat[,1],a1,b1)*dbeta(mat[,2],a1,b1)*dbeta(mat[,3],a2,b2)*dbeta(mat[,4],a2,b2) dev[m-nb_burn]<-(-2)*sum(log(na.omit(num))) saveRDS(dev,file=paste(landuseCat,"dev_indep",nbrep,sep="_")) } rownames(chain)<-NULL saveRDS(chain,file=paste(landuseCat,"chain_indep",nbrep,sep="_")) m<-m+1 setTxtProgressBar(pb,m) } }) # 
reset rownames to avoid very long ones chain$iter<-seq(1,nrow(chain)) saveRDS(chain,file=paste(landuseCat,"chain_indep",nbrep,sep="_")) ptmEnd<-proc.time() - ptm print(ptmEnd)
5085ed349bc441103efcb6421dbe9c49e326ab51
f7fb88ca3b6c0b29a3d10ed20df25efa6e4f8602
/man/users_list.Rd
b3234b1332b6f353db5a1a8abb7be67356318566
[]
no_license
cran/civis
9fdf3e2ff64b72fc5c448d02cd621594d0f603b4
e334467f2e8af73bb76c1e80f60e3f6ba460b509
refs/heads/master
2023-04-07T13:39:21.376985
2023-03-31T07:00:03
2023-03-31T07:00:03
103,786,499
0
0
null
null
null
null
UTF-8
R
false
true
2,972
rd
users_list.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/generated_client.R \name{users_list} \alias{users_list} \title{List users} \usage{ users_list( feature_flag = NULL, account_status = NULL, query = NULL, group_id = NULL, group_ids = NULL, organization_id = NULL, exclude_groups = NULL, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL ) } \arguments{ \item{feature_flag}{string optional. Return users that have a feature flag enabled.} \item{account_status}{string optional. The account status by which to filter users. May be one of "active", "inactive", or "all". Defaults to active.} \item{query}{string optional. Return users who match the given query, based on name, user, email, and id.} \item{group_id}{integer optional. The ID of the group by which to filter users. Cannot be present if group_ids is.} \item{group_ids}{array optional. The IDs of the groups by which to filter users. Cannot be present if group_id is.} \item{organization_id}{integer optional. The ID of the organization by which to filter users.} \item{exclude_groups}{boolean optional. Whether or to exclude users' groups. Default: false.} \item{limit}{integer optional. Number of results to return. Defaults to 20. Maximum allowed is 10000.} \item{page_num}{integer optional. Page number of the results to return. Defaults to the first page, 1.} \item{order}{string optional. The field on which to order the result set. Defaults to name. Must be one of: name, user.} \item{order_dir}{string optional. 
Direction in which to sort, either asc (ascending) or desc (descending) defaulting to asc.} } \value{ An array containing the following fields: \item{id}{integer, The ID of this user.} \item{user}{string, The username of this user.} \item{name}{string, The name of this user.} \item{email}{string, The email of this user.} \item{active}{boolean, Whether this user account is active or deactivated.} \item{primaryGroupId}{integer, The ID of the primary group of this user.} \item{groups}{array, An array containing the following fields: \itemize{ \item id integer, The ID of this group. \item name string, The name of this group. \item slug string, The slug of this group. \item organizationId integer, The ID of the organization associated with this group. \item organizationName string, The name of the organization associated with this group. }} \item{createdAt}{string, The date and time when the user was created.} \item{currentSignInAt}{string, The date and time when the user's current session began.} \item{updatedAt}{string, The date and time when the user was last updated.} \item{lastSeenAt}{string, The date and time when the user last visited Platform.} \item{suspended}{boolean, Whether the user is suspended due to inactivity.} \item{createdById}{integer, The ID of the user who created this user.} \item{lastUpdatedById}{integer, The ID of the user who last updated this user.} } \description{ List users }
796fa781b898b08c1a4cef5edbe5abffa1d4c287
f13d80975c1a67bc5a35227c7a6fed5fd60db0a2
/MarkDown/Overlap.R
81613e5398ed820c424041741bbb1ca44fe1f18b
[]
no_license
haoboguo/NetBAS
6b85e1be3381b81b1692659bd8efe6233782c517
babfed11f5ff22fab9fbe543f9b4783a6d68796f
refs/heads/master
2022-10-03T02:04:49.329435
2020-06-09T16:45:28
2020-06-09T16:45:28
151,443,387
3
4
null
null
null
null
UTF-8
R
false
false
1,621
r
Overlap.R
# overlaps of gene sets library('gplots') list.file <- read.csv("list", header=F, stringsAsFactors=F) hmk.set.list <- list.file$V1 dims <- length(hmk.set.list) om <- matrix(0, ncol = dims, nrow = dims) for (i in 1:dims) { ith.name <- paste(hmk.set.list[i], "/", hmk.set.list[i], ".csv", sep="") ith.file <- read.csv(ith.name, header=T, stringsAsFactors=F) ith.set <- ith.file$gene for (j in 1:dims) { jth.name <- paste(hmk.set.list[j], "/", hmk.set.list[j], ".csv", sep="") jth.file <- read.csv(jth.name, header = T, stringsAsFactors = F) jth.set <- jth.file$gene overlap <- length(which(jth.set %in% ith.set)) om[i,j] <- om[i,j] + overlap } } colnames(om) <- hmk.set.list rownames(om) <- hmk.set.list write.table(om, file = "Overlap.Matrix.tab", row.names = T, col.names = T, quote = T) colors = c(seq(0,15,length=10),seq(16,30,length=10),seq(31,200,length=10)) my_palette <- colorRampPalette(c("white", "red2"))(n = 29)3 #png(filename = "Hallmark.Sets.Overlap.Genes.png",width=28, height=28, res=1200, unit="in") pdf("hallmark.overlap.pdf", width=30,height=30, paper='special') heatmap.2(om, trace='none', cellnote = om, dendrogram='none', colsep = 1:50, rowsep = 1:50, sepcolor="lightgrey", sepwidth = c(0.01,0.01), breaks = colors, col=my_palette, Rowv=F, Colv = F, ylab="Hallmark Sets", xlab="Hallmark Sets", adjCol=c(0,0.2), adjRow=c(0,0.2), srtRow=45, srtCol=-45, scale="none", symbreaks=F, symm=F, symkey = F, margins = c(15,15), key.title =NA, key.xlab=NA, key.ylab=NA, cexRow = 0.8, cexCol=0.8) dev.off()
54387bc49f45941cc7b7fd5280f41e1f61d26fed
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/chillR/examples/check_temperature_record.Rd.R
47ee121d120019db6b39212d0f3393956428ee59
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
274
r
check_temperature_record.Rd.R
library(chillR) ### Name: check_temperature_record ### Title: Check a daily or hourly temperature record for compliance with ### chillR's standards ### Aliases: check_temperature_record ### Keywords: utilities ### ** Examples check_temperature_record(KA_weather)
75b79555b700415c96bed042720ec25cef778f56
8c6d0610b0844bf7ed6bfd8dfcfe302b7286a78f
/functions/get_required.R
6c3c8d85bd22ebe0e541c86d8f1dd06828373500
[]
no_license
SzarR/rJAQv3
0fbf619a55278eeb47f90af7a1078e3433050dd9
a65b8f1659adbb8b8d5033c15b904a28001ae7fb
refs/heads/master
2021-03-28T03:18:20.428927
2020-08-25T14:08:40
2020-08-25T14:08:40
247,830,006
0
0
null
null
null
null
UTF-8
R
false
false
617
r
get_required.R
get_required <- function(datum, section) { # Specify variable names. if(section == 'task'){ VarLab_REQU_Task <- paste0("REQU_", 1:TaskNumbers) } if(section == 'ksao'){ VarLab_REQU_Task <- paste0("REQU_", 1:KSAONumbers) } # Run calculations. REQU <- dichot_scale(datum, scale = VarLab_REQU_Task, rounding = 2) REQU_SD <- standard_deviation(datum, scale = VarLab_REQU_Task, rounding = 2) # Save output. if(section == 'task'){ Output.Frame.Task <<- cbind(Output.Frame.Task, REQU, REQU_SD) } if(section == 'ksao'){ Output.Frame.KSAO <<- cbind(Output.Frame.KSAO, REQU, REQU_SD) } }
47dcfca661a5d7be85274412db70734a962157a4
1e39fc5bc9e4f53f63655269d203fd896caeeb00
/R/double.obs.sim.R
ee085eed4a85c0657cabddc64bb16fdb7fac24eb
[]
no_license
m-murchie/dht-bootstrap
0405c921dd2e5dffee3b32e5123fe7eeb672ebb8
9b64c13593b63cfbfbea6756c756b0ea8177fb64
refs/heads/master
2020-04-05T23:14:16.058224
2016-07-07T15:35:08
2016-07-07T15:35:08
60,344,043
1
1
null
null
null
null
UTF-8
R
false
false
2,453
r
double.obs.sim.R
# --- DOUBLE OBSERVER DATA SIM FUNCTION: VERSION 1 --- double.obs.sim <- function(region.obj, design.obj, pop.description.obj, detect.obj.1, detect.obj.2, ddf.analyses.list, seed = 123456, plot=FALSE) { set.seed(seed) ## simulation object and survery results for observer 1 my.simulation <- make.simulation(reps = 1, single.transect.set = TRUE, double.observer = FALSE, region.obj, design.obj, pop.description.obj, detect.obj.1, ddf.analyses.list) survey.results <- create.survey.results(my.simulation, dht.table = TRUE) data <- survey.results@ddf.data@ddf.dat obs.table <- survey.results@obs.table@obs.table objects.1 <- unique(data$object) ## as above, observer 2 set.seed(seed) my.simulation.2 <- make.simulation(reps = 10, single.transect.set = TRUE, double.observer = FALSE, region.obj, design.obj, pop.description.obj, detect.obj.2, ddf.analyses.list) survey.results.2 <- create.survey.results(my.simulation.2, dht.table = TRUE) data.2 <- survey.results.2@ddf.data@ddf.dat obs.table.2 <- survey.results.2@obs.table@obs.table data <- unique(rbind(data, data.2)) obs.table <- unique(rbind(obs.table, obs.table.2)) ## objects dectected by both observers objects.2 <- unique(data.2$object) all.objects <- data$object det.1 <- as.numeric(all.objects %in% objects.1) det.2 <- as.numeric(all.objects %in% objects.2) data <- data[rep(seq_len(nrow(data)), each=2),] detected <- c(rbind(det.1,det.2)) ## tidy up data data <- subset(data, select=-c(x,y)) observer <- rep(1:2, length(all.objects)) data <- cbind(data["object"], observer, detected, data[,2:3]) names(data)[names(data)=="transect.ID"] <- "Sample.Label" obs.table <- obs.table[order(obs.table$Sample.Label),] region.table <- survey.results@region.table@region.table sample.table <- survey.results@sample.table@sample.table ## dht tables tables <- list("data" = data, "region.table" = region.table, "sample.table" = sample.table, "obs.table" = obs.table) if (plot == TRUE) { plot(survey.results) plot(survey.results.2) } return(tables) }
e1424a1a5434e741848c2640747e34ac0eb8d6a1
f5f142e469ba0526a2768a509630c8b5156b1fcb
/man/highlight_distinct_edges.Rd
f1267776d30510980062b5a2b8e0ad612b7edb4f
[]
no_license
JohnMCMa/dendextend
350ca633b439b8964eec739ba9247c9527ae37f4
1e25e5bf786d943b3aa651f4257336462187d43c
refs/heads/master
2021-01-18T16:05:01.686085
2017-03-30T14:15:29
2017-03-30T14:15:29
86,709,713
0
0
null
2017-03-30T14:06:03
2017-03-30T14:06:03
null
UTF-8
R
false
true
2,531
rd
highlight_distinct_edges.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/distinct_edges.R \name{highlight_distinct_edges} \alias{highlight_distinct_edges} \alias{highlight_distinct_edges.dendlist} \alias{highlight_distinct_edges.dendrogram} \title{Highlight distint edges in a tree (compared to another one)} \usage{ highlight_distinct_edges(dend, ...) \method{highlight_distinct_edges}{dendrogram}(dend, dend2, value = 2, edgePar = c("col", "lty", "lwd"), ...) \method{highlight_distinct_edges}{dendlist}(dend, ..., which = c(1L, 2L)) } \arguments{ \item{dend}{a dendrogram or \link{dendlist} to find unique edges in (to highlight)} \item{...}{Ignored.} \item{dend2}{a dendrogram to compare with} \item{value}{a new value scalar for the edgePar attribute.} \item{edgePar}{a character indicating the value inside edgePar to adjust. Can be either "col", "lty", or "lwd".} \item{which}{an integer vector indicating, in the case "dend" is a dendlist, on which of the trees should the modification be performed. If missing - the change will be performed on all of objects in the dendlist.} } \value{ A dendrogram with modified edges - the distinct ones are changed (color, line width, or line type) } \description{ Highlight distint edges in a tree (compared to another one) by changing the branches' color, line width, or line type. This function enables this feature in \link{dend_diff} and \link{tanglegram} } \examples{ x <- 1:5 \%>\% dist \%>\% hclust \%>\% as.dendrogram y <- set(x, "labels", 5:1) distinct_edges(x, y) distinct_edges(y, x) par(mfrow = c(1,2)) plot(highlight_distinct_edges(x, y)) plot(y) # tanglegram(highlight_distinct_edges(x, y),y) # dend_diff(x, y) \dontrun{ # using highlight_distinct_edges combined with dendlist and set # to clearly highlight "stable" branches. 
data(iris); ss <- c(1:5, 51:55, 101:105) iris1 <-iris[ss,-5] \%>\% dist \%>\% hclust(method = "single") \%>\% as.dendrogram iris2 <- iris[ss,-5] \%>\% dist \%>\% hclust(method = "complete") \%>\% as.dendrogram iris12 <- dendlist(iris1, iris2) \%>\% set("branches_k_color",k=3) \%>\% set("branches_lwd", 3) \%>\% highlight_distinct_edges(value = 1, edgePar = "lwd") iris12 \%>\% untangle(method = "step2side") \%>\% tanglegram(sub="Iris dataset", main_left = "'single' clustering", main_right = "'complete' clustering") } } \seealso{ \link{distinct_edges}, \link{highlight_distinct_edges}, \link{dist.dendlist}, \link{tanglegram} \link{assign_values_to_branches_edgePar}, \link[distory]{distinct.edges}, }
2a2c04d9bf9e1bc3fd231de359ede95a31cafe6c
fbdc3731313c3a2398b3b87351c8ca17e12fbab0
/man/ThetaMater.M2.Rd
64044b99bf717e4e385de36524c0652643ceddc3
[]
no_license
radamsRHA/ThetaMater
b51ccacd7f8cc8c39e23363de05afe1d214044f7
02f87295994dcaa11c71c8cfb507cb2ccb23ed75
refs/heads/master
2021-01-20T14:47:04.711591
2020-03-31T16:54:19
2020-03-31T16:54:19
90,657,612
6
1
null
null
null
null
UTF-8
R
false
true
1,357
rd
ThetaMater.M2.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ThetaMater.M2.R \name{ThetaMater.M2} \alias{ThetaMater.M2} \title{ThetaMater.M2: (Bayesian Model 2) MCMC function that uses the MCMCmetrop1R function from MCMCpack with a discrete gamma rate variation model} \usage{ ThetaMater.M2(k.vec, l.vec, n.vec, c.vec, K, alpha.param, ngens, burnin, thin, theta.shape, theta.scale) } \arguments{ \item{k.vec}{Vector of mutation counts} \item{l.vec}{Vector of locus lengths} \item{n.vec}{Vector of sample numbers} \item{c.vec}{Vector of data pattern counts} \item{K}{Number of classese to approximate the gamma distribution} \item{alpha.param}{Shape of the gamma distribution for describing the amount of among-locus rate variation} \item{ngens}{Number of generations to run the MCMC} \item{burnin}{Number of generations to discard from MCMC chain} \item{thin}{Number of generations to thin in the MCMC chain} \item{theta.shape}{Shape parameter of the gamma distribution for setting the prior on theta} \item{theta.scale}{Scale parameter of the gamma distribution for setting the prior on theta} } \description{ This function returns a list of each step in the MCMC sampling chain } \examples{ library(Rcpp) library(ThetaMater) library(MCMCpack) } \keyword{coalescent} \keyword{genetics,} \keyword{models} \keyword{population}
58e32b59bf385464303f443dd8cc34757ed3659b
83cca7265e5a38e1524c531430371844ca20f8a8
/pkg/man/print.summary.attribution.Rd
0169eab0f85198a5d6e5ac23277268403e27d581
[]
no_license
jmarshallnz/islandR
e486c577dc56fb4352f419150e9e987d782ba36d
22e32150bbe61f5f7edc1dc55fb3cbe29e8aff5e
refs/heads/master
2022-10-07T18:22:16.106515
2022-09-26T22:15:40
2022-09-26T22:15:40
38,412,796
0
3
null
2020-05-16T02:51:27
2015-07-02T05:17:11
R
UTF-8
R
false
true
501
rd
print.summary.attribution.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/attribution.R \name{print.summary.attribution} \alias{print.summary.attribution} \title{Print a summary of an attribution object} \usage{ \method{print}{summary.attribution}(x, ...) } \arguments{ \item{x}{an object of class `summary.attribution`, usually, a result of a call to `summary.attribution`} \item{...}{further parameters passed to base print functions} } \description{ Print a summary of an attribution object }
09fae520348c5ea76331a996b234e20b62deb679
996790634a55078a7d1469286e54b7dff5b23b31
/tests/testthat/test-plot_heatmap.R
3246fbd64a4a27648cc5f477bc0fcf6cda8c09ed
[ "MIT" ]
permissive
epiforecasts/scoringutils
d811ff7dc30e0f4ba6639ed6a42897793fe67b54
b7090104e0736f2f59a2b9771171a256f3af3514
refs/heads/main
2023-08-08T22:48:48.957717
2023-07-28T10:19:06
2023-07-28T10:19:06
240,501,300
32
13
NOASSERTION
2023-09-06T13:12:03
2020-02-14T12:16:42
R
UTF-8
R
false
false
349
r
test-plot_heatmap.R
library(ggplot2, quietly = TRUE) test_that("plot_heatmap() works as expected", { scores <- suppressMessages( summarise_scores(scores, by = c("model", "target_type", "range")) ) p <- plot_heatmap(scores, x = "target_type", metric = "bias") expect_s3_class(p, "ggplot") skip_on_cran() vdiffr::expect_doppelganger("plot_heatmap", p) })
0f71d82d49b22fa1f628d147aad0a10306608706
75a592004d51c8722ac896b119826f12c763950e
/R/Fig. 8.r
5fc3103e3b0ac0ffde4a43a4af7ee68f96134bf6
[]
no_license
YaojieLu/Lu-et-al.-2016-JTB
2fcab27d0d2e5bec713d5fd83a415b86c6ddfe9f
0ba71719014ebe868c323e9a0ed1771f4b435cbc
refs/heads/master
2021-01-10T14:40:25.283953
2019-12-08T01:28:59
2019-12-08T01:28:59
49,685,335
0
0
null
null
null
null
UTF-8
R
false
false
1,293
r
Fig. 8.r
data <- dvs data$ca <- as.factor(data$ca) data$k <- as.factor(data$k) Cols <- c("red","darkgreen","blue") #plots windows(8, 6) par(mgp=c(2.2, 1, 0), xaxs="i", yaxs="i", lwd=2, mar=c(4, 4, 1.5, 2), mfrow=c(1,1)) # the effect (ratio) of elevated ca on A & E ca400medium <- subset(data, ca==400 & k==0.05) ca800medium <- subset(data, ca==800 & k==0.05) a <- length(ca400medium$MAP) caA <- data.frame(MAP=numeric(length=a), ratio=numeric(length=a), stringsAsFactors=FALSE) caA$MAP <- ca400medium$MAP caA$ratio <- ca800medium$A/ca400medium$A caE <- data.frame(MAP=numeric(length=a), ratio=numeric(length=a), stringsAsFactors=FALSE) caE$MAP <- ca400medium$MAP caE$ratio <- ca800medium$E/ca400medium$E plot(caA$MAP, caA$ratio, type="l", xlab="", ylab=expression(Ratio), xlim=c(0, 4000), ylim=c(0.5, 2.5), xaxt="n", col=c("red"), cex.lab=1.3 ) lines(caA$MAP, caE$ratio, lty=1, col=c("blue")) abline(h=1, col=c("black"), lwd=2, lty=2) axis(1, xlim=c(0, 4000), pos=0.5, lwd=2) mtext(expression(MAP~(mm~year^-1)),side=1,line=2.5, cex=1.3) legend("topright", expression(italic(bar(A)[c[a]==800]/bar(A)[c[a]==400]), italic(bar(E)[c[a]==800]/bar(E)[c[a]==400])), col=c("red", "blue"), lty=c(1, 1), lwd=c(2, 2)) box() dev.copy2pdf(file="output/figures/Figure 8.pdf")
04b637f15fae25995fc9d8029b0651263f771af8
bd191bc02b74a7efb9e9ec1dfd9af7d1b22a5be6
/covid-report.R
575df7c5088ebfa57c4b9b0a330d1fe8cd244b31
[]
no_license
didempaloglu/covid-report
a2c2439b76c224f9cbc991a53c79e40c1185437c
b0bc6c0540c575d3f8994bb152438a61118ed87a
refs/heads/main
2023-04-11T16:25:51.127521
2021-05-05T22:03:34
2021-05-05T22:03:34
null
0
0
null
null
null
null
UTF-8
R
false
false
507
r
covid-report.R
############################################# # Reproducible Research # # Final Project # # # # A look at the pandemic # # for a specified period. # # # # Huseyin Can Minareci # # Didem Paloglu # # # #############################################
46d226c993bade617513b6ddd079f0b12e65ae0a
606abfc33cfb89b2f1f6858012c9c5d653f0a933
/man/Brt_pre.Rd
41c3d83664657f18a4b9c8a7f6df1fc94b7e0c9c
[]
no_license
mengluchu/APMtools
b29bbe3ced1a9b5fb3930a763415b9fa8ba8f468
2bce8d890b17bc500c45d7b93ed9667f9beb21d5
refs/heads/master
2022-02-23T08:36:10.706066
2022-01-20T14:47:10
2022-01-20T14:47:10
213,626,720
0
0
null
null
null
null
UTF-8
R
false
true
404
rd
Brt_pre.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Brt_pre.R \name{Brt_pre} \alias{Brt_pre} \title{brt predictions} \usage{ Brt_pre( variabledf, opti = F, ntree = 1000, y_varname = c("day_value", "night_value", "value_mean"), interaction.depth = 6, bag.fraction = 0.5, shrinkage = 0.01, training, test, grepstring, ... ) } \description{ brt predictions }
6dca17855bea8c71930f60767e8ff7dd6b1c82b9
8c2d7e0920b7c279f79cf256e9949f7ef7539807
/R/4-variograms.R
06e23498112f0a4907b1b0c829e7163d0dcd86c6
[]
no_license
nabilabd/ATLRUG_spacetime
6f6a7207b9bdaa3c759661fa1ac0fb3b4d3237f7
503d1d492af0896fddb1aa144be9e58790f718e8
refs/heads/master
2021-01-21T04:53:56.452173
2016-06-11T19:07:44
2016-06-11T19:07:44
50,754,281
0
1
null
null
null
null
UTF-8
R
false
false
583
r
4-variograms.R
source("R/1-data.R") library(gstat) # start with an example: calculate sample vgm data(meuse) coordinates(meuse) <- ~ x + y lzn.vgm <- variogram(log(zinc) ~ 1, data = meuse) plot(lzn.vgm) plot(lzn.vgm, pch=16) # inspect visual properties vgm() show.vgms() # fit a model lzn.fit <- fit.variogram(lzn.vgm, model=vgm(1, "Sph", 900, 1)) # compare plot(lzn.vgm, lzn.fit) -# examine structure: vgm model and fit lzn.vgm lzn.vgm %>% str lzn.fit # other models available. Behavior varies based on arguments: vgm(1, "Sph", 900, 1) vgm(2, "Sph", 900, 1) vgm(2, "Exp", 850, 1)
bb88207ed83894d56f49c837128679f0156d4ae8
4353771966887410582d9ce9877a0ebedc981e6d
/Project 2 - Analysis of UN HIV Data/UN_AIDS_Circumcision.R
9a4ecb8ea22687514889c195aa6ce359a218ed87
[]
no_license
LinRZhou/Project-2
dde81e2b5e0fc45e30bf9bf2d80bf3a411b8e419
50d02fc11233dc8cd292566fb14071cc07e5d2eb
refs/heads/master
2020-04-24T17:19:15.689970
2019-02-22T22:28:15
2019-02-22T22:28:15
172,142,963
0
0
null
null
null
null
UTF-8
R
false
false
4,644
r
UN_AIDS_Circumcision.R
library(plyr) library (tidyverse) library(stringr) source("UN_AIDS_Prevalence_Incidence_Project_2.R") #setwd("C:/Users/linra/Documents/UNC Masters Degree/Fall 2018/BIOS 611/Project 2") HIV_prev_df=read_csv("UN_HIV_Prevalence.csv") HIV_inc_df=read_csv("UN_HIV_Incidence.csv") HIV_Circ_df=read_csv("UN_HIV_Circumcision.csv") ##This code drops the "Value Footnotes" column because it does not contribute anything for my plotting. #Renaming of variables is done pre-emptively to prevent confusion on joining tables. #The mutate function is just to make the country or region name fit into the facets. HIV_Circ_df=HIV_Circ_df%>% rename(Country=`Country or Area`,Number=Value,Count=Unit)%>% select(-c("Value Footnotes"))%>% mutate(Country_wrap=str_wrap(Country,width=15)) #This code drops the "Value Footnotes" column because it does not contribute anything for my plotting. #Renaming of variables is done pre-emptively to prevent confusion on joining tables. #Filter functions are used to select modelled estimates for all adults aged 15-49. #The mutate function is just to make the country or region name fit into the facets. HIV_prev_df=HIV_prev_df%>% rename(Country=`Country or Area`,Prevalence=Value,Prev_Unit=Unit)%>% select(-c("Value Footnotes"))%>% filter(!grepl(paste(c("^Males","^Females"),collapse="|"),Subgroup))%>% mutate(Country_wrap=str_wrap(Country,width=15)) #Made a Key variable that contains the same string(s) as the Subgroup variable, but the "modelled" and its preceding whitespace are removed. #This makes it much easier to join the two tables (Prevalence data and Incidence data). HIV_prev_df$Key<-vector("character",nrow(HIV_prev_df)) for (i in 1:nrow(HIV_prev_df)){ HIV_prev_df$Key[i]<-str_sub(HIV_prev_df$Subgroup[i],end=-10) } #This code drops the "Value Footnotes" column because it does not contribute anything for my plotting. #Renaming of variables is done pre-emptively to prevent confusion on joining tables. #Rename Subgroup variable to Key for easier joining. 
#The mutate function is just to make the country or region name fit into the facets. HIV_inc_df=HIV_inc_df%>% rename(Country=`Country or Area`,Incidence=Value,Inc_Unit=Unit)%>% select(-c("Value Footnotes"))%>% rename(Key=Subgroup)%>% mutate(Country_wrap=str_wrap(Country,width=15)) #Perform a left join and then filtered for the non-upper and non-lower modelled estimates HIV_prev_inc=HIV_inc_df%>% left_join(HIV_prev_df,by=c("Country_wrap","Year","Key"))%>% filter(!grepl(paste(c("lower","upper"),collapse="|"),Key)) #Remove Ethiopia from dataset because it is not in the HIV prevalence or incidence data HIV_Circ_Trans=HIV_Circ_df%>% left_join(HIV_prev_inc,by=c("Country_wrap","Year"))%>% filter(Country_wrap!="Ethiopia") #Calculate transmission using incidence/prevalence HIV_Circ_Trans$Transmission<-vector("numeric",nrow(HIV_Circ_Trans)) for (i in 1:nrow(HIV_Circ_Trans)){ x=HIV_Circ_Trans$Incidence[i] y=HIV_Circ_Trans$Prevalence[i] HIV_Circ_Trans$Transmission[i]=Ratio_calc(x,y) } HIV_Circ_Trans$Country_wrap=as.factor(HIV_Circ_Trans$Country_wrap) cor.test(HIV_Circ_Trans$Number,HIV_Circ_Trans$Transmission, alternative="t",method="spearman")->Circ_trans_spear ggplot(data=HIV_Circ_Trans)+ geom_point(mapping=aes(x=Number,y=Transmission,colour=as.factor(Year)))+ facet_wrap(~Country_wrap,nrow=3)+ theme(panel.spacing=unit(0.4,"lines"),axis.text.x=element_text(angle=90,hjust=1),plot.title=element_text(hjust=0.5))+ labs(title=expression(paste("Circumcision Prevalence and HIV Transmission Rates Among Adults (15-49)")), x=expression(paste("Number of Circumcisions (Annual)")), y=expression(paste("Transmission Rate")), colour='Year', caption="The calculated value of the Spearman's rank correlation coefficient across all facets is -0.2637.") ggsave('p2_HIV_Circum_Transmission.png') cor.test(HIV_Circ_Trans$Number,HIV_Circ_Trans$Incidence, alternative="t",method="spearman")->Circ_inc_spear ggplot(data=HIV_Circ_Trans)+ geom_point(mapping=aes(x=Number,y=Incidence,colour=as.factor(Year)))+ 
facet_wrap(~Country_wrap,nrow=3)+ theme(panel.spacing=unit(0.4,"lines"),axis.text.x=element_text(angle=90,hjust=1),plot.title=element_text(hjust=0.5))+ labs(title=expression(paste("Circumcision Prevalence and HIV Incidence Among Adults (15-49)")), x=expression(paste("Number of Circumcisions (Annual)")), y=expression(paste("Incidence Rate (%)")), colour="Year", caption="The calculated value of the Spearman's rank correlation coefficient across all facets is -0.3248.") ggsave('p2_HIV_Circum_Incidence.png')
07dbc4643fce0e58fbfdf15174b464c9f97f05ed
b151b039002a7773d06a95dea652a8684f3dd2d3
/man/derl.Rd
6eca5482b61c6f3ff57c984396e3d3623f2c23af
[ "MIT" ]
permissive
runtastic/sBGmodel
9bb39463e90c0944ac49589dc325b916a94cf090
0d26f593697f682485ddfcaa3f68d54237e37738
refs/heads/master
2020-07-01T14:30:18.078779
2019-08-08T13:19:25
2019-08-08T13:19:25
201,197,152
3
2
null
null
null
null
UTF-8
R
false
true
846
rd
derl.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/customer_performance.R \name{derl} \alias{derl} \title{Calculate discounted expected residual lifetime} \usage{ derl(model, period = 1, discount_rate = 0.25) } \arguments{ \item{model}{a sBG model} \item{period}{a vector with period(s) for which DERL should be calculated. (customer has made period-1 renewals). Default is set to 1.} \item{discount_rate}{a number between 0 and 1 denoting the discount rate. Default is set to 0.25 (recommended value for private businesses).} } \value{ a vector with discounted expected residual lifetime per given period } \description{ \code{derl} computes discounted expected residual lifetime (DERL) for given customer's contract period(s) } \references{ http://brucehardie.com/papers/022/fader_hardie_mksc_10.pdf Equation (6) }
25c602e766ec746b303226797c389d07ae85ce5e
0a11b2535695dc71724334a1a485cfe7b999aaa9
/DMP_ENRICHMENT/CellType_DEG_DMP_Enrichment/CellType_DEG_DML_Correspondence.R
16e924bbfd15faf4ebe1ada0d4aff874006c94f5
[]
no_license
soojinyilab/Schizophrenia_CellType
134af96fd61ec55db9aa040c931517cb735f2ce0
61ab71f654f04cd82941e94e45adea33ad7bd29f
refs/heads/master
2020-04-24T05:07:56.887092
2019-02-13T03:16:39
2019-02-13T03:16:39
171,726,301
1
1
null
2019-02-20T18:21:39
2019-02-20T18:21:39
null
UTF-8
R
false
false
1,980
r
CellType_DEG_DML_Correspondence.R
library(qdap) library(ggpubr) library(cowplot) library(ggthemes) load("CellType_DMR.RData") dge <- read.table("CELLTYPE_LIMMA_DGE.txt") df <- merge(dge,dml, by.x="row.names",by.y="SYMBOL",all=F) df <- na.omit(df) df$Class <- genX(df$annotation, " (", ")") df$Class <- as.factor(df$Class) df$CellType <- ifelse(df$logFC > 0, "OLIG2","NeuN") df$DGE <- ifelse(df$Bonf < 0.05 & abs(df$logFC) > 0.5,"DGE","NOT_DGE") prom <- df[grep("Promoter",df$Class),] pdf("Promoter_CellType_DGE_Methylation.pdf",width=3,height=3,useDingbats=FALSE) ggscatter(prom, x = "areaStat", y = "logFC", color = "DGE",size = 0.1,shape = 21, palette=c("black","lightgrey"), # Add confidence interval cor.coef = TRUE, # Add correlation coefficient. see ?stat_cor cor.coeff.args = list(method = "spearman", label.sep = "\n") )+ theme_classic()+ geom_vline(xintercept = 0, colour = "grey60",linetype="dotted",size=1,alpha=0.5) + geom_hline(yintercept = 0, colour = "grey60",linetype="dotted",size=1,alpha=0.5)+ theme(legend.position=c(0.8,0.9))+ xlim(-3000,+3000) + xlab("Promoter DMR")+ ylab("log2(Fold-Change)")+ ggtitle("OLIG2/NeuN") #+ geom_text_repel(data = top_labelled2, mapping = aes(label = Row.names), size = 2,color = 'black',box.padding = unit(0.1, "lines"),point.padding = unit(0.1, "lines")) dev.off() pdf("CellType_DGE_Methylation.pdf",width=8,height=7,useDingbats=FALSE) ggscatter(df, x = "areaStat", y = "logFC", color = "DGE",size = 0.5,shape = 21, palette=c("black","grey60"), # Add confidence interval cor.coef = TRUE, # Add correlation coefficient. see ?stat_cor cor.coeff.args = list(method = "spearman", label.sep = "\n") )+ theme_classic()+ geom_vline(xintercept = 0, colour = "grey60",linetype="dotted",size=1,alpha=0.5) + geom_hline(yintercept = 0, colour = "grey60",linetype="dotted",size=1,alpha=0.5)+ theme(legend.position="none")+ xlim(-3000,+3000) + xlab("Promoter DMR")+ ylab("log2(Fold-Change)")+ ggtitle("OLIG2/NeuN")+ facet_wrap(~Class) dev.off()
ffa79d56f921a44563863af3bdce978119e83b32
9f2c2d1c1bfe949ec76e6d323d9327d9d400fc24
/Run-WFSims-Autosomal.R
e54b0223c1cf0722421ec922d958bf767e0d4325
[]
no_license
colin-olito/XvAutosomeInversions
fba08c4c05e1df84922a39d25f3c33892c87d89c
cc69ca1a8212439423f58a02482d1ba61d38279f
refs/heads/master
2021-03-22T00:05:22.720462
2018-07-16T22:08:18
2018-07-16T22:08:18
103,689,205
0
1
null
null
null
null
UTF-8
R
false
false
1,318
r
Run-WFSims-Autosomal.R
############################################################## # Wright-Fisher forward simulations of invasion of autosomal # inversion capturing 2 locally adaptive alleles, as well as # possible linked deleterious mutations # # R code for W-F forward simulations. Generates output data # as .csv files saved to ./output/data/simResults. # # # Author: Colin Olito # # NOTES: # rm(list=ls()) ##################### ## Dependencies source('R/functions-figures.R') source('R/functions-WFSims-Autosomal.R') ###################### ## Run Simulations # Locally adaptive alleles recessive recessiveData <- makeReplicateAutoInvSimsData(nReps = 3e+5, N.vals = c(500, 1000), m.vals = c(0.005, 0.01), s = 0.05, h = 0, r = 0.1, n = 100, u = 1e-5, h.del = 0, newMutant = 'random') # Additive fitness effects additiveData <- makeReplicateAutoInvSimsData(nReps = 3e+5, N.vals = c(500, 1000), m.vals = c(0.005, 0.01), s = 0.05, h = 1/2, r = 0.1, n = 100, u = 1e-5, h.del = 0, newMutant = 'random') # Locally adaptive alleles dominant dominantData <- makeReplicateAutoInvSimsData(nReps = 3e+5, N.vals = c(500, 1000), m.vals = c(0.005, 0.01), s = 0.05, h = 1, r = 0.1, n = 100, u = 1e-5, h.del = 0, newMutant = 'random')
5fb871fd5fb9131a9771bef6b88b6b1234f8847f
394b0b27a68e590165d0dfb9243e7b2d5deaf4d5
/man/processZoomParticipantsInfo.Rd
bea220fed6b1c30acb1896ce279d19be11c144ef
[ "MIT" ]
permissive
NastashaVelasco1987/zoomGroupStats
5b414b28e794eecbb9227d4b1cd81d46b00576e4
8f4975f36b5250a72e5075173caa875e8f9f368d
refs/heads/main
2023-05-05T18:23:17.777533
2021-05-24T16:08:23
2021-05-24T16:08:23
null
0
0
null
null
null
null
UTF-8
R
false
true
1,152
rd
processZoomParticipantsInfo.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/processZoomParticipantsInfo.R \name{processZoomParticipantsInfo} \alias{processZoomParticipantsInfo} \title{Process participant information from a Zoom meeting export} \usage{ processZoomParticipantsInfo(inputPath) } \arguments{ \item{inputPath}{character} } \value{ list of two data.frames with parsed information from the downloadable Zoom participants file \itemize{ \item meetInfo - provides the meeting level information that Zoom Cloud gives \item partInfo - provides the participant level information that Zoom Cloud gives } } \description{ This function parses the information from the downloadable meeting information file in Zooms reports section. The function presumes that you have checked the box to include the meeting information in the file. That means that there is a header (2 rows) containing the zoom meeting information. Following that header are four columns: Name of user, user email, total duration, and guest. } \examples{ partInfo = processZoomParticipantsInfo( system.file('extdata', "meeting001_participants.csv", package = 'zoomGroupStats') ) }
847dcd9decb2bb22e65db517bce6a3dec3c1fbfc
be84793607fb7a185eb85f29b42ed504d6cba2e9
/05/src/songsLoading.R
25e67eef5af14c9457fbb7b3383d69caa2eb3584
[]
no_license
laz08/Information-Retrieval
c77b36b0d587f7cd83f9ebf544bdc22b6e28d5c1
4341aa4886a82f19bbd80a67a1ab28878cd3da62
refs/heads/master
2020-04-02T12:57:45.522108
2019-01-03T15:00:07
2019-01-03T15:00:07
154,460,243
0
0
null
null
null
null
UTF-8
R
false
false
7,990
r
songsLoading.R
loadSongs <- function() { if(!LOAD_MERGED_SELECTION){ ################# songdata <- read.csv("./datasets/songdata.csv", encoding="851", stringsAsFactors=FALSE) songdata$link <- NULL songdata$artist <- as.factor(songdata$artist) #View(levels(songdata$artist)) coldplay <- songdata[songdata$artist == 'Coldplay', ] ABBA <- songdata[songdata$artist == 'ABBA', ] Enya <- songdata[songdata$artist == 'Enya', ] Europe <- songdata[songdata$artist == 'Europe', ] Queen <- songdata[songdata$artist == 'Queen', ] Neil.Young <- songdata[songdata$artist == 'Neil Young', ] Pet.Shop.Boys <- songdata[songdata$artist == 'Pet Shop Boys', ] Bruce.Springsteen <- songdata[songdata$artist == 'Bruce Springsteen', ] Phil.Collins <- songdata[songdata$artist == 'Phil Collins', ] aerosmith <- songdata[songdata$artist == 'Aerosmith', ] elo <- songdata[songdata$artist == 'Electric Light Orchestra', ] Elton.John <- songdata[songdata$artist == 'Elton John', ] Evanescense <- songdata[songdata$artist == 'Evanescence',] Grease <- songdata[songdata$artist == 'Grease',] adele <- songdata[songdata$artist == 'Adele', ] bowie <- songdata[songdata$artist == 'David Bowie', ] lana <- songdata[songdata$artist == 'Lana Del Rey', ] lcohen <- songdata[songdata$artist == 'Leonard Cohen', ] maroon5 <- songdata[songdata$artist == 'Maroon 5', ] edsheeran <- songdata[songdata$artist == 'Ed Sheeran', ] beatles <- songdata[songdata$artist == 'The Beatles', ] Pink.Floyd <- songdata[songdata$artist == 'Pink Floyd', ] nrow(Pink.Floyd) ### Laura Selection # View(coldplay[, 1:2]) coldplaySongs = c("3258", "3275", "3290", "3292", "3301", "3321", "3331", "28155", "28169", "28171", "28184", "28188") coldplayLaura <- coldplay[coldplaySongs, ] #View(ABBA[, 1:2]) abbaSongs = c("8", "19", "23", "46", "57", "63", "107", "108") abba.Laura <- ABBA[abbaSongs, ] enyaSongs = c("5266", "31691", "5256") enya.Laura <- Enya[enyaSongs, ] europeSongs <- c("31913", "31926", "31959") europe.Laura <- Europe[europeSongs, ] queenSongs <- c("16529", 
"16545", "16562", "16576", "49281", "49283", "49285", "49299", "49321", "49322", "49377") queen.Laura <- Queen[queenSongs, ] neilSongs <- c("13880") neil.Laura <- Neil.Young[neilSongs, ] psb <- c("15798", "15800", "15802", "15804", "15811", "15814", "15819", "15821", "15837", "15844", "15858", "47958", "47966", "47968", "47970", "47979", "47986", "47996", "48020", "48021") psb.Laura <- Pet.Shop.Boys[psb, ] bruceSongs <- c("2011", "2038", "2053", "26541", "26544", "26584") bruce.Laura <- Bruce.Springsteen[bruceSongs, ] philSongs <- c("15942", "15962", "15974", "16003", "48239") phil.Laura <- Phil.Collins[philSongs, ] beatles.Laura.Songs <- c("1209", "24693", "24695", "24696", "1224", "24750", "24748", "24802", "24818", "24820", "24823") beatles.Laura<- beatles[beatles.Laura.Songs, ] maroon5.L.songs <- c("43092", "12523", "12534") maroon5Laura <- maroon5[maroon5.L.songs, ] aerosmithSongs <- c("23017", "22995") aerosmith.Laura <- aerosmith[aerosmithSongs, ] eloSongs <- c("4744", "30861", "30871", "30891", "30928", "30947") elo.Laura <- elo[eloSongs, ] eltonSongs <- c("4879", "31144") Elton.John.Laura <- Elton.John[eltonSongs, ] EvanescenseSongs <- c("5543") Ev.Laura <- Evanescense[EvanescenseSongs, ] GreaseSongs <- c("34585", "7201", "34578", "34592", "34591") Grease.Laura <- Grease[GreaseSongs, ] laura.selection = rbind(coldplayLaura, abba.Laura) laura.selection = rbind(laura.selection, enya.Laura) laura.selection = rbind(laura.selection, europe.Laura) laura.selection = rbind(laura.selection, queen.Laura) laura.selection = rbind(laura.selection, neil.Laura) laura.selection = rbind(laura.selection, psb.Laura) laura.selection = rbind(laura.selection, bruce.Laura) laura.selection = rbind(laura.selection, phil.Laura) laura.selection = rbind(laura.selection, beatles.Laura) laura.selection = rbind(laura.selection, maroon5Laura) laura.selection = rbind(laura.selection, aerosmith.Laura) laura.selection = rbind(laura.selection, elo.Laura) laura.selection = 
rbind(laura.selection, Elton.John.Laura) laura.selection = rbind(laura.selection, Ev.Laura) laura.selection = rbind(laura.selection, Grease.Laura) nrow(laura.selection) #Carolina selection adeleSongs <- c("134", "138", "139", "143", "22952", "22953", "22954", "22960", "22967", "22969", "22971", "22976", "22977", "22984", "22989", "22990", "22991") adeleC <- adele[adeleSongs, ] bowieSongs <- c("3763", "3810") bowieC <- bowie[bowieSongs, ] lanaSongs <- c("11161", "11167", "11170", "41166", "41169", "41210", "41227", "41234") lanaC <- lana[lanaSongs, ] lcohenSongs <- c("11448", "11453", "11456", "11459", "11461", "11496", "41527", "41541", "41546") lcohenC <- lcohen[lcohenSongs, ] maroon5Songs <- c("12534", "43092", "43101", "43139", "43140", "43141", "43134", "43142", "43146", "43156", "43162", "43170", "43171", "43176", "43178") maroon5C <- maroon5[maroon5Songs, ] edSongs <- c("30753", "30756", "30758", "30759", "30768", "30777") edsheeranC <- edsheeran[edSongs, ] coldplaycSongs <- c("3257", "3260", "3266", "3277", "3307", "3308", "3309", "3319", "3322", "3320", "3325", "28155", "28183", "28188") coldplayC <- coldplay[coldplaycSongs, ] abbacSongs <- c("12", "14", "50", "71", "91", "96", "100", "105", "84") abbaC <- ABBA[abbacSongs, ] pinkfSongs <- c("48440", "48431", "16098") pinkfC <- Pink.Floyd[pinkfSongs, ] beatlesSongs <- c("1209", "1211", "1214", "1223", "1224", "24693", "24695", "24696", "24708", "24748", "24750", "24762", "24764", "24792", "24802", "24806", "24811") beatlesC <- beatles[beatlesSongs, ] c.selection = rbind(adeleC, bowieC) c.selection = rbind(c.selection, lanaC) c.selection = rbind(c.selection, lcohenC) c.selection = rbind(c.selection, maroon5C) c.selection = rbind(c.selection, edsheeranC) c.selection = rbind(c.selection, coldplayC) c.selection = rbind(c.selection, abbaC) c.selection = rbind(c.selection, pinkfC) c.selection = rbind(c.selection, beatlesC) nrow(c.selection) #random songs to guarantee number of nodes >= 200 random.songs <- 
songdata[sample(nrow(songdata), 20, replace = FALSE), ] random.songs$Carolina <- FALSE random.songs$Laura <- FALSE # Tmp songs to check for duplicates all.songs <- rbind(c.selection, laura.selection) # Check there are no duplicated rows. (duplicated.rows = which(duplicated(all.songs) | duplicated(all.songs[nrow(all.songs):1, ])[nrow(all.songs):1])) all.songs[37, -3] # 12534 all.songs[38, -3] # 43092 c.selection$Carolina <- TRUE laura.selection$Laura <- TRUE all.songs2 <- merge(c.selection, laura.selection, all=TRUE) all.songs2 <- merge(all.songs2, random.songs, all = TRUE) all.songs2[is.na(all.songs2$Carolina), ]$Carolina <- FALSE all.songs2[is.na(all.songs2$Laura), ]$Laura <- FALSE # View(all.songs2) write.csv(all.songs2, "./datasets/merged_songs.csv") return(all.songs2) } merged_songs <- read.csv("./datasets/merged_songs.csv", stringsAsFactors = FALSE) merged_songs$X <- NULL return(merged_songs) }
ab7ebe694daacd8960497af9e4b6c0443e8ee391
29585dff702209dd446c0ab52ceea046c58e384e
/causaleffect/R/causal.effect.R
510c164f90e8b12ae777cdee2b10a0767500759d
[]
no_license
ingted/R-Examples
825440ce468ce608c4d73e2af4c0a0213b81c0fe
d0917dbaf698cb8bc0789db0c3ab07453016eab9
refs/heads/master
2020-04-14T12:29:22.336088
2016-07-21T14:01:14
2016-07-21T14:01:14
null
0
0
null
null
null
null
UTF-8
R
false
false
1,883
r
causal.effect.R
#' Identify a causal effect P(y | do(x)) (optionally conditional on z) from a graph.
#'
#' @param y Character vector of outcome variables.
#' @param x Character vector of intervention variables.
#' @param z Optional character vector of conditioning variables (NULL = none).
#' @param G An igraph object describing the causal diagram.
#' @param expr If TRUE, return a formatted expression instead of the internal list.
#' @param simp If TRUE, run the simplification passes on the identified formula.
#' @return The identified expression (string when expr = TRUE, otherwise the
#'   internal probability-object representation).
causal.effect <- function(y, x, z = NULL, G, expr = TRUE, simp = TRUE) {
  # Identification is only defined when the observed part of the graph is acyclic.
  obs_g <- observed.graph(G)
  if (!is.dag(obs_g)) stop("Graph 'G' is not a DAG")

  # Topological ordering of the observed vertices, as vertex names.
  topo <- get.vertex.attribute(G, "name")[topological.sort(obs_g)]

  # Graph augmented with the unobserved-variable structure (see unobserved.graph())
  # and its adjacency matrix, both needed by the simplification passes below.
  G.unobs <- unobserved.graph(G)
  G.Adj <- as.matrix(get.adjacency(G.unobs))

  # All requested variables must actually appear in the graph.
  if (length(setdiff(y, topo)) > 0) stop("Set 'y' contains variables not present in the graph.")
  if (length(setdiff(x, topo)) > 0) stop("Set 'x' contains variables not present in the graph.")
  if (length(z) > 0 && length(setdiff(z, topo)) > 0) stop("Set 'z' contains variables not present in the graph.")

  if (length(z) == 0) {
    # Unconditional effect: run the ID algorithm directly.
    res <- id(y, x, probability(), G, topo)
  } else {
    # Conditional effect: run IDC and express the result as a fraction whose
    # denominator is the same expression additionally summed over y.
    res <- idc(y, x, z, probability(), G, topo)
    denom <- res
    denom$sumset <- union(denom$sumset, y)
    res$fraction <- TRUE
    res$divisor <- denom
  }

  if (simp) {
    # Topological order over the augmented graph, used by the final simplifier.
    topo_u <- get.vertex.attribute(G.unobs, "name")[topological.sort(G.unobs)]

    # Template of an empty fractional probability object used by deconstruct().
    empty_frac <- probability(recursive = TRUE, children = list(), fraction = TRUE,
                              divisor = probability(recursive = TRUE, children = list()))

    res <- deconstruct(res, empty_frac)
    res <- parse.expression(res, topo, G.Adj)
    if (length(res$divisor$children) > 0) {
      # Simplify the denominator too; drop it entirely if it reduces to nothing.
      res$divisor <- parse.expression(res$divisor, topo, G.Adj)
      if (zero.children(res$divisor)) {
        res$fraction <- FALSE
        res$divisor <- list()
      }
    } else {
      res$fraction <- FALSE
      res$divisor <- list()
    }
    res <- deconstruct(res, empty_frac)
    res <- parse.deconstruct(res)
    res <- simplify.expression(res, G.unobs, topo_u)
  }

  if (expr) res <- get.expression(res)
  res
}
cbc9ba5fae9fb4174b86982357aca49390d58741
c326adaafd17d86d9caaa199fe75120f20188acd
/man/overlap_jaccard_mat.Rd
f1eb367d6fe249fe799afe8c4ae65dc773006bc6
[ "BSD-2-Clause" ]
permissive
adrisede/lowcat
4614c881aafccf814e816e880d4ec5fd87fe738f
cdbdcc23e80306d17723dc1cec36c84a62acd95a
refs/heads/master
2022-03-02T21:35:26.457043
2019-10-07T20:45:43
2019-10-07T20:45:43
null
0
0
null
null
null
null
UTF-8
R
false
true
774
rd
overlap_jaccard_mat.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/distances.R \name{overlap_jaccard_mat} \alias{overlap_jaccard_mat} \title{Compute jaccard similarity values based on GenomicRanges overlaps in parallel.} \usage{ overlap_jaccard_mat(fragment_list, n_cores = "auto", cluster_type = "PSOCK") } \arguments{ \item{fragment_list}{The list object containing GenomicRanges objects.} \item{n_cores}{The number of cores to use in parallel. Use "auto" to detect and use all cores. Default is "auto".} \item{cluster_type}{Either "PSOCK" (for Windows) or "FORK" (possible on Linux and Mac). FORK is more memory-efficient.} } \value{ a matrix of jaccard distances } \description{ Compute jaccard similarity values based on GenomicRanges overlaps in parallel. }
511d6396e0a2b184eb9ddd7f7a9ff64a6e832fbf
2fb4cc8514d1a472a96eec3ce9160841de177603
/anniversary_stats.R
d0c08444b8c0afd1d2196c8041ea6248ef7242d5
[]
no_license
georoen/standbildNews
f36aacfd5acdb5ef9e237a02210274c0d877aa9b
ec351e7f09dd3ef7892b3bae27d23f4fcf9184f2
refs/heads/master
2022-10-30T23:56:26.530842
2022-10-11T20:01:46
2022-10-11T20:01:46
83,040,162
3
0
null
2018-07-16T20:51:06
2017-02-24T12:40:37
R
UTF-8
R
false
false
2,576
r
anniversary_stats.R
# Anniversary statistics for the stillframe ("Standbild") news log.
# Reads the scrape log, derives numeric/labelled columns, and draws two plots
# comparing the four German TV news broadcasts.
library(dplyr)
library(lubridate)
library(ggplot2)
library(ggridges)

# Load the log; `date` arrives as character and is parsed to Date.
x <- read.csv("Logfile.csv", stringsAsFactors = FALSE)
x$date <- as.Date(x$date)
x <- as_data_frame(x)

# Latest of the per-show start dates -- presumably used so that all shows are
# compared over a common observation window.
date_all <- x %>%
  distinct(date, sendung) %>%
  group_by(sendung) %>%
  summarize(start_date = min(date), n = n()) %>%
  na.omit() %>%
  pull(start_date) %>%
  max()

# Working table restricted to the common window:
# - prozent ("NN%") -> numeric prozent_num
# - short show codes (h19/hjo/t20/tth) -> full broadcast names, ordered factor
# - res: everything after "/" parsed as seconds (res_sec)
# - frames: "NANA" coerced to "0T"; digits between "F" and "T" markers parsed as
#   censored_frames (format assumed "<a>F<b>T" -- TODO confirm against the logger)
x1 <- x %>%
  filter(date >= date_all) %>%
  mutate(prozent_num = as.numeric(gsub("%", "", prozent))) %>%
  na.omit() %>%
  mutate(sendung = gsub("h19", "ZDF Heute 19 Uhr", sendung),
         sendung = gsub("hjo", "ZDF Heute Journal", sendung),
         sendung = gsub("t20", "ARD Tagesschau", sendung),
         sendung = gsub("tth", "ARD Tagesthemen", sendung),
         res_sec = as.numeric(gsub("^.*/", "", res)),
         sender = ifelse(grepl("ZDF", sendung), "ZDF", "ARD"),
         frames = gsub("NANA", "0T", frames),
         censored_frames = as.numeric(gsub("^[[:digit:]]*F|T", "", frames)),
         censored_secs = res_sec * censored_frames,
         sendung = ordered(sendung, c("ZDF Heute 19 Uhr", "ZDF Heute Journal","ARD Tagesschau", "ARD Tagesthemen")))
x1  # auto-print for interactive inspection

# Per-show totals and maxima (x1max feeds the commented-out annotations below).
x1sum <- x1 %>% group_by(sendung) %>% summarize(sender = first(sender), censored_secs = sum(censored_secs))
x1max <- x1 %>% group_by(sendung) %>% summarize(Prozent = max(prozent_num))

# Plot 1: bar chart ("Treppchen"/podium) of the mean percentage per show,
# faceted by broadcaster
x1mean <- x1 %>% group_by(sender, sendung) %>% summarize(Prozent = mean(prozent_num))
ggplot(x1mean, aes(sendung, Prozent)) +
  geom_col() +
  geom_text(aes(label = round(Prozent,1)), nudge_y = 0.25, size = 20) +
  scale_fill_manual(values = alpha(c("darkorange", "darkgrey", "#3284be", "dodgerblue4"), alpha = 0.7)) +
  facet_wrap(~sender, scales = "free_x")

# Plot 2: joyplot -- these days done with ggridges -- of the per-broadcast
# distribution of the stillframe percentage
x1 %>%
  ggplot(aes(y = sendung, x = prozent_num, group = sendung, fill = sendung)) +
  # geom_boxplot(outlier.size = 0.5) +
  # geom_violin() +
  geom_density_ridges2(color = "lightgrey", scale = 0.5) +
  # geom_point(data = x1max, aes(x = sendung, y = Prozent, color = sendung),
  #            shape = 18, size = 5) +
  # geom_label(data = x1max, aes(x = sendung, y = Prozent, label = paste("max.\n", Prozent, "%"))) +
  theme_minimal() +
  labs(x = "Standbild pro Sendung", y = element_blank()) +
  guides(color = "none", fill = "none") +
  scale_fill_manual(values = alpha(c("darkorange", "darkgrey", "#3284be", "dodgerblue4"), alpha = 0.7)) +
  scale_x_continuous(limits = c(0,40)) #+
  # theme(panel.grid.major.y = element_blank())
  # scale_y_discrete(breaks = NULL)
c587a30477f33def9cdbb33d2cebe0add682a98c
03b184b4221623bc72002f6f5326972e922583bb
/appannie.R
de9505b11ef1b1a5811b213b4b4ff64c7c03ad32
[]
no_license
carolssnz/appannie
4175c2aaf8ec1aa0a194c6ae61957cf91b415b9d
4ba6f91b640dd5ee779c142af5deb26b53c1350e
refs/heads/master
2021-01-21T03:49:45.073607
2014-11-01T14:06:11
2014-11-01T14:06:11
null
0
0
null
null
null
null
UTF-8
R
false
false
2,346
r
appannie.R
#!/usr/bin/R
# For every "Numbers <app>.csv" export, plot three stacked years of daily
# download counts (raw + running averages) and overlay the daily update count
# on a secondary axis. Expects columns: date, num_downloads, num_updates.

# running average function: centered moving average over n days
# (this is stats::filter -- no dplyr is attached in this script)
runavg_n = 7
runavg <- function(x, n=runavg_n) {
  filter(x, rep(1/n, n), sides=2)
}

# write plots to files only when run non-interactively
to_file = !interactive()

for (f in Sys.glob('Numbers *.csv')) {
  # app name, derived from the file name
  n = sub('.csv', '', sub('Numbers ', '', f))

  # open a PDF device unless interactive (the device is pdf(), not PNG)
  if (to_file) {
    pdf(file=paste(paste('Downloads', n, Sys.Date()), 'pdf', sep='.'), width=12, height=7)
  }

  # read CSV and create a true date column
  d = read.csv(f)
  d$stamp = strptime(d$date, "%Y-%m-%d")

  # past year and year before that and the one before that; the slices reverse
  # row order (assumes newest rows come first in the CSV -- TODO confirm),
  # with runavg_n extra rows padding the moving-average edges
  y1 = d[(365+runavg_n):1,]
  y2 = d[(730+runavg_n):366,]
  y3 = d[(1095+runavg_n):731,]
  max_d = max(y1$num_downloads, y2$num_downloads, y3$num_downloads, na.rm=T)

  # month tick positions: first day of each month within the most recent year
  lbl_set = subset(y1, stamp$mday == 1)
  lbls = strftime(lbl_set$stamp, "%b")
  lbls_at = nrow(y1) - as.numeric(rownames(lbl_set))

  # daily downloads: raw series in light "cornsilk" shades, running
  # averages in blues (darker = more recent year)
  par(mar=c(3,5,3,3))
  plot(y3$num_downloads, type='l', main=n, xlab=NULL, xaxt='n',
       ylab="downloads / day", col='cornsilk2', ylim=c(0, max_d))
  axis(1, at=lbls_at, labels=lbls)
  # vertical marker at January 1st (yday == 0)
  abline(v=nrow(y1) - as.numeric(rownames(subset(y1, stamp$yday == 0))))
  lines(runavg(y3$num_downloads), lwd=2, col='cornflowerblue')
  lines(y2$num_downloads, col='cornsilk3')
  lines(runavg(y2$num_downloads), lwd=3, col='blue')
  lines(y1$num_downloads, col='cornsilk4')
  lines(runavg(y1$num_downloads), lwd=4, col='blue4')

  # number of updates (scaled to fit the download axis; true scale drawn on
  # the right-hand axis)
  max_upd = max(d$num_updates, na.rm=T)
  factor = max_upd / max_d
  lines(d[(365+runavg_n):1,]$num_updates / factor, lty='dotted')
  axis(4, at=c(0, max_d), labels=c(0, max_upd))

  # legend: per-year download totals (years 2 and 3 only when they have data)
  legend('topleft', c(
    paste(paste(min(y1$stamp$year + 1900, na.rm=T),
                max(y1$stamp$year + 1900, na.rm=T), sep='-'),
          format(sum(y1$num_downloads, na.rm=T), big.mark=','), sep=': '),
    ifelse(sum(y2$num_downloads, na.rm=T) > 0,
           paste(paste(min(y2$stamp$year + 1900, na.rm=T),
                       max(y2$stamp$year + 1900, na.rm=T), sep='-'),
                 format(sum(y2$num_downloads, na.rm=T), big.mark=','), sep=': '),
           ''),
    ifelse(sum(y3$num_downloads, na.rm=T) > 0,
           paste(paste(min(y3$stamp$year + 1900, na.rm=T),
                       max(y3$stamp$year + 1900, na.rm=T), sep='-'),
                 format(sum(y3$num_downloads, na.rm=T), big.mark=','), sep=': '),
           ''),
    "Updates"
  ), lwd=c(4,3,2,1), lty=c('solid', 'solid', 'solid', 'dotted'),
  col=c('blue4', 'blue', 'cornflowerblue', 'black'))

  if (to_file) {
    dev.off()
  }
}
df52a11f88f6da6d8defc03cdea332959fc7121d
0cc863fed706b96df0c44afe7d466cff23228049
/R/ZTNegativeBinomial.R
4e3878b779bc0c26d94b2fecd979951efd85dcef
[ "MIT" ]
permissive
alexpghayes/distributions3
80a96665b4dabe2300908d569cb74de3cc75b151
67d27df128c86d80fe0c903b5b2c8af1fb9b0643
refs/heads/main
2023-01-27T14:49:47.588553
2023-01-18T18:12:22
2023-01-18T18:12:22
185,505,802
52
11
NOASSERTION
2023-01-18T18:12:24
2019-05-08T01:38:24
R
UTF-8
R
false
false
14,980
r
ZTNegativeBinomial.R
#' The zero-truncated negative binomial distribution
#'
#' Density, distribution function, quantile function, and random
#' generation for the zero-truncated negative binomial distribution with
#' parameters \code{mu} and \code{theta} (or \code{size}).
#'
#' The negative binomial distribution left-truncated at zero (or zero-truncated
#' negative binomial for short) is the distribution obtained, when considering
#' a negative binomial variable Y conditional on Y being greater than zero.
#'
#' All functions follow the usual conventions of d/p/q/r functions
#' in base R. In particular, all four \code{ztnbinom} functions for the
#' zero-truncated negative binomial distribution call the corresponding \code{nbinom}
#' functions for the negative binomial distribution from base R internally.
#'
#' @aliases dztnbinom pztnbinom qztnbinom rztnbinom
#'
#' @param x vector of (non-negative integer) quantiles.
#' @param q vector of quantiles.
#' @param p vector of probabilities.
#' @param n number of random values to return.
#' @param mu vector of (non-negative) negative binomial location parameters.
#' @param theta,size vector of (non-negative) negative binomial overdispersion parameters.
#' Only \code{theta} or, equivalently, \code{size} may be specified.
#' @param log,log.p logical indicating whether probabilities p are given as log(p).
#' @param lower.tail logical indicating whether probabilities are \eqn{P[X \le x]} (lower tail) or \eqn{P[X > x]} (upper tail).
#'
#' @seealso \code{\link{ZTNegativeBinomial}}, \code{\link{dnbinom}}
#'
#' @keywords distribution
#'
#' @examples
#' ## theoretical probabilities for a zero-truncated negative binomial distribution
#' x <- 0:8
#' p <- dztnbinom(x, mu = 2.5, theta = 1)
#' plot(x, p, type = "h", lwd = 2)
#'
#' ## corresponding empirical frequencies from a simulated sample
#' set.seed(0)
#' y <- rztnbinom(500, mu = 2.5, theta = 1)
#' hist(y, breaks = -1:max(y) + 0.5)
#'
#' @importFrom stats dnbinom pnbinom
#' @rdname ztnbinom
#' @export
dztnbinom <- function(x, mu, theta, size, log = FALSE) {
  if(!missing(theta) && !missing(size)) stop("only 'theta' or 'size' may be specified")
  if(!missing(size)) theta <- size
  ## log f(x | X > 0) = log f(x) - log P(X > 0)
  rval <- dnbinom(x, mu = mu, size = theta, log = TRUE) -
    pnbinom(0, mu = mu, size = theta, lower.tail = FALSE, log.p = TRUE)
  ## no mass outside the truncated support {1, 2, ...}
  rval[x < 1] <- -Inf
  ## degenerate limit mu -> 0: all mass collapses onto x = 1
  rval[mu <= 0] <- -Inf
  rval[(mu <= 0) & (x == 1)] <- 0
  if(log) rval else exp(rval)
}

#' @importFrom stats pnbinom dnbinom
#' @rdname ztnbinom
#' @export
pztnbinom <- function(q, mu, theta, size, lower.tail = TRUE, log.p = FALSE) {
  if(!missing(theta) && !missing(size)) stop("only 'theta' or 'size' may be specified")
  if(!missing(size)) theta <- size
  if(lower.tail) {
    ## P(X <= q | X > 0) = (F(q) - f(0)) / (1 - F(0))
    rval <- log(pnbinom(q, mu = mu, size = theta, lower.tail = TRUE, log.p = FALSE) -
                  dnbinom(0, mu = mu, size = theta)) -
      pnbinom(0, mu = mu, size = theta, lower.tail = FALSE, log.p = TRUE)
  } else {
    ## P(X > q | X > 0) = S(q) / S(0). The zero mass must NOT be subtracted in
    ## this branch: the previous code subtracted dnbinom(0) for both tails,
    ## which made the upper tail inconsistent with 1 - lower tail.
    rval <- pnbinom(q, mu = mu, size = theta, lower.tail = FALSE, log.p = TRUE) -
      pnbinom(0, mu = mu, size = theta, lower.tail = FALSE, log.p = TRUE)
  }
  ## below the support: cdf is 0 (log -Inf), survival is 1 (log 0)
  rval[q < 1] <- if(lower.tail) -Inf else 0
  if(log.p) rval else exp(rval)
}

#' @importFrom stats qnbinom pnbinom dnbinom
#' @rdname ztnbinom
#' @export
qztnbinom <- function(p, mu, theta, size, lower.tail = TRUE, log.p = FALSE) {
  if(!missing(theta) && !missing(size)) stop("only 'theta' or 'size' may be specified")
  if(!missing(size)) theta <- size
  p_orig <- p
  p <- if(log.p) p else log(p)
  ## map the truncated probability to the untruncated scale: multiply by
  ## P(X > 0), and for the lower tail additionally shift by F(0). (The
  ## previous code added dnbinom(0) for both tails, which was wrong for
  ## lower.tail = FALSE.)
  p <- p + pnbinom(0, mu = mu, size = theta, lower.tail = FALSE, log.p = TRUE)
  p <- if(lower.tail) exp(p) + dnbinom(0, mu = mu, size = theta) else exp(p)
  rval <- qnbinom(p, mu = mu, size = theta, lower.tail = lower.tail, log.p = FALSE)
  ## clamp to the truncated support {1, 2, ...}
  if(lower.tail) {
    rval[p_orig < dztnbinom(1, mu = mu, theta = theta, log = log.p)] <- 1
  } else {
    rval[rval < 1] <- 1
  }
  rval
}

#' @importFrom stats runif
#' @rdname ztnbinom
#' @export
rztnbinom <- function(n, mu, theta, size) {
  if(!missing(theta) && !missing(size)) stop("only 'theta' or 'size' may be specified")
  if(!missing(size)) theta <- size
  ## inversion sampling through the truncated quantile function
  qztnbinom(runif(n), mu = mu, theta = theta)
}

#' Create a zero-truncated negative binomial distribution
#'
#' Zero-truncated negative binomial distributions are frequently used to model counts
#' where zero observations cannot occur or have been excluded.
#'
#' @param mu Location parameter of the negative binomial component of the distribution.
#'   Can be any positive number.
#' @param theta Overdispersion parameter of the negative binomial component of the distribution.
#'   Can be any positive number.
#'
#' @return A `ZTNegativeBinomial` object.
#' @export
#'
#' @family discrete distributions
#'
#' @details
#'
#' We recommend reading this documentation on
#' <https://alexpghayes.github.io/distributions3/>, where the math
#' will render with additional detail.
#'
#' In the following, let \eqn{X} be a zero-truncated negative binomial random variable with parameter
#' `mu` = \eqn{\mu}.
#'
#' **Support**: \eqn{\{1, 2, 3, ...\}}{{1, 2, 3, ...}}
#'
#' **Mean**:
#' \deqn{
#'   \mu \cdot \frac{1}{1 - F(0; \mu, \theta)}
#' }{
#'   \mu \cdot 1/(1 - F(0; \mu, \theta))
#' }
#'
#' where \eqn{F(k; \mu, \theta)} is the c.d.f. of the \code{\link{NegativeBinomial}} distribution.
#'
#' **Variance**: \eqn{m \cdot (\mu/\theta + \mu + 1 - m)}, where \eqn{m} is the
#' mean above. (This matches the implementation in `variance()`; the previously
#' documented \eqn{m \cdot (\mu + 1 - m)} omitted the overdispersion term.)
#'
#' **Probability mass function (p.m.f.)**:
#'
#' \deqn{
#'   P(X = k) = \frac{f(k; \mu, \theta)}{1 - F(0; \mu, \theta)}
#' }{
#'   P(X = k) = f(k; \mu, \theta)/(1 - F(0; \mu, \theta))
#' }
#'
#' where \eqn{f(k; \mu, \theta)} is the p.m.f. of the \code{\link{NegativeBinomial}}
#' distribution.
#'
#' **Cumulative distribution function (c.d.f.)**:
#'
#' \deqn{
#'   P(X = k) = \frac{F(k; \mu, \theta)}{1 - F(0; \mu, \theta)}
#' }{
#'   P(X = k) = F(k; \mu, \theta)/(1 - F(0; \mu, \theta))
#' }
#'
#' **Moment generating function (m.g.f.)**:
#'
#' Omitted for now.
#'
#' @examples
#' ## set up a zero-truncated negative binomial distribution
#' X <- ZTNegativeBinomial(mu = 2.5, theta = 1)
#' X
#'
#' ## standard functions
#' pdf(X, 0:8)
#' cdf(X, 0:8)
#' quantile(X, seq(0, 1, by = 0.25))
#'
#' ## cdf() and quantile() are inverses for each other
#' quantile(X, cdf(X, 3))
#'
#' ## density visualization
#' plot(0:8, pdf(X, 0:8), type = "h", lwd = 2)
#'
#' ## corresponding sample with histogram of empirical frequencies
#' set.seed(0)
#' x <- random(X, 500)
#' hist(x, breaks = -1:max(x) + 0.5)
ZTNegativeBinomial <- function(mu, theta) {
  d <- data.frame(mu = mu, theta = theta)
  class(d) <- c("ZTNegativeBinomial", "distribution")
  return(d)
}

#' @export
mean.ZTNegativeBinomial <- function(x, ...) {
  ellipsis::check_dots_used()
  ## E[X | X > 0] = mu / P(X > 0); degenerate point mass at 1 when mu <= 0
  m <- x$mu / pnbinom(0, mu = x$mu, size = x$theta, lower.tail = FALSE)
  m[x$mu <= 0] <- 1
  setNames(m, names(x))
}

#' @export
variance.ZTNegativeBinomial <- function(x, ...) {
  ellipsis::check_dots_used()
  m <- x$mu / pnbinom(0, mu = x$mu, size = x$theta, lower.tail = FALSE)
  m[x$mu <= 0] <- 1
  ## Var = E[X^2 | X > 0] - m^2, rearranged to m * (mu/theta + mu + 1 - m)
  v <- m * (x$mu/x$theta + x$mu + 1 - m)
  setNames(v, names(x))
}

#' @export
skewness.ZTNegativeBinomial <- function(x, ...) {
  ## not implemented yet: the draft below is unreachable (kept as a sketch)
  stop("not implemented yet")
  ellipsis::check_dots_used()
  f <- 1 / pnbinom(0, mu = x$mu, size = x$theta, lower.tail = FALSE)
  m <- x$mu * f
  s <- sqrt(m * (x$mu/x$theta + x$mu + 1 - m))
  ## FIXME: E[X^3] would be needed here
  rval <- (f * (x$mu + 3 * x$mu^2 + x$mu^3) - 3 * m * s^2 - m^3) / s^3
  rval[x$mu <= 0] <- NaN
  setNames(rval, names(x))
}

#' @export
kurtosis.ZTNegativeBinomial <- function(x, ...) {
  ## not implemented yet: the draft below is unreachable (kept as a sketch)
  stop("not implemented yet")
  ellipsis::check_dots_used()
  f <- 1 / pnbinom(0, mu = x$mu, size = x$theta, lower.tail = FALSE)
  m <- x$mu * f
  s2 <- m * (x$mu/x$theta + x$mu + 1 - m)
  ## FIXME: E[X^4] would be needed here
  rval <- ( f * (x$mu + 7 * x$mu^2 + 6 * x$mu^3 + x$mu^4)
            - 4 * m * f * (x$mu + 3 * x$mu^2 + x$mu^3)
            + 6 * m^2 * f * (x$mu + x$mu^2)
            - 3 * m^4 ) / s2^2 - 3
  rval[x$mu <= 0] <- NaN
  setNames(rval, names(x))
}

#' Draw a random sample from a zero-truncated negative binomial distribution
#'
#' @inherit ZTNegativeBinomial examples
#'
#' @param x A `ZTNegativeBinomial` object created by a call to [ZTNegativeBinomial()].
#' @param n The number of samples to draw. Defaults to `1L`.
#' @param drop logical. Should the result be simplified to a vector if possible?
#' @param ... Unused. Unevaluated arguments will generate a warning to
#'   catch mispellings or other possible errors.
#'
#' @return In case of a single distribution object or `n = 1`, either a numeric
#'   vector of length `n` (if `drop = TRUE`, default) or a `matrix` with `n` columns
#'   (if `drop = FALSE`).
#' @export
#'
random.ZTNegativeBinomial <- function(x, n = 1L, drop = TRUE, ...) {
  n <- make_positive_integer(n)
  if (n == 0L) {
    return(numeric(0L))
  }
  FUN <- function(at, d) rztnbinom(n = at, mu = d$mu, theta = d$theta)
  apply_dpqr(d = x, FUN = FUN, at = n, type = "random", drop = drop)
}

#' Evaluate the probability mass function of a zero-truncated negative binomial distribution
#'
#' @inherit ZTNegativeBinomial examples
#'
#' @param d A `ZTNegativeBinomial` object created by a call to [ZTNegativeBinomial()].
#' @param x A vector of elements whose probabilities you would like to
#'   determine given the distribution `d`.
#' @param drop logical. Should the result be simplified to a vector if possible?
#' @param elementwise logical. Should each distribution in \code{d} be evaluated
#'   at all elements of \code{x} (\code{elementwise = FALSE}, yielding a matrix)?
#'   Or, if \code{d} and \code{x} have the same length, should the evaluation be
#'   done element by element (\code{elementwise = TRUE}, yielding a vector)? The
#'   default of \code{NULL} means that \code{elementwise = TRUE} is used if the
#'   lengths match and otherwise \code{elementwise = FALSE} is used.
#' @param ... Arguments to be passed to \code{\link{dztnbinom}}.
#'   Unevaluated arguments will generate a warning to catch mispellings or other
#'   possible errors.
#'
#' @return In case of a single distribution object, either a numeric
#'   vector of length `probs` (if `drop = TRUE`, default) or a `matrix` with
#'   `length(x)` columns (if `drop = FALSE`). In case of a vectorized distribution
#'   object, a matrix with `length(x)` columns containing all possible combinations.
#' @export
#'
pdf.ZTNegativeBinomial <- function(d, x, drop = TRUE, elementwise = NULL, ...) {
  FUN <- function(at, d) dztnbinom(x = at, mu = d$mu, theta = d$theta, ...)
  apply_dpqr(d = d, FUN = FUN, at = x, type = "density", drop = drop, elementwise = elementwise)
}

#' @rdname pdf.ZTNegativeBinomial
#' @export
#'
log_pdf.ZTNegativeBinomial <- function(d, x, drop = TRUE, elementwise = NULL, ...) {
  FUN <- function(at, d) dztnbinom(x = at, mu = d$mu, theta = d$theta, log = TRUE)
  apply_dpqr(d = d, FUN = FUN, at = x, type = "logLik", drop = drop, elementwise = elementwise)
}

#' Evaluate the cumulative distribution function of a zero-truncated negative binomial distribution
#'
#' @inherit ZTNegativeBinomial examples
#'
#' @param d A `ZTNegativeBinomial` object created by a call to [ZTNegativeBinomial()].
#' @param x A vector of elements whose cumulative probabilities you would
#'   like to determine given the distribution `d`.
#' @param drop logical. Should the result be simplified to a vector if possible?
#' @param elementwise logical. Should each distribution in \code{d} be evaluated
#'   at all elements of \code{x} (\code{elementwise = FALSE}, yielding a matrix)?
#'   Or, if \code{d} and \code{x} have the same length, should the evaluation be
#'   done element by element (\code{elementwise = TRUE}, yielding a vector)? The
#'   default of \code{NULL} means that \code{elementwise = TRUE} is used if the
#'   lengths match and otherwise \code{elementwise = FALSE} is used.
#' @param ... Arguments to be passed to \code{\link{pztnbinom}}.
#'   Unevaluated arguments will generate a warning to catch mispellings or other
#'   possible errors.
#'
#' @return In case of a single distribution object, either a numeric
#'   vector of length `probs` (if `drop = TRUE`, default) or a `matrix` with
#'   `length(x)` columns (if `drop = FALSE`). In case of a vectorized distribution
#'   object, a matrix with `length(x)` columns containing all possible combinations.
#' @export
#'
cdf.ZTNegativeBinomial <- function(d, x, drop = TRUE, elementwise = NULL, ...) {
  FUN <- function(at, d) pztnbinom(q = at, mu = d$mu, theta = d$theta, ...)
  apply_dpqr(d = d, FUN = FUN, at = x, type = "probability", drop = drop, elementwise = elementwise)
}

#' Determine quantiles of a zero-truncated negative binomial distribution
#'
#' `quantile()` is the inverse of `cdf()`.
#'
#' @inherit ZTNegativeBinomial examples
#' @inheritParams random.ZTNegativeBinomial
#'
#' @param probs A vector of probabilities.
#' @param drop logical. Should the result be simplified to a vector if possible?
#' @param elementwise logical. Should each distribution in \code{x} be evaluated
#'   at all elements of \code{probs} (\code{elementwise = FALSE}, yielding a matrix)?
#'   Or, if \code{x} and \code{probs} have the same length, should the evaluation be
#'   done element by element (\code{elementwise = TRUE}, yielding a vector)? The
#'   default of \code{NULL} means that \code{elementwise = TRUE} is used if the
#'   lengths match and otherwise \code{elementwise = FALSE} is used.
#' @param ... Arguments to be passed to \code{\link{qztnbinom}}.
#'   Unevaluated arguments will generate a warning to catch mispellings or other
#'   possible errors.
#'
#' @return In case of a single distribution object, either a numeric
#'   vector of length `probs` (if `drop = TRUE`, default) or a `matrix` with
#'   `length(probs)` columns (if `drop = FALSE`). In case of a vectorized
#'   distribution object, a matrix with `length(probs)` columns containing all
#'   possible combinations.
#' @export
#'
quantile.ZTNegativeBinomial <- function(x, probs, drop = TRUE, elementwise = NULL, ...) {
  FUN <- function(at, d) qztnbinom(p = at, mu = d$mu, theta = d$theta, ...)
  apply_dpqr(d = x, FUN = FUN, at = probs, type = "quantile", drop = drop, elementwise = elementwise)
}

#' Return the support of the zero-truncated negative binomial distribution
#'
#' @param d An `ZTNegativeBinomial` object created by a call to [ZTNegativeBinomial()].
#' @param drop logical. Should the result be simplified to a vector if possible?
#' @param ... Currently not used.
#'
#' @return A vector of length 2 with the minimum and maximum value of the support.
#'
#' @export
support.ZTNegativeBinomial <- function(d, drop = TRUE, ...) {
  ellipsis::check_dots_used()
  ## truncated support is {1, 2, ...} for every distribution in the vector
  min <- rep(1, length(d))
  max <- rep(Inf, length(d))
  make_support(min, max, d, drop = drop)
}

#' @exportS3Method
is_discrete.ZTNegativeBinomial <- function(d, ...) {
  ellipsis::check_dots_used()
  setNames(rep.int(TRUE, length(d)), names(d))
}

#' @exportS3Method
is_continuous.ZTNegativeBinomial <- function(d, ...) {
  ellipsis::check_dots_used()
  setNames(rep.int(FALSE, length(d)), names(d))
}

## FIXME: currently no fit_mle.ZTNegativeBinomial and suff_stat.ZTNegativeBinomial
38bc7e7e068e9704103a2d7db54fe169f6696744
ad67aec0b4bbc865465e37368b7660c0488c86a1
/man/HTable_row_italic.Rd
4ec09e1014dd0e62b5b1b18b63339c0069dab09f
[]
no_license
fazetu/htable
b6dadc033fb2081a14236d18166aacb067347c7d
8924560ba8ec6fa0e364db30ae2897b69ae6ef16
refs/heads/master
2021-07-19T09:42:55.501605
2020-02-07T21:48:50
2020-02-07T21:48:50
195,873,455
0
0
null
2019-11-12T15:36:30
2019-07-08T19:24:42
R
UTF-8
R
false
true
640
rd
HTable_row_italic.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/row_styles.R \name{HTable_row_italic} \alias{HTable_row_italic} \title{Add an italic style to a row} \usage{ obj$row_italic(row = NULL, include_header = FALSE) } \arguments{ \item{row}{Numeric vector of which rows to target.} \item{include_header}{Boolean if \code{row} should include the header as the 1st index. E.g. if \code{FALSE}, \code{row = 1} will target the first row, but if \code{TRUE}, \code{row = 1} will target the header.} } \description{ Add an italic style to each <td> and, optionally, <th> tag in a row. Changes the \code{styles} field. }
8375d2c97659fc5056a80c60576c4d866e0ffd97
34231281a76d728e59946edd2720126ec86248ba
/plot4.R
be9095af2da5abdd9296d156107db7a7fe987de4
[]
no_license
ajonikoyi/ExData_Plotting1
192914b8ca62dfcfe7a37b705d0de1fbbd5d2787
56aedd4cad04a44d291117df63085f4f876d3af6
refs/heads/master
2021-01-20T14:15:15.181896
2017-05-08T05:41:42
2017-05-08T05:41:42
90,574,670
0
0
null
2017-05-08T01:36:56
2017-05-08T01:36:56
null
UTF-8
R
false
false
1,157
r
plot4.R
## Plot 4: a 2x2 panel of household power-consumption time series for
## 2007-02-01 and 2007-02-02, written to plot4.png.

## Read the full data set (";"-separated, "?" marks missing values) and keep
## only the two target days.
raw <- read.table("./household_power_consumption.txt", sep = ";", na.strings = "?", header = TRUE)
powerconsumption <- subset(raw, raw$Date == "1/2/2007" | raw$Date == "2/2/2007")

## Combine the first two columns (date and time) into one POSIXct timestamp
## so the x axis shows a continuous time scale.
powerconsumption$weekdayy <- as.POSIXct(strptime(paste(powerconsumption[, 1], powerconsumption[, 2]),
                                                 format = "%d/%m/%Y %H:%M:%S"))

## Open the PNG graphics device.
png(file = "plot4.png")

## 2x2 panel layout with bold axis and label fonts.
par(mfrow = c(2, 2), font.axis = 2, font.lab = 2)

## Top-left: global active power.
with(powerconsumption, plot(weekdayy, Global_active_power, type = "l", xlab = "",
                            ylab = "Global active power (kilowatts)"))
## Top-right: voltage.
with(powerconsumption, plot(weekdayy, Voltage, type = "l", xlab = "datetime"))
## Bottom-left: the three sub-metering series overlaid, with a legend.
with(powerconsumption, plot(weekdayy, Sub_metering_1, type = "l", xlab = "",
                            ylab = "Energy sub metering"))
with(powerconsumption, lines(weekdayy, Sub_metering_2, type = "l", col = "red"))
with(powerconsumption, lines(weekdayy, Sub_metering_3, type = "l", col = "blue"))
legend("topright", lty = c(1, 1, 1), col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
## Bottom-right: global reactive power.
with(powerconsumption, plot(weekdayy, Global_reactive_power, type = "l", xlab = "datetime"))

## Turn the device off, flushing the file to disk.
dev.off()
68755053bfa225718171507bc5255d98fe694867
ba07f5cbc690640115108e4ee07b46ef8340e5fe
/DA3-labs/airbnb_london_prediction.R
f7f4e3f2b7a68ac328d3e5b78827a876002d58f4
[]
no_license
ozkrleal/london-prediction-r
08a16f4c6b3416d57d3b2cea24b10c797eafed41
f81488a92dae37b7e54074d6ebb76b62f95fbfa7
refs/heads/master
2020-12-20T13:12:59.718332
2020-02-15T00:32:02
2020-02-15T00:32:02
236,085,457
0
0
null
null
null
null
UTF-8
R
false
false
29,575
r
airbnb_london_prediction.R
############################################################ # # DATA ANALYSIS TEXTBOOK # MODEL SELECTION # CASE STUDY # Ch 14 # Airbnb London 2017 march 05 data # v1.4 2019 09 05 # ############################################################ # # WHAT THIS CODES DOES: # Descriptive statistics and regressions library(caret) library(tidyverse) library(skimr) library(ggplot2) library(ggthemes) library(gridExtra) library(grid) library(lattice) library(glmnet) library(stargazer) library(xtable) library(directlabels) # location folders #data_in <- "lab2/data/" #data_out <- "lab2/data/" #output <- "lab2/output/" # load ggplot theme function source("homework1/theme_bg.R") source("homework1/da_helper_functions.R") source("airbnb_prediction_functions.R") options(digits = 3) ############# # Load data # ############# # Used area #airbnb_london_not_hackney_workfile_adj.rds area <- "london_not_hackney" data <- readRDS(paste0("homework1/airbnb_", area, "_workfile_adj.rds")) %>% mutate_if(is.character, factor) ###################### # Quick look at data # ###################### glimpse(data) # where do we have missing variables now? to_filter <- sapply(data, function(x) sum(is.na(x))) to_filter[to_filter > 0] # drop if no target data <- data %>% drop_na(price) #what to do with missing values? just an example: data <- data %>% mutate( n_bathrooms = ifelse(is.na(n_bathrooms), median(n_bathrooms, na.rm = T), n_bathrooms), #assume at least 1 bath n_beds = ifelse(is.na(n_beds), n_accommodates, n_beds)) #assume n_beds=n_accomodates # what are those factor variables? 
data %>% select_if(is.factor) %>% skim() #to_drop <- c("neighbourhood_cleansed", "f_neighbourhood_cleansed") #data <- data %>% # select(-one_of(to_drop)) ################################### # Business logic- define our prediction problem ################################### # Decision 1 # Size, we need a normal apartment data <- data %>% filter(n_accommodates < 6) # Decision 2 # Remove missing data, that has no score rating data %>% select(n_review_scores_rating) %>% is.na() %>% sum() data <- data %>% drop_na(n_review_scores_rating) data <- data %>% filter(f_room_type != "Hotel room") # save workfile #saveRDS(data, paste0(data_out, "airbnb_hackney_work.csv"), row.names = F) saveRDS(data, "homework1/airbnb_london_work.rds") ##################################### # Look at some descriptive statistics ##################################### #How is the average price changing in my district by `property_type`, `room_type` and the `bed_type`? data %>% group_by(f_property_type, f_room_type) %>% dplyr::summarize(mean_price = mean(price, na.rm=TRUE)) data %>% group_by(f_bed_type) %>% dplyr::summarize(mean_price = mean(price, na.rm=TRUE)) Hmisc::describe(data$price) ## Distribution of price by type below 400 datau <- subset(data, price<400) # Density chart plot1 <- ggplot(data = datau, aes(x=price)) + geom_density(aes(color=f_room_type, fill=f_room_type), na.rm =TRUE, alpha= 0.3) + labs(x="Price", y="Density", color = "") + scale_color_manual(name="", values=c(color[2],color[1], color[3]), labels=c("Entire home/apt","Private room", "Shared room")) + scale_fill_manual(name="", values=c(color[2],color[1], color[3]), labels=c("Entire home/apt","Private room", "Shared room")) + theme_bg() + background_grid(major = "y", minor = "y") + theme(legend.position = "bottom") plot1 ggsave(paste0("homework1/", "plot1_R.png"),plot = plot1, width=mywidth_large, height=myheight_large, units = "cm", dpi = 1200) cairo_ps(filename = paste0("homework1/", "plot1_R.eps"), width = mywidth_large, 
height = myheight_large, pointsize = 12, fallback_resolution = 1200) print(plot1) dev.off() plot1 ## Boxplot of price by room type plot2 <- ggplot(data = datau, aes(x = f_room_type, y = price)) + stat_boxplot(aes(group = f_room_type), geom = "errorbar", width = 0.3, color = viridis(3, begin=0.2, end=0.7), size = 0.5, na.rm=T)+ geom_boxplot(aes(group = f_room_type), color = viridis(3, begin=0.2, end=0.7), fill = viridis(3, begin = 0.2, end=0.7), size = 0.5, width = 0.6, alpha = 0.3, na.rm=T, outlier.shape = NA) + scale_y_continuous(limits = c(0,300), breaks = seq(0,300,100)) + labs(x = "Room type",y = "Price")+ theme_bg() + background_grid(major = "xy", minor="none") plot2 ggsave(paste0("homework1/", "plot2_R.png"), plot = plot2, width=mywidth_large, height=myheight_large, units = "cm", dpi = 1200) cairo_ps(filename = paste0("homework1/", "plot2_R.eps"), width = mywidth_large, height = myheight_large, pointsize = 12, fallback_resolution = 1200) print(plot2) dev.off() plot2 # way to combine them ch14_priceplot1and2 <- plot_grid(plot1, plot2, nrow=2) ggsave(paste("homework1/","ch14_priceplot1and2.png",sep="")) cairo_ps(filename = paste0("homework1/", "ch14_priceplot1and2.eps"), width = mywidth_large, height = myheight_large, pointsize = 12, fallback_resolution = 1200) print(ch14_priceplot1and2) dev.off() ch14_priceplot1and2 # Boxplot plot3 <- ggplot(datau, aes(x = factor(n_accommodates), y = price, fill = factor(f_property_type), color=factor(f_property_type))) + geom_boxplot(alpha=0.8, na.rm=T, outlier.shape = NA, width = 0.8) + scale_color_manual(name="", values=c(color[2],color[1])) + scale_fill_manual(name="", values=c(color[2],color[1])) + labs(x = "Accomodate Persons",y = "Price")+ theme_bg() + background_grid(major = "xy", minor="none") + theme(legend.position = "bottom") plot3 ggsave(paste0("homework1/", "plot3_R.png"), plot = plot3, width=mywidth_large, height=myheight_large, units = "cm", dpi = 1200) cairo_ps(filename = paste0("homework1/", "plot3_R.eps"), 
width = mywidth_large, height = myheight_large, pointsize = 12, fallback_resolution = 1200) print(plot3) dev.off() plot3 # Barchart plot4 <- ggplot(data = datau, aes(x = factor(n_accommodates), color = f_room_type, fill = f_room_type)) + geom_bar(alpha=0.8, na.rm=T, width = 0.8) + scale_color_manual(name="", values=c(color[2],color[1], color[3])) + scale_fill_manual(name="", values=c(color[2],color[1], color[3])) + labs(x = "Accomodate Persons",y = "Frequency")+ theme_bg() + background_grid(major = "y", minor="y") + theme(legend.position = "bottom") plot4 ggsave(paste0("homework1/", "plot4_R.png"), plot = plot4, width=mywidth_large, height=myheight_large, units = "cm", dpi = 1200) cairo_ps(filename = paste0("homework1/", "plot4_R.eps"), width = mywidth_large, height = myheight_large, pointsize = 12, fallback_resolution = 1200) print(plot4) dev.off() plot4 ch14_priceplot3and4 <- plot_grid(plot3, plot4, nrow=2) ggsave(paste("homework1/","ch14_priceplot3and4.png",sep="")) cairo_ps(filename = paste0("homework1/", "ch14_priceplot3and4.eps"), width = mywidth_large, height = myheight_large, pointsize = 12, fallback_resolution = 1200) print(ch14_priceplot3and4) dev.off() ch14_priceplot3and4 ##################### # Setting up models # ##################### # Basic Variables basic_lev <- c("n_accommodates", "n_beds", "f_property_type", "f_room_type", "n_days_since") basic_log <- c("ln_accommodates", "ln_beds", "f_property_type", "f_room_type","ln_days_since") # Factorized variables basic_add <- c("f_bathroom","f_cancellation_policy","f_bed_type") reviews <- c("f_number_of_reviews","n_review_scores_rating") # Higher orders poly_lev <- c("n_accommodates2", "n_days_since2", "n_days_since3") poly_log <- c("ln_accommodates2","ln_days_since2","ln_days_since3") #not use p_host_response_rate due to missing obs # Dummy variables: Extras -> collect all options and create dummies amenities <- grep("^d_.*", names(data), value = TRUE) ################################################# # 
Look for interactions ################################################ colnames(data) #Look up room type interactions p1 <- price_diff_by_variables2(data, "f_room_type2", "d_balcony", "Room type", "Balcony") p2 <- price_diff_by_variables2(data, "f_room_type2", "f_property_type", "Room type", "Property type") #Look up canelation policy p3 <- price_diff_by_variables2(data, "f_cancellation_policy", "d_balcony", "Cancellation policy", "Balcony") p4 <- price_diff_by_variables2(data, "f_cancellation_policy", "d_cabletv", "Cancellation policy", "Cable TV") #Look up property type p5 <- price_diff_by_variables2(data, "f_property_type", "d_cats", "Property type", "Cats") p6 <- price_diff_by_variables2(data, "f_property_type", "d_breakfast", "Property type", "Breakfast") ch14_airbnb_interactions <- plot_grid(p1, p2, p3, p4, p5, p6, nrow=3, ncol=2) save_plot(paste0(output, "ch14_airbnb_interactions.png"), ch14_airbnb_interactions, nrow=3, ncol=2, base_width=mywidth_large/2, base_height=myheight_large/3, dpi = 1200) cairo_ps(filename = paste0(output, "ch14_airbnb_interactions.eps"), width = mywidth_large, height = myheight_large, pointsize = 12, fallback_resolution = 1200) print(ch14_airbnb_interactions) dev.off() ch14_airbnb_interactions # dummies suggested by graphs X1 <- c("f_room_type*f_property_type", "f_room_type*d_balcony") # Additional interactions of factors and dummies X2 <- c("d_balcony*f_property_type", "d_cats*f_property_type", "d_breakfast*f_property_type") X3 <- c(paste0("(f_property_type + f_room_type + f_cancellation_policy + f_bed_type) * (", paste(amenities, collapse=" + "),")")) # Create models in levels models: 1-8 modellev1 <- " ~ n_accommodates" modellev2 <- paste0(" ~ ",paste(basic_lev,collapse = " + ")) modellev3 <- paste0(" ~ ",paste(c(basic_lev, basic_add,reviews),collapse = " + ")) modellev4 <- paste0(" ~ ",paste(c(basic_lev,basic_add,reviews,poly_lev),collapse = " + ")) modellev5 <- paste0(" ~ 
",paste(c(basic_lev,basic_add,reviews,poly_lev,X1),collapse = " + ")) modellev6 <- paste0(" ~ ",paste(c(basic_lev,basic_add,reviews,poly_lev,X1,X2),collapse = " + ")) modellev7 <- paste0(" ~ ",paste(c(basic_lev,basic_add,reviews,poly_lev,X1,X2,amenities),collapse = " + ")) modellev8 <- paste0(" ~ ",paste(c(basic_lev,basic_add,reviews,poly_lev,X1,X2,amenities,X3),collapse = " + ")) # Create models in logs, models: 1-8 modellog1 <- " ~ ln_accommodates" modellog2 <- paste0(" ~ ",paste(basic_log,collapse = " + ")) modellog3 <- paste0(" ~ ",paste(c(basic_log, basic_add),collapse = " + ")) modellog4 <- paste0(" ~ ",paste(c(basic_log,basic_add,reviews,poly_log),collapse = " + ")) modellog5 <- paste0(" ~ ",paste(c(basic_log,basic_add,reviews,poly_log,X1),collapse = " + ")) modellog6 <- paste0(" ~ ",paste(c(basic_log,basic_add,reviews,poly_log,X1,X2),collapse = " + ")) modellog7 <- paste0(" ~ ",paste(c(basic_log,basic_add,reviews,poly_log,X1,X2,amenities),collapse = " + ")) modellog8 <- paste0(" ~ ",paste(c(basic_log,basic_add,reviews,poly_log,X1,X2,amenities,X3),collapse = " + ")) ################################# # Separate hold-out set # ################################# # create a holdout set (20% of observations) smp_size <- floor(0.2 * nrow(data)) # Set the random number generator: It will make results reproducable set.seed(20180123) # create ids: # 1) seq_len: generate regular sequences # 2) sample: select random rows from a table holdout_ids <- sample(seq_len(nrow(data)), size = smp_size) data$holdout <- 0 data$holdout[holdout_ids] <- 1 #Hold-out set Set data_holdout <- data %>% filter(holdout == 1) #Working data set data_work <- data %>% filter(holdout == 0) ############################## # cross validation # ############################## ## K/N = 5 n_folds=5 # Create the folds set.seed(20180124) folds_i <- sample(rep(1:n_folds, length.out = nrow(data_work) )) # Create results model_results_cv <- list() for (type in c("lev","log")) { for (i in (1:8)){ model_name 
<- paste0("model",type,i) model_pretty_name <- paste0("(",i,")") yvar <- ifelse(type=="lev","price","ln_price") xvars <- eval(parse(text = model_name)) formula <- formula(paste0(yvar,xvars)) # Initialize values rmse_train <- c() rmse_test <- c() model_work_data <- lm(formula,data = data_work) BIC <- BIC(model_work_data) nvars <- model_work_data$rank -1 r2 <- summary(model_work_data)$r.squared # Do the k-fold estimation for (k in 1:n_folds) { test_i <- which(folds_i == k) # Train sample: all except test_i data_train <- data_work[-test_i, ] # Test sample data_test <- data_work[test_i, ] # Estimation and prediction model <- lm(formula,data = data_train) prediction_train <- predict(model, newdata = data_train) prediction_test <- predict(model, newdata = data_test) # Criteria evaluation if (type=="lev") { rmse_train[k] <- mse_lev(prediction_train, data_train[,yvar] %>% pull)**(1/2) rmse_test[k] <- mse_lev(prediction_test, data_test[,yvar] %>% pull)**(1/2) } else { rmselog <- mse_lev(prediction_train, data_train[,yvar] %>% pull)**(1/2) rmse_train[k] <- mse_log(prediction_train, data_train[,yvar] %>% pull,rmselog)**(1/2) rmse_test[k] <- mse_log(prediction_test, data_test[,yvar] %>% pull,rmselog)**(1/2) } } model_results_cv[[model_name]] <- list(yvar=yvar,xvars=xvars,formula=formula,model_work_data=model_work_data, rmse_train = rmse_train,rmse_test = rmse_test,BIC = BIC, model_name = model_pretty_name, nvars = nvars, r2 = r2) } } t1 <- imap(model_results_cv, ~{ as.data.frame(.x[c("rmse_test", "rmse_train")]) %>% summarise_all(.funs = mean) %>% mutate("model_name" = .y , "model_pretty_name" = .x[["model_name"]] , "nvars" = .x[["nvars"]], "r2" = .x[["r2"]], "BIC" = .x[["BIC"]]) }) %>% bind_rows() t1 column_names <- c("Model", "N predictors", "R-squared", "BIC", "Training RMSE", "Test RMSE") # Nice table produced and saved as .tex without \beign{table} # -R2, BIC on full work data-n. 
# -In sample rmse: average on training data; avg test : average on test data t14_2 <- t1 %>% filter(grepl("lev",model_name)) %>% select("model_pretty_name", "nvars", "r2" , "BIC", "rmse_train", "rmse_test") colnames(t14_2) <- column_names print(xtable(t14_2, type = "latex", digits=c(0,0,0,2,0,2,2)), file = paste0(output, "ch14_table_fit_level.tex"), include.rownames=FALSE, booktabs=TRUE, floating = FALSE) t14_2_log <- t1 %>% filter(grepl("log",model_name)) %>% select("model_pretty_name", "nvars", "r2" , "BIC", "rmse_train", "rmse_test") colnames(t14_2_log) <- column_names print(xtable(t14_2_log, type = "latex", digits=c(0,0,0,2,0,2,2)), file = paste0(output, "ch14_table_fit_log.tex"),include.rownames=FALSE, booktabs=TRUE, floating = FALSE) # Graph where x axis is models (noting number of vars), y axis: 1: in sample rmsse, 2: CV avg test RMSE, 3: BIC (Right scale) t1_levels <- t1 %>% filter(grepl("lev",model_name)) %>% mutate(BIC = BIC/1000) %>% select("nvars", "BIC", "rmse_train", "rmse_test") %>% gather(var,value, BIC:rmse_test) %>% mutate(var = factor(var, levels = c("BIC", "rmse_train", "rmse_test"), labels = c("BIC","RMSE In-sample","RMSE test CV"))) t1_logs <- t1 %>% filter(grepl("log",model_name)) %>% mutate(BIC = BIC/100) %>% select("nvars", "BIC", "rmse_train", "rmse_test") %>% gather(var,value, BIC:rmse_test) %>% mutate(var = factor(var, levels = c("BIC", "rmse_train", "rmse_test"), labels = c("BIC","RMSE In-sample","RMSE test CV"))) model_result_plot_levels <- ggplot(data = t1_levels, aes(x = factor(nvars), y = value, color=var, group = var)) + geom_line() + scale_y_continuous( name = "RMSE", sec.axis = sec_axis(~ . 
* 1000 , name = "CV average BIC"), limits = c(20, 45)) + scale_x_discrete( name = "Number of vars", expand=c(0, 1)) + geom_dl(aes(label = var), method = list("top.points", cex=0.7)) + scale_colour_discrete(guide = 'none') + theme_bg() + ggtitle("Model fit measures") ggsave(paste0(output, "ch14_airbnb_model_result_levels.png"), model_result_plot_levels, width=mywidth_large, height=myheight_large, dpi = 1200) cairo_ps(filename = paste0(output, "ch14_airbnb_model_result_levels.eps"), width = mywidth_large, height = myheight_large, pointsize = 12, fallback_resolution = 1200) print(model_result_plot_levels) dev.off() model_result_plot_logs <- ggplot(data = t1_logs, aes(x = factor(nvars), y = value, color=var, group = var)) + geom_line() + scale_y_continuous( name = "RMSE", sec.axis = sec_axis(~ . * 100 , name = "CV average BIC"), limits = c(10, 45)) + scale_x_discrete( name = "Number of vars", expand=c(0, 1)) + geom_dl(aes(label = var), method = list("top.points", cex=0.7)) + scale_colour_discrete(guide = 'none') + theme_bg() + ggtitle("Model fit measures") ggsave(paste0(output, "ch14_airbnb_model_result_logs.png"), model_result_plot_logs, width=mywidth_large, height=myheight_large, dpi = 1200) cairo_ps(filename = paste0(output, "ch14_airbnb_model_result_logs.eps"), width = mywidth_large, height = myheight_large, pointsize = 12, fallback_resolution = 1200) print(model_result_plot_logs) dev.off() ################################# # LASSO # ################################# # take model 7 and find observations where there is no missing data vars_model_7 <- c("price", basic_lev,basic_add,reviews,poly_lev,amenities) data_work_complete <- data_work %>% select_(.dots = vars_model_7) %>% drop_na() # Set lasso tuning parameters train_control <- trainControl(method = "cv", number = n_folds) tune_grid <- expand.grid("alpha" = c(1), "lambda" = seq(0.05, 1, by = 0.05)) # We use model 7 without the interactions so that it is easy to compare later to post lasso ols formula <- 
formula(paste0("price ~ ", paste(setdiff(vars_model_7, "price"), collapse = " + "))) set.seed(1234) lasso_model <- caret::train(formula, data = data_work_complete, method = "glmnet", preProcess = c("center", "scale"), trControl = train_control, tuneGrid = tune_grid) print(lasso_model$bestTune$lambda) lasso_coeffs <- coef(lasso_model$finalModel, lasso_model$bestTune$lambda) %>% as.matrix() %>% as.data.frame() %>% rownames_to_column(var = "variable") %>% rename(coefficient = `1`) # the column has a name "1", to be renamed print(lasso_coeffs) # Evaluate model. CV error: lasso_cv_rmse <- lasso_model$results %>% filter(lambda == lasso_model$bestTune$lambda) %>% select(RMSE) print(lasso_cv_rmse[1, 1]) ################################################### # Diagnsotics # ################################################### # T2 # rows: linear model #3, linear model #7 log linear model #7, LASSO, post-Lasso OLS: # keep vars from lasso, do OLS. show predicted mse nonzero_lasso_vars <- lasso_coeffs %>% filter(coefficient != 0) %>% pull(variable) # do not use the few interactions for now: post_lasso_ols_vars <- intersect(nonzero_lasso_vars, names(data_work_complete)) model3_level <- model_results_cv[["modellev3"]][["model_work_data"]] model7_level <- model_results_cv[["modellev7"]][["model_work_data"]] model7_log <- model_results_cv[["modellog7"]][["model_work_data"]] post_lasso <- lm(formula(paste("price ~ ", paste(post_lasso_ols_vars, collapse = " + "))), data = data_work_complete) # evaluate on holdout set model3_level_work_rmse <- mse_lev(predict(model3_level, newdata = data_work), data_work[,"price"] %>% pull)**(1/2) model3_level_holdout_rmse <- mse_lev(predict(model3_level, newdata = data_holdout), data_holdout[,"price"] %>% pull)**(1/2) model7_level_work_rmse <- mse_lev(predict(model7_level, newdata = data_work), data_work[,"price"] %>% pull)**(1/2) model7_level_holdout_rmse <- mse_lev(predict(model7_level, newdata = data_holdout), data_holdout[,"price"] %>% pull)**(1/2) 
rmselog <- mse_lev(predict(model7_log, newdata = data_work), data_work[,"ln_price"] %>% pull)**(1/2) model7_log_work_rmse <- mse_log(predict(model7_log, newdata = data_work), data_work[,"ln_price"] %>% pull,rmselog)**(1/2) model7_log_holdout_rmse <- mse_log(predict(model7_log, newdata = data_holdout), data_holdout[,"ln_price"] %>% pull,rmselog)**(1/2) data_holdout_complete <- data_holdout %>% select_(.dots = vars_model_7) %>% drop_na() lasso_holdout_rmse <- RMSE(predict(lasso_model, newdata = data_holdout_complete), data_holdout_complete$price) lasso_work_rmse <- RMSE(predict(lasso_model, newdata = data_work_complete), data_work_complete$price) post_lasso_holdout_rmse <- RMSE(predict(post_lasso, newdata = data_holdout_complete), data_holdout_complete$price) post_lasso_work_rmse <- RMSE(predict(post_lasso, newdata = data_work_complete), data_work_complete$price) t2 <- data.frame( "model_name" = c("Model 3, Levels", "Model 7, Levels", "Model 7, Logs", "LASSO", "post-Lasso OLS"), "rmse_work" = c(model3_level_work_rmse, model7_level_work_rmse, model7_log_work_rmse, lasso_work_rmse, post_lasso_work_rmse), "rmse_holdout" = c(model3_level_holdout_rmse, model7_level_holdout_rmse, model7_log_holdout_rmse, lasso_holdout_rmse, post_lasso_holdout_rmse)) t2 print(xtable(t2, type = "latex"), file = paste0(output, "ch14_table_rmse_holdout.tex"),include.rownames=FALSE, digits=2, booktabs=TRUE, floating = FALSE) model7_level_holdout_rmse <- mse_lev(predict(model7_level, newdata = data_holdout), data_holdout[,"price"] %>% pull)**(1/2) p2 <- predict(model7_level, data_holdout) resid_p <- p2-data_holdout$price summary(resid_p) pred2_new <- predict(model7_level, data_holdout ,se.fit = TRUE, interval = "prediction") p2<- pred2_new$fit sum(p2) sum1 <- cbind(t(p1), t(p2)) colnames(sum1) <- c('Model1', 'Model3') rownames(sum1) <- c('Predicted', 'PI_low', 'PI_high') sum1 ################################################### # FIGURES FOR FITTED VS ACTUAL OUTCOME VARIABLES # 
################################################### # Target variable Ylev <- data_holdout[["price"]] meanY <-mean(Ylev) sdY <- sd(Ylev) meanY_m2SE <- meanY -2 * sdY meanY_p2SE <- meanY + 2 * sdY Y5p <- quantile(Ylev, 0.05, na.rm=TRUE) Y95p <- quantile(Ylev, 0.95, na.rm=TRUE) # Predicted values predictionlev_holdout_pred <- as.data.frame(predict(model7_level, newdata = data_holdout, interval="predict")) %>% rename(pred_lwr = lwr, pred_upr = upr) predictionlev_holdout_conf <- as.data.frame(predict(model7_level, newdata = data_holdout, interval="confidence")) %>% rename(conf_lwr = lwr, conf_upr = upr) predictionlev_holdout <- cbind(data_holdout[,c("price","n_accommodates")], predictionlev_holdout_pred, predictionlev_holdout_conf[,c("conf_lwr","conf_upr")]) # Logged target variable # model log 7 on log price Ylog <- data_holdout[["ln_price"]] predictionlog_test <- predict(model7_log, newdata = data_holdout) predictionlog_test2 <- exp(predictionlog_test) * exp((rmselog)^2/2) # Create data frame with the real and predicted values d <- data.frame(ylev=Ylev, ylog=Ylog, predlev=predictionlev_holdout[,"fit"] , predlog=predictionlog_test2) # Check the differences d$elev <- d$ylev - d$predlev # Plot predicted vs price level_vs_pred <- ggplot(data = d[(d$ylev<400),]) + geom_point(aes(y=ylev, x=predlev), color = color[3], size = 1.5, shape = 16, alpha = 0.7, show.legend=FALSE, na.rm=TRUE) + geom_smooth(aes(y=ylev, x=predlev), method="lm", color=color[1], se=F, size=1, na.rm=T)+ scale_x_continuous(limits=c(0, 300), breaks=seq(0, 300, by=50)) + scale_y_continuous(limits=c(0, 300), breaks=seq(0, 300, by=50)) + labs(y = "Price", x = "Predicted price") + theme_bg() + background_grid(major = "xy", minor="none") level_vs_pred ggsave(paste0(output, "level_vs_pred.png"), width=mywidth_large, height=myheight_large, units = "cm", dpi = 1200) cairo_ps(filename = paste0(output, "level_vs_pred.eps"), width = mywidth_large, height = myheight_large, pointsize = 12, fallback_resolution = 1200) 
print(level_vs_pred) dev.off() # calculate PI for modellev7 for n_accomodate # show a graph with x: n_accomodate, y: price # graph: F14_CI_n_accomodate # Redo predicted values at 80% PI predictionlev_holdout_pred <- as.data.frame(predict(model7_level, newdata = data_holdout, interval="predict", level=0.8)) %>% rename(pred_lwr = lwr, pred_upr = upr) predictionlev_holdout_conf <- as.data.frame(predict(model7_level, newdata = data_holdout, interval="confidence", level=0.8)) %>% rename(conf_lwr = lwr, conf_upr = upr) predictionlev_holdout <- cbind(data_holdout[,c("price","n_accommodates")], predictionlev_holdout_pred, predictionlev_holdout_conf[,c("conf_lwr","conf_upr")]) predictionlev_holdout_summary <- predictionlev_holdout %>% group_by(n_accommodates) %>% summarise(fit = mean(fit, na.rm=TRUE), pred_lwr = mean(pred_lwr, na.rm=TRUE), pred_upr = mean(pred_upr, na.rm=TRUE), conf_lwr = mean(conf_lwr, na.rm=TRUE), conf_upr = mean(conf_upr, na.rm=TRUE)) F14_CI_n_accomodate <- ggplot(predictionlev_holdout_summary, aes(x=factor(n_accommodates))) + geom_bar(aes(y = fit ), stat="identity", fill = color[1] ) + geom_errorbar(aes(ymin=pred_lwr, ymax=pred_upr, color = "Pred. interval"),width=.2) + # geom_errorbar(aes(ymin=conf_lwr, ymax=conf_upr, color = "Conf. 
interval"),width=.2) + scale_y_continuous(name = "Price") + scale_x_discrete(name = "Number of people accomodated") + scale_color_manual(values=c(color[4], color[4])) + theme_bg() + theme(legend.title= element_blank(),legend.position="bottom") F14_CI_n_accomodate ggsave(paste0(output, "F14_CI_n_accomodate.png"), width=mywidth_large, height=myheight_large, units = "cm", dpi = 1200) cairo_ps(filename = paste0(output, "F14_CI_n_accomodate.eps"), width = mywidth_large, height = myheight_large, pointsize = 12, fallback_resolution = 1200) print(F14_CI_n_accomodate) dev.off() ############################################### ############################################### ############################################### # not used ############################################### ############################################### ############################################### ############################################### # Level prediction against the errors ggplot(data =d, aes(x=ylev, y=elev)) + geom_point(color = color[1], size = 1.5, shape = 16, alpha = 0.8, show.legend=FALSE, na.rm=TRUE) + geom_smooth(method="lm", color=color[3], se=F, size=1, na.rm=T)+ labs(x = "Price", y = "Residual") + theme_bg() + background_grid(major = "xy", minor="none") #ggsave(paste0(output, "F14_preerr1.png"), width=mywidth_large, height=myheight_large, units = "cm", dpi = 1200) # Plot the Level and Log Prediction less than 400 ggplot(data = d[(d$ylev<400),]) + geom_point(aes(x = ylev, y = predlev), color = color[1], size = 1.5, shape = 16, alpha = 0.8, show.legend=FALSE, na.rm=TRUE) + geom_point(aes(x = ylev, y = predlog), color = color[2], size = 1.5, shape = 16, alpha = 0.8, show.legend=FALSE, na.rm=TRUE) + geom_smooth(aes(x = ylev, y = predlev), method="lm", color=color[1], se=F, size=1, na.rm=T)+ geom_smooth(aes(x = ylev, y = predlog), method="lm", color=color[2], se=F, size=1, na.rm=T) + labs(x = "Price", y = "Predicted price") + theme_bg() + background_grid(major = "xy", minor="none") 
#ggsave(paste0(output, "ch14_log_vs_lin_all.png"), width=mywidth_large, height=myheight_large, units = "cm", dpi = 1200) # Plot the Level and Log Prediction within 0.5% and 95% ggplot(data =d[(d$ylev>Y5p) & (d$ylev<Y95p),]) + geom_point(aes(x = ylev, y = predlev), color = color[1], size = 1.5, shape = 16, alpha = 0.8, show.legend=FALSE, na.rm=TRUE) + geom_point(aes(x = ylev, y = predlog), color = color[2], size = 1.5, shape = 16, alpha = 0.8, show.legend=FALSE, na.rm=TRUE) + geom_smooth(aes(x = ylev, y = predlev), method="lm", color=color[1], se=F, size=1, na.rm=T)+ geom_smooth(aes(x = ylev, y = predlog), method="lm", color=color[2], se=F, size=1, na.rm=T) + labs(x = "Price", y = "Predicted price") + theme_bg() + background_grid(major = "xy", minor="none") #ggsave(paste0(output, "log_vs_lin_95.png"), width=mywidth_large, height=myheight_large, units = "cm", dpi = 1200) ################################################### # Post Lasso OLS # ################################################### # keep vars from lasso, do OLS. show predicted mse nonzero_lasso_vars <- lasso_coeffs %>% filter(coefficient != 0) %>% pull(variable) # do not use the few interactions for now: post_lasso_ols_vars <- intersect(nonzero_lasso_vars, names(data_work_complete)) fit_control <- trainControl(method = "cv", number = 5) set.seed(1234) # set the same seed as before so that CV folds are EXACTLY the same post_lasso_ols_model <- caret::train( formula(paste("price ~ ", paste(post_lasso_ols_vars, collapse = " + "))), data = data_work_complete, method = "lm", trControl = fit_control ) post_lasso_ols_rmse <- post_lasso_ols_model$results[["RMSE"]] post_lasso_ols_rmse #RMSE on CV
66356084df379351a86f60cc0ef9faa33d4afeb7
c22cbf66cb76e152185ee8c595a4e2a422cb4273
/R/DensityView.R
cdd7c6acfe7aa1f87bfdd2a65301753b1d0b7749
[]
no_license
WubingZhang/ggView
1a73e98fba493d8a904c390513fb9df97defde64
344419b69a2261c092a07ee0f996a5730a64ee6d
refs/heads/master
2021-07-24T12:08:02.129949
2021-06-30T19:43:09
2021-06-30T19:43:09
149,407,780
1
0
null
null
null
null
UTF-8
R
false
false
3,942
r
DensityView.R
#' Density plot for gene beta scores in Control and Treatment
#'
#' Plot the density of gene beta scores in two samples.
#'
#' @docType methods
#' @name DensityView
#' @rdname DensityView
#'
#' @param beta Data frame, including \code{samples} as columns.
#' @param samples Character, specifying sample names in \code{beta}.
#' @param main As in 'plot'.
#' @param xlab As in 'plot'.
#' @param filename Figure file name to create on disk. Default filename="NULL", which means
#' don't save the figure on disk.
#' @param width As in ggsave.
#' @param height As in ggsave.
#' @param ... Other available parameters in ggsave.
#'
#' @return An object created by \code{ggplot}, which can be assigned and further customized.
#'
#' @author Wubing Zhang
#'
#' @seealso \code{\link{ViolinView}}
#'
#' @examples
#' data(mle.gene_summary)
#' # Read beta score from gene summary table in MAGeCK MLE results
#' dd = ReadBeta(mle.gene_summary)
#' DensityView(dd, samples=c("dmso", "plx"))
#' #or
#' DensityView(dd[, c("dmso", "plx")])
#'
#' @importFrom data.table melt
#' @importFrom ggsci scale_color_npg
#'
#' @export
#===Distribution of beta scores======================================
DensityView <- function(beta, samples = NULL, main = NULL, xlab = "Beta Score",
                        filename = NULL, width = 5, height = 4, ...) {
  # Restrict to the requested sample columns when any are given.
  if (!is.null(samples) && length(samples) > 0) {
    beta <- beta[, samples, drop = FALSE]
  }
  # Reshape to long format: one (variable, value) row per score.
  dd1 <- data.table::melt(beta, id = NULL)
  if (!"variable" %in% colnames(dd1)) {
    dd1$variable <- colnames(beta)
  }
  p <- ggplot(data = dd1, aes(x = value, color = variable, group = variable)) +
    geom_density() +
    scale_color_npg() +
    labs(x = xlab, y = "Density", title = main, color = NULL) +
    # Apply the complete theme BEFORE the legend tweaks: theme_bw() resets all
    # theme elements, so adding it last (as the original code did) silently
    # discarded the legend.position/legend.justification settings below.
    theme_bw(base_size = 14) +
    theme(legend.justification = c(1, 1), legend.position = c(0.99, 0.99))
  if (!is.null(filename)) {
    ggsave(plot = p, filename = filename, units = "in",
           width = width, height = height, ...)
  }
  return(p)
}

#' Density plot
#'
#' Plot the density of beta score deviations.
#'
#' @docType methods
#' @name DensityDiffView
#'
#' @param beta Data frame, including \code{ctrlname} and \code{treatname} as columns.
#' @param ctrlname A character, specifying the name of control sample.
#' @param treatname A character, specifying the name of treatment sample.
#' @param main As in 'plot'.
#' @param filename Figure file name to create on disk. Default filename="NULL", which means no output.
#' @param width As in ggsave.
#' @param height As in ggsave.
#' @param ... Other parameters in ggsave.
#'
#' @return An object created by \code{ggplot}, which can be assigned and further customized.
#'
#' @author Wubing Zhang
#'
#'
#' @examples
#' data(mle.gene_summary)
#' # Read beta score from gene summary table in MAGeCK MLE results
#' dd = ReadBeta(mle.gene_summary)
#' # Density plot of beta score deviation between control and treatment
#' DensityDiffView(dd, ctrlname = "dmso", treatname = "plx")
#'
#'
#' @export
#===Distribution of beta scores======================================
DensityDiffView <- function(beta, ctrlname = "Control", treatname = "Treatment",
                            main = NULL, filename = NULL, width = 5, height = 4, ...) {
  d <- beta
  # Per-gene deviation: mean treatment beta minus mean control beta.
  d$Diff <- rowMeans(d[, treatname, drop = FALSE]) - rowMeans(d[, ctrlname, drop = FALSE])
  # Reference null distribution drawn from N(0, sd). Guard against a
  # non-positive sd (rnorm would error) when the deviations have a spread
  # smaller than 0.01. NOTE: the draw is not seeded, so the red reference
  # curve varies between calls.
  null_sd <- max(sd(d$Diff) - 0.01, .Machine$double.eps)
  d$r <- rnorm(length(d$Diff), mean = 0, sd = null_sd)
  p <- ggplot(d, aes(x = Diff)) +
    geom_histogram(aes(y = ..density..), fill = "gray90", binwidth = 0.02) +
    geom_density(colour = "black") +
    # Dashed red curve: density of the simulated null for visual comparison.
    geom_density(aes(x = r, y = ..density..), linetype = "dashed", colour = "red") +
    geom_vline(xintercept = 0, linetype = "dashed") +
    theme_bw(base_size = 14) +
    labs(x = "Treat-Control Beta Score", y = "Density", title = main)
  if (!is.null(filename)) {
    ggsave(plot = p, filename = filename, units = "in",
           width = width, height = height, ...)
  }
  return(p)
}
f061bc0f75890fee8d3417ce5cca16f25919830a
0ce2d79cfe9d5528eebdb57fb82ae261bcbeb50b
/cachematrix.R
8529507daafde205090609ce5f0216c0d8bdcde9
[]
no_license
SamhooXee/ProgrammingAssignment2
3ffd8e36f1d59782d1f774474daf636d92b6bae3
646c843f3b3ec311e9dee215385c4e63ee394521
refs/heads/master
2020-12-28T20:31:48.948567
2014-12-19T15:52:13
2014-12-19T15:52:13
null
0
0
null
null
null
null
UTF-8
R
false
false
1,079
r
cachematrix.R
## makeCacheMatrix builds a special "matrix" object: a list of four closures
## that share the stored matrix `x` and its cached inverse via the enclosing
## environment.
##   set        - store a new matrix (drops any cached inverse)
##   get        - return the stored matrix
##   setInverse - cache a computed inverse
##   getInverse - return the cached inverse, or NULL if none yet
makeCacheMatrix <- function(x = matrix()) {
  cached_inv <- NULL
  set <- function(y) {
    x <<- y
    cached_inv <<- NULL  # a new matrix invalidates the cache
  }
  get <- function() x
  setInverse <- function(inverse) cached_inv <<- inverse
  getInverse <- function() cached_inv
  list(set = set,
       get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}

## cacheSolve returns the inverse of the special "matrix" created by
## makeCacheMatrix. When an inverse is already cached it is returned
## directly (with a message); otherwise it is computed with solve(),
## cached, and returned.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getInverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inv <- solve(x$get(), ...)
  x$setInverse(inv)
  inv
}

# test: invert a 4x4 complex Hilbert matrix twice (the second call hits the cache)
hilbert <- function(n) {
  i <- 1:n
  1 / outer(i - 1, i, "+")
}
A <- hilbert(4)
A[] <- as.complex(A)
cachematrix <- makeCacheMatrix(A)
print(cacheSolve(cachematrix))
print(cacheSolve(cachematrix))
531dcd71b98ccc5ba3aeaa48b8eb249c2e701101
f6a640c67b28543cc607ba83fb8587767e905251
/4. CART_tuning.R
ff77c24c873970ce30474673189609ddd1ea6e11
[]
no_license
RichardYCX/LoanDefaultAnalytics
122ef480d7712f2c850c6469b3f6486983a40a37
5105c810b832b586cb7bf5ca00c65c2f581bb7e0
refs/heads/main
2023-02-10T23:18:10.688227
2021-01-09T15:34:11
2021-01-09T15:34:11
316,862,948
1
0
null
null
null
null
UTF-8
R
false
false
9,971
r
4. CART_tuning.R
# ======================================================================================================== # Purpose: Rscript for Application of LOG REG and CART (with sampling) on Full Loan Dataset # Notes: Add clustering? #========================================================================================================= setwd('C:/Users/woony/Desktop/BC2406/Project') library(data.table) library(caTools) clean_data <- fread("clean_data.csv", stringsAsFactors = TRUE) names(clean_data) dim(clean_data) # check that there are no 'character' columns sapply(clean_data, class) # reorder data from earliest to latest clean_data <- clean_data[order(issue_year, issue_month)] # train-test split according to data split = round(clean_data[,.N]*0.7) train = clean_data[1:split,] test = clean_data[(split+1):.N,] #========================================================================================================= # ===================== SMOTE:======================================================================== library(DMwR) # Check initial count of unique value in loan_status as.data.frame(table(train$loan_status)) # Var1 Freq # 1 Charged Off 157425 # 2 Fully Paid 674069 # Fix problem of memory alloc memory.limit() memory.limit(size = 32000) ## Smote : Synthetic Minority Oversampling Technique To Handle Class Imbalancy In Binary Classification balanced.data <- SMOTE(loan_status ~., train, perc.over = 300, k = 5, perc.under = 150) as.data.frame(table(balanced.data$loan_status)) # Var1 Freq #1 Charged Off 5632 #2 Fully Paid 6336 #========================================================================================================= # ===================== Down Sampling:======================================================================== # Check initial count of unique value in loan_status as.data.frame(table(train$loan_status)) # Var1 Freq # 1 Charged Off 157425 # 2 Fully Paid 674069 # Sample the majority to address imbalanced data & use same testset to test ---- 
# Random sample from majority class loan_status = Fully Paid and combine with loan_status = Charged Off to form new trainset ----- majority <- train[loan_status == "Fully Paid"] minority <- train[loan_status == "Charged Off"] # Randomly sample the row numbers to be in trainset. Same sample size as minority cases. chosen <- sample(seq(1:nrow(majority)), size = nrow(minority)) # Subset the original trainset based on randomly chosen row numbers. majority.chosen <- majority[chosen] # Combine two data tables by appending the rows balanced.data <- rbind(majority.chosen, minority) summary(balanced.data$loan_status) ## Check trainset is balanced. # Charged Off Fully Paid # 157425 157425 #========================================================================================================= # ==================================== Apply Logistic Regression on sampled data ========================================= library(dplyr) library(ggplot2) # Develop model on Train Set m1 <- glm(loan_status ~ ., family = binomial, data = balanced.data) summary(m1) toselect.variables <- summary(m1)$coeff[-1,4] < 0.05 sig.variables <- names(toselect.variables)[toselect.variables == TRUE] sig.variables # Remove insignificant variables m2.formula <- as.formula(loan_status ~ addr_state + avg_cur_bal + avg_fico_range_high + avg_fico_range_low + avg_inq_last_6mths + avg_mths_since_last_major_derog + avg_revol_bal + avg_revol_util + combined_dti + delinq_2yrs + earliest_cr_line_year+ emp_length + issue_month + issue_year + loan_amnt + mo_sin_rcnt_rev_tl_op + mo_sin_rcnt_tl + mths_since_last_delinq + mths_since_last_record + mths_since_recent_revol_delinq + num_actv_bc_tl + num_actv_rev_tl + num_bc_tl + num_tl_op_past_12m + pub_rec_bankruptcies + term + tot_cur_bal + total_bal_ex_mort + total_il_high_credit_limit + total_rev_hi_lim) m2 <- glm(m2.formula, family = binomial, data = balanced.data) summary(m2) # 2nd round of removing insignificant variables toselect.variables <- 
summary(m2)$coeff[-1,4] < 0.05 sig.variables <- names(toselect.variables)[toselect.variables == TRUE] sig.variables m3.formula <- as.formula(loan_status ~ avg_cur_bal + avg_fico_range_high + avg_fico_range_low + avg_inq_last_6mths + avg_revol_bal + avg_revol_util + combined_dti + delinq_2yrs + earliest_cr_line_year+ emp_length + issue_month + issue_year + loan_amnt + mo_sin_rcnt_rev_tl_op + mo_sin_rcnt_tl + mths_since_last_delinq + mths_since_last_record + num_actv_bc_tl + num_actv_rev_tl + num_bc_tl + num_tl_op_past_12m + pub_rec_bankruptcies + term + tot_cur_bal + total_bal_ex_mort + total_il_high_credit_limit + total_rev_hi_lim) # remove: addr_state + avg_mths_since_last_major_derog + mths_since_recent_revol_delinq # + m3 <- glm(m3.formula, family = binomial, data = balanced.data ) summary(m3) # VIF vif(m3) # some variables have large GVIF > 5 # avg_cur_bal, avg_revol_bal, tot_cur_bal, total_bal_ex_mort, total_il_high_credit_limit # + total_rev_hi_lim # Remove variables with GVIF values over 5 m4.formula <- as.formula(loan_status ~ avg_fico_range_high + avg_fico_range_low + avg_inq_last_6mths + avg_revol_util + combined_dti + delinq_2yrs + earliest_cr_line_year+ emp_length + issue_month + issue_year + loan_amnt + mo_sin_rcnt_rev_tl_op + mo_sin_rcnt_tl + mths_since_last_delinq + mths_since_last_record + num_actv_bc_tl + num_actv_rev_tl + num_bc_tl + num_tl_op_past_12m + pub_rec_bankruptcies + term) m4 <- glm(m4.formula, family = binomial, data = balanced.data) summary(m4) OR <- exp(coef(m4)) OR OR.CI <- exp(confint(m4)) OR.CI # VIF vif(m4) # highest GVIF is ~3.39 # Apply model on Test set # Confusion matrix prob_test_logreg <- predict(m4, newdata = test, type = "response") threshold = 0.5 y.hat.test.logreg <- ifelse(prob_test_logreg > threshold, 1, 0) y.hat.test.logreg <- factor(y.hat.test.logreg, levels = c(0,1), labels = c("Charged Off", "Fully Paid")) table_logreg_balanced <- table(test$loan_status, y.hat.test.logreg, deparse.level = 2) table_logreg_balanced # 
y.hat.test.logreg # test$loan_status Charged Off Fully Paid # Charged Off 71162 6187 # Fully Paid 15735 263270 mean(y.hat.test.logreg == test$loan_status) # 0.9384825 (LOGREG on downsampled) # FN rates: aka predict as fully paid when it is charged off = FN / (TP+FP) FN <- 6187 TP <- 263270 FP <- 15735 print(FN/(TP+FP)) # FNR = 0.02217523 #========================================================================================================= # ==================================== Apply CART on sampled data ========================================= library(rpart) library(rpart.plot) cart1 <- rpart(loan_status ~ ., data = balanced.data, method = 'class', control = rpart.control(minsplit = 2, cp = 0)) printcp(cart1) plotcp(cart1) print(cart1) # Extract the Optimal Tree via code # Compute min CVerror + 1SE in maximal tree cart1. CVerror.cap <- cart1$cptable[which.min(cart1$cptable[,"xerror"]), "xerror"] + cart1$cptable[which.min(cart1$cptable[,"xerror"]), "xstd"] # Find the optimal CP region whose CV error is just below CVerror.cap in maximal tree cart1. i <- 1; j<- 4 while (cart1$cptable[i,j] > CVerror.cap) { i <- i + 1 } # Get geometric mean of the two identified CP values in the optimal region if optimal tree has at least one split. cp.opt = ifelse(i > 1, sqrt(cart1$cptable[i,1] * cart1$cptable[i-1,1]), 1) ## i = 6 shows that the 2nd tree is optimal based on 1 SE rule. # Prune the max tree using a particular CP value cart2 <- prune(cart1, cp = cp.opt) printcp(cart2, digits = 3) ## --- Trainset Error & CV Error -------------------------- ## Root node error: 157425/314850 = 0.5 ## CP nsplit rel error xerror xstd ## 6 0.00095 8 0.211 0.211 0.00109 print(cart2) rpart.plot(cart2, nn = T, main = "Optimal Tree in loan_status with sampling") cart2$variable.importance ## avg_fico_range_high, avg_fico_range_low, max_fico_range_high has the top 3 highest importance. 
# Model applied on Test-set # Test set confusion matrix predictions.smote.cart <- predict(cart2, newdata = test, type = 'class') table_cart_balanced <- table(test$loan_status, predictions.smote.cart, deparse.level = 2) table_cart_balanced # predictions # test$loan_status Charged Off Fully Paid # Charged Off 72526 4823 # Fully Paid 17030 261975 round(prop.table(table1), 3) # Overall Accuracy mean(predictions.smote.cart == test$loan_status) # 0.9386761 (CART with smote) # FN rates: aka predict as fully paid when it is charged off = FN / (TP+FP) FN <- 4823 TP <- 261975 FP <- 17030 print(FN/(TP+FP)) # FNR = 0.01728643
5416affc34b7a23e15627bd3f0f744c3379f4a02
d0a2e85476c647d49516600dacfa755110b33e06
/03_assignment/others/github_api.R
0744d17bff2eac1a3123913c7307541f02b4e24e
[]
no_license
rtorlow/courses
4d3c3d9fa58834259d2d1f8f5bb830016a228ca1
ac6bdbc43e9e58ba30f9616f7c6cc5d88bb9c512
refs/heads/master
2020-04-07T07:42:19.837046
2015-11-04T17:23:05
2015-11-04T17:23:05
42,225,051
0
0
null
2015-09-10T05:53:54
2015-09-10T05:53:54
null
UTF-8
R
false
false
2,328
r
github_api.R
# ============================================================================= # # Q1 Quiz2 # ============================================================================= # # NOT RUN In R-STUDIO, only R Original is working!!! .libPaths("D:/R-project/Rpackages") library(httr) # 1. Find OAuth settings for github: # http://developer.github.com/v3/oauth/ oauth_endpoints("github") # 2. To make your own application, register at at # https://github.com/settings/applications. # Use any URL for the homepage URL (http://github.com is fine) # and http://localhost:1410 as the callback url # # Replace your key and secret below. myapp <- oauth_app("github", key = "a2f0703cb64735d0817b", secret = "0ab99cdcf3764c5416744ed60b6261b89d030346") # 3. Get OAuth credentials github_token <- oauth2.0_token(oauth_endpoints("github"), myapp) # 4. Use API gtoken <- config(token = github_token) req <- GET("https://api.github.com/users/jtleek/repos", gtoken) stop_for_status(req) content(req) library(jsonlite) jsonData <- fromJSON("https://api.github.com/users/jtleek/repos") names(jsonData) jsonData[45]$[7] jsonData$name[7] jsonData test2 <- dplyr::filter(jsonData, name=="datasharing") # ============================================================================= # # Q2/Q3 in quiz 2 # ============================================================================= # require("sqldf") setwd("D:/Coursera/DataScientist/03_assignment") acs <- read.csv("getdata_data_ss06pid.csv") test <- sqldf("select pwgtp1 from acs where AGEP < 50") test2 <- dplyr::filter(acs, AGEP <50) #Q3 unique(acs$AGEP) sqldf("select distinct AGEP from acs") # ============================================================================= # # Q4 in quiz 2 # ============================================================================= # con = url("http://biostat.jhsph.edu/~jleek/contact.html") htmlCode = readLines(con) close(con) nchar(htmlCode[c(10,20,30,100)]) # 
============================================================================= # # Q5 in quiz 2 # ============================================================================= # test <- read.fwf("getdata_wksst8110.for", widths = c(10, 5, 4,4,5,4,4,5,4,4,5,4,4), skip = 4) sum(test$V6) #32426.7
8f5e0c2fd0672fad5ea0e2066ea1f62d712b5e66
a6be7e12866b55842e989750efd6524e14590912
/R/swap.R
48559bcb9d9d118028b5696d32e1cb8ec63b93fc
[]
no_license
SigbertIngress/findWeb
0ef0439dcc1e2c8461f81e2bd1677e702d8114a2
cfce77c8ecea73045277799aeb21ade678c2986b
refs/heads/master
2020-03-26T05:50:56.359556
2018-09-11T07:56:43
2018-09-11T07:56:43
144,577,893
3
0
null
null
null
null
UTF-8
R
false
false
493
r
swap.R
#' swap #' #' Swaps the given structure between portals. Mainly for use in \link{optimizeWeb}. # #' @param g a web #' @param i numeric: index of first portal to be swapped #' @param j numeric: index of second portal to be swapped #' #' @return the modified web #' @export #' #' @examples #' par(mfrow=c(1,2)) #' g <- fishbone(8) #' plot(g) #' gs <- swap(g, 1, 3) #' plot(gs) swap <- function(g, i, j) { tmp <- g$map[i] g$map[i] <- g$map[j] g$map[j] <- tmp g$error <- evaluateC(g) g }
23bf8d4d8162998516f4e78c7e29b7fe881cd181
355838dd080a4fbcf4cccb45eeec673bd2dc2c94
/.Rproj.user/5700CA6/sources/s-E8B6519/B35E8EE1-contents
6696275d1ffca7b990ec4a578b0ffd77d053d1a1
[]
no_license
ZordoC/March_2019-Classification_Preference_Brand
2a205d05a1f33b610309cb80e75fa0aac577630f
31387cf1f1d182b2ba84fdb58d150420e87c3acd
refs/heads/master
2020-05-02T19:44:55.614279
2019-05-13T09:43:09
2019-05-13T09:43:09
178,167,624
0
0
null
null
null
null
UTF-8
R
false
false
1,024
B35E8EE1-contents
ggCompleteSurvey <- ggplot(CompleteResponsesOG, aes(x=salary, y=age)) + geom_point(aes(col=brand), size=3) + labs(title="Brand by Salary and Age", y="Age", x="Salary") plot(ggCompleteSurvey) IncompleteResponsesWPredictions <- cbind(IncompleteSurvery,predictionRandomForestALL) ggIncompleteSurvey <- ggplot(IncompleteResponsesWPredictions, aes(x=salary, y=age)) + geom_point(aes(col=predictionRandomForestALL), size=3) + labs(title="Brand by Salary and Age", y="Age", x="Salary") plot(ggIncompleteSurvey) library(ggplot2) gg <- ggplot(CompleteResponsesOG, aes(x=salary, y=age)) + geom_point(aes(col=brand), size=3) + # Set color to vary based on state categories. labs(title="Brand by Age and Salary", subtitle="", y="Age", x="Salary") plot(gg) plot(CompleteResponsesOG$age,CompleteResponsesOG$salary) myplot <- ggplot(CompleteResponsesOG, aes(brand)) + geom_bar(aes(y = (..count..)/sum(..count..))) + scale_y_continuous(labels=scales::percent) + ylab("relative frequencies") myplot
90f338fe70b0ae69187603627037622b79a8fefc
4c8fefa343c910f1f37630abf3ccdd7633b27603
/man/unmatvec.Rd
df8cd1e19fefe94e9451ae8386bf7f2cfd62a0fa
[]
no_license
cran/rTensor
55d1b0d4b22e4716e39cc296bf842ab2471554a6
94a947179df5c67485b70e154c7f8edf317be65a
refs/heads/master
2021-07-07T18:47:18.540146
2021-05-15T05:20:10
2021-05-15T05:20:10
17,698,970
2
3
null
null
null
null
UTF-8
R
false
true
895
rd
unmatvec.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/rTensor_Misc.R \name{unmatvec} \alias{unmatvec} \title{Unmatvec Folding of Matrix} \usage{ unmatvec(mat, modes = NULL) } \arguments{ \item{mat}{matrix to be folded into a Tensor} \item{modes}{the modes of the output Tensor} } \value{ Tensor object with modes given by \code{modes} } \description{ The inverse operation to \code{\link{matvec-methods}}, turning a matrix into a Tensor. For a full account of matrix folding/unfolding operations, consult Kolda and Bader (2009). } \examples{ tnsr <- new("Tensor",3L,c(3L,4L,5L),data=runif(60)) matT1<-matvec(tnsr) identical(unmatvec(matT1,modes=c(3,4,5)),tnsr) } \references{ T. Kolda, B. Bader, "Tensor decomposition and applications". SIAM Applied Mathematics and Applications 2009. } \seealso{ \code{\link{matvec-methods}}, \code{\link{fold}}, \code{\link{k_fold}} }
9a317f798aa2e7731a3b79809a7966f51a002373
6e5efc0b6b6b37c735c1c773531c41b51675eb10
/man/Setup.AdductData.Rd
a7504c4c1659895a20cedee117d20607f4a9fc04
[ "GPL-2.0-or-later" ]
permissive
xia-lab/MetaboAnalystR
09aa09c9e57d7da7d73679f5a515eb68c4158e89
9edbbd1e2edda3e0796b65adf440ad827abb7beb
refs/heads/master
2023-08-10T06:08:56.194564
2023-08-01T15:13:15
2023-08-01T15:13:15
109,994,826
268
165
MIT
2023-03-02T16:33:42
2017-11-08T15:38:12
R
UTF-8
R
false
true
504
rd
Setup.AdductData.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/peaks_to_function.R \name{Setup.AdductData} \alias{Setup.AdductData} \title{Save adduct names for mapping} \usage{ Setup.AdductData(mSetObj = NA, qvec) } \arguments{ \item{mSetObj}{Input the name of the created mSetObj (see InitDataObjects)} \item{qvec}{Input the vector to query} } \description{ Save adduct names for mapping } \author{ Jeff Xia\email{jeff.xia@mcgill.ca} McGill University, Canada License: GNU GPL (>= 2) }
82cf4b84a087035804bcbc8ce2544abb5d89b7eb
3f5f4dd503ab172270f69d8b189b98a1a19861fe
/helpers.R
7dcb1bef68b68a17898de55ec0a0680cbe528067
[]
no_license
SamHannah/wf-sequence-DualProcess
b739f8386e6d071f24a8306f26c6d8aaad380837
b3704be1f978e19d9c77f6c4daa7c6fa04f10b2c
refs/heads/master
2021-01-10T15:33:07.795128
2015-06-25T18:58:40
2015-06-25T18:58:40
36,904,798
0
0
null
null
null
null
UTF-8
R
false
false
18,432
r
helpers.R
# runs a simulation of a word frequency sequence effect experiment (no feedback) at some level of L, F, SF and dp, # and for some number of simulated subjects # Uses a DUAL-PROCESS version of MAL, with recall = test of normed echo against memory using high, fixed criterion and recognition = # test of unnormed resubmitted echo against low, variable criterion. # returning five data frames: # 1) the main data frame containing the results in terms of the mean ∆P("old") for targets and lures across trial quarters # (and SEs); # 2) the quartile means (and SEs) for all four test item types (LF lures, etc.); # 3) the four overall means and SEs; # 4) sample criteria across trials four two sample "subjects, one starting low (0.25 below expected # mean of the intensity distribution) and one starting high (0.25 above expected intensity mean); # 5) the distribution of intensities across test items. sequence<- function(SF1, dp, L, A, N_subjects){ library(dplyr) HF = 5; LF = 1 Frequency<- 0 N_el = 100; N_items = 80 N_seqSegment = 2 # cutting quartet means into quarters (N_seqSegment = 4), deciles (N_seqSegment =10)? 
N_trialPres = (N_items/2)/N_seqSegment N_study = N_items*2 N_types = 4; N_test = N_items*N_types indices<- seq(1:N_items) LFind<- indices[ indices %% 2 == 1] HFind<- indices[ indices %% 2 == 0] pairOrder<- vector(, N_test/N_seqSegment); testOrder<- vector(, N_test) testItems<- matrix(0, nrow=N_test, ncol=N_el) studyItems<- matrix(0, nrow=N_study, ncol=N_el) N_bins = 21 Bin_size = 0.075 Bin_start = -.525 Bin_vector<- vector(,N_bins) Critter<- matrix(0, nrow=N_test,ncol=2) Distribution<- matrix(0, nrow=N_bins, ncol=N_types) highCrit<- 0; lowCrit<- 0; crit1<- 0; crit2<- 0; crit3<- 0 hcStart<-0.325 # mean for Gaussian controlling setting of initial high criterion position hcVar<-0.025 # variance for above lcStart<-0.5 # mean for Gaussian controlling setting of initial low criterion position lcVar<-0.1 # variance for above Means<- vector(, N_types); meanError<- vector(, N_types) quartMeans<- matrix(0, nrow=N_seqSegment, ncol=N_types); quartError<- matrix(0, nrow=N_seqSegment, ncol=N_types) dpOldMeans<- matrix(0, nrow=N_seqSegment, ncol=N_types/2); dpOldError<- matrix(0, nrow=N_seqSegment, ncol=N_types/2) Summary<- matrix(0,nrow=N_subjects, ncol=N_types) summaryQ<- array(0, dim=c(N_seqSegment, N_types, N_subjects)) dPoldQ<- array(0, dim=c(N_seqSegment, N_types/2, N_subjects)) ######### functions ########################## # standard error se<- function(x){ sd(x)/sqrt(length(x)) } qMean<- function(x){ .Internal(mean(x)) } # calculate 95% CI based on t-distribution using N_subjects-1 df CI<-function(x){ y<- abs(qt(.025, N_subjects-1)) return(x*y) } # Hintzman's activation function; cube of similarity, which is dot product of two vectors, divided by # the number of relevant features (i.e, those features that are non-zero for at least one of the two vectors) getSim<-function(VectA, VectB) { VectC<-abs(VectA)+abs(VectB) nR<- length(VectC[VectC != 0]) return((VectA %*% VectB)/nR) } # eEturns the Hintzman similarity value and the tested vector: for use in picking the most 
similar vector in memory to a probe getSam<-function(VectA, VectB) { VectC<-abs(VectA)+abs(VectB) nR<- length(VectC[VectC != 0]) return(c((VectA %*% VectB)/nR, VectB)) } #constructs a single echo, normalizes it, computes discrepancy with studied item, and then # applies probabilistic weighting of elements for encoding. Is applied row-wise to memory matrix using apply(), and contents of # output summed up into a single vector using t(rowSums`(Echo)) constructE<- function(z){ Act<- getSim(Probe,z)^3 Echo<- Echo+(Act*z) return(Echo) } #### set up distribution vector ####### Bin_vector[1]<- Bin_start for (k in 2:N_bins) { Bin_vector[k]<- Bin_vector[k-1] + Bin_size } #### start N_subject simulations crit2<- 1 crit3<- 0 for (N_reps in 1: N_subjects) { highCrit<- rnorm(1, hcStart, hcVar) lowCrit<- rnorm(1, lcStart, lcVar) crit1<- lowCrit # crit1 used to record initial crit setting SF<- SF1 Qholo<- matrix(sample(c(-1,1), replace=T, N_items*N_el), nrow=N_items, byrow=T) Qholn<- matrix(sample(c(-1,1), replace=T, N_items*N_el), nrow=N_items, byrow=T) Qhnlo<- matrix(sample(c(-1,1), replace=T, N_items*N_el), nrow=N_items, byrow=T) Qhnln<- matrix(sample(c(-1,1), replace=T, N_items*N_el), nrow=N_items, byrow=T) Items<- array(0, dim=c(N_items,N_el,N_types) ) Memory<- matrix(0,nrow=0, ncol=N_el) Echo<- vector(, N_el) Probe<- vector(, N_el) HOLO<- 0; HOLN<- 0; HNLO<- 0; HNLN<- 0 LLcnt<- 0; HLcnt<- 0; LTcnt<- 0; HTcnt<- 0 # counts hits/fas HFoldLFold<- vector(,N_seqSegment); HFoldLFnew<- vector(,N_seqSegment) # counts p("old") across different sequence conditons HFnewLFold<- vector(,N_seqSegment); HFnewLFnew<- vector(,N_seqSegment) ######### test trial pairs consisting of lf (old/new) and hf (old/new) trials, ######### then combine into Item array, with item types on separate pages. 
Items[1:(N_items/2), ,1]<- Qholo[LFind, ] Items[((N_items/2)+1):N_items, ,1]<- Qhnlo[LFind, ] Items[1:(N_items/2), ,2]<- Qholo[HFind, ] Items[((N_items/2)+1):N_items, ,2]<- Qholn[HFind, ] Items[1:(N_items/2), ,3]<- Qhnln[LFind, ] Items[((N_items/2)+1):N_items, ,3]<- Qholn[LFind, ] Items[1:(N_items/2), ,4]<- Qhnln[HFind, ] Items[((N_items/2)+1):N_items, ,4]<- Qhnlo[HFind, ] ########## test array ################## # Make an array of individual test items by randomly combining trial pairs # This ensures random ordering of trials (outside of pairs), while ensuring # that there are an equal number of test items in each test condition # weave together pairs from quartet arrays, and form testOrder vector (coding for individual trial conditions) from pairOrder vector #test PAIR order: 1 = HnLn, 2 = HnLo, 3 = HoLn, 4 = HoLo # test ITEM order: 1= Low old, 2 = High old, 3 = Low new, 4 = High new pairOrder<- c(sample(rep(seq(1:N_types), each = N_trialPres)), sample(rep(seq(1:N_types), each = N_trialPres)), sample(rep(seq(1:N_types), each = N_trialPres)), sample(rep(seq(1:N_types), each = N_trialPres)) ) j<-1; k<- 1; m<- 1; n<- 1; v<- 1 for (i in 1:(N_test/2) ) { switch(pairOrder[i], "1" = {testItems[j:(j+1), ]<- Qhnln[k:(k+1), ] testOrder[j]<- 3 testOrder[(j+1)]<- 4 k<- k+2 }, "2" = {testItems[j:(j+1), ]<- Qhnlo[m:(m+1), ] testOrder[j]<- 1 testOrder[(j+1)]<- 4 m<- m+2 }, "3" = {testItems[j:(j+1), ]<- Qholn[n:(n+1), ] testOrder[j]<- 3 testOrder[(j+1)]<- 2 n<- n+2 }, "4" = {testItems[j:(j+1), ] = Qholo[v:(v+1), ] testOrder[j]<- 1 testOrder[(j+1)]<- 2 v<- v+2 } ) j<- j+2 } ####### pre-study: how accessible are memories from before the study period ####### j<- 0 for (i in 1:N_types) { if (i == 1 || i == 3) Frequency<- LF if (i == 2 || i == 4) Frequency<- HF for (k in 1:Frequency) { pV<- matrix(sample(c(0,1), replace=T, prob=c(1-A,A), N_items*N_el), nrow=N_items, ncol=N_el) Memory<- rbind(Memory, pV*Items[, , i]) } } ##### study ##### studyItems[1:N_items, ]<- Items[ , ,1] 
studyItems[(N_items+1):N_study, ]<- Items[ , ,2] studyItems[sample(nrow(studyItems)),] for (j in 1:N_study) { Echo<- vector(, N_el) Probe<- studyItems[j, ] # for (i in 1:nrow(Memory)){Echo<- Echo + Memory[i,]*getSim(Memory[i,], Probe )^3 } EchoM<- apply(Memory, 1, constructE ) Echo<- as.vector(t(rowSums(EchoM))) Echo<- Echo/max(abs(Echo)) pE<- sample(c(0,1),N_el,replace=T, prob=c(1-L,L)) Echo<- pE*(Probe-Echo) Memory<- rbind(Memory,Echo) } #### test #### for (j in 1:N_test) { Evidence <- 0 Intensity<- 0 Echo<- vector(,N_el) Probe<- testItems[j,] recallProbe<- Probe EchoM<- apply(Memory, 1, constructE ) Echo<- as.vector(t(rowSums(EchoM))) Intensity<- getSim(Probe, Echo) #strength intensity # recognition with normed Echo Echo<- Echo/max(abs(Echo)) Intensity<- getSim(Probe, Echo) # clarity intensity Evidence<- Intensity - lowCrit lowCrit<- lowCrit + SF*tanh(Evidence)^3 SF<- SF - dp*SF ## recall: test normed, resubmitted echo with fixed, high criterion tested against individual traces (or, resubmit probe....) 
if (Evidence <= 0) { # Probe<- Echo Echo<- vector(, N_el) EchoM<- apply(Memory, 1, constructE ) Echo<- as.vector(t(rowSums(EchoM))) Echo<- Echo/max(abs(Echo)) rTest<- apply(Memory, 1, function(x) getSam(Echo, x) ) # returns a matrix of 101 X M, where M = # of memory traces; first row element = similarity rSim<- max(rTest[1,]) maxTrace<- rTest[2:101, which(rTest[1,]== rSim) ] if (rSim > highCrit){ if (getSim(recallProbe, maxTrace) > highCrit){ Evidence<- 1.0 # recall to accept }else{ Evidence<- -1.0 # recall to reject } } } if ( (crit2 == 1 && crit1 <= lcStart-(lcStart*.2)) || (crit2 == 2 && crit1 >= lcStart+(lcStart*.2)) ){ Critter[j,crit2]<- lowCrit crit3<- 1 } # construction of presentation pairs: used to evaluate performance for 10 LF-HF pairs per quarter: w indexes quarters w<- 0 switch(testOrder[j], "1"={}, "2"={ if (j > 2 && testOrder[j-1] == 1) { HOLO<- HOLO+1 w<- ceiling(HOLO/N_trialPres) # N_trialPres = number of pairs/quarter = 10 }else{ HOLN<- HOLN+1 w<- ceiling(HOLN/N_trialPres) } }, "3"={}, "4"={ if (j > 2 && testOrder[j-1] == 1) { HNLO<- HNLO+1 w<- ceiling(HNLO/N_trialPres) }else{ HNLN<- HNLN+1 w<- ceiling(HNLN/N_trialPres) } } ) # scoring a P('old') response if (Evidence > 0.0) { switch(testOrder[j], "1"={ LTcnt<- LTcnt + 1.0 }, "2"={ HTcnt<- HTcnt + 1.0 if (j > 2 && testOrder[j-1] == 1) { HFoldLFold[w]<- HFoldLFold[w]+1.0 } else { HFoldLFnew[w]<- HFoldLFnew[w]+1.0 } }, "3"={ LLcnt<- LLcnt + 1.0 }, "4"={ HLcnt<- HLcnt + 1.0 if (j > 2 && testOrder[j-1] == 1) { HFnewLFold[w]<- HFnewLFold[w]+1.0 }else{ HFnewLFnew[w]<- HFnewLFnew[w]+1.0 } } ) } # end big if (Evidence > 0) for (k in 1:N_bins) { if (Intensity >= Bin_vector[k]-Bin_size/2 && Intensity <= Bin_vector[k]+Bin_size/2 ) { Distribution[k,testOrder[j] ]<- Distribution[k,testOrder[j]] + 1.0 } } } # end of test loop # adjust crit counters to stock collecting after 2 good subjects if (crit3 == 1) { crit2<- crit2+1 crit3 <- 0 } Summary[N_reps, ]<- c( LLcnt/N_items, HLcnt/N_items, HTcnt/N_items, 
LTcnt/N_items) for (i in 1:N_seqSegment ){ summaryQ[ i, , N_reps]<- c( HFnewLFnew[i]/N_trialPres, HFnewLFold[i]/N_trialPres, HFoldLFnew[i]/N_trialPres, HFoldLFold[i]/N_trialPres) dPoldQ[i, , N_reps]<- c( HFnewLFnew[i]/N_trialPres - HFnewLFold[i]/N_trialPres, HFoldLFnew[i]/N_trialPres - HFoldLFold[i]/N_trialPres) } } # end of simulation (subject ) loop # averaging across subjects Means<- colMeans(Summary) meanError<- apply(Summary, 2, se) quartMeans<- apply(summaryQ, c(1,2), qMean) # each row of quartMeans holds means for one test ntile across test items, with columns holding means for a test item across test quaters quartError<- apply(summaryQ, c(1,2), se) dpOldMeans<- apply(dPoldQ, c(1,2), qMean) dpOldError<- apply(dPoldQ, c(1,2), se) #turn these into data frames dPold<- data.frame(Quartiles=factor(N_seqSegment*2, levels = seq(1:N_seqSegment)), "Test.Item"=factor(N_seqSegment*2, levels=c("Lure", "Target")), "Mean.deltaP" = numeric(N_seqSegment*2),SE = numeric(N_seqSegment*2)) dPold[,1]<- rep(seq(1:N_seqSegment), times=2) dPold[1:N_seqSegment,2]<- "Lure" dPold[(N_seqSegment+1):(N_seqSegment*2),2]<- "Target" dPold[1:N_seqSegment,3]<- dpOldMeans[,1] dPold[(N_seqSegment+1):(N_seqSegment*2),3]<- dpOldMeans[,2] dPold[1:N_seqSegment,4]<- dpOldError[,1] dPold[(N_seqSegment+1):(N_seqSegment*2),4]<- dpOldError[,2] dPold<- dPold %>% mutate("CI"=CI(SE)) qMeans<- data.frame(Quartiles=factor(N_seqSegment*N_types, levels = seq(1:N_seqSegment)), "Test.Item"=factor(N_seqSegment*N_types,levels=c("HFnew(LFnew)", "HFnew(LFold)","HFold(LFnew)","HFold(LFold)" )), "Mean.P.old" = numeric(N_seqSegment*N_types),SE = numeric(N_seqSegment*N_types)) qMeans[,1]<- c(rep(seq(1:N_seqSegment), times=4)) qMeans[1:N_seqSegment,2]<- "HFnew(LFnew)" qMeans[(N_seqSegment+1):(N_seqSegment*2),2]<- "HFnew(LFold)" qMeans[((N_seqSegment*2)+1):(N_seqSegment*3),2]<- "HFold(LFnew)" qMeans[((N_seqSegment*3)+1):(N_seqSegment*4),2]<- "HFold(LFold)" qMeans[1:N_seqSegment,3]<- quartMeans[,1] 
qMeans[(N_seqSegment+1):(N_seqSegment*2),3]<- quartMeans[,2] qMeans[((N_seqSegment*2)+1):(N_seqSegment*3),3]<- quartMeans[,3] qMeans[((N_seqSegment*3)+1):(N_seqSegment*4),3]<- quartMeans[,4] qMeans[1:N_seqSegment,4]<- quartError[,1] qMeans[(N_seqSegment+1):(N_seqSegment*2),4]<- quartError[,2] qMeans[((N_seqSegment*2)+1):(N_seqSegment*3),4]<- quartError[,3] qMeans[((N_seqSegment*3)+1):(N_seqSegment*4),4]<- quartError[,4] qMeans<- qMeans %>% mutate("CI" = CI(SE)) allMeans<- data.frame("Test.Item"=factor(4, levels=c("LF Lure", "HF Lure","HF Target","LF Target" )), "Mean.P.old" = numeric(4),SE = numeric(4)) allMeans[,1]<- as.factor(c("LF Lure", "HF Lure","HF Target","LF Target" )) allMeans[,2]<- as.vector( t(t(Means)) ) # t(t()) because first transpose turns vector into matrix without changing shape allMeans[,3]<- as.vector( t(t(meanError))) allMeans$Test.Item<- factor(allMeans$Test.Item, levels=c("LF Lure", "HF Lure","HF Target","LF Target" )) allMeans<- allMeans %>% mutate("CI"=CI(SE)) Crit<- data.frame(Trial = numeric(N_test*2), Type=factor(N_test*2, levels=c("Low", "High")), Criterion = numeric(N_test*2)) Crit[,1]<- rep(seq(1:N_test), times = 2) Crit[,2]<- as.factor(rep(c("Low", "High"), each = N_test )) Crit[1:N_test,3]<- Critter[,1] Crit[(N_test+1):(N_test*2),3]<- Critter[,2] Dist<- data.frame(bins=numeric(N_bins*N_types), "Test.Item"=factor(N_bins*N_types, levels=c("LF Target", "HF Target", "LF Lure","HF Lure" ) ), Count = numeric(N_bins*N_types) ) Dist[,1]<- rep(Bin_vector, times=N_types) Dist[,2]<- as.factor(rep(c("LF Target", "HF Target","LF Lure", "HF Lure" ), each = N_bins)) Dist[1:N_bins,3]<- Distribution[,1] Dist[(N_bins+1):(N_bins*2),3]<- Distribution[,2] Dist[((N_bins*2)+1):(N_bins*3),3]<- Distribution[,3] Dist[((N_bins*3)+1):(N_bins*4),3]<- Distribution[,4] outs<- list(dPold, qMeans, allMeans, Crit, Dist) return(outs) } # end of function # set up multiple plots with ggplot, From Cookbook for R # ggplot objects can be passed in ..., or to plotlist (as a 
list of ggplot objects) # - cols: Number of columns in layout # - layout: A matrix specifying the layout. If present, 'cols' is ignored. # # If the layout is something like matrix(c(1,2,3,3), nrow=2, byrow=TRUE), # then plot 1 will go in the upper left, 2 will go in the upper right, and # 3 will go all the way across the bottom. multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) { library(grid) # Make a list from the ... arguments and plotlist plots <- c(list(...), plotlist) numPlots = length(plots) # If layout is NULL, then use 'cols' to determine layout if (is.null(layout)) { # Make the panel # ncol: Number of columns of plots # nrow: Number of rows needed, calculated from # of cols layout <- matrix(seq(1, cols * ceiling(numPlots/cols)), ncol = cols, nrow = ceiling(numPlots/cols)) } if (numPlots==1) { print(plots[[1]]) } else { # Set up the page grid.newpage() pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout)))) # Make each plot, in the correct location for (i in 1:numPlots) { # Get the i,j matrix positions of the regions that contain this subplot matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE)) print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row, layout.pos.col = matchidx$col)) } } }
60c73439a5ed4ceea408534c6fc1dbc9a8a29cfa
ed6b76d240b43781fef5d74445f0eb73fadc1bb5
/R/debinfer_utils.R
bebbc90e7d54cb3db6af8ac8649922f5d94969d0
[]
no_license
pboesu/debinfer
36fe4b8b803c6620e7d7f597a98dfe5393a14d88
6642195ed327f5e1fd262ddc04f51e20eb0298ef
refs/heads/master
2022-11-17T19:38:29.143214
2022-11-16T17:16:49
2022-11-16T17:16:49
56,190,045
14
5
null
2019-12-10T16:43:45
2016-04-13T22:23:44
R
UTF-8
R
false
false
2,604
r
debinfer_utils.R
#more utility functions for the inference outputs #' Summary of the inference results #' #' A wrapper for coda::summary.mcmc #' #' @param object a deBInfer_result object #' @param ... further arguments to summary.mcmc #' @seealso \code{\link[coda]{summary.mcmc}} #' @import coda #' @export summary.debinfer_result <- function(object, ...){ summary(object$samples, ...) } #' is.debinfer_result #' #' Check debinfer_result class #' #' @param x an object #' @export is.debinfer_result <- function(x){ if (inherits(x, "debinfer_result")) TRUE else FALSE } #' is.debinfer_parlist #' #' Check debinfer_parlist class #' #' @param x an object #' @export is.debinfer_parlist <- function(x){ if (inherits(x, "debinfer_parlist")) TRUE else FALSE } #' Get starting/fixed values of DE initial values #' #' Accessor function for initial values #' #' @param x a debinfer_result or debinfer_parlist object #' @return a named numeric vector #' @export deinits <- function(x){ if (is.debinfer_result(x)){ is.init <- vapply(x$all.params, function(x) x$var.type, character(1))=="init" inits <- vapply(x$all.params, function(x) x$value, numeric(1))[is.init] return(inits) } else { if (is.debinfer_parlist(x)){ is.init <- vapply(x, function(x) x$var.type, character(1))=="init" inits <- vapply(x, function(x) x$value, numeric(1))[is.init] return(inits) } else NULL} } #' Get starting/fixed values of DE parameters #' #' Accessor function for parameters #' #' @param x a debinfer_result or debinfer_parlist object #' @return a named numeric vector #' @export depars <- function(x){ if (is.debinfer_result(x)){ is.depar <- vapply(x$all.params, function(x) x$var.type, character(1))=="de" depars <- vapply(x$all.params, function(x) x$value, numeric(1))[is.depar] return(depars) } else { if (is.debinfer_parlist(x)){ is.depar <- vapply(x, function(x) x$var.type, character(1))=="de" depars <- vapply(x, function(x) x$value, numeric(1))[is.depar] return(depars) } else NULL} } #' Reshape posterior model solutions #' #' Take a 
list of DE model solutions and transform into a list of of matrices, one for each state variable, where each row is an iteration, and each column is a time point #' #' @param x a post_sim object #' @import plyr #' @export reshape_post_sim <- function(x){ if(!inherits(x, "post_sim")) stop("input not of class 'post_sim'") out <- list() out$time <- x[[1]][,'time'] for (i in 2:ncol(x[[1]])){ name <- colnames(x[[1]])[i] out[[name]] <- plyr::laply(x, function(x) x[,i]) } return(out) }
a1571481bc94684f4962b4b3863c41fd4eb9d48c
48b40fd678b06843cc2a0c12c7f6ae7b5028bf57
/man/fix_hour.Rd
08f79926b974db6936567314da4bda573013d3e7
[]
no_license
BayAreaMetro/gtfsr
f57c68f163a1df0dd47f7743a27aee43130a2a84
e14fc6c4102eba019fe6c255f65ddca1c5cee372
refs/heads/master
2020-03-09T21:32:00.962025
2019-02-16T21:27:12
2019-02-16T21:27:12
129,011,655
0
0
null
2018-04-17T17:40:35
2018-04-11T00:39:45
R
UTF-8
R
false
true
364
rd
fix_hour.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/mtc_utilities.R \name{fix_hour} \alias{fix_hour} \title{Format GTFS Time strings as standard time string} \usage{ fix_hour(x) } \arguments{ \item{a}{GTFS Time string} } \value{ Time string with no hours greater than 24 } \description{ Format GTFS Time strings as standard time string }
d0453b5057b51336f3c185641f82dc560b5497e3
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/rgdal/examples/GridsDatums.Rd.R
d8ec85ac0536746ab0694c704d53b26352cdba7b
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
318
r
GridsDatums.Rd.R
library(rgdal) ### Name: GridsDatums ### Title: Grids and Datums PE&RS listing ### Aliases: GridsDatums ### Keywords: datasets ### ** Examples data(GridsDatums) GridsDatums[grep("Norway", GridsDatums$country),] GridsDatums[grep("Google", GridsDatums$country),] GridsDatums[grep("^Mars$", GridsDatums$country),]
65b5f6f44f320e29d7a37b4c80cd7bfc472b9819
5e206bb3b8bb3e2e7c24f051b675d6bfd36e6146
/ITMD_527_Data_Analytics/Assignment1/Q2.R
50a6753d540e74b59e8413caf088209006c954b4
[]
no_license
Schavan7/CollegeProjects
9274c5ecff122365a28ed50e7936262c31198fa2
6e9a2f3deaae0e8e4460ba526cf3a86b7a38cdac
refs/heads/master
2021-04-28T16:36:53.668740
2018-02-25T17:54:34
2018-02-25T17:54:34
122,018,758
0
0
null
null
null
null
UTF-8
R
false
false
1,325
r
Q2.R
# ITMD 527 Assignment 1, Question 2: regression analysis of the gas mileage
# data set; "gasmf.csv" is expected in the working directory.
gasmf = read.csv("gasmf.csv", header = TRUE)
str(gasmf)
summary(gasmf)
# Drop the ID column: having both X (the ID column) and x1 (an actual
# independent variable) was confusing.
gasmf$X=NULL

#1. Fit the reduced model y ~ x1 + x6 and inspect diagnostics.
model =lm(y~x1+x6, data = gasmf)
summary(model)
abline(model)
plot(model)
par(mfrow=c(2,2))

#2 Compare the full model (all predictors) against the reduced model.
fullmodel = lm(y~. ,data=gasmf)
reducedModel = lm(y~x1+x6, data = gasmf)
anova(fullmodel, reducedModel)
anova(reducedModel)
names(gasmf)

#3. Summary statistics of the reduced model.
summary(reducedModel)

#4 95% confidence interval for the x1 coefficient.
confint(reducedModel,"x1", level = 0.95)

#5 Re-fit and test the individual coefficients.
reducedModel = lm(y~x1+x6, data = gasmf)
summary(reducedModel)
# The null hypothesis can be rejected for x1 but not for x6, because the
# p-value for x6 is greater than 0.05.
gasmf$x1
x1p = lm(y~x1, gasmf)
summary(x1p)
gasmf$x6
x6p = lm(y~x6, gasmf)
summary(x6p)

#6 95% confidence interval for the mean response at x1=275, x6=2.
predict(reducedModel, data.frame(x1=275, x6=2), interval="confidence",level= 0.95)

#7 95% confidence interval for the mean response at x1=257, x6=2.
predict(reducedModel, data.frame(x1=257,x6=2), interval="confidence",level= 0.95)

#8 Same prediction from the model with x1 only, for comparison with #7.
reducedModelWithX = lm(y~x1, data = gasmf)
predict(reducedModelWithX, data.frame(x1=257), interval="confidence",level= 0.95)
# Compare the two models from questions 7 and 8.

#9 Questions 7 and 8 give different CIs: removing x6 decreases the CI of the
# curve, which is good for the regression.
c9b9e7442d05610071c5e5fe0cc888ccef06039e
d467e26218f698bf656095a0fdcaa52a3a30ba90
/man/cnk_ngg.Rd
e0b08bc241819fc94c79cd248834f554f94ba68c
[]
no_license
AABI-Gibbs-type-priors/AABI-RGibbs-type-priors
8c302fed368a5598d48dccde274768d54e4102a0
2dd3fee993fa330043a199c0a8c152023f6379bd
refs/heads/main
2023-01-11T23:05:09.266336
2020-11-13T20:35:40
2020-11-13T20:35:40
312,677,121
0
0
null
null
null
null
UTF-8
R
false
true
529
rd
cnk_ngg.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/RGibbsTypePriors.R \name{cnk_ngg} \alias{cnk_ngg} \title{Computes the generalized binomial coefficients} \usage{ cnk_ngg(n, k, sigma, prec = 53) } \arguments{ \item{n}{The value for n} \item{k}{The value for k} \item{sigma}{The value for sigma} \item{prec}{The number of bits of precision used during internal computation} } \value{ The value of the generalized binomial coefficient } \description{ Computes the generalized binomial coefficients }
eab079360493b29809bd0ec9390d8948e8b42196
4f0b6567a19d2babeafa616c6d8ddf5572004c37
/data-science-scripts/zach/diamonds subsets.R
760fad1295606e74ea05684b96276f37dcb9f275
[]
no_license
mcohenmcohen/DataRobot
a9b8fff8b0532e9066a207c1914b12702abbf48c
156e548d94d033b032d5027f2e436a13ddb85cf8
refs/heads/master
2022-09-01T18:14:29.587815
2022-08-23T20:54:59
2022-08-23T20:54:59
134,179,047
0
1
null
null
null
null
UTF-8
R
false
false
457
r
diamonds subsets.R
# Write subsets of the ggplot2 'diamonds' data set to gzip-compressed CSV
# files of various sizes under ~/datasets/.
library(ggplot2)

# Sizes 100, 500, 1000, 5000, 10000, 50000 (n and 5*n for n = 10^2..10^4).
for(i in 2:4){
  n <- 10^i
  for(m in c(n, 5*n)){
    file <- paste0('~/datasets/diamonds', m, '.csv.gz')
    print(file)
    write.csv(head(diamonds, m), gzfile(file))
  }
}

# Sizes 2000, 3000, 4000.
for(n in 2:4){
  m <- n * 1000
  file <- paste0('~/datasets/diamonds', m, '.csv.gz')
  print(file)
  write.csv(head(diamonds, m), gzfile(file))
}

# One additional 2500-row subset.
m <- 2500
file <- paste0('~/datasets/diamonds', m, '.csv.gz')
print(file)
write.csv(head(diamonds, m), gzfile(file))
0ced40abe22e98a4d80cb2d63eb00e52f4fe8d82
a01fe8e31792d79dab00764ef6ae505e699524b4
/R/ThetaMultiplicative.R
d965ba3259e65625de205dec6670570fc82ba5eb
[]
no_license
dpwynne/mmnst
7c5972d75fad0d983c65ac044cf2b566531720e5
cadda88522a025115eb9a163510629ec2f55c672
refs/heads/master
2023-08-06T23:43:19.372858
2023-07-26T15:47:40
2023-07-26T15:47:40
202,798,431
4
2
null
2019-12-18T19:46:11
2019-08-16T20:59:31
R
UTF-8
R
false
false
1,491
r
ThetaMultiplicative.R
#' Multiplicative intensity function \eqn{\theta(t)}
#'
#' Calculates the multiplicative intensity function \eqn{\theta(t)} introduced
#' in Ramezan *et al.* (2014).
#'
#' @param t a numeric vector of time points at which to evaluate the function.
#' @param f a numeric vector containing frequency values.
#' @param w0 a numeric vector containing initial phase values.
#' @param eta a numeric vector containing \eqn{\eta} values (contribution of
#'   each periodic component to the intensity function).
#' @param gamma a numeric vector containing \eqn{\gamma} values (amplitude of
#'   each periodic component in the function).
#' @param terminal.points a numeric vector containing the endpoints of the
#'   dyadic partitioning.
#' @param ct a numeric vector containing the estimated piecewise constant
#'   intensity function \eqn{c(t)}. The length of ct should be a whole number
#'   power of 2.
#'
#' @return A numeric vector containing the values of the multiplicative
#'   intensity function calculated at the given time points.
#'
#' @references Ramezan, R., Marriott, P., and Chenouri, S. (2014),
#'   *Statistics in Medicine*, **33**(2), 238-256. doi: 10.1002/sim.5923.
#'
#' @export
ThetaMultiplicative <- function(t, f, w0, eta, gamma, terminal.points, ct) {
  # Piecewise-constant baseline c(t) evaluated at the requested time points.
  baseline <- CtAllPoints(t, terminal.points, ct)
  # Periodic modulation: (1 - sum(eta)) plus the eta/gamma-weighted sum of
  # the periodic components nu(f * t + w0).
  modulation <- (1 - sum(eta)) + sum(eta * gamma * nu(f * t + w0))
  baseline * modulation
}
e76b38c12902681c8e9289f5185dd9c740c68ab6
837a3177789464eabb12b7abfb12e8621feb71fb
/(2)Diagnostic_plots/(01d)plotChl_a_compare.R
acfe72772579bdbae541bbbe5f494a1c91df78df
[]
no_license
mcgregorv/AtlantisRscripts
21140225d43ba583a1bebc70557c8cb5f61b9b5c
1009f0d1961fc95bc4a98d25eea7dc1d7cccee77
refs/heads/master
2020-07-11T14:20:34.142538
2019-08-26T22:22:13
2019-08-26T22:22:13
204,566,512
0
0
null
null
null
null
UTF-8
R
false
false
6,916
r
(01d)plotChl_a_compare.R
library(ggmap) library(rgdal) library(gtable) library(maps) library(mapdata) library(rgeos) source(paste(DIR$'General functions',"\\get_first_number.R",sep="")) source(paste(DIR$'General functions',"formatShape.R",sep="")) this_run<-"base" this_out<-"TEST150yrfish" burnin<-35 #number of years to skip in plot this_path<-paste(DIR$'Base',"ATLANTISmodels\\",this_run,"\\",sep="") outPath<-paste(this_path,"output",this_out,"\\",sep="") baseOutPath<-paste(this_path,"output",base_out,"\\", sep="") ################################ #read in satelite data months<-c("jan","feb","mar","apr","may","jun","jul","aug","sep","oct","nov","dec") nm<-length(months) chl_a_all<-data.frame(matrix(NA,ncol=4,nrow=0)) colnames(chl_a_all)<-c("lon","lat","chl_a","month") for(m in 1:nm){ thisMonth<-months[m] thisMonthNC<-nc_open(paste(DIR$'Data',"chlorophyll\\",thisMonth,"_chl.nc",sep="")) monthTracers<-sort(names(thisMonthNC$var)) this_chlor_a<-ncvar_get(thisMonthNC,"chlor_a") thisLon<-ncvar_get(thisMonthNC,"lon") thisLat<-ncvar_get(thisMonthNC,"lat") rownames(this_chlor_a)<-thisLon; colnames(this_chlor_a)<-thisLat chl_a_df<-adply(this_chlor_a,c(1,2)) colnames(chl_a_df)<-c("lon","lat","chl_a") chl_a_df$month<-thisMonth chl_a_all<-rbind(chl_a_all,chl_a_df) } thisMax<-max(chl_a_all$chl_a,na.rm=TRUE) ##assign to polygon source(paste(DIR$'General functions',"findMyPolygon.R",sep="")) colRamp<-colorRampPalette(colors=c(myYellow,myOrange,myRed,myPurple,myBlue,myGreen)) #get box boundaries. 
this brings in dynamicBoxes thisFile<-paste(this_path,"..\\inputs\\bgm\\Chatham_Boxes.R",sep="") source(thisFile) nboxes<-length(dynamicBoxes) names(dynamicBoxes[[1]]) # "x" "y" "minDepth" "maxDepth" chl_a_all$box<-NA # test_chl_a_all<-chl_a_all[1:100,] # test_chl<-chl_a_all # test_chl$lon<-as.double(as.character(test_chl$lon)) # test_chl$lat<-as.double(as.character(test_chl$lat)) # test<-mapply(FUN=findMyPolygon,x=test_chl$lon,y=test_chl$lat,boxes=dynamicBoxes,nboxes) for(i in 1:(nrow(chl_a_all))){ xx<-findMyPolygon(x=as.double(as.character(chl_a_all$lon[i])),y=as.double(as.character(chl_a_all$lat[i])),boxes=dynamicBoxes,nboxes) if(length(xx)==1){ chl_a_all$box[i]<-xx } } index<-chl_a_all$chl_a==-9999 chl_a_all$chl_a[index]<-NA monthsOrder<-cbind(months,seq(1,12)) chl_by_month<-tapply(chl_a_all$chl_a,chl_a_all[,c("month")],mean,na.rm=TRUE) chl_by_box<-tapply(chl_a_all$chl_a,chl_a_all[,c("box")],mean,na.rm=TRUE) chl_by_boxMonth<-tapply(chl_a_all$chl_a,chl_a_all[,c("month","box")],mean,na.rm=TRUE) plotByMonths<-as.double(chl_by_month[match(months,names(chl_by_month))]) plot(plotByMonths,type="h",xaxt="n",lend=1,lwd=5,col=myGrey_trans,xlab="",ylab="Chl_a") axis(at=seq(1,12),labels=months,side=1,las=2) plot(chl_by_box,type="h",lend=1,lwd=5,col=myGrey_trans,xlab="Box number",ylab="Chl_a") ####################### #read in tracers file thisBaseOut<-paste("Short",sep="") # thisFishOut<-paste("FishShort",sep="") this_path<-paste(DIR$'Base',"ATLANTISmodels\\",this_run,"\\",sep="") baseOutPath<-paste(this_path,"output",thisBaseOut,"\\",sep="") plotPath<-paste(this_path,"..\\Figures\\",this_run,"\\",this_out,"",sep="") plotBurnIn<-FALSE daysTimeStep<-365 numStepsPerYear<-365/daysTimeStep year0<-1900 #this is the first year of the burn-in part fishingStartYear<-1900 #first year historical catches are removed modelStartYear<-1920 #read in the base tracers file BaseNC.nc<-nc_open(paste(baseOutPath,"output.nc",sep="")) baseVol<-ncvar_get(BaseNC.nc,"volume") 
baseDz<-ncvar_get(BaseNC.nc,"dz") base_nts<-dim(baseVol)[3] chl_a_tracer<-ncvar_get(BaseNC.nc,"Chl_a") chl_tracer_byBox<-apply(chl_a_tracer,2,mean,na.rm=TRUE) chl_tracer_BoxIC<-apply(chl_a_tracer[,,1],2,mean,na.rm=TRUE) chl_tracer_BoxEnd<-apply(chl_a_tracer[,,(base_nts-1)],2,mean,na.rm=TRUE) par(mfrow=c(4,3),mar=c(3,3,0,0)) for(m in 1:nm){ thisMonth<-months[m] thisData<-chl_by_boxMonth[thisMonth,] plot(thisData,type="h",lwd=5,lend=1,col=myRed) mtext(thisMonth,side=3,adj=0,line=-1) par(new=TRUE) plot(chl_tracer_byBox[2:25],type="l",lwd=2) } for(m in 1:nm){ plot(x=chl_tracer_byBox[2:25],y=chl_by_box,ylim=c(0,1),xlim=c(0,1)) points(x=chl_tracer_byBox[2:25],chl_by_boxMonth[m,],col=myOrange,pch=8) mtext(rownames(chl_by_boxMonth)[m]) points(x=c(0,1),y=c(0,1),col="red",lty=2,type="l") } for(m in 1:nm){ plot(x=chl_tracer_BoxEnd[2:25],y=chl_by_box,ylim=c(0,1),xlim=c(0,1)) points(x=chl_tracer_BoxEnd[2:25],chl_by_boxMonth[m,],col=myGreen,pch=8) mtext(rownames(chl_by_boxMonth)[m]) points(x=c(0,1),y=c(0,1),col="red",lty=2,type="l") } #################### plot spatially ################# chl_colors<-rev(rainbow(n=11,start=0.05,end=0.8)) chl_max<-max(chl_tracer_byBox,na.rm=TRUE) getCol<-function(x){ y<-round((log(10*x))/(log(10*chl_max)),1)*10 # y<-min(1,y); y<-max(0,y) ycol<-chl_colors[y] return(ycol) } chl_col_tracers<-unlist(lapply(chl_tracer_byBox,FUN=getCol)) plot(seq(1,11),col=rev(chl_colors),pch=8) #read in shape file shapeFile<-paste(DIR$'Base',"ATLANTISmodels\\inputs\\bgm\\CHAT30_LL",sep="") sdata<-read.shapefile(shapeFile) shape<-formatShape(shapeFile=shapeFile) ns<-length(shape) SpDF <- SpatialPolygonsDataFrame(shape,data.frame( z=1:ns, row.names=paste("P",seq(1,(ns)),sep=""))) labels<-seq(1,(ns)) pdf("test.pdf") plot(shape) LABELpOS<-polygonsLabel(shape, labels = labels, cex=.1,doPlot=FALSE) dev.off() labeldf<-data.frame(cbind("x"=LABELpOS[1:ns],"y"=LABELpOS[(ns+1):(2*ns)])) # pdf(paste(plotPath,"plotChl_a.pdf",sep="")) # 
par(mfrow=c(3,2),mar=c(0,0,0,0),oma=c(0,0,0,0)) # for(t in 1:nts){ # timeData<-thisData[,t] # timeColors<-unlist(lapply(timeData,getCol)) plot(shape) map('nzHires',add=TRUE,col="black",lwd=2) map.axes() for(plotB in 1:dim(labeldf)[1]){ polygon(sdata$shp$shp[[plotB]]$points,col=chl_col_tracers[plotB-1]) } plot(chl_tracer_BoxIC[2:25],type="h",lwd=3,col=myBlue,lend=1) points(chl_by_box,type="h",lwd=7,col=myGrey_trans) plot(x=seq(1,24,by=1),y=chl_by_box,type="h",lend=1,lwd=5,col=myGrey_trans,xlab="Box number",ylab="Chl_a") points(x=seq(1.5,24.5,by=1),y=chl_tracer_byBox[2:25],type="h",lend=1,lwd=5,col=myBlue) ################################ # # # this_nbox<-length(sdata$shp$shp) # boxes<-seq(1,this_nbox) # # groupsDF<-read.csv(paste(this_path,"CRAM_groups.csv",sep="")) # # if(thisGroup %in% groupsDF$Code){ # thisName<-str_trim(groupsDF$Name[groupsDF$Code==thisGroup],side="both") # } else{ # thisName<-thisGroup; thisVar<-thisGroup # } # # #read in nc file # ThisNC.nc<-nc_open(paste(outPath,"output.nc",sep="")) # thisVol<-ncvar_get(ThisNC.nc,"volume") # thisDz<-ncvar_get(ThisNC.nc,"dz") # # nts<-dim(thisVol)[3] #number of timesteps
98d6a78dff61c83cfa948c589379211d482c091f
410f2d89246c991adbb0cc5893f6d3eddb5cbb02
/func_outlier_removal.R
c99df2c21e6eea400b23a70f23e79e265994ae65
[]
no_license
muluayele999/HAPPI_GWAS
e1783ed97c829cc59f832b3be4c208064534e7e9
97761f6b521aa984b81e2dd9184115120e1cb5ec
refs/heads/master
2022-06-16T12:26:49.947704
2020-05-15T18:38:35
2020-05-15T18:38:35
null
0
0
null
null
null
null
UTF-8
R
false
false
2,631
r
func_outlier_removal.R
## Outlier removal function.
##
## For each measurement column (from `start_column` onward) the function fits
## either a random-intercept model (when `by_column` names more than one
## grouping column) or an intercept-only lm, computes internally studentized
## residuals, and flags values whose absolute studentized residual exceeds a
## Bonferroni-style t-quantile threshold. Flagged values are replaced by NA.
##
## Args:
##   dat:          data frame; column 1 is an ID (coerced to character), all
##                 remaining columns are coerced to numeric.
##   by_column:    indices of the grouping columns. Length > 1 triggers the
##                 mixed-model branch; length 1 uses an intercept-only lm.
##   start_column: index of the first measurement column.
##
## Returns: when outliers were found, a list with the cleaned data
## ("Outlier_removed_data"), the removed rows ("Outlier_data") and the raw
## outlier indices ("Outliers_residuals"); otherwise the (coerced) data frame.
outlier_removal <- function(dat, by_column = c(1, 2), start_column = 3){

  # Column 1 is the identifier; every other column is expected numeric.
  dat[,1] <- as.character(dat[,1])
  for (i in 2:ncol(dat)) {
    dat[,i] <- as.numeric(dat[,i])
  }

  # Random-intercept term labels, e.g. "(1|Line)", one per grouping column.
  if (length(by_column) > 1) {
    termlabels <- c()
    for (i in 1:length(by_column)) {
      temp <- paste("(1|", colnames(dat)[i], ")", sep = "")
      termlabels <- c(termlabels, temp)
    }
  }

  # Bonferroni-corrected critical value of the t distribution.
  threshold <- qt(1-.05/(2*nrow(dat)), (nrow(dat)-3))

  # For each measurement column: indices of observations whose internally
  # studentized residual exceeds the threshold.
  outliers_residuals <- apply(dat[, start_column:ncol(dat)], 2, FUN = function(x){
    if (length(by_column) > 1) {
      # NOTE(review): lmer comes from lme4, which is not loaded here -- the
      # caller must have lme4 attached for this branch to work.
      lme <- lmer(formula = reformulate(termlabels = termlabels, response = "x"),
                  data = dat, REML = TRUE)
    } else if (length(by_column) == 1) {
      lme <- lm(x ~ 1, data = dat)
    }
    res <- residuals(lme)
    H <- hatvalues(lme)
    # Fix: use the exact component name instead of the `$sigm` partial match.
    sigma <- summary(lme)$sigma
    sres <- sapply(1:length(res), function(i) res[[i]]/(sigma*sqrt(1-H[[i]])))
    which(abs(sres) > threshold)
  })

  # apply() returns integer(0) only when nothing at all was flagged and the
  # per-column results could be simplified; otherwise it is a (possibly
  # ragged) structure with one entry per measurement column.
  if (!identical(outliers_residuals, integer(0))) {
    temp_outliers_residuals <- outliers_residuals
    outlier_dat <- data.frame()

    # Record and then blank out (NA) every flagged value, column by column.
    for (i in 1:length(temp_outliers_residuals)) {
      rows <- match(temp_outliers_residuals[[i]],
                    row.names(dat[names(temp_outliers_residuals)[i]]))
      columns <- which(grepl(names(temp_outliers_residuals)[i], colnames(dat)))
      temp_outliers_residuals[[i]] <- dat[rows, c(by_column, columns)]
      dat[rows, columns] <- NA

      if (nrow(temp_outliers_residuals[[i]]) > 0) {
        if (nrow(outlier_dat) == 0) {
          outlier_dat <- temp_outliers_residuals[[i]]
        } else {
          outlier_dat <- merge(outlier_dat, temp_outliers_residuals[[i]],
                               by = intersect(colnames(outlier_dat),
                                              colnames(temp_outliers_residuals[[i]])),
                               all = TRUE)
        }
      }
    }

    # Restore row order by the numeric part of the grouping-column values.
    for (i in 1:length(by_column)) {
      dat <- dat[order(as.numeric(gsub("[[:alpha:]]", "", dat[,i]))),]
    }
    row.names(dat) <- seq(from = 1, to = nrow(dat), by = 1)
  }

  # Fix: scalar '&&' (not vector '&') in the if-condition, and look only in
  # the function's own frame (inherits = FALSE) so a global 'outlier_dat'
  # can never leak into the result.
  if (exists("dat", inherits = FALSE) &&
      exists("outlier_dat", inherits = FALSE) &&
      exists("outliers_residuals", inherits = FALSE)) {
    return(list("Outlier_removed_data" = dat,
                "Outlier_data" = outlier_dat,
                "Outliers_residuals" = outliers_residuals))
  } else {
    return(dat)
  }
}
c70ffda35340afdabf5d1b281df143312e1c9ff1
940e4d3aed30a8e0557b65c85d649ee97e31a9a4
/get_bracket_db.R
efa96268a24573af0f5843ad053783513d1328e3
[]
no_license
nate-d-olson/strviz
c60092fb74e3d33f2bdfa39712623d15c9a0cb90
f7d51700a0da5d8feba3ec299826989af7685b41
refs/heads/master
2021-01-10T07:02:47.595460
2015-08-03T18:56:40
2015-08-03T18:56:40
36,394,532
0
0
null
null
null
null
UTF-8
R
false
false
795
r
get_bracket_db.R
# Build a single bracket-notation lookup table from all CSV files found in
# the Bracket_Notation directory.
library(dplyr)

# NOTE(review): hard-coded, user-specific working directory -- the script
# only runs on this machine as written.
setwd("/Users/sew/Desktop/strviz/Bracket_Notation")
file_names <- dir()

# Read every CSV in the directory and stack the rows into one data frame.
Bracket_Database <- do.call(rbind,lapply(file_names,read.csv))
# Keep only columns 3-6 of the combined table.
Bracket_Database <- select(Bracket_Database, 3:6)
# Add constant direction labels for the two column groups.
Bracket_Database["Forward"] <- "Forward"
Bracket_Database["Reverse"] <- "Reverse"
# Empty cells become NA and incomplete rows are dropped.
Bracket_Database[Bracket_Database==""] <- NA
Bracket_Database <- na.omit(Bracket_Database)
rownames(Bracket_Database) <- NULL
# Reorder so that each direction's columns sit together.
Bracket_Database <- Bracket_Database[,c(3,1,5,4,2,6)]
# Split into a forward table (cols 1-3) and a reverse table (cols 4-6)...
df <- Bracket_Database[,c(1:3)]
df2 <- Bracket_Database[,c(4:6)]
# ...give both halves the same column names...
names(df)[1] <- "Sequence"
names(df2)[1] <- "Sequence"
names(df)[2] <- "Bracket Notation"
names(df2)[2] <- "Bracket Notation"
names(df)[3] <- "Direction"
names(df2)[3] <- "Direction"
# ...and stack them, dropping duplicate rows.
Bracket_Database <- bind_rows(df, df2)
Bracket_Database <- unique(Bracket_Database)
5a3f8a981cd7d5fc299fbb589a11f3ddcb400e8f
49847557d80237231eb76b46a34e3ec9357d45b8
/cachematrix.R
b4d8df8ecb7643227050f021a4cf6cb2255bf6c0
[]
no_license
vladislavvalt/ProgrammingAssignment2
982c74e34b23eed54b38effa2ea76ae254853d6b
d7f2fe8712a592aa86fbe52ddac87ce4e271ec50
refs/heads/master
2021-01-17T10:02:35.927193
2014-10-25T09:32:57
2014-10-25T09:32:57
null
0
0
null
null
null
null
UTF-8
R
false
false
1,028
r
cachematrix.R
## Functions in this file allow the inverse of a matrix to be computed once
## and then reused from a cache, avoiding unnecessary recomputation.

## Create a wrapper around a matrix that can also store its cached inverse.
##
## Returns a list of four functions:
##   set(y)            - replace the stored matrix and invalidate the cache
##   get()             - return the stored matrix
##   set_inverse(inv)  - store a computed inverse in the cache
##   get_inverse()     - return the cached inverse (NULL if not yet computed)
makeCacheMatrix <- function(x = matrix()) {
    inv <- NULL
    set <- function(y){
        # Replacing the matrix invalidates any previously cached inverse.
        x <<- y
        inv <<- NULL
    }
    get <- function() x
    set_inverse <- function(inverse) inv <<- inverse
    get_inverse <- function() inv
    list(set = set,
         get = get,
         set_inverse = set_inverse,
         get_inverse = get_inverse)
}

## Lazily compute the inverse of the wrapped matrix, using the cache.
## 'x' must be a wrapper created by makeCacheMatrix; any extra arguments are
## forwarded to solve(). Returns a matrix that is the inverse of 'x'.
cacheSolve <- function(x, ...) {
    inv <- x$get_inverse()
    if(!is.null(inv)){
        message("getting cached data")
        return(inv)
    }
    data <- x$get()
    # Fix: forward '...' to solve() so caller-supplied options (e.g. 'tol')
    # actually take effect; previously they were silently ignored.
    inv <- solve(data, ...)
    x$set_inverse(inv)
    inv
}
3f70b3ab07a84a98877a7883f0c133a78b70ab6d
27f9da112786a84da2296cd94f2a3cc0e455f8cc
/colocalization.R
30a40e569252162858377442433c672ecdbddd7e
[]
no_license
Dan609/stat
a980e4aac638d626132dcb72bba5ee8301796830
25851eb526bfcddaf058db08747132e94f334497
refs/heads/master
2021-06-17T21:42:45.725305
2021-03-01T07:40:48
2021-03-01T07:40:48
171,457,622
0
0
null
null
null
null
UTF-8
R
false
false
6,952
r
colocalization.R
# Statistical analysis of colocalozation coefficients, Dan Bobkov, 2019 # Firstly, calculate coef in ImajeJ : # Kendall's Tau-b rank correlation value = bTau # Spearman's rank correlation value = Rs # Manders' coefficients = tM1 and tM2 # Pearson's R value (above threshold) = Rval # Import libraries library(ggplot2) library(plyr) library(dplyr) library(ggpubr) library(car) library(stringi) library(Hmisc) library(gplots) library(PMCMRplus) library(dunn.test) library(DescTools) library(ggsignif) # Load data data1 <- read.csv('coloc3.csv') hist(data1) # Name dependent variables data1$probe <- as.factor(data1$probe) t.test <- compare_means(bTau ~ probe, data = data1, method = "t.test", ref.group = 'p07') wilcox.test <- compare_means(bTau ~ probe, data = data1, method = "wilcox.test", ref.group = 'p07') # order levels data1$probe <- ordered(data1$probe, levels = c("p07", "p09", "p12", "p15", "p18", "p21", "p25", "p27", "p28", "p35", "p36")) # # ##################################### # Kendall's Tau-b rank correlation value ##################################### # normality test # perform the Shapiro-Wilk test of normality for one variable (univariate): shapiro.test(data1$bTau) # Kruskal Wallis Test One Way Anova by Ranks kruskal.test(data1$bTau ~ data1$probe) # Density plot: the density plot provides a visual judgment about whether the distribution is bell shaped. ggdensity(data1$bTau, main = "Density plot of bTau", xlab = "bTau") # Q-Q plot: Q-Q plot (or quantile-quantile plot) draws the correlation between a given sample and the normal distribution. 
# qqPlot(data1$bTau) ggqqplot(data1$bTau) # Perform pairwise comparisons compare_means(bTau ~ probe, data = data1, method = "anova") compare_means(bTau ~ probe, data = data1, method = "kruskal.test") compare_means(bTau ~ probe, data = data1, method = "t.test") compare_means(bTau ~ probe, data = data1, method = "wilcox.test") write.csv(compare_means(bTau ~ probe, data = data1, method = "t.test"), file="t.test.bTau.csv") write.csv(compare_means(bTau ~ probe, data = data1, method = "wilcox.test"), file="wilcox.test.bTau.csv") # One-way ANOVA # Compute the analysis of variance res.aov <- aov(bTau ~ probe, data = data1) # Summary of the analysis summary(res.aov) TukeyHSD(res.aov) par(mar = c(4.5, 8, 4.5, 4.5)) plot(TukeyHSD(res.aov), las = 1) u # Bar plot with signifiers df.summary <- group_by(data1, probe) %>% summarise( sd = sd(bTau, na.rm = TRUE), bTau = mean(bTau) ) df.summary ## ggplot(df.summary, aes(probe, bTau)) + geom_bar(stat = "identity", fill = 'gray', color = "black", size= 1, show.legend=TRUE) + geom_errorbar(aes(ymin = bTau-sd, ymax = bTau+sd), width = 0.2, size=1) + theme( # Change axis lines axis.line = element_line(size = 1), # Change axis ticks text labels: font color, size and face axis.text.x = element_text(face = "bold", size = 12, angle = 90), # Change x axis tick labels only axis.text.y = element_text(face = "bold", size = 12, angle = 0), # Change y axis tick labels only # Change axis ticks line: font color, size, linetype and length axis.ticks = element_line(), # Change ticks line fo all axes axis.ticks.x = element_line(), # Change x axis ticks only axis.ticks.y = element_line(), # Change y axis ticks only axis.ticks.length = unit(3, "pt") # Change the length of tick marks ) + geom_point() + ylim(0, 1) + ggtitle("Colocalization of myosin-9 and F-actin in MSCWJ-1 cells") + labs(y="Kendall's Tau-b rank correlation value", x = "Experimental groups") + # xmin / xmax positions should match the x-axis labels' positions geom_signif(y_position = 
c(0.95), xmin = c(5), xmax = c(9), annotation = "***", tip_length = 0.04) + # xmin / xmax positions should match the x-axis labels' positions geom_signif(y_position = c(0.85), xmin = c(2), xmax = c(4), annotation = "*", tip_length = 0.04) + # xmin / xmax positions should match the x-axis labels' positions geom_signif(y_position = c(0.9), xmin = c(9), xmax = c(11), annotation = "*", tip_length = 0.04) + # xmin / xmax positions should match the x-axis labels' positions geom_signif(y_position = c(0.77), xmin = c(4), xmax = c(6), annotation = "**", tip_length = 0.04) ## # (1) Compute summary statistics for the variable probe # (2) Bar plots of means + individual jitter points + errors # Kendall's Tau-b rank correlation value df.summary.bTau <- group_by(data1, probe) %>% summarise( sd = sd(bTau, na.rm = TRUE), bTau = mean(bTau) ) df.summary.bTau df.bTau <- data1 ggplot(df.bTau, aes(probe, bTau)) + geom_bar(stat = "identity", data = df.summary.bTau, fill = NA, color = "black") + geom_jitter(position = position_jitter(0.2), color = "black") + geom_errorbar( aes(ymin = bTau-sd, ymax = bTau+sd), data = df.summary.bTau, width = 0.2) # plotmeans plotmeans(bTau ~ probe, data = data1, frame = FALSE, ylim = c(0, 1), mean.labels=FALSE, connect=TRUE, n.label=TRUE, text.n.label="n = ", xlab = "Passages", ylab = "Kendall's Tau-b rank correlation value", main="Colocalization of Myosin-9 and F-actin in WJMSC-1 cells, \nMean Plot with 95% CI") + scale_x_discrete(name ="Passages", limits=c("p07", "p09", "p12", "p15", "p18", "p21", "p25", "p27", "p28", "p35", "p36")) + scale_y_continuous(name="Kendall's Tau-b rank correlation value", limits=c(0, 1)) # where plots saved: plots.dir.path <- list.files(tempdir(), pattern="rs-graphics", full.names = TRUE); plots.png.paths <- list.files(plots.dir.path, pattern=".png", full.names = TRUE) # Now, you can copy these files to your desired directory, as follows: # file.copy(from=plots.png.paths, to="path_to_folder") savehistory(file='myscript.R') 
##################################### # # # # boxplot(bTau ~ probe, data1) # boxplot(Rval ~ probe, data1) # boxplot(Rs ~ probe, data1) # boxplot(tM1 ~ probe, data1) # boxplot(tM2 ~ probe, data1) #
a184e98debe37a8f0a000bca507e7cca4cd64161
21458fe5033fc2f62ffaa0b8bfb04b7a1ad1bf86
/man/sr2lim.Rd
464758190a803b3caaee05091a8e0144dfc95e6b
[]
no_license
einarhjorleifsson/fishvise
c1c3c7fd6d38765ef879e676627acf62a4d6ea4d
b1e56db880c037cf9dd75adea47f2d811d5b85bb
refs/heads/master
2021-01-10T19:21:22.523937
2015-01-08T09:38:42
2015-01-08T09:38:42
14,083,740
0
1
null
null
null
null
UTF-8
R
false
false
256
rd
sr2lim.Rd
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{sr2lim}
\alias{sr2lim}
\title{Set the limits for small statistical squares}
\usage{
sr2lim(sr)
}
\arguments{
\item{sr}{The small statistical square}
}
\description{
Computes the coordinate limits for a small statistical square.
}
df403b2c4628d48f493aa384c1923c8d687777b9
0723925deca1f9332721e0704fcacf0e9c1a1d7a
/Assignment2Q3d.R
35490d8bff15beeb024913ea0ac2a20c1bbdbc34
[]
no_license
kuzmia/mathbio
4b988645f143221b540c7b017836793ef2367c08
8049f32d487f798160ab6ea7e06397c9f7ddd61d
refs/heads/master
2021-05-05T08:16:13.486637
2018-04-05T18:23:16
2018-04-05T18:23:16
118,956,754
0
1
null
null
null
null
UTF-8
R
false
false
1,527
r
Assignment2Q3d.R
# MathBio Assignment 2, Q3(d): numerically integrate the SIR model and plot
# log(prevalence) against time, overlaid with (log) mortality data.
library(deSolve)

## Vector field for the SIR model.
## t:     time (required by deSolve::ode, not used directly)
## vars:  named state vector c(S, I, R)
## parms: named parameter vector c(R_0, gamma)
## Returns a list containing c(dS, dI, dR), as required by ode().
SIR.vector.field <- function(t, vars=c(S,I,R), parms=c(R_0,gamma)) {
  with(as.list(c(parms, vars)), {
    dS <- -gamma*R_0*S*I # dS/dt
    dI <- gamma*R_0*S*I - gamma*I # dI/dt
    dR <- gamma*I #dR/dt
    vec.fld <- c(dS=dS, dI=dI, dR=dR)
    return(list(vec.fld)) # ode() requires a list
  })
}

## Plot solutions of the SIR model
tmax <- 70 # end time for numerical integration of the ODE
## draw box for plot:
plot(0,0,xlim=c(0,tmax),ylim=c(-10,2),
     type="n",xlab="Time (t)",ylab="Log (I)",las=1)

## Integrate the SIR model from initial condition `ic` and add log(I) to the
## current plot. Times are shifted by +14 to line up with the point where the
## mortality (pim) data begins exponential growth.
## NOTE(review): colour is taken from the globals Rep_nums and i, so this
## function relies on being called from the loop below.
draw.soln <- function(ic=c(S=1,I=0), tmax=1,
                      times=seq(0,tmax,by=tmax/500),
                      func, parms, legend,... ) {
  soln <- ode(ic, times, func, parms)
  lines(times+14, log(soln[,"I"]), col=Rep_nums[i], lwd=3,... )
}#translate times to point where pim data begins exponential growth

##Initial conditions:
I0 <- 0.001
S0 <- 1 - I0
R0 <- 1 - I0 - S0

##Draw solutions for several values of parameter R_0:
Rep_nums <- c(2.1)
gamma <- 1/4.1
for (i in 1:length(Rep_nums)) {
  draw.soln(ic=c(S=S0,I=I0,R=R0), tmax=tmax,
            func=SIR.vector.field,
            parms=c(R_0=Rep_nums[i],gamma),
            lty=i #Different line style for each solution
  )
}

##Legend for Ro
legend("topright",legend=Rep_nums, title = expression(paste(italic("Ro ="))))
##Legend for gamma
legend("topleft", legend=(1/gamma), title = expression(paste(gamma," =")))

## Plot the log of the mortality data, translated down 8 units.
## NOTE(review): `df` (with columns date and pim) must already exist in the
## workspace -- it is not created in this script; confirm against the
## surrounding assignment code.
points(df$date,log(df$pim)-8)
0a98f67296eca82e681386a0defdbe16e2bc5d6b
ec4719729a7fc7c219dcedbf26c3518cbf3bfb5e
/man/asap.data.list.Rd
31739ffab9b3305bc21aadf9883931d822c7a386
[]
no_license
cran/BaM
ca832e5dd3fddc64aac6e5f5a9c2c6a60961c7c1
272242a3e43a5770ffeea306b92e5365d7ebeba7
refs/heads/master
2022-11-09T11:35:50.643781
2022-10-14T10:25:17
2022-10-14T10:25:17
17,677,986
1
0
null
null
null
null
UTF-8
R
false
true
1,870
rd
asap.data.list.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/asap.data.list-data.R
\docType{data}
\name{asap.data.list}
\alias{asap.data.list}
\title{asap.data.list}
\usage{
data(asap.data.list)
}
\description{
The American State Administrator's Project (ASAP) survey asks administrators
about the influence of a variety of external political actors, including
"clientele groups", in their agencies (see page 395). The variables included
in the dataset are:
\itemize{
\item\code{contracting} scale from 0 : 6 where higher indicates more private contracting within the respondent's agency.
\item\code{gov.incluence} respondents' assessment of the governor's influence on contracting in their agency.
\item\code{leg.influence} respondents' assessment of the legislatures' influence on contracting in their agency, ranging from 0 : 21.
\item\code{elect.board} dichotomous variable coded 1 if appointed by a board, a commission or elected, and 0 otherwise.
\item\code{years.tenure} number of years that the respondent has worked at their current agency.
\item\code{education} ordinal variable for level of education possessed by the respondent.
\item\code{partisan.ID} a 5-point ordinal variable (1-5) for the respondent's partisanship (strong Democrat to strong Republican).
\item\code{category} categories of agency type.
\item\code{med.time} whether the respondent spent more or less than the sample median with representatives of interest groups.
\item\code{medt.contr} interaction variable between med.time and contracting.
\item\code{gov.ideology} state government ideology from Berry et al. (1998) from 0 to 100.
\item\code{lobbyists} total state lobbying registrants in 2000-01 from Gray and Lowery (1996, 2001).
\item\code{nonprofits} provides the total number of nonprofit groups in the respondents' state in the year 2008, divided by 10,000.
}
}
3af6b639b91ff08ec39b5eb72ef5ed26a5658e24
29585dff702209dd446c0ab52ceea046c58e384e
/dashboard/R/dashboard_stop.R
311c52ee38b84ee67fe67134696eea14877895a3
[]
no_license
ingted/R-Examples
825440ce468ce608c4d73e2af4c0a0213b81c0fe
d0917dbaf698cb8bc0789db0c3ab07453016eab9
refs/heads/master
2020-04-14T12:29:22.336088
2016-07-21T14:01:14
2016-07-21T14:01:14
null
0
0
null
null
null
null
UTF-8
R
false
false
797
r
dashboard_stop.R
#' dashboard_stop stops the local Rook server
#'
#' \code{dashboard_stop} stops the running Rook server. Not needed on Linux,
#' but required in a Unix environment.
#'
#' @docType methods
#' @param dashboard.env the environment used across the dashboard package;
#'   it is expected to contain the Rook server object \code{s}.
#' @export
#' @examples
#' dashboard_open(data=iris) # other options: pathoutput=getwd() ...
#' dcpiechart(x=names(iris)[5])
#' dcbarchart(x=names(iris)[1] , gap=75)
#' dcpiechart(x=names(iris)[2])
#' dctable(index=names(iris)[5])
#' dashboard_launch(browse = FALSE) # Just generates files. Server is not launched
#' dashboard_stop(dashboard.env) # should have a server running
#'
dashboard_stop <- function(dashboard.env) {
  # Fix: the old default `dashboard.env = dashboard.env` was self-referential
  # and raised "promise already under evaluation" whenever the function was
  # called without an argument; the argument is now simply required.
  #
  # Stop the server. Only done interactively, matching the original guard.
  if (interactive()) {
    # `s` is assumed to be the Rook server stored in dashboard.env by the
    # launch code -- TODO confirm against the rest of the package.
    dashboard.env$s$stop()
  }
}
bfa67e1e9db1e3a69f98f27fe5a60cffb42710d0
34cc0de8269856373e4ccd1bf2f97d67de979169
/R/extract_lm_quantities.R
e1fc8d9c5be2d992d93d9898be2b23f2ef8f13bf
[]
no_license
mmp3/deltacomp
0a120e9bdd40937f304cdc4709d40e529a128c13
366e689c0dcde7782858121384d43bc412d2af0d
refs/heads/master
2023-06-04T11:29:05.434094
2021-05-28T07:36:22
2021-05-28T07:36:22
null
0
0
null
null
null
null
UTF-8
R
false
false
1,177
r
extract_lm_quantities.R
#' Extract critical quantities from a lm object (for confidence interval calculations)
#'
#' @param lm_X a fitted \code{lm} object.
#' @param alpha level of significance. Defaults to 0.05.
#'
#' @return
#' A list containing the \code{lm}'s model matrix (\code{dmX}),
#' the inverse of \code{t(dmX) \%*\% dmX} (\code{XtX_inv}),
#' the residual standard error (\code{s_e}),
#' the estimated single-column beta matrix (\code{beta_hat}), and
#' the critical value of the relevant t distribution (\code{crit_val}).
#' @export
#'
#' @examples
#' data(fat_data)
#' lm_fat <- lm(fat ~ sl, data = fat_data)
#' extract_lm_quantities(lm_fat)
extract_lm_quantities <- function(lm_X, alpha = 0.05)
{
    # Residual degrees of freedom are reused for both the residual standard
    # error and the t critical value.
    resid_df <- df.residual(lm_X)

    design <- model.matrix(lm_X)
    # (X^T X)^{-1}; crossprod(design) is t(design) %*% design.
    gram_inverse <- solve(crossprod(design))

    # Residual standard error: sqrt(RSS / df).
    resid_se <- sqrt(sum(residuals(lm_X)^2) / resid_df)

    # Two-sided critical value at level alpha.
    t_crit <- qt(1 - alpha / 2, resid_df)

    # Coefficient estimates as a single-column matrix.
    coef_column <- matrix(coefficients(lm_X), ncol = 1)

    list(
        dmX = design,
        XtX_inv = gram_inverse,
        s_e = resid_se,
        beta_hat = coef_column,
        crit_val = t_crit
    )
}
ec17fd39664fddae9d20ef91b3e202da0ef84412
16cface76db9d5c130ae2adfdd0c2e6d89c41603
/R/fars_functions.R
0e4f76c0766870ee75c24d04904fed54f9b8a38f
[]
no_license
patrik-h-m/practicepackage
b1f0bd07d03a5c158a22b2cc325b812999f9155d
5298a0dfb80ba7f4b0024a54a346e1b9704a6940
refs/heads/main
2023-07-17T10:45:02.991375
2021-08-19T08:57:23
2021-08-19T08:57:23
397,834,022
0
0
null
null
null
null
UTF-8
R
false
false
4,997
r
fars_functions.R
#' Read data from csv file
#'
#' This function reads data from a csv file into a tibble.
#' Internally the function uses \code{readr::read_csv} to read the file.
#'
#' If \code{filename} does not exist an error is thrown.
#'
#' @param filename the name of the file from which to read the data
#'
#' @return a \code{\link{tbl_df}}
#'
#' @importFrom readr read_csv
#' @importFrom dplyr tbl_df
#'
#' @examples
#' \dontrun{fars_read("data/accident_2015.csv.bz2")}
#'
#' @export
fars_read <- function(filename) {
  if(!file.exists(filename))
    stop("file '", filename, "' does not exist")
  # suppress readr's column-specification messages
  data <- suppressMessages({
    readr::read_csv(filename, progress = FALSE)
  })
  # NOTE(review): dplyr::tbl_df() is deprecated in current dplyr;
  # tibble::as_tibble() is the modern equivalent -- left unchanged here.
  dplyr::tbl_df(data)
}

#' Create a file name for certain year's data
#'
#' This is a helper function that creates "accident_2020.csv.bz2" style
#' file names.
#'
#' @param year an integer to be used in the file name to indicate the year
#'
#' @return a string in format accident_YYYY.csv.bz2 where YYYY is replaced by
#'   \code{year}
#'
#' @examples
#' \dontrun{make_filename(2020)}
#'
#' @export
make_filename <- function(year) {
  # coerce so sprintf's %d is valid for inputs like "2013" or 2013.0
  year <- as.integer(year)
  sprintf("accident_%d.csv.bz2", year)
}

#' Read accident data from files
#'
#' This function reads data from files named in format accident_YYYY.csv.bz2
#' where YYYY denotes the year which the data represents. Argument \code{years}
#' specifies which years' data is read.
#'
#' If no file for a specified year is found, or the file contains invalid
#' data, a warning is issued and \code{NULL} is returned for that year.
#'
#' @param years a vector of years for which to read data
#'
#' @return a list of tibbles, each containing data for one year
#'
#' @examples
#' \dontrun{
#' y <- c(2017, 2018, 2019)
#' fars_read_years(y)
#' }
#'
#' @importFrom dplyr mutate
#' @importFrom dplyr select
#'
#' @export
fars_read_years <- function(years) {
  lapply(years, function(year) {
    file <- make_filename(year)
    # any read/parse failure for one year becomes a warning, not an error,
    # so the remaining years are still processed
    tryCatch({
      dat <- fars_read(file)
      dplyr::mutate(dat, year = year) %>%
        dplyr::select(MONTH, year)
    }, error = function(e) {
      warning("invalid year: ", year)
      return(NULL)
    })
  })
}

#' Produce summary showing the number accidents for each month
#'
#' This function reads FARS data using \code{fars_read_years} and produces a
#' summary that shows the number of cases for each month.
#'
#' The function returns an error if \code{fars_read_years} is unable to read
#' data for specified years.
#'
#' @param years a vector of years for which to read data
#'
#' @return a tibble with columns "MONTH" (which has values 1-12) and one column
#'         per year. The values in these columns show the number of accidents
#'         in that month and year.
#'
#' @examples
#' \dontrun{
#' y <- c(2017, 2018, 2019)
#' fars_summarize_years(y)
#' }
#'
#' @importFrom dplyr bind_rows
#' @importFrom dplyr group_by
#' @importFrom dplyr summarize
#' @importFrom tidyr spread
#' @importFrom dplyr %>%
#'
#' @export
fars_summarize_years <- function(years) {
  dat_list <- fars_read_years(years)
  # NOTE(review): n() is used unqualified below; it requires dplyr to be
  # attached or an @importFrom dplyr n directive -- confirm NAMESPACE covers it.
  dplyr::bind_rows(dat_list) %>%
    dplyr::group_by(year, MONTH) %>%
    dplyr::summarize(n = n()) %>%
    tidyr::spread(year, n)
}

#' Plot accidents on a map
#'
#' The function is used to create a map showing accidents in a specified US
#' state during a specified year.
#'
#' The function results in an error if \code{fars_read} fails to read data or if
#' the data contains no accidents found for the given state.
#'
#' @param state.num The number of the US state to plot. The states are numbered
#'                  alphabetically (1 = Alabama, 2 = Alaska etc.)
#' @param year the year whose accidents to plot on the map
#'
#' @return This function draws a map. It does not return a value.
#'
#' @examples
#' \dontrun{
#' fars_map_state(6, 2013)
#' fars_map_state(49, 2015)
#' }
#'
#' @importFrom maps map
#' @importFrom graphics points
#' @importFrom dplyr filter
#'
#' @export
fars_map_state <- function(state.num, year) {
  filename <- make_filename(year)
  data <- fars_read(filename)
  state.num <- as.integer(state.num)

  if(!(state.num %in% unique(data$STATE)))
    stop("invalid STATE number: ", state.num)
  data.sub <- dplyr::filter(data, STATE == state.num)
  if(nrow(data.sub) == 0L) {
    message("no accidents to plot")
    return(invisible(NULL))
  }
  # Sentinel coordinates (LONGITUD > 900, LATITUDE > 90) mark unknown
  # locations in FARS data; mark them NA so they are excluded from the plot
  is.na(data.sub$LONGITUD) <- data.sub$LONGITUD > 900
  is.na(data.sub$LATITUDE) <- data.sub$LATITUDE > 90
  with(data.sub, {
    maps::map("state", ylim = range(LATITUDE, na.rm = TRUE),
              xlim = range(LONGITUD, na.rm = TRUE))
    graphics::points(LONGITUD, LATITUDE, pch = 46)
  })
}
083b3f0e276bf6d56a8936022afa9e3bc8d28dbb
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/rebus.numbers/tests/test-number_range.R
19a68c1ea90c4f2f8ba1d69c359c6b92f1191870
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
1,042
r
test-number_range.R
context("number_range")

# Each test pins the regex produced by number_range() for one numeric span,
# comparing directly against the expected as.regex() literal.

test_that(
  "number_range returns a correct regex for 0 to 9999",
  {
    expect_equal(
      number_range(0, 9999),
      as.regex("(?:[0-9]{4})")
    )
  }
)

test_that(
  "number_range returns a correct regex for 123 to 876",
  {
    expect_equal(
      number_range(123, 876),
      as.regex(
        "(?:12[3-9]|1[3-9][0-9]|[2-7][0-9]{2}|8[0-6][0-9]|87[0-6])"
      )
    )
  }
)

test_that(
  "number_range returns a correct regex for 1010 to 9090",
  {
    expect_equal(
      number_range(1010, 9090),
      as.regex(
        "(?:10[1-9][0-9]|1[1-9][0-9]{2}|[2-8][0-9]{3}|90[0-8][0-9]|9090)"
      )
    )
  }
)

test_that(
  "number_range returns a correct regex for -123 to 876",
  {
    expect_equal(
      number_range(-123, 876),
      as.regex(
        "(?:-(?:[1-9]|[1-9][0-9]|1[0-1][0-9]|12[0-3])|(?:[0-7][0-9]{2}|8[0-6][0-9]|87[0-6]))"
      )
    )
  }
)
e9d74856a125f4c60118da40baba340b1a19b133
0ac8da4ce785221deb33e97d6b5a27ac3eb87fd7
/R/poached50_A20.R
ed088908f3f8a3caee3dee7987f229781f43e78f
[]
no_license
andbeck/Parrots_2021
3d113a04bc451677e50031d795d5d866a358a88f
0c82c2af75ba183dc867004d1e2b87964d2c13a4
refs/heads/main
2023-05-12T07:44:29.344671
2021-06-08T20:23:36
2021-06-08T20:23:36
338,265,549
1
0
null
null
null
null
UTF-8
R
false
false
28,645
r
poached50_A20.R
# YSA Stochastic Model of Population Growth under Poaching Pressure of 10% reduced fledgling survival
# With changes in immature stage duration of 1:5 years.
# With adult stage duration of 20:16 years.
# With imputed survival rates.
# Density independent
#
# Structure: shared vital-rate simulation, then five near-identical
# life-stage simulation analysis (LSA) sections -- one per immature/adult
# stage-duration combination (D1A18 ... D5A14) -- followed by a PLOTS
# section that combines the per-scenario results.

#### Libraries ----
library(popbio)
library(tidyverse)
library(patchwork)

#### Functions ----
## Matrix model function
source("R/make_projection_matrix.R")
## Stochastic population growth function
source("R/stochastic_proj.R")

#### YSA Data ----
## YSA breeding biology data 2006-2014 from Bonaire
source("R/YSA_life_history_data.R")

# Mean fecundity
fecundity <- c(0, 0, 1.6*total_summary$mean_hatch[1]*(total_summary$mean_nestling_surv[1]*0.5))

# Mean survival (0.73 is from Salinas-Melgoza & Renton 2007, 0.838 is survival from imputation)
survival <- c(0.73, 0.838, 0.838)

# Current population is estimated around 1000 individuals. 1:1 sex ratio means female population is 500
Nc <- 500

# Time to project to
time <- 100

#### YSA Simulated Vital Rates for LSA ----
set.seed(2021)

# Number of simulations
n_sim <- 1000

# Fledgling survival
s1 <- sapply(1:n_sim, function(x) betaval(0.73, 0.2))

# Immature survival
s2 <- sapply(1:n_sim, function(x) betaval(0.838, 0.051))

# Adult survival
s3 <- sapply(1:n_sim, function(x) betaval(0.838, 0.051))

# Fecundity
m3 <- rlnorm(n = n_sim, log(1.6*total_summary$mean_hatch[1]*(total_summary$mean_nestling_surv[1]*0.5)),
             log(1.01)) #replaced sd with small value for log

## Create lists of survival and fecundity
# Survival
survival_df <- data.frame(s1, s2, s3)
colnames(survival_df)<- c()
survival_list <- asplit(survival_df, 1)

# Fecundity
fecundity_df <- data.frame(0, 0, m3)
colnames(fecundity_df)<- c()
fecundity_list <- asplit(fecundity_df, 1)

#### LSA for Immature Duration of 1 and Adult Duration 0f 18 ----
## Stage duration
duration <- c(1, 1, 18)

## Initial Population Vector
# Stable stage distribution of mean matrix
stable_stage <- make_projection_matrix(survival, fecundity, duration) %>%
  stable.stage() %>%
  as.list()

# Initial population vector estimated from stable stage distribution
P50D1A18_n0 <- c(stable_stage[[1]]*Nc, stable_stage[[2]]*Nc, stable_stage[[3]]*Nc)

### Life-stage Simulation Analysis for Population in Stochastic Environment
## Stage duration list - repeat so that length is the same as survival and fecundity
duration_list <- rep(list(duration), times = n_sim)

## Simulate list of matrices using the vital rates and make_projection_matrix function
P50D1A18_matrices <- list()
for(i in 1:n_sim){
  mpm <- make_projection_matrix(survival_list[[i]], fecundity_list[[i]], duration_list[[i]])
  P50D1A18_matrices[[i]] <- mpm
}
head(P50D1A18_matrices)

## Repeat Stochastic Population Growth
# NOTE(review): single-bracket assignment `x[i] <- mp` (here and in the other
# four sections) relies on stochastic_proj() returning a length-1 value or
# list -- confirm against R/stochastic_proj.R.
P50D1A18_stochastic_pop <- list()
for(i in 1:n_sim){
  mp <- stochastic_proj(P50D1A18_matrices, n = P50D1A18_n0, time = time)
  P50D1A18_stochastic_pop[i] <- mp
}

# Multiply female population sizes by 2 to get total population size
P50D1A18_total_pop <- lapply(P50D1A18_stochastic_pop, "*", 2)

# Create for loop for pop sizes in each projection as a data frame to plot with ggplot
P50D1A18_df_plots <- list()
for(i in 1:n_sim){
  mpl <- data.frame(time = 1:time, pop_sizes = P50D1A18_total_pop[[i]])
  P50D1A18_df_plots[[i]] <- mpl
}

# Add identifier for each simulation
P50D1A18_plot_data <- bind_rows(P50D1A18_df_plots, .id = "id")

# Plot projection
P50D1A18_plot <- ggplot(P50D1A18_plot_data, aes(time, pop_sizes, fill=id)) +
  geom_line() +
  theme_classic() +
  labs(x = "Time (years)", y = "Total population size")

# Mean population size time series with 95% confidence intervals from LSA
P50D1A18_mean_plot_data <- P50D1A18_plot_data %>%
  group_by(time) %>%
  summarise(mean = mean(pop_sizes),
            se_pop_size = sd(pop_sizes)/sqrt(length(pop_sizes)))

# Get predictions and 95% CI
P50D1A18_plot_pred <- P50D1A18_mean_plot_data %>%
  mutate(
    pop_size = mean,
    # lower limit 95% CI
    ll = mean - 1.96 * se_pop_size,
    # upper limit 95% CI
    ul = mean + 1.96 * se_pop_size
  )

# Plot mean population projection with CIs
P50D1A18_mean_plot <- ggplot(P50D1A18_plot_pred, aes(x= time, y = mean)) +
  geom_line() +
  geom_ribbon(data = P50D1A18_plot_pred, aes(ymin = ll, ymax = ul), alpha = 0.2) +
  theme_classic() +
  labs(x = "Time (years)", y = "Mean total population size")

#### Calculate final mean population size and standard deviation from LSA
P50D1A18_pop_sizes <- numeric()
for (i in 1:n_sim) {
  ms <- P50D1A18_total_pop[[i]][time]
  P50D1A18_pop_sizes[i] <- ms
}

# mean pop size
P50D1A18_pop_mean <- mean(P50D1A18_pop_sizes)
# standard deviation pop size
P50D1A18_pop_sd <- sd(P50D1A18_pop_sizes)
# standard error pop size
P50D1A18_pop_se <- sd(P50D1A18_pop_sizes)/sqrt(length(P50D1A18_pop_sizes))

#### Calculate Stochastic Growth Rate
P50D1A18_lambda_s <- stoch.growth.rate(P50D1A18_matrices, prob = NULL, maxt = time, verbose = TRUE)

# convert from log
P50D1A18_lambda_s$approx <- exp(P50D1A18_lambda_s$approx)
P50D1A18_lambda_s$sim <- exp(P50D1A18_lambda_s$sim)
P50D1A18_lambda_s$sim.CI <- exp(P50D1A18_lambda_s$sim.CI)

#### Calculate Quasi-extinction Probability
P50D1A18_quasi <- stoch.quasi.ext(P50D1A18_matrices, n0= P50D1A18_n0, Nx = 50, tmax = time, maxruns = 1,
                                  nreps = 5000, prob = NULL, sumweight = NULL, verbose = TRUE)

# Plot quasi-extinction probabilities
P50D1A18_quasi_df <- data.frame(P50D1A18_quasi, "Year" = 1:time) %>%
  gather("sim", "quasi", -"Year")

P50D1A18_quasi_plot <- ggplot(P50D1A18_quasi_df, aes(x = Year, y = quasi, colour = sim)) +
  geom_line() +
  theme_bw() +
  ylim(0, 1) +
  theme(legend.position = "none") +
  labs(y = "Cumulative probability of quasi-extinction")

#### Calculate Stochastic Elasticities
P50D1A18_sens <- stoch.sens(P50D1A18_matrices, tlimit=time)
P50D1A18_elas <- P50D1A18_sens$elasticities

# Flatten the 3x3 elasticity matrix row by row (fecundities, survivals, growth)
P50D1A18_elas_v <- c(P50D1A18_elas[1,1], P50D1A18_elas[1,2], P50D1A18_elas[1,3],
                     P50D1A18_elas[2,1], P50D1A18_elas[2,2], P50D1A18_elas[2,3],
                     P50D1A18_elas[3,1], P50D1A18_elas[3,2], P50D1A18_elas[3,3])

# NOTE(review): "s3" appears twice in these labels (positions 6 and 9);
# position 6 labels matrix element [2,3] -- confirm whether it should be a
# distinct label.
stage<-c("m1", "m2", "m3", "s1", "s2", "s3", "g1", "g2", "s3")

P50D1A18_elas_df <- data.frame(P50D1A18_elas_v) %>%
  gather("duration", "elasticity") %>%
  data.frame(stage)

P50D1A18_elas_plot <- ggplot(P50D1A18_elas_df, aes(x = stage, y= P50D1A18_elas_v)) +
  labs(x = "Vital rate", y = "Stochastic elasticity") +
  theme_bw() +
  geom_col(fill = "grey20")

#### LSA for Immature Duration of 2 and Adult Duration of 17 ----
## Stage duration
duration <- c(1, 2, 17)

## Initial Population Vector
# Stable stage distribution of mean matrix
stable_stage <- make_projection_matrix(survival, fecundity, duration) %>%
  stable.stage() %>%
  as.list()

## Initial population vector estimated from stable stage distribution
P50D2A17_n0 <- c(stable_stage[[1]]*Nc, stable_stage[[2]]*Nc, stable_stage[[3]]*Nc)

### Life-stage Simulation Analysis for Population in Stochastic Environment
## Stage duration list - repeat so that length is the same as survival and fecundity
duration_list <- rep(list(duration), times = n_sim)

## Simulate list of matrices using the vital rates and make_projection_matrix function
P50D2A17_matrices <- list()
for(i in 1:n_sim){
  mpm <- make_projection_matrix(survival_list[[i]], fecundity_list[[i]], duration_list[[i]])
  P50D2A17_matrices[[i]] <- mpm
}

## Repeat Stochastic Population Growth
P50D2A17_stochastic_pop <- list()
for(i in 1:n_sim){
  mp <- stochastic_proj(P50D2A17_matrices, n = P50D2A17_n0, time = time)
  P50D2A17_stochastic_pop[i] <- mp
}

# Multiply female population sizes by 2 to get total population size
P50D2A17_total_pop <- lapply(P50D2A17_stochastic_pop, "*", 2)

# Create for loop for pop sizes in each projection as a data frame to plot with ggplot
P50D2A17_df_plots <- list()
for(i in 1:n_sim){
  mpl <- data.frame(time = 1:time, pop_sizes = P50D2A17_total_pop[[i]])
  P50D2A17_df_plots[[i]] <- mpl
}

# Add identifier for each simulation
P50D2A17_plot_data <- bind_rows(P50D2A17_df_plots, .id = "id")

# Plot projection
P50D2A17_plot <- ggplot(P50D2A17_plot_data, aes(time, pop_sizes, fill=id)) +
  geom_line() +
  theme_classic() +
  labs(x = "Time (years)", y = "Total population size")

# Mean population size time series with 95% confidence intervals from LSA
P50D2A17_mean_plot_data <- P50D2A17_plot_data %>%
  group_by(time) %>%
  summarise(mean = mean(pop_sizes),
            se_pop_size = sd(pop_sizes)/sqrt(length(pop_sizes)))

# Get predictions and 95% CI
P50D2A17_plot_pred <- P50D2A17_mean_plot_data %>%
  mutate(
    pop_size = mean,
    # lower limit 95% CI
    ll = mean - 1.96 * se_pop_size,
    # upper limit 95% CI
    ul = mean + 1.96 * se_pop_size
  )

# Plot mean population projection with CIs
P50D2A17_mean_plot <- ggplot(P50D2A17_plot_pred, aes(x= time, y = mean)) +
  geom_line() +
  geom_ribbon(data = P50D2A17_plot_pred, aes(ymin = ll, ymax = ul), alpha = 0.2) +
  theme_classic() +
  labs(x = "Time (years)", y = "Mean total population size")

#### Calculate final mean population size and standard deviation from LSA
P50D2A17_pop_sizes <- numeric()
for (i in 1:n_sim) {
  ms <- P50D2A17_total_pop[[i]][time]
  P50D2A17_pop_sizes[i] <- ms
}

# mean pop size
P50D2A17_pop_mean <- mean(P50D2A17_pop_sizes)
# standard deviation pop size
P50D2A17_pop_sd <- sd(P50D2A17_pop_sizes)
# standard error pop size
P50D2A17_pop_se <- sd(P50D2A17_pop_sizes)/sqrt(length(P50D2A17_pop_sizes))

#### Calculate Stochastic Growth Rate
P50D2A17_lambda_s <- stoch.growth.rate(P50D2A17_matrices, prob = NULL, maxt = time, verbose = TRUE)

# convert from log
P50D2A17_lambda_s$approx <- exp(P50D2A17_lambda_s$approx)
P50D2A17_lambda_s$sim <- exp(P50D2A17_lambda_s$sim)
P50D2A17_lambda_s$sim.CI <- exp(P50D2A17_lambda_s$sim.CI)

#### Calculate Quasi-extinction Probability
P50D2A17_quasi <- stoch.quasi.ext(P50D2A17_matrices, n0= P50D2A17_n0, Nx = 50, tmax = time, maxruns = 1,
                                  nreps = 5000, prob = NULL, sumweight = NULL, verbose = TRUE)

# Plot quasi-extinction probabilities
P50D2A17_quasi_df <- data.frame(P50D2A17_quasi, "Year" = 1:time) %>%
  gather("sim", "quasi", -"Year")

P50D2A17_quasi_plot <- ggplot(P50D2A17_quasi_df, aes(x = Year, y = quasi, colour = sim)) +
  geom_line() +
  theme_bw() +
  ylim(0, 1) +
  theme(legend.position = "none") +
  labs(y = "Cumulative probability of quasi-extinction")

#### Calculate Stochastic Elasticities
P50D2A17_sens <- stoch.sens(P50D2A17_matrices, tlimit=time)
P50D2A17_elas <- P50D2A17_sens$elasticities

P50D2A17_elas_v <- c(P50D2A17_elas[1,1], P50D2A17_elas[1,2], P50D2A17_elas[1,3],
                     P50D2A17_elas[2,1], P50D2A17_elas[2,2], P50D2A17_elas[2,3],
                     P50D2A17_elas[3,1], P50D2A17_elas[3,2], P50D2A17_elas[3,3])

# (re-assigns the same label vector as the D1A18 section)
stage<-c("m1", "m2", "m3", "s1", "s2", "s3", "g1", "g2", "s3")

P50D2A17_elas_df <- data.frame(P50D2A17_elas_v) %>%
  gather("duration", "elasticity") %>%
  data.frame(stage)

P50D2A17_elas_plot <- ggplot(P50D2A17_elas_df, aes(x = stage, y= P50D2A17_elas_v)) +
  labs(x = "Vital rate", y = "Stochastic elasticity") +
  theme_bw() +
  geom_col(fill = "grey20")

#### LSA for Immature Duration of 3 and Adult Duration of 16 ----
## Stage duration
duration <- c(1, 3, 16)

## Initial Population Vector
# Stable stage distribution of mean matrix
stable_stage <- make_projection_matrix(survival, fecundity, duration) %>%
  stable.stage() %>%
  as.list()

# Initial population vector estimated from stable stage distribution
P50D3A16_n0 <- c(stable_stage[[1]]*Nc, stable_stage[[2]]*Nc, stable_stage[[3]]*Nc)

### Life-stage Simulation Analysis for Population in Stochastic Environment
## Stage duration list - repeat so that length is the same as survival and fecundity
duration_list <- rep(list(duration), times = n_sim)

## Simulate list of matrices using the vital rates and make_projection_matrix function
P50D3A16_matrices <- list()
for(i in 1:n_sim){
  mpm <- make_projection_matrix(survival_list[[i]], fecundity_list[[i]], duration_list[[i]])
  P50D3A16_matrices[[i]] <- mpm
}

## Repeat Stochastic Population Growth
P50D3A16_stochastic_pop <- list()
for(i in 1:n_sim){
  mp <- stochastic_proj(P50D3A16_matrices, n = P50D3A16_n0, time = time)
  P50D3A16_stochastic_pop[i] <- mp
}

# Multiply female population sizes by 2 to get total population size
P50D3A16_total_pop <- lapply(P50D3A16_stochastic_pop, "*", 2)

# Create for loop for pop sizes in each projection as a data frame to plot with ggplot
P50D3A16_df_plots <- list()
for(i in 1:n_sim){
  mpl <- data.frame(time = 1:time, pop_sizes = P50D3A16_total_pop[[i]])
  P50D3A16_df_plots[[i]] <- mpl
}

# Add identifier for each simulation
P50D3A16_plot_data <- bind_rows(P50D3A16_df_plots, .id = "id")

# Plot projection
P50D3A16_plot <- ggplot(P50D3A16_plot_data, aes(time, pop_sizes, fill=id)) +
  geom_line() +
  theme_classic() +
  labs(x = "Time (years)", y = "Total population size")

# Mean population size time series with 95% confidence intervals from LSA
P50D3A16_mean_plot_data <- P50D3A16_plot_data %>%
  group_by(time) %>%
  summarise(mean = mean(pop_sizes),
            se_pop_size = sd(pop_sizes)/sqrt(length(pop_sizes)))

# Get predictions and 95% CI
P50D3A16_plot_pred <- P50D3A16_mean_plot_data %>%
  mutate(
    pop_size = mean,
    # lower limit 95% CI
    ll = mean - 1.96 * se_pop_size,
    # upper limit 95% CI
    ul = mean + 1.96 * se_pop_size
  )

# Plot mean population projection with CIs
P50D3A16_mean_plot <- ggplot(P50D3A16_plot_pred, aes(x= time, y = mean)) +
  geom_line() +
  geom_ribbon(data = P50D3A16_plot_pred, aes(ymin = ll, ymax = ul), alpha = 0.2) +
  theme_classic() +
  labs(x = "Time (years)", y = "Mean total population size")

#### Calculate final mean population size and standard deviation from LSA
P50D3A16_pop_sizes <- numeric()
for (i in 1:n_sim) {
  ms <- P50D3A16_total_pop[[i]][time]
  P50D3A16_pop_sizes[i] <- ms
}

# mean pop size
P50D3A16_pop_mean <- mean(P50D3A16_pop_sizes)
# standard deviation pop size
P50D3A16_pop_sd <- sd(P50D3A16_pop_sizes)
# standard error pop size
P50D3A16_pop_se <- sd(P50D3A16_pop_sizes)/sqrt(length(P50D3A16_pop_sizes))

#### Calculate Stochastic Growth Rate
P50D3A16_lambda_s <- stoch.growth.rate(P50D3A16_matrices, prob = NULL, maxt = time, verbose = TRUE)

# convert from log
P50D3A16_lambda_s$approx <- exp(P50D3A16_lambda_s$approx)
P50D3A16_lambda_s$sim <- exp(P50D3A16_lambda_s$sim)
P50D3A16_lambda_s$sim.CI <- exp(P50D3A16_lambda_s$sim.CI)

#### Calculate Quasi-extinction Probability
P50D3A16_quasi <- stoch.quasi.ext(P50D3A16_matrices, n0= P50D3A16_n0, Nx = 50, tmax = time, maxruns = 1,
                                  nreps = 5000, prob = NULL, sumweight = NULL, verbose = TRUE)

# Plot quasi-extinction probabilities
P50D3A16_quasi_df <- data.frame(P50D3A16_quasi, "Year" = 1:time) %>%
  gather("sim", "quasi", -"Year")

P50D3A16_quasi_plot <- ggplot(P50D3A16_quasi_df, aes(x = Year, y = quasi, colour = sim)) +
  geom_line() +
  theme_bw() +
  ylim(0, 1) +
  theme(legend.position = "none") +
  labs(y = "Cumulative probability of quasi-extinction")

#### Calculate Stochastic Elasticities
P50D3A16_sens <- stoch.sens(P50D3A16_matrices, tlimit=time)
P50D3A16_elas <- P50D3A16_sens$elasticities

P50D3A16_elas_v <- c(P50D3A16_elas[1,1], P50D3A16_elas[1,2], P50D3A16_elas[1,3],
                     P50D3A16_elas[2,1], P50D3A16_elas[2,2], P50D3A16_elas[2,3],
                     P50D3A16_elas[3,1], P50D3A16_elas[3,2], P50D3A16_elas[3,3])

P50D3A16_elas_df <- data.frame(P50D3A16_elas_v) %>%
  gather("duration", "elasticity") %>%
  data.frame(stage)

P50D3A16_elas_plot <- ggplot(P50D3A16_elas_df, aes(x = stage, y= P50D3A16_elas_v)) +
  labs(x = "Vital rate", y = "Stochastic elasticity") +
  theme_bw() +
  geom_col(fill = "grey20")

#### LSA for Immature Duration of 4 and Adult Duration of 15 ----
## Stage duration
duration <- c(1, 4, 15)

## Initial Population Vector
# Stable stage distribution of mean matrix
stable_stage <- make_projection_matrix(survival, fecundity, duration) %>%
  stable.stage() %>%
  as.list()

# Initial population vector estimated from stable stage distribution
P50D4A15_n0 <- c(stable_stage[[1]]*Nc, stable_stage[[2]]*Nc, stable_stage[[3]]*Nc)

### Life-stage Simulation Analysis for Population in Stochastic Environment
## Stage duration list - repeat so that length is the same as survival and fecundity
duration_list <- rep(list(duration), times = n_sim)

## Simulate list of matrices using the vital rates and make_projection_matrix function
P50D4A15_matrices <- list()
for(i in 1:n_sim){
  mpm <- make_projection_matrix(survival_list[[i]], fecundity_list[[i]], duration_list[[i]])
  P50D4A15_matrices[[i]] <- mpm
}

## Repeat Stochastic Population Growth
P50D4A15_stochastic_pop <- list()
for(i in 1:n_sim){
  mp <- stochastic_proj(P50D4A15_matrices, n = P50D4A15_n0, time = time)
  P50D4A15_stochastic_pop[i] <- mp
}

# Multiply female population sizes by 2 to get total population size
P50D4A15_total_pop <- lapply(P50D4A15_stochastic_pop, "*", 2)

# Create for loop for pop sizes in each projection as a data frame to plot with ggplot
P50D4A15_df_plots <- list()
for(i in 1:n_sim){
  mpl <- data.frame(time = 1:time, pop_sizes = P50D4A15_total_pop[[i]])
  P50D4A15_df_plots[[i]] <- mpl
}

# Add identifier for each simulation
P50D4A15_plot_data <- bind_rows(P50D4A15_df_plots, .id = "id")

# Plot projection
P50D4A15_plot <- ggplot(P50D4A15_plot_data, aes(time, pop_sizes, fill=id)) +
  geom_line() +
  theme_classic() +
  labs(x = "Time (years)", y = "Total population size")

# Mean population size time series with 95% confidence intervals from LSA
P50D4A15_mean_plot_data <- P50D4A15_plot_data %>%
  group_by(time) %>%
  summarise(mean = mean(pop_sizes),
            se_pop_size = sd(pop_sizes)/sqrt(length(pop_sizes)))

# Get predictions and 95% CI
P50D4A15_plot_pred <- P50D4A15_mean_plot_data %>%
  mutate(
    pop_size = mean,
    # lower limit 95% CI
    ll = mean - 1.96 * se_pop_size,
    # upper limit 95% CI
    ul = mean + 1.96 * se_pop_size
  )

# Plot mean population projection with CIs
P50D4A15_mean_plot <- ggplot(P50D4A15_plot_pred, aes(x= time, y = mean)) +
  geom_line() +
  geom_ribbon(data = P50D4A15_plot_pred, aes(ymin = ll, ymax = ul), alpha = 0.2) +
  theme_classic() +
  labs(x = "Time (years)", y = "Mean total population size")

#### Calculate final mean population size and standard deviation from LSA
P50D4A15_pop_sizes <- numeric()
for (i in 1:n_sim) {
  ms <- P50D4A15_total_pop[[i]][time]
  P50D4A15_pop_sizes[i] <- ms
}

# mean pop size
P50D4A15_pop_mean <- mean(P50D4A15_pop_sizes)
# standard deviation pop size
P50D4A15_pop_sd <- sd(P50D4A15_pop_sizes)
# standard error pop size
P50D4A15_pop_se <- sd(P50D4A15_pop_sizes)/sqrt(length(P50D4A15_pop_sizes))

#### Calculate Stochastic Growth Rate
P50D4A15_lambda_s <- stoch.growth.rate(P50D4A15_matrices, prob = NULL, maxt = time, verbose = TRUE)

# convert from log
P50D4A15_lambda_s$approx <- exp(P50D4A15_lambda_s$approx)
P50D4A15_lambda_s$sim <- exp(P50D4A15_lambda_s$sim)
P50D4A15_lambda_s$sim.CI <- exp(P50D4A15_lambda_s$sim.CI)

#### Calculate Quasi-extinction Probability
P50D4A15_quasi <- stoch.quasi.ext(P50D4A15_matrices, n0= P50D4A15_n0, Nx = 50, tmax = time, maxruns = 1,
                                  nreps = 5000, prob = NULL, sumweight = NULL, verbose = TRUE)

# Plot quasi-extinction probabilities
P50D4A15_quasi_df <- data.frame(P50D4A15_quasi, "Year" = 1:time) %>%
  gather("sim", "quasi", -"Year")

P50D4A15_quasi_plot <- ggplot(P50D4A15_quasi_df, aes(x = Year, y = quasi, colour = sim)) +
  geom_line() +
  theme_bw() +
  ylim(0, 1) +
  theme(legend.position = "none") +
  labs(y = "Cumulative probability of quasi-extinction")

#### Calculate Stochastic Elasticities
P50D4A15_sens <- stoch.sens(P50D4A15_matrices, tlimit=time)
P50D4A15_elas <- P50D4A15_sens$elasticities

P50D4A15_elas_v <- c(P50D4A15_elas[1,1], P50D4A15_elas[1,2], P50D4A15_elas[1,3],
                     P50D4A15_elas[2,1], P50D4A15_elas[2,2], P50D4A15_elas[2,3],
                     P50D4A15_elas[3,1], P50D4A15_elas[3,2], P50D4A15_elas[3,3])

P50D4A15_elas_df <- data.frame(P50D4A15_elas_v) %>%
  gather("duration", "elasticity") %>%
  data.frame(stage)

P50D4A15_elas_plot <- ggplot(P50D4A15_elas_df, aes(x = stage, y= P50D4A15_elas_v)) +
  labs(x = "Vital rate", y = "Stochastic elasticity") +
  theme_bw() +
  geom_col(fill = "grey20")

#### LSA for Immature Duration of 5 and Adult Duration of 14 ----
## Stage duration
duration <- c(1, 5, 14)

## Initial Population Vector
# Stable stage distribution of mean matrix
stable_stage <- make_projection_matrix(survival, fecundity, duration) %>%
  stable.stage() %>%
  as.list()

# Initial population vector estimated from stable stage distribution
P50D5A14_n0 <- c(stable_stage[[1]]*Nc, stable_stage[[2]]*Nc, stable_stage[[3]]*Nc)

### Life-stage Simulation Analysis for Population in Stochastic Environment
## Stage duration list - repeat so that length is the same as survival and fecundity
duration_list <- rep(list(duration), times = n_sim)

## Simulate list of matrices using the vital rates and make_projection_matrix function
P50D5A14_matrices <- list()
for(i in 1:n_sim){
  mpm <- make_projection_matrix(survival_list[[i]], fecundity_list[[i]], duration_list[[i]])
  P50D5A14_matrices[[i]] <- mpm
}

## Repeat Stochastic Population Growth
P50D5A14_stochastic_pop <- list()
for(i in 1:n_sim){
  mp <- stochastic_proj(P50D5A14_matrices, n = P50D5A14_n0, time = time)
  P50D5A14_stochastic_pop[i] <- mp
}

# Multiply female population sizes by 2 to get total population size
P50D5A14_total_pop <- lapply(P50D5A14_stochastic_pop, "*", 2)

# Create for loop for pop sizes in each projection as a data frame to plot with ggplot
P50D5A14_df_plots <- list()
for(i in 1:n_sim){
  mpl <- data.frame(time = 1:time, pop_sizes = P50D5A14_total_pop[[i]])
  P50D5A14_df_plots[[i]] <- mpl
}

# Add identifier for each simulation
P50D5A14_plot_data <- bind_rows(P50D5A14_df_plots, .id = "id")

# Plot projection
P50D5A14_plot <- ggplot(P50D5A14_plot_data, aes(time, pop_sizes, fill=id)) +
  geom_line() +
  theme_classic() +
  labs(x = "Time (years)", y = "Total population size")

# Mean population size time series with 95% confidence intervals from LSA
P50D5A14_mean_plot_data <- P50D5A14_plot_data %>%
  group_by(time) %>%
  summarise(mean = mean(pop_sizes),
            se_pop_size = sd(pop_sizes)/sqrt(length(pop_sizes)))

# Get predictions and 95% CI
P50D5A14_plot_pred <- P50D5A14_mean_plot_data %>%
  mutate(
    pop_size = mean,
    # lower limit 95% CI
    ll = mean - 1.96 * se_pop_size,
    # upper limit 95% CI
    ul = mean + 1.96 * se_pop_size
  )

# Plot mean population projection with CIs
P50D5A14_mean_plot <- ggplot(P50D5A14_plot_pred, aes(x= time, y = mean)) +
  geom_line() +
  geom_ribbon(data = P50D5A14_plot_pred, aes(ymin = ll, ymax = ul), alpha = 0.2) +
  theme_classic() +
  labs(x = "Time (years)", y = "Mean total population size")

#### Calculate final mean population size and standard deviation from LSA
P50D5A14_pop_sizes <- numeric()
for (i in 1:n_sim) {
  ms <- P50D5A14_total_pop[[i]][time]
  P50D5A14_pop_sizes[i] <- ms
}

# mean pop size
P50D5A14_pop_mean <- mean(P50D5A14_pop_sizes)
# standard deviation pop size
P50D5A14_pop_sd <- sd(P50D5A14_pop_sizes)
# standard error pop size
P50D5A14_pop_se <- sd(P50D5A14_pop_sizes)/sqrt(length(P50D5A14_pop_sizes))

#### Calculate Stochastic Growth Rate
P50D5A14_lambda_s <- stoch.growth.rate(P50D5A14_matrices, prob = NULL, maxt = time, verbose = TRUE)

# convert from log
P50D5A14_lambda_s$approx <- exp(P50D5A14_lambda_s$approx)
P50D5A14_lambda_s$sim <- exp(P50D5A14_lambda_s$sim)
P50D5A14_lambda_s$sim.CI <- exp(P50D5A14_lambda_s$sim.CI)

#### Calculate Quasi-extinction Probability
P50D5A14_quasi <- stoch.quasi.ext(P50D5A14_matrices, n0= P50D5A14_n0, Nx = 50, tmax = time, maxruns = 1,
                                  nreps = 5000, prob = NULL, sumweight = NULL, verbose = TRUE)

# Plot quasi-extinction probabilities
P50D5A14_quasi_df <- data.frame(P50D5A14_quasi, "Year" = 1:time) %>%
  gather("sim", "quasi", -"Year")

P50D5A14_quasi_plot <- ggplot(P50D5A14_quasi_df, aes(x = Year, y = quasi, colour = sim)) +
  geom_line() +
  theme_bw() +
  ylim(0, 1) +
  theme(legend.position = "none") +
  labs(y = "Cumulative probability of quasi-extinction")

#### Calculate Stochastic Elasticities
P50D5A14_sens <- stoch.sens(P50D5A14_matrices, tlimit=time)
P50D5A14_elas <- P50D5A14_sens$elasticities

P50D5A14_elas_v <- c(P50D5A14_elas[1,1], P50D5A14_elas[1,2], P50D5A14_elas[1,3],
                     P50D5A14_elas[2,1], P50D5A14_elas[2,2], P50D5A14_elas[2,3],
                     P50D5A14_elas[3,1], P50D5A14_elas[3,2], P50D5A14_elas[3,3])

P50D5A14_elas_df <- data.frame(P50D5A14_elas_v) %>%
  gather("duration", "elasticity") %>%
  data.frame(stage)

P50D5A14_elas_plot <- ggplot(P50D5A14_elas_df, aes(x = stage, y= P50D5A14_elas_v)) +
  labs(x = "Vital rate", y = "Stochastic elasticity") +
  theme_bw() +
  geom_col(fill = "grey20")

#### PLOTS ----
## Stochastic Population Projection Plot
P50_A20_plot <- P50D1A18_plot + P50D2A17_plot + P50D3A16_plot + P50D4A15_plot + P50D5A14_plot

## Mean and CI Stochastic Population Plot
P50_A20_mean_plot <- P50D1A18_mean_plot + P50D2A17_mean_plot + P50D3A16_mean_plot + P50D4A15_mean_plot + P50D5A14_mean_plot

## Stochastic Population Growth (Lambda s)
P50_lambda_approx <- c(P50D1A18_lambda_s$approx, P50D2A17_lambda_s$approx, P50D3A16_lambda_s$approx,
                       P50D4A15_lambda_s$approx, P50D5A14_lambda_s$approx)
P50_lambda_sim <- c(P50D1A18_lambda_s$sim, P50D2A17_lambda_s$sim, P50D3A16_lambda_s$sim,
                    P50D4A15_lambda_s$sim, P50D5A14_lambda_s$sim)
P50_lower_CI <- c(P50D1A18_lambda_s$sim.CI[1], P50D2A17_lambda_s$sim.CI[1], P50D3A16_lambda_s$sim.CI[1],
                  P50D4A15_lambda_s$sim.CI[1], P50D5A14_lambda_s$sim.CI[1])
P50_upper_CI <- c(P50D1A18_lambda_s$sim.CI[2], P50D2A17_lambda_s$sim.CI[2], P50D3A16_lambda_s$sim.CI[2],
                  P50D4A15_lambda_s$sim.CI[2], P50D5A14_lambda_s$sim.CI[2])
stage_duration <- c("1 year", "2 years", "3 years", "4 years", "5 years")

P50_lambda_df <- data.frame(stage_duration, P50_lambda_approx, P50_lambda_sim, P50_upper_CI, P50_lower_CI)

P50_lambda_plot <- ggplot(P50_lambda_df) +
  geom_point(aes(x = stage_duration, y = P50_lambda_sim), fill = "grey20", size = 2) +
  geom_errorbar(aes(x = stage_duration, ymin = P50_lower_CI, ymax = P50_upper_CI), width = 0.2) +
  theme_bw() +
  geom_hline(yintercept=1, linetype="dashed", colour = "red") +
  scale_x_discrete(labels=c("1 year" = "1", "2 years" = "2", "3 years" = "3", "4 years" = "4", "5 years" = "5")) +
  labs(x = "Immature stage duration (years)", y = "Lambda for stochastic population growth")

## Quasi-extinction Threshold Plots
# NOTE(review): the breaks below match the "sim" labels produced by gather()
# from the per-scenario data.frame column names -- keep them in sync if the
# per-scenario object names change.
P50A20_quasi_df <- rbind.data.frame(P50D1A18_quasi_df, P50D2A17_quasi_df, P50D3A16_quasi_df,
                                    P50D4A15_quasi_df, P50D5A14_quasi_df)

P50A20_quasi_plot <- ggplot(P50A20_quasi_df, aes(x = Year, y = quasi, colour = sim)) +
  geom_line() +
  theme_bw() +
  ylim(0, 1) +
  labs(y = "Cumulative probability of quasi-extinction") +
  scale_colour_discrete(name = "Immature stage \nduration",
                        breaks = c("P50D1A18_quasi", "P50D2A17_quasi", "P50D3A16_quasi", "P50D4A15_quasi", "P50D5A14_quasi"),
                        labels = c("1 year", "2 years", "3 years", "4 years", "5 years"))

#P50_A20_quasi_plots <- P50D1A18_quasi_plot + P50D2A17_quasi_plot + P50D3A16_quasi_plot + P50D4A15_quasi_plot + P50D5A14_quasi_plot

# Elasticity analysis plots
P50A20_elas_df<- rbind.data.frame(P50D1A18_elas_df, P50D2A17_elas_df, P50D3A16_elas_df,
                                  P50D4A15_elas_df, P50D5A14_elas_df)

P50A20_elas_plot <- ggplot(P50A20_elas_df, aes(x = stage, y= elasticity, fill = duration)) +
  labs(x = "Vital rate", y = "Stochastic elasticity") +
  theme_bw() +
  geom_col(position = "dodge", colour = "black") +
  scale_fill_manual(name = "Immature stage \nduration",
                    breaks = c("P50D1A18_elas_v", "P50D2A17_elas_v", "P50D3A16_elas_v", "P50D4A15_elas_v", "P50D5A14_elas_v"),
                    labels = c("1 year", "2 years", "3 years", "4 years", "5 years"),
                    values = c("grey65", "grey40", "grey35", "grey15", "grey0"))

#image2(P50D1A18_elas)
#image2(P50D2A17_elas)
#image2(P50D3A16_elas)
#image2(P50D4A15_elas)
#image2(P50D5A14_elas)
1b14231b3f0bbc3667b940799abd94785c6383bf
ab39ad07bbbb65c1e61315076a43bce6cfa688f3
/R_Codes/3-PLS/spls_scores/R/plot.spls.R
c9edc208485664155f9d05395134cc04b676bd2c
[]
no_license
ManonMartin/thesisMaterial
771e7fdcd0ff3ceba7607f2abaa7d05e20366d61
950b79588f224c649ef12a3a3c0fa7c3280e9806
refs/heads/master
2021-11-06T23:15:01.741098
2021-11-04T13:34:46
2021-11-04T13:34:46
208,266,675
2
2
null
null
null
null
UTF-8
R
false
false
965
r
plot.spls.R
# Coefficient path plot (S3 plot method for class "spls").
#
# For every requested response column in `yvar`, draws the coefficient
# estimates of all p predictors against the number of components K
# (one curve per predictor). Each response after the first is drawn on
# a fresh graphics device via dev.new().
#
# Args:
#   x:    A fitted "spls" object; must carry betamat (list of p x q
#         coefficient matrices, one per number of components), K, eta,
#         and the original x / y data.
#   yvar: Indices of the response columns to plot (default: all).
#   ...:  Further arguments forwarded to plot().
"plot.spls" <- function( x, yvar=c(1:ncol(x$y)), ... )
{
    # initialization
    betamat <- x$betamat
    K <- x$K
    eta <- x$eta
    p <- ncol(x$x)

    # seq_len() is safe when K == 0, unlike c(1:K)
    Ks <- seq_len(K)

    # coefficient plot, one device per response
    for ( i in seq_along(yvar) )
    {
        if ( i > 1 ) { dev.new() }

        # p x K matrix: column j holds the coefficients fitted with j
        # components. vapply() preallocates instead of growing via cbind().
        betamatq <- vapply( Ks,
                            function(j) betamat[[j]][ , yvar[i] ],
                            numeric(p) )

        ylimit <- range( betamatq )
        main.name <- paste( 'Coefficient Path Plot (eta=', eta, ')', sep='' )

        # empty canvas; paths (or points) are layered on below
        plot( Ks, Ks, xlim=c(1,K), ylim=ylimit, type='n',
              xlab='K', ylab='Coefficient Estimates', main=main.name, ... )

        if ( length(Ks) > 1 )
        {
            for ( j in seq_len(p) ) { lines( Ks, betamatq[j,], col=j ) }
        } else
        {
            # single K: no path to draw, just show the estimates
            points( rep( 1, length(betamatq) ), betamatq )
        }
        # reference line at zero coefficient
        abline( h=0, lty=2, col='red' )
    }
}
3835e92ed2b4588ae76c8ea67a872943628f232c
e532902731033a9b2e4339cd6b7073389a21b78d
/man/sdRain.Rd
2649a1929fcebc8794bcefce4a4ca8d7f3372870
[]
no_license
CRUKMI-ComputationalBiology/twoddpcr
0cd307a42a26c1db00a6ac969d5ad9803d14bef4
58667eb96e30274285bf93a2db091ad6f2ddb7bd
refs/heads/master
2021-07-22T01:49:54.307616
2021-02-09T23:48:44
2021-02-09T23:48:44
70,164,790
9
1
null
null
null
null
UTF-8
R
false
true
3,964
rd
sdRain.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/sdRain.R \name{sdRain} \alias{sdRain} \alias{sdRain,data.frame-method} \alias{sdRain,ddpcrWell-method} \alias{sdRain,ddpcrPlate-method} \title{Add rain to a classification by using a chosen multiple of standard deviation.} \usage{ sdRain(droplets, cMethod, errorLevel = 5, ...) \S4method{sdRain}{data.frame}(droplets, cMethod, errorLevel = 5, fullTable = TRUE) \S4method{sdRain}{ddpcrWell}(droplets, cMethod, errorLevel = 5) \S4method{sdRain}{ddpcrPlate}(droplets, cMethod, errorLevel = 5) } \arguments{ \item{droplets}{A \code{\link{ddpcrWell}} or \code{\link{ddpcrPlate}} object, or a droplet data frame including a classification column.} \item{cMethod}{The name or column number of the classification for which we want to add rain.} \item{errorLevel}{How many multiples of standard deviation from the mean of each cluster to retain. Can be a list where each item corresponds to a class name and the multiple for that class. Can also be a numeric vector of length 1, which is equivalent to a list with all the same entries. Defaults to 5.} \item{...}{Other options depending on the type of \code{droplets}.} \item{fullTable}{If \code{TRUE}, returns a full data frame of droplets with an extra column of rainy data; if \code{FALSE}, simply returns a factor where each entry corresponds to an entry in the original classification column with added rain. Defaults to \code{TRUE}.} } \value{ If \code{droplets} is a data frame, return a data frame or factor (depending on \code{fullTable}) where droplets with ambiguous classifications are labelled as "Rain". If \code{droplets} is a \code{ddpcrWell} object, return a \code{ddpcrWell} object with a rainy classification. If \code{droplets} is a \code{ddpcrPlate} object, return a \code{ddpcrPlate} object with rainy classifications. 
} \description{ Although we can use various algorithms to classify all droplets in a ddPCR experiment, there will be some variation between the classifications. We can perhaps have a relatively high confidence that droplets near the centres of clusters do indeed belong to that cluster, whereas we probably have a lower confidence in the classification of those further away, say, near the 'boundary' of two clusters. We may view these droplets (or a subset of them) as having an ambiguous class. This function allows us to only consider droplets classified within a certain distance of the means of each cluster and label the rest as "Rain". } \examples{ ## Compare the types of droplets in a single well for the "Cluster" class ## and then with rain. aWell <- ddpcrWell(well=KRASdata[["E03"]]) aWell <- sdRain(aWell, cMethod="Cluster") cl <- wellClassification(aWell) table(cl$Cluster) table(cl$ClusterSdRain) ## Compare the types of droplets in multiple wells for the "Cluster" class ## and then with rain. krasPlate <- ddpcrPlate(wells=KRASdata[c("E03", "H03", "C04", "F04")]) krasPlate <- sdRain(krasPlate, cMethod="Cluster") plateSummary(krasPlate, cMethod="Cluster")[, c(1:5)] plateSummary(krasPlate, cMethod="ClusterSdRain")[, c(1:5)] ## The 'errorLevel' parameter can changed. krasPlate <- sdRain(krasPlate, cMethod="Cluster", errorLevel=4) plateSummary(krasPlate, cMethod="ClusterSdRain")[, c(1:5)] ## The 'errorLevel' parameter can also be changed for each cluster. krasPlate <- sdRain(krasPlate, cMethod="Cluster", errorLevel=list(NN=5, NP=5, PN=4, PP=3)) plateSummary(krasPlate, cMethod="ClusterSdRain")[, c(1:5)] } \references{ This approach was described in {Jones, M., Williams, J., Gaertner, K., Phillips, R., Hurst, J., & Frater, J. (2014). Low copy target detection by Droplet Digital PCR through application of a novel open access bioinformatic pipeline, "definetherain." Journal of Virological Methods, 202(100), 46--53. 
\url{http://doi.org/10.1016/j.jviromet.2014.02.020}} } \author{ Anthony Chiu, \email{anthony.chiu@cruk.manchester.ac.uk} }