blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
10d6affa081893d4841605c8a4a2e637169afe51
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/dint/examples/c.date_xx.Rd.R
|
1a1273971452f0d7b6002a6d64b5ae0ac03c40bc
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 230
|
r
|
c.date_xx.Rd.R
|
# Extracted example script for dint::c.date_xx (the kind run by example()
# or R CMD check); it mirrors the Examples section of c.date_xx.Rd.
library(dint)
### Name: c.date_xx
### Title: Concatenate date_xx Objects
### Aliases: c.date_xx
### ** Examples
# Concatenating vectors of the same date_xx subclass (year-quarter) works.
c(date_yq(2000, 1:2), date_yq(2000, 3:3))
# raises an error
# (mixing subclasses -- year-quarter with year-month -- is not allowed;
# try() keeps the script running when the error is signalled)
try(c(date_yq(2000, 1:2), date_ym(2000, 1:12)))
|
e2d52aaf9e58c1af1562513062bf6f56925e875c
|
657cb8d31a7edde2ba866b43417fcff13168025c
|
/R/ga_data.R
|
aecb64be94e733330caad7c969ab5dc4d3d63f2d
|
[] |
no_license
|
jdeboer/googleAnalyticsR
|
d026832e1d48787c5c0bc58489a2932cc8ddb5b2
|
8ca31878fff7fb3b0215acad810e137b609f7018
|
refs/heads/master
| 2023-01-20T04:53:50.946223
| 2020-11-24T19:41:38
| 2020-11-24T19:41:38
| 48,536,792
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,664
|
r
|
ga_data.R
|
# API version segment used when building Data API (GA4 / App+Web)
# endpoint URLs.
version_aw <- function() {
  "v1alpha"
}
#' Google Analytics Data for GA4 (App+Web)
#'
#' Fetches Google Analytics from the Data API for Google Analytics 4 (Previously App+Web)
#'
#' @seealso \href{https://developers.google.com/analytics/devguides/reporting/data/v1}{Google Documentation}
#'
#' @details
#' When \code{realtime = TRUE} the request is sent to the realtime
#' reporting endpoint and \code{date_range} is ignored (realtime reports
#' have no date dimension).  Metric aggregations are not yet supported:
#' the argument is currently disabled in the implementation.
#'
#' @inheritParams Entity
#' @inheritParams RunReportRequest
#' @param date_range A vector of length two with start and end dates in YYYY-MM-DD format
#' @param dimensionDelimiter If combining dimensions in one column, the delimiter for the value field
#' @param realtime If TRUE then will call the real-time reports, that have a more limited set of dimensions/metrics - see \url{https://developers.google.com/analytics/devguides/reporting/data/v1/realtime-basics}
#' @importFrom googleAuthR gar_api_generator
#' @import assertthat
#' @family BatchRunReportsRequest functions
#' @export
#' @examples
#'
#' \dontrun{
#'
#' # send up to 4 date ranges
#' multi_date <- ga_data(
#'   206670707,
#'   metrics = c("activeUsers","sessions"),
#'   dimensions = c("date","city","dayOfWeek"),
#'   date_range = c("2020-03-31", "2020-04-27", "2020-04-30", "2020-05-27"),
#'   dimensionFilter = ga_data_filter("city"=="Copenhagen"),
#'   limit = 100
#' )
#'
#'
#' # metric and dimension expressions
#'
#' # create your own named metrics
#' met_expression <- ga_data(
#'   206670707,
#'   metrics = c("activeUsers","sessions",sessionsPerUser = "sessions/activeUsers"),
#'   dimensions = c("date","city","dayOfWeek"),
#'   date_range = c("2020-03-31", "2020-04-27"),
#'   limit = 100
#' )
#'
#' # create your own aggregation dimensions
#' dim_expression <- ga_data(
#'   206670707,
#'   metrics = c("activeUsers","sessions"),
#'   dimensions = c("date","city","dayOfWeek", cdow = "city/dayOfWeek"),
#'   date_range = c("2020-03-31", "2020-04-27"),
#'   limit = 100
#' )
#'
#' # run a real-time report (no date dimension allowed)
#' realtime <- ga_data(
#'   206670707,
#'   metrics = "activeUsers",
#'   dimensions = "city",
#'   dimensionFilter = ga_data_filter("city"=="Copenhagen"),
#'   limit = 100,
#'   realtime = TRUE)
#'
#' }
ga_data <- function(propertyId,
                    metrics,
                    date_range = NULL,
                    dimensions = NULL,
                    dimensionFilter = NULL,
                    dimensionDelimiter = "/",
                    metricFilter = NULL,
                    # metricAggregations = NULL,
                    orderBys = NULL,
                    limit = 100,
                    realtime = FALSE) {
  # in case someone passes in a filter instead of an expression
  dimensionFilter <- as_filterExpression(dimensionFilter)
  metricFilter <- as_filterExpression(metricFilter)

  # if(!is.null(metricAggregations)){
  #   assert_that(all(metricAggregations %in% c("TOTAL",
  #                                             "MINIMUM",
  #                                             "MAXIMUM",
  #                                             "COUNT")))
  # }
  #TODO: re-enable metricAggregations once supported downstream
  metricAggregations <- NULL

  # build request objects for the dimensions/metrics (supports expressions)
  dims <- gaw_dimension(dimensions, delimiter = dimensionDelimiter)
  mets <- gaw_metric(metrics)

  if(realtime){
    # realtime reports carry no date ranges
    brrr <- RunRealtimeReport(
      dimensions = dims,
      metrics = mets,
      limit = limit,
      dimensionFilter = dimensionFilter,
      metricFilter = metricFilter,
      metricAggregations = metricAggregations,
      orderBys = orderBys,
      returnPropertyQuota = TRUE
    )
    myMessage("Realtime Report Request", level = 3)
    res <- ga_aw_realtime(propertyId, brrr)
    return(res)
  }

  # here as not needed for real-time but needed for brrr
  dates <- gaw_dates(date_range)

  brrr <- BatchRunReportsRequest(
    entity = Entity(propertyId),
    requests = list(
      RunReportRequest(
        metrics = mets,
        dimensions = dims,
        dateRanges = dates,
        limit = limit,
        dimensionFilter = dimensionFilter,
        metricFilter = metricFilter,
        metricAggregations = metricAggregations,
        orderBys = orderBys,
        keepEmptyRows = TRUE,
        returnPropertyQuota = TRUE
      )
    )
  )

  ga_aw_report(brrr)
}
#' Realtime API
#'
#' POSTs a gar_RunRealtimeReport request object to the
#' analyticsdata.runRealtimeReport endpoint for the given property.
#' @noRd
ga_aw_realtime <- function(property, requestObj){
  endpoint <- sprintf(
    "https://analyticsdata.googleapis.com/%s/properties/%s:runRealtimeReport",
    version_aw(), property
  )
  # analyticsdata.runRealtimeReport
  call_api <- gar_api_generator(endpoint, "POST",
                                data_parse_function = parse_realtime)
  stopifnot(inherits(requestObj, "gar_RunRealtimeReport"))
  call_api(the_body = requestObj)
}
#' Normal Reporting API
#'
#' POSTs a gar_BatchRunReportsRequest object to the
#' analyticsdata.batchRunReports endpoint.
#' @noRd
ga_aw_report <- function(requestObj){
  endpoint <- sprintf(
    "https://analyticsdata.googleapis.com/%s:batchRunReports",
    version_aw()
  )
  # analyticsdata.batchRunReports
  call_api <- gar_api_generator(endpoint, "POST",
                                data_parse_function = parse_batchrunreports)
  stopifnot(inherits(requestObj, "gar_BatchRunReportsRequest"))
  call_api(the_body = requestObj)
}
# Parse a realtime report response into a data.frame.
# Returns an empty data.frame when the API reported no rows.
parse_realtime <- function(x){
  if (no_rows(x)) {
    return(data.frame())
  }
  parse_rows(x,
             x$dimensionHeaders$name,
             x$metricHeaders$name)
}
# Parse a batchRunReports response into a data.frame.
# Headers sit one list level deeper than in the realtime response,
# hence the [[1]] extraction.
parse_batchrunreports <- function(x){
  reports <- x$reports
  if (no_rows(reports)) {
    return(data.frame())
  }
  parse_rows(reports,
             reports$dimensionHeaders[[1]]$name,
             reports$metricHeaders[[1]]$name)
}
# TRUE when the parsed API response carries no rows element
# (logging a message in that case), FALSE otherwise.
no_rows <- function(o){
  empty <- is.null(o$rows)
  if (empty) {
    myMessage("No data found", level = 3)
  }
  empty
}
#' Convert parsed response rows into one tibble of dimensions + metrics
#'
#' @param o Parsed response list holding `rows` (and optionally `metadata`).
#' @param dim_names Character vector of dimension column names.
#' @param met_names Character vector of metric column names.
#' @return One data frame with a column per dimension/metric; metric
#'   columns coerced to numeric, a "date" column parsed to Date, and the
#'   response metadata (if any) attached as the "metadata" attribute.
#' @noRd
#' @importFrom dplyr bind_cols across mutate all_of
parse_rows <- function(o, dim_names, met_names){
  the_data <- lapply(o$rows, function(x){
    dds <- get_value_cols(x, type = "dimensionValues")
    mms <- get_value_cols(x, type = "metricValues")
    dds <- setNames(dds, dim_names)
    mms <- setNames(mms, met_names)
    # realtime requests may come back with no dimensions at all
    if(nrow(dds) == 0){
      o <- mms
    } else {
      o <- bind_cols(dds, mms)
    }
    o
  })
  # one-pass bind; Reduce(rbind, ...) re-copies the accumulated result
  # for every element (quadratic in the number of rows)
  res <- do.call(rbind, the_data)
  #type changes
  if("date" %in% names(res)){
    res$date <- as.Date(res$date, format = "%Y%m%d")
  }
  # all_of() makes the external character vector an explicit selection;
  # passing a bare vector to across() is deprecated tidyselect usage
  res <- res %>% mutate(across(all_of(met_names), as.numeric))
  quota_messages(o)
  # guard: metadata may be absent from the response (ncol(NULL) errors)
  attr(res, "metadata") <-
    if(!is.null(o$metadata) && ncol(o$metadata) > 0) o$metadata else NULL
  res
}
#' Extract the "value" fields of one response row as a tibble
#'
#' Each entry of x[[type]] is a list carrying a "value" element; the
#' values are stacked row-wise and converted to a tibble without any
#' name repair.
#' @noRd
#' @importFrom tibble as_tibble
get_value_cols <- function(x,
                           type = c("dimensionValues", "metricValues")){
  type <- match.arg(type)
  vals <- lapply(x[[type]], function(entry) entry[["value"]])
  as_tibble(do.call(rbind, vals), .name_repair = "minimal")
}
|
aeb8adc9422d7da0ca90f37e6d6b496115ce2cb5
|
b4df4497594607163aae708f21abda95b05e9ce6
|
/R/acqlogit/acqlogit_compustat_update.R
|
d5d1848c207a40f6cee3fd19af21945dfb9a7cd9
|
[] |
no_license
|
sdownin/compnet
|
9ce7bad397954226229da8bfc66951f266a207c6
|
c154c07af36f1745b11daad377caf8bdb699016e
|
refs/heads/master
| 2021-06-27T20:32:37.671291
| 2020-12-14T04:17:31
| 2020-12-14T04:17:31
| 62,546,122
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,124
|
r
|
acqlogit_compustat_update.R
|
###
##
## Update Compustat Fundamentals Annual Data
## to subset columns for controls and add Market-to-Book ratio
##
###
#setwd("C:/Users/T430/Google Drive/PhD/Dissertation/competition networks/compnet2")
# .libPaths('C:/Users/T430/Documents/R/win-library/3.2')
library(parallel)
library(network, quietly = T)
library(texreg, quietly = T)
library(igraph, quietly = T)
library(plyr, quietly = T)
library(lattice, quietly = T)
library(latticeExtra, quietly = T)
library(ggplot2, quietly = T)
library(reshape2)
## Update Compustat Fundamentals Annual data: subset the control columns
## and compute the Market-to-Book (M/B) ratio.
## Reads  compustat/fundamentals-annual.csv          (relative to the wd)
## Writes compustat/fundamentals-annual-UPDATED.csv
.cs.update <- function()
{
  data_dir <- "C:/Users/T430/Google Drive/PhD/Dissertation/crunchbase/"

  # NOTE(review): this helper is currently unused inside the function;
  # kept for interactive checks.
  is.missing <- function(x)
  {
    if (is.null(x))
      return(TRUE)
    return(is.na(x) | is.nan(x) | x == '')
  }

  ##===============================
  ##
  ## UPDATE COMPUSTAT DATA FOR MARKET-to-BOOK VALUE
  ##
  ##-------------------------------
  ## COMPUSTAT *** SLOW TO LOAD ***
  csa <- read.csv(file.path('compustat', 'fundamentals-annual.csv'),
                  na.strings = c(NA, '', 'NA'), stringsAsFactors = F, fill = T)
  dim(csa)
  names(csa)

  ## SELECT COLUMNS FROM COMPUSTAT
  cols <- c('conm','conml','gvkey','datadate','fyear','indfmt','consol','popsrc','tic','cusip',
            'act',    ## total assets (ln for size proxy)
            'che',    ## cash and short term investments (scale by total assets for cash holdings)
            'emp',    ## employees (ln employee size proxy)
            'ebitda', ## ebidta (scale by total assets for ROA performance proxy)
            'prcc_c', ## close market price at calendar year
            'csho',   ## shares outstanding (PRCC_C x CSHO = market value of equity)
            'seq',    ## stockholder equity
            'ceq',    ## total common equity
            'pstkrv', ## preferred stock redemption value
            'pstkl',  ## preferred stock liquidation
            'pstk',   ## preferred stock par value
            'lt',     ## Total Liabilities
            'mib',    ## Minority Interest
            'txditc'  ## balance sheet deferred taxes
  )
  ## SUBSET COMPUSTAT
  csa2 <- csa[, cols]

  ##===========================
  ##
  ## COMPUTE M/B RATIO
  ## @see https://wrds-www.wharton.upenn.edu/pages/support/applications/risk-and-valuation-measures/market-book-mb-ratio/
  ##
  ##---------------------------
  ## DATA YEAR
  ## BUGFIX: str_sub() is from stringr, which is never loaded by this
  ## script; base substr() is equivalent here (datadate is read as a
  ## character column, e.g. "20161231", via stringsAsFactors = F).
  csa2$datayear <- as.integer(substr(csa2$datadate, 1, 4))
  ## DATA MONTH
  csa2$datamonth <- as.integer(substr(csa2$datadate, 5, 6))

  ## YEAR FOR BOOK VALUE USED IN BOOK-to-MARKET RATIO
  ## if fiscal year ends in Jan-May, use previous year book value, else use current year
  csa2$bookyear <- apply(csa2[, c('datayear', 'datamonth')], 1, function(x){
    y <- as.numeric(x[1])
    m <- as.numeric(x[2])
    return(ifelse(m < 6, y - 1, y))
  })

  ## GET MARKET VAL OF EQUITY BY FISCAL YEAR
  csa2$prcc_c_f <- NA ## init price by firm fiscal year (previous year price if datayear ends in Jan-May)
  conms <- sort(unique(csa2$conm))
  for (j in 1:length(conms)) {
    conm <- conms[j]
    years <- unique(csa2$datayear[which(csa2$conm == conm & csa2$datayear <= 2017)])
    ## progress report every 10th company
    if (j %% 10 == 0) cat(sprintf('%s years %s-%s (%.2f%s)\n', conm, min(years), max(years), 100 * j / length(conms), '%'))
    for (year in years) {
      i_set <- which(csa2$conm == conm & csa2$datayear == year)                ## set price index
      if (length(i_set) == 0) next
      i_get <- which(csa2$conm == conm & csa2$datayear == csa2$bookyear[i_set]) ## get price index
      if (length(i_get) == 0) next
      csa2$prcc_c_f[i_set] <- csa2$prcc_c[i_get]
    }
  }

  ## MARKET VALUE OF EQUITY -- USING CORRECT PRICE BY FIRM FISCAL YEAR
  csa2$mcap_c <- csa2$prcc_c_f * csa2$csho

  ## STOCK HOLDER EQUITY algorithm
  ## SHE = 1. `seq` if available, else
  ##       2. `ceq`+`pstk` if available, else
  ##       3. `act`-(`lt`+`mib`)
  csa2names <- names(csa2)
  # count <- 0 ## DEBUG `she` computation
  csa2$she <- apply(csa2, 1, function(x){
    # count <<- count+1 ## DEBUG
    seq <- as.numeric(unlist(x[which(csa2names == 'seq')]))
    if (!is.na(seq)) {
      # cat(sprintf('row %s: seq\n',count)) ## DEBUG
      return(seq)
    }
    ceq <- as.numeric(unlist(x[which(csa2names == 'ceq')]))
    pstk <- as.numeric(unlist(x[which(csa2names == 'pstk')]))
    if (!is.na(ceq) & !is.na(pstk)) {
      # cat(sprintf('row %s: ceq+pstk\n',count)) ## DEBUG
      return(ceq + pstk)
    }
    act <- as.numeric(unlist(x[which(csa2names == 'act')]))
    lt <- as.numeric(unlist(x[which(csa2names == 'lt')]))
    mib <- as.numeric(unlist(x[which(csa2names == 'mib')]))
    if (!is.na(act) & !is.na(lt) & !is.na(mib)) {
      # cat(sprintf('row %s: act-(lt+mib)\n',count)) ## DEBUG
      # cat(sprintf('act %s, lt %s, mib %s\n',act,lt,mib))
      return(act - (lt + mib))
    }
    return(NA)
  })

  ## BOOK VALUE OF EQUITY
  ## BVE = SHE minus preferred stock (first available of pstkrv/pstkl/pstk)
  csa2$bve <- apply(csa2[, c('she', 'pstkrv', 'pstkl', 'pstk')], 1, function(x){
    she <- x[1]
    ps <- as.numeric(if (!is.na(x[2])) { x[2] } else if (!is.na(x[3])) { x[3] } else if (!is.na(x[4])) { x[4] } else { 0 })
    return(ifelse(is.na(she), NA, she - ps))
  })

  ## MARKET-to-BOOK RATIO
  csa2$m2b <- apply(csa2[, c('mcap_c', 'bve')], 1, function(x){
    if (any(is.na(x))) return(NA)
    return(ifelse(x[2] == 0, NA, x[1] / x[2]))
  })

  ## SAVE updated compustat data
  write.csv(csa2, file = file.path('compustat', 'fundamentals-annual-UPDATED.csv'), row.names = F)
}
## Run the Compustat update when this script is sourced.
.cs.update()
|
a2e510c7c6bc83c4a6af69f14ba78ca74fb2678c
|
784ac8673ffdf4798187b1f4a1c5532bfe73c99d
|
/stackoverflow/41360278.R
|
3faaac316fc297351deefed50d957c7f5ed164b1
|
[] |
no_license
|
serhatcevikel/R
|
be4e928ffb63ad881ddd618d3e3fa85f80855dd3
|
623f0b5757140d0f695e8542b31f86be1dde9f01
|
refs/heads/master
| 2020-05-22T01:33:29.291220
| 2018-03-19T22:18:18
| 2018-03-19T22:18:18
| 48,517,553
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,808
|
r
|
41360278.R
|
# R: replicate a row and update the date entry by one per row
# Answer to http://stackoverflow.com/questions/41360278/r-replicate-a-row-and-update-the-date-entry-by-one-per-row
# The input and its intended output show that I want to replicate the row of the input and update the date entry. How can I do this?
# Input
# > aa<- data.frame(a=c(1,11,111),b=c(2,22,222),length=c(3,5,1),date=c(as.Date("28.12.2016",format="%d.%m.%Y"), as.Date("30.12.2016",format="%d.%m.%Y"), as.Date("01.01.2017",format="%d.%m.%Y")))
# > aa
# a b length date
# 1 1 2 3 2016-12-28
# 2 11 22 5 2016-12-30
# 3 111 222 1 2017-01-01
# Intended Output
# a b length date
# 1 1 2 3 2016-12-28
# 2 1 2 3 2016-12-29
# 3 1 2 3 2016-12-30
# 4 11 22 5 2016-12-30
# 5 11 22 5 2016-12-31
# 6 11 22 5 2017-01-01
# 7 11 22 5 2017-01-02
# 8 11 22 5 2017-01-03
# 9 111 222 1 2017-01-01
aa<- data.frame(a=c(1,11,111),b=c(2,22,222),length=c(3,5,1),date=c(as.Date("28.12.2016",format="%d.%m.%Y"), as.Date("30.12.2016",format="%d.%m.%Y"), as.Date("01.01.2017",format="%d.%m.%Y")))
# Append a copy of the final row of df1, advancing its date
# (column 4) by one day.
replicaterow <- function(df1 = aa) {
  dup <- df1[nrow(df1), ]
  dup[4] <- dup[4] + 1
  rbind(df1, dup)
}
# Replicate each row of df1 `length` times (column 3), advancing the
# date (column 4) by one day per extra copy.  A row always appears at
# least once, even when its length entry is 0 or negative (matching the
# original loop, which emitted every row before checking reps > 1).
#
# Vectorized rewrite: the original grew `newdf` one row at a time inside
# a nested loop (quadratic copying); rep()/sequence() build the same
# result in one pass, and an empty df1 now yields an empty result
# instead of erroring on the 1:0 loop.
replicaterow1 <- function(df1 = aa) {
  reps <- pmax(as.integer(df1[[3]]), 1L)
  # index each input row `reps` times, keeping all columns/classes
  out <- df1[rep(seq_len(nrow(df1)), times = reps), , drop = FALSE]
  # per-copy day offsets: 0, 1, ..., reps-1 for every input row
  out[[4]] <- out[[4]] + (sequence(reps) - 1L)
  rownames(out) <- NULL
  out
}
|
089be1f4054096d1762d066f4f7a5840e14819d5
|
25ec9519eeb158a777ed9865dfb57aab0809c60d
|
/tmp/LatticeKrig/man/LatticeKrig.Rd
|
51e3ca7b6ace15ee6e7a674b4ce7faaca480ab37
|
[] |
no_license
|
NCAR/LatticeKrig
|
cccdcaba2d16c96b722de6a2e499e09f5c36ccf2
|
5caccca61f52b53d215d9375dedb8553e6ee75b7
|
refs/heads/master
| 2021-09-14T10:49:13.136451
| 2021-08-23T21:58:31
| 2021-08-23T21:58:31
| 61,819,138
| 7
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 20,414
|
rd
|
LatticeKrig.Rd
|
% # LatticeKrig is a package for analysis of spatial data written for
% # the R software environment .
% # Copyright (C) 2016
% # University Corporation for Atmospheric Research (UCAR)
% # Contact: Douglas Nychka, nychka@ucar.edu,
% # National Center for Atmospheric Research, PO Box 3000, Boulder, CO 80307-3000
% #
% # This program is free software; you can redistribute it and/or modify
% # it under the terms of the GNU General Public License as published by
% # the Free Software Foundation; either version 2 of the License, or
% # (at your option) any later version.
% # This program is distributed in the hope that it will be useful,
% # but WITHOUT ANY WARRANTY; without even the implied warranty of
% # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% # GNU General Public License for more details.
% # Modified DH Dec 2013
% # Modified NL Jan -> DWN Jan 2014
\name{LatticeKrig}
\alias{LatticeKrig}
\alias{print.LatticeKrig}
\title{User-friendly spatial prediction and inference using a compactly supported
multi-resolution basis and a lattice model for the basis coefficients.
}
\description{ This is a simple and high level function to fit the LatticeKrig spatial
model to a data set.
In its simplest form, for 2-d spatial data:
\code{obj<-LatticeKrig(x,y)} will fit a LatticeKrig type model to 2d locations \code{x} and observation vector \code{y}. Several (actually many!) default choices are made for the multi-resolution spatial
covariance
in this top level function. It uses the defaults that are based on
a "thin-plate spline" like model for the spatial estimator
and also uses \code{LKrigFindLambda} to estimate some covariance parameters (sill and nugget variances)
through likelihood maximization (i.e. estimates the measurement and process
variances.) For the simplest "black box" use, only the observations and their 2-d
locations need to be specified. But please see the caveats below in the Details
section and also see the vignette \url{https://github.com/NCAR/LatticeKrig/tree/master/Vignette} for a gentle introduction.
Despite the simple syntax,
the LatticeKrig function still takes advantage of the
multi-resolution features of the basic
\code{LKrig} function and any \code{LKrig} parameter can be passed through the
function call. See the example below for varying the range parameter.
Also, see \code{\link{LKinfo}} and \code{\link{LKrigSetup}} for documentation on the complete object that
describes the LatticeKrig model and the function to create it easily. See \code{\link{LKGeometry}} for documentation on
extending or adding other spatial models to this package.
The returned value from this function can be used subsequently for prediction,
conditional simulation, and other parts of the spatial analysis. See
\code{\link{predict.LKrig}} and \code{\link{LKrig.sim.conditional}}
}
\usage{
LatticeKrig(x, y, Z = NULL, nlevel = 3, findAwght = FALSE, LKinfo = NULL,
X=NULL, U=NULL, na.rm =
TRUE, tol = 0.005, verbose = FALSE, ...)
\method{print}{LatticeKrig}( x, digits=4, ...)
}
\arguments{
\item{x}{Spatial locations of observations. For the \code{LatticeKrig} function this should be a matrix
where the columns index the spatial dimensions and rows index the observations.
For example for 100 2-d locations, \code{x} would be a 100X2 matrix.
Or for the function \code{print.LatticeKrig} \code{x} is the returned object from the
\code{LatticeKrig} function. }
\item{y}{Spatial observations. No missing values are allowed.}
\item{Z}{Linear covariates to be included in fixed part of the model
that are distinct from the default first order polynomial in
\code{x} (i.e. the spatial drift).}
\item{X}{For linear inverse problems the matrix that maps coefficients of the basis to the
predicted values of observations. X must be in spam format. To convert from spind or dense
format to spam format see \code{help(spam)} as an alternative
\code{\link{spind2spam}}. See an example for this extension in the
\code{\link{LKrig}} help file.
}
\item{U}{For linear inverse problems the matrix that maps coefficients of the fixed part of the model to the predicted
values of observations. This needs to specified along with \code{X}
}
\item{nlevel}{Number of levels for the multi-resolution basis. Each
level increases the number of basis functions by roughly a factor of
4.}
\item{findAwght}{If FALSE the default a.wght parameter (related to correlation range) is
set to mimic a thin plate spline. If TRUE this parameter and hence the range is
estimated by maximum likelihood. }
\item{LKinfo}{An optional list giving the full specification of the
covariance. If this is missing it will be created internally and
returned. If passed this parameterization will be used
except lambda will be re-estimated by maximum likelihood.}
\item{na.rm}{If TRUE NA's are removed from \code{y} and \code{x} is subsetted.}
\item{tol}{Tolerance for the log likelihood used to judge convergence.}
\item{verbose}{ If TRUE print out intermediate results. }
\item{\dots}{Additional arguments to pass to LKrig.
The easiest way to pass a full specification is to create an LKinfo object beforehand and then just pass that (see example below.) This gives more control and the setup function will do some error checking on arguments. Also see help(LKrig) for a complete list of arguments to pass. For convenience we note that if you get some pesky memory warnings from spam
you can set the storage higher by adding the argument \code{choleskyMemory}. For example to bump up to 2E6 include:
\code{choleskyMemory=list(nnzR= 2E6)}.
}
\item{digits}{Number of significant digits in printed summary.}
}
\details{
Keep in mind that overall LatticeKrig is just a specific type of
spatial estimate that is designed to handle larger size data sets.
It focuses on a specific form of covariance function, but the estimator is
still the Kriging/Multivariate Gaussian Conditional Expectation/BLUE that is
standard in this field.
The simplest model fit is:
\deqn{Y_k = p(x_k) + h(x_k) + e_k }
\eqn{Y_k} is the \eqn{k^{th}} observation at location \eqn{x_k} with measurement error \eqn{e_k}.
Here \eqn{p(x)} is a low order polynomial of degree m-1 with the default \code{m==2}. \eqn{h(x)} is a mean zero Gaussian process with the representation:
\deqn{ h(x)= \sum_{l=1}^L \sum_{j=1}^{m(j)} \phi_{m,l}(x) c_{m,l} }
where \eqn{\phi} are multi-resolution basis functions and the coefficients
have mean zero and spatial dependence specified by a Markov random field. Keep in mind that this unusual form still implies a specific covariance function for \eqn{h(x)}. In fact one can use the Krig or mKrig function from fields
to reproduce the LatticeKrig estimate for smaller data sets and check computations.
(See \code{\link{LKrig.cov}} with examples ). Details on the basis functions and the Markov random field are given in the \code{\link{LKrig}}
help function.
Throughout this package we assume the standard deviation of \eqn{e_k}
is \code{sigma}
and the marginal variance of \eqn{h(x)} is \code{rho}. An important derived
parameter of the spatial estimate is \code{lambda = sigma^2/ rho} the noise to
signal ratio. \code{sigma} and \code{rho} are estimated by restricted maximum
likelihood in \code{LatticeKrig}.
This top level function is built on the more basic function \code{\link{LKrig}}
which supports a very flexible
covariance. \code{LKrig} depends on the parameters \code{nlevel}, \code{a.wght} and
\code{alpha} specifying all these relevant parameters may be
discouraging to a new (or impatient!) user. Thus LatticeKrig is a "wrapper" that
generates some simplified, default model choices to call the more general
function \code{LKrig}. It is useful for users not fully familiar with
the LatticeKrig methodology or those that wish to try a default
approach to get a quick look at a spatial analysis. You can always go back and add some specific non-default choices to the LatticeKrig call (e.g. changing \code{a.wght}).
For the 2-dimensional case the default values
are set to give about 4 times as many basis functions as observations, use 5 extra
lattice points on the edge to minimize boundary effects,
and to use four levels of multi-resolution.
An important default is that a linear spatial drift is included in the model so
the model will relax to a linear prediction based on the x values in the absence of
a spatial component. In other words, the model includes by default a fixed part
that is linear in x.
The spatial correlation
range is nearly stationary and set large to mimic a thin-plate
spline. The smoothness mimics the Whittle covariance function (
smoothness = 1 for the Matern). (See \link{LKrig.cov.plot} to get a plot of
the implied covariance function.) LatticeKrig also provides maximum likelihood
estimates
of the measurement error standard deviation ("sigma") and process variance parameter ("rho") that are perhaps the
parameters that most affect the shape of the estimated spatial field. The ubiquitous
parameter lambda throughout LatticeKrig is just the reparameterization lambda == sigma^2 / rho.
This top level function is pitched with all the caveats that
statistical model assumptions should always be checked and applying
generic methods to a specific problems without checking the
appropriateness can lead to misleading results. So plot your data and
try several models. Details on the full
computations can be found in the \code{LKrig} man page.
The \code{lambda = sigma2/ rho} parameter in \code{LKrig} is essential to the
Lattice Krig computation and an inappropriate value will result in
over or under fitting and incorrect interpolated values. The function
\code{LKrigFindLambda} is used within \code{LatticeKrig} to estimate a
lambda value from the data using maximum likelihood.
One interesting feature of this package is the ability to handle spatial processes
on different geometries and the form is specified by the \code{LKGeometry} argument.
The current choices are:
\describe{
\item{\code{\link{LKRectangle}}}{A 2 dimensional Euclidean spatial domain. The default}
\item{\code{\link{LKInterval}}}{A 1 dimensional Euclidean spatial domain.}
\item{\code{\link{LKBox}}}{A 3 dimensional Euclidean spatial domain.}
\item{\code{\link{LKRing}}}{A 2 dimensional spatial domain where the first coordinate is periodic (on [0,360]) and the second is Euclidean. E.g. a slice around the equator and not having a large latitudinal range. }
\item{\code{\link{LKCylinder}}}{A 3 dimension model where an additional coordinate is added to the LKRing geometry. This is useful for representing a small section of the sphere where one also has a height component.}
\item{\code{\link{LKSphere}}}{A full 2-d spherical geometry. Coordinates are given in longitude, latitude but the distances and any structures are on the sphere.
}
}
One important feature of this package is that the different geometries all use the
same computation engine LKrig, following the same computational algorithm. The
differences in setting up the problem and in evaluating the function are
implemented as S3 methods. The details of this strategy are described in
\code{\link{LKGeometry}} and allow the user to add new geometries.
This function also supports a model where the observations are simply
expressed as linear combinations of the basis function coefficients. Equivalently
this is a model where the observed data can be expressed as linear functionals
applied to the polynomial term and the spatial process.
Typically these
linear maps represent observing integrals or weighted combinations of the
fields and are important for data that is aggregated over by space.
See help(LKrig) for an example of how this model is set up at the end of the
Examples section.
}
\value{
The main call inside \code{LatticeKrig} is to \code{LKrig} and so a
\code{LKrig} object is returned. Thus all of the functionality that
comes with \code{LKrig} objects such as \code{predict},
\code{summary}, \code{predictSurface}, etc. remain the same as
described in \code{LKrig}. Also, see the components \code{residuals}
and \code{fitted.values} in the returned object for these parts of the
fitted spatial model. The component \code{LKinfo} has all the details
that specify the basis functions and covariance model.
The component \code{MLE} gives details of the likelihood evaluations to estimate
the sigma and rho parameters.
}
\author{
Doug Nychka
}
\seealso{
LKrig, LKrig.setup, LKrigFindLambda, LKinfo, LKrig.sim.conditional
}
\examples{
# Load ozone data set
data(ozone2)
x<-ozone2$lon.lat
y<- ozone2$y[16,]
# thin plate spline-like model with the lambda parameter estimated by
# maximum likelihood. Default choices are made for a.wght, nlevel, NC
# and alpha.
obj<- LatticeKrig( x, y)
\dontrun{
# summary of fit and a plot of fitted surface
print( obj)
surface( obj )
US(add=TRUE)
points(x)
# prediction standard errors
out.se<- predictSE( obj, xnew= x)
# predict at observations:
# here x can be any two column matrix of coordinates this
# function returns a vector of predictions
out.fhat<- predict( obj, xnew= x)
# conveniently predict on a 100X100 grid for plotting
# use the grid.list argument to give more control over the grid choice.
# output object is the standard list with components x, y, and z
# suitable for contour, persp, image, etc.
out.surf<- predictSurface( obj, nx=100, ny=100)
# image.plot( out.surf)
}
# running an example by first setting up the model object
# this is the main way to specify the spatial model components
\dontrun{
# this is just a small model to run quickly
# compare the LKinfo object here to one created implicitly: obj$LKinfo
LKinfo1<- LKrigSetup( x, NC=5, nlevel=3, a.wght=4.1, nu=1.0)
obj1<- LatticeKrig( x,y, LKinfo= LKinfo1)
}
#
# In this example lon/lat are treated as just Euclidean coordinates
# a quick adjustment for small regions is to account for the difference
# in physical distance in N-S verses E_W
# is to just scale the longitude degrees to be comparable to degrees in latitude
# at least in the middle of the domain. The assumption is that for small spatial
# domains this approximation will not be bad for the coordinates at the edges too.
# You accomplish this by adding a scaling, V matrix:
# Here the V argument is rolled into the LKinfo object created within the function
#
\dontrun{
meanLat<- mean( x[,2])*pi/180
Vlonlat <- diag( c( 1/cos(meanLat), 1) )
obj1<- LatticeKrig( x, y, V = Vlonlat )
}
\dontrun{
# Refit using with just one level of basis functions
# on a 20X20 grid within the spatial domain ( so about 400)
# actually the number is 720 ( see obj1b$LKinfo) due to adding edge nodes
# Add an aspect ratio of spatial domain
# and find the a.wght parameter along with nugget and process variances.
# this takes a while partly because LatticeKrig model is not optimized for small data sets!
obj1b<- LatticeKrig( x, y, nlevel=1, NC=20, findAwght=TRUE)
# rudimentary look at how likelihood was optimized
#log lambda and omega = log(a.wght-4)/2 are useful parameterization ...
quilt.plot( obj1b$MLE$lnLike.eval[,c("logLambda","omega")],
obj1b$MLE$lnLike.eval[,"lnProfileLike.FULL"],
xlab="loglamda", ylab="omega",
zlim =c(-640,-612))
points( obj1b$MLE$lnLike.eval[,c("logLambda","omega")],cex=.25)
}
# fitting replicate spatial data sets
# here we use the common observations over days for the ozone
# data set. Whether these are true replicated fields is in question
# but the analysis is still useful
\dontrun{
Y<- na.omit( t( ozone2$y) )
ind<- attr( Y,"na.action")
X<- ozone2$lon.lat[-ind, ]
out1<- LatticeKrig( X, Y, nlevel=1, NC=20, findAwght=TRUE)
out2<- LatticeKrig( X, Y, nlevel=1, NC=20, findAwght=TRUE,
collapseFixedEffect=TRUE)
# compare the two models
# Note the second a.wght reflects more spatial correlation when the individual
# fixed effect is not removed (4.4 versus 4.07)
# nugget variance is nearly the same!
out1$MLE$summary[1:7]
out2$MLE$summary[1:7]
}
\dontrun{
# Refit using the tensor product type of basis functions
# (default is "Radial"). An example how an additional argument that is
# passed to the LKrigSetup function to create the LKinfo object.
obj2<- LatticeKrig( x, y, BasisType="Tensor")
}
#
# A 1-d example with 3 levels of basis functions
# See LKrig for an explanation if nlevel, NC, alpha and a.wght
# covariance parameters.
\dontrun{
x<- matrix(rat.diet$t)
y<- rat.diet$trt
fitObj<- LatticeKrig( x, y)
# NOTE lots of defaults are set for the model! See print( fitObj)
plot( x,y)
xg<- matrix(seq( 0,105,,100))
lines( xg, predict(fitObj, xg) )
}
\dontrun{
# a 3D example
set.seed( 123)
N<- 1000
x<- matrix( runif(3* N,-1,1), ncol=3, nrow=N)
y<- 10*exp( -rdist( x, rbind( c(.5,.5,.6) ) )/.5)
# NOTE setting of memory size for Cholesky. This avoids some warnings and
# extra computation by the spam package
LKinfo<- LKrigSetup( x, nlevel=1, a.wght= 6.01, NC=6, NC.buffer=2,
LKGeometry="LKBox", normalize=FALSE, mean.neighbor=200,
choleskyMemory=list(nnzR= 2E6) )
out1<- LatticeKrig( x,y, LKinfo=LKinfo)
glist<- list( x1=seq( -1,1,,30), x2=seq( -1,1,,30), x3 = 0)
xgrid<- make.surface.grid( glist)
yhat<- predict( out1, xgrid)
# compare yhat to true function created above
image.plot( as.surface( glist, yhat))
}
#
###########################################################################
# Including a covariate (linear fixed part in spatial model)
##########################################################################
\dontrun{
data(COmonthlyMet)
obj <- LatticeKrig(CO.loc, CO.tmin.MAM.climate, Z=CO.elev)
obj2 <- LatticeKrig(CO.loc, CO.tmin.MAM.climate)
# compare with and without linear covariates
set.panel(1,2)
surface(obj)
US(add=TRUE)
title("With Elevation Covariate")
surface(obj2)
US(add=TRUE)
title("Without Elevation Covariate")
}
\dontrun{
data(COmonthlyMet)
# Examining a few different "range" parameters
a.wghtGrid<- 4 + c(.05, .1, .5, 1, 2, 4)^2
#NOTE smallest is "spline like" the largest is essentially independent
# coefficients at each level. In this case the "independent" end is
# favored but the eff df. of the surface is very similar across models
# indicating about the same separation of the estimates into spatial
# signal and noise
#
for( k in 1:5 ){
obj <- LatticeKrig(CO.loc, CO.tmin.MAM.climate, Z=CO.elev,
a.wght=a.wghtGrid[k])
cat( "a.wght:", a.wghtGrid[k], "ln Profile Like:",
obj$lnProfileLike, "Eff df:", obj$trA.est, fill=TRUE)
}
# MLE
obj0 <- LatticeKrig(CO.loc, CO.tmin.MAM.climate, Z=CO.elev,
findAwght=TRUE)
print(obj0$MLE$summary)
}
#########################################################################
# Reproducing some of the analysis for the example in the
# JCGS LatticeKrig paper.
#########################################################################
#### Here is an example of dealing with approximate spherical geometry.
\dontrun{
data(NorthAmericanRainfall)
library(mapproj)
x<- cbind(NorthAmericanRainfall$longitude, NorthAmericanRainfall$latitude)
y<- NorthAmericanRainfall$precip
log.y<- log(y)
elev<- NorthAmericanRainfall$elevation
# this is a simple projection as part of this and handled by the mapproj package
x.s<- mapproject( x[,1], x[,2], projection="stereographic")
x.s<- cbind( x.s$x, x.s$y)
# an alternative is to transform coordinates using another projection,
# e.g. a Lambert conformal projection
# with the project function from the rgdal package
# library( rgdal)
# x.s<- project(x,"+proj=lcc +lat_1=22 +lat_2=58 +lon_0=-93 +ellps=WGS84")
# this package has the advantage that the inverse projection is also
# included ( inv=TRUE) so it is easy to evaluate the surface back on a Mercator grid.
obj0<- LatticeKrig(x.s, log.y, Z=elev )
fitSurface<- predictSurface( obj0, drop.Z=TRUE)
fitSurface$z<- exp(fitSurface$z)/100
colorTable<- designer.colors( 256, c("red4", "orange", "yellow","green1", "green4", "blue"))
image.plot( fitSurface, col=colorTable)
map( "world", add=TRUE, col="grey30", lwd=3, proj="")
}
}
\keyword{spatial}
|
de0855a9653b73d25f1901979b496cbad24cfd31
|
43052fc5c751616120d35ee8aafb8dbd7b6dda3f
|
/ui.R
|
45382a4af70fe2f9e24495604922fa62094668b0
|
[] |
no_license
|
benbray111/NGramWordPrediction
|
faa91849c6d8b06cbc389f037fa7e8552c1ff66e
|
e4fdab524f6a79aceade0c4e2e6b354b374fcd80
|
refs/heads/master
| 2021-06-26T11:52:29.093244
| 2014-12-14T21:10:52
| 2014-12-14T21:10:52
| 58,488,082
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,262
|
r
|
ui.R
|
#version 1.1
# Shiny UI for an n-gram next-word prediction app.
# Layout: a sidebar with a word cloud and author credits, and a main panel
# split into two columns -- user input/prediction on the left, explanatory
# text (filled server-side via the "Explanation*" outputs) on the right.
library(shiny)
# ui.R
shinyUI(fluidPage(
titlePanel(
"Word Prediction"
),
sidebarLayout(
##################
# Sidebar: word-cloud plot (rendered by the server) plus static credits.
sidebarPanel(
fluidRow(
plotOutput("WordCloudOutput")
),
fluidRow("Author:", a("Ben Bray", href="http://www.linkedin.com/pub/ben-bray/12/467/25/")
),
fluidRow("Created December, 2014"
) ,
fluidRow("for the ", a("Johns Hopkins Data Science Specialization", href="http://www.coursera.org/specialization/jhudatascience/1")
) ,
fluidRow( "Click ", a("here", href="https://github.com/benbray111"), "for details and code."
)
),
######################
##################
# Main panel, left column: free-text input, submit button, and the
# prediction plus its score (both computed server-side).
mainPanel(
column(6,
fluidRow(
helpText("Enter at least 3 words to see a prediction of the next word"),
textInput("userInput", "", ""),
# NOTE(review): "predict" does not look like a standard shiny/font-awesome
# icon name -- confirm the button icon actually renders.
submitButton("Submit", icon("predict")),
br(),
br(),
align="center"
),
fluidRow(
h4("The most likely next word is..."),
h2(div(textOutput("Prediction"), style="color:#666666")), align="center"
),
fluidRow(
h4("with a percentage score of..."),
h2(div(textOutput("Prob"), style="color:#666666")), align="center"
)
),
# Right column: six server-rendered explanation text slots.
column(6,
fluidRow(h4(textOutput("Explanation1"))
),
fluidRow(
textOutput("Explanation2")
),
br()
,
fluidRow(
textOutput("Explanation3"),
br()
),
fluidRow(
textOutput("Explanation4"),
br()
),
fluidRow(
textOutput("Explanation5"),
br()
),
fluidRow(
div(textOutput("Explanation6"), style="color:red"), br())
)
)
##################
)
))
|
f607edf52c6986a5db89d9bca1363668002be77a
|
48131173b92726ec3993c8d0c966899b6483c2f6
|
/w4/week4.R
|
4e5b512d187ea603be3a534e11d69768f0f1209f
|
[] |
no_license
|
jasonqiangguo/Quant3_Lab
|
21045c5efa610535ac5d67ec60e4c683f70f827d
|
f432a2df1374f4284a9c6caf0950ed7f2f4b139d
|
refs/heads/master
| 2020-04-10T22:50:53.676968
| 2019-12-28T11:28:15
| 2019-12-28T11:28:15
| 69,981,465
| 7
| 5
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,134
|
r
|
week4.R
|
#########################################################
## Durational Model
## Instructor: Jason Guo
## Quant III Lab 4
#########################################################
library(foreign)
library(survival)
install.packages("KMsurv")
library(KMsurv)
library(Zelig)
#Independent variables:
#durat: Duration of the government
#invest: Investiture dummy variable - existence of a legal requirement for legislative investiture and is a hurdle that should diminish average duration by causing some governments to fail very quickly
#polar: Polarization index - measure of support for extremist parties, indicating bargaining system complexity and diminished duration. varies 0-43, with a mean of 15.3
#fract: Fractionalization - index that characterizes the number and size of parties in parliment; increased complexity is expected to reduce cabinet duration. varies 349-868, with a mean of 719.
#numst: Numerical status - dummy variable that distinguishes between majority (coded 1) and minority (coded 0) governments, with majority governments expected to last longer
#format: Formation attempts - the number of attempts to form a government during the crisis. the more foiled attempts, the more complex the bargaining environment, and the shorter the cabinet duration. varies 1-8, with a mean of 1.9
#postelec: Postelection - modeled as a dummy variable coded 1 if the government formed immediately after the election, and 0 otherwise. forming in midterm may indicate situational instability not picked up by other variables
#caretakr: Caretaker government - control for caretaker governments of shorter durations that hold office while more 'permanent' replacement is negotiated
#failure: a cabinet was dissolved,1; censored,0
# Read the cabinet-duration data (Stata file; read.dta comes from the
# foreign package loaded above).
cab <- read.dta("cabinet.dta")
# Nonparametric estimation, Kaplan-Meier estimator
# (intercept-only model: a single survival curve for the whole sample)
nonpar <- survfit(Surv(durat, event= failure) ~ 1, data=cab)
# fun="event" plots the cumulative incidence F(t) = 1 - S(t)
plot(nonpar, xlab="Months", ylab="F(t)", fun="event")
# (note that first we need to fit a model with just a constant)
# survivor function S(t) = 1 - F(t) OR CDF for (1-f(t))
plot(nonpar, xlab="Months", ylab="S(t)")
# hazard rate h(t) = f(t) / S(t) OR f(t) / (1 - F(t))
# discrete hazard rate (note that Stata applies some smoothing to this)
library(muhaz)
fit2 <- kphaz.fit(cab$durat, cab$failure)
kphaz.plot(fit2)
############# PARAMETRIC MODELS #######################
################################################################
# EXPONENTIAL DISTRIBUTION: CONSTANT HAZARD RATE
################################################################
# Estimation
exponential <- survreg(Surv(durat, failure) ~ 1 + invest + polar + fract + numst + format +
caretakr, dist = "exponential", data = cab)
summary(exponential)
round(exponential$coefficients, 3)
## Use Zelig to Simulate Expected Duration Time E(T) for polar Profile ##
exponential.zelig <- zelig(Surv(durat, failure) ~ invest + polar + fract + numst + format +
caretakr, model = "exp", data = cab, cite = F)
# Profile: sweep polarization over its observed range, hold the other
# covariates at means (majority government, no investiture/caretaker).
polar.seq <- seq(min(cab$polar), max(cab$polar), length = 50)
x.polar <- setx(exponential.zelig, invest = 0, polar = polar.seq, fract = mean(cab$fract),
numst = 1, format = mean(cab$format), caretakr = 0)
sim.polar <- sim(exponential.zelig, x = x.polar)
# Point estimate of expected duration at each of the 50 polarization values
pe.polar <- rep(0, 50)
for (i in 1:50){
pe.polar[i] <- apply(sim.polar$getqi(qi="ev", xvalue="range")[[i]], 2, mean)
}
# 95% Confidence interval
lo.polar <- rep(0, 50)
for (i in 1:50){
lo.polar[i] <- apply(sim.polar$getqi(qi="ev", xvalue="range")[[i]], 2, quantile, prob = 0.025)
}
hi.polar <- rep(0, 50)
for (i in 1:50){
hi.polar[i] <- apply(sim.polar$getqi(qi="ev", xvalue="range")[[i]], 2, quantile, prob = 0.975)
}
# Make the plot
par(mar = c(4, 6, 0.1, 0.5))
plot(polar.seq, pe.polar, type = "n", xlab = "", ylab = "", ylim = c(0, 70),
axes = FALSE)
abline(v = seq(min(polar.seq), max(polar.seq), length = 10), col = "gray75",
lty = 3)
abline(h = seq(0, 70, by = 5), col = "gray75", lty = 3)
lines(polar.seq, pe.polar, lwd = 3, lty = 1)
lines(polar.seq, lo.polar, lwd = 2, lty = 2)
lines(polar.seq, hi.polar, lwd = 2, lty = 2)
title(ylab = expression("Expected Cabinet Duration"), line = 4, cex.lab = 1.5)
title(xlab = expression("Support for Extremist Parties"), line = 2.75, cex.lab = 1.5)
axis(1)
axis(2, at = seq(0, 70, by = 5), las = 2, cex.axis = 1.1)
box()
rug(jitter(cab$polar), ticksize = 0.015)
legend("topright", bty = "n", c(expression("Point Estimate"), expression("95% Conf. Interval")),
lty = c(1, 2), lwd = c(3, 2), cex = 1.25)
# Marginal effects
# b[5] is the 5th coefficient (intercept, invest, polar, fract, numst),
# i.e. the numst coefficient evaluated at this covariate profile.
x <- c(1, median(cab$invest), mean(cab$polar), mean(cab$fract), 1, mean(cab$format), median(cab$caretakr))
b <- exponential$coefficients
b[5] * exp(x %*% b)
# baseline hazard rate
# For the exponential model the hazard is constant over time (= lambda).
t <- seq(0, 70, 1)
lambda.i <- exp(-predict(exponential, cab, type="linear"))
lambda <- mean(lambda.i, na.rm=TRUE)
hazard <- lambda
plot(t, rep(hazard, length(t)), type="l", main="Exponential", xlab="Months",
ylab="Hazard Rate")
################################################################
# WEIBULL DISTRIBUTION: MONOTONICALLY INCREASING/DECREASING HAZARD RATE
################################################################
weibull <- survreg(Surv(durat, failure) ~ 1 + invest + polar + fract + numst + format +
caretakr, dist = "weibull", data = cab)
summary(weibull)
round(weibull$coefficients, 3)
## Use Zelig to Simulate Expected Duration Time E(T) for polar Profile ##
weibull.zelig <- zelig(Surv(durat, failure) ~ invest + polar + fract + numst + format +
caretakr, model = "weibull", data = cab, cite = F)
# Same covariate profile as the exponential section: sweep polarization,
# hold the other covariates fixed.
polar.seq <- seq(min(cab$polar), max(cab$polar), length = 50)
x.polar <- setx(weibull.zelig, invest = 0, polar = polar.seq, fract = mean(cab$fract),
numst = 1, format = mean(cab$format), caretakr = 0)
sim.polar <- sim(weibull.zelig, x = x.polar)
pe.polar <- rep(0, 50)
for (i in 1:50){
pe.polar[i] <- apply(sim.polar$getqi(qi="ev", xvalue="range")[[i]], 2, mean)
}
# 95% Confidence interval
lo.polar <- rep(0, 50)
for (i in 1:50){
lo.polar[i] <- apply(sim.polar$getqi(qi="ev", xvalue="range")[[i]], 2, quantile, prob = 0.025)
}
hi.polar <- rep(0, 50)
for (i in 1:50){
hi.polar[i] <- apply(sim.polar$getqi(qi="ev", xvalue="range")[[i]], 2, quantile, prob = 0.975)
}
# Make the plot
par(mar = c(4, 6, 0.1, 0.5))
plot(polar.seq, pe.polar, type = "n", xlab = "", ylab = "", ylim = c(0, 70),
axes = FALSE)
abline(v = seq(min(polar.seq), max(polar.seq), length = 10), col = "gray75",
lty = 3)
abline(h = seq(0, 70, by = 5), col = "gray75", lty = 3)
lines(polar.seq, pe.polar, lwd = 3, lty = 1)
lines(polar.seq, lo.polar, lwd = 2, lty = 2)
lines(polar.seq, hi.polar, lwd = 2, lty = 2)
title(ylab = expression("Expected Cabinet Duration"), line = 4, cex.lab = 1.5)
title(xlab = expression("Support for Extremist Parties"), line = 2.75, cex.lab = 1.5)
axis(1)
axis(2, at = seq(0, 70, by = 5), las = 2, cex.axis = 1.1)
box()
rug(jitter(cab$polar), ticksize = 0.015)
legend("topright", bty = "n", c(expression("Point Estimate"), expression("95% Conf. Interval")),
lty = c(1, 2), lwd = c(3, 2), cex = 1.25)
# Marginal effects
# b[5] is the numst coefficient (see the exponential section)
x <- c(1, median(cab$invest), mean(cab$polar), mean(cab$fract), 1, mean(cab$format), median(cab$caretakr))
b <- weibull$coefficients
b[5] * exp(x %*% b)
# Hazard rate
# Weibull hazard h(t) = lambda * p * (lambda * t)^(p - 1), with shape
# p = 1/scale from the survreg fit.
lambda.i <- exp(-predict(weibull, cab, type="linear"))
lambda <- mean(lambda.i, na.rm=TRUE)
t <- seq(0,70,1)
p <- 1/weibull$scale
# NOTE(review): `scale` below is assigned but never used afterwards.
scale <- weibull$scale
hazard <- lambda * p * (lambda * t)^(p-1)
plot(t, hazard, type="l", main="Weibull",
xlab="Months", ylab="Hazard Rate")
################################################################
# LOGNORMAL DISTRIBUTION: NONMONOTONIC HAZARD RATE
################################################################
# The log-normal AFT model allows the hazard to rise and then fall,
# unlike the exponential (constant) or Weibull (monotone) hazards.
lognormal <- survreg(Surv(durat, failure) ~ 1 + invest + polar + fract + numst + format +
caretakr, dist = "lognormal", data = cab)
summary(lognormal)
round(lognormal$coefficients, 3)
# Marginal effects
# b[5] is the numst coefficient (same covariate profile as the sections above)
x <- c(1, median(cab$invest), mean(cab$polar), mean(cab$fract), 1, mean(cab$format), median(cab$caretakr))
b <- lognormal$coefficients
b[5] * exp(x %*% b)
# Hazard Rate in Lognormal
lambda.i <- exp(-predict(lognormal, cab, type="linear"))
lambda <- mean(lambda.i, na.rm=TRUE)
# BUG FIX: the shape parameter must come from the log-normal fit itself.
# The original used exponential$scale (copy-paste from the exponential
# section); survreg fixes the exponential scale at 1, so that gave p = 1
# and the wrong hazard shape.
p <- 1/lognormal$scale
# NOTE(review): t is seq(0, 70, 1) from the sections above, so at t = 0 the
# t^{-1} and log(lambda*t) terms are undefined and hazard[1] is NaN.
pdf <- (2*pi)^{-1/2} * p * t^{-1} * exp((-p^2 * (log(lambda*t))^2)/2)
cdf <- 1 - pnorm(p*log(lambda*t))
hazard <- pdf/cdf
plot(t, hazard, type="l", main="Log-normal",
xlab="Months", ylab="Hazard Rate")
################################################################
# COX MODEL
################################################################
# Semi-parametric Cox proportional-hazards model with counting-process
# (start, stop] intervals and robust standard errors clustered on indsp.
data2 <- read.dta("civil_cox.dta")
# NOTE(review): renaming by position assumes a fixed column order in the
# .dta file -- confirm columns 36:37 really are the time variables.
names(data2)[36:37] <- c("t", "t0")
# Outer parentheses make the assignment also print the fitted model.
(cox.model <- coxph(Surv(date0, date1, event=cens, type="counting") ~
gini_m + ginmis + rgdpch + elf + elf2 + logpop + y70stv + y80stv +
y90stv + d2 + d3 + d4 + cluster(indsp), data=data2, method="efron"))
# Cumulative baseline hazard from the fitted Cox model
hazard <- basehaz(cox.model)
plot(hazard$time, hazard$hazard, type="l", ylab="baseline hazard", xlab="time")
|
ad3f76f25654e6269a4d8b0d64b80bfaab758fc2
|
b934fa93e660667ec7e5193639a02137f29e746e
|
/ikde.Rcheck/00_pkg_src/ikde/R/stan.multiply.R
|
8f248df475f205a0681a507d03ef396b34aa84e1
|
[] |
no_license
|
tkmckenzie/ikde-scripts
|
b5fe5ec86de11905a7bfd7c03f3640dea37ea106
|
989c2dbc416cd489788d5a6071282d1c109d8c3e
|
refs/heads/master
| 2020-04-10T19:35:48.600541
| 2019-01-09T17:36:21
| 2019-01-09T17:36:21
| 161,240,905
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 901
|
r
|
stan.multiply.R
|
#' Function to replicate multiplication in Stan
#'
#' @param x First term in the product
#' @param y Second term in the product
#'
#' @details
#' Mirrors Stan's \code{*} operator, which dispatches on operand shape:
#' when either operand has length one the result is the elementwise product
#' (\code{x * y} in R notation); when both operands are vectors or matrices
#' the result is the matrix product (\code{x \%*\% y} in R notation).
#'
#' @return \code{x * y} when either argument has length one; otherwise the
#'   matrix product \code{x \%*\% y}.
#'
#' @examples
#' X <- matrix(1:9, nrow = 3)
#' b <- c(4, 5, 6)
#'
#' (3 + 2) * X %stan*% (5 * b)
#' # [,1]
#' # [1,] 1650
#' # [2,] 2025
#' # [3,] 2400
#'
#' @export
#' @rdname stan.multiply
`%stan*%` <-
  function(x, y){
    # Stan defines `*` for
    #   real * real, real * vector   -> x * y
    #   vector * row_vector,
    #   matrix * vector              -> x %*% y
    if (length(x) > 1 && length(y) > 1) {
      x %*% y
    } else {
      x * y
    }
  }
|
450feea4c4f9492d6aa00e9d61fa34796b9f076a
|
bf987274d72fce30a71a6ae80e7a6d3505f98664
|
/R/R-intro/r-intro-ate.R
|
962399d9793af50b909fa0d3c0783abb6ccc522b
|
[
"Apache-2.0"
] |
permissive
|
sherrytp/be_bc_f19
|
cb49ab2d8cb469d1be6328c4256c52c72ded948d
|
5377ccd2f1aec45bf79b123b360a4975ba9127a7
|
refs/heads/master
| 2021-06-21T23:56:04.250878
| 2021-03-29T08:44:11
| 2021-03-29T08:44:11
| 207,918,928
| 0
| 1
| null | 2019-09-11T22:39:35
| 2019-09-11T22:39:34
| null |
UTF-8
|
R
| false
| false
| 1,202
|
r
|
r-intro-ate.R
|
# author: @lrdegeest
# Simulate a randomized experiment and estimate the average treatment effect.
# simulate data -----------------------------------------------------------
library(tibble)  # provides as_tibble(), which the script below relies on
n <- 100  # observations per arm (control and treatment)
# rep(0:1, each = n) has length 2 * n, so every simulated vector must also
# have length 2 * n. The original drew only n values for error and gender,
# which R silently recycled -- each draw was reused in both arms.
treatment <- rep(0:1, each = n)
error <- rnorm(2 * n, mean = 0, sd = 2)
gender <- rbinom(2 * n, 1, 0.5)
# true model: intercept 2, treatment effect 6, gender effect 0.5
y <- 2.0 + 6.0 * treatment + 0.5 * gender + error
df <- data.frame(y, treatment, gender)
df$treatment_string <- ifelse(df$treatment == 0, "Control", "Treatment")
df$gender_string <- ifelse(df$gender == 0, "Male", "Female")
df <- as_tibble(df)
print(df)
# estimate average treatment effect ---------------------------------------
m1 <- lm(y ~ factor(treatment), data = df)
summary(m1)
# Recall that in a linear regression, the hypothesis test of each coefficient
# is carried out with a t-test. The t-statistic is just the estimated
# coefficient divided by its standard error:
tstat <- coef(summary(m1))[2] / coef(summary(m1))[, 2][2]
tstat
# Confirm the two-tailed p-value with the Student t distribution,
# pt(t-stat, degrees of freedom). The correct df is the model's residual
# degrees of freedom (2 * n - 2), not the n - 1 hard-coded originally.
2 * pt(-abs(tstat), df = m1$df.residual)
# now control for gender
m2 <- lm(y ~ factor(treatment) + factor(gender), data = df)
summary(m2)
# is there a heterogenous treatment effect?
m3 <- lm(y ~ factor(treatment) + factor(gender) + factor(treatment) * factor(gender), data = df)
summary(m3)
|
aa291e742d33d3022d81eb69f338398a65235458
|
3481ac56941c2f6853c84c1fca9500529e0f74cb
|
/alumnos/jtmancilla/german/Ejercicio2_eda.r
|
d3f13ff08ab7a00b2b02612adcd262babe3dc3c1
|
[] |
no_license
|
MarcosOMtz/itam-dm
|
8029cceb64dbccb9240d52da2359f905fb0c7063
|
cf2efc0b5c2b66cc82d2b93b26b6c665534b6b6e
|
refs/heads/master
| 2020-06-12T19:45:59.158190
| 2015-03-11T05:27:36
| 2015-03-11T05:27:36
| 31,153,956
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 410
|
r
|
Ejercicio2_eda.r
|
# Exploratory data analysis of the (presumably German credit) data set.
# Depends on helpers defined in the sourced scripts:
#   0-load.r defines load(); 2-eda.r defines eda1() and eda2().
source("0-load.r")
source("2-eda.r")
library(corrgram)
german.data <- load()
summary(german.data)
str(german.data)
View(german.data)
# Visualization
eda1(german.data,4)
# taking good.loan (column 21) as the target variable
eda2(german.data,21,4)
# visualization of NA's: 0/1 indicator data frame of missing values
na_german <- as.data.frame(abs(is.na(german.data)))
# NA counts and NA percentages per column, in descending order
sort(colSums(na_german),decreasing=T)
sort(colMeans(na_german*100),decreasing=T)
|
9ef38aebf73f255c2072dde93a42ced6b518e70c
|
568f6f7f1f49d15bf27530a5775e4de149ef55a0
|
/man/050-summary.FMmodel.Rd
|
8122c91cae44029d33f7ef2683b1d2a7fa93740c
|
[] |
no_license
|
cran/FactoRizationMachines
|
3679358677009b7b4ff01061d58d4f0f00f3c055
|
5ec6946326619a2247a15d07186913074139e06b
|
refs/heads/master
| 2020-06-13T12:09:10.026331
| 2017-10-18T19:34:55
| 2017-10-18T19:34:55
| 75,382,750
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,437
|
rd
|
050-summary.FMmodel.Rd
|
\name{summary.FMmodel}
\alias{summary.FMmodel}
\alias{print.FMmodel}
\title{
Summary and Print Method for FMmodel Objects
}
\description{
Function generating the summary of a FMmodel object.
}
\details{
The summary contains for instance:
- the number of training examples the model was build with,
- the number of variables (features) the model considers,
- the minimum value of the target vector elements (to truncate the prediction),
- the maximum value of the target vector elements (to truncate the prediction),
- the number of factors for each considered order:
the first element specifies whether linear weights are used (\code{1}) or not (\code{0}),
the second element specifies the number of parameters factorizing the second-order,
the third element specifies the number of parameters factorizing the third-order.
}
\usage{
\method{summary}{FMmodel}(object, ...)
\method{print}{FMmodel}(x, ...)
}
\arguments{
\item{object}{
a FMmodel object (output of \code{\link{SVM.train}}, \code{\link{FM.train}}, or \code{\link{HoFM.train}})
}
\item{x}{
a FMmodel object (output of \code{\link{SVM.train}}, \code{\link{FM.train}}, or \code{\link{HoFM.train}})
}
\item{\dots}{
additional arguments
}
}
\seealso{
\code{\link{SVM.train}},
\code{\link{FM.train}},
\code{\link{HoFM.train}}
}
|
0b04da6bd56f01c86f25cbe557dd31bfa6c77329
|
89bee9af99ec25372b20bca5325e3f06399a95a7
|
/Documents/DiffusionAnalysisFunctions_Annotated.R
|
e777b50a9b814b0a15bbdad27fe5ea3cd32ea0c3
|
[] |
no_license
|
SteveLund/FP-Algebra
|
bace9593abe79cdb28b85598aff114e5a4dc80e3
|
bbad74eddb797f6c27c21a69b0800be10eb62b36
|
refs/heads/master
| 2020-06-01T06:20:20.981968
| 2014-04-28T17:09:43
| 2014-04-28T17:09:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 33,706
|
r
|
DiffusionAnalysisFunctions_Annotated.R
|
require(foreach)
require(doMC)
require(aroma.light)
require(nlme)
require(mgcv)
require(mnormt)
### Wrapper function for estimating the bandwidth for the kernel density estimate at each observation time
bw.est<-function(dat,bw="SJ",record) {
try(registerDoMC(cores = 6))
BW<- foreach(i = 1:length(dat), .combine = 'c') %dopar% {
t.bw<- density(dat[[i]],bw=bw)$bw
print(paste("BW estimate for t=",record[i],"is",signif(t.bw,3)))
t.bw
}
BW
}
### Estimate the density or its derivative with respect to x
den.fun <- function(dat, x, deriv = 0, trim = TRUE, low.thresh = NULL, hi.thresh = NULL,bw="nrd0") {
# `dat' is a vector of observations sampled from the population at a single observation time.
# `x' is a vector of locations at which the density (or its derivative) is to be estimated
# `deriv' is an indicator: 0 corresponds to the density, 1 corresponds to the derivative with respect to x
# `trim' is an indicator: if TRUE, then estimates from the distribution tails are made to be NA; if FALSE, the estimates for all values of `x' are returned
# `low.thresh' is a scalar specifying the lower bound of the linear dynamic range (used for censored data)
# `hi.thresh' is a scalar specifying the upper bound of the linear dynamic range (used for censored data)
# `bw' bandwidth argument passed to density function
dat2 <- dat
prop.thresh <- 0
if (!is.null(low.thresh))
if (is.na(low.thresh))
low.thresh <- NULL
if (!is.null(hi.thresh))
if (is.na(hi.thresh))
hi.thresh <- NULL
if (is.numeric(low.thresh)) {
# dat2 <- c(dat[dat > low.thresh], 2 * low.thresh - dat[dat > low.thresh])
prop.thresh <- prop.thresh + mean(dat <= low.thresh)
}
if (is.numeric(hi.thresh)) {
# dat2 <- c(dat2[dat2 < hi.thresh], 2 * hi.thresh - dat2[dat2 < hi.thresh])
prop.thresh <- prop.thresh + mean(dat >= hi.thresh)
}
est <- density(dat2, n = 5000, bw=bw)
if (is.numeric(low.thresh)) {
est$y <- est$y[est$x >= low.thresh]
est$x <- est$x[est$x >= low.thresh]
}
if (is.numeric(hi.thresh)) {
est$y <- est$y[est$x <= hi.thresh]
est$x <- est$x[est$x <= hi.thresh]
}
est$y <- (1 - prop.thresh) * est$y/(sum(est$y) * mean(diff(est$x)))
t.fun <- splinefun(x = est$x, y = est$y)
z <- t.fun(x, deriv)
if (trim)
z[x < min(est$x) | x > max(est$x)] <- NA
if (deriv == 0)
z[z < 0] <- 0
z
}
### Transform from logistic scale to linear scale
unlogit <- function(x) exp(x)/(1 + exp(x))
### Transform from linear scale to logistic scale
logit <- function(x) log(x/(1 - x))
### Goodness of fit test used when fitting splines to cumulative distribution functions F(x,t)
GOF.test <- function(X, N, fit, logit.p = FALSE, fit.P = NULL) {
if (is.vector(fit))
p.fit <- fit else p.fit <- predict(fit, fit$x)$y
if (logit.p)
p.fit <- unlogit(p.fit)
if (!is.null(fit.P))
p.fit <- fit.P + predict(fit, fit$x)$y
p.fit[p.fit > 1] <- 1
p.fit[p.fit < 0] <- 0
alpha <- X + 1
beta <- N - X + 1
P.x <- alpha/(alpha + beta)
t.stat <- 2 * (sum(dbinom(X, N, P.x, log = TRUE)) - sum(dbinom(X, N, p.fit, log = TRUE)))
if (is.vector(fit))
t.pval <- 1 - pchisq(t.stat, length(X)) else t.pval <- 1 - pchisq(t.stat, length(X) - fit$df)
if (is.na(t.pval))
stop(paste("GOF.test pval is", t.pval, "for iteration", i))
t.pval
}
### Make semi-transparent colors. Code for this function was taken from Nick Sabbe's post on stackoverflow.com
makeTransparent <- function(someColor, alpha = 100) {
newColor <- col2rgb(someColor)
apply(newColor, 2, function(curcoldata) {
rgb(red = curcoldata[1], green = curcoldata[2], blue = curcoldata[3], alpha = alpha,
maxColorValue = 255)
})
}
## Define colors to be used for plots.
# Five semi-transparent palette colors (indices 2:6), semi-transparent
# orange (makeTransparent's default alpha is 100), and opaque black (1).
COL <- c(makeTransparent(c(2:6)), makeTransparent(c("orange")),1)
### Estimate the density, its derivative with respect to x, and the probability flux.
DnA.ingredients <- function(x.loc, record, dat, Plot = FALSE, hi.thresh = NULL, low.thresh = NULL,bw,n.iter=20){
# `x.loc' is a vector of locations at which the density (or its derivative) is to be estimated
# `record' is a vector specifying the observation times of the population
# `dat' is a list. Each element of dat is vector of observations sampled from the population at a single observation time.
# `Plot' is an indicator: if TRUE, the estimated densities will be plotted as will the spline fits to the cumulative distribution functions (F(x,t)); if FALSE, these plots are supressed
# `trim' is an indicator: if TRUE, then estimates from the distribution tails are made to be NA; if FALSE, the estimates for all values of `x' are returned
# `hi.thresh' is a vector specifying the upper bound of the linear dynamic range at each observation time(used for censored data)
# `low.thresh' is a vector specifying the lower bound of the linear dynamic range at each observation time(used for censored data)
# `bw' bandwidth argument passed to density function
# n.iter is an integer specifying the maximum number of reweighting iterations to use when fitting splines to the cumalative distribution functions
if (length(hi.thresh == 1))
hi.thresh <- rep(hi.thresh, length(dat))
if (length(low.thresh == 1))
low.thresh <- rep(low.thresh, length(dat))
### Estimate time derivative of P(x,t)
N <- sapply(dat, length)
cum.Y<-sapply(dat,function(z){
zz<-floor(approx(y=1:length(z),x=sort(z),xout=x.loc)$y)
zz[x.loc<min(z)]<-0
zz[x.loc>max(z)]<-length(z)
zz
})
### How many observations fall within each bin formed by adjacent x.loc values
Y<-rbind(cum.Y[1,],cum.Y[-1,]-cum.Y[-nrow(cum.Y),])
## Replace counts for x.loc values outside of dynamic range with NA
for(i in 1:ncol(Y)){
if(!is.null(low.thresh)) if(!is.na(low.thresh[i])) if(any(x.loc<low.thresh[i])) Y[x.loc<low.thresh[i],i]<-NA
if(!is.null(hi.thresh)) if(!is.na(hi.thresh[i])) if(any(x.loc>hi.thresh[i])) Y[x.loc>hi.thresh[i],i]<-NA
}
######################################
### This step needs work to handle ###
### left truncation: ###
cum.Y<- apply(Y,2,cumsum)
######################################
dP_dt<-NULL
w.mod<-rep(1,length(record))
p.smooth<-smooth.it<-0
#while((any(p.smooth<.01)|(!any(p.smooth<.9)))&smooth.it<n.iter){
while(any(p.smooth<.01)&smooth.it<n.iter){ ### Allows overfitting
plot.fit.P<-fit.P <- dP_dt <- NULL
for (i in 1:length(x.loc)) {
spline.dat<-data.frame(y=cum.Y[i,],n=N,x=record)
##If applicable, exclude time points for which x.loc[i] is outside of dynamic range (low.thresh,hi.thresh)
t.drop<-NULL
t.w.mod<-w.mod
if(!is.null(low.thresh)) t.drop<-c(t.drop,which(low.thresh>x.loc[i]))
if(!is.null(hi.thresh)) t.drop<-c(t.drop,which(hi.thresh<x.loc[i]))
t.drop<-sort(unique(t.drop))
t.keep<-(1:length(record))[!(1:length(record))%in%t.drop]
spline.dat<-spline.dat[t.keep,]
t.w.mod<-w.mod[t.keep]
b<- gam(cbind(y,n-y)~s(x,k=min(c(20,nrow(spline.dat)))),data=spline.dat,family="binomial",weights=t.w.mod)
t.fit.P<-rep(NA,length(record))
t.fit.P[t.keep]<-unlogit(as.vector(predict(b)))
fit.P<-rbind(fit.P,t.fit.P)
if (Plot) {
t.xx <- seq(min(record), max(record), length.out = 200)
plot.fit.P<-rbind(plot.fit.P,unlogit(predict(b,newdata=list(x=t.xx))))
}
###Numerically estimate slope of P(x.loc[i],t) w.r.t t
P1<-predict(b,newdata=list(x=record[t.keep]+1e-5))
P2<-predict(b,newdata=list(x=record[t.keep]-1e-5))
t.dP_dt<-rep(NA,length(record))
t.dP_dt[t.keep]<-(unlogit(P1)-unlogit(P2))/2e-5
dP_dt<-rbind(dP_dt,t.dP_dt)
}
fit.P[fit.P > 1 &!is.na(fit.P)] <- 1
fit.P[fit.P < 0 &!is.na(fit.P)] <- 0
t.stat <- NULL
t.fit<-fit.P-rbind(0,fit.P[-nrow(fit.P),])
t.fit<-rbind(t.fit,1-colSums(t.fit,na.rm=TRUE))
t.Y<-rbind(Y,N-colSums(Y,na.rm=TRUE))
expected<-scale(t.fit,scale=1/N,center=FALSE)
t.stat<-colSums( (t.Y-expected)^2/expected,na.rm=TRUE)
if(0){
for (i in 1:ncol(Y)){
tt.stat<-t.Y[,i]*log(t.fit[,i]*N[i]/t.Y[,i])
tt.stat[Y[,i]==0]<-0
t.stat <- c(t.stat,-2*sum(tt.stat,na.rm=TRUE)/(1+(sum(1/t.fit[,i],na.rm=TRUE)-1)/(6*sum(Y[,i],na.rm=TRUE)*(sum(!is.na(Y[,i]))-1))))
}
}
p.smooth <- 1 - pchisq(t.stat, colSums(!is.na(Y)))
# if(!any(p.smooth<.9)) k.deduct<-k.deduct+1 ### Used when trying to control overfitting
smooth.it <- smooth.it + 1
w.mod[which.max(t.stat)] <- w.mod[which.max(t.stat)] * exp(1- smooth.it/n.iter)
}
### Plot the emperical CDFs and their fitted splines
if(Plot){
dev.new(width = 12, height = 10)
nf <- layout(matrix(1:2, 1, 2), widths = c(1, 0.4))
par(mai = c(1, 1, 0.2, 0.1))
plot(-1e+06, 1e+06, ylab = "Pr(x<X)", xlab = "Time", ylim = c(0, 1), xlim = range(record),
main = "", cex.axis = 1.7, cex.lab = 1.7)
grid()
t.xx <- seq(min(record), max(record), length.out = 200)
for (i in 1:length(x.loc)) {
points(record, cum.Y[i,]/N, col = COL[i%%length(COL)+1])
lines(t.xx,plot.fit.P[i,], lwd = 2, col = COL[i%%length(COL)+1], lty = 1)
}
par(mai = c(1, 0.1, 0.2, 0.1))
plot(-1e+06, 1e+06, ylab = "", xlab = "", ylim = 0:1, xlim = 0:1, xaxt = "n",
yaxt = "n", main = "")
legend("topright", title = "X", legend = signif(x.loc, 3), ncol = 2, col = COL[c(2:length(COL),1)],
lwd = 3, cex = 1.3, bty = "n")
}
p <- dp_dx <- NULL
for (ind in 1:length(dat)) {
### Estimate x derivative of log(p(x.loc,record[ind]))
dp_dx <- cbind(dp_dx, den.fun(dat = dat[[ind]], x = x.loc, deriv = 1,bw=bw[ind],
low.thresh = low.thresh[ind], hi.thresh = hi.thresh[ind]))
p <- cbind(p, den.fun(dat = dat[[ind]], x = x.loc, deriv = 0, bw=bw[ind],
low.thresh = low.thresh[ind], hi.thresh = hi.thresh[ind]))
### Prevent estimates from tails of distributions
tails<-which(x.loc<quantile(dat[[ind]],.01)|x.loc>quantile(dat[[ind]],.99))
dP_dt[tails,ind]<-p[tails,ind]<-dp_dx[tails,ind]<-NA
}
rownames(p)<-x.loc
colnames(p)<-record
list(dP_dt = dP_dt, dp_dx = dp_dx, p = p)
}
### Test whether `x` is usable as numeric data.
### Returns TRUE only when `x` is non-NULL, contains at least one non-NA
### element, and its non-NA elements are of numeric type; FALSE otherwise
### (including for NULL, all-NA, character, list, or factor input).
Is.numeric <- function(x) {
    if (is.null(x))
        return(FALSE)
    present <- !is.na(x)
    ## is.numeric() is a scalar type test on the subset of observed values
    any(present) && is.numeric(x[present])
}
### Estimate the relative entropy between each observed distribution the last observed distribution
### Estimate the relative entropy (Kullback-Leibler divergence) between each
### observed distribution in `dat` and the last observed distribution.
###
### `dat`        : list of numeric sample vectors, one per observation time.
### `bw`         : bandwidth (value or rule name) passed to den.fun(); scalar
###                (recycled to length(dat)) or one entry per element of `dat`.
### `hi.thresh`, `low.thresh` : optional upper/lower detection limits for
###                censored data; scalar (recycled) or one entry per element.
### Returns a numeric vector of relative entropies, one per element of `dat`,
### with point masses at the censoring limits handled separately.
rel.entropy <- function(dat, bw = "nrd0", hi.thresh = NULL, low.thresh = NULL) {
    p.low.ref <- p.hi.ref <- p.low <- p.hi <- rel.ent <- rep(0, length(dat))
    n <- length(dat)
    ## BUGFIX: these guards previously read `length(hi.thresh == 1)` etc. --
    ## the length of a comparison vector, which is truthy for any non-NULL
    ## input -- so vector-valued thresholds were also rep()-expanded. The
    ## intended scalar-recycling test is `length(x) == 1`.
    if (length(hi.thresh) == 1)
        hi.thresh <- rep(hi.thresh, n)
    if (length(low.thresh) == 1)
        low.thresh <- rep(low.thresh, n)
    if (length(bw) == 1)
        bw <- rep(bw, n)
    for (i in 1:n) {
        ## Evaluation grid spanning the i-th sample, trimmed to the dynamic
        ## range shared by distribution i and the reference distribution n
        xx <- seq(min(dat[[i]]), max(dat[[i]]), length.out = 1000)
        t.low.thresh <- t.hi.thresh <- NULL
        if (Is.numeric(low.thresh[c(i, n)])) {
            t.low.thresh <- max(low.thresh[c(i, n)], na.rm = TRUE)
            xx <- xx[xx > t.low.thresh]
            ## probability mass censored below the limit, for sample i and ref
            p.low[i] <- mean(dat[[i]] <= t.low.thresh)
            p.low.ref[i] <- mean(dat[[length(dat)]] <= t.low.thresh)
        }
        if (Is.numeric(hi.thresh[c(i, n)])) {
            t.hi.thresh <- min(hi.thresh[c(i, n)], na.rm = TRUE)
            xx <- xx[xx < t.hi.thresh]
            p.hi[i] <- mean(dat[[i]] >= t.hi.thresh)
            p.hi.ref[i] <- mean(dat[[length(dat)]] >= t.hi.thresh)
        }
        t.den1 <- den.fun(dat = dat[[i]], bw = bw[i], x = xx, deriv = 0, low.thresh = t.low.thresh,
            hi.thresh = t.hi.thresh)
        ## sanity check: density + censored masses should integrate to ~1
        tot.prob <- sum(t.den1, na.rm = TRUE) * mean(diff(xx)) + p.low[i] + p.hi[i]
        if (abs(tot.prob - 1) > 0.005)
            print(paste("Sum of estimated probabilities for data list element", i,
                "is", round(tot.prob, 3)))
        t.den2 <- den.fun(dat = dat[[n]], bw = bw[i], x = xx, deriv = 0, low.thresh = t.low.thresh,
            hi.thresh = t.hi.thresh)
        t.rel <- log(t.den1/t.den2)
        ## 0 * log(0/q) contributes 0 to the KL integral by convention
        t.rel[t.den1 == 0] <- 0
        rel.ent[i] <- sum(t.den1 * t.rel * mean(diff(xx)), na.rm = TRUE)
    }
    ## Add the discrete contributions from the censored point masses
    suppressWarnings(rel.ent[p.low > 0] <- (rel.ent + p.low * log(p.low/p.low.ref))[p.low >
        0])
    suppressWarnings(rel.ent[p.hi > 0] <- suppressWarnings(rel.ent + p.hi * log(p.hi/p.hi.ref))[p.hi >
        0])
    rel.ent
}
### Bootstrap estimation of drift (A) and diffusion (D) components from a set
### of population samples observed over time.
###
### `dat`     : list of numeric sample vectors, one per observation time.
### `x.loc`   : numeric vector of state values at which to estimate A and D.
### `record`  : numeric vector of observation times (same length as `dat`).
### `n.boot`  : number of bootstrap replicates (forced to >= 2*length(x.loc)).
### `Adj`     : positive multiplier applied to the estimated bandwidths.
### `hi.thresh`, `low.thresh` : optional censoring limits (NULL, scalar, or
###             one value per time point).
### `bw.method`: bandwidth rule passed to bw.est().
### `unnorm`  : if TRUE, rescale densities by sample sizes (truncated data).
### `dat.mat`, `mat.fun` : optional image-matrix input and count extractor
###             used for column-resampling bootstrap instead of row resampling.
### `plot.den`: if TRUE, plot the estimated densities per time point.
### Returns a list with bootstrap pairwise D/A estimates, relative entropies,
### and bookkeeping (x.loc, record, n.boot, Adj, kept densities, bandwidths).
DnA.analyze <- function(dat, x.loc, record, n.boot = 100, Adj = 1, hi.thresh = NULL, bw.method="nrd0",unnorm=FALSE,
    low.thresh = NULL, dat.mat = NULL,mat.fun=NULL,plot.den=FALSE) {
    n.t <- length(dat)
    ## BUGFIX: was `is.null(n.boot) | n.boot < ...`; with n.boot = NULL the
    ## right operand is logical(0) and `if` errors. `||` short-circuits.
    if (is.null(n.boot) || n.boot < (2 * length(x.loc))) {
        print(paste("n.boot has been set to the minimum required number of 2*length(x.loc) = ",
            2 * length(x.loc)))
        n.boot <- 2 * length(x.loc)
    }
    ## Recycle scalar censoring limits to one value per time point
    if (length(hi.thresh) == 1)
        hi.thresh <- rep(hi.thresh, n.t)
    if (length(low.thresh) == 1)
        low.thresh <- rep(low.thresh, n.t)
    if (!length(hi.thresh) %in% c(0, 1, n.t))
        stop(paste("length(hi.thresh)=", length(hi.thresh), ". hi.thresh must be one of: NULL (no threshold), scalar (constant threshold), or a vector of length(dat)=",
            n.t, " (providing threshold for each time).", sep = ""))
    if (!length(low.thresh) %in% c(0, 1, n.t))
        stop(paste("length(low.thresh)=", length(low.thresh), ". low.thresh must be one of: NULL (no threshold), scalar (constant threshold), or a vector of length(dat)=",
            n.t, " (providing threshold for each time).", sep = ""))
    if (length(record) != length(dat))
        stop(paste(length(record), " = length(record) != length(dat) = ", length(dat),
            ". length(record) must equal length(dat).", sep = ""))
    ## BUGFIX: was `!is.numeric(Adj) | Adj < 0`; with Adj = NULL the condition
    ## is logical(0) and `if` errors. `||` short-circuits on the type test.
    if (!is.numeric(Adj) || Adj < 0) {
        warning(paste("Adj=", Adj, ". Adj must be a positive real number and has been set to 1.",
            sep = ""))
        Adj <- 1
    }
    if (!is.vector(x.loc) | !is.numeric(x.loc))
        stop("x.loc must be a numeric vector")
    x.loc2 <- seq(min(x.loc, na.rm = TRUE), max(x.loc, na.rm = TRUE), length.out = 200)
    max.den <- 0
    ### Estimate and plot (if plot.den==TRUE) densities; the common y-limit is
    ### taken from the first and last observation times
    for (i in c(which.min(record), which.max(record))) {
        t.den <- den.fun(dat = dat[[i]], x = x.loc2, deriv = 0, low.thresh = low.thresh[i],
            hi.thresh = hi.thresh[i])
        max.den <- max(c(max.den, t.den[t.den < Inf]), na.rm = TRUE)
    }
    ## Panel grid layout for the per-time density plots (max 7x7 per device)
    if (n.t > 49) {
        nc <- 7
        nr <- 7
    } else {
        nc <- ceiling(sqrt(n.t))
        nr <- ceiling(n.t/nc)
    }
    keep.den <- x.loc2
    bw<-bw.est(dat,bw=bw.method,record=record)*Adj
    for (i in 1:n.t) {
        keep.den <- cbind(keep.den, den.fun(dat = dat[[i]], bw=bw[i],x = x.loc2, deriv = 0,
            low.thresh = low.thresh[i], hi.thresh = hi.thresh[i]))
        if(plot.den){
            ## Open a new device whenever the current grid is full
            if (i %in% (1 + nr * nc * (0:10))) {
                dev.new(width = 12, height = 12)
                nf <- layout(matrix(1:(nc * nr), nr, nc, byrow = TRUE), widths = c(1.1 +
                    0.05 * nc, rep(1, nc - 1)), heights = c(rep(1, nr - 1), 1.1 + 0.05 *
                    nr))
            }
            ## Only panels on the left column / bottom row get axes and labels
            b <- l <- t <- r <- 0
            ylab <- xlab <- ""
            yaxt <- xaxt <- "n"
            if (i %in% (1 + nc * (0:nr))) {
                l <- 1
                ylab = "Density"
                yaxt = "s"
            }
            if (i > nc * (nr - 1)) {
                b <- 1
                xlab = "X"
                xaxt = "s"
            }
            par(mai = c(b, l, t, r))
            plot(-10, -10, xlab = xlab, xaxt = xaxt, ylab = ylab, yaxt = yaxt, ylim = c(0,
                max.den), xlim = range(x.loc), main = "", cex.lab = 1.7, cex.axis = 1.7)
            grid()
            lines(x.loc2, keep.den[, i + 1])
            legend("top", legend = paste("t=", round(record[i], 2), sep = ""), col = "white",
                pch = 1, bty = "n")
            ## Annotate censored probability mass below/above the limits
            if (!is.null(low.thresh))
                if (is.numeric(low.thresh) & !is.na(low.thresh[i])) {
                    if (any(dat[[i]] <= low.thresh[i])) {
                        legend("left", title = expression("Pr(x<" * tau[L] * ")"), bty = "n",
                            legend = round(mean(dat[[i]] <= low.thresh[i]), 3), pch = 1,
                            col = "white", cex = 1.7)
                    }
                }
            if (!is.null(hi.thresh))
                if (is.numeric(hi.thresh) & !is.na(hi.thresh[i])) {
                    if (any(dat[[i]] >= hi.thresh[i])) {
                        legend("right", title = expression("Pr(x>" * tau[U] * ")"), bty = "n",
                            legend = round(mean(dat[[i]] >= hi.thresh[i]), 3), pch = 1, col = "white",
                            cex = 1.7)
                    }
                }
        }
    }
    rel.ent <- rel.entropy(dat, bw=bw, hi.thresh = hi.thresh, low.thresh = low.thresh)
    boot.dat <- dat
    ## use parallel processing to speed up bootstrapping
    try(registerDoMC(cores = 6))
    hat <- foreach(boot.it = 1:n.boot, .combine = rbind) %dopar% {
        # for(boot.it in 1:20){
        if (boot.it %in% c(20, 50, 100, 200, 500, 1000, 2000, 5000))
            print(paste("Iteration", boot.it))
        # set seed for random number generation, which determines bootstrap sample
        set.seed(boot.it)
        # Construct bootstrapped sample
        boot.dat <- dat
        if (is.null(dat.mat)) {
            for (i in 1:n.t) boot.dat[[i]] <- sample(dat[[i]], length(dat[[i]]),
                replace = TRUE)
            if (boot.it == 1) ## Use the original data during the first iteration
                boot.dat <- dat
        }
        # Use this code when bootstrapping from image (uses variability across columns in place of assumed binomial variability)
        if (!is.null(dat.mat)) {
            for (i in 1:n.t) {
                boot.col <- sample(1:ncol(dat.mat[[i]]), ncol(dat.mat[[i]]), replace = TRUE)
                boot.cnt <- round(mat.fun(dat.mat[[i]][, boot.col]))
                if (boot.it == 1)
                    boot.cnt <- mat.fun(dat.mat[[i]])
                boot.cnt[boot.cnt < 0] <- 0
                boot.dat[[i]] <- rep(as.numeric(rownames(dat.mat[[i]])), boot.cnt)
            }
        }
        ### Estimate required components from bootstrapped sample
        parms <- DnA.ingredients(x.loc, record, boot.dat, Plot = FALSE,
            hi.thresh = hi.thresh, low.thresh = low.thresh,bw=bw,boot.it=boot.it)
        dP_dt <- parms$dP_dt
        dp_dx <- parms$dp_dx
        p <- parms$p
        ### To unnormalize estimated distributions (may be useful for truncated data)
        if(unnorm){
            dP_dt<-scale(dP_dt,center=FALSE,scale=1/(sapply(dat,length)))
            dp_dx<-scale(dp_dx,center=FALSE,scale=1/(sapply(dat,length)))
            p<-scale(p,center=FALSE,scale=1/(sapply(dat,length)))
        }
        dlnp_dx <- dp_dx/p
        dP_p <- dP_dt/p
        ### Estimate (modified) drift and diffusion for each time pair
        nm <- t.a.hat <- t.d.hat <- NULL
        for (t1 in 1:(length(record) - 1)) { ## Time 1
            for (t2 in (t1 + 1):length(record)) { ## Time 2
                nm <- c(nm, paste("T1_", t1, "T2_", t2))
                t.d.hat <- cbind(t.d.hat, (p[, t2] * dP_dt[, t1] - p[, t1] * dP_dt[,
                    t2])/(p[, t2] * dp_dx[, t1] - p[, t1] * dp_dx[, t2]))
                t.a.hat <- cbind(t.a.hat, dlnp_dx[, t1] * t.d.hat[, ncol(t.d.hat)] -
                    dP_p[, t1])
            }
        }
        colnames(t.a.hat) <- paste("A", nm)
        colnames(t.d.hat) <- paste("D", nm)
        rownames(t.a.hat) <- rownames(t.d.hat) <- paste("X", x.loc, "X", sep = "")
        signif(cbind(t.d.hat, t.a.hat), 3)
    } ### end parallel
    ### Get plots for original data if requested
    if(plot.den){
        parms <- DnA.ingredients(x.loc, record, dat, Plot = TRUE,
            hi.thresh = hi.thresh, low.thresh = low.thresh,bw=bw,boot.it=100)
    }
    ### Estimate relative entropy between each pair of times
    rel.ent <- nm <- NULL
    for (t1 in 1:(length(record) - 1)) {
        for (t2 in (t1 + 1):length(record)) {
            nm <- c(nm, paste("T1_", t1, "T2_", t2))
            rel.ent <- c(rel.ent, rel.entropy(dat[c(t1, t2)], bw=bw[c(t1, t2)], hi.thresh = hi.thresh[c(t1,
                t2)], low.thresh = low.thresh[c(t1, t2)])[1])
        }
    }
    names(rel.ent) <- nm
    return(list(d.hat = hat[, grep("D", colnames(hat))], a.hat = hat[, grep("A",
        colnames(hat))], rel.ent = rel.ent, x.loc = x.loc, record = record, n.boot = n.boot,
        Adj = Adj, keep.den = keep.den,bw=bw))
}
### Combine the bootstrap pairwise estimates from DnA.analyze into drift (A)
### and diffusion (D) profiles, with smoothing and uncertainty estimates.
### BUGFIX: the original signature contained a stray empty formal argument
### (`"weighted_median", ,smooth=`), which is a parse error in R; removed.
DnA.results <- function(obj, method = "weighted_median", smooth = "poly", Adj = 1, plot.res = FALSE, df.spline.D = NULL, df.spline.A = NULL){
    # 'obj' is the object returned from DnA.analyze
    # 'method' describes how to combine estimates across time pairs. Supported values are "mean", "median", "weighted_median" (default), and "mode"
    #'smooth' describes how to smooth and interpolate drift and diffusion estimates across values of obj$x.loc. Supported values are 'poly' (default) and 'spline'. 'poly' is recommended as it is the only supported method that incorporates uncertainty from the derivative of the diffusion function into the uncertainty of the drift profile.
    #'plot.res' is an indicator. If TRUE, the estimated drift and a diffusion profiles are displayed.
    #'df.spline.D' is an optional constraint for the degrees of freedom used when fitting a spline to the pointwise diffusion estimates (ignored if smooth!='spline')
    #'df.spline.A' is an optional constraint for the degrees of freedom used when fitting a spline to the pointwise drift estimates (ignored if smooth!='spline')
    ## Validate that each element of `method` uniquely matches a supported value
    ## NOTE(review): grepl("median", ...) matches both "median" and
    ## "weighted_median", so method="median" trips the uniqueness check --
    ## confirm whether partial matching here is intended.
    for (i in 1:length(method)) {
        t.method <- grep(method[i], c("mean", "median", "weighted_median", "mode"),
            value = TRUE)
        if (length(method[i]) == 0)
            stop(paste("method[", i, "]=", method[i], " does match any of:\n'mean','median','weighted_median', or 'mode'",
                sep = ""))
        if (length(method[i]) > 1)
            stop(paste("method[", i, "]=", method[i], " matches", t.method, ". method[",
                i, "] must uniquely match one of: 'mean','median','weighted_median', or 'mode'",
                sep = ""))
        if (sum(grepl(method[i], c("mean", "median", "weighted_median", "mode"))) !=
            1)
            stop(paste("method[", i, "]=", method[i], ".\n Elements of method must uniquely match one of: 'mean','median','weighted_median', or 'mode'.",
                sep = ""))
        method[i] <- t.method
    }
    ## A single method applies to both the local diffusion and drift estimates
    if (length(method) == 1)
        method <- list(local.d = method, local.a = method)
    if (is.null(method$local.d))
        stop("method$local.d is not specified")
    if (is.null(method$local.a))
        stop("method$local.a is not specified")
    rel.ent = obj$rel.ent
    x.loc = obj$x.loc
    record = obj$record
    n.boot = obj$n.boot
    ## Negative diffusion estimates are invalid; mask them (and their drift)
    obj$a.hat[obj$d.hat < 0 | is.na(obj$d.hat)] <- NA
    obj$d.hat[obj$d.hat < 0] <- NA
    if (!is.null(df.spline.D))
        if (!is.numeric(df.spline.D))
            stop(paste("If specified, df.spline.D must be a real number between 1 and length(x.loc)=",
                length(x.loc)))
    if (!is.null(df.spline.A))
        if (!is.numeric(df.spline.A))
            stop(paste("If specified, df.spline.A must be a real number between 1 and length(x.loc)=",
                length(x.loc)))
    if (is.null(Adj))
        Adj = obj$Adj
    ## NOTE(review): this device is opened unconditionally, even when
    ## plot.res=FALSE (a second device is opened inside if(plot.res) below).
    ## Looks vestigial -- confirm before removing.
    dev.new(width = 10, height = 10)
    layout(matrix(1:2, 2, 1))
    ## Pass 1: combine pairwise estimates at each x.loc into pointwise
    ## log-diffusion (Func=="D") and drift (Func=="A") bootstrap matrices
    for (Func in c("D", "A")) {
        if (Func == "D") {
            log.d.BOOT <- log.d.pair.est <- log.d <- se.log.d <- boot.sd.log.d.pair.est <- NULL
            METHOD <- method$local.d
        }
        if (Func == "A") {
            a.BOOT <- a.pair.est <- a <- se.a <- boot.sd.a.pair.est <- NULL
            METHOD <- method$local.a
        }
        for (x in x.loc) {
            boot <- NULL
            if (Func == "D"){
                t.dat <- suppressWarnings(log(obj$d.hat[grep(paste("X", x, "X", sep = ""),
                    rownames(obj$d.hat)), ]))
            }
            if (Func == "A"){
                t.dat <- suppressWarnings(obj$a.hat[grep(paste("X", x, "X", sep = ""),
                    rownames(obj$a.hat)), ])
            }
            ## NOTE(review): this for loop only wraps the first `if`, so the
            ## median branch is recomputed ncol(t.dat) times with identical
            ## results -- looks vestigial but is harmless; confirm.
            for(i in 1:ncol(t.dat))
            if (grepl("median", METHOD)) {
                w <- rep(1, ncol(t.dat))
                if (METHOD == "weighted_median")
                    w <- 1/apply(t.dat, 2, var, na.rm = TRUE)
                boot <- apply(t.dat, 1, weightedMedian, w = w, na.rm = TRUE)
            }
            if (METHOD == "mean") {
                bad <- which(colMeans(t.dat < 0 | t.dat == Inf) > 0.1)
                t.dat <- apply(t.dat, 2, function(x) {
                    suppressWarnings(M <- max(x[x < Inf]))
                    x[x == Inf] <- M
                    x
                })
                ## Inverse-covariance-weighted mean across time pairs
                for (i in 1:nrow(t.dat)) {
                    if (any(!is.na(t.dat[i, ]))) {
                        invVAR <- solve(cov(as.matrix(t.dat[, !is.na(t.dat[i, ])]), use = "pairwise.complete"))
                        boot <- c(boot, sum(invVAR %*% t.dat[1, !is.na(t.dat[i, ])])/sum(invVAR))
                    } else boot <- c(boot, NA)
                }
            }
            if (METHOD == "mode") {
                ## Mode of the kernel-density estimate across time pairs
                for (i in 1:nrow(t.dat)) {
                    t.d.hat <- t.dat[i, !is.na(t.dat[i, ])]
                    dd <- seq(min(t.d.hat), max(t.d.hat), length.out = 500)
                    t.den <- den.fun(dat = t.d.hat, x = dd, deriv = 0, Adj = Adj)
                    boot <- c(boot, dd[which.max(t.den)])
                }
            }
            if (Func == "D") {
                log.d.pair.est <- rbind(log.d.pair.est, t.dat[1, ])
                colnames(log.d.pair.est) <- colnames(t.dat)
                log.d <- c(log.d, boot[1])
                se.log.d <- c(se.log.d, sd(boot, na.rm = TRUE))
                log.d.BOOT <- cbind(log.d.BOOT, boot)
                boot.sd.log.d.pair.est <- rbind(boot.sd.log.d.pair.est, apply(t.dat, 2, sd,
                    na.rm = TRUE))
            }
            if (Func == "A") {
                a.pair.est <- rbind(a.pair.est, t.dat[1, ])
                colnames(a.pair.est) <- colnames(t.dat)
                a.BOOT <- cbind(a.BOOT, boot)
                boot.sd.a.pair.est <- rbind(boot.sd.a.pair.est, apply(t.dat, 2, sd,
                    na.rm = TRUE))
            }
        }
    }
    rownames(a.pair.est) <- rownames(log.d.pair.est) <- rownames(boot.sd.a.pair.est) <- rownames(boot.sd.log.d.pair.est) <- x.loc
    ## Plot pointwise diffusion estimates with ~95% bands
    if(plot.res){
        dev.new(width = 10, height = 10)
        layout(matrix(1:2, 2, 1))
        par(mai = c(0, 1, 1, 0.1))
        plot(x.loc, exp(log.d), type = "l", lwd = 3, log = "y", ylim = exp(range(c(log.d +
            2.5 * se.log.d, log.d - 2.5 * se.log.d), na.rm = TRUE)), ylab = expression(hat(D)),
            xlab = "", xaxt = "n")
        grid(equilogs = FALSE)
        for (i in 1:nrow(log.d.pair.est)) points(rep(x.loc[i], ncol(log.d.pair.est)), exp(log.d.pair.est[i,
            ]), col = COL, pch = rep(1:20, each = length(COL)))
        lines(x.loc, exp(log.d), lwd = 3)
        lines(x.loc, exp(log.d + 2 * se.log.d), lwd = 3)
        lines(x.loc, exp(log.d - 2 * se.log.d), lwd = 3)
    }
    ## Pass 2: smooth the pointwise profiles across x.loc
    for (Func in c("D", "A")) {
        if (Func == "D") {
            use <- which(!is.na(log.d))
            est <- log.d.BOOT[, use]
            given.df <- df.spline.D
        }
        if (Func == "A") {
            use <- which(!is.na(a))
            est <- a.BOOT[, use]
            given.df <- df.spline.A
        }
        full.est<-est
        se.est <- apply(est, 2, sd)
        nn <- ncol(est)
        if (is.null(given.df))
            given.df <- 0
        if (given.df < 2) {
            df.try <- 1
            ## Drop columns with missing bootstrap variance (up to 5 passes)
            sig <- cov(est,use="pairwise.complete")
            cov.it<-0
            while(any(is.na(diag(sig)))&&cov.it<5){
                use<-use[-which(is.na(diag(sig)))]
                est<-est[,-which(is.na(diag(sig)))]
                sig<-cov(est,use="pairwise.complete")
                if(cov.it==4){
                    print(paste("Problem with missing values in bootstrap covariance matrix for", Func))
                    print(paste("Remaining column indices are:",use))
                }
                cov.it<-cov.it+1
            }
            invVAR <- NULL
            try(invVAR <- solve(sig), silent = TRUE)
            ### If covariance matrix is singular, slightly increase diagonal elements
            ### before inverting
            if (is.null(invVAR)) {
                sig <- sig + diag(rep(min(diag(sig))/1000, sum(!is.na(log.d))))
                invVAR <- solve(sig)
            }
            ## GLS mean and F-test of a constant profile
            t.mn <- sum(invVAR %*% est[1, ])/sum(invVAR)
            t.fit <- list(x = x.loc[!is.na(log.d)], y = rep(t.mn, nn))
            test <- (est[1, ] - t.mn)
            test <- (t(test) %*% invVAR %*% test * (n.boot - nn)/(nn * (n.boot -
                1)))
            p.constant <- pf(test, nn, n.boot - nn, lower.tail = FALSE)
            print(paste("P-value for test of constant", Func,"=",signif(t.mn,3), "is", signif(p.constant, 3)))
            if(smooth=="LASSO"){
                library(lars)
                ## Whiten the pointwise estimates by the bootstrap covariance,
                ## then fit a LASSO path over polynomial basis terms
                Y<-est[1,]
                V<-sig
                V1_2<-chol(V)
                Vneg1_2<-solve(V1_2)
                Ymod<-as.vector(Vneg1_2%*%Y)
                poly.x<-poly(x.loc,degree=(length(x.loc)-7))
                x.matfun<-function(x){
                    xx<-t(t(matrix(x-mean(x.loc),length(x),length(x.loc)-2,byrow=FALSE))^(0:(length(x.loc)-3)))
                }
                x.mat<-x.matfun(x.loc)
                Xmod<-Vneg1_2%*%x.mat
                fit<-lars(x=Xmod,y=Ymod)
                predict(fit,s=c(0:10)/10,mode="fraction",type="coef")
                n<-length(Y)
                coef.mat<-predict(fit,type="coef")$coefficients
                pred.mat<-predict(fit,newx=Xmod,type="fit")$fit
                mse<-colMeans((pred.mat-Ymod)^2)
                ### Only allow one model of each d.f.
                df<-rowSums(coef.mat!=0)
                use<-NULL
                for(DF in unique(df)){
                    use<-c(use,max(which(df==DF)))
                }
                bic<-(mse+log(n)/n*df)[use]
                PR<-exp(-.5*bic)
                PR<-PR/sum(PR)
                pred.mat<-pred.mat[,use]
                ## Numerical derivative of each selected model's fit
                x.mat_up<-Vneg1_2%*%x.matfun(x.loc+1e-6)
                x.mat_dwn<-Vneg1_2%*%x.matfun(x.loc-1e-6)
                pred.mat_up<-V1_2%*%predict(fit,newx=x.mat_up,type="fit")$fit[,use]
                pred.mat_dwn<-V1_2%*%predict(fit,newx=x.mat_dwn,type="fit")$fit[,use]
                fit$dpred.dx<-t(pred.mat_up-pred.mat_dwn)/2e-6
                t.fit$x<-x.loc
                t.fit$y<-as.vector(V1_2%*%pred.mat%*%PR)
            }
            if(smooth=="poly"){
                ## GLS polynomial fit of degree `deg`, accumulating BIC,
                ## fitted values, and (for D) the numerical derivative
                poly.fit<-function(x.loc,deg,BIC=NULL,dpred.dx=NULL,pred=NULL,eps=1e-6){
                    xx<-matrix(rep(1,length(x.loc)),length(x.loc))
                    poly.x<-1
                    if(deg>0){
                        poly.x<-NULL
                        try(poly.x<-poly(x.loc,degree=deg),silent=TRUE)
                        xx<-cbind(xx,poly.x)
                    }
                    if(!is.null(poly.x)){
                        xtxi <- solve(t(xx) %*% invVAR %*% xx)
                        beta <- xtxi %*% t(xx) %*% invVAR %*% est[1,]
                        t.pred<-as.vector(xx%*%beta)
                        pred<-rbind(pred,t.pred)
                        bic<-NULL
                        try(bic<--2*dmt(est[1,],mean=t.pred,S=sig,df=n.boot-nn,log=TRUE)+(deg+1)*log(nn),silent=TRUE)
                        if(is.null(bic)){
                            ## Fall back to an explicit multivariate-t log-likelihood
                            nu<-n.boot-nn
                            resid<-est[1,]-t.pred
                            bic<--2*(lgamma(n.boot/2)-lgamma(nu/2)-nn/2*log(nu*pi)-log(det(sig))/2-n.boot/2*log(1+t(resid)%*%invVAR%*%resid/nu))+(deg+1)*log(nn)
                        }
                        BIC<-c(BIC,bic)
                        if(Func=="D"){
                            if(deg==0){dpred.dx<-rbind(dpred.dx,rep(0,length(x.loc)))}
                            else{
                                xx1<-cbind(rep(1,length(x.loc)),predict(poly.x,newdata=x.loc+eps))
                                xx2<-cbind(rep(1,length(x.loc)),predict(poly.x,newdata=x.loc-eps))
                                z<-as.vector(exp(xx1%*%beta)-exp(xx2%*%beta))/(2*eps)
                                dpred.dx<-rbind(dpred.dx,z)
                            }
                        }
                    } else{
                        BIC<-c(BIC,Inf)
                        pred<-rbind(pred,rep(0,length(x.loc)))
                        dpred.dx <- rbind(dpred.dx,rep(0,length(x.loc)))
                    }
                    return(list(BIC=BIC,dpred.dx=dpred.dx,pred=pred,xx=xx,beta=beta))
                }
                ### Use Baysian model averaging across polynomial fits of different degrees
                ### to incorporate uncertainty in model choice in uncertainty in diffusion
                ### and its derivative
                fit<-NULL
                # plot(x.loc[use],est[1,],ylim=c(0,2))
                # for(i in 1:length(use))
                #	lines(rep(x.loc[use[i]],2),est[1,i]+c(-2,2)*sqrt(diag(sig)[i]))
                options(show.error.messages=FALSE)
                for(deg in 0:(min(10,length(x.loc[use])-2))){
                    fit<-poly.fit(x.loc=x.loc[use],deg=deg,BIC=fit$BIC,dpred.dx=fit$dpred.dx,pred=fit$pred)
                    # lines(x.loc[use],as.vector(fit$xx%*%fit$beta),col=deg+1)
                }
                PR<-exp(-.5*fit$BIC)
                PR<-PR/sum(PR)
                round(PR,2)
                ## Keep raising the degree while the highest-degree model still
                ## carries non-negligible posterior probability.
                ## BUGFIX: the original line contained a mojibake degree sign
                ## ("/1000°<") where the HTML entity for "&deg" was rendered;
                ## restored the intended "& deg <" conjunction.
                while(PR[length(PR)]>max(PR)/1000 & deg<(length(x.loc[use])-2)){
                    deg<-deg+1
                    fit<-poly.fit(x.loc=x.loc[use],deg=deg,BIC=fit$BIC,dpred.dx=fit$dpred.dx,pred=fit$pred)
                    # lines(x.loc[use],as.vector(fit$xx%*%fit$beta),col=deg+1)
                    PR<-exp(-.5*fit$BIC)
                    PR<-PR/sum(PR)
                }
                options(show.error.messages=TRUE)
                t.fit$x<-x.loc[use]
                t.fit$y<-as.vector(t(fit$pred)%*%PR)
            }
        }
        if (Func == "D") {
            smooth.log.d <- t.fit
            d.mod.prob<-if(smooth=="poly") PR else NA
            d.p.const<-p.constant
            if(plot.res) lines(smooth.log.d$x, exp(smooth.log.d$y), col = 3, lwd = 3)
            ### Adjust drift estimates for derivative of diffusion function
            dpred.dx<-matrix(NA,length(PR),length(x.loc))
            dpred.dx[,use]<-fit$dpred.dx
            a.BOOT[1,]<-a.BOOT[1,]+as.vector(t(dpred.dx)%*%PR)
            ## Propagate model-choice uncertainty into the drift bootstrap
            mod.ind<-sample(1:length(PR),10*n.boot,replace=TRUE,prob=PR)
            a.BOOT <- rbind(a.BOOT[1,],a.BOOT[rep(1:n.boot,10),]+dpred.dx[mod.ind,])
            a<-a.BOOT[1,]
            se.a<-apply(a.BOOT,2,sd)
        }
        if (Func == "A"){
            smooth.a <- t.fit
            a.mod.prob<-if(smooth=="poly") PR else NA
            a.p.const<-p.constant
        }
    }
    ## Potential profile U(x) = -integral of the drift (trapezoid rule),
    ## centered at its median
    u.BOOT<--scale(a.BOOT[,use][,-1]+a.BOOT[,use][,-length(use)],center=FALSE,scale=2/diff(x.loc[use]))
    u.BOOT <- cbind(rep(0,nrow(u.BOOT)),t(apply(u.BOOT,1,cumsum)))
    se.u <- apply(u.BOOT, 2, sd)
    u <- u.BOOT[1, ] - median(u.BOOT[1, ], na.rm = TRUE)
    ## Plot pointwise drift estimates with ~95% bands
    if(plot.res){
        par(mai = c(1, 1, 0, 0.1))
        plot(x.loc, a, type = "l", lwd = 3, ylim = quantile(c(a + 2.5 * se.a, a - 2.5 *
            se.a),c(.2,.8), na.rm = TRUE), ylab = expression(hat(A)), xlab = "X", main = "")
        grid()
        for (i in 1:nrow(a.pair.est)) points(rep(x.loc[i], ncol(a.pair.est)), a.pair.est[i,
            ], col = COL, pch = rep(1:20, each = length(COL)))
        lines(x.loc, a, lwd = 3)
        lines(x.loc, a + 2 * se.a, lwd = 3)
        lines(x.loc, a - 2 * se.a, lwd = 3)
        lines(smooth.a$x, smooth.a$y, col = 3, lwd = 3)
    }
    return(list( x.loc = x.loc, u = u, se.u = se.u,
        a.pair.est = a.pair.est, boot.sd.a.pair.est = boot.sd.a.pair.est, a = a, se.a = se.a, smooth.a =smooth.a, a.mod.prob=a.mod.prob,a.p.const=a.p.const,
        log.d.pair.est = log.d.pair.est, boot.sd.log.d.pair.est = boot.sd.log.d.pair.est, log.d = log.d, se.log.d = se.log.d, smooth.log.d = smooth.log.d,d.mod.prob=d.mod.prob,d.p.const=d.p.const
    ))
}
|
8aa24f5e73e3921ec12c199773016569a9904656
|
78ed9e5357b26cceaf8329c404685de78dbe21b0
|
/R/gurobi_MIPsearch.R
|
73fc6fb5659ed5591ca14e107c0b980f45de5330
|
[] |
no_license
|
cran/DoE.MIParray
|
efad96f87a0099d075c57a894d5d75f020b9207d
|
15d5cfa8739d71da99e9707571664adedbff8567
|
refs/heads/master
| 2023-08-14T19:25:32.853829
| 2021-09-28T15:20:02
| 2021-09-28T15:20:02
| 105,168,172
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,951
|
r
|
gurobi_MIPsearch.R
|
## Search over level orderings for a mixed-level array optimized by mixed
## integer programming (gurobi), minimizing the number of words of length
## `resolution` (n^2 * A_R), with ties broken by the full GWLP.
## Each permutation of `nlevels` (or each element of `orders`) is handed to
## gurobi_MIParray(); the best plan found is returned with its ordering
## attached as attribute "optorder" (plus "orders"/"allplans" if listout=TRUE).
gurobi_MIPsearch <- function(nruns, nlevels, resolution=3, maxtime=60,
                           stopearly=TRUE, listout=FALSE, orders=NULL,
                           distinct=TRUE, detailed=0,
                           forced=NULL, find.only=TRUE, nthread=2,
                           heurist=0.5, MIQCPMethod=0, MIPFocus=1,
                           gurobi.params = list(BestObjStop = 0.5, OutputFlag=0)){
  aufruf <- sys.call()
  nlev <- nlevels
  ## the function ensures the requested resolution and throws an error, if that is not possible
  ## it is in principle possible to interrupt the process and keep the result, but unstable situations may occur
  ## check inputs
  stopifnot(is.logical(listout))
  stopifnot(is.logical(stopearly))
  stopifnot(is.null(orders) || is.list(orders))
  ## each user-supplied order must be a permutation of nlev
  ## NOTE(review): the check compares sort(order) to nlev elementwise, which
  ## assumes nlev itself is sorted ascending -- confirm against callers.
  if (!is.null(orders)) stopifnot(all(sapply(lapply(orders, sort), function(obj) all(obj==nlev))))
  if (!is.numeric(detailed)) stop("detailed must be numeric")
  ## clamp `detailed` to the supported integer range 0..3
  detailed <- max(0, detailed)
  detailed <- min(3, detailed)
  detailed <- floor(detailed) ## 0,1,2,3 possible, 1.999 -> 1 (e.g.)
  if (!is.numeric(nruns)) stop("nruns must be an integer number")
  if (!is.numeric(nlev)) stop("nlev must have integer elements")
  nfac <- length(nlev)
  if (nfac < 2) stop("nlev must have at least two elements")
  if (!is.numeric(resolution)) stop("resolution must be an integer number")
  if (!is.null(maxtime)){
    if (!is.numeric(maxtime)) stop("maxtime must be numeric")
    if (!length(maxtime)==1) stop("maxtime must be scalar")
    if (!maxtime>0) stop("maxtime must be positive")
  }
  if (!is.numeric(nthread)) stop("nthread must be numeric")
  if (!length(nthread)==1) stop("nthread must be scalar")
  if (!nthread>=0) stop("nthread must be non-negative")
  if (!is.numeric(heurist)) stop("heurist must be numeric")
  if (!length(heurist)==1) stop("heurist must be scalar")
  if (!(heurist>=0 && heurist<=1)) stop("heurist must be between 0 and 1")
  if (!is.numeric(MIQCPMethod)) stop("MIQPCMethod must be a valid integer number")
  if (!length(nruns)==1) stop("nruns must be scalar")
  if (!length(resolution)==1) stop("resolution must be scalar")
  if (resolution < 2) stop("resolution must be at least 2")
  if (resolution > nfac) stop("resolution must not be larger than the number of factors")
  ## strength of the orthogonal array = resolution - 1
  strength <- resolution - 1
  if (!is.logical(distinct)) stop("distinct must be logical")
  if (distinct && nruns>prod(nlev)) stop("too many runs for design with distinct runs")
  ## validate forced runs; a matrix of runs is converted to count-vector form
  if (!is.null(forced)){
    if (strength==0) stop("forcing runs requires resolution > 1")
    if (!is.numeric(forced)) stop("forced must be numeric")
    if (!all(forced%%1==0)) stop("forced must have integer elements")
    if (!all(forced>=0)) stop("forced must have non-negative elements")
    if (is.matrix(forced)){
      if (!ncol(forced)==nfac) stop("matrix forced has wrong number of columns")
      if (nrow(forced)>=nruns) stop("matrix forced has too many rows")
      if (!all(levels.no(forced)==nlev)) stop("forced and nlevels do not match")
      for (i in 1:length(nlev)) if (length(setdiff(forced[,i],1:nlev[i]))>0)
        stop("invalid entries in column ", i, " of matrix forced")
      forced <- dToCount(forced-1)
    }
    if (!length(forced)==prod(nlev)) stop("vector forced has wrong length")
    if (!sum(forced)<nruns) stop("vector forced fixes all runs")
    if (distinct && !all(forced %in% c(0,1)))
      stop("forced array does comply with option distinct")
  }
  ## preliminary exclusion of definitely infeasible cases
  N <- prod(nlev)
  if (strength > 0)
    if (!DoE.base::oa_feasible(nruns, nlev, strength)) stop("requested array does not exist")
  ## global optimum of n^2*A_R
  globopt <- round(lowerbound_AR(nruns, nlevels, resolution)*nruns^2)
  ## merge user-supplied gurobi parameters with enforced settings
  if (!is.list(gurobi.params)) stop("gurobi.params must be a named list")
  params <- gurobi.params
  if (is.null(params)) params <- vector(mode="list")
  if (is.null(params$BestObjStop)) params$BestObjStop <- 0.5
  ## BestObjStop 0.5 enforced (user can specify larger value, if desired)
  ## for A_resolution, the bound can and will be made
  ## using internal function lowerbounds from DoE.base
  ## this happens in the loop
  if (params$BestObjStop < 0) {
    params$BestObjStop <- 0.5 ## objective values smaller than zero are nonsense
    warning("negative gurobi parameter BestObjStop was replaced by 0.5")
  }
  ## the strictest restriction of maximum time wins
  params$TimeLimit <- min(params$TimeLimit, maxtime)
  if (params$TimeLimit == Inf) params$TimeLimit <- NULL
  ## the largest permissible choice of Heuristics wins
  params$Heuristics <- max(params$Heuristics, heurist)
  if (params$Heuristics > 1) params$Heuristics <- 1
  if (params$Heuristics < 0) params$Heuristics <- 0
  ## the smaller thread request wins
  params$Threads <- min(nthread, params$Threads)
  if (!MIQCPMethod %in% c(-1,0,1)){
    MIQCPMethod <- 0
    warning("invalid specification of MIQCPMethod ignored")
  }
  if (!is.null(params$MIQCPMethod)){
    if (!MIQCPMethod==params$MIQCPMethod){
      warning("conflicting specifications of MIQCPMethod ignored")
      params$MIQCPMethod <- 0 ## default
    }
  }
  else params$MIQCPMethod <- MIQCPMethod
  if (!MIPFocus %in% 0:3){
    MIPFocus <- 0
    warning("invalid specification of MIPFocus ignored")
  }
  if (!is.null(params$MIPFocus)){
    if (!MIPFocus==params$MIPFocus){
      warning("conflicting specifications of MIPFocus ignored")
      params$MIPFocus <- 0 ## default
    }
  }
  else params$MIPFocus <- MIPFocus
  ## candidate orderings: all distinct permutations unless the user supplied some
  if (is.null(orders)) ll <- unique(combinat::permn(nlevels))
  else ll <- orders
  opt <- Inf
  planopt <- NULL
  iGWLPopt <- NULL
  optorder <- NULL
  message(paste(length(ll), "orders to be examined"))
  i <- 0
  if (listout) liste <- vector(mode="list", length=length(ll))
  for (l in ll){
    i <- i+1
    cat( "\n***********************************\n")
    cat(paste("*********** order", i, "***********\n"))
    cat( "***********************************\n\n")
    ## NOTE(review): the merged `params` list built above is never used here;
    ## the untouched user list `gurobi.params` is forwarded instead -- confirm
    ## whether gurobi_MIParray repeats the merging or `params` was intended.
    if (is.null(forced))
    plan <- try(gurobi_MIParray(nruns, l, resolution, maxtime=maxtime,
                          distinct=distinct, detailed=detailed,
                          find.only=find.only, nthread=nthread,
                          gurobi.params=gurobi.params))
    else
      plan <- try(gurobi_MIParray(nruns, l, resolution, maxtime=maxtime,
                                  distinct=distinct, detailed=detailed,
                                  forced=rearrange_forced(forced, levels.orig=nlevels, levels.new=l),
                                  find.only=find.only, nthread=nthread,
                                  gurobi.params=gurobi.params))
    if (listout) liste[[i]] <- plan
    cur <- Inf
    if (!"try-error" %in% class(plan)){
      ## integer-scaled GWLP (n^2 * A_k) of the candidate plan
      suppressWarnings({curGWLP <- round(GWLP(plan)*nruns^2)})  ## integer
      cur <- curGWLP[resolution + 1]
      if (cur < opt){
        opt <- cur
        optorder <- l
        planopt <- plan
        iGWLPopt <- curGWLP
        ## stop early if the solver proved optimality ...
        if (stopearly && !find.only){
          MIPinfo <- attr(plan, "MIPinfo")
          if (!"qco" %in% class(MIPinfo)){
          if (MIPinfo$stati[2]=="OPTIMAL"){
            message("optimum found")
            break
          }
          }
        }
        ## ... or if the theoretical lower bound has been reached
        if (stopearly && globopt==opt){
          message("optimum found")
          break
        }
      }
      else if (cur==opt && opt < Inf && nfac > resolution){
        # resolve ties by GWLP
        ## lexicographic comparison of the remaining word-length entries
        for (gg in (resolution+2):(nfac+1)){
          if (curGWLP[gg] > iGWLPopt[gg])
            break
          else if (curGWLP[gg] < iGWLPopt[gg]){
            opt <- cur
            optorder <- l
            planopt <- plan
            iGWLPopt <- curGWLP
            break
          }
        }
      }
    }
  }
  ## return the best plan, annotated with the ordering that produced it
  aus <- planopt
  attr(aus, "optorder") <- optorder
  if (listout) {
    attr(aus, "orders") <- ll
    attr(aus, "allplans") <- liste
  }
  aus
}
|
69e44b7425734165511a015e466b5549527238bb
|
73c273fdf85a99b3d6156986537cf82b0876fc5f
|
/man/accessions_by_spp.Rd
|
6a9657f5a1f7155034f6fcd21e2c7e9435d9183e
|
[
"MIT"
] |
permissive
|
NCBI-Hackathons/GeneHummus
|
e55ce7d1fd231db5516ffac039a329c255a68316
|
1fb36181760e0c1b91e65dd3cbd05af27010d8c4
|
refs/heads/master
| 2021-06-03T15:49:38.606418
| 2020-09-02T21:10:25
| 2020-09-02T21:10:25
| 131,613,965
| 8
| 3
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,203
|
rd
|
accessions_by_spp.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/accessions_by_spp.R
\name{accessions_by_spp}
\alias{accessions_by_spp}
\title{Compute the total number of accession proteins per species}
\usage{
accessions_by_spp(my_accessions)
}
\arguments{
\item{my_accessions}{A data frame with accession protein ids and organisms}
}
\value{
A \code{data.frame} of summarized results including columns:
\itemize{
\item organism, taxonomic species
\item N.seqs, total number of sequences
}
}
\description{
Summarizes a data frame of protein ids and returns the total number of accessions
per organism.
}
\examples{
my_prots = data.frame(accession = c("XP_014620925", "XP_003546066",
"XP_025640041", "XP_019453956", "XP_006584791", "XP_020212415",
"XP_017436622", "XP_004503803", "XP_019463844"),
organism = c("Glycine max", "Glycine max", "Arachis hypogaea",
"Lupinus angustifolius", "Glycine max", "Cajanus cajan",
"Vigna angularis", "Cicer arietinum", "Lupinus angustifolius"))
accessions_by_spp(my_prots)
}
\seealso{
\code{\link{getAccessions}} to create the data frame with accession
id and organism for each protein identifier.
}
\author{
Jose V. Die
}
|
f2dbe528f2d455bf77b67771ba8308204099adb5
|
1b5dd93ca968d80b0fbdac792708b7aa5efe0512
|
/ibd/run_hmmIBD_per_category.R
|
51687fdab6430de07072332cd05cfb1a4461ac70
|
[] |
no_license
|
amyibrahim/malaria-hub
|
d509c8a37093874bf7781864891022a35341ba91
|
60b4008e260f958de83ac64da9234dbf0721db31
|
refs/heads/master
| 2023-04-02T20:02:26.079437
| 2021-02-09T15:50:50
| 2021-02-09T15:50:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,578
|
r
|
run_hmmIBD_per_category.R
|
# Command-line driver: prepare and run hmmIBD for one category (country/region)
# of samples. This section loads dependencies and defines the CLI.
options(scipen = 999)
require(optparse)
require(scales)
require(data.table)
require(dplyr)
require(crayon)
# Project helpers (provides calculate_maf(), used below) — path is machine-specific.
source("~/software/malaria-hub/utils/helpers.R")
# CLI options: file paths, metadata column labels, thresholds, and the regex
# used to extract a numeric chromosome id from chromosome names.
option_list = list(
  make_option(c("-d", "--workdir"), type = "character", default = NULL,
              help = "Specify main directory",
              metavar = "character"),
  make_option(c("-b", "--binary_matrix"), type = "character", default = NULL,
              help = "Input filename of filtered binary matrix",
              metavar = "character"),
  make_option(c("-m", "--metadata"), type = "character", default = NULL,
              help = "Full dir to metadata file",
              metavar = "character"),
  make_option(c("-c", "--category"), type = "character", default = NULL,
              help = "Name of country/region",
              metavar = "character"),
  make_option(c("--label_category"), type = "character", default = "country",
              help = "Label name in metadata for category column",
              metavar = "character"),
  make_option(c("--label_fws"), type = "character", default = "fws",
              help = "Label name in metadata for fws column",
              metavar = "character"),
  make_option(c("--fws_th"), type = "numeric", default = 0.95,
              help = "Fws threshold",
              metavar = "number"),
  make_option(c("--label_id"), type = "character", default = "sra_run",
              help = "Label name in metadata for id column",
              metavar = "character"),
  make_option(c("--maf"), type = "numeric", default = 0.01,
              help = "MAF threshold [default %default]",
              metavar = "number"),
  make_option(c("--na_char"), type = "character", default = "NA",
              help = "Specify NA characters",
              metavar = "character"),
  make_option(c("-t", "--threads"), type = "integer", default = 4,
              help = "Specify threads number",
              metavar = "numeric"),
  make_option(c("--remove_chr"), type = "character", default = NULL,
              help = "Chromosomes to remove ex. Pf3D7_API_v3,Pf_M76611",
              metavar = "character"),
  make_option("--regex_chr", type = "character", default = "(.*?)_(.+)_(.*)",
              help = "Regex pattern for chromosome detection. Default matches Pf3D7_01_v3",
              metavar = "character"),
  make_option("--regex_groupid", type = "numeric", default = 3,
              help = "Regex pattern group",
              metavar = "numeric")
);
opt_parser = OptionParser(option_list = option_list);
opt = parse_args(opt_parser);
# Unpack parsed CLI options into plain variables.
# Working directory
workdir <- opt$workdir
# Binary matrix file name
bin_mat_file <- opt$binary_matrix
# Metadata file name
met_file <- opt$metadata
# Category
category <- opt$category
# Metadata field with category groupings
label_category <- opt$label_category
# Metadata field with sample ids
label_id <- opt$label_id
# Metadata field with fws information
label_fws <- opt$label_fws
# Fws threshold
threshold_fws <- opt$fws_th
# MAF threshold
th_maf <- opt$maf
# Threads number
threads <- opt$threads
# Missing calls character
na_char <- opt$na_char
# Remove chromosomes
rm_chr <- opt$remove_chr
# Pattern for chromosome detection
pattern <- opt$regex_chr
# Pattern group
groupid <- opt$regex_groupid
# Specify threads number for data.table operations
setDTthreads(threads)
# Load metadata (tab-separated) and validate that the category column exists.
metadata <- read.csv(met_file, sep = "\t", header = TRUE, stringsAsFactors = FALSE)
if (label_category %in% colnames(metadata)) {
  av_category <- metadata %>%
    select(!!sym(label_category)) %>%
    distinct() %>% pull()
  message("Available category options: ", paste0(av_category, collapse = ", "))
  cat("\n")
} else {
  stop("Wrongly specified category. Column name does not exist in metadata file.\n")
}
# Load subset of SNP binary matrix for selected category; when no category is
# given, the full matrix is loaded instead.
if (!is.null(category)) {
  # NOTE(review): scalar condition uses elementwise `&`; `&&` would be
  # conventional here (behavior is the same for length-1 operands).
  if (length(category) == 1 & category %in% av_category) {
    message(category, " found, Processing...\n")
    metadata <- metadata %>% filter(!!sym(label_category) == category)
    # Keep the three locus columns plus only this category's sample columns.
    samples <- c("chr", "pos", "ref", (metadata %>% select(!!sym(label_id)) %>% pull() %>% as.vector()))
    snp <- fread(bin_mat_file, sep = "\t", select = samples,
                 header = TRUE, data.table = FALSE)
  } else {
    stop("Can not find " %+% bgBlue(category) %+% " category. Exitting...\n")
  }
} else {
  message("Category not specified. Processing for all...\n")
  snp <- fread(bin_mat_file, sep = "\t", header = TRUE, data.table = FALSE)
}
# Trim category name (spaces -> underscores) for use in output filenames.
category_str <- as.character(gsub(" ", "_", category))
# Filter unwanted chromosomes (comma-separated list) from the matrix.
if (!is.null(rm_chr)) {
  rm_chr <- strsplit(rm_chr, ",")[[1]]
  if (all(rm_chr %in% unique(snp$chr))) {
    snp <- snp %>% filter(!chr %in% rm_chr)
  } else {
    stop("Wrong name for chromosomes to remove. Stopping...")
  }
} else {
  message("None chromosomes removed. Api and mito should be removed!")
}
# Transform chromosome name from string to numeric via the CLI regex/group.
snp$chr <- as.numeric(stringr::str_match(snp$chr, pattern)[, groupid])
# Check that samples match between binary matrix and metadata file
# (columns 1:3 of snp are chr/pos/ref, hence the -c(1:3)).
metadata <- metadata %>% filter(!!sym(label_id) %in% colnames(snp[, -c(1:3)]))
if (all(metadata[[label_id]] == colnames(snp[, -c(1:3)]))) {
  message("Matrix matches metadata. Proceeding...")
}
# Create input file for hmmIBD
# Recode missing data ("." and the configured NA string) to NA first.
if (!is.na(na_char)) {
  snp[snp == na_char] <- NA
  snp[snp == "."] <- NA
}
# Reformat matrix to fit hmmIBD format: 0.5 calls (presumably heterozygous —
# TODO confirm) become 1, missing becomes -1.
snp_hmmibd <- snp
snp_hmmibd <- snp_hmmibd[, -3]
snp_hmmibd[snp_hmmibd == 0.5] <- 1
snp_hmmibd[is.na(snp_hmmibd)] <- -1
colnames(snp_hmmibd)[1:2] <- c("chrom", "pos")
# Split into a locus "legend" (chrom/pos) and the genotype columns.
snp_hmmibd_1 <- snp_hmmibd[, 1:2]
snp_hmmibd_2 <- snp_hmmibd[, -c(1:2)]
snp_hmmibd_2[] <- lapply(snp_hmmibd_2, as.numeric)
rm(snp, snp_hmmibd)
# Write intermediate matrices to the working directory.
snp_hmmibd_02_1 <- snp_hmmibd_1
write.table(snp_hmmibd_02_1,
            file.path(workdir, "ibd_matrix_hap_leg.tsv"),
            quote = FALSE, col.names = TRUE, row.names = FALSE, sep = "\t")
snp_hmmibd_02_2 <- snp_hmmibd_2
write.table(snp_hmmibd_02_2,
            file.path(workdir, sprintf("ibd_matrix_hap_%s.tsv", category_str)),
            quote = FALSE, col.names = TRUE, row.names = FALSE, sep = "\t")
# Run hmmIBD for population
message(category)
# Calculate MAF (calculate_maf comes from the sourced helpers script).
maf_sti <- calculate_maf(snp_hmmibd_02_2)
to_keep_sti <- which(maf_sti >= th_maf)
# Select samples of category with Fws >= threshold_fws (monoclonal samples).
fws_samples <- metadata %>%
  filter(!!sym(label_fws) >= threshold_fws) %>%
  select(!!sym(label_id)) %>%
  pull() %>%
  as.vector()
# Keep SNPs that passed the MAF threshold (rows) ...
snp_hmmibd_02_2 <- snp_hmmibd_02_2[to_keep_sti, ]
snp_hmmibd_leg <- snp_hmmibd_02_1[to_keep_sti, ]
# ... and samples that passed the Fws threshold (columns).
snp_hmmibd_02_2 <- snp_hmmibd_02_2 %>% select(all_of(fws_samples))
snp_hmmibd_merged <- cbind(snp_hmmibd_leg, snp_hmmibd_02_2)
# Write the final hmmIBD input matrix for this category to file.
write.table(format(snp_hmmibd_merged, digits = 0),
            file.path(workdir, sprintf("hmmIBD_%s_maf%s.txt", category_str, as.character(th_maf))),
            sep = "\t", quote = FALSE, row.names = FALSE)
# Run the external hmmIBD binary and capture its console output as a log.
string_i <- file.path(workdir,
                      sprintf("hmmIBD_%s_maf%s.txt", category_str, as.character(th_maf)))
string_o <- file.path(workdir,
                      sprintf("hmmIBD_%s_maf%s_out", category_str, as.character(th_maf)))
output <- system(command = sprintf("~/software/hmmIBD/hmmIBD -i %s -o %s", string_i, string_o), intern = TRUE)
write.table(output, file.path(workdir,
                              sprintf("hmmIBD_run_%s.log", category_str)), quote = FALSE, col.names = FALSE, row.names = FALSE)
|
0103e81d1a7565fb7fefab82aeb6d533a7529699
|
6492055107a6d56d4b5eeb9c778f20dcf614cc3c
|
/Plots/NMDS2.R
|
65a78e664f7df8baf318b69476b6c68ceddfd954
|
[] |
no_license
|
qmbautista/Bautista-de-los-Santos_EnvSci_2015
|
e03956f20d2d79f6e403b5aa81d6f08513be4dca
|
318d46d22a8b67f699cbc82604c6457b1064e9a8
|
refs/heads/master
| 2021-01-22T10:02:05.221421
| 2015-10-18T18:23:55
| 2015-10-18T18:23:55
| 42,651,704
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,467
|
r
|
NMDS2.R
|
#NMDS of metabolic potential (output from Tax4Fun)
#Input Tax4Fun phylotype table was subsampled to 11587 reads
#Using Bray Curtis and Jaccard distances
#From: http://userweb.eng.gla.ac.uk/umer.ijaz/bioinformatics/ecological.html
library(ggplot2)
library(vegan)
# Read the subsampled phylotype table (samples x features) and convert counts
# to relative abundance per sample.
shared_subsampled<-read.table("nmds_input2.txt", header=TRUE, row.names=1)
shared_subsampled_percent<-shared_subsampled/(rowSums(shared_subsampled))
# Sample covariates; columns used below: 2 = Location, 3 = Disinfection,
# 4 = Country, 5 = Platform.
Env<-read.table("En4_final3.txt", header=TRUE)
# Two 2-D NMDS ordinations: Bray-Curtis on abundances, Jaccard on presence/absence.
sol1<-metaMDS(shared_subsampled_percent, distance = "bray", k = 2, trymax = 50)
sol2<-metaMDS(shared_subsampled_percent, distance = "jaccard", binary=TRUE, k = 2, trymax = 50)
# Collect NMDS coordinates plus grouping factors into plotting data frames.
NMDS_bray=data.frame(x=sol1$point[,1],y=sol1$point[,2], Location=as.factor(Env[,2]), Disinfection=as.factor(Env[,3]), Country=as.factor(Env[,4]), Platform=as.factor(Env[,5]))
NMDS_jaccard=data.frame(x=sol2$point[,1],y=sol2$point[,2], Location=as.factor(Env[,2]), Disinfection=as.factor(Env[,3]), Country=as.factor(Env[,4]), Platform=as.factor(Env[,5]))
######################Ellipses for Bray Curtis plot ###############################
# ordiellipse() draws to the active device, so open a throwaway one just to
# capture the 95% sd-ellipse parameters per Disinfection group.
plot.new()
ord1<-ordiellipse(sol1, as.factor(Env[,3]) ,display = "sites", kind ="sd", conf = 0.95, label = T)
dev.off()
veganCovEllipse <- function(cov, center = c(0, 0), scale = 1, npoints = 100) {
  # Outline of a covariance ellipse: a closed unit circle (npoints + 1 vertices,
  # first point repeated last) is mapped through the Cholesky factor of `cov`,
  # then scaled and shifted to `center`. Returns an (npoints + 1) x 2 matrix.
  theta <- (0:npoints) * 2 * pi/npoints
  unit_circle <- cbind(cos(theta), sin(theta))
  shaped <- unit_circle %*% chol(cov)
  # Add the center coordinates column-wise (x to column 1, y to column 2).
  sweep(scale * shaped, 2, center, FUN = "+")
}
# Build a data frame of ellipse outline coordinates, one closed polygon per
# non-empty Disinfection group present in ord1.
df_ell1 <- data.frame()
for(g in levels(NMDS_bray$Disinfection)){
  if(g!="" && (g %in% names(ord1))){
    df_ell1 <- rbind(df_ell1, cbind(as.data.frame(with(NMDS_bray[NMDS_bray$Disinfection==g,],
                    veganCovEllipse(ord1[[g]]$cov,ord1[[g]]$center,ord1[[g]]$scale)))
                    ,Disinfection=g))
  }
}
# Group centroids (not used by the plots below) and a pool of point shapes.
NMDS1.mean=aggregate(NMDS_bray[,1:2],list(group=NMDS_bray$Disinfection),mean)
shape_values<-seq(1,11)
#######################################################################################
######################Ellipses for Jaccard plot ######################################
# Same throwaway-device trick to capture ellipse parameters for the Jaccard NMDS.
plot.new()
ord2<-ordiellipse(sol2, as.factor(Env[,3]) ,display = "sites", kind ="sd", conf = 0.95, label = T)
dev.off()
# NOTE(review): this redefinition of veganCovEllipse is identical to the one
# defined above (copy-pasted along with the Jaccard section); it is redundant
# but harmless.
veganCovEllipse<-function (cov, center = c(0, 0), scale = 1, npoints = 100)
{
  # Closed unit circle mapped through chol(cov), scaled, and shifted to center.
  theta <- (0:npoints) * 2 * pi/npoints
  Circle <- cbind(cos(theta), sin(theta))
  t(center + scale * t(Circle %*% chol(cov)))
}
# Ellipse outlines per Disinfection group for the Jaccard ordination.
df_ell2 <- data.frame()
for(g in levels(NMDS_jaccard$Disinfection)){
  if(g!="" && (g %in% names(ord2))){
    df_ell2 <- rbind(df_ell2, cbind(as.data.frame(with(NMDS_jaccard[NMDS_jaccard$Disinfection==g,],
                    veganCovEllipse(ord2[[g]]$cov,ord2[[g]]$center,ord2[[g]]$scale)))
                    ,Disinfection=g))
  }
}
# Group centroids (not used by the plots below) and a pool of point shapes.
NMDS2.mean=aggregate(NMDS_jaccard[,1:2],list(group=NMDS_jaccard$Disinfection),mean)
shape_values<-seq(1,11)
#######################################################################################
# Bray-Curtis NMDS: points colored by Disinfection, shaped by Country, with
# dashed 95% ellipses; written to PDF.
NMDS_bray_1<-ggplot(data=NMDS_bray,aes(x,y, colour=Disinfection))
NMDS_bray_1<-NMDS_bray_1+geom_path(data=df_ell1, aes(x=NMDS1, y=NMDS2), size=1, linetype=2)
NMDS_bray_1<-NMDS_bray_1+geom_point(aes(shape=Country), size=4)+theme_bw()+ylab("NMDS2")+xlab("NMDS1")
pdf("NMDS_bray_ell.pdf")
print(NMDS_bray_1)
dev.off()
# Same plot for the Jaccard ordination.
NMDS_jaccard_1<-ggplot(data=NMDS_jaccard,aes(x,y, colour=Disinfection))
NMDS_jaccard_1<-NMDS_jaccard_1+geom_path(data=df_ell2, aes(x=NMDS1, y=NMDS2), size=1, linetype=2)
NMDS_jaccard_1<-NMDS_jaccard_1+geom_point(aes(shape=Country), size=4)+theme_bw()+ylab("NMDS2")+xlab("NMDS1")
pdf("NMDS_jaccard_ell.pdf")
print(NMDS_jaccard_1)
dev.off()
|
eaea63a246e07459c8e74154e7283100d0340cc8
|
6d5efbc79c352e2c4adc525d9ce72ff4401ff1de
|
/R/common.R
|
7c8ce52004dca38c723377febc83109b22365e48
|
[] |
no_license
|
nicholasjhorton/textclassificationexamples
|
d86e96c2cd0071220efb30338cc54f44b4c7d72c
|
cb138e24665c5a321c9bc93147d0305912b2ad7a
|
refs/heads/master
| 2022-11-19T09:59:30.028888
| 2020-07-21T17:38:00
| 2020-07-21T17:38:00
| 274,704,594
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 170
|
r
|
common.R
|
# Roxygen documentation for the packaged `common` dataset.
# TODO(review): the "XX" placeholders below (description and @source) still
# need real content before release.
#' Common Clickbait Phrases
#'
#' XX
#'
#' @format A data frame with 184 rows and 1 variable:
#' \describe{
#'   \item{phrases}{String}
#' }
#' @source \url{XX}
"common"
|
c2cfd22602b95c4071791b6dd57ca3b7d370f979
|
b2f61fde194bfcb362b2266da124138efd27d867
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/A1/Database/Miller-Marin/trafficlight-controller/tlc02-uniform-depth-175/tlc02-uniform-depth-175.R
|
ccc93b486a6ef602f1edea2a233954154bdee9d2
|
[] |
no_license
|
arey0pushpa/dcnf-autarky
|
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
|
a6c9a52236af11d7f7e165a4b25b32c538da1c98
|
refs/heads/master
| 2021-06-09T00:56:32.937250
| 2021-02-19T15:15:23
| 2021-02-19T15:15:23
| 136,440,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 77
|
r
|
tlc02-uniform-depth-175.R
|
922c20b5845064fc81206888ce01773e tlc02-uniform-depth-175.qdimacs 40305 106296
|
9e10e8cbf9a430dfab194d90a6e6ab431bc43fe5
|
69c7e900c9894c61f433dc9c445bd636342a6af8
|
/sac1.R
|
65a291ef74e976e8c1f6910b9c087c72edcb7fa6
|
[] |
no_license
|
ZugenLiu/community-detection
|
c8eb85b7cb7711fddb1d6b99ed6de9bbcd665e58
|
6872fc0adb7944997d0140d32455ec6938c7cd94
|
refs/heads/master
| 2020-12-30T11:52:39.304384
| 2016-04-05T01:01:23
| 2016-04-05T01:01:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,438
|
r
|
sac1.R
|
library(igraph)
library(lsa)
# NOTE(review): hard-coded, machine-specific Windows paths — parameterize
# before running elsewhere. Loads the edge list of the network and the
# per-vertex attribute table (324 vertices are assumed below — TODO confirm).
graph=read_graph("C:/Users/viswa/Desktop/project/data/1.txt",format= c("edgelist"))
attribute_data <- read.csv("C:/Users/viswa/Desktop/project/data/1.csv",header = TRUE)
#SC1 Algorithm
#Cosine update of the function
cosine_update <- function(k, memebership, values, x)
{
  # Mean cosine similarity (lsa::cosine) between vertex k's attribute row and
  # the attribute rows of every vertex currently carrying the community label
  # `memebership`. (Parameter names keep the original spelling so positional
  # and named callers are unaffected.)
  members <- which(values == memebership)
  attr_k <- as.numeric(x[k, ])
  total <- 0
  for (idx in members) {
    total <- total + cosine(attr_k, as.numeric(x[idx, ]))
  }
  total / length(members)
}
#Phase 1 of Sac1 Algorithm
# Repeatedly sweeps over all vertices (up to 15 sweeps), moving each vertex to
# the neighboring community that maximizes a weighted blend of modularity gain
# (weight alpha) and mean attribute cosine similarity (weight 1 - alpha).
# Stops early when a full sweep changes no labels. Returns the membership vector.
phase1 <- function(graph,mapped_values=c(1:324),alpha,y=attribute_data){
  for(k in 1:15)
  {
    x=mapped_values  # snapshot to detect convergence after the sweep
    for(i in 1:vcount(graph))
    {
      index <- 0  # best candidate community found so far (0 = none)
      max <- 0    # best blended score found so far
      n <- neighbors(graph, i)
      # Candidate communities: those of vertex i's neighbors.
      for(j in unique(mapped_values[n]))
      {
        # Modularity gain of tentatively moving vertex i into community j.
        membership1=mapped_values
        mi=modularity(graph,membership1)
        membership1[i]=j
        ni=modularity(graph,membership1)
        cosine_x <- (1-alpha)*(cosine_update(i,j,mapped_values,y))+(alpha)*(ni-mi)
        # NOTE(review): this compares vertex index i with community label j —
        # presumably intended to skip "no-op" moves; verify the intent.
        if(i!=j && cosine_x > max){
          index <- j
          max <- cosine_x
        }
      }
      if(index !=0){
        mapped_values[i] <- index
      }
    }
    # Converged: no label changed during this sweep.
    if(isTRUE(all.equal(x,mapped_values)))
    {
      break
    }
    x=mapped_values
  }
  mapped_values
}
#Phase2 of sac1 algorithm
#By changing the value of alpha we can get different communities
# Alternates phase1 with community contraction (up to 15 rounds) until the
# membership vector stabilizes, then writes one comma-separated line of vertex
# ids per community to communities.txt.
sac1 <- function(alpha){
  mapped_communities <- phase1(graph,alpha=alpha,mapped_values = c(1:324))
  x=mapped_communities
  for(h in 1:15)
  {
    # Collapse each community into a single vertex and simplify the result.
    g2 <- contract.vertices(graph, mapped_communities)
    g3 <- simplify(g2, remove.multiple = TRUE, remove.loops = TRUE)
    mapped_communities <- phase1(g3,mapped_communities,alpha,attribute_data)
    if(isTRUE(all.equal(x,mapped_communities)))
    {
      break
    }
    x=mapped_communities
  }
  #writing to file
  fileConn<-file("communities.txt","w")
  for(i in 1:length(unique(mapped_communities)))
  {
    community <- vector("numeric")
    for(j in 1:324)
    {
      if(mapped_communities[j]==unique(mapped_communities)[i]){
        community <- append(community,j,after = length(community))
      }
    }
    cat(as.character(community), file=fileConn,sep = ",")
    cat("\n", file=fileConn)
  }
  close(fileConn)
}
# Entry point: read alpha from the command line and run the algorithm.
args <- commandArgs(trailingOnly = TRUE)
sac1(alpha = as.numeric(args[1]))
|
f2d67f4167534d13a20f23bd006c30b4030b185f
|
4cd339b04f1fe93cd79bdbba27a7c0d9a304a3b9
|
/1 ActualizarR.R
|
5208113d9f37f9dff5857a3979336579f720dfff
|
[] |
no_license
|
rmunoz98/Metodos_estadisticos_biogeo
|
2f522f78529c4da3746abe85ab1fbc13ecc7e873
|
ee042189a090cd638ce7198c788b60a299484c32
|
refs/heads/master
| 2021-01-18T18:49:57.209005
| 2016-07-11T16:03:14
| 2016-07-11T16:04:11
| 63,081,597
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 603
|
r
|
1 ActualizarR.R
|
# Install the required packages
install.packages("stringr")
install.packages("installr")
# Load the package
require(installr)
# Package help
#help("installr")
# Check whether new versions of R are available
check.for.updates.R()
# Install and run the latest version of R
# NOTE: after updating, packages must be reinstalled
# before the commands of this script can be run again
install.R()
# Copy packages from the previous library to the new library;
# in some cases the program must be closed and reopened
# before the libraries are visible again
copy.packages.between.libraries()
|
7287bc6fa875d16d181ec1c5bfb755b8c073af7c
|
fb917906af7ed0f22b3508e62670be115d90d4c6
|
/plot4.R
|
a91cb6e56030511f949c4a75920cebb592e07fcf
|
[] |
no_license
|
mbelletato/ExData_Plotting1
|
6daf35fa0604c2e6a6df59c2f640cda958e3a26a
|
14b4f67148aa70fcff10f43eca622df541656c66
|
refs/heads/master
| 2021-01-18T11:19:12.215547
| 2014-05-12T00:09:57
| 2014-05-12T00:09:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,407
|
r
|
plot4.R
|
## Coursera : Exploratory Data Analysis
## @Author: mbelletato
## Course Project 1
## plot4.r
## Reads a fixed 2880-row window of the power-consumption file (presumably two
## specific days — verify skip/nrows if the input file changes) and draws a
## 2x2 panel of time-series plots saved to plot4.png.
# header=TRUE (not the reassignable shorthand T); "?" marks missing values.
Data <- read.table(file = "household_power_consumption.txt",
                   header=TRUE, sep=";", na.strings="?", skip=66636,
                   nrows=2880,
                   col.names=c("Date", "Time", "Global_active_power",
                               "Global_reactive_power", "Voltage",
                               "Global_intensity", "Sub_metering_1",
                               "Sub_metering_2", "Sub_metering_3")
)
# Combine the Date and Time columns into a single POSIXlt timestamp column.
Data$Date <- as.Date(Data$Date, format="%d/%m/%Y")
DateTime <- strptime(paste(Data$Date, Data$Time), "%Y-%m-%d %H:%M:%S")
Data <- cbind(Data, DateTime)
png(file = "plot4.png")
par(mfrow=c(2, 2))
# Top-left: global active power over time.
plot(Data$DateTime , Data$Global_active_power, xlab="",
     ylab="Global Active Power", type="l")
# Top-right: voltage over time.
plot(Data$DateTime , Data$Voltage, xlab="datetime", ylab="Voltage", type="l")
# Bottom-left: the three sub-metering series overlaid with a legend.
plot(Data$DateTime, Data$Sub_metering_1, type = "n", xlab="", ylab="Energy sub metering")
points(Data$DateTime, Data$Sub_metering_1, col="black", type="l")
points(Data$DateTime, Data$Sub_metering_2, col="red", type="l")
points(Data$DateTime, Data$Sub_metering_3, col="blue", type="l")
legend("topright", lwd=1, col=c("black", "red", "blue"), legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# Bottom-right: global reactive power over time.
plot(Data$DateTime , Data$Global_reactive_power, xlab="datetime", type="l")
dev.off()
|
c001abd8e360da0db4af3285897b0d0dbdb24d0b
|
7cfac30af2947ff10691175c6eba4c9c449af14b
|
/R/reg_RSE.R
|
973e3e7610485d73bd17f5cb645100309f1c02f1
|
[
"MIT"
] |
permissive
|
adriancorrendo/metrica
|
69257453e4f8be3c5357951e546012203f786b26
|
a5ca847f5b6dc85e89fb23988258e864fc26a033
|
refs/heads/master
| 2023-08-22T11:23:12.808032
| 2023-04-14T04:30:35
| 2023-04-14T04:30:35
| 414,721,777
| 69
| 9
|
NOASSERTION
| 2023-03-06T20:32:37
| 2021-10-07T18:52:35
|
R
|
UTF-8
|
R
| false
| false
| 1,806
|
r
|
reg_RSE.R
|
#' @title Relative Squared Error (RSE)
#' @name RSE
#' @description It estimates the RSE for a continuous predicted-observer dataset.
#' @param data (Optional) argument to call an existing data frame containing the data.
#' @param obs Vector with observed values (numeric).
#' @param pred Vector with predicted values (numeric).
#' @param tidy Logical operator (TRUE/FALSE) to decide the type of return. TRUE
#' returns a data.frame, FALSE returns a list; Default : FALSE.
#' @param na.rm Logic argument to remove rows with missing values
#' (NA). Default is na.rm = TRUE.
#' @return an object of class `numeric` within a `list` (if tidy = FALSE) or within a
#' `data frame` (if tidy = TRUE).
#' @details The RSE is the ratio between the residual sum of squares (RSS, error of
#' predictions with respect to observations) and the total sum of squares (TSS,
#' error of observations with respect to its mean). RSE is dimensionless, so it can be
#' used to compared models with different units.
#' For the formula and more details, see [online-documentation](https://adriancorrendo.github.io/metrica/articles/available_metrics_regression.html)
#' @examples
#' \donttest{
#' set.seed(1)
#' X <- rnorm(n = 100, mean = 0, sd = 10)
#' Y <- X + rnorm(n=100, mean = 0, sd = 3)
#' RSE(obs = X, pred = Y)
#' }
#' @rdname RSE
#' @importFrom rlang eval_tidy quo
#' @export
RSE <- function(data = NULL,
                obs,
                pred,
                tidy = FALSE,
                na.rm = TRUE){
  # Residual sum of squares: squared prediction errors, evaluated with tidy
  # evaluation so `obs`/`pred` may be bare column names of `data`.
  RSS <- rlang::eval_tidy(
    data = data,
    rlang::quo(sum(({{obs}}-{{pred}})^2) ) )
  # RSE = RSS / total sum of squares (observations around their mean).
  # NOTE(review): `na.rm` is documented but not applied in these sums —
  # confirm the intended behavior with missing values.
  RSE <- rlang::eval_tidy(
    data = data,
    rlang::quo(
      RSS / sum(({{obs}}-mean({{obs}}))^2)
    )
  )
  # isTRUE() makes the branch robust: the previous `tidy==TRUE`/`tidy==FALSE`
  # pair errored on tidy = NA and silently returned NULL for any other
  # non-logical value.
  if (isTRUE(tidy)) {
    as.data.frame(RSE)
  } else {
    list("RSE" = RSE)
  }
}
|
d1efb62b139a1a5e3ef524f47cc23a5ed6538a4b
|
a47ce30f5112b01d5ab3e790a1b51c910f3cf1c3
|
/output/sources/authors/2392/pln/nrbcpln.R
|
7a3ec002b447cff5abe203ff7f2b10a5030577e1
|
[] |
no_license
|
Irbis3/crantasticScrapper
|
6b6d7596344115343cfd934d3902b85fbfdd7295
|
7ec91721565ae7c9e2d0e098598ed86e29375567
|
refs/heads/master
| 2020-03-09T04:03:51.955742
| 2018-04-16T09:41:39
| 2018-04-16T09:41:39
| 128,578,890
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,465
|
r
|
nrbcpln.R
|
## Top-level fitting routine: validates input, builds starting values when not
## supplied, runs the Newton-Raphson fit (compiled C routine, via nrbcpln.func)
## and optionally appends standard errors. Helper functions (check.input,
## check.alphas, check.betas, check.bounds, startpln.func, startbetas.func)
## are defined elsewhere in the package.
nrbcpln<- function (x, ncat, nitem=NULL, alphas=NULL, betas=NULL, abound=c(-10,10),
bbound=c(-1,10), nq=48, mxiter=200, se=TRUE, iprint=FALSE) {
  myInput<-check.input(x, ncat, nitem, nq, mxiter, iprint)
  ## get starting values if not present already
  if(!check.alphas(alphas, myInput$nitem, myInput$ncat)){
    alphas<-startpln.func(myInput$nitem, myInput$ncat, myInput$nrec, myInput$myX)$alphas
  }
  ## prep betas
  if(!check.betas(betas, myInput$nitem)){
    betas<-startbetas.func(myInput$myX)
  }
  ## check bounds
  abound<-check.bounds(alphas, abound)
  bbound<-check.bounds(betas, bbound)
  ## run the compiled Newton-Raphson fit
  nrbcplnout <- nrbcpln.func(myInput$ncat, myInput$nitem, myInput$nrec, myInput$myX, alphas,
      betas, abound, bbound, myInput$nq, myInput$mxiter, myInput$iprint)
  ## unpack the stacked parameter vector: first (ncat-1)*nitem entries are the
  ## alphas, the remaining nitem entries are the betas
  alphas<-nrbcplnout$bcpln[1:((myInput$ncat-1)*myInput$nitem)]
  betas<-nrbcplnout$bcpln[((myInput$ncat-1)*myInput$nitem+1):(myInput$ncat*myInput$nitem)]
  ## nllk is presumably the (negative) log-likelihood at the optimum, conv the
  ## convergence flag from the C routine — TODO confirm against the C source.
  out<-list(alphas=alphas,betas=betas,nllk=nrbcplnout$nrbcpln,conv=nrbcplnout$iconv)
  if(se){
    ## asymptotic covariance matrix -> standard errors, appended to the result
    nrbcplncov<-nrbcplncov.func(myInput$nitem, myInput$ncat, myInput$nrec, alphas, betas,
        myInput$N, myInput$nq, myInput$iprint)
    V<-matrix(nrbcplncov$V, nrow=myInput$nitem*myInput$ncat,ncol=myInput$nitem*myInput$ncat)
    seVec<-sqrt(diag(V))
    sealphas<-seVec[1:((myInput$ncat-1)*myInput$nitem)]
    sebetas<-seVec[((myInput$ncat-1)*myInput$nitem+1):(myInput$ncat*myInput$nitem)]
    out<-append(out,list(sealphas=sealphas, sebetas=sebetas, vcov=V))
  }
  return(out)
}
## Thin wrapper around the compiled routine "Rnrbcpln". Argument order in the
## .C call must match the C signature exactly; outputs are returned through the
## nbcplnout (objective value), bcplnout (stacked parameter vector of length
## ncat*nitem) and iconv (convergence flag) slots.
nrbcpln.func<-function(ncat, nitem, nrec, myX, alphas, betas, abound, bbound, nq,
    mxiter, iprint){
  nrbcplnout<-0
  iconv<-0
  np<-ncat*nitem
  bcplnout<-rep(0,np)
  out <- .C("Rnrbcpln",
      as.integer(nitem), as.integer(ncat), as.integer(nrec), as.double(myX),
      as.double(alphas), as.double(betas), as.double(abound), as.double(bbound),
      nbcplnout=as.double(nrbcplnout), bcplnout=as.double(bcplnout), as.integer(nq),
      as.integer(mxiter), iconv=as.integer(iconv), as.integer(iprint))
  list(nrbcpln=out$nbcplnout,bcpln=out$bcplnout, iconv=out$iconv)
}
## asymptotic covariance matrix
## Wrapper around the compiled routine "Rbclcov": passes the stacked parameter
## vector c(alphas, betas) and receives the flattened (nitem*ncat)^2 covariance
## matrix back in the V slot (reshaped by the caller).
nrbcplncov.func<-function(nitem, ncat, nrec, alphas, betas, N, nq, iprint) {
  V<-rep(0,nitem*ncat*nitem*ncat)
  params<-c(alphas,betas)
  out<-.C("Rbclcov",
      as.integer(nitem), as.integer(ncat), as.integer(N),
      as.double(params), V=as.double(V), as.integer(nq), as.integer(iprint))
  return(out)
}
|
c18e34addadd8d2dfce33b92a1e9a551bd14a8d7
|
8721a65ead5cbfe12ba224aedda27e532a5be289
|
/R/map_qc.R
|
1bb987b906e4b14955da0ba45946d021b43a8e2b
|
[] |
no_license
|
cran/mapfuser
|
d482e861ed76062303bf99151b67be85752cee29
|
89fb804542f627ae4a05b27f100c66199e3d8b5a
|
refs/heads/master
| 2021-07-09T22:10:44.228292
| 2017-10-10T09:13:54
| 2017-10-10T09:13:54
| 106,401,446
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,561
|
r
|
map_qc.R
|
# map_qc
#' Wrapper function of genetic map cleaning per linkage group.
#'
#' The raw data is first splitted to separate linkage groups if sublinkage groups exist (e.g LG 1.1 and 1.2).
#' Subsequently a graph is created from the adjacency matrix that counts the number of overlapping markers between the set of genetic maps.
#' Calculations are performed for each chromosome separately. Taken quality control steps are printed to the console and can be visualised using the plot function.
#' @param MF.obj A mapfuser object genetics maps loaded and optionally a reference map
#' @param anchors Number of minimum overlapping anchors marker between at least one other genetic map. At least 3 are required.
#' @return The input object is returned with filled QC slot containing genetic maps after quality control. Used parameters and inverted or names of removed data are saved to the config slot.
#' @author Dennis van Muijen
#' @examples
#' \dontshow{
#' fpath <- system.file("extdata", package="mapfuser")
#' maps <- list.files(fpath, pattern = "-1", full.names = TRUE)
#' MF.obj <- read_maps(mapfiles = maps, sep = ",", header = TRUE, type = "delim")
#' MF.obj <- map_qc(MF.obj)
#' }
#' \dontrun{
#' MF.obj <- map_qc(MF.obj = MF.obj, anchors = 3)
#' #Graphical overview of how different genetic maps are connected by overlapping markers
#' plot(MF.obj, which = "mapnetwork", chr = 1) ## Multiple chromosomes not supported
#' ## A minimal spanning tree using the number of anchors as edge weight,
#' plot(MF.obj, which = "mst", chr = 1)
#' #Visualize inverted maps
#' plot(MF.obj, which = "genetic_maps", maps = c("Col-0_Cvi-0.csv","Col-0_Sha.csv"), chr = 1:3)
#' }
#' @export
map_qc <- function(MF.obj, anchors = 3) {
  # Record the QC parameters on the object before validation (as before).
  MF.obj$config$n.anchors <- anchors
  MF.obj$config$cohesion <- FALSE
  # inherits() is the robust class test: the previous `class(MF.obj) !=
  # "mapfuser"` comparison misbehaves when an object carries several classes.
  if (!inherits(MF.obj, "mapfuser")) {
    stop("Object should be of class mapfuser")
  }
  if (is.null(MF.obj$raw_data)) {
    stop("Load map(s) to mapfuser object first using read.maps")
  }
  # NOTE(review): the docs state at least 3 anchors are required, but the value
  # is not validated here — presumably check_anchors() enforces it; confirm.
  MF.obj$config$LPmerge_ready <- FALSE
  # Split maps to (sub) linkage group for LPmerge integration
  MF.obj <- map_split(MF.obj)
  # Check for sufficient anchors to orient and integrate maps
  MF.obj <- check_anchors(MF.obj, anchors = anchors)
  # Check if there are sets of maps that cannot be integrated
  MF.obj <- map_cohesion(MF.obj)
  # Correct map orientations only when the map set is cohesive
  if (MF.obj$config$cohesion) {
    MF.obj <- map_orient(MF.obj)
    MF.obj$config$LPmerge_ready <- TRUE
    cat("QC Finished")
  }
  return(MF.obj)
}
|
53cf63afbf85acd82a95e5da84df657d97780149
|
12d6bba3fb62d3569bf606e9ddd7a817be141577
|
/plot4.R
|
45924bdac41519f1a7f590dfd08b3526d4f8d64c
|
[] |
no_license
|
saurabh27/ExData_Plotting1
|
93e0187a175b44538da614a70afb9f359026478a
|
31aeaf401ae57300f4932f0d9fc490ce415f6015
|
refs/heads/master
| 2021-01-21T07:30:25.723818
| 2014-05-09T23:28:12
| 2014-05-09T23:28:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,465
|
r
|
plot4.R
|
# read in the complete dataset, would be better to filter first on
# the date field (e.g. using the grep program) but this will not
# work in Windows
# read the file assuming it is in the current working directory
# header=TRUE (not the reassignable shorthand T); "?" marks missing values.
e = read.table("household_power_consumption.txt", header = TRUE, sep=";", na.strings="?")
# now we select on the two days that we want to chart
# note that there are no leading zeroes in the day and months fields
e = e[e$Date == "1/2/2007" | e$Date == "2/2/2007",]
# now convert the first two columns from text to a Date/Time object using the format dd/mm/yyyy HH:MM:SS
e$Date = strptime(paste(e$Date, e$Time), "%d/%m/%Y %H:%M:%S")
# the fourth plot is a 2x2 pane plot
# create a png device to save the plot, 480x480 ist the default value, it's included
# here just for documentation purposes
png(filename = "plot4.png", width = 480, height = 480, units = "px")
par(mfrow=c(2,2))
# top left chart, the same as chart #2
plot(e$Global_active_power, ylab="Global Active Power", type="l", xaxt="n", xlab="")
# plot day of week on axis - since I don't want to deal with any locale
# issued fo the weekday abbreviation, I just use constants for this
axis(1, at=c(1,nrow(e)/2+1,nrow(e)),labels=c("Thu","Fri","Sat"))
# top right chart
plot(e$Voltage, ylab="Voltage", type="l", xaxt="n", xlab="datetime")
# plot day of week on axis - since I don't want to deal with any locale
# issued fo the weekday abbreviation, I just use constants for this
axis(1, at=c(1,nrow(e)/2+1,nrow(e)),labels=c("Thu","Fri","Sat"))
# bottom left chart, the same as chart #3
plot(e$Sub_metering_1, ylab="Energy sub metering", type="l", xaxt="n", col="black", xlab="")
lines (e$Sub_metering_2, col="red")
lines (e$Sub_metering_3, col="blue")
# there's no box around the legend in this version of the graph
legend("topright", col=c("black", "red", "blue"), lwd=1, bty="n",
       legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# plot day of week on axis - since I don't want to deal with any locale
# issued fo the weekday abbreviation, I just use constants for this
axis(1, at=c(1,nrow(e)/2+1,nrow(e)),labels=c("Thu","Fri","Sat"))
# bottom right chart
plot(e$Global_reactive_power, ylab="Global_reactive_power", type="l", xaxt="n", xlab="datetime")
# plot day of week on axis - since I don't want to deal with any locale
# issued fo the weekday abbreviation, I just use constants for this
axis(1, at=c(1,nrow(e)/2+1,nrow(e)),labels=c("Thu","Fri","Sat"))
dev.off()
|
484c288fe87c8313df571d50850ad0f91fc24266
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/AeRobiology/examples/ma.Rd.R
|
72bbe5a78ca03847645f38735cf9ebf6c3c5eeaa
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 178
|
r
|
ma.Rd.R
|
# Extracted example script for AeRobiology::ma (from the package's ma.Rd).
library(AeRobiology)
### Name: ma
### Title: Moving Average Calculator
### Aliases: ma
### ** Examples
# Munich pollen dataset shipped with the package.
data("munich")
# Moving average of the Betula series; man = 10 is the window-size parameter
# (see ?AeRobiology::ma — TODO confirm exact semantics), warnings suppressed.
ma(data = munich$Betula, man = 10, warnings = FALSE)
|
27cf5b928ba54646f342b4da31774cf345949777
|
6d320eec67f4fe8b608a379044cffc98ce027b09
|
/06_scripts/data_analysis/4_GLMM_Metabar.R
|
ceacfdce58175881254726f7572ed868ccd9fa3a
|
[] |
no_license
|
WynneMoss/Moss_amphibian_eDNA
|
46945cf7a8dbfbef8e1cb41a72f84af84e5d70d6
|
59b88e0d13a8a85542843031e240cebc198330d0
|
refs/heads/main
| 2023-04-18T04:25:26.261476
| 2021-11-10T16:21:11
| 2021-11-10T16:21:11
| 349,119,117
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,122
|
r
|
4_GLMM_Metabar.R
|
#######################################
###### eDNA Amphibian Detection #######
#######################################
# Script 4: GLMMs for metabarcoding sample level data
##### Analyze probability of detection at the sample level
# using GLMMs
# metabarcoding detection data
# evaluate the probability of detection as a function of pond attributes and sample attributes
# each row of the data = detection of a species from one DNA extract (usually a total of 4 extracts per pond)
#### load libraries
library(tidyverse)
library(MuMIn)
library(glmmTMB)
rm(list=ls())
###########################################
###### PREPARE DATA #######################
###########################################
###### READ IN DATA #######################
# sample level metabarcoding data
mb <- read.csv("05_data/metabar_12s_postClean.csv")
# collection/survey covariates
sample_covars <- read.csv("05_data/Raw/eDNA_sample_covariates.csv")
# true occupancy
detection.all <- read.csv("05_data/Detection_all_methods.csv")
###### REFORMAT METABARCODING DATA #######
# Want a detection for each DNA extract (across all PCR replicates/sequences)
mb$Taxa <- str_replace_all(mb$Taxa, " ", "_")
# remove the unpooled replicate data
mb %>% filter(!str_detect(Original_Sample, "PCR0")) -> mb
# clean up columns/sites names
# Map Latin names to 4-letter species codes; everything else -> "Other"
# (dropped at the end of this pipeline).
mb %>%
mutate(Species = case_when(
str_detect(Taxa, "Taricha")~"TATO", str_detect(Taxa, "Anaxyrus")~"BUBO", str_detect(Taxa, "Pseudacris")~"PSRE",
str_detect(Taxa, "draytonii")~"RADR",str_detect(Taxa, "Ambystoma")~"AMCA",str_detect(Taxa, "catesbeiana")~"RACA",
TRUE~"Other"
)) %>%
group_by(Species, Original_Sample, SiteCode, SampleName) %>%
summarise(ReadCount = sum(ReadCount)) %>%   # total reads per species x extract
mutate(Field_Sample = toupper(Original_Sample)) %>%
mutate(Field_Sample = str_replace_all(Field_Sample, "CA\\.", "CA-")) %>% # rename to match field data
mutate(Field_Sample = str_replace_all(Field_Sample, pattern = "Ca.P", "CA-P")) %>% # rename to match field data
mutate(Field_Sample = str_replace_all(Field_Sample, "GRAMPS", "Gramp")) %>% # rename to match field data
mutate(Field_Sample = str_replace_all(toupper(Field_Sample), "\\.", "_"), # rename to match field data
MB_Pos = case_when(ReadCount>0~1, ReadCount==0~0)) %>% # standardize to detection (1) nondetection (0)
separate(Field_Sample, into = c("SITE_CODE", "Collection_Date", "Replicate"), remove = FALSE, sep = "_") %>%
mutate(Collection_Date = as.Date(Collection_Date, "%Y%m%d")) %>%
group_by(SITE_CODE) %>% mutate(Visit = dense_rank(Collection_Date)) %>% # number the visits by date
mutate(SITE_VISIT = paste(SITE_CODE, Visit, sep = "_V0")) %>% ungroup() %>%
filter(Species!="Other") -> mb_clean
mb_clean %>% select(SITE_CODE, Sample_ID = Field_Sample, Species, MB_Pos) -> mb_clean
###### JOIN WITH COVARIATE DATA ###########
mb_clean %>% left_join(sample_covars, by = c("Sample_ID", "SITE_CODE")) -> joined_mb
joined_mb %>% select(Sample_ID, SITE_CODE, Species, Filter_volume_mL, Filter_type, Area, Turbidity, MB_Pos, Day) -> joined_mb
# true detection
# Pool detections across visits/methods: True.Occupancy = 1 if the species
# was ever detected at the site by any method.
detection.all %>% pivot_longer(AMCA:TATO, names_to = "Species", values_to = "DETECTION") %>%
group_by(SITE_CODE, Species) %>% summarise(True.Occupancy = max(DETECTION, na.rm=TRUE)) %>%
right_join(joined_mb, by = c("SITE_CODE", "Species")) -> joined_mb
# Log-transform skewed covariates before scaling inside the model formulas.
joined_mb %>% mutate(log_Area = log(Area), log_Turbidity = log(Turbidity)) -> joined_mb
###########################################
###### FIT MODELS #########################
###########################################
# For each species: subset to samples from truly-occupied sites, check
# predictor collinearity, fit a full binomial GLMM with a random site
# intercept (na.action = "na.fail" is required by MuMIn::dredge), then
# dredge all predictor subsets and keep models within 2 AICc of the best.
###### CALIFORNIA TIGER SALAMANDER ######
# filter to sites where true occupancy (based on all methods pooled) is 1
joined_mb %>% filter(Species == "AMCA", True.Occupancy==1) %>% data.frame() -> true.amca
# correlation btw predictor variables
true.amca %>% mutate(scale_vol = scale(Filter_volume_mL), scale_log_area = scale(log_Area),
scale_log_turb = scale(log_Turbidity), scale_day = scale(Day)) %>%
select(scale_vol, scale_log_area, scale_log_turb, scale_day) %>% cor()
# turbidity and area are correlated; (-0.66)
# remove area and just use turbidity
# NOTE(review): the comment above says turbidity is kept, but the model
# below includes scale(log_Area) and omits turbidity -- confirm which
# predictor was intended.
# fit full model
amca.m.full <- glmmTMB(MB_Pos~scale(Filter_volume_mL) + Filter_type +
scale(log_Area) + scale(Day) +
(1|SITE_CODE), true.amca, family = "binomial", na.action = "na.fail")
# fit all combinations of predictors
amca_dredge <- dredge(amca.m.full, rank = "AICc")
# get models within 2 AICc of best ranking model
amca_mods <- get.models(amca_dredge, subset = delta <= 2)
# view model objects
amca_mods
###### CALIFORNIA RED-LEGGED FROG ########
# repeat above
joined_mb %>% filter(Species == "RADR",True.Occupancy==1) %>% data.frame() -> true.radr
true.radr %>% mutate(scale_vol = scale(Filter_volume_mL), scale_log_area = scale(log_Area),
scale_log_turb = scale(log_Turbidity), scale_day = scale(Day)) %>%
select(scale_vol, scale_log_area, scale_log_turb, scale_day) %>% cor()
# volume and turbidity are correlated (-0.67)
# take volume out and keep turbidity
radr.m.full <- glmmTMB(MB_Pos~ Filter_type +scale(log_Area) +
scale(log_Turbidity) + scale(Day) +
(1|SITE_CODE), true.radr, family = "binomial", na.action = "na.fail")
radr_dredge <- dredge(radr.m.full, rank = "AICc")
radr_mods <- get.models(radr_dredge, subset = delta <= 2)
###### WESTERN TOAD #######################
joined_mb %>% filter(Species == "BUBO") %>% filter(True.Occupancy==1) %>% data.frame() -> true.bubo
true.bubo %>% mutate(scale_vol = scale(Filter_volume_mL), scale_log_area = scale(log_Area),
scale_log_turb = scale(log_Turbidity), scale_day = scale(Day)) %>%
select(scale_vol, scale_log_area, scale_log_turb, scale_day) %>% cor()
# no correlations
bubo.m.full<- glmmTMB(MB_Pos~scale(Filter_volume_mL) + Filter_type +
scale(log_Turbidity) + scale(log_Area)+ scale(Day)+
(1|SITE_CODE),true.bubo, family = "binomial", na.action = "na.fail")
bubo_dredge<- dredge(bubo.m.full, rank = "AICc")
bubo_mods <- get.models(bubo_dredge, subset = delta <= 2)
###### PACIFIC CHORUS FROG ################
joined_mb %>% filter(Species == "PSRE") %>% filter(True.Occupancy==1) %>% data.frame() -> true.psre
true.psre %>% mutate(scale_vol = scale(Filter_volume_mL), scale_log_area = scale(log_Area),
scale_log_turb = scale(log_Turbidity), scale_day = scale(Day)) %>%
select(scale_vol, scale_log_area, scale_log_turb, scale_day) %>% cor()
# turbidity and area r = -0.55; remove area
psre.m.full <- glmmTMB(MB_Pos~scale(Filter_volume_mL) + Filter_type + scale(log_Turbidity) +
+ scale(Day) +
(1|SITE_CODE), true.psre, family = "binomial", na.action = "na.fail")
psre_dredge <- dredge(psre.m.full, rank = "AICc") #
psre_mods <- get.models(psre_dredge, subset = delta <= 2)
# example of getting coeff estimates for best model
# NOTE(review): `11` is the dredge row id of the best model for this run;
# it will change if the data or candidate set changes.
summary(psre_mods$`11`)
confint(psre_mods$`11`)
###### AMERICAN BULLFROG ##################
joined_mb %>% filter(Species == "RACA", True.Occupancy==1) %>% data.frame() -> true.raca
# samples where metabarcoding missed the species:
true.raca%>% filter(MB_Pos == 0) %>% nrow() # only 4 samples
# not going to run a model because sample size is too low
###### CALIFORNIA NEWT #####################
joined_mb %>% filter(Species == "TATO") %>% filter(True.Occupancy==1) %>% data.frame() -> true.tato
true.tato %>% mutate(scale_vol = scale(Filter_volume_mL), scale_log_area = scale(log_Area),
scale_log_turb = scale(log_Turbidity), scale_day = scale(Day)) %>%
select(scale_vol, scale_log_area, scale_log_turb, scale_day) %>% cor()
# turbidity and area are correlated
tato.m.full <- glmmTMB(MB_Pos~scale(Filter_volume_mL) + Filter_type + scale(log_Turbidity) + scale(Day) +
(1|SITE_CODE), true.tato, family = "binomial", na.action = "na.fail")
tato_dredge <- dredge(tato.m.full, rank = "AICc")
tato_mods <- get.models(tato_dredge, subset = delta <=2)
|
ec3e8950bb846a39055d5e44f28a4c3e5f6c3f17
|
ef8dffd6d63d4338f6d7871c69c3cab21f585444
|
/LUAD_CNV_draft2.R
|
e18cf5b612f2c8d0a70bce6274ff7126eddf067a
|
[] |
no_license
|
natalie-stephenson/GRB2-SH2-Screen_WIP
|
34bd6ead60b104018bac681e5ee99603efa91626
|
6713fe7d3a6dad2cfe79050a42296b7c437c010c
|
refs/heads/master
| 2022-11-13T19:30:30.808847
| 2020-07-05T10:53:58
| 2020-07-05T10:53:58
| 277,148,900
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,843
|
r
|
LUAD_CNV_draft2.R
|
###################################################################################################################
# #
# Script to pull CNV data from the TCGA LUAD data files and annotate the genes altered in GRB2 depleted samples. #
# #
###################################################################################################################
# #
# AUTHOR LIST: #
# Natalie Stephenson. #
# #
# CURRENT STATUS OF SCRIPT: #
# #
# Although survival data has been downloaded and annotated onto one file from this script, the files used in the #
# reshaping step performed on MARC1 did not have this information within the script, as it was determined these #
# could be added at a later time more easily. The latest version of the script is currently at a stage where the #
# PCA step is being worked out. The point at which errors are occuring, and the errors being given, are annotated #
# onto the script at the appropriate place. #
# #
# This script currently utilises the Lung Adenocarcinoma dataset from TCGA (LUAD), however, this can be altered #
# to other cancer types by altering the appropriate sections of the CNV data download step. #
# #
# In addition, this script is currently looking at proteins containing Class I and II PxxP motifs, i.e. the #
# classical PxxP motifs, the GRB2_SH3.R script creates lists of all eight classes of PxxP motifs, so altering the #
# list used when reducing the data would offer information on the different PxxP motif containing proteins. #
# #
###################################################################################################################
# Intro text.
cat("[INSERT PAPER DETAILS HERE] \n\n",
"This script downloads Lung Adenocarcinoma TCGA copy number variation (CNV) data and processes it to provide a list of gene in each sample with CNV's. \n",
"NOTE: This script requires files generated by the script GRB2_SH3.R\n",
sep="")
# Install required packages
### make sure this is as efficient as possible (i.e. get rid of those not used!)
# NOTE(review): biocLite.R / biocLite() were retired with R 3.5; on modern R
# use BiocManager::install("TCGAbiolinks") instead. Installing packages from
# inside an analysis script is also discouraged -- consider moving these to
# a one-off setup script.
source("http://bioconductor.org/biocLite.R")
biocLite("TCGAbiolinks")
#biocLite(c("biorMart", "UniProt.ws", "RTCGA", "RTCGA.CNV", "AnnotationDbi", "SummarizedExperiment", "genomeIntervals", "GenomicRanges"))
install.packages("plyr")
install.packages("dplyr")
# install.packages("stringr")
#install.packages("cgdsr")
install.packages("data.table")
# Load required libraries.
### make sure this is as efficient as possible (i.e. get rid of those not used!)
# library(UniProt.ws)
# library(biomaRt)
library(dplyr)
# library(stringr)
# library(cgdsr)
# library(RTCGA)
library(data.table)
# NOTE(review): plyr loaded after dplyr masks several dplyr verbs
# (e.g. summarise, mutate); load plyr first if both are truly needed.
library(plyr)
library(TCGAbiolinks)
# library(SummarizedExperiment)
# library(genomeIntervals)
# library(GenomicRanges)
# library(AnnotationDbi)
##################################
##
## DOWNLOADING CNV DATA FROM THE TCGA DATABASE
##
##################################
# Create a GDC query to look into Copy Number Variations within Lung Adenocarcinoma in the TCGA database.
# Reference for creating further queries: https://bioconductor.org/packages/devel/bioc/vignettes/TCGAbiolinks/inst/doc/tcgaBiolinks.html#gdcquery-searching-tcga-open-access-data
# Note: can use data.category = "Transcriptome Profiling"/"Gene Expression" with data.type = "Gene Expression Quantification" [workflow.type ="HTseq - Counts??, flie.type ="normalized_results"/ experimental.strategy = "RNA-Seq"]
# Note: Used "Masked Copy Number Segment" for data.type as they are generated with the same method as "Copy Number Segment" files except that a filtering step is performed that removes Y chromosome and probe sets that were previously indicated to have frequent germline copy-number variation.
# Should I use Legacy datasets???
queryLUAD_CNV <- GDCquery(project = "TCGA-LUAD",
data.category = "Copy Number Variation",
data.type = "Masked Copy Number Segment",
legacy = FALSE)
# Download the LUAD CNV data (cached locally by TCGAbiolinks) and assemble
# it into a single segment-level data frame.
GDCdownload(queryLUAD_CNV)
LUAD_CNV_assay <- GDCprepare(queryLUAD_CNV)
# Sort the CNV data by chromosome position.
LUAD_CNV_sorted <- LUAD_CNV_assay[order(LUAD_CNV_assay$Chromosome, LUAD_CNV_assay$Start, LUAD_CNV_assay$End),]
# Define CNV from Segment Mean (Segment Mean < -0.2 = -1 LOSS, Segment Mean > 0.2 = 1 GAIN, else 0 NORMAL
LUAD_CNV_sorted <- LUAD_CNV_sorted %>%
mutate(CNV=Segment_Mean)
# cut() bins the continuous segment mean into a 3-level factor ("-1"/"0"/"1").
# NOTE(review): the trailing comma passes an empty positional argument (it
# falls through to a defaulted formal, so it is harmless) -- remove it.
LUAD_CNV_sorted$CNV <- cut(LUAD_CNV_sorted$CNV, c(-Inf, -0.2, 0.2, Inf), labels = c("-1", "0", "1"), )
write.csv(LUAD_CNV_sorted, "LUAD_CNV_sorted.csv")
##################################
##
## ANNOTATING GENE NAMES ONTO THE CNV DATA
##
##################################
# Annotate genenames onto the CNV data (https://www.biostars.org/p/147916/)
# Import complete gene list generated from the script GRB2_SH3.R.
complete_gene_list <- read.csv(file = "complete_gene_list.csv")
complete_gene_list <- complete_gene_list[order(complete_gene_list$chromosome_name, complete_gene_list$start_position, complete_gene_list$end_position),]
# Reduce the data and assign gene names to the chromosome regions specified.
## NOTE: This step was performed on the ARC3 server details are below:
## DATE PERFORMED: 03MAY2017
## SUCCESSFUL JOB ID: 14682
## QNAME: 24core-128G.q
## TIME: 2.3 hours
## MEM: 533.6 TBs
## MAXVMEME: 73 GB
## NOTE: In order to perform this script on ARC3 the foverlaps utlilsed a LUAD_CNV file that was split into blocks of 5000 rows. The script for this can
## be found in the file foverlaps_script_split.R. The resulting data files were recombined using the bind.R script, this was also performed on ARC3,
## job ID: 18758, 24core-768G.q, 4.3 hours, 3039 TBs and maxvmem 224.7 GB.
## This should not need to be done if this is repeated on MARC1, however, at the time I did not have access/knowledge of this resource.
LUAD_CNV_dt <-data.table(LUAD_CNV_sorted)
complete_gene_list_dt <- data.table(complete_gene_list)
LUAD_CNV_dt$Num_Probes = NULL   # drop unused column before the interval join
write.csv(LUAD_CNV_dt, file = "LUAD_CNV_dt.csv")
# Keep only the columns needed for the overlap join.
complete_gene_list_dt <- complete_gene_list_dt[, c("uniprot_genename", "chromosome_name", "start_position", "end_position")]
write.csv(complete_gene_list_dt, file = "complete_gene_list_dt.csv")
# foverlaps() requires a key on (chromosome, start, end) of the y table.
setkey(LUAD_CNV_dt, Chromosome, Start, End)
# Interval join: each gene is matched to every CNV segment it overlaps by
# at least 1bp (type = "any", mult = "all"); genes with no overlapping
# segment are dropped (nomatch = 0).
LUAD_CNV_genenames <- foverlaps(complete_gene_list_dt, LUAD_CNV_dt, by.x = c("chromosome_name", "start_position", "end_position"), by.y = c("Chromosome", "Start", "End"), maxgap=0, minoverlap=1, type = "any", mult = "all", nomatch=0, which=FALSE)
write.csv(LUAD_CNV_genenames, file = "LUAD_CNV_genenames.csv")
##################################
##
## DOWNLOADING THE CLINICAL DATA ASSOCIATED WITH THE LUAD CNV DATA
##
##################################
#Create a GDC query to get clinical data from the Lung Adenocarcinoma dataset within the TCGA database.
queryLUAD_clinical <- GDCquery(project ="TCGA-LUAD",
data.category = "Clinical")
#Download the Lung Adeno clinical data.
# If the default (api) download fails, retry with the gdc client.
LUAD_clinical <- tryCatch(GDCdownload(queryLUAD_clinical),
error=function(e) GDCdownload(queryLUAD_clinical, method = "client"))
#Access patient suvival data.
LUAD_clinical_patient <- GDCprepare_clinic(queryLUAD_clinical, clinical.info = "patient")
LUAD_clinical_patientsurvival <- LUAD_clinical_patient[,c("bcr_patient_barcode", "vital_status", "days_to_death")]
# Score patient survival data at 5 years (1825 days), 10 years (3650 days) and 15 years (5475 days)
# Score = fraction of the horizon survived before death; patients still
# alive are assigned the maximum score of 1.
# NOTE(review): living patients get 1 regardless of follow-up length, so a
# patient censored after 6 months scores the same as a 15-year survivor --
# confirm this is the intended treatment of censoring.
LUAD_clinical_patientsurvival <- LUAD_clinical_patientsurvival %>%
mutate(Survival_Score5y=days_to_death/1825)
LUAD_clinical_patientsurvival$Survival_Score5y <- ifelse (LUAD_clinical_patientsurvival$vital_status == "Alive", 1, LUAD_clinical_patientsurvival$Survival_Score5y)
LUAD_clinical_patientsurvival <- LUAD_clinical_patientsurvival %>%
mutate(Survival_Score10y=days_to_death/3650)
LUAD_clinical_patientsurvival$Survival_Score10y <- ifelse (LUAD_clinical_patientsurvival$vital_status == "Alive", 1 ,LUAD_clinical_patientsurvival$Survival_Score10y)
LUAD_clinical_patientsurvival <- LUAD_clinical_patientsurvival %>%
mutate(Survival_Score15y=days_to_death/5475)
LUAD_clinical_patientsurvival$Survival_Score15y <- ifelse (LUAD_clinical_patientsurvival$vital_status == "Alive", 1 ,LUAD_clinical_patientsurvival$Survival_Score15y)
# Write files and print session information
write.csv(LUAD_clinical_patientsurvival, "LUAD_clinical_patientsurvival.csv")
##################################
##
## ANNOTATING CNV DATA WITH PATIENT DATA
##
##################################
# Splitting Sample ID into information about the patient - Patient ID and
# sample type. TCGA barcodes encode the patient in characters 1-12 and the
# sample-type code in characters 14-15.
LUAD_CNV_genenames_samplesplit <- within(LUAD_CNV_genenames, {
Patient_ID <- substr(LUAD_CNV_genenames$Sample, 1, 12)
Sample_Type_ID <- substr(LUAD_CNV_genenames$Sample, 14, 15)
})
# Reducing the data down into components required for next step (#### FOR GENENAMES ADD "uniprot_genename" AT END ####)
LUAD_CNV_genenames_samplesplit <- LUAD_CNV_genenames_samplesplit[, c("Patient_ID", "Sample_Type_ID", "Sample", "Segment_Mean")]
# Characterising the Sample Type ID into Sample Type (tumour vs normal vs control).
# TCGA sample-type codes: 01-09 = tumour, 10-19 = normal, 20-29 = control.
# FIX: the original three for-loops rescanned the full column with ifelse()
# 30 times; this single-pass vectorised classification is equivalent
# (unknown codes are kept as-is, matching the original loop behaviour).
tumour_ids  <- sprintf("%02d", 1:9)
normal_ids  <- sprintf("%02d", 10:19)
control_ids <- sprintf("%02d", 20:29)
LUAD_CNV_genenames_samplesplit$Sample_Type <- with(
LUAD_CNV_genenames_samplesplit,
ifelse(Sample_Type_ID %in% tumour_ids, "Tumour",
ifelse(Sample_Type_ID %in% normal_ids, "Normal",
ifelse(Sample_Type_ID %in% control_ids, "Control", Sample_Type_ID)))
)
# Ensuring data is in the correct format and altering patient survival data table so the "Patient_ID" column is correctly labelled.
LUAD_clinical_patientsurvival <- data.table(LUAD_clinical_patientsurvival)
LUAD_CNV_genenames_samplesplit <- data.table(LUAD_CNV_genenames_samplesplit)
colnames(LUAD_clinical_patientsurvival)[1] <- "Patient_ID"
# Merging the CNV data with the patient data based on the patient ID given by the TCGA database.
# FIX: the result was previously named `merge`, shadowing base::merge();
# it is only used by the write.csv() on the next line, so rename it.
LUAD_CNV_patient_merged <- merge(LUAD_CNV_genenames_samplesplit, LUAD_clinical_patientsurvival, by="Patient_ID")
write.csv(LUAD_CNV_patient_merged, file = "LUAD_CNV_genenames_survival.csv")
##################################
##
## IMPORTING GENE LIST GENERATED BY PREVIOUS SCRIPTS
##
##################################
# Load lists of proteins with SH3 and PxxP regions
# (both CSVs are outputs of GRB2_SH3.R; class 1 and 2 are the classical
# PxxP motifs -- see the header comments of this file)
PR_Class1and2 <- read.csv(file = "PR_Class1and2_containing_proteins_list.csv")
SH3_proteins <- read.csv(file = "SH3_domain_containing_proteins_list.csv")
# Drop the row-number column ("X") created when the CSVs were written.
PR_Class1and2$X <- NULL
SH3_proteins$X <- NULL
# Combine the two lists and keep one row per gene name.
SH3_PR12_proteins <- rbind(PR_Class1and2, SH3_proteins)
SH3_PR12_proteins_dedup <- SH3_PR12_proteins[!duplicated(SH3_PR12_proteins$uniprot_genename),]
# NOTE(review): if the combined list has a single column, the subsetting
# above drops to a vector and as.data.frame() renames the column; the merge
# below by "uniprot_genename" assumes the column name survives -- confirm.
SH3_PR12_proteins_dedup <- as.data.frame(SH3_PR12_proteins_dedup)
##################################
##
## FILTERING CNV DATA TO SHOW ONLY INFORMATION RELATING TO PROTEINS CONTAINING PXXP MOTIFS OR SH3 DOMAINS
##
##################################
# Filter CNV data to show only information on proteins containing canonical (Class 1 and 2) PxxP motifs or SH3 domains
## NOTE: This step was performed on the MARC1 server details are below:
## DATE PERFORMED: 08DEC2017
## SUCCESSFUL JOB ID: 498770
## QNAME: 48core-3T.q
## TIME: 8.9 hours
## MEM: 9.5 MGBs
## MAXVMEME: 486 GB
LUAD_CNV_genenames <- read.csv(file = "LUAD_CNV_genenames.csv")
# Name the reduced columns explicitly with the "LUAD_CNV." prefix that the
# PCA section below expects (it reshapes on "LUAD_CNV.Sample" /
# "LUAD_CNV.uniprot_genename" and strips "LUAD_CNV.Segment_Mean.").
LUAD_CNV_genenames_reduced <- data.frame(
LUAD_CNV.Sample = LUAD_CNV_genenames$Sample,
LUAD_CNV.Segment_Mean = LUAD_CNV_genenames$Segment_Mean,
LUAD_CNV.uniprot_genename = LUAD_CNV_genenames$uniprot_genename
)
# FIX: the original merge referenced undefined objects (LUAD_CNV_reduced and
# SH3_PR12) and a by.x name that never matched the auto-generated columns;
# use the data frames actually created above and in the gene-list section.
LUAD_CNV_SH3_PR12 <- merge(x = LUAD_CNV_genenames_reduced,
y = SH3_PR12_proteins_dedup,
by.x = "LUAD_CNV.uniprot_genename",
by.y = "uniprot_genename")
write.csv(LUAD_CNV_SH3_PR12, file="LUAD_CNV_SH3_PR12.csv")
##################################
##
## PRINCIPLE COMPONENT ANALYSIS
##
##################################
# Removing any duplicated and incomplete rows and reshaping the dataframe for PCA analysis
## NOTE: This step was performed on the MARC1 server details are below:
## DATE PERFORMED: 03JAN2018
## SUCCESSFUL JOB ID: 506468
## QNAME: 48core-3T.q
## TIME: 52.1 hours
## MEM: 17.5 MGBs
## MAXVMEME: 131 GB
LUAD_CNV <- read.csv(file = "LUAD_CNV_SH3_PR12.csv")
deduped_LUAD_CNV <- unique(LUAD_CNV)
# Wide format: one row per sample, one Segment_Mean column per gene.
LUAD_reshaped <- reshape(deduped_LUAD_CNV, idvar = "LUAD_CNV.Sample", timevar = "LUAD_CNV.uniprot_genename", direction = "wide")
write.csv(LUAD_reshaped, file="LUAD_reshaped.csv")
LUAD_reshaped_NAremoved <- LUAD_reshaped[complete.cases(LUAD_reshaped),]
write.csv(LUAD_reshaped_NAremoved, file="LUAD_reshaped_NAremoved.csv")
# The reshaping step pivoted the row-number column ("X", created when the
# CSV was written with row names) into one unwanted "X.<gene>" column per
# gene. Remove those and strip the Segment_Mean prefix from the rest.
# FIX: the original pattern "X." was an unanchored regex where "." matches
# any character, so every gene column containing an "X" anywhere in its
# name was also dropped. Anchor and escape the pattern.
LUAD_reshaped_redacted <- LUAD_reshaped[, -grep("^X\\.", colnames(LUAD_reshaped))]
names(LUAD_reshaped_redacted) <- gsub(pattern = "LUAD_CNV.Segment_Mean.", replacement = "", x = names(LUAD_reshaped_redacted))
write.csv(LUAD_reshaped_redacted, file = "LUAD_reshaped_redacted.csv")
# Removing any duplicated and incomplete rows for PCA analysis
LUAD_reshaped_redacted_complete <- LUAD_reshaped_redacted[complete.cases(LUAD_reshaped_redacted),]
write.csv(LUAD_reshaped_redacted_complete, file = "LUAD_reshaped_redacted_complete.csv")
# Standardising the Segment Mean for each gene.
# FIX: scale() previously failed with "'x' must be numeric" because the
# frame still contained non-numeric columns (e.g. the sample identifier);
# keep only the numeric segment-mean columns before scaling.
numeric_cols <- vapply(LUAD_reshaped_redacted_complete, is.numeric, logical(1))
standardised_SM <- as.matrix(LUAD_reshaped_redacted_complete[, numeric_cols, drop = FALSE])
standardised_SM <- scale(standardised_SM)
write.csv(standardised_SM, file = "standardised_SM.csv")
# PCA on the standardised segment means.
# NOTE(review): prcomp(scale. = TRUE) errors if any gene has zero variance
# across samples; drop constant columns first if that occurs.
LUAD_pca <- prcomp(standardised_SM, center = TRUE, scale. = TRUE)
# FIX: a prcomp object cannot be coerced by write.csv(); write the sample
# scores (rotated data) instead.
write.csv(LUAD_pca$x, file = "LUAD_PCA.csv")
# FIX: dev.copy() expects a device *function* (e.g. png), not a plot object
# (biplot() returns NULL invisibly); open the device, draw, then close it.
png("biplot_LUAD_PCA.png")
biplot(LUAD_pca)
dev.off()
# FIX: summary.prcomp objects are also not coercible to a data frame; write
# the importance matrix (sd / proportion / cumulative proportion) instead.
summary_LUAD_PCA <- summary(LUAD_pca)
write.csv(summary_LUAD_PCA$importance, file = "summary_LUAD_PCA.csv")
####### Multivariate analysis tool for CNV??? https://cran.r-project.org/web/packages/CNVassoc/CNVassoc.pdf
# Prints Session information for this code.
sessionInfo()
|
1d61f403362a67a39e4dc08343dff5e99b3929e6
|
57854e2a3731cb1216b2df25a0804a91f68cacf3
|
/man/moveToGroup.Rd
|
13104ff8177d44ec56ca8ef014e652edb40971b3
|
[] |
no_license
|
persephonet/rcrunch
|
9f826d6217de343ba47cdfcfecbd76ee4b1ad696
|
1de10f8161767da1cf510eb8c866c2006fe36339
|
refs/heads/master
| 2020-04-05T08:17:00.968846
| 2017-03-21T23:25:06
| 2017-03-21T23:25:06
| 50,125,918
| 1
| 0
| null | 2017-02-10T23:23:34
| 2016-01-21T17:56:57
|
R
|
UTF-8
|
R
| false
| true
| 832
|
rd
|
moveToGroup.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/shoji-order.R
\name{moveToGroup}
\alias{moveToGroup}
\alias{moveToGroup<-}
\title{Move entities to a group}
\usage{
moveToGroup(x, value)
moveToGroup(x) <- value
}
\arguments{
\item{x}{VariableGroup}
\item{value}{Variable, VariableCatalog subset, or Dataset subset}
}
\value{
\code{x} with the entities in \code{value} appended to it. If the
containing order object has duplicates=FALSE, the entities will be "moved"
to this group. Otherwise, their references will be copied to the group.
}
\description{
The function has two versions: a regular function and a setter. They do the
same thing, but the setter probably results in less verbose code for you.
}
\examples{
\dontrun{
moveToGroup(ordering(ds)[["Demographics"]]) <- ds[c("gender", "age")]
}
}
|
429926a66452c6805c19b21efd454a53a19c944a
|
814605d65bc126d0a5332ae5a41606b64de20ac1
|
/notebooks/estudo_bandwidth.R
|
87b51e88618c9d69affca10349b92f5a8b855605
|
[] |
no_license
|
netoluizbezerra/dissertacao
|
a559233bde42214722a1bec1bb27b9b17d6e311f
|
cebb922e3f9f29e4cc7ac34affbd25a41fb0921d
|
refs/heads/master
| 2021-05-04T20:41:11.590678
| 2018-02-05T10:34:13
| 2018-02-05T10:34:13
| 119,832,013
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,844
|
r
|
estudo_bandwidth.R
|
####################
# Bandwidth issues #
####################
# Monte Carlo study of bandwidth selection for a semi-parametric GAS model:
# simulate GARCH(1,1)-t data, then fit (i) a parametric GAS model, (ii) a
# semi-parametric GAS model with optimised bandwidths, and (iii) a
# semi-parametric GAS model with fixed "prior" bandwidths (h = h2 = 0.5).
#
# NOTE(review): garchSpec()/garchSim() come from the fGarch package and
# optim.gas(), get.ft(), sp.gas(), sp.gas.likelihood.bandwidth() are project
# functions defined elsewhere; none of them is loaded or sourced here, so
# this script assumes they are already in the session.
rm(list = ls())
library(kedd)
ndraws <- 100           # number of Monte Carlo replications
horizon.simul <- 2000   # simulated series length before burn-in removal
alpha1 <- 0.10          # GARCH ARCH coefficient
beta1 <- 0.85           # GARCH persistence coefficient
mu <- 0.00
omega <- 2
burnin <- 100           # observations discarded at the start of each draw
model <- garchSpec(model = list(mu = mu, omega = omega, alpha = alpha1, beta = beta1, shape = 5), cond.dist = "std")
R <- ndraws # Number of repetitions of the study
# Result containers: one column per replication, rows = parameter estimates
# (omega, alpha, beta, mu) plus the log-likelihood.
paramEstimados_GAS <- t(data.frame(omega = rep(0, R), alpha = rep(0, R), beta = rep(0, R), mu = rep(0, R), loglike = rep(0, R)))
paramEstimados_GAS_band <- t(data.frame(omega.band = rep(0, R), alpha.band = rep(0, R), beta.band = rep(0, R), mu.band = rep(0, R), loglike.band = rep(0, R)))
paramEstimados_GAS.prior <- t(data.frame(omega.prior = rep(0, R), alpha.prior = rep(0, R), beta.prior = rep(0, R), mu.prior = rep(0, R), loglike.prior = rep(0, R)))
Bandwidth <- t(data.frame(h = rep(0, R), h2 = rep(0, R)))
for (n in 1:ndraws) {
  # Simulate one series and drop the burn-in period.
  # NOTE(review): vy[burnin:horizon.simul] keeps observation `burnin` itself
  # (1901 points, not 1900) -- confirm whether (burnin + 1) was intended.
  vy <- as.numeric(garchSim(model, n = (horizon.simul)))
  vy <- vy[burnin:horizon.simul]
  # Parametric GAS - Gaussian (df = 0 selects the Gaussian score)
  df <- 0
  gas <- optim.gas(vy, df)
  beta.til <- gas$optimizer.new$par[3] - gas$optimizer.new$par[2]
  omega <- gas$optimizer.new$par[1] / (1 - beta.til)
  # Bandwidth selection: standardise the parametric residuals, then optimise
  # the two kernel bandwidths by maximising the semi-parametric likelihood.
  vP <- c(0.5, 0.5)
  list <- get.ft(vy, horizon = (horizon.simul - burnin), gas)
  vf.temp <- list$f_t; sigma <- list$sigma
  dmu <- gas$optimizer.new$par[4]
  std.res <- (vy - dmu) / exp(vf.temp / 2)
  std.res <- scale(std.res, scale = TRUE)
  optimizer.sp.band <- nlminb(start = vP, objective = sp.gas.likelihood.bandwidth,
                              gradient = NULL, hessian = NULL,
                              vy = vy, std.res = std.res, gas,
                              control = list(trace = 1),
                              lower = 0.01, upper = 0.9)
  # Semi-parametric GAS with the optimised bandwidths
  band <- optimizer.sp.band$par
  h <- band[1]
  h2 <- band[2]
  sp_gas <- sp.gas(vy, h, h2, horizon = length(vy), gas)
  beta.sp.til <- sp_gas$optimizer.sp$par[3] - sp_gas$optimizer.sp$par[2]
  omega.sp <- sp_gas$optimizer.sp$par[1] / (1 - beta.sp.til)
  # Semi-parametric GAS with fixed prior bandwidths
  h <- 0.5
  h2 <- 0.5
  sp_gas_prior <- sp.gas(vy, h, h2, horizon = length(vy), gas)
  beta.sp.til_prior <- sp_gas_prior$optimizer.sp$par[3] - sp_gas_prior$optimizer.sp$par[2]
  # FIX: the prior-bandwidth omega previously divided by (1 - beta.til), the
  # *parametric* model's beta -- a copy-paste slip breaking the parallel
  # with the two omegas above; use this model's own beta.
  omega.sp_prior <- sp_gas_prior$optimizer.sp$par[1] / (1 - beta.sp.til_prior)
  paramEstimados_GAS[, n] <- unlist(c(omega, gas$optimizer.new$par[2], beta.til, gas$optimizer.new$par[4], -length(vy) * gas$optimizer.new$objective))
  paramEstimados_GAS_band[, n] <- unlist(c(omega.sp, sp_gas$optimizer.sp$par[2], beta.sp.til, sp_gas$optimizer.sp$par[4], -length(vy) * sp_gas$optimizer.sp$objective))
  paramEstimados_GAS.prior[, n] <- unlist(c(omega.sp_prior, sp_gas_prior$optimizer.sp$par[2], beta.sp.til_prior, sp_gas_prior$optimizer.sp$par[4], -length(vy) * sp_gas_prior$optimizer.sp$objective))
  Bandwidth[, n] <- unlist(c(band[1], band[2]))
}
|
d5bd04698aa3ec1e81d5b4cf450d92609ed1c68a
|
bb9d1aa63b3d9b0a9cfe233bb7e41f59ee35afc6
|
/man/gf_add_font.Rd
|
9bb5989bca59f3465a75d83eda59d2a25f8f6474
|
[] |
no_license
|
timelyportfolio/googlefontR
|
b438108b8462c7c19e2518ad16eacd969bb48863
|
39b568785112bb06fd41fa4eeb541a8c0204f3e7
|
refs/heads/master
| 2020-04-19T15:50:08.607959
| 2016-09-01T18:58:02
| 2016-09-01T18:58:02
| 67,147,173
| 17
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 759
|
rd
|
gf_add_font.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dependencies.R
\name{gf_add_font}
\alias{gf_add_font}
\title{Add Google Fonts to 'tags' or 'tagList'}
\usage{
gf_add_font(tag_list = htmltools::tagList(), fontname = NULL,
customstyle = character(), addstyle = TRUE)
}
\arguments{
\item{tag_list}{\code{htmltools::tags} or \code{htmltools::tagList}}
\item{fontname}{\code{character} valid Google Font name}
\item{customstyle}{\code{character} of custom weights and styles}
\item{addstyle}{\code{logical} to add style tag to use
the Google Font for body text}
}
\value{
\code{htmltools::tags} or \code{htmltools::tagList} with
attached Google Font dependency
}
\description{
Add Google Fonts to 'tags' or 'tagList'
}
|
6b5b1c229dbe58e97942959d920472b203db3042
|
0c96e7150dbf6fe7aab21e1bbe85d72b3df9201b
|
/man/substrRight.Rd
|
c2764d801819dcaaea8bfcf16dec2a8561f7d6d3
|
[] |
no_license
|
KangarooCourt/ascendedge
|
fceaf94767fa5de822e31bc94c9f5ad433c2dc53
|
a055751520b1e281044130968c85f3520f2d014f
|
refs/heads/master
| 2020-08-31T02:30:41.415323
| 2019-10-30T15:27:27
| 2019-10-30T15:27:27
| 218,553,360
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 484
|
rd
|
substrRight.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/substrRight.R
\name{substrRight}
\alias{substrRight}
\title{Last Letters}
\usage{
substrRight(x, n)
}
\arguments{
\item{x}{A character string.}
\item{n}{A number. This is the number of letters from the end of a string to be displayed.}
}
\value{
The last 'n' characters in string 'x'
}
\description{
A function to grab the last 'n' letters from string 'x'
}
\examples{
substrRight("Random String", 3)
}
|
dce71b4c5feee789b895d9eff9c03e712a94a596
|
0cc8920c857ada69f8d15d734663062c1337f109
|
/R/spaceopt/scoring/space_allocation_model.R
|
c69042ef65049367fb01c87588ae7185e8446f5b
|
[] |
no_license
|
kpushkar/scalene
|
783f08a13ade0da14454f3b19b71b1c1cc92cdae
|
d38412597dcb8b3081b6e0b9567fd2c4e93a91df
|
refs/heads/master
| 2021-01-12T02:27:28.296528
| 2017-01-26T10:36:02
| 2017-01-26T10:36:02
| 77,958,383
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,848
|
r
|
space_allocation_model.R
|
# now lets run the space_allocation_loop for all possible space
# and then use it for our graphical interface
# (optimise_kpi() is defined in the sourced space_allocation_loop_FN.R)
source("R/utils/scalene_utilities.R")
source("R/spaceopt/scoring/space_allocation_loop_FN.R")
# let's read the store space info
STORE_DB <- read.csv("data/spaceopt/input/Store_DB.csv",
stringsAsFactors = F)
names(STORE_DB)
fivenum(STORE_DB$STORE_SPACE)
# Range of space scenarios to evaluate: a little below the smallest store
# and a little above the largest.
POSSIBLE_SPACE_LB = min(STORE_DB$STORE_SPACE)-5
POSSIBLE_SPACE_UB = max(STORE_DB$STORE_SPACE)+10
STORE_CURRENT_KPI <- read.csv("data/spaceopt/input/Current_KPI_Stores_DB.csv",
stringsAsFactors = F)
names(STORE_CURRENT_KPI)
# for each of the possible scenarios create the most efficient space allocation
# NOTE(review): sapply(...)[1,] assumes optimise_kpi() returns at least two
# values per call (so sapply simplifies to a matrix and row 1 is the KPI) --
# confirm against space_allocation_loop_FN.R.
MAX_POSSIBLE_KPI <- data.frame(SPACE_COUNT=seq(POSSIBLE_SPACE_LB,POSSIBLE_SPACE_UB))
MAX_POSSIBLE_KPI$OPT_KPI <- unlist(sapply(MAX_POSSIBLE_KPI$SPACE_COUNT,
optimise_kpi)[1,])
names(MAX_POSSIBLE_KPI)
# Attach current KPIs, then the optimal KPI for each store's space count.
STORE_KPI_COMP <- merge(x = STORE_DB,
y = STORE_CURRENT_KPI)
names(STORE_KPI_COMP)
STORE_KPI_COMP <- merge(x = STORE_KPI_COMP,
y = MAX_POSSIBLE_KPI,
by.x = "STORE_SPACE",
by.y = "SPACE_COUNT",
all.x = T)
names(STORE_KPI_COMP)
# lets create some graphs
#
library("ggplot2")
# compare CURRENT_KPI and OPT_KPI for each store
p<-ggplot(data = STORE_KPI_COMP) + geom_point(aes(x=CURRENT_KPI,y=OPT_KPI))+
ggtitle("Current vs Optimal KPI for stores")
ggsave(filename = "data/spaceopt/output/Compare_Optimised_KPI_1.png",
plot = p)
library("reshape2")
# Long format (one row per store x KPI type) for the grouped bar chart.
graph.data <- melt(STORE_KPI_COMP[,c(1,2,4,6,7)], id.vars = c("STORE_SPACE",
"STORE_ID",
"STATE"))
names(graph.data)
colnames(graph.data) <- c("STORE_SPACE","STORE_ID","STATE","KPI_TYPE","KPI")
p <- ggplot(data = graph.data) + geom_bar(aes(x=STORE_ID,y=KPI,fill=KPI_TYPE),
stat="identity",position="dodge") + facet_grid(STATE~.) +
ggtitle("Current vs Optimal KPI by State")
ggsave(filename = "data/spaceopt/output/Compare_Optimised_KPI.png",
plot = p)
p <- ggplot(data = MAX_POSSIBLE_KPI) + geom_bar(aes(x=SPACE_COUNT,y=OPT_KPI),stat="identity") +
ggtitle("Optimised KPI vs Space Count")
ggsave(filename = "data/spaceopt/output/Opt_KPI_Space.png",
plot = p)
# write the csvs
write.csv(x = MAX_POSSIBLE_KPI,file = "data/spaceopt/output/Optimised_KPI_DB.csv", row.names = F)
write.csv(x = STORE_KPI_COMP, file = "data/spaceopt/output/Optimised_KPI_Store_DB.csv", row.names = F)
# Clean up intermediate objects.
# NOTE(review): explicit rm() of locals is unnecessary in a script run in a
# fresh session; consider removing this block.
rm(graph.data)
rm(MAX_POSSIBLE_KPI)
rm(STORE_CURRENT_KPI)
rm(STORE_DB)
rm(STORE_KPI_COMP)
rm(p)
rm(POSSIBLE_SPACE_UB)
rm(POSSIBLE_SPACE_LB)
|
d7faf646ea3534bb92d013c46f3a410f3f264c42
|
899420d8106be354a2010f5964fc5802f533294c
|
/man/ncdf_stack.Rd
|
584d03f29bb89922c4612e2cf4077be701ee1a13
|
[] |
no_license
|
annakrystalli/sedMaps
|
4cea5a3e51feb27427d01188b607efe7c40b160c
|
a93da7c5ba1125f5716cbb60674b80cfb74ad36b
|
refs/heads/master
| 2021-06-24T17:26:34.668563
| 2021-06-19T12:25:27
| 2021-06-19T12:25:27
| 149,792,890
| 1
| 1
| null | 2018-09-22T10:16:53
| 2018-09-21T16:59:44
|
R
|
UTF-8
|
R
| false
| true
| 360
|
rd
|
ncdf_stack.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ncdf-read.R
\name{ncdf_stack}
\alias{ncdf_stack}
\title{Load a ncdf file into a rasterBrick}
\usage{
ncdf_stack(path)
}
\arguments{
\item{path}{path to ncdf file}
}
\value{
a rasterBrick, one layer for each varname in the ncdf.
}
\description{
Load a ncdf file into a rasterBrick
}
|
df1727a6c06765a81b28aec922664d8cf1ec9793
|
98bed9cd8ba75660bc63fc359f767dd7925c8441
|
/R/test_brackets_balanced.R
|
787eeba51ff2dc3de7b5d176d77058bc790d1e44
|
[
"MIT"
] |
permissive
|
CoEDL/yinarlingi
|
91e0418528ad8da2ba9d3301d2f091b465ab3e72
|
5d720213c51776408aa94493a66a25776f8fbfd3
|
refs/heads/master
| 2020-03-21T21:49:48.123051
| 2020-02-13T23:18:09
| 2020-02-13T23:18:09
| 139,087,134
| 0
| 0
|
MIT
| 2019-02-28T23:41:51
| 2018-06-29T01:43:23
|
R
|
UTF-8
|
R
| false
| false
| 765
|
r
|
test_brackets_balanced.R
|
#' Test that all brackets and parentheses are well-balanced
#'
#' Flags lexicon lines whose counts of opening brackets (`<`, `(`, `[`) and
#' closing brackets (`>`, `)`, `]`) differ. Source codes are stripped from
#' the data first so their punctuation is not counted. Note: only the
#' *totals* are compared, so mismatched pairs such as `<]` pass undetected.
#'
#' @param wlp_lexicon a Warlpiri lexicon data frame, or path to a Warlpiri dictionary file
#'
#' @return a data frame of the rows whose bracket counts are unbalanced,
#'   with the extracted `l_bracs`/`r_bracs` strings for inspection.
#'
#' @importFrom stringr str_extract_all
#'
#' @export
#'
test_brackets_balanced <- function(wlp_lexicon) {
  wlp_lexicon %>%
    skeletonise_df() %>%
    mutate(
      data = str_remove_all(data, use_wlp_regex("source_codes")),
      # FIX: inside a character class "|" is a literal character, so the
      # original patterns "[<|\\(|\\[]" / "[>|\\)|\\]]" also matched pipe
      # characters on both sides; the classes below match only brackets.
      l_bracs = map_chr(data, ~ str_extract_all(., "[<(\\[]") %>% unlist(use.names = FALSE) %>% paste0(collapse = "")),
      r_bracs = map_chr(data, ~ str_extract_all(., "[>)\\]]") %>% unlist(use.names = FALSE) %>% paste0(collapse = "")),
      bracs_ok = nchar(l_bracs) == nchar(r_bracs)
    ) %>%
    filter(!bracs_ok)
}
|
6f7296b0a2996c9d7bc2ace400078eabae80aa20
|
86a4fee27f34bc1c641bbd336ee8ad458bd8e135
|
/ReadIn_TRENDEFV2.r
|
18a4a4ba578b0da5f9cf17ccef27384851eee974
|
[] |
no_license
|
ETHPOP/BSPS2015Model
|
8b8d6bf11122133434e501f3e34b99bcb9cd4991
|
ee661a0834269a277d437b2722446a45abdf558a
|
refs/heads/master
| 2016-09-05T12:07:36.380607
| 2015-08-20T12:58:13
| 2015-08-20T12:58:13
| 40,672,143
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,586
|
r
|
ReadIn_TRENDEFV2.r
|
# Input script for the TRENDEF V2 ethnic population projection:
# reads mid-year population estimates, fertility, mortality and migration
# inputs from CSV files under MainDIR, leaving them as data frames in the
# global environment for the projection model to use.
# NOTE(review): the script relies on a hard-coded network path and repeated
# setwd() calls; it will only run on a machine where MainDIR exists.
#setwd("C:\\Workspace\\Projections\\Projections\\Rprojection\\Januar2010Projections\\Trend")
#setwd("C:/WorkSpace/CSAP/EthnicProjections/Projections/Rprojection/August2011Version2Projections/4TRENDEFV2")
#DIR<-"E:/ProjectionTestRuns/4TRENDEFV2"
#DIR<-"N:/ProjectionTestRuns/4TRENDEFV2"
MainDIR<-"N:\\Earth&Environment\\Geography\\Research-2\\Projects\\NewETHPOP\\Projections_Pia\\BSPSProjection"
setwd(MainDIR)
#Reading in 2001 population midyear ethnic population estimates
# [-1] drops the first column of each CSV (a row-index column written by R).
setwd(".\\Population")
population<-read.csv("MYpop2001.csv")[-1]
####################
###### Fertility #####
####################
setwd(MainDIR)
setwd(".\\Fertility")
###Sex ratio
bm<-0.513 ##birth proportion men
bf<-0.487 ##birth proportion women
###Possible assumed TFR at
asTFR<-1.84
##Fertility rates (one file per year, 2001-2007)
allfert2001<-read.csv("FERT2001.csv")[-1]
allfert2002<-read.csv("FERT2002.csv")[-1]
allfert2003<-read.csv("FERT2003.csv")[-1]
allfert2004<-read.csv("FERT2004.csv")[-1]
allfert2005<-read.csv("FERT2005.csv")[-1]
allfert2006<-read.csv("FERT2006.csv")[-1]
allfert2007<-read.csv("FERT2007.csv")[-1]
##Ethnic mixing matrix
mixingmatrix<-read.csv("Mixingmatrix_dec09.csv")
##Fix fertility rates from 2008 onwards
#######This might be changed to BSPS
# 2008+ rates are the 2007 rates rescaled so the mean TFR equals asTFR.
Rsfert2007 <-rowSums(allfert2007)
fertfact<-asTFR/mean(Rsfert2007)
allfert2008 <- allfert2007* fertfact
####################
###### Mortality #####
####################
# Mortality inputs are prepared by a separate script rather than read here.
setwd(MainDIR)
setwd(".\\Rfiles")
source("Mortality.r")
####################
###### Migration #####
####################
setwd(MainDIR)
setwd(".\\Migration\\IN")
##Internal migration-inmigration
allinm2001<-read.csv("INTI2001.csv")[-1]
allinm2002<-read.csv("INTI2002.csv")[-1]
allinm2003<-read.csv("INTI2003.csv")[-1]
allinm2004<-read.csv("INTI2004.csv")[-1]
allinm2005<-read.csv("INTI2005.csv")[-1]
allinm2006<-read.csv("INTI2006.csv")[-1]
allinm2007<-read.csv("INTI2007.csv")[-1]
##Internal migration - outmigration
# NOTE(review): every year reads the same file INOU.csv, unlike the other
# flows which have one file per year -- confirm this is intentional.
setwd(MainDIR)
setwd(".\\Migration\\OUT")
alloutm2001<-read.csv("INOU.csv")[-1]
alloutm2002<-read.csv("INOU.csv")[-1]
alloutm2003<-read.csv("INOU.csv")[-1]
alloutm2004<-read.csv("INOU.csv")[-1]
alloutm2005<-read.csv("INOU.csv")[-1]
alloutm2006<-read.csv("INOU.csv")[-1]
alloutm2007<-read.csv("INOU.csv")[-1]
#setwd("C:/WorkSpace/CSAP/EthnicProjections/Projections/Rprojection/August2011Version2Projections/4TRENDEFV2")
setwd(MainDIR)
setwd(".\\Migration\\IMM")
##International migration-Immigration
allImm2001<-read.csv("IMMI2001.csv")[-1]
allImm2002<-read.csv("IMMI2002.csv")[-1]
allImm2003<-read.csv("IMMI2003.csv")[-1]
allImm2004<-read.csv("IMMI2004.csv")[-1]
allImm2005<-read.csv("IMMI2005.csv")[-1]
allImm2006<-read.csv("IMMI2006.csv")[-1]
allImm2007<-read.csv("IMMI2007.csv")[-1]
allImm2008<-read.csv("IMMI2008.csv")[-1]
allImm2009<-read.csv("IMMI2009.csv")[-1]
allImm2010<-read.csv("IMMI2010.csv")[-1]
allImm2011<-read.csv("IMMI2011.csv")[-1]
allImm2012<-read.csv("IMMI2012.csv")[-1]
allImm2013<-read.csv("IMMI2013.csv")[-1]
allImm2014<-read.csv("IMMI2014.csv")[-1]
##International migration-Emigration
setwd(MainDIR)
setwd(".\\Migration\\EMI")
allEMflow2001<-read.csv("EMIG2001.csv")[-1]
allEMflow2002<-read.csv("EMIG2002.csv")[-1]
allEMflow2003<-read.csv("EMIG2003.csv")[-1]
allEMflow2004<-read.csv("EMIG2004.csv")[-1]
allEMflow2005<-read.csv("EMIG2005.csv")[-1]
allEMflow2006<-read.csv("EMIG2006.csv")[-1]
allEMflow2007<-read.csv("EMIG2007.csv")[-1]
allEMflow2008<-read.csv("EMIG2008.csv")[-1]
allEMflow2009<-read.csv("EMIG2009.csv")[-1]
allEMflow2010<-read.csv("EMIG2010.csv")[-1]
allEMflow2011<-read.csv("EMIG2011.csv")[-1]
allEMflow2012<-read.csv("EMIG2012.csv")[-1]
allEMflow2013<-read.csv("EMIG2013.csv")[-1]
allEMflow2014<-read.csv("EMIG2014.csv")[-1]
#fix(allemrates2001)
setwd(MainDIR)
setwd(".\\AdditionalFiles")
#ethgroups5680<-read.csv("ethgroups5680.csv")
#LA5680<-read.csv("LA5680.csv")
# Lookup of all 327 local authorities crossed with 12 ethnic groups
# (presumably LA id = X2, ethnic group id = X3 -- TODO confirm with callers).
GorLaEth<-cbind.data.frame(
rep(1:327,12),
rep(1:12,each=327))
names(GorLaEth)<-c("X2","X3")
#GORSlist<-read.csv("GORSlist.csv")
#GORSlist2<-GORSlist[1:355,]
#GorLaEth<-cbind(GORSlist2$GorNo,LA5680$LA,ethgroups5680$ethgroup)
#zones<-read.csv("Zones.csv")
#zones_long<-read.csv("Zones_long.csv")
#setwd("C:/WorkSpace/CSAP/EthnicProjections/Projections/Rprojection/August2011Version2Projections/V2InputData/MortalityV2")
#emNo2001_2<-339475
#emNo2002_3<-360166
#emNo2003_4<-353421
#emNo2004_5<-338298
#emNo2005_6<-386859
#emNo2006_7<-406417
#emNo2007_8<-377191
#emNo2008_9<-334490
#emNo2009_10<-370810
#emNo2010_11<-365096
#emNo2011_12<-359643
#emNo2012_13<-350061
#emNo2013_14<-341032
#emNo2014_15<-330866
|
61fde7ff86f3d3b65ee030b175e7a8b3952059b8
|
f78f96c58629c2296e2ad3abb758f275f59afdd2
|
/tests/testthat/test-ts_resolve_names.R
|
ac6c4de1add5a53cb1732b5fb335a676cb9d953f
|
[
"MIT"
] |
permissive
|
joelnitta/taxastand
|
e1ff81a343abdd710e8a15882333b77c9a6ef2ff
|
3894436b120367976fb6331ced1c19d001c251f7
|
refs/heads/main
| 2022-10-06T19:45:08.438418
| 2022-09-20T07:20:58
| 2022-09-20T07:20:58
| 192,684,959
| 18
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,248
|
r
|
test-ts_resolve_names.R
|
# Tests for ts_resolve_names(): argument validation, then expected output
# both with and without the Docker-based taxon-tools backend.
data(filmy_taxonomy)
# Invalid argument types must raise informative errors.
test_that("Input checks work", {
  expect_error(
    ts_resolve_names(10, data.frame(genus = "Foogenus")),
    "query must be of class"
  )
  expect_error(
    ts_resolve_names(data.frame(genus = "Foogenus"), 10),
    "ref_taxonomy must be of class"
  )
})
# Happy path using the dockerised matcher (skipped when Docker is absent).
test_that("Produces expected output with docker", {
  skip_if_no_docker()
  # Query a misspelled name
  match_results <- ts_match_names(
    query = "Gonocormus minutum",
    reference = unique(filmy_taxonomy$scientificName),
    simple = TRUE, docker = TRUE)
  # Both a pre-computed match table and a raw character query resolve
  # to a data frame.
  expect_s3_class(
    ts_resolve_names(match_results, filmy_taxonomy),
    "data.frame")
  expect_s3_class(
    ts_resolve_names("Gonocormus minutum", filmy_taxonomy, docker = TRUE),
    "data.frame")
  expect_snapshot(match_results)
})
# Same checks against a locally installed taxon-tools binary.
test_that("Produces expected output without docker", {
  skip_if_no_tt()
  # Query a misspelled name
  match_results <- ts_match_names(
    query = "Gonocormus minutum",
    reference = unique(filmy_taxonomy$scientificName),
    simple = TRUE)
  expect_s3_class(
    ts_resolve_names(match_results, filmy_taxonomy),
    "data.frame")
  expect_s3_class(
    ts_resolve_names("Gonocormus minutum", filmy_taxonomy),
    "data.frame")
  expect_snapshot(match_results)
})
|
a0e1a7a7ea9224ddf41a3b874b96cc39a20a102f
|
d8ad1a37ee95792d1ac896526b60d2ca4d05192e
|
/Classificacao e pesquisa de dados/Lab1CPD/GraficoLab01.R
|
fb548c3e5987ce00d2cd157f2a6db38a875f94f4
|
[] |
no_license
|
Ghilga/trabalhos-de-programacao-UFRGS
|
bdd438f8d14d8deb5aa6d469b9e0e6ebf047a894
|
15aa33c8e0d621437e5bdd586db8a0662302c15e
|
refs/heads/master
| 2020-03-17T02:36:02.847591
| 2018-06-10T01:35:10
| 2018-06-10T01:35:10
| 133,196,754
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 844
|
r
|
GraficoLab01.R
|
# Plot measured runtimes of three sorting algorithms against input size.
#
# NOTE(review): the original script referenced undefined objects (`x`, `y`,
# `y2`) and plotted a 7-element `y3` against 3 x-values, so it could not run.
# The stray plot calls were removed and the binary-insertion and bubble-sort
# series are drawn as the second and third curves -- confirm against the
# lab's intent.
data <- read.csv("Table.csv")
xdata <- c(1000, 10000, 100000)               # input sizes
Bubble <- data$tempo[1:3]                     # bubble-sort times from Table.csv
insertion <- c(2.158, 158.35, 16053.568)      # insertion-sort times
BinInsertion <- c(2.013, 136.126, 1303.799)   # binary-insertion-sort times
# First curve: insertion sort (sets up axes; y-limit covers the slowest run)
plot(xdata, insertion, type="o", col="blue", pch="o", lty=1, ylim=c(0,max(insertion)) )
# Second curve: binary insertion sort
points(xdata, BinInsertion, col="red", pch="*")
lines(xdata, BinInsertion, col="red", lty=2)
# Third curve: bubble sort
points(xdata, Bubble, col="dark red", pch="+")
lines(xdata, Bubble, col="dark red", lty=3)
|
e3fbfea7087304ed76c6c33596ba0c7407d61e76
|
fa68ef96ae1da0b56d535855ec1035cc35d0a490
|
/R/zzz.R
|
729760bde098c3b904518a6d3cc68cbfb1da2cb7
|
[] |
no_license
|
TanguyBarthelemy/rjdoutliers
|
a03e16964ea919c40c03b0dcb095b3677b49bd73
|
2691a8388ec3a7aa35b8f32b3b4743a04d43356c
|
refs/heads/main
| 2023-07-21T04:56:36.607754
| 2021-06-02T19:00:54
| 2021-06-02T19:00:54
| 546,089,599
| 0
| 0
| null | 2022-10-05T14:00:52
| 2022-10-05T14:00:51
| null |
UTF-8
|
R
| false
| false
| 813
|
r
|
zzz.R
|
#' @import rJava RProtoBuf
NULL
# Package load hook: attach the bundled Java jars via rJava, check the JVM
# version, and register the protobuf descriptors shipped in inst/proto.
.onLoad <- function(libname, pkgname) {
  # For debugging: to see if Jars are effectively loaded
  # options(java.parameters = "-verbose:class")
  # TODO : devtools will look only in RJDemetra3\java for JAR files so copied them there too
  result <- .jpackage(pkgname, lib.loc=libname)
  if (!result) stop("Loading java packages failed")
  # Check the running Java version; 1.8.0 or higher is required.
  jversion <- .jcall('java.lang.System','S','getProperty','java.version')
  # NOTE(review): this is a lexicographic string comparison, not a numeric
  # version comparison; it happens to accept "9"/"11.x"-style versions
  # (they sort above "1.8.0") but is fragile -- consider parsing the version.
  if (jversion < "1.8.0") {
    stop(paste("Your java version is ", jversion,
    ". Need 1.8.0 or higher.", sep=""))
  } else {
    cat("Java requirements fullfilled, found version ",jversion,"\n")
  }
  # Load every .proto message definition bundled with the package.
  proto.dir <- system.file("proto", package = pkgname)
  readProtoFiles2(protoPath = proto.dir)
}
|
f7281ed48a126996e3367d9c4f80a692335988ac
|
28583a31129a7b363413653717b55d2f2460cc15
|
/LBSPR/Raw Files/Adrian Assessment Code/All_Functions/GridSearchFunction.r
|
32ed375c885922621f91e229aee0a7fcca3518d1
|
[] |
no_license
|
DanOvando/Galapagos-Code
|
209409033684c0e75e36acc8d23b7ff2e806cb21
|
7dd7bb23ce3af766f9d81451652f05c03b4237c9
|
refs/heads/master
| 2021-01-01T15:18:24.158617
| 2014-07-10T23:25:50
| 2014-07-10T23:25:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,702
|
r
|
GridSearchFunction.r
|
# Estimate selectivity (SelL50, SelL95) and fishing mortality (expressed as
# F/M) by minimising the negative log-likelihood returned by
# FindLikeFunction(). Three stages:
#   Stage 1: coarse grid over the full parameter bounds.
#   Stage 2: fine grid centred on the stage-1 best fit.
#   Stage 3: nlm() minimisation started from the stage-2 best fit.
#
# Args:
#   LowerSelL50/UpperSelL50, LowerSelL95/UpperSelL95: selectivity bounds.
#   InitialSelPrec/FinalSelPrec: selectivity grid steps (stage 1 / stage 2).
#   LowerF_M/UpperF_M, InitialF_MPrec/FinalF_MPrec: F/M bounds and grid
#     steps; converted to F internally by multiplying by genM.
#   ObservedFreqVector, ObservedPropVector: observed length-composition data.
#   genM, genLinf, genLinfCV, genLengthSD, genK, gent0, genAges, genLengths,
#   genminLen, genmaxLen, gennumClass: biological inputs forwarded verbatim
#     to FindLikeFunction().
#   GridSearchResultPlot: if TRUE, plot likelihood profiles for stages 1-2.
#   LikePlot: forwarded to FindLikeFunction().
#
# Returns: a list with, per stage, the per-parameter likelihood profiles,
#   the best estimates (SelL50, SelL95, F/M, likelihood) and elapsed time;
#   stage 3 additionally returns the nlm() Hessian.
RunGridSearchFunction <- function(LowerSelL50, UpperSelL50, LowerSelL95, UpperSelL95, InitialSelPrec, FinalSelPrec, LowerF_M, UpperF_M, InitialF_MPrec, FinalF_MPrec,
ObservedFreqVector, ObservedPropVector, genM, genLinf, genLinfCV, genLengthSD, genK, gent0, genAges, genLengths, genminLen, genmaxLen, gennumClass, GridSearchResultPlot, LikePlot=FALSE)
{
  # The Grid Search is in two stages. (More stages could be added if needed)
  # Stage 1 searches over a wide range of parameter space, but with reasonably large distances between each parameter
  # Stage 2 searches over a small range of parameter space around the best fit values from Stage 1, but with a much smaller increment between parameter values
  ###########
  # Stage 1 #
  ###########
  # Set up Stage 1 Search Grid
  IntiSel50Vec <- seq(from=LowerSelL50, to=UpperSelL50, by= InitialSelPrec)
  IntiSel95Vec <- seq(from=LowerSelL95, to=UpperSelL95, by= InitialSelPrec)
  InigenFVec <- seq(from=LowerF_M * genM, to=UpperF_M * genM, by=InitialF_MPrec*genM)
  Stage1Grid <- expand.grid(IntiSel50Vec, IntiSel95Vec, InigenFVec)
  RealisticSelL95 <- which(Stage1Grid[,2] > Stage1Grid[,1]) # Identify realistic SelL95. i.e. SelL95 > SelL50
  Stage1Grid <- Stage1Grid[RealisticSelL95,]
  Stage1Grid <- as.matrix(Stage1Grid) # Probably a much better way to convert it to be accepted by the sapply function, but this is my workaround for the moment
  NumRows <- nrow(Stage1Grid)
  Stage1Likes <- NULL
  bestLikes1 <- NULL
  stm <- proc.time()
  print("Stage 1 Started")
  print(paste("Stage 1 Grid. Number of rows =", NumRows, sep=""))
  # Evaluate the likelihood at every grid point.
  Stage1Likes <- sapply(1:NumRows, function (X) FindLikeFunction(ObservedFreqVector, ObservedPropVector, genM, genLinf, genLinfCV, genLengthSD, genK, gent0, genAges, genLengths, genminLen, genmaxLen, gennumClass, ParVec=Stage1Grid[X,], LikePlot))
  Stage1Time <- round((proc.time() - stm)[3],0)
  print("Stage 1 Completed")
  print(paste("Stage 1 duration: ", Stage1Time, " seconds", sep=""))
  # Set up results of Stage 1 in a matrix. Only best fit for each parameter is saved
  # (a profile likelihood: for each value of one parameter, the minimum over
  # all values of the other two).
  Stage1SelL50 <- unique(Stage1Grid[,1])
  Stage1SelL95 <- unique(Stage1Grid[,2])
  Stage1genF <- unique(Stage1Grid[,3])
  Stage1SelL50Mat <- matrix(NA, length(Stage1SelL50), 2)
  Stage1SelL95Mat <- matrix(NA, length(Stage1SelL95), 2)
  Stage1genFMMat <- matrix(NA, length(Stage1genF), 2)
  Stage1SelL50Mat[,1] <- Stage1SelL50
  Stage1SelL95Mat[,1] <- Stage1SelL95
  Stage1genFMMat[,1] <- Stage1genF/genM
  for (j in seq_along(Stage1SelL50)) {
    temp <- which(Stage1Grid[,1]== Stage1SelL50[j])
    Stage1SelL50Mat[j,2] <- min(Stage1Likes[temp])
  }
  for (j in seq_along(Stage1SelL95)) {
    temp <- which(Stage1Grid[,2]== Stage1SelL95[j])
    Stage1SelL95Mat[j,2] <- min(Stage1Likes[temp])
  }
  for (j in seq_along(Stage1genF)) {
    temp <- which(Stage1Grid[,3]== Stage1genF[j])
    Stage1genFMMat[j,2] <- min(Stage1Likes[temp])
  }
  bestLikes1 <- min(Stage1genFMMat[,2])
  if (GridSearchResultPlot == TRUE) {
    par(mfrow=c(2,3))
    # YLim <- c(floor(min(Stage1Likes)), min(500, abs(100*min(Stage1Likes))))
    plot(Stage1SelL50Mat, bty="l", xlab="Sel L50", ylab="Likelihood", type="b")
    plot(Stage1SelL95Mat, bty="l", xlab="Sel L95", ylab="Likelihood", type="b")
    plot(Stage1genFMMat, bty="l", xlab="F/M", ylab="Likelihood", type="b")
  }
  MinStage1Likes <- min(Stage1Likes)
  # Take a single best row; ties previously returned a multi-row matrix,
  # which broke the scalar indexing below.
  Stage1BestValues <- Stage1Grid[which(Stage1Likes==MinStage1Likes)[1], ]
  Stage1BestValues[3] <- Stage1BestValues[3]/genM
  Stage1BestValues[4] <- bestLikes1
  names(Stage1BestValues) <- c("SelL50 ", "SelL95 ", "F/M est", "Like")
  ###########
  # Stage 2 #
  ###########
  # Setup Stage 2 Grid: one stage-1 step either side of the stage-1 optimum.
  SelRange <- InitialSelPrec
  FparRange <- InitialF_MPrec
  Stage2Sel50Vec <- seq(from= max(round(Stage1BestValues[1],2) - SelRange,0.1), to= min(round(Stage1BestValues[1],2) + SelRange, 1), by=FinalSelPrec)
  Stage2Sel95Vec <- seq(from= max(round(Stage1BestValues[2],2) - SelRange,0.1), to= min(round(Stage1BestValues[2],2) + SelRange, 1), by=FinalSelPrec)
  Stage2genFVec <- seq(from= max(round(Stage1BestValues[3],2) - FparRange, 0.1)* genM, to= min(round(Stage1BestValues[3],2) + FparRange, 30) * genM, by=FinalF_MPrec * genM)
  Stage2Grid <- expand.grid(Stage2Sel50Vec, Stage2Sel95Vec, Stage2genFVec)
  RealisticSelL95 <- which(Stage2Grid[,2] > Stage2Grid[,1]) # Identify realistic SelL95. i.e. SelL95 > SelL50
  Stage2Grid <- Stage2Grid[RealisticSelL95,]
  Stage2Grid <- as.matrix(Stage2Grid) # Probably a much better way to convert it to be accepted by the sapply function, but this is my workaround for the moment
  Stage2Grid <- as.array(Stage2Grid)
  Stage1Likes <- NULL
  bestLikes2 <- NULL
  stm <- proc.time()
  print("Stage 2 Started")
  print(paste("Stage 2 Grid. Number of rows =", nrow(Stage2Grid), sep=""))
  Stage2Likes <- sapply(1:nrow(Stage2Grid), function (X) FindLikeFunction(ObservedFreqVector, ObservedPropVector, genM, genLinf, genLinfCV,genLengthSD, genK, gent0, genAges, genLengths, genminLen, genmaxLen, gennumClass, ParVec=Stage2Grid[X,], LikePlot))
  Stage2Time <- round((proc.time() - stm)[3],0)
  print("Stage 2 Completed")
  print(paste("Stage 2 duration: ", Stage2Time, " seconds", sep=""))
  # Set up results of Stage 2 in a matrix. Only best fit for each parameter is saved
  Stage2SelL50 <- unique(Stage2Grid[,1])
  Stage2SelL95 <- unique(Stage2Grid[,2])
  Stage2genF <- unique(Stage2Grid[,3])
  Stage2SelL50Mat <- matrix(NA, length(Stage2SelL50), 2)
  Stage2SelL95Mat <- matrix(NA, length(Stage2SelL95), 2)
  Stage2genFMMat <- matrix(NA, length(Stage2genF), 2)
  Stage2SelL50Mat[,1] <- Stage2SelL50
  Stage2SelL95Mat[,1] <- Stage2SelL95
  Stage2genFMMat[,1] <- Stage2genF / genM
  for (j in seq_along(Stage2SelL50)) {
    temp <- which(Stage2Grid[,1]== Stage2SelL50[j])
    Stage2SelL50Mat[j,2] <- min(Stage2Likes[temp])
  }
  for (j in seq_along(Stage2SelL95)) {
    temp <- which(Stage2Grid[,2]== Stage2SelL95[j])
    Stage2SelL95Mat[j,2] <- min(Stage2Likes[temp])
  }
  for (j in seq_along(Stage2genF)) {
    temp <- which(Stage2Grid[,3]== Stage2genF[j])
    Stage2genFMMat[j,2] <- min(Stage2Likes[temp])
  }
  bestLikes2 <- min(Stage2genFMMat[,2])
  if (GridSearchResultPlot == TRUE) {
    plot(Stage2SelL50Mat, bty="l", xlab="Sel L50", ylab="Likelihood", type="b")
    plot(Stage2SelL95Mat, bty="l", xlab="Sel L95", ylab="Likelihood", type="b")
    plot(Stage2genFMMat, bty="l", xlab="F/M", ylab="Likelihood", type="b")
  }
  MinStage2Likes <- min(Stage2Likes)
  tempNum <- which(Stage2Likes==MinStage2Likes)
  Stage2BestValues <- Stage2Grid[tempNum[1], ]
  Stage2BestValues[3] <- Stage2BestValues[3]/genM
  Stage2BestValues[4] <- bestLikes2
  names(Stage2BestValues) <- c("SelL50 ", "SelL95 ", "F/M est", "Like")
  ##################
  # Stage 3: Optim #
  ##################
  stm <- proc.time()
  print("Stage 3 Started")
  print(paste("Stage 3 = optim search from stage 2 value"))
  RunLike <- function(ParVec,...) {
    temp <- FindLikeFunction(ObservedFreqVector, ObservedPropVector, genM, genLinf, genLinfCV,genLengthSD, genK, gent0, genAges, genLengths, genminLen, genmaxLen, gennumClass, ParVec, LikePlot)
    return(temp)
  }
  # Start from the single best stage-2 row; ties previously produced a
  # multi-row matrix here, which would break nlm().
  stt <- Stage2Grid[tempNum[1], ]
  # res <- optim(stt, RunLike, hessian=T, method="L-BFGS-B", lower=c(0.01,0.02,0.001), upper=c(0.98,0.999,4)) # Changed method
  # Stage3BestValues <- c(res$par[1:2], res$par[3]/genM, res$value)
  res <- nlm(RunLike, p=stt, hessian=T, steptol=1e-4, gradtol=1e-4)
  Stage3BestValues <- c(res$estimate[1:2], res$estimate[3]/genM, res$minimum)
  Stage3Time <- round((proc.time() - stm)[3],0)
  print("Stage 3 Completed")
  print(paste("Stage 3 duration: ", Stage3Time, " seconds", sep=""))
  Output <- NULL
  Output$Stage1$Sel50Mat <- Stage1SelL50Mat
  Output$Stage1$Sel95Mat <- Stage1SelL95Mat
  Output$Stage1$genFMat <- Stage1genFMMat
  Output$Stage1$BestEstimates <- Stage1BestValues
  Output$Stage1$ModTime <- Stage1Time
  Output$Stage2$Sel50Mat <- Stage2SelL50Mat
  Output$Stage2$Sel95Mat <- Stage2SelL95Mat
  Output$Stage2$genFMat <- Stage2genFMMat
  Output$Stage2$BestEstimates <- Stage2BestValues
  Output$Stage2$ModTime <- Stage2Time
  # BUG FIX: `res` is an nlm() fit, which returns $estimate/$minimum rather
  # than optim()'s $par/$value; the previous code read res$par here, which is
  # NULL for nlm, leaving the Stage3 slots empty.
  Output$Stage3$Sel50Mat <- res$estimate[1]
  Output$Stage3$Sel95Mat <- res$estimate[2]
  Output$Stage3$genFMat <- res$estimate[3]/genM
  Output$Stage3$BestEstimates <- Stage3BestValues
  Output$Stage3$ModTime <- Stage3Time
  Output$Stage3$Hessian <- res$hessian
  return(Output)
}
|
3c6842f010f42c6caee7bcd34561e08e2a8bf938
|
f0352034f8467e2c82a31443ae6e3125039879ac
|
/man/assignUnassigned.Rd
|
de5ca1de06b7476b84cb811994be2e3b6340e944
|
[] |
no_license
|
epurdom/clusterExperiment
|
8d5d43a250a1a3c28d4745aae4b72285458ba1a2
|
ae86ee09697c13ccd5d32f964e28ab7d82b455d6
|
refs/heads/master
| 2022-11-04T01:54:19.806886
| 2022-10-11T22:00:27
| 2022-10-11T22:00:27
| 47,139,877
| 39
| 15
| null | 2021-01-27T21:26:28
| 2015-11-30T19:06:53
|
R
|
UTF-8
|
R
| false
| true
| 3,235
|
rd
|
assignUnassigned.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/assignUnassigned.R
\name{assignUnassigned}
\alias{assignUnassigned}
\alias{assignUnassigned,ClusterExperiment-method}
\alias{removeUnassigned,ClusterExperiment-method}
\alias{removeUnassigned}
\title{Assign unassigned samples to nearest cluster}
\usage{
\S4method{assignUnassigned}{ClusterExperiment}(
object,
whichCluster = "primary",
clusterLabel,
makePrimary = TRUE,
whichAssay = 1,
reduceMethod = "none",
...
)
\S4method{removeUnassigned}{ClusterExperiment}(object, whichCluster = "primary")
}
\arguments{
\item{object}{A Cluster Experiment object}
\item{whichCluster}{argument that can be a single numeric or character value
indicating the \emph{single} clustering to be used. Giving values that result in more than one clustering will result in an error. See details of \code{\link{getClusterIndex}}.}
\item{clusterLabel}{if missing, the current cluster label of the cluster will
be appended with the string "_AllAssigned".}
\item{makePrimary}{whether to make the added cluster the primary cluster
(only relevant if \code{y} is a vector)}
\item{whichAssay}{which assay to use to calculate the median per cluster and
take dimensionality reduction (if requested)}
\item{reduceMethod}{character. A method (or methods) for reducing the size of
the data, either by filtering the rows (genes) or by a dimensionality
reduction method. Must either be 1) must match the name of a built-in
method, in which case if it is not already existing in the object will be
passed to \code{\link{makeFilterStats}} or \code{link{makeReducedDims}}, or
2) must match a stored filtering statistic or dimensionality reduction in
the object}
\item{...}{arguments passed to \code{\link{getReducedData}} specifying the
dimensionality reduction (if any) to be taken of the data for calculating
the medians of the clusters}
}
\value{
The function \code{assignUnassigned} returns a
\code{ClusterExperiment} object with the unassigned samples assigned to one
of the existing clusters.
The function \code{removeUnassigned} returns a
\code{ClusterExperiment} object with the unassigned samples removed.
}
\description{
Assigns the unassigned samples in a cluster to the nearest
cluster based on distance to the medians of the clusters.
}
\details{
The function \code{assignUnassigned} calculates the median values of
each variable for each cluster, and then calculates the euclidean distance
of each unassigned sample to the median of each cluster. Each unassigned
sample is assigned to the cluster for which it closest to the median.
All unassigned samples in the cluster are given a clustering,
regardless of whether they are classified as -1 or -2.
\code{removeUnassigned} removes all samples that are unclustered
(i.e. -1 or -2 assignment) in the designated cluster of \code{object} (so
they may be unclustered in other clusters found in
\code{clusterMatrix(object)}).
}
\examples{
#load CE object
\dontrun{
data(rsecFluidigm)
smallCE<-rsecFluidigm[,1:50]
#assign the unassigned samples
assignUnassigned(smallCE, makePrimary=TRUE)
#note how samples are REMOVED:
removeUnassigned(smallCE)
}
}
\seealso{
\code{\link{getReducedData}}
}
|
93f00aa6fa8494a437800b0867597032a6bfed53
|
55bfb6f0c613d1beb67b40aa99e531eb644d4351
|
/R/get-sequence.R
|
7080d34474298848224ed84bf5e3219c8a887553
|
[] |
no_license
|
EricBryantPhD/mutagenesis
|
3bc391acb86b4796eff0c2ae826d6c65af507d6f
|
0fe642a2addf0734f31df29aa3be1c069cf420d2
|
refs/heads/master
| 2020-12-09T11:03:54.657411
| 2018-01-27T23:10:32
| 2018-01-27T23:10:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,852
|
r
|
get-sequence.R
|
#' Get genomic sequences given ranges
#'
#' @param chr `[character]`
#'
#' Chromosome names. Must match names returned by `names(genome)`.
#'
#' @param strand `[character]`
#'
#' Sequence strands (+|-).
#'
#' @param start `[integer]`
#'
#' Start coordinates of ranges.
#'
#' @param end `[integer]`
#'
#' End coordinates of ranges
#'
#' @param genome `[BSgenome|DNAStringSet]`
#'
#' A reference genome. See Details.
#'
#' @details The reference genome can be either a `BSgenome` object from a
#'   BSgenome reference package (see [BSgenome::BSgenome]), or a `DNAStringSet`
#'   object (see [Biostrings::DNAStringSet]). `BSgenome` objects offer faster
#'   sequence aquisition, but are limited to existing BSgenome packages
#'   (see [BSgenome::available.genomes]) whereas `DNAStringSet` objects can
#'   be easily created from any standard FASTA file using
#'   [Biostrings::readDNAStringSet].
#'
#' @return A character vector of sequences, one per input range.
#'
#' @importFrom Biostrings getSeq
#' @importFrom GenomicRanges GRanges
#' @importFrom tibble tibble
#' @export
#' @md
get_genomic_sequence <- function(chr, strand, start, end, genome) {
  # Bundle the coordinates into a GRanges object, then pull the matching
  # sequences out of the reference genome.
  coords <- tibble::tibble(seqnames = chr, strand, start, end)
  ranges <- as(coords, "GRanges")
  seqs <- Biostrings::getSeq(genome, ranges)
  as.character(seqs)
}
#' @rdname get_genomic_sequence
#' @export
get_genomic_variant <- function(chr, strand, start, end, vcf, genome) {
  # NOTE(review): this function looks unfinished. It calls
  # get_sequence_range(), which is not defined in this file (perhaps
  # get_genomic_sequence() was intended), and uses `ref_start`, `ref_end`
  # and `alt`, none of which are parameters or local variables (presumably
  # they should be extracted from `vcf`, which is currently unused).
  # `reverse_complement` is also not imported here. Confirm before use.
  upstream <- get_sequence_range(genome, chr, '+', start, ref_start - 1L)
  dnstream <- get_sequence_range(genome, chr, '+', ref_end + 1L, end)
  plus_strand <- stringr::str_c(upstream, alt, dnstream)
  ifelse(strand == '+', plus_strand, reverse_complement(plus_strand))
}
#' @rdname get_genomic_sequence
#' @export
get_coding_sequence <- function(chr, strand, start, end, cds, genome) {
  # TODO: unimplemented placeholder -- the empty body returns NULL.
}
#' @rdname get_genomic_sequence
#' @export
get_coding_variant <- function(chr, strand, start, end, cds, vcf, genome) {
  # TODO: unimplemented placeholder -- the empty body returns NULL.
}
|
ff4b353aa6af09fa5d2b59cab872e0413612d5ce
|
aec820a0c7109fe2184b7d956742915023fd30c1
|
/R_Operation_Sequencing/gantt_johnson.R
|
17d71df1182d65ec671d6540099f823d746e4ed8
|
[] |
no_license
|
hendry062105/Practice_2020
|
350dc2996652c7f658157d08946a1c78da1fa240
|
98f26b1b599c9d09f2c8d08cdc362c7823e887be
|
refs/heads/main
| 2023-08-27T18:47:12.021806
| 2021-10-14T22:30:20
| 2021-10-14T22:30:20
| 417,289,588
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,183
|
r
|
gantt_johnson.R
|
# Draw Gantt charts for a two-machine (M1/M2) Johnson-rule schedule and print
# per-machine idle times plus the makespan.
#
# Args:
#   t0: schedule start time (also the left limit of the time axis).
#   starts, ends: task start/end times; the last element of `ends` is taken
#     as the schedule's finishing time (makespan).
#
# NOTE(review): the function reads several names that are not defined here
# and must already exist in the calling environment: limits_interval_v2(),
# `trabajos` (the job list, used only for colours) and `idle` (per-machine
# idle times) -- confirm they are set up before calling.
gantt_johnson=function(t0,starts,ends){
limits_interval_v2()
df_limits = data.frame(starts,ends)
library(plotrix)
# First chart: one bar per machine, coloured uniformly.
gantt.info<-list(labels=c("M1","M2"),
starts=starts,
ends=ends,
priorities=c(1,4))
# Integer time axis labelled with month abbreviations starting at t0.
months <- seq(t0, by=1, length.out=ends[length(ends)])
monthslab <- format(months, format="%b")
vgridpos<-(months)#,format=Ymd.format)
vgridlab<-monthslab
colfunc <- colorRampPalette(c("darkgoldenrod1"))
timeframe <- c(t0,ends[length(ends)])
gantt.chart(gantt.info, taskcolors=colfunc(length(gantt.info$labels)),xlim=timeframe, main="Gantt Chart",
priority.legend=TRUE,vgridpos=vgridpos,vgridlab=vgridlab,hgrid=TRUE
)
# Second chart: same bars, but one colour per job (two bars per job, one on
# each machine), with an integer-valued time grid.
colores=c();for (i in 1:length(trabajos)){colores=c(colores,rep(rainbow(length(trabajos))[i],2))}
gantt.chart(gantt.info,vgridlab=t0:ends[length(ends)],vgridpos=t0:ends[length(ends)],
main="Gantt Chart",
taskcolors=colores,#c(1,2,3,4,5,6,7,8,"purple")
border.col="black")
# Summary statistics printed to the console.
makespan=df_limits$ends[length(df_limits$ends)]
total_idle_time=sum(idle)
print(t(data.frame(idle)))
print(t(data.frame(makespan,total_idle_time)))
}
|
4fe30af39d85675b3638458bea33b85425ec4164
|
dd5e447826e762b22762f8f522641b3172d71d9e
|
/intermediate-r/Functions in R-495.r
|
8d2eaf0ea3b74cdb16c5fc8a6ec73dd3a538f9be
|
[] |
no_license
|
Guarinho/Data_Analyst-R
|
32494e4fb31d4d7bf6c68c3e450d3d2be97e64aa
|
ff701fcdf4f8cc107364f7086cfaf32e40cfca3c
|
refs/heads/master
| 2022-11-25T04:54:37.117485
| 2020-08-02T14:51:54
| 2020-08-02T14:51:54
| 282,522,307
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,098
|
r
|
Functions in R-495.r
|
## 2. Components Of A Function ##

first_vec <- c(1, 5, 4, 2, 3, 7, 6)
second_vec <- c(9, 2, 1, 8, 3, 4, 5, 6, 10, 7, 12, 11)
third_vec <- c(8, 3, 5, 1, 7, 1, 10)

# Report which of two vectors is longer: "First", "Second", or
# "Equal Length" when they have the same number of elements.
find_longer_vector <- function(vec_one, vec_two) {
  len_diff <- length(vec_one) - length(vec_two)
  if (len_diff > 0) {
    "First"
  } else if (len_diff < 0) {
    "Second"
  } else {
    "Equal Length"
  }
}

first_vs_second <- find_longer_vector(first_vec, second_vec)
first_vs_third <- find_longer_vector(first_vec, third_vec)
## 4. Using Multiple Inputs ##

# Return TRUE when the first argument divides evenly by the second.
# (Parameter names are kept for interface compatibility, although `divisor`
# is semantically the dividend and vice versa.) The original floor()-based
# remainder is exactly R's modulo operator `%%`.
is_divisible <- function(divisor, dividend) {
  divisor %% dividend == 0
}

div_5731_by_11 <- is_divisible(5731, 11)
## 6. Handling Variable Numbers of Arguments ##

# Subtract each additional argument from `start`, left to right; with no
# extra arguments the result is `start` unchanged.
subtract_all <- function(start, ...) {
  Reduce(`-`, list(...), start)
}

first_subtraction <- subtract_all(10, 1, 2, 3)
second_subtraction <- subtract_all(100, 71, 22)
|
47d902919c85eaeb79467587cb88f9caeaf64fc7
|
25bae842912a19b7295da3b5e8dbdda94081b28f
|
/Exploratory_Analysis/Week2_Assignment/Code/Plot2.R
|
7bd2863c20411894ab049072e902e7ff29831320
|
[] |
no_license
|
prateeksarangi/DataScienceCoursera
|
97823363f49bc00c55d67e22fe58f2fb3e999691
|
4665a8ff1c922d669187b120bc859eaec2f359c1
|
refs/heads/master
| 2022-11-23T07:14:27.300289
| 2020-04-17T19:41:43
| 2020-04-17T19:41:43
| 228,541,800
| 1
| 0
| null | 2022-11-22T04:40:38
| 2019-12-17T05:43:36
|
HTML
|
UTF-8
|
R
| false
| false
| 710
|
r
|
Plot2.R
|
# Plot 2 of the Exploratory Data Analysis assignment: Global Active Power
# over 2007-02-01..2007-02-02, drawn as a line and saved to plot2.png.
# "?" marks missing values in the source file.
data <- read.csv("~/ExData_Plotting1/household_power_consumption.txt", sep=";"
                 , na.strings = "?"
                 ,colClasses = c("character", "character", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric")
)
# Parse the Date column and keep only the two target days.
data$Date <- as.Date(data$Date, "%d/%m/%Y")
dataSelect <- data[(data$Date >= "2007-02-01" & data$Date <="2007-02-02"),]
par(col = "black", mfcol = c(1, 1))
# Line plot of the minute-by-minute readings; the default x axis is
# suppressed and replaced with day-of-week labels (1440 minutes per day).
plot( dataSelect$Global_active_power
      , type = "l"
      , ylab = "Global Active Power (kilowatts)"
      , xlab = ""
      , xaxt="n"
)
axis(1, at=c(1, 1440, 2880),labels = c("Thu", "Fri", "Sat"))
# Copy the on-screen plot to a 480x480 PNG.
dev.copy(png, file = "~/ExData_Plotting1/figure/plot2.png", width=480, height=480)
dev.off()
|
f4f9f020a8c0404a2cc07d35fdb2af56026d7b14
|
ca427633f94cf6e220bafbd6287791af0579183e
|
/bael_growth.R
|
841e8f60896a9cc5c88294ee40f58ac6932420de
|
[
"MIT"
] |
permissive
|
elahi/cupCorals
|
78f204b9a3e3c9b68029da231113e1d32af7bdf8
|
4150f7875ba283e9159a032c620a5b093e55ba97
|
refs/heads/master
| 2020-05-21T15:10:03.216493
| 2020-05-19T02:31:07
| 2020-05-19T02:31:07
| 41,657,067
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,682
|
r
|
bael_growth.R
|
#################################################
# Author: Robin Elahi
# Date: 151208
# Coral growth
# Figure 4
#################################################
# rm(list=ls(all=TRUE))
##### LOAD PACKAGES, DATA #####
# Load packages and the coral growth data; derive log areas and growth
# (delta), split the data by era, and truncate the historical data to the
# size range observed in the modern era.
library(lme4)
library(ggplot2)
theme_set(theme_classic(base_size = 12))
library(AICcmodavg)
library(dplyr)
# library(lmerTest)
dat <- read.csv("./data/bael_growthData.csv", header=TRUE, na.strings="NA")
source("./R/graphicalParams.R")
dat$ini.areaLN <- log(dat$ini.area)
dat$fin.areaLN <- log(dat$fin.area)
datSC <- dat
# Growth over the interval; strongly negative values flag suspect records.
datSC$delta <- with(datSC, fin.area - ini.area)
datSC %>% filter(delta < -0.1) # check coral B24 in photograph
# Create dataset for SC present
datSCpresent <- datSC[datSC$time == "present", ]
# Create dataset for SC past
datSCpast <- datSC[datSC$time == "past", ]
# Truncate initial size range to reflect the largest observed size in 2007-2010 (1.0 cm2)
set_graph_pars(ptype = "panel1")
presMaxSize <- max(datSCpresent$ini.area) # max size of present corals for growth data
# Diagnostic plot: past (black) vs present (red) with the candidate cut-offs
# and the 1:1 (no-growth) line.
plot(fin.area ~ ini.area, data = datSCpast)
points(fin.area ~ ini.area, data = datSCpresent, col = "red")
abline(v = presMaxSize, col = "red", lty = 3)
abline(v = 0.95, col = "black", lty = 3)
abline(a = 0, b = 1, col = "darkgray", lty = 2, lwd = 2)
# set size cut-off
sizeCutOff <- 0.95
datSCTrunc <- datSC[datSC$ini.area <= sizeCutOff, ]
head(datSC)
# Per-quadrat growth trends in the historical data.
ggplot(data = datSCpast, aes(ini.area, fin.area, color = quadOriginal)) +
  geom_point() + geom_smooth(method = "lm", se = FALSE) +
  geom_abline(slope = 1, intercept = 0, linetype = 'dashed')
##### USE LMER TO TEST THE EFFECT OF ERA ON CORAL GROWTH #####
### Use truncated historical dataset to match observed size range in modern dataset
lmerDat <- datSCTrunc
# lmerDat <- datSC
# Random effects are quadrat (original quadrats for historical study, not subquads)
# Don't need varying slopes by quadrat
# Candidate mixed models, from full interaction down to intercept-only,
# all fitted with ML (REML = FALSE) so they are comparable by AICc.
Cand.mod <- list()
Cand.mod[[1]] <- lmer(fin.area ~ ini.area * time + (1|quad),
                      REML = FALSE, data=lmerDat)
Cand.mod[[2]] <- lmer(fin.area ~ ini.area + time + (1|quad),
                      REML = FALSE, data=lmerDat)
Cand.mod[[3]] <- lmer(fin.area ~ ini.area + (1|quad),
                      REML = FALSE, data=lmerDat)
Cand.mod[[4]] <- lmer(fin.area ~ time + (1|quad),
                      REML = FALSE, data = lmerDat)
Cand.mod[[5]] <- lmer(fin.area ~ 1 + (1|quad),
                      REML = FALSE, data=lmerDat)
#create a vector of names to trace back models in set
mod_numbers <- paste("Cand.mod", 1:length(Cand.mod), sep=" ")
mod_text <- c("Era x Size", "Era + Size", "Size", "Era", "Null model")
#generate AICc table with names
mod.aicctab <- aictab(cand.set= Cand.mod, modnames= mod_text,
                      sort=TRUE, second.ord=TRUE) # second.ord =TRUE means AICc is used
print(mod.aicctab, digits=2, LL=TRUE)
write.csv(mod.aicctab, "./output/growthAIC.csv")
summary(Cand.mod[[1]])
# Save lmerDat as different file
dat_growth <- lmerDat
##### GET MODEL PARAMETERS BY ERA #####
# Re-fit size-only models separately per era; the truncated past dataset keeps
# the historical size range comparable to the modern one.
datSCpastTrunc <- datSCpast[datSCpast$ini.area <= sizeCutOff, ]
summary(datSCpastTrunc)
presMod <- lmer(fin.area ~ ini.area + (1|quad),
REML = FALSE, data = datSCpresent)
pastMod <- lmer(fin.area ~ ini.area + (1|quad),
REML = FALSE, data = datSCpastTrunc)
# Same model on the full (untruncated) historical data, for comparison
pastModAll <- lmer(fin.area ~ ini.area + (1|quad),
REML = FALSE, data = datSCpast)
summary(presMod)
summary(pastMod)
summary(pastModAll)
# Residual SDs: growth variance estimates for the truncated vs. full past fits
sd(resid(pastMod))
sd(resid(pastModAll))
##### FINAL FIGURE - GROWTH SCALING BY ERA #####
datSC
datSCTrunc
ylab_growth <- expression(paste("Size at time t+3 (", cm^2, ")"))
xlab_growth <- expression(paste("Size at time t (", cm^2, ")"))
# Panel-label theme (unused below; presumably shared with sibling scripts)
ULClabel <- theme(plot.title = element_text(hjust = -0.15, vjust = 1, size = rel(1.2)))
# Growth scaling on the truncated dataset, colored/shaped by era
size1 <- ggplot(datSCTrunc, aes(ini.area, fin.area, color = time, shape = time)) +
ylab(ylab_growth) + xlab(xlab_growth) +
theme(legend.justification = c(0, 0), legend.position = c(0.5, 0)) +
theme(legend.title = element_blank()) +
geom_point(size = 2.5, alpha = 0.6,
position = position_jitter(h = 0.05)) +
scale_colour_manual(breaks = c("past", "present"),
values = c("darkgray", "black"),
labels = c("1969-1972", "2007-2010")) +
scale_shape_manual(breaks = c("past", "present"),
values = c(18, 20),
labels = c("1969-1972", "2007-2010"))
# Add per-era OLS fits plus the 1:1 (no-growth) reference line
sizePlot <- size1 +
geom_smooth(method = "lm", se = FALSE, size = 0.75) +
geom_abline(intercept = 0, slope = 1, linetype = 2, color = "black", size = 0.5)
sizePlot
# ggsave("./figs/growthPlot.pdf", height = 3.5, width = 3.5)
###
# Same figure on the untruncated data (all historical sizes)
sizeAll <- ggplot(datSC, aes(ini.area, fin.area, color = time, shape = time)) +
ylab(ylab_growth) + xlab(xlab_growth) +
theme(legend.justification = c(0, 0), legend.position = c(0.5, 0)) +
theme(legend.title = element_blank()) +
geom_point(size = 2.5, alpha = 0.6,
position = position_jitter(h = 0.05)) +
scale_colour_manual(breaks = c("past", "present"),
values = c("darkgray", "black"),
labels = c("1969-1972", "2007-2010")) +
scale_shape_manual(breaks = c("past", "present"),
values = c(18, 20),
labels = c("1969-1972", "2007-2010"))
sizePlotAll <- sizeAll +
geom_smooth(method = "lm", se = FALSE, size = 0.75) +
geom_abline(intercept = 0, slope = 1, linetype = 2, color = "black", size = 0.5)
sizePlotAll
# ggsave("./figs/growthPlotAll.pdf", height = 3.5, width = 3.5)
|
28dda657ab198850512d07ddb6401fe2759ba905
|
1ed42775543aab64d1376bb521b2f7360862cd73
|
/webSite/images/demo/View Data/analyze/analyze.R
|
69d6738130c2b27e9d3a4b0b26bdef42bfe25138
|
[] |
no_license
|
yja2397/wayne-s-crop
|
6aa4bac7f7418c6d3dab7f6fd148c159b68dab44
|
ede7d95a4d205e079e2f787ac84146dfaefe31e5
|
refs/heads/master
| 2020-04-14T06:54:28.419328
| 2019-02-18T19:11:46
| 2019-02-18T19:11:46
| 163,698,625
| 1
| 0
| null | 2018-12-31T21:34:43
| 2018-12-31T21:34:42
| null |
UTF-8
|
R
| false
| false
| 4,191
|
r
|
analyze.R
|
# library, setup
library(ggplot2)
library(UsingR)
library(car)
library(gridExtra)
par("mar")
par(mar=c(1,1,1,1))
# 'test' = sensor readings; 'acc' = reference ("accurate") temperature/humidity pairs
test = read.csv("data.csv",header = TRUE)
acc = read.csv("tem_hum_com.csv", header = TRUE)
test <- na.omit(test)
# Rescale column 4 — presumably raw humidity stored x100; TODO confirm units
test[,4] <- test[,4] * 0.01
data = test[,3:4]
time = test[,1]
data$time = time
colnames(data) = c("temperature", "humidity", "time")
colnames(acc) = c("temperature_com", "humidity_com")
## View data
### information of current data
str(data)
### information of accurate data
str(acc)
### summarize of current data
summary(data)
### summarize of accurate data
summary(acc)
### histogram of current data
# NOTE(review): seq(nrow(data), nrow(data)-45) selects the LAST 46 rows in
# descending order; the subsequent order(time) re-sorts them chronologically.
current = data[seq(nrow(data), nrow(data)-45),]
current = current[c(order(current$time)),]
p = ggplot(data = current, aes(x=time, y=temperature))+geom_point()
p1 = ggplot(data = current, aes(x=time, y=humidity))+geom_point()
grid.arrange(p,p1,ncol=2)
## Correlation with temperature and humidity data
##### Source : http://rstudio-pubs-static.s3.amazonaws.com/189354_277dfb3a83a34a2abaae855b90fcf269.html
### correlation test for current data
cor.test(data$temperature, data$humidity)
### correlation test for accurate data
cor.test(acc$temperature_com, acc$humidity_com)
### ggplot for current data
ggplot(data = data, aes(x=temperature, y=humidity))+geom_count()+geom_smooth(method="lm")
### ggplot for accurate data
ggplot(data = acc, aes(x=temperature_com, y=humidity_com))+geom_count()+geom_smooth(method="lm")
### fit current data and draw for linear regression
fit <- lm(humidity~temperature, data = data)
summary(fit)
### fit accurate data and draw for linear regression
fit_com <- lm(humidity_com~temperature_com, data = acc)
summary(fit_com)
### graph with current data and linear regression line
plot(humidity~temperature, data=data)
abline(fit, col="blue")
### graph with accurate data and linear regression line
plot(humidity_com~temperature_com, data=acc)
abline(fit_com, col="red")
### graph with current data and linear regression line with accurate data
plot(humidity~temperature, data=data)
abline(fit_com, col="red")
### fit current data and draw for 2nd polynomial regression
fit2 <- lm(humidity~temperature+I(temperature^2), data=data)
summary(fit2)
### fit accurate data and draw for 2nd polynomial regression
fit2_com <- lm(humidity_com~temperature_com+I(temperature_com^2), data=acc)
summary(fit2_com)
### graph with current data and 2nd polynomial regression line
# NOTE(review): lines() draws in row order, not sorted by temperature — the
# polynomial curve may zig-zag unless the data happen to be sorted; verify.
plot(humidity~temperature, data=data)
lines(data$temperature, fitted(fit2), col="blue")
### graph with accurate data and 2nd polynomial regression line
plot(humidity_com~temperature_com, data=acc)
lines(acc$temperature_com, fitted(fit2_com), col="red")
### graph with current data and 2nd polynomial regression line with accurate data
plot(humidity~temperature, data=data)
lines(acc$temperature_com, fitted(fit2_com), col="red")
### fit current data and draw for 3rd polynomial regression
fit3 <- lm(humidity~temperature + I(temperature^2) + I(temperature^3), data=data)
summary(fit3)
### fit accurate data and draw for 3rd polynomial regression
fit3_com <- lm(humidity_com~temperature_com + I(temperature_com^2) + I(temperature_com^3), data=acc)
summary(fit3_com)
### graph with current data and 3rd polynomial regression line
plot(humidity~temperature, data=data)
lines(data$temperature, fitted(fit3), col="blue")
### graph with accurate data and 3rd polynomial regression line
plot(humidity_com~temperature_com, data=acc)
lines(acc$temperature_com, fitted(fit3_com), col="red")
### graph with current data and 3rd polynomial regression line with accurate data
plot(humidity~temperature, data=data)
lines(acc$temperature_com, fitted(fit3_com), col="red")
### made scatterplot with current data
scatterplot(temperature~humidity, data=data, pch=19,
spread=FALSE, smoother.args=list(lty=2),
main="current data",
xlab="Temperature(Celsius)", ylab="Humidity(%)")
### made scatterplot with accurate data
scatterplot(temperature_com~humidity_com, data=acc, pch=19,
spread=FALSE, smoother.args=list(lty=2),
main="Compare data",
xlab="Temperature(Celsius)", ylab="Humidity(%)")
|
3cd59b60018dde57abf464d35aa74c5fb1d4e8d7
|
f4ee4f52b8e2e685c0518ebe94eb14a8028b845b
|
/QualityControlForGEO.R
|
a5215819347f00416edf0af5c1fd214192907571
|
[] |
no_license
|
TaoyuMei/Masters_Thesis_public
|
37f0dc46610ff9d7f3d57732aa4dd73b239b18c8
|
093bc175a90806fc90f4b1b6f152322c41440f3b
|
refs/heads/main
| 2023-01-23T08:56:00.108469
| 2020-11-16T05:20:16
| 2020-11-16T05:20:16
| 313,198,315
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,410
|
r
|
QualityControlForGEO.R
|
# QualityConotrolForGEO.R
library(fastqcr)
library(ngsReports)
library(stringr)
# dealing with GSE raw data ------------------------------------------
## generating reports for each fastq files
setwd("/binf-isilon/alab/students/vrw936/scratch/rna_seq_for_mrna")
# All GSE* directories under the working directory
GSEs<- grep(pattern = "GSE.*", x = list.files(), value = TRUE)
# Run FastQC per dataset; reports land in each dataset's FASTQC/ subfolder
for(gse in GSEs){
fastqc(fastqc.path = "/binf-isilon/alab/students/vrw936/software/FastQC/fastqc",
fq.dir = gse)
}
# Aggregate multi-QC report for a hard-coded subset of six accessions
# (presumably the datasets of interest; the others in GSEs are skipped here)
for (gse in c("GSE104704", "GSE110731", "GSE125050",
"GSE125583", "GSE53697", "GSE95587")) {
qc_report(qc.path = paste0("./", gse, "/FASTQC"),
result.file = paste0("./", gse, "/FASTQC/", gse, "multi-qc-report"),
interpret = TRUE, experiment = gse)
}
# fastqcr and ngsReports for the 6 GSE after trimmomatic ------------------
setwd("/binf-isilon/alab/students/vrw936/scratch/rna_seq_for_mrna")
GSEs<- grep(pattern = "GSE.*", x = list.files(), value = TRUE)
# Re-run FastQC on the Trimmomatic-trimmed reads of every dataset
for(gse in GSEs){
print(gse)
fastqc(fastqc.path = "/binf-isilon/alab/students/vrw936/software/FastQC/fastqc",
fq.dir = file.path(gse, "trimmed"))
}
# Custom ngsReports template that omits the overrepresented-sequences module
altTemplate <- file.path("/binf-isilon", "alab", "students", "vrw936",
"Master_Thesis", "MyCode",
"ngsReports_Fastqc_template_no_overrepresent.Rmd")
for (gse in GSEs) {
fileDir <- file.path(".", gse, "trimmed", "FASTQC")
writeHtmlReport(fileDir, overwrite = TRUE, template = altTemplate)
}
# QC for each AMP-AD datasets ---------------------------------------------
### functionalise
# Run FastQC over all FASTQ files in `fastq_dir`.
#
# wd:        working directory that `fastq_dir` is resolved against.
# fastq_dir: directory (relative to `wd`) containing the FASTQ files;
#            fastqcr writes its reports to a FASTQC/ sub-directory.
# Returns the value of fastqcr::fastqc() invisibly-as-is; called for its
# side effect of generating the FastQC output files.
qcAndReport <- function(wd = "/binf-isilon/alab/students/vrw936/scratch/rna_seq_for_mrna",
                        fastq_dir){
  # setwd() returns the previous directory; restore it on exit so the
  # function no longer leaks a working-directory change to the caller
  # (every downstream call site sets its own wd explicitly, so this is safe).
  old_wd <- setwd(wd)
  on.exit(setwd(old_wd), add = TRUE)
  fastqc(fastqc.path = "/binf-isilon/alab/students/vrw936/software/FastQC/fastqc",
         fq.dir = fastq_dir)
}
### each dataset, before and after trimmomatic
# ROSMAP before trimmomatic
# moving ROSMAP fastqc results into 7 folders
# (altTemplate was already assigned earlier in the script; the re-assignment
# below sets the same path again)
for(i in 1:7){
writeHtmlReport(file.path("ROSMAP/fastq/", "FASTQC", paste0("FASTQC_", i)),
overwrite = TRUE, template = altTemplate)
}
altTemplate <- file.path("/binf-isilon", "alab", "students", "vrw936",
"Master_Thesis", "MyCode",
"ngsReports_Fastqc_template_no_overrepresent.Rmd")
# Raw (pre-trimming) FastQC for the other AMP-AD datasets
qcAndReport(fastq_dir = "MSBB/fastq/")
qcAndReport(fastq_dir = "MayoRNAseq/fastq/Mayo_CBE_sample_FASTQs/")
qcAndReport(fastq_dir = "MayoRNAseq/fastq/Mayo_TCX_sample_FASTQs/")
# ROSMAP after trimmomatic
qcAndReport(fastq_dir = "ROSMAP/fastq/trimmed")
setwd("/binf-isilon/alab/students/vrw936/scratch/rna_seq_for_mrna/ROSMAP/fastq/trimmed/FASTQC")
# Shell out to split the outputs into batches of 360 files (180 html + 180 zip).
# NOTE(review): the grep pattern ".[html|zip]" is a character CLASS, not the
# intended ".html"/".zip" alternation — it happens to match here because the
# file names end in those extensions, but verify before reusing.
for(i in 1:6){
system(paste0("mkdir ", "FASTQC_", i))
system(paste0("mv `ls | grep \".[html|zip]\" | head -360` ", "FASTQC_", i))
}
# Remaining files go into the 7th folder
system("mkdir FASTQC_7")
system("mv `ls | grep \".[html|zip]\"` FASTQC_7")
altTemplate <- file.path("/binf-isilon", "alab", "students", "vrw936",
"Master_Thesis", "MyCode",
"ngsReports_Fastqc_template_no_overrepresent.Rmd")
# NOTE(review): this relative path assumes the project-root wd, but setwd()
# above moved into .../trimmed/FASTQC — confirm the intended directory.
for(i in 1:7){
writeHtmlReport(file.path("ROSMAP/fastq/", "trimmed", "FASTQC", paste0("FASTQC_", i)),
overwrite = TRUE, template = altTemplate)
}
# functionalise
# Split FastQC output files into `num_fol` batch folders and render one
# combined ngsReports HTML report per folder.
#
# wd:      FASTQC output directory to operate in (setwd side effect persists).
# num_fol: number of FASTQC_<i> folders to create; the first num_fol-1 get
#          360 files each (180 html + 180 zip, i.e. 180 FASTQ samples), the
#          last folder receives whatever remains.
SeparateAndngsReports <- function(wd, num_fol){
# a function to separate the FASTQC results into several folders
# and combine the results using ngsReports
# only for web RStudio in the server, not for background processes
setwd(wd)
# NOTE(review): grep pattern ".[html|zip]" is a character class, not an
# .html/.zip alternation — it works here because all files end in those
# extensions, but it is fragile.
for(i in 1:(num_fol - 1)){
system(paste0("mkdir ", "FASTQC_", i))
system(paste0("mv `ls | grep \".[html|zip]\" | head -360` ", "FASTQC_", i))
}
# actually 180 htmls and 180 zips for each folder, corresponding to 180 fastqs
system(paste0("mkdir FASTQC_", num_fol))
system(paste0("mv `ls | grep \".[html|zip]\"` FASTQC_", num_fol))
altTemplate <- file.path("/binf-isilon", "alab", "students", "vrw936",
"Master_Thesis", "MyCode",
"ngsReports_Fastqc_template_no_overrepresent.Rmd")
# One combined HTML report per batch folder (paths relative to wd)
for(i in 1:num_fol){
writeHtmlReport(file.path(paste0("FASTQC_", i)),
overwrite = TRUE, template = altTemplate)
}
}
# MSBB before trimmmatic
SeparateAndngsReports(wd = "/binf-isilon/alab/students/vrw936/scratch/rna_seq_for_mrna/MSBB/fastq/FASTQC",
num_fol = 6)
# NOTE(review): the block below manually repeats the separation that
# SeparateAndngsReports() just performed on the same directory — likely a
# leftover from before the function existed; confirm whether it is needed.
setwd("/binf-isilon/alab/students/vrw936/scratch/rna_seq_for_mrna/MSBB/fastq/FASTQC")
for(i in 1:5){
system(paste0("mkdir ", "FASTQC_", i))
system(paste0("mv `ls | grep \".[html|zip]\" | head -360` ", "FASTQC_", i))
}
system("mkdir FASTQC_6")
system("mv `ls | grep \".[html|zip]\"` FASTQC_6")
altTemplate <- file.path("/binf-isilon", "alab", "students", "vrw936",
"Master_Thesis", "MyCode",
"ngsReports_Fastqc_template_no_overrepresent.Rmd")
for(i in 1:6){
writeHtmlReport(file.path("MSBB/fastq/", "FASTQC", paste0("FASTQC_", i)),
overwrite = TRUE, template = altTemplate)
}
# MSBB after trimmomatic
qcAndReport(fastq_dir = "MSBB/fastq/trimmed")
SeparateAndngsReports(wd = "/binf-isilon/alab/students/vrw936/scratch/rna_seq_for_mrna/MSBB/fastq/trimmed/FASTQC",
num_fol = 6)
# MayoRNAseq TCX before trimmomatic
SeparateAndngsReports(wd = "/binf-isilon/alab/students/vrw936/scratch/rna_seq_for_mrna/MayoRNAseq/fastq/Mayo_TCX_sample_FASTQs/FASTQC",
num_fol = 3)
# MayoRNAseq TCX after trimmomatic
qcAndReport(fastq_dir = "MayoRNAseq/fastq/Mayo_TCX_sample_FASTQs/trimmed")
SeparateAndngsReports(wd = "/binf-isilon/alab/students/vrw936/scratch/rna_seq_for_mrna/MayoRNAseq/fastq/Mayo_TCX_sample_FASTQs/trimmed/FASTQC",
num_fol = 3)
# MayoRNAseq CBE before trimmomatic
SeparateAndngsReports(wd = "/binf-isilon/alab/students/vrw936/scratch/rna_seq_for_mrna/MayoRNAseq/fastq/Mayo_CBE_sample_FASTQs/FASTQC",
num_fol = 3)
# MayoRNAseq CBE after trimmomatic
qcAndReport(fastq_dir = "MayoRNAseq/fastq/Mayo_CBE_sample_FASTQs/trimmed")
SeparateAndngsReports(wd = "/binf-isilon/alab/students/vrw936/scratch/rna_seq_for_mrna/MayoRNAseq/fastq/Mayo_CBE_sample_FASTQs/trimmed/FASTQC",
num_fol = 3)
|
25d39afc3ea0bff3fba49deba81a7cc376464acf
|
d6fefc7986e9e912bc20a216381952c7c2dd56d4
|
/functions/summarizedGenes_genelevel.r
|
232b4a707bb9baccb293a6e31a968fea8bcde9f8
|
[] |
no_license
|
YalanBi/AA
|
74674ebfc778eedfd9f9221f9177f5f7d8b6b4fc
|
b9be902c90e4d86b5f0152d67479145051741db8
|
refs/heads/master
| 2021-01-10T20:44:20.030509
| 2013-10-03T14:25:22
| 2013-10-03T14:25:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,741
|
r
|
summarizedGenes_genelevel.r
|
#
# Functions for analysing A. Thaliana Tiling Arrays
# last modified: 01-07-2013
# first written: 15-04-2013
# (c) 2013 GBIC Yalan Bi, Danny Arends, R.C. Jansen
#
#************************************************* this is the final version for summarizing probes into gene level! ^_^ *************************************************#
#******************* one probe for one gene, and the order of RILs is: first all individuals in Env1, next all individuals in Env2, then Env3 and Env4 *******************#
#****************************************************** this is used for H E A T M A P C L U S T E R I N G ! ! ! ******************************************************#
setwd("D:/Arabidopsis Arrays")
# Environment label (2nd column) for each array/RIL — drives the env loop below
menvironment <- read.table("Data/ann_env.txt", sep="\t")[ ,2]
#load exp genes
# Loads `expGeneList`: one vector of expressed-gene IDs per chromosome
load(file="Data/ExpGenes/expGenes_final.Rdata")
#direction selection
# Return the row indices of probes measured in the gene's transcribed
# direction, given the annotated strand.
#
# exp_data: data frame with a single-valued "strand" column ("sense" or
#   "complement") and a per-probe "direction" column ("forward"/"reverse").
#   Defaults to the global `rawexp` used by the surrounding script.
# Returns an integer vector of row indices into exp_data.
probesDir <- function(exp_data = rawexp){
  strand <- unique(exp_data[, "strand"])
  # Fail loudly on bad input instead of returning an undefined object
  # (the original raised "object 'direction_id' not found" in that case).
  if (length(strand) != 1) {
    stop("probesDir: 'strand' column must contain exactly one value")
  }
  if (strand == "sense") {
    which(exp_data[, "direction"] == "reverse")
  } else if (strand == "complement") {
    which(exp_data[, "direction"] == "forward")
  } else {
    stop("probesDir: unknown strand '", strand, "'")
  }
}
#summarize probes in one gene, each gene has one probe only, chr1-5 all in one file
# For every expressed gene, collapse its exon probes to one value per array
# (median across probes), ordered env1..env4. Output: genes x arrays matrix.
# NOTE(review): growing newprobematrix via rbind() and newprobe via c() in a
# loop is O(n^2); preallocation would speed this up considerably.
newprobematrix <- NULL
rownameList <- NULL
colnameList <- NULL
for(chr in 1:5){
location <- paste0("Data/Raw/chr", chr, "_norm_hf_cor/")
cat("Now chr", chr, "starts!\n")
st <- proc.time()[3]
genenames <- expGeneList[[chr]]
#filename="AT1G01010"
for(filename in genenames){
rawexp <- read.table(paste0(location, filename, ".txt"), row.names=1, header=TRUE)
# Probes in the gene's transcribed direction that are annotated to an exon ("tu")
probes_dir <- probesDir(rawexp)
exonID <- probes_dir[grepl("tu", rawexp[probes_dir, "tu"])]
newprobe <- NULL
#Levels-numbers: 6H-35, Dry_AR-38, Dry_Fresh-39, RP-36
for(env in 1:4){
ind_env <- which(as.numeric(menvironment) == env)
#make the colname as the same order as the env for just one time
# (148 = total number of RIL arrays; columns 17:164 hold the expression values)
if(length(colnameList) != 148){
colnameList <- c(colnameList, colnames(rawexp[17:164])[ind_env])
cat("colnames of env", env, "got!\n")
}
# +16 offsets the annotation columns so ind_env indexes into columns 17:164
for(i in (ind_env + 16)){
newprobe <- c(newprobe, median(rawexp[exonID, i]))
}
#cat("RILs in env", env, "finished\n")
}
newprobematrix <- rbind(newprobematrix, newprobe)
}
rownameList <- c(rownameList, genenames)
et <- proc.time()[3]
cat("chr", chr, "finished in", et-st, "s!\n")
}
rownames(newprobematrix) <- rownameList
colnames(newprobematrix) <- colnameList
write.table(newprobematrix, file="Data/summarizedGene/expGenes_1gene1probe.txt")
#to load this file
read.table("Data/summarizedGene/expGenes_1gene1probe.txt", row.names=1, header=TRUE)
|
0c16aa51b91bc0cc75a3b9f2eddeaf7083c4f924
|
3c258c7fe3244f4a41dea7d264098ac614eef19a
|
/man/extremes.Rd
|
d33f0ccf58cd53f796e22cd846c52179bcc3328c
|
[
"LicenseRef-scancode-warranty-disclaimer",
"CC0-1.0",
"LicenseRef-scancode-public-domain-disclaimer"
] |
permissive
|
USGS-R/repgen
|
379be8577f3effbe7067e2f3dc5b5481ca69999e
|
219615189fb054e3b421b6ffba4fdd9777494cfc
|
refs/heads/main
| 2023-04-19T05:51:15.008674
| 2021-04-06T20:29:38
| 2021-04-06T20:29:38
| 31,678,130
| 10
| 25
|
CC0-1.0
| 2023-04-07T23:10:19
| 2015-03-04T20:24:02
|
R
|
UTF-8
|
R
| false
| true
| 593
|
rd
|
extremes.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sig-extremes.R
\docType{methods}
\name{extremes}
\alias{extremes}
\alias{extremes,list-method}
\title{Extremes report.}
\usage{
extremes(data, ...)
\S4method{extremes}{list}(data, ...)
}
\arguments{
\item{data}{Local data (as list), or URL.}
\item{...}{Everything else.}
}
\description{
Extremes report.
}
\examples{
library(jsonlite)
library(dplyr)
data <-
fromJSON(
system.file(
'extdata', 'extremes', 'extremes-example-site-train.json', package = 'repgen'
)
)
extremes(data, 'Author Name')
}
|
5bb9250e9a27d2a9e016cd93c84a44d5f20d4552
|
a22e8e1b9ff3f4ed8a589bcbbbd280fccb4475d0
|
/Diagnóstico/01_Mercado Laboral/viz-ml.R
|
cf1c277ba1d472e2359d24a0b4bfbc2abcfa5414
|
[] |
no_license
|
paulapereda/estilo_cess
|
15257c4b761f838c63d88973edc50338432946b6
|
43cf5baeb8a1b158c1551cf636c9f7730784949f
|
refs/heads/main
| 2023-04-26T21:02:53.036329
| 2021-05-04T13:50:08
| 2021-05-04T13:50:08
| 312,658,107
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,013
|
r
|
viz-ml.R
|
library(lubridate)
library(tidyverse)
library(scales)
library(readxl)
library(here)
# Shared CESS theme/palette objects (verde_cess, violeta_cess, ...)
source(here::here("estilo_cess.R"))
# Gráfico - Informalidad Sectorial
# NOTE(review): chaining "+ ggsave(...)" onto a ggplot is a fragile idiom
# (ggsave saves the *last rendered* plot); consider a separate ggsave() call.
# The ifelse() below fixes a typo in the source spreadsheet
# ("empeladores" -> "empleadores") before releveling the sectors.
g <- read_excel(here("Diagnóstico", "01_Mercado Laboral", "Informalidad Sectorial.xlsx")) %>%
pivot_longer(- formal, names_to = "sector", values_to = "valor") %>%
mutate(sector = ifelse(sector == "Actividades de los hogares en calidad de empeladores",
"Actividades de los hogares en calidad de empleadores", sector),
sector = factor(sector, levels = c("Servicios sociales y relacionados con la Salud humana",
"Enseñanza",
"Industrias Manufactureras",
"Comercio",
"Alojamiento y servicios de comida",
"Producción agropecuaria",
"Construcción",
"Actividades de los hogares en calidad de empleadores"),
labels = c("Servicios sociales \ny relacionados con \nla Salud humana",
"Enseñanza",
"Industrias \nManufactureras",
"Comercio",
"Alojamiento y \nservicios de \ncomida",
"Producción \nagropecuaria",
"Construcción",
"Actividades de los \nhogares en calidad\n de empleadores")))
# Stacked 100% bars: formal vs informal share per sector
g %>%
ggplot(aes(sector, valor, fill = formal)) +
geom_bar(position = "fill", stat = "identity", width = .6) +
scale_fill_manual(values = c(verde_cess, rosado_cess)) +
scale_y_continuous(expand = expansion(mult = c(0, NA)),
labels = scales::percent_format(accuracy = 1)) +
labs(x = "",
y = "",
fill = "",
title = "Estructura sectorial del empleo por formalidad/informalidad (2019)",
caption = "Fuente: elaboración propia en base a ECH") +
ggsave(here("Diagnóstico", "01_Mercado Laboral", "plots", "graf.png"),
dpi = 300, width = 14, height = 8)
# Gráfico 0 - Informalidad según sexo
g0 <- read_excel(here("Diagnóstico", "01_Mercado Laboral", "Informalidad CESS.xlsx"),
sheet = "Sheet1") %>%
mutate(valor = valor/100)
g0 %>%
ggplot(aes(anio, valor, color = sexo)) +
geom_line(size = 1.5) +
scale_color_manual(values = c(verde_cess, violeta_cess, amarillo_cess)) +
scale_x_continuous(breaks = seq(2006, 2018, by = 1)) +
scale_y_continuous(limits = c(.2, .4),
expand = expansion(mult = c(0, NA)),
labels = scales::percent_format(accuracy = 1)) +
labs(x = "",
y = "",
color = "",
title = "Tasa de informalidad por sexo") +
ggsave(here("Diagnóstico", "01_Mercado Laboral", "plots", "graf0.png"),
dpi = 300, width = 12, height = 7)
# Gráfico 1 - Tasa de actividad por sexo
g1 <- read_excel(here("Diagnóstico", "01_Mercado Laboral", "Graficos_cap1.xlsx"), sheet = "g1") %>%
pivot_longer(- anio, names_to = "tasa_actividad", values_to = "value") %>%
mutate(tasa_actividad = factor(tasa_actividad, levels = c("Hombres", "Mujeres", "Global")))
ggplot(g1, aes(as.numeric(anio), value, color = tasa_actividad, group = tasa_actividad)) +
geom_line(size = 1.5) +
scale_color_manual(values = c(verde_cess, violeta_cess, amarillo_cess)) +
scale_x_continuous(breaks = seq(1980, 2020, by = 5)) +
scale_y_continuous(limits = c(.3, .8),
expand = expansion(mult = c(0, NA)),
labels = scales::percent_format(accuracy = 1)) +
labs(x = "",
y = "",
color = "",
title = "Tasa de actividad por sexo") +
ggsave(here("Diagnóstico", "01_Mercado Laboral", "plots", "graf1.png"),
dpi = 300, width = 12, height = 7)
# Gráfico 2 - Tasa de actividad por sexo y tramo de edad
g2 <- read_excel(here("Diagnóstico", "01_Mercado Laboral", "Graficos_cap1.xlsx"), sheet = "g2") %>%
pivot_longer(- c(anio, sexo), names_to = "tramo_edad", values_to = "value")
# Faceted by sexo, one line per age bracket
ggplot(g2, aes(anio, value, color = tramo_edad, group = tramo_edad)) +
geom_line(size = 1.5) +
scale_color_manual(values = c(verde_cess, rosado_cess, amarillo_cess)) +
scale_x_continuous(breaks = seq(1980, 2020, by = 5)) +
scale_y_continuous(limits = c(0, 1),
expand = expansion(mult = c(0, NA)),
labels = scales::percent_format(accuracy = 1)) +
facet_wrap(~ sexo) +
theme(panel.spacing = unit(2, "lines")) +
labs(x = "",
y = "",
color = "",
title = "Tasa de actividad por sexo y tramo de edad") +
ggsave(here("Diagnóstico", "01_Mercado Laboral", "plots", "graf2.png"),
dpi = 300, width = 11, height = 6)
# Gráfico 3 - Tasa de actividad por tramo de edad
g3 <- read_excel(here("Diagnóstico", "01_Mercado Laboral", "Graficos_cap1.xlsx"), sheet = "g3") %>%
pivot_longer(- anio, names_to = "tramo_edad", values_to = "value")
ggplot(g3, aes(anio, value, color = tramo_edad, group = tramo_edad)) +
geom_line(size = 1.5) +
scale_color_manual(values = c(verde_cess, rosado_cess, amarillo_cess)) +
scale_x_continuous(breaks = seq(1980, 2020, by = 5)) +
scale_y_continuous(limits = c(.3, .9),
breaks = seq(.3, .9, by = .1),
expand = expansion(mult = c(0, .01)),
labels = scales::percent_format(accuracy = 1)) +
theme(panel.spacing = unit(2, "lines")) +
labs(x = "",
y = "",
color = "",
title = "Tasa de actividad por tramo de edad") +
ggsave(here("Diagnóstico", "01_Mercado Laboral", "plots", "graf3.png"),
dpi = 300, width = 11, height = 6)
# Gráfico 4 - Tasa de empleo por sexo
g4 <- read_excel(here("Diagnóstico", "01_Mercado Laboral", "Graficos_cap1.xlsx"), sheet = "g4") %>%
pivot_longer(- anio, names_to = "tasa_empleo", values_to = "value") %>%
mutate(tasa_empleo = factor(tasa_empleo, levels = c("Hombres", "Mujeres", "Total")))
ggplot(g4, aes(anio, value, color = tasa_empleo, group = tasa_empleo)) +
geom_line(size = 1.5) +
scale_color_manual(values = c(verde_cess, violeta_cess, amarillo_cess)) +
scale_x_continuous(breaks = seq(1980, 2020, by = 5)) +
scale_y_continuous(limits = c(.3, .75),
breaks = seq(.3, .75, by = .1),
expand = expansion(mult = c(0, NA)),
labels = scales::percent_format(accuracy = 1)) +
labs(x = "",
y = "",
color = "",
title = "Tasa de empleo por sexo") +
ggsave(here("Diagnóstico", "01_Mercado Laboral", "plots", "graf4.png"),
dpi = 300, width = 12, height = 7)
# Gráfico 5 - Tasa de empleo por tramo de edad
g5 <- read_excel(here("Diagnóstico", "01_Mercado Laboral", "Graficos_cap1.xlsx"), sheet = "g5") %>%
pivot_longer(- anio, names_to = "tramo_edad", values_to = "value")
ggplot(g5, aes(anio, value, color = tramo_edad, group = tramo_edad)) +
geom_line(size = 1.5) +
scale_color_manual(values = c(verde_cess, rosado_cess, amarillo_cess)) +
scale_x_continuous(breaks = seq(1980, 2020, by = 5)) +
scale_y_continuous(limits = c(.2, .85),
breaks = seq(.2, .85, by = .1),
expand = expansion(mult = c(0, .01)),
labels = scales::percent_format(accuracy = 1)) +
labs(x = "",
y = "",
color = "",
title = "Tasa de empleo por tramo de edad") +
ggsave(here("Diagnóstico", "01_Mercado Laboral", "plots", "graf5.png"),
dpi = 300, width = 11, height = 6)
# Gráfico 6 - Evolución de la cantidad de ocupados totales
g6 <- read_excel(here("Diagnóstico", "01_Mercado Laboral", "Graficos_cap1.xlsx"), sheet = "g6")
ggplot(g6, aes(anio, valor)) +
geom_line(size = 1.5, color = verde_cess) +
scale_x_continuous(breaks = seq(1998, 2018, by = 2)) +
scale_y_continuous(limits = c(800000, 1500000),
breaks = seq(800000, 1500000, by = 200000),
expand = expansion(mult = c(0, .01)),
labels = function(x) format(x, big.mark = ".", scientific = FALSE)) +
labs(x = "",
y = "",
color = "",
title = "Evolución de la cantidad de ocupados totales") +
ggsave(here("Diagnóstico", "01_Mercado Laboral", "plots", "graf6.png"),
dpi = 300, width = 11, height = 6)
# Gráfico 7 - Evolución de las categorías de ocupación
g7 <- read_excel(here("Diagnóstico", "01_Mercado Laboral", "Graficos_cap1.xlsx"), sheet = "g7") %>%
pivot_longer(- anio, names_to = "categoria_ocupacion", values_to = "value")
# 100% stacked bars: share of each occupational category per year
ggplot(g7, aes(anio, value, fill = categoria_ocupacion)) +
geom_bar(position = "fill", stat = "identity") +
scale_fill_manual(values = c(verde_cess, amarillo_cess, verde_cess2, violeta_cess, violeta_cess2, amarillo_cess2, rosado_cess)) +
scale_x_continuous(breaks = seq(1981, 2019, by = 3)) +
scale_y_continuous(expand = expansion(mult = c(0, .01)),
labels = scales::percent_format(accuracy = 1)) +
labs(x = "",
y = "",
fill = "",
title = "Evolución de las categorías de ocupación",
caption = "Fuente: elaboración propia con base en ECH") +
ggsave(here("Diagnóstico", "01_Mercado Laboral", "plots", "graf7.png"),
dpi = 300, width = 11, height = 6)
# Gráfico 8 - Distribución de los ocupados totales según categoría ocupacional (2011 - 2019)
# The case_when inserts line breaks into the long category labels for the axis
g8 <- read_excel(here("Diagnóstico", "01_Mercado Laboral", "Graficos_cap1.xlsx"), sheet = "g8") %>%
pivot_longer(- anio, names_to = "categoria_ocupacion", values_to = "value") %>%
mutate(categoria_ocupacion = case_when(
categoria_ocupacion == "Asalariado Privado" ~ "Asalariado \nPrivado",
categoria_ocupacion == "Asalariado Público" ~ "Asalariado \nPúblico",
categoria_ocupacion == "Cuenta propia sin local" ~ "Cuenta propia \nsin local",
categoria_ocupacion == "Cuenta propia con local" ~ "Cuenta propia \ncon local",
T ~ categoria_ocupacion))
ggplot(g8, aes(categoria_ocupacion, value, fill = as.character(anio))) +
geom_bar(position = "dodge", stat = "identity") +
scale_fill_manual(values = c(verde_cess, rosado_cess)) +
scale_y_continuous(limits = c(0, .6),
expand = expansion(mult = c(0, .01)),
labels = scales::percent_format(accuracy = 1)) +
labs(x = "",
y = "",
fill = "",
title = "Distribución de los ocupados totales según categoría ocupacional (2011 - 2019)") +
ggsave(here("Diagnóstico", "01_Mercado Laboral", "plots", "graf8.png"),
dpi = 300, width = 12, height = 7)
# Gráfico 9 - Proyección de la Población en Edad de Trabajar (2020 - 2100)
g9 <- read_excel(here("Diagnóstico", "01_Mercado Laboral", "Graficos_cap1.xlsx"), sheet = "g9") %>%
pivot_longer(- anio, names_to = "sexo", values_to = "value")
ggplot(g9, aes(anio, value, fill = as.character(sexo))) +
geom_area() +
scale_fill_manual(values = c(verde_cess, violeta_cess)) +
scale_x_continuous(breaks = seq(2015, 2100, by = 5)) +
scale_y_continuous(limits = c(0, 3500000),
breaks = seq(0, 3500000, by = 500000),
expand = expansion(mult = c(0, .01)),
labels = function(x) format(x, big.mark = ".", scientific = FALSE)) +
labs(x = "",
y = "",
fill = "",
title = "Población en Edad de Trabajar proyectada a 2100") +
ggsave(here("Diagnóstico", "01_Mercado Laboral", "plots", "graf9.png"),
dpi = 300, width = 12, height = 7)
# Gráfico 10 - Brecha de género en el mundo
ta_todos <- read_rds(here("Diagnóstico", "01_Mercado Laboral", "ta_todos.rds"))
ta_todos %>%
mutate(country = case_when(
country == "Azerbaijan" ~ "Azerbaiyán",
country == "Brunei Darussalam" ~ "Brunéi",
country == "Brazil" ~ "Brasil",
country == "Canada" ~ "Canadá",
country == "Switzerland" ~ "Suiza",
country == "Cyprus" ~ "Chipre",
country == "Czech Republic" ~ "Chequia",
country == "Germany" ~ "Alemania",
country == "Denmark" ~ "Dinamarca",
country == "Dominican Republic" ~ "República Dominicana",
country == "Spain" ~ "España",
country == "Finland" ~ "Finlandia",
country == "United Kingdom" ~ "Reino Unido",
country == "Greece" ~ "Grecia",
country == "Hungary" ~ "Hungría",
country == "Ireland" ~ "Irlanda",
country == "Japan" ~ "Japón",
country == "Korea, Rep." ~ "Corea del Sur",
country == "St. Lucia" ~ "Santa Lucía",
country == "Luxembourg" ~ "Luxemburgo",
country == "Latvia" ~ "Letonia",
country == "North Macedonia" ~ "República de Macedonia",
country == "Myanmar" ~ "Birmania",
country == "Malta" ~ "República de Malta",
country == "Mexico" ~ "México",
country == "Netherlands" ~ "Holanda",
country == "Norway" ~ "Noruega",
country == "Panama" ~ "Panamá",
country == "Philippines" ~ "Filipinas",
country == "Poland" ~ "Polonia",
country == "Romania" ~ "Rumania",
country == "Russian Federation" ~ "Rusia",
country == "Rwanda" ~ "Ruanda",
country == "Singapore" ~ "Singapur",
country == "Slovenia" ~ "Eslovenia",
country == "Slovak Republic" ~ "Eslovaquia",
country == "Thailand" ~ "Tailandia",
country == "Ukraine" ~ "Ucrania",
country == "United States" ~ "Estados Unidos",
T ~ country)) %>%
arrange(brecha_na) %>%
mutate(country = fct_inorder(country)) %>%
ggplot(aes(country, brecha_na)) +
geom_col(aes(fill = country == "Uruguay")) +
scale_fill_manual(values = c(violeta_cess, verde_cess)) +
scale_y_continuous(expand = expansion(add = c(0, 0)),
limits = c(0, 40),
breaks = c(0, 5, 10, 15, 20, 25, 30, 35, 40)) +
coord_flip() +
geom_hline(aes(yintercept = mean(brecha_na)),
color = "grey35", size = .7) +
annotate("text",
x = 10,
y = 20,
size = 5,
label = glue::glue("Media: {round(mean(ta_todos$brecha_na), 2)}%")) +
labs(y = "Brecha de género en la tasa de actividad (%)",
x = "",
caption = "Fuente: Organización Internacional del Trabajo",
title = "Brecha de género en el mundo") +
theme(legend.position = "none") +
ggsave(here("Diagnóstico", "01_Mercado Laboral", "plots", "graf10.png"),
dpi = 300, width = 12, height = 10)
# Gráfico 11 - Cierre de brecha proyectada a 2100
# Inputs: one data frame of activity rates by sexo/tramo/year, one of matching
# population projections (restricted to 1990 onwards).
forecast_tasa <- readRDS(here("Diagnóstico", "01_Mercado Laboral", "forecast_tasas_modelo.rds"))
poblacion_forecast <- readRDS(here("Diagnóstico", "01_Mercado Laboral", "forecast_poblacion.rds")) %>%
  filter(year(year) >= 1990)

# Join population and rates; pea = economically active population (rate is in %)
df <- poblacion_forecast %>%
  left_join(forecast_tasa, by = c("year", "sexo", "tramo")) %>%
  mutate(pea = (tasa * pob) / 100)

# Global activity rate by sex and calendar year
df_tasas_globales <- df %>%
  group_by(sexo, year = year(year)) %>%
  summarize(pea = sum(pea),
            pob = sum(pob),
            tasa = pea / pob)

# Gender gap = male rate - female rate, one row per year
brecha <- df_tasas_globales %>%
  pivot_wider(id_cols = year, names_from = sexo, values_from = tasa) %>%
  mutate(brecha = Hombres - Mujeres)

# End points used to label the series
brecha_labels <- filter(brecha, year %in% c(1990, 2100))

# FIX: the original chained `+ ggsave(...)` onto the plot. That only worked by
# accident (ggsave() silently saved last_plot()) and is an error in current
# ggplot2 versions; build the plot, print it, then save it explicitly.
g11 <- brecha %>%
  ggplot(aes(year, brecha)) +
  geom_line(color = verde_cess, size = 1.5) +
  geom_point(data = brecha_labels, color = verde_cess, size = 1.7) +
  scale_y_continuous(expand = expansion(mult = c(0, 0.08)),
                     limits = c(0.08, 0.32),
                     breaks = seq(0.08, 0.32, by = 0.04),
                     labels = scales::percent_format(accuracy = 1)) +
  scale_x_continuous(breaks = c(1990, 2000, 2010, 2020, 2030, 2040, 2050,
                                2060, 2070, 2080, 2090, 2100)) +
  geom_text(data = brecha_labels,
            size = 5,
            nudge_y = .75e-2,
            aes(label = scales::percent_format(scale = 100, accuracy = .1)(brecha))) +
  labs(y = "",
       x = "Año",
       caption = "Nota: la brecha se define la diferencia entre tasas de actividad masculina y femenina.\nFuente: elaboración propia con base en Organización Internacional del Trabajo",
       title = "Cierre de brecha proyectada a 2100")
g11
ggsave(here("Diagnóstico", "01_Mercado Laboral", "plots", "graf11.png"),
       plot = g11, dpi = 300, width = 10, height = 7)
# Gráfico 12 - PEA proyectada a 2100
pea_proyectada <- read_excel(here("Diagnóstico", "01_Mercado Laboral", "Graficos_cap1.xlsx"), sheet = "g10") %>%
  select(- PET) %>%
  mutate(anio = as.numeric(year(`Año`))) %>%
  mutate(proyeccion = year(`Año`) >= 2020) %>%
  add_row(`Año` = as.Date("2020-01-01"), PEA = 1774834, anio = 2020, proyeccion = FALSE)

# PEA Total
# NOTE(review): pea_labels is computed but never used below (the 2100 label is
# hard-coded through annotate()); kept so downstream code is unaffected.
pea_labels <- filter(pea_proyectada, anio == 2100)

# FIX: the original chained `+ ggsave(...)` onto the plot, which only worked by
# accident and errors in current ggplot2; save explicitly instead.
# NOTE(review): guides(linetype = FALSE) is deprecated in ggplot2 >= 3.3.4 in
# favour of guides(linetype = "none") -- change when the package is upgraded.
g12 <- pea_proyectada %>%
  ggplot(aes(anio, PEA)) +
  geom_line(aes(linetype = proyeccion), color = verde_cess, size = 1.5) +
  scale_y_continuous(expand = expansion(mult = c(0, 0.08)),
                     limits = c(1000000, 2000000),
                     breaks = seq(1000000, 2000000, by = 250000),
                     labels = function(x) format(x, big.mark = ".", scientific = FALSE)) +
  scale_x_continuous(breaks = c(1990, 2000, 2010, 2020, 2030, 2040, 2050,
                                2060, 2070, 2080, 2090, 2100)) +
  annotate("text", x = 2100, y = 1320000, label = "1.343.991", size = 5) +
  labs(x = "",
       y = "",
       caption = "Fuente: elaboración propia con base en Organización Internacional del Trabajo",
       title = "Población Económicamente Activa proyectada a 2100") +
  guides(linetype = FALSE)
g12
ggsave(here("Diagnóstico", "01_Mercado Laboral", "plots", "graf12.png"),
       plot = g12, dpi = 300, width = 11, height = 7)
|
09ee8f120d1fb1ce9282e06b4810460d28b21900
|
1a83ac47bb1ffe39b416dfce1964051fa77d5b7c
|
/man/perimeter.Rd
|
9981bd74c056f26e1ad06644763769f422945402
|
[] |
no_license
|
cran/sampSurf
|
9052ab60378e3295ecade04e573e6770c145cf74
|
9388a099e8cef6109c544bcc95770bc9a60670e6
|
refs/heads/master
| 2021-06-05T21:01:53.864724
| 2021-03-05T14:50:02
| 2021-03-05T14:50:02
| 17,699,448
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,028
|
rd
|
perimeter.Rd
|
\name{perimeter}
\alias{perimeter}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Function to Return the Graphical Perimeter of an Object in
Package \sQuote{sampSurf} }
\description{ Most classes in the \pkg{sampSurf} package have some kind
of spatial representation that conforms to a class in \pkg{sp} (for
polygons) or \pkg{raster} (for grids). This generic has been defined
to return graphical polygon object that most nearly matches the
perimeter. For some objects this means returning the bounding box for,
perhaps, a collection of logs, or for a \dQuote{Tract} object.
}
\usage{
perimeter(object, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{object}{ Signature object, which differs for each method.}
\item{\dots}{ See methods. }
}
\details{ The methods defined for this generic are described in
\code{\link{perimeter-methods}}. The function is quite simple, and
works essentially the same for each type of object. Again, some leeway
in exactly what is returned is taken because we can have individual
objects, collections, or grid rather than polygonal objects. In the
latter two cases, the perimeter normally would be the minimal bounding
box. For other objects in classes that have a well-defined perimeter,
such as a downLog, or a circular plot, these are returned. One can
always plot their bounding box separately with the help of
\code{link{bboxToPoly}}. }
\value{
A "\code{\linkS4class{SpatialPolygons}}" object that can be plotted directly.
}
\author{
Jeffrey H. Gove %, \email{jhgove@unh.edu}
}
\seealso{
\code{\link{bbox}}
}
\examples{
showMethods("perimeter")
dlogs = downLogs(15, xlim=c(0,20), ylim=c(0,20), buttDiams=c(25,35))
dlogs.perim = perimeter(dlogs)
plot(dlogs.perim, axes=TRUE)
plot(dlogs, add=TRUE)
bbox(dlogs.perim)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
%% \keyword{ ~kwd1 }
%% \keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
7c6bfac237b2e4cc9b5e7f69d592c13fe2270d6d
|
b3d6bc3df6ab9e65c05c625edbae736bcf7cd56f
|
/man/BDMethod-setter.Rd
|
38a78be04ed3511b229da5fd269cbc78ebd8cd75
|
[] |
no_license
|
areyesq89/SummarizedBenchmark
|
fbd1976902330ed4630b29981eef055fd855ac35
|
6ac1a724155a316d9773afdffc9243d6434d9389
|
refs/heads/master
| 2021-08-29T07:31:28.673604
| 2021-08-24T05:48:26
| 2021-08-24T05:48:26
| 102,158,369
| 14
| 6
| null | 2018-02-13T18:11:04
| 2017-09-01T22:46:56
|
R
|
UTF-8
|
R
| false
| true
| 1,544
|
rd
|
BDMethod-setter.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R, R/BDMethodList-accessors.R,
% R/BenchDesign-accessors.R
\docType{methods}
\name{BDMethod<-}
\alias{BDMethod<-}
\alias{BDMethod<-,BDMethodList,character,BDMethod-method}
\alias{BDMethod<-,BDMethodList,character,NULL-method}
\alias{BDMethod<-,BenchDesign,character,BDMethod-method}
\alias{BDMethod<-,BenchDesign,character,NULL-method}
\title{Set method in list or BenchDesign object}
\usage{
BDMethod(x, i) <- value
\S4method{BDMethod}{BDMethodList,character,BDMethod}(x, i) <- value
\S4method{BDMethod}{BDMethodList,character,`NULL`}(x, i) <- value
\S4method{BDMethod}{BenchDesign,character,BDMethod}(x, i) <- value
\S4method{BDMethod}{BenchDesign,character,`NULL`}(x, i) <- value
}
\arguments{
\item{x}{\code{\link[=BenchDesign-class]{BenchDesign}} or
\code{\link[=BDMethodList-class]{BDMethodList}} object.}
\item{i}{character name of method.}
\item{value}{\code{\link[=BDMethod-class]{BDMethod}} or \code{NULL}.}
}
\value{
modified BenchDesign object
}
\description{
Adds, replaces or removes a named \code{\link[=BDMethod-class]{BDMethod}} method
in a \code{\link[=BDMethodList-class]{BDMethodList}} or
\code{\link[=BenchDesign-class]{BenchDesign}} object with a specified
\code{\link[=BDMethod-class]{BDMethod}} object.
An existing method can be removed by setting the value to \code{NULL}.
}
\examples{
bd <- BenchDesign()
BDMethod(bd, "avg") <- BDMethod(x = base::mean)
bd
}
\seealso{
\code{\link{BDMethod}}
}
\author{
Patrick Kimes
}
|
fe522553ade0096ef5f9bd54191ce3ae8027f7d1
|
24851be32893bfb1027b2a33164ef515fc4fb76b
|
/stan/old/bayesr2prod.r
|
22a8a80eecb287645949caa7ff8e37b8f47c6179
|
[] |
no_license
|
qdread/forestlight
|
acce22a6add7ab4b84957d3e17d739158e79e9ab
|
540b7f0a93e2b7f5cd21d79b8c8874935d3adff0
|
refs/heads/master
| 2022-12-14T03:27:57.914726
| 2022-12-01T23:43:10
| 2022-12-01T23:43:10
| 73,484,133
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,351
|
r
|
bayesr2prod.r
|
# Calculate Bayesian R2 of the production models, manually.
# Workflow
# -------------------------------------------------------------------------
# 1. Load the model fit
# 2. Load the data by sourcing the stan rdump for that model
# 3. Extract the parameter estimates for each draw from the model fit
# 4. Plug the dbh (x) from the loaded data in to get the linear predictors for production
# 5. Get the residuals by subtracting the observed production (y) from predicted production
# 6. Calculate predicted variance / (predicted + residual variance) to get R2.
# Compute the Bayesian R^2 of a fitted production model, manually, from saved
# Stan CSV output and the matching rdump data file.
#
# Arguments:
#   dens_model: density model name ('pareto' or 'weibull'); only used to build file names.
#   prod_model: production model name ('power' or 'exp'); selects the functional form.
#   fg:         functional group ('fg1'..'fg5', 'alltree', 'unclassified').
#   year:       census year; used to locate the fit and data files.
# Returns: named numeric vector of quantiles (q025..q975) of the posterior R^2 draws.
bayesian_rsquared_production <- function(dens_model, prod_model, fg, year) {
  # NOTE(review): require() returns FALSE instead of erroring when a package is
  # missing; library() would fail fast here.
  require(rstan)
  require(purrr)
  # 1. Load CSVs with model fit as stanfit object (three chains per fit)
  fp <- '~/forestlight/stanoutput'
  files <- paste0('fit_', dens_model, 'x', prod_model, '_', fg, '_', year, '_', 1:3, '.csv')
  if (fg == 'alltree') files <- paste0('ss', files) # Use the 25K subset for all trees.
  fit <- read_stan_csv(file.path(fp, files))
  # 2. Load data
  fpdump <- '~/forestlight/stanrdump'
  dumpfile <- paste0('dump_', fg, '_', year, '.r')
  if (fg == 'alltree') dumpfile <- paste0('ss', dumpfile) # Use the 25K subset for all trees.
  source(file.path(fpdump, dumpfile)) # Creates variables x and y.
  # 3. Extract parameter estimates. The exponential form has three extra parameters.
  pars_to_get <- c('beta0', 'beta1')
  if (prod_model == 'exp') pars_to_get <- c(pars_to_get, 'a', 'b', 'c')
  pars <- extract(fit, pars_to_get)
  # One row per posterior draw, one column per parameter
  pars <- as.data.frame(do.call(cbind, pars))
  # 4. Plug in dbh (x) to get posterior estimates of linear predictor of production
  powerlaw_exp_log <- function(x, a, b, c, beta0, beta1) exp(-beta0) * x^beta1 * (-a * x ^ -b + c)
  powerlaw_log <- function(x, beta0, beta1) exp(-beta0) * x^beta1
  # Take the log of the fitted values; rows = posterior draws, columns = trees
  if (prod_model == 'power') {
    prod_fitted <- log(do.call(rbind, pmap(pars, powerlaw_log, x = x)))
  } else {
    prod_fitted <- log(do.call(rbind, pmap(pars, powerlaw_exp_log, x = x)))
  }
  # 5. Get residuals by subtracting log y from linear predictor
  # (sweep subtracts log(y) across each row; the -1 flips the sign so that
  # resids = log(y) - fitted)
  resids <- -1 * sweep(prod_fitted, 2, log(y))
  # 6. Calculate variances and ratio, per posterior draw (per row)
  pred_var <- apply(prod_fitted, 1, var)
  resid_var <- apply(resids, 1, var)
  r2s <- pred_var / (pred_var + resid_var)
  # Quantiles of rsq across posterior draws
  r2_quant <- quantile(r2s, probs = c(0.025, 0.05, 0.25, 0.5, 0.75, 0.95, 0.975))
  setNames(r2_quant, c('q025', 'q05', 'q25', 'q50', 'q75', 'q95', 'q975'))
}
# Driver: one row per (density model, production model, functional group, year)
# combination; each cluster array task computes the R^2 for one row.
mod_df <- expand.grid(dens_model = c('pareto', 'weibull'),
                      prod_model = c('power', 'exp'),
                      fg = c('fg1', 'fg2', 'fg3', 'fg4', 'fg5', 'alltree', 'unclassified'),
                      year = seq(1990, 2010, 5),
                      stringsAsFactors = FALSE)

# Task index comes from the PBS job array environment variable
task <- as.numeric(Sys.getenv('PBS_ARRAYID'))

r2 <- bayesian_rsquared_production(mod_df$dens_model[task], mod_df$prod_model[task], mod_df$fg[task], mod_df$year[task])

# Each task writes its own result file, keyed by the task index
save(r2, file = paste0('~/forestlight/stanoutput/fitinfo/r2_', task, '.r'))

###
# Combine output (run after all array tasks have finished)
# load() restores `r2` into the function environment, which is then returned
r2list <- lapply(1:nrow(mod_df), function(i) {
  load(paste0('~/forestlight/stanoutput/fitinfo/r2_', i, '.r'))
  r2
})

r2df <- cbind(mod_df, do.call(rbind, r2list))
# Keep only the Weibull-density fits for the final table
r2df <- subset(r2df, dens_model == 'weibull', select = c(prod_model, fg, year, q025, q50, q975))

write.csv(r2df, '~/forestlight/r2_by_fg.csv', row.names = FALSE)
|
d669653ea6ecdf210b065c31f99ab2434d2aad4d
|
ec345334e4334be7cfbdc760d0027301418082b1
|
/R Code/3 Indicator Selection+Portfolio Management/Contract_numbers7_9.R
|
0a62655c89a2a3f469ca18ddc33746de996710e2
|
[] |
no_license
|
MaryShao/Back-Testing-for-Futures-Trading-Strategies
|
92085c83020d193dac4207b9c5ca481423b534a9
|
a7247e3bf3f6a2314ecc9a964232cef44ef5ee22
|
refs/heads/master
| 2021-02-06T01:41:35.229932
| 2020-02-29T02:36:51
| 2020-02-29T02:36:51
| 243,861,450
| 1
| 0
| null | null | null | null |
GB18030
|
R
| false
| false
| 4,262
|
r
|
Contract_numbers7_9.R
|
# Data import
# Contract multipliers per lot (每手 = "per lot"): one row per instrument
contract<- read.csv("C:/Users/m8sha/Desktop/contract_number.csv", stringsAsFactors=FALSE)
rownames(contract)=contract[,1]
contract=contract[,2:3]
#coef=contract[,-1]
# NOTE(review): the `coef` assignment above is commented out, but `coef` is
# referenced later in this script -- confirm it is created elsewhere before running.

# Contract prices (合约价格): for each price file keep the 15:00:00 daily close
# rows, then subset to the first row plus month-end observations.
setwd("C:/Users/m8sha/Desktop/DATA/Sig_Price")
f = list.files()
tmp=read.table(f[1],header = TRUE)
# Column 1 holds a timestamp string; positions 12-19 are the HH:MM:SS part
tmp = tmp[which(substr(tmp[,1],12,19)=="15:00:00"),]
rownames(tmp)= substr(tmp[,1],1,11)
tmp = tmp[,2:ncol(tmp)]
# endpoints()/as.xts come from the xts package -- assumed loaded elsewhere; TODO confirm
tmp = tmp[c(1,endpoints(as.xts(tmp),on='month')),]
price = as.matrix(tmp[,1])
rownames(price) = rownames(tmp)
# `amount` records the number of data columns (contracts) per file
amount = c((ncol(tmp)-1))
# Repeat for the remaining files, column-binding the month-end prices
for(i in f[-1]){
  tmp = read.table(i,header = TRUE)
  tmp[,1] = as.character(tmp[,1])
  tmp = tmp[which(substr(tmp[,1],12,19)=="15:00:00"),]
  rownames(tmp)= substr(tmp[,1],1,11)
  tmp = tmp[,2:ncol(tmp)]
  tmp = tmp[c(1,endpoints(as.xts(tmp),on='month')),]
  price = cbind(price,tmp[,1])
  amount = c(amount,(ncol(tmp)-1))
}
amount = as.matrix(amount)
# Instrument tickers, assumed to be in the same order as the files in `f` -- TODO confirm
names = c('a','hc','i','IF','j','PP','rb','SR','T','ZC')
colnames(price)=rownames(amount)=names
######
# Strategy returns: wide table, strategy columns start at column 3
setwd("C:/Users/m8sha/Desktop/DATA/Strategy")
Strategy.returns <- read.csv("Strategy.returns.txt",
                             row.names=NULL, sep="", stringsAsFactors=FALSE)
allRt = Strategy.returns[,3:98]
rownames(allRt) = paste(Strategy.returns[,1],Strategy.returns[,2])

# Contract numbers: scale each instrument's lot count so its notional matches a
# benchmark contract; 'T' itself is fixed at 1 lot.
total.contract = price
coef.T = coef[which(coef[,1]=='T'),]
amount.T = as.numeric(amount[which(rownames(amount)=='T'),])
# Notional of the T contract: price * multiplier * number of contracts
mark_p_T = price[,which(colnames(price)=='T')]*as.numeric(coef.T[,2])*amount.T
for (i in names){
  if (i=='T'){total.contract[,which(colnames(total.contract)==i)]=1}
  else{
    coef.i= coef[which(coef[,1]==i),]
    amount.i = as.numeric(amount[which(rownames(amount)==i),])
    # NOTE(review): `mark_p_IF` is not defined in this script (only `mark_p_T`
    # above) -- likely a leftover from an IF-benchmark version; confirm intent.
    num = mark_p_IF/(price[,which(colnames(price)==i)]*as.numeric(coef.i[,2])*amount.i)
    total.contract[,which(colnames(total.contract)==i)]=round(num)
  }
}
# setwd("C:/Users/m8sha/Desktop")
# write.csv(total.contract,'total.contract.csv')
############# return #################
# Daily linear returns per strategy; rebuilt below from the intraday returns in `allRt`.
Strategy.Dailylinreturn <- read.csv("C:/Users/m8sha/Desktop/DATA/Strategy/Strategy.Dailylinreturn.txt",
                                    row.names=1, sep="")
rt = matrix(ncol=ncol(Strategy.Dailylinreturn),nrow=nrow(Strategy.Dailylinreturn))
rownames(rt)=substr(rownames(Strategy.Dailylinreturn),1,10)
colnames(rt)=colnames(Strategy.Dailylinreturn)
# Session boundaries in the intraday index: 09:01 open rows and 15:15 close rows
ind1 = which(substr(rownames(allRt),12,19)=='09:01:00')
ind2 = which(substr(rownames(allRt),12,19)=='15:15:00')
# Sum intraday returns into one daily return per strategy; from day 2 onward
# the previous close-to-open segment is included as well.
for (i in 1:ncol(Strategy.Dailylinreturn)){
  for (j in 1:nrow(Strategy.Dailylinreturn)){
    if (j ==1){day.rt = allRt[ind1[j]:ind2[j],i]}
    else{day.rt = c(allRt[ind1[j]:ind2[j],i],allRt[ind2[j-1]:ind1[j],i])}
    rt[j,i]=sum(day.rt)
  }
}
# Drop excluded strategies by name
rt = rt[,!colnames(rt)%in%c("MA_ind30.02.LN_30", "v_LN_indv15_03_15","MA_ind120.07.LN_120","MA_SYL.MA.5min.index.1_5","MA_WS01.GT.15m60m_15","MA_WS07.GT.15m30m60m_60",
                            "ni_ind10.04.LN_10" ,"ni_LJ.Avanti02PP.5min_60","ni_LJ.MB02PP.5min_30" ,"ni_LJ.ninexiuPP.5min_60","ni_LJ.TW02PP.1H_15","OI_01.4H_240","OI_02.2H_120","OI_LJ.Kelther.Tsi01T.30min_240",
                            "OI_LJ.multsig03PP.15min_120","OI_LJ.multsig.A.4H_120","OI_LJ.multsig.A.4H_240","OI_LJ.threeswordPP.5min_30")]
# Month-end indices of the daily return matrix (xts::endpoints -- assumed loaded)
ind = endpoints(rt,on='month')
#tmp.rt=rt
# NOTE(review): the line below restores a backup whose creation is commented
# out above -- it errors unless `tmp.rt` already exists in the session.
rt=tmp.rt
# Commented-out scaling of returns by contract numbers and roll costs:
# for (i in 1:ncol(rt)){
#   char = strsplit(colnames(rt)[i],'_')
#   char = char[[1]]
#   k = which(colnames(total.contract)==char[1])
#   #print(k)
#   for (j in 1:(length(ind)-1)){
#     #print(j)
#     if (j ==1){
#       rt[(ind[j]+1):ind[j+1],i]=rt[(ind[j]+1):ind[j+1],i]*total.contract[j,k]*contract$coef[k]}
#     else{
#       rt[(ind[j]+1):ind[j+1],i]=rt[(ind[j]+1):ind[j+1],i]*total.contract[j,k]*contract$coef[k]
#       if (rt[(ind[j]),i]!=0){
#         rt[(ind[j]+1),i]=rt[(ind[j]+1),i]+(total.contract[j,k]-total.contract[j+1,k])*contract$cost[k]
#       }
#     }
#   }
# }
# NOTE(review): `prices` is not defined anywhere in this script, and lag()
# presumably comes from xts/dplyr -- confirm inputs before running this line.
X_lin <- (prices/lag(prices) - 1)[-1]
####################################################################
# Preallocate the contract-number output: 19 month-ends x 96 strategies
total.num <- matrix(ncol=96,nrow=19)
colnames(total.num) = colnames(Strategy.returns[,3:ncol(Strategy.returns)])
rownames(total.num) = rownames(price)
|
94a0fcd20a3f3cea261f3c6ecc35abf9465d1422
|
b58131c3658c3c0e7e975eeb92904d299b472c96
|
/R/chrom2chrom.R
|
dd0f9f50263760762c8ec529714211a770918766
|
[] |
no_license
|
ibn-salem/hyperbolic_nucleus
|
28f7f338a1cd6ab7ca81ec602272831b500a79d1
|
8242826da801cd2fca8a625cf71638d7b18de882
|
refs/heads/master
| 2021-01-19T10:17:45.301835
| 2017-10-20T14:26:34
| 2017-10-20T14:26:34
| 82,173,313
| 1
| 0
| null | 2017-10-20T13:46:34
| 2017-02-16T11:24:34
|
R
|
UTF-8
|
R
| false
| false
| 442
|
r
|
chrom2chrom.R
|
library(dplyr)
library(circlize)

# Restores `inData` (edge list, gene ids in g1/g2) and `noteDF` (id -> chr map)
load("results/edge_lists.RData")
load("results/noteDF.RData")

# Map both endpoints of every edge to their chromosome
chr_from <- left_join(inData, noteDF, by = c("g1" = "id"))$chr
chr_to <- left_join(inData, noteDF, by = c("g2" = "id"))$chr
df <- tibble(from = chr_from, to = chr_to)

# Count edges per chromosome pair, dropping pairs with an unmapped endpoint
df <- df %>%
  group_by(from, to) %>%
  summarise(value = n()) %>%
  filter(!is.na(from) & !is.na(to))

# Chord diagram of raw counts
chordDiagram(df)

# Chord diagram of log10-scaled counts (shifted by 1 so widths stay positive)
df$value <- log10(df$value) + 1
chordDiagram(df)
|
5b6acb7cbb176a6d7c4bbcc9a44b30b119295c88
|
7672493ea65b7c97dfebb1fef2d4da4132f2bbf3
|
/codes/generate_lncrna_dataframe.R
|
722c295bb23361f961c58a72007a75412dcaebac
|
[
"Artistic-2.0"
] |
permissive
|
andronekomimi/SNPVizualTools
|
a49c9869e651970aa9e288c1e1cb7b54eb78f13d
|
504b70be6d2d42eef33be499eb984690f630e7f9
|
refs/heads/master
| 2016-09-11T02:55:29.744674
| 2015-04-09T13:06:27
| 2015-04-09T13:06:27
| 33,318,730
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 888
|
r
|
generate_lncrna_dataframe.R
|
library(parallel)

# Autosomes chr1..chr22 (sex chromosomes are excluded)
chr_list = paste0("chr", 1:22)

files_path <- "/home/nekomimi/Workspace/COLLAB/mitranscriptome.gtf/mitranscriptome_"

# Serialize one GTF-derived table per chromosome as an .Rda file for the package
for (current_chr in chr_list) {
  my.file <- paste0(files_path, current_chr)
  chr_table <- read.table(my.file, header = FALSE, stringsAsFactors = FALSE,
                          quote = "\"", sep = "\t")
  saveRDS(chr_table,
          file = paste0("~/Workspace/SNPVIZU/SNPVizualTools/data/", current_chr, "_mitrans.Rda"))
}

# lncRNA expression matrix (FPKM), serialized the same way
lncrna_expr <- "/home/nekomimi/Workspace/COLLAB/mitranscriptome.expr.fpkm_select.tsv"
expr_table <- read.table(lncrna_expr, header = TRUE, stringsAsFactors = FALSE,
                         quote = "\"", sep = "\t")
saveRDS(expr_table, file = paste0("~/Workspace/SNPVIZU/SNPVizualTools/data/lncrna_expr.Rda"))
|
eb1c67fd6ce0e2aa8c433b4779c66bfcdafa92b2
|
51306eddda32ae60a1782ed52c4c4b5aeb8845b1
|
/man/mpe.Rd
|
3c37f70d9257efe9ab4c48b6849a849e0cd92be1
|
[
"MIT"
] |
permissive
|
max-graham/metrics
|
0d1fac194b5b2a248e59291fadd5be6819bbcdbe
|
7254729e547d567c8f5539c25607335390112d9f
|
refs/heads/master
| 2021-01-20T05:10:43.284914
| 2019-03-20T00:33:23
| 2019-03-20T00:33:23
| 89,755,685
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 704
|
rd
|
mpe.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/error.R
\name{mpe}
\alias{mpe}
\title{MPE}
\usage{
mpe(x, target, na.rm = FALSE)
}
\arguments{
\item{x}{numeric vector}
\item{target}{numeric vector}
\item{na.rm}{logical; should NAs be removed before calculation?}
}
\value{
An atomic numeric vector containing the calculated MPE
}
\description{
Calculates the Mean Percentage Error (MPE) between x and the
target.
}
\examples{
\dontrun{mpe(c(1, 2, 3))} #> Error: different lengths (3, 0)
mpe(c(1, 2, 3), c(1, 4, 4)) #> 25.0
mpe(c(1, 2, NA, 4), c(2, 2, 5, NA)) #> NA
mpe(c(1, 2, NA, 4),
c(2, 2, 5, NA),
na.rm = TRUE) #> 12.5
}
|
a3864eaa10436bde97f4587c17c8cad30064a6b8
|
ee7bfc39868fce34a55dbf3ecc075d262e6f8e36
|
/Red-deer-browsing.R
|
896c3cef0d11b8a07b54196b7e48bb6e141900b0
|
[] |
no_license
|
marielaj/Red-deer-browsing-
|
245ac95241c53d03bee6469590d3a5358cac7744
|
1cafe7c873a25ac111c493a4c5f1a2dae3d504a2
|
refs/heads/master
| 2020-04-14T16:31:20.468411
| 2019-01-15T10:16:18
| 2019-01-15T10:16:18
| 163,954,099
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 229
|
r
|
Red-deer-browsing.R
|
# Master's project - red deer browsing
# Import the Species 2009-2018 dataset
library(readxl)
Species_2009_2018_2_ <- read_excel("~/Master/Data/Species 2009_2018 (2).xlsx")
# FIX: the original called View() twice in a row; opening the viewer once is enough.
View(Species_2009_2018_2_)
|
2bdcd9cfd1482db4def0a173d061b3631063a13f
|
c8b4efc2d2ad4424322ecb64a2e04e5f97fdd048
|
/binomial/tests/testthat/test_summary.R
|
50edfe9ef41065702fa8c40576c46b9f0e377620
|
[] |
no_license
|
stat133-sp19/hw-stat133-gwynethjocelyn
|
e038bccb55ff084624094dc532b06f23b1c9db5d
|
641c1c9210598ed63cfc473b66bb4923d459747a
|
refs/heads/master
| 2020-04-28T07:14:16.583311
| 2019-05-02T08:49:18
| 2019-05-02T08:49:18
| 175,084,796
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,037
|
r
|
test_summary.R
|
library(testthat)

context("tests for summary functions")

# All tests use Binomial(n = 10, p = 0.2) as the reference distribution.

# Test aux_mean: mean of Binomial(n, p) is n * p
test_that("aux_mean works as expected", {
  expect_equal(aux_mean(10, 0.2), 2)
  expect_length(aux_mean(10, 0.2), 1)
  expect_type(aux_mean(10, 0.2), 'double')
})

# Test aux_variance: variance is n * p * (1 - p)
test_that("aux_variance works as expected", {
  expect_equal(aux_variance(10, 0.2), 1.6)
  expect_length(aux_variance(10, 0.2), 1)
  expect_type(aux_variance(10, 0.2), 'double')
})

# Test aux_mode
test_that("aux_mode works as expected", {
  expect_equal(aux_mode(10, 0.2), 2)
  expect_length(aux_mode(10, 0.2), 1)
  expect_type(aux_mode(10, 0.2), 'double')
})

# Test aux_skewness
test_that("aux_skewness works as expected", {
  expect_equal(round(aux_skewness(10, 0.2), 3), 0.474)
  expect_length(aux_skewness(10, 0.2), 1)
  expect_type(aux_skewness(10, 0.2), 'double')
})

# Test aux_kurtosis
# FIX: the length and type expectations previously called aux_skewness
# (copy-paste bug), so aux_kurtosis' shape and type were never checked.
test_that("aux_kurtosis works as expected", {
  expect_equal(aux_kurtosis(10, 0.2), 0.025)
  expect_length(aux_kurtosis(10, 0.2), 1)
  expect_type(aux_kurtosis(10, 0.2), 'double')
})
|
b1f4e78e629be8ab619d7bec1a228780e834bc6e
|
04d0a997364ad1bab775fb920edfe5b60cf6d740
|
/man/ToWrd.Rd
|
826e7496bace7c9912c70a9458fe902e73f90671
|
[] |
no_license
|
mainwaringb/DescTools
|
a2dd23ca1f727e8bbfc0e069ba46f44567e4be24
|
004f80118d463c3cb8fc2c6b3e934534049e8619
|
refs/heads/master
| 2020-12-22T15:12:41.335523
| 2020-03-21T17:30:52
| 2020-03-21T17:30:52
| 236,836,652
| 0
| 0
| null | 2020-01-28T20:40:03
| 2020-01-28T20:40:02
| null |
UTF-8
|
R
| false
| false
| 7,234
|
rd
|
ToWrd.Rd
|
\name{ToWrd}
\alias{ToWrd}
\alias{ToWrd.table}
\alias{ToWrd.ftable}
\alias{ToWrd.character}
\alias{ToWrd.lm}
\alias{ToWrd.TOne}
\alias{ToWrd.Freq}
\alias{ToWrd.default}
\alias{ToWrd.data.frame}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Send Objects to Word
%% ~~function to do ... ~~
}
\description{Send objects like tables, ftables, lm tables, TOnes or just simple texts to a MS-Word document.
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
ToWrd(x, font = NULL, ..., wrd = DescToolsOptions("lastWord"))
\method{ToWrd}{Freq}(x, font = NULL, main = NULL, ..., wrd = DescToolsOptions("lastWord"))
\method{ToWrd}{table}(x, font = NULL, main = NULL, align = NULL,
tablestyle = NULL, autofit = TRUE,
row.names = FALSE, col.names = TRUE, ..., wrd = DescToolsOptions("lastWord"))
\method{ToWrd}{data.frame}(x, font = NULL, main = NULL, row.names = NULL, ...,
wrd = DescToolsOptions("lastWord"))
\method{ToWrd}{ftable}(x, font = NULL, main = NULL, align = NULL,
method = "compact", ..., wrd = DescToolsOptions("lastWord"))
\method{ToWrd}{TOne}(x, font = NULL, para = NULL, main = NULL, align = NULL,
autofit = TRUE, ..., wrd = DescToolsOptions("lastWord"))
\method{ToWrd}{lm}(x, font = NULL, ..., wrd = DescToolsOptions("lastWord"))
\method{ToWrd}{character}(x, font = NULL, para = NULL, style = NULL, bullet = FALSE,
..., wrd = DescToolsOptions("lastWord"))
\method{ToWrd}{default}(x, font = NULL, ..., wrd = DescToolsOptions("lastWord"))
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{the object to be transferred to Word.
%% ~~Describe \code{x} here~~
}
\item{font}{the font to be used to the output. This should be defined as a list containing fontname, fontsize, bold and italic flags:\cr \code{list(name="Arial", size=10, bold=FALSE, italic=TRUE)}.
%% ~~Describe \code{font} here~~
}
\item{para}{list containing paragraph format properties to be applied to the inserted text. For right align the paragraph one can set: \cr
\code{list(alignment="r", LineBefore=0.5)}. See details for the full set of properties.}
\item{main}{a caption for a table. This will be inserted by \code{\link{WrdCaption}} in Word and can be listed afterwards in a specific index. Default is \code{NULL}, which will insert nothing. Ignored if \code{x} is not a table.
%% ~~Describe \code{main} here~~
}
\item{align}{character vector giving the alignment of the table columns. \code{"l"} means left, \code{"r"} right and \code{"c"} center alignment. The code will be recycled to the length of the number of columns.
%% ~~Describe \code{align} here~~
}
\item{method}{string specifying how the \code{"ftable"} object is formatted
(and printed if used as in \code{write.ftable()} or the \code{print}
method). Can be abbreviated. Available methods are (see the examples):
\describe{
\item{\code{"non.compact"}}{the default representation of an
\code{"ftable"} object.}
\item{\code{"row.compact"}}{a row-compact version without empty cells
below the column labels.}
\item{\code{"col.compact"}}{a column-compact version without empty cells
to the right of the row labels.}
\item{\code{"compact"}}{a row- and column-compact version. This may imply
a row and a column label sharing the same cell. They are then
separated by the string \code{lsep}.}
}
}
\item{autofit}{logical, defining if the columns of table should be fitted to the length of their content.
%% ~~Describe \code{autofit} here~~
}
\item{row.names}{logical, defining whether the row.names should be included in the output. Default is \code{FALSE}.
%% ~~Describe \code{row.names} here~~
}
\item{col.names}{logical, defining whether the col.names should be included in the output. Default is \code{TRUE}.
%% ~~Describe \code{row.names} here~~
}
\item{tablestyle}{either the name of a defined Word tablestyle or its index.
%% ~~Describe \code{row.names} here~~
}
\item{style}{character, name of a style to be applied to the inserted text.}
\item{\dots}{further arguments to be passed to or from methods.
%% ~~Describe \code{\dots} here~~
}
\item{bullet}{logical, defines if the text should be formatted as bullet points.}
\item{wrd}{the pointer to a word instance. Can be a new one, created by \code{GetNewWrd()}
or an existing one, created by \code{GetCurrWrd()}.
Default is the last created pointer stored in \code{DescToolsOptions("lastWord")}.
%% ~~Describe \code{wrd} here~~
}
}
\value{if \code{x} is a table a pointer to the table will be returned
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\details{The paragraph format can be defined by means of these properties:
\code{LeftIndent}, \code{RightIndent}, \code{SpaceBefore}, \code{SpaceBeforeAuto}, \code{SpaceAfter}, \code{SpaceAfterAuto}, \code{LineSpacingRule},
\code{Alignment}, \code{WidowControl}, \code{KeepWithNext}, \code{KeepTogether}, \code{PageBreakBefore}, \code{NoLineNumber}, \code{Hyphenation},
\code{FirstLineIndent}, \code{OutlineLevel}, \code{CharacterUnitLeftIndent}, \code{CharacterUnitRightIndent}, \code{CharacterUnitFirstLineIndent},
\code{LineUnitBefore}, \code{LineUnitAfter}, \code{MirrorIndents}.
}
\author{Andri Signorell <andri@signorell.net>
%% ~~who you are~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{\code{\link{GetNewWrd}}
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
\dontrun{
# we can't get this through the CRAN test - run it with copy/paste to console
wrd <- GetNewWrd()
ToWrd("This is centered Text in Arial Black\n",
para=list(Alignment=wdConst$wdAlignParagraphCenter,
SpaceBefore=3, SpaceAfter=6),
font=list(name="Arial Black", size=14),
wrd=wrd)
sel <- wrd$Selection()$Borders(wdConst$wdBorderBottom)
sel[["LineStyle"]] <- wdConst$wdLineStyleSingle
t1 <- TOne(x = d.pizza[, c("temperature","delivery_min","driver","wine_ordered")],
grp=d.pizza$wine_delivered)
ToWrd(t1, font=list(name="Algerian"), wrd=wrd)
tab <- table(d.pizza$driver, d.pizza$area)
tab <- table(d.pizza$driver, d.pizza$area)
ToWrd(tab, font = list(size=15, name="Arial"), row.names = TRUE, col.names = TRUE,
main= "my Title", wrd=wrd)
ToWrd(tab, font = list(size=10, name="Arial narrow"),
row.names = TRUE, col.names=FALSE, wrd=wrd)
ToWrd(tab, font = list(size=15, name="Arial"), align="r",
row.names = FALSE, col.names=TRUE, wrd=wrd)
ToWrd(tab, font = list(size=15, name="Arial"),
row.names = FALSE, col.names=FALSE, wrd=wrd)
ToWrd(tab, tablestyle = "Mittlere Schattierung 2 - Akzent 4",
row.names=TRUE, col.names=TRUE, wrd=wrd)
ToWrd(Format(tab, big.mark = "'", digits=0), wrd=wrd)
zz <- ToWrd(Format(tab, big.mark = "'", digits=0), wrd=wrd)
zz$Rows(1)$Select()
WrdFont(wrd = wrd) <- list(name="Algerian", size=14, bold=TRUE)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ print }
% use one of RShowDoc("KEYWORDS")
|
682e9e94b63c8873a3ab9150b30c096c38fde3e2
|
03775e3d1331e2ffe8c6595872a8128273baf67e
|
/man/season_rosters.Rd
|
a979aa414abbabd165c27b41e0d0fd071b98aeb1
|
[] |
no_license
|
bensoltoff/nflscrapR
|
a11686f92221fd510d10c0b2dc692dcc06bc2f8f
|
7f647893eadca6c6b5253faeae9c991394a285f9
|
refs/heads/master
| 2020-12-25T01:06:41.025582
| 2016-06-14T17:41:43
| 2016-06-14T17:41:43
| 61,142,565
| 1
| 0
| null | 2016-06-14T17:32:11
| 2016-06-14T17:32:10
| null |
UTF-8
|
R
| false
| true
| 958
|
rd
|
season_rosters.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GameandRosterFunctions.R
\name{season_rosters}
\alias{season_rosters}
\title{Season Rosters for Teams}
\usage{
season_rosters(Season, TeamInt)
}
\arguments{
\item{Season:}{A 4-digit year associated with a given NFL season}
\item{TeamInt:}{A string containing the abbreviations for an NFL Team}
}
\value{
A dataframe with columns associated with season/year, team, playerID,
players who played and recorded some measurable statistic, and the
last column specifying the number of games they played in.
}
\description{
This function intakes a year and a team abbreviation and outputs
a dataframe with each player who has played for the specified team and
recorded a measurable statistic
}
\details{
To find team-associated abbreviations, use the nflteams dataframe
stored in this package!
}
\examples{
# Roster for Baltimore Ravens in 2013
season_rosters(2013, TeamInt = "BAL")
}
|
1b06c39652a050ba6b22159c14420aad36e2eb1a
|
99df423066e647677dc2cabb8f77a936a56e1998
|
/[3].Check_ES.R
|
1664b3c61762f95d3523cdea95a6142b13d244ed
|
[] |
no_license
|
martin-vasilev/reading_sounds
|
661e080c35240d0c64bae4bed9e8780d3150c3c7
|
484e3cc1cf054e26930c85cae4a26ba6467bf1de
|
refs/heads/master
| 2023-04-10T05:41:19.162364
| 2023-03-24T11:28:21
| 2023-03-24T11:28:21
| 65,729,588
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,725
|
r
|
[3].Check_ES.R
|
# A script for visualisation and checking of effect sizes
### Violin Plots

# NOTE(review): rm(list = ls()) wipes the interactive workspace; avoid in shared scripts.
rm(list=ls())

# Effect-size data: `data$g` holds one effect size per row; columns `sound`,
# `measure`, `task`, `design` and `sample` are used for the breakdowns below.
load("Data/data_old.Rda")
data<- data_old
# settings.R presumably defines `pallete`, `RC` and `Proof` used below -- TODO confirm
source("functions/settings.R")
library(vioplot)
# code adapted from:
# https://www.r-bloggers.com/exploratory-data-analysis-combining-box-plots-and-kernel-density-plots-into-violin-plots-for-ozone-pollution-data/

# overall ES: one violin of all effect sizes pooled together
png('Plots/descriptives/vio_All.png', width = 1600, height = 1600, units = "px", res = 300)
# Empty frame first (type = 'n'); the violin is drawn on top with add = TRUE
plot(1, 1, xlim = c(0.5,1.5), ylim = range(data$g), type = 'n', xlab = 'All studies', ylab = 'Effect size (g)',
     xaxt = 'n', family="serif", cex.lab=1.5, cex.axis=1.5)
vioplot(data$g, col=pallete[2], add=T)
dev.off()
# breakdown by sound: one violin per background-sound condition
png('Plots/descriptives/vio_sound.png', width = 3200, height = 1600, units = "px", res = 300)
plot(1, 1, xlim = c(0.5,3.5), ylim = range(data$g), type = 'n', xlab = 'Background sound',
     ylab = 'Effect size (g)', xaxt = 'n', family="serif", cex.lab=1.5, cex.axis=1.5)
vioplot(data$g[data$sound=="noise"], col=pallete[2], at=1, add=T)
vioplot(data$g[data$sound=="speech"], col=pallete[3], at=2, add=T)
vioplot(data$g[data$sound=="music"], col=pallete[4], at=3, add=T)
axis(1, at = c(1,2, 3), labels = c('Noise', 'Speech', 'Music'))
dev.off()

# breakdown by measure: comprehension vs speed (within RC tasks) vs proofreading
png('Plots/descriptives/vio_measure.png', width = 3200, height = 1600, units = "px", res = 300)
plot(1, 1, xlim = c(0.5,3.5), ylim = range(data$g), type = 'n', xlab = 'Dependent measure',
     ylab = 'Effect size (g)', xaxt = 'n', family="serif", cex.lab=1.5, cex.axis=1.5)
vioplot(data$g[data$measure!="reading_speed" & is.element(data$task, RC)], col=pallete[2], at=1, add=T)
vioplot(data$g[data$measure=="reading_speed" & is.element(data$task, RC)], col=pallete[3], at=2, add=T)
vioplot(data$g[is.element(data$task, Proof)], col=pallete[4], at=3, add=T)
axis(1, at = c(1,2, 3), labels = c('Reading comprehension', 'Reading speed', 'Proofreading accuracy'))
dev.off()

# breakdown by design: between- vs within-subject studies
png('Plots/descriptives/vio_design.png', width = 2400, height = 1600, units = "px", res = 300)
plot(1, 1, xlim = c(0.5,2.5), ylim = range(data$g), type = 'n', xlab = 'Design',
     ylab = 'Effect size (g)', xaxt = 'n', family="serif", cex.lab=1.5, cex.axis=1.5)
vioplot(data$g[data$design=="between"], col=pallete[2], at=1, add=T)
vioplot(data$g[data$design=="within"], col=pallete[3], at=2, add=T)
axis(1, at = c(1,2), labels = c('Between-subject', 'Within-subject'))
dev.off()

# breakdown by age: adult vs child samples
png('Plots/descriptives/vio_age.png', width = 2400, height = 1600, units = "px", res = 300)
plot(1, 1, xlim = c(0.5,2.5), ylim = range(data$g), type = 'n', xlab = 'Age category',
     ylab = 'Effect size (g)', xaxt = 'n', family="serif", cex.lab=1.5, cex.axis=1.5)
vioplot(data$g[data$sample=="adults"], col=pallete[2], at=1, add=T)
vioplot(data$g[data$sample=="children"], col=pallete[3], at=2, add=T)
axis(1, at = c(1,2), labels = c('Adults (> 18 years)', 'Children (< 18 years)'))
dev.off()
#-----------------------------
# Descriptives Layout plot :
#-----------------------------
# 2x3 panel figure combining the four breakdowns above; panels a/b span a full
# row each, c and d share the bottom row. The small darkred rectangles flag the
# subgroup highlighted in each panel.
png('Plots/Descriptive_panel.png', width = 4200, height = 6000, units = "px", res=600, type="cairo")
layout(mat = matrix(c(1,1,2,2,3,4),nrow = 3,ncol = 2,byrow = TRUE),heights = c(0.333,0.333, 0.333))
par(mar=c(6.5,5,4,0.5))

# Panel a: breakdown by background sound
plot(1, 1, xlim = c(0.5,3.5), ylim = range(data$g), type = 'n', xlab = 'Background sound',
     ylab = 'Effect size (g)', xaxt = 'n', family="serif", cex.lab=1.8, cex.axis=1.8,
     main="a", cex.main=2.8)
vioplot(data$g[data$sound=="noise"], col=pallete[2], at=1, add=T)
vioplot(data$g[data$sound=="speech"], col=pallete[3], at=2, add=T)
vioplot(data$g[data$sound=="music"], col=pallete[4], at=3, add=T)
axis(1, at = c(1,2, 3), labels = c('Noise', 'Speech', 'Music'), cex.axis=1.5)
rect(xleft = 2.9, ybottom = 3, xright = 3.1, ytop = 3.5, col = NA, border = "darkred", lwd=1.8 )

# Panel b: breakdown by dependent measure
plot(1, 1, xlim = c(0.5,3.5), ylim = range(data$g), type = 'n', xlab = 'Dependent measure',
     ylab = 'Effect size (g)', xaxt = 'n', family="serif", cex.lab=1.8, cex.axis=1.8,
     main="b", cex.main=2.8)
vioplot(data$g[data$measure!="reading_speed" & is.element(data$task, RC)], col=pallete[2], at=1, add=T)
vioplot(data$g[data$measure=="reading_speed" & is.element(data$task, RC)], col=pallete[3], at=2, add=T)
vioplot(data$g[is.element(data$task, Proof)], col=pallete[4], at=3, add=T)
axis(1, at = c(1,2, 3), labels = c('Reading comprehension', 'Reading speed',
                                   'Proofreading accuracy'), cex.axis=1.5)
rect(xleft = 0.9, ybottom = 3, xright = 1.1, ytop = 3.5, col = NA, border = "darkred", lwd=1.8 )

# Panel c: breakdown by age category
plot(1, 1, xlim = c(0.5,2.5), ylim = range(data$g), type = 'n', xlab = 'Age category',
     ylab = 'Effect size (g)', xaxt = 'n', family="serif", cex.lab=1.8, cex.axis=1.8,
     main="c", cex.main=2.8)
vioplot(data$g[data$sample=="adults"], col=pallete[2], at=1, add=T)
vioplot(data$g[data$sample=="children"], col=pallete[3], at=2, add=T)
axis(1, at = c(1,2), labels = c('Adults', 'Children'), cex.axis=1.5)
rect(xleft = 1.9, ybottom = 3, xright = 2.1, ytop = 3.5, col = NA, border = "darkred", lwd=1.8 )

# Panel d: breakdown by design
plot(1, 1, xlim = c(0.5,2.5), ylim = range(data$g), type = 'n', xlab = 'Design',
     ylab = 'Effect size (g)', xaxt = 'n', family="serif", cex.lab=1.8, cex.axis=1.8,
     main="d", cex.main=2.8)
vioplot(data$g[data$design=="between"], col=pallete[2], at=1, add=T)
vioplot(data$g[data$design=="within"], col=pallete[3], at=2, add=T)
axis(1, at = c(1,2), labels = c('Between-subject', 'Within-subject'), cex.axis=1.5)
rect(xleft = 0.9, ybottom = 3, xright = 1.1, ytop = 3.5, col = NA, border = "darkred", lwd=1.8 )

dev.off()
|
896029e763a29feec3ef19a15aacbcd1ee4bf6b3
|
abea0b5d000d7c01d390eeb615427bc0322aa30f
|
/src/ndfd_extract/R_extract_mypass.R
|
801128325dbee91a143a2d4a8fd7361ad385dde2
|
[] |
no_license
|
janmandel/firewx-evaluation
|
5e176d8762f34b4e88a9446f1d898b3698abc5e5
|
51ca3c4a1c63d8c6ba00e910a87f4c87c2c0ac53
|
refs/heads/master
| 2020-05-05T01:10:49.662013
| 2017-08-24T17:40:06
| 2017-08-24T17:40:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,813
|
r
|
R_extract_mypass.R
|
###### This will extract the forecast for six variables for each day
###### in months Jan-June. This corresponds to the months on the 'My Passport' drive

## Define variable Year/Month/Day and base path name
# Zero-padded month strings (Jan-Jun only)
Month = c("01","02","03","04","05","06")
# Root of the NDFD archive on the external drive
blabel = "/media/wpage/My Passport/NDFD/nomads.ncdc.noaa.gov/NDFD/"
Year = "2015"
# Days "1".."31"; the first nine are overwritten with zero-padded versions so
# the strings match the archive's directory naming ("01".."09", "10".."31")
Day = as.character(seq(1,31,1))
Day[1:9] = c("01","02","03","04","05","06","07","08","09")
# GRIB file-name prefixes for the six forecast variables
Variables = c("YEUZ98_KWBN_","YAUZ98_KWBN_","YRUZ98_KWBN_","YBUZ98_KWBN_",
              "YCUZ98_KWBN_","YIUZ98_KWBN_")

### Import/Create locations
# Station longitude/latitude pairs; drop the CSV's row-index column
Location = read.csv("/home/wpage/Documents/raws_longs.csv")
Location$X = NULL
Locations = as.matrix(Location)
# Alternative subsets / hand-coded coordinates kept for reference:
#Locations = Location[1:(length(Location[,1])/2),]
#Locations = Location[((length(Location[,1])/2)+1):(length(Location[,1])),]
#Matrix of long and lats
#Locations = matrix(c(-112.4975,45.9548,-112.5525,45.2554,-114.2636,48.3042,
#-110.45,45.7,-114.0925,46.9208),ncol=2,byrow=T)

## For loop for month
for (m in 1:length(Month)) {
## Set working directory
wd = paste(blabel,Year,Month[m],"/",sep="")
setwd(wd)
# Read in all files for month
files = list.files(".",recursive = TRUE,full.names=FALSE)
#Master Output
M.out = data.frame(Date=NA,Variable=NA,Forecast=NA,Long=NA,Lat=NA,Value=NA)
### For loop for all days in month
for (l in 1:length(Day)) {
Days = paste(Year,Month[m],Day[l],"/",sep="")
label = paste(blabel,Year,Month[m],"/",Days,sep="")
### For loop for all 6 variables
for (k in 1:length(Variables)) {
### Get the file names for each day
CONUS_TMP = Variables[k]
list.files = grep(paste(Year,Month[m],Day[l],"/",CONUS_TMP,sep=""),files)
names = as.character() #get file names
for (i in 1:length(list.files)) {
names[i] = files[list.files[i]] }
### Pull the 1 hour forecast out for each lat long for each hour in day
for (j in 1:length(names)) { tryCatch({
# Build file names to pull from system
dir = "cd /media/wpage/'My Passport'/NDFD/nomads.ncdc.noaa.gov/NDFD/"
changeDir = paste(dir,Year,Month[m],"/",Year,Month[m],Day[l],"/"," && ",sep="")
file.name = gsub(".*/","",paste(names[j],sep=""))
tmp = character()
loc = character()
for (i in 1:length(Locations[,1])) {
tmp[i] = paste(" -lon ",Locations[i,1]," ",Locations[i,2],sep="")
loc = paste(tmp,collapse="") }
wgrib2 = paste(changeDir,"wgrib2 -d 1 -s ",file.name,loc,sep="")
wgrib3 = paste(changeDir,"wgrib2 -d 2 -s ",file.name,loc,sep="")
#Run wgrib2
run = system(wgrib2,intern=TRUE)
run2 = system(wgrib3,intern=TRUE)
run = paste(unlist(run),collapse="")
run2 = paste(unlist(run2),collapse="")
#Pull out results and organize
temp3 = unlist(strsplit(run,"[,:]"))[6]
temp4 = unlist(strsplit(run2,"[,:]"))[6]
if(temp3 == "1 hour fcst" | temp3 == "1-7 hour acc fcst") {
temp2 = unlist(strsplit(run,"[,:]"))} else if(temp4 == "1 hour fcst") {
temp2 = unlist(strsplit(run2,"[,:]"))} else temp2=0
Output = data.frame(Date=NA,Variable=NA,Forecast=NA,Long=NA,Lat=NA,Value=NA)
Output[1:length(Locations[,1]),1] = gsub("[^0-9]","",temp2[grep("d=",temp2)])
Output[1:length(Locations[,1]),2] = temp2[4]
Output[1:length(Locations[,1]),3] = temp2[6]
for (i in 1:length(Locations[,1])) {
Output[i,4] = gsub("lon=","",temp2[grep("lon",temp2)[i]])
Output[i,5] = gsub("lat=","",temp2[grep("lat",temp2)[i]])
Output[i,6] = gsub("val=","",temp2[grep("val",temp2)[i]]) }
Output = Output[complete.cases(Output),] # removes any NA's
#Take Output and merge with master output file
M.out = rbind(Output,M.out) }, error=function(e){cat ("ERROR :",conditionMessage(e), "\n")})
}
}
}
#### Send output to csv file
bsavedir = "/home/wpage/Documents/Output/"
savedir = paste(bsavedir,"RAWS","_",Year,Month[m],".csv",sep="")
write.csv(M.out,file = savedir)
}
|
3065c0dd19db6d926238ebd20a43b23b8d397728
|
eb127bbb4e75966296b4a2234250ba6819e513b1
|
/__old_code_analysis/analyze_transmissions_FCT.R
|
d08f7df2df247a223419a51a1b51869ac342ce58
|
[] |
no_license
|
davidchampredon/stiagent
|
29cc33cc8e1a54763ccd5f12f05949ac80354575
|
dc6cd187b7649ee4517fc27ea66aff377c8ff892
|
refs/heads/master
| 2021-01-10T12:50:45.273558
| 2016-03-21T03:45:58
| 2016-03-21T03:45:58
| 43,753,973
| 0
| 0
| null | 2015-11-18T01:53:12
| 2015-10-06T13:56:06
|
C++
|
UTF-8
|
R
| false
| false
| 1,504
|
r
|
analyze_transmissions_FCT.R
|
# Bar chart of transmission counts by the infector's gender, one facet per STI.
# Returns a ggplot object.
plot.infector.gender <- function(transm){
  # Tally transmissions per (STI, infector gender) pair.
  counts <- ddply(transm, c("stiname", "gender_from"),
                  summarize, n = length(gender_from))
  p <- ggplot(counts) +
    geom_bar(aes(x = factor(gender_from), y = n, fill = factor(gender_from)),
             stat = "identity") +
    facet_wrap(~stiname, scales = "free_y") +
    ggtitle("Transmission by infector's gender") +
    xlab("Infector Gender")
  return(p)
}
# Stacked bar chart of transmissions by infector risk group (x axis), coloured
# by the infectee's risk group, one facet per STI. Returns a ggplot object.
plot.infector.riskGroup <- function(transm){
  # Count transmissions for each (STI, infector group, infectee group) triple.
  counts <- ddply(transm, c("stiname", "riskGroup_from", "riskGroup_to"),
                  summarize, n = length(stiname))
  p <- ggplot(counts) +
    geom_bar(aes(x = factor(riskGroup_from), y = n, fill = factor(riskGroup_to)),
             stat = "identity") +
    facet_wrap(~stiname, scales = "free_y") +
    scale_fill_brewer(palette = "Reds") +
    ggtitle("Transmission by infector's risk group") +
    xlab("Infector's risk group")
  return(p)
}
# Density plot of the generation interval (how long the infector had carried
# the STI at the moment of transmission), one facet per STI.
# Returns a ggplot object.
plot.generation.interval <- function(transm)
{
  # Generation interval rounded to 2 decimals before tallying.
  transm$GI <- round(transm$stiduration_from, digits = 2)
  # NOTE(review): the density below is computed over the distinct GI values in
  # `x`, not weighted by the count `n` -- confirm this is intended.
  x <- ddply(transm, c("stiname", "GI"), summarize, n = length(stiname))
  g <- ggplot(x) + geom_density(aes(x = GI, fill = stiname)) +
    facet_wrap(~stiname, scales = "free")
  g <- g + ggtitle("Generation Interval") + xlab("Years")
  # Fix: return the plot explicitly, consistent with the sibling plot helpers.
  # Previously the function ended on an assignment, whose value is returned
  # invisibly, so the plot did not auto-print when called at top level.
  return(g)
}
# Step plot of transmission (incidence) counts over time, broken down by the
# infector's risk group and STI. Returns a ggplot object.
plot.incid.risk.sti.time <- function(transm){
  # Number of transmission events per (time, infector risk group, STI).
  counts <- ddply(transm, c("time", "riskGroup_from", "stiname"),
                  summarize, n = length(time))
  p <- ggplot(counts) +
    geom_step(aes(x = time, y = n, colour = factor(riskGroup_from)), size = 1.5) +
    facet_wrap(~riskGroup_from + stiname, ncol = 2) +
    scale_colour_brewer(palette = "Reds") +
    ggtitle("Incidence by risk group and STI")
  return(p)
}
|
210f552a9b71994599719182b8e3a9991f0c4995
|
332de09406153981b30b41206afe4b5618b7f248
|
/cachematrix.R
|
f196d14e297cc158849c88e4f05026c1ed907f25
|
[] |
no_license
|
samadari/ProgrammingAssignment2
|
d916d9f56a666d1409605b4b189bf1ddb9b41d42
|
01e5cd834fd7b1b49cc0d6669daf7b951be908ba
|
refs/heads/master
| 2021-01-16T21:47:03.152944
| 2015-03-15T18:42:52
| 2015-03-15T18:42:52
| 32,274,879
| 0
| 0
| null | 2015-03-15T17:48:51
| 2015-03-15T17:48:49
| null |
UTF-8
|
R
| false
| false
| 1,678
|
r
|
cachematrix.R
|
## Functions to compute the inverse of a matrix and save it in the cache
## so next time the inverse of the matrix is needed it's faster to get
## Function to create an object associated to a matrix; it contains a list with
## a function to set the value of the matrix (and its inverse to NULL) in cache
## a function to get the value of the matrix
## a function to set the value of the matrix inverse in cache
# a functiojn to get the value of the matrix inverse from cache
## Create a cache object wrapping a matrix `m`. Returns a list of four
## closures sharing one environment:
##   setmatrix(y) - replace the stored matrix and invalidate the cached inverse
##   getmatrix()  - return the stored matrix
##   setinv(i)    - store a computed inverse in the cache
##   getinv()     - return the cached inverse (NULL when not yet computed)
makeCacheMatrix <- function(m = matrix()) {
  inv <- NULL
  # Fix: the original assigned `mcache <<- y`, a name that does not exist in
  # this closure, so setmatrix() never actually replaced the stored matrix
  # (it leaked a `mcache` variable into the global environment instead).
  setmatrix <- function(y) {
    m <<- y
    inv <<- NULL   # new matrix invalidates any cached inverse
  }
  # get the matrix
  getmatrix <- function() m
  # set the value of the matrix inverse in cache
  setinv <- function(auxinv) inv <<- auxinv
  # get the matrix inverse from cache
  getinv <- function() inv
  list(setmatrix = setmatrix, getmatrix = getmatrix,
       setinv = setinv,
       getinv = getinv)
}
## Function to calculate the inverse of a matrix m given as parameter
## It first checks whether the inverse matrix is already in cache
## Return the inverse of the matrix wrapped by cache object `m`
## (as produced by makeCacheMatrix). Uses the cached inverse when available;
## otherwise computes it with solve(), stores it, and returns it.
cacheSolve <- function(m, ...) {
  # Fast path: an inverse has already been computed and cached.
  cached <- m$getinv()
  if (!is.null(cached)) {
    message("getting cached matrix inverse")
    return(cached)
  }
  # Cache miss: invert the matrix, remember the result, return it.
  result <- solve(m$getmatrix())
  m$setinv(result)
  result
}
|
b04f492581ee2c151ed0511115d9529a4c70be0d
|
d133e983aeddf91d6f933c8ce2f7ac60b2b192d5
|
/shiny/edit_data.R
|
a2c823d4707c0e2da59e9f385fee7d2296f6e240
|
[] |
no_license
|
amd112/thesis-sp18-driscoll-envjustice
|
b6ae3651e4a4989b9f259fbaf494ec6054b9208b
|
4d6b8dd2539a862c95cb0a9b521c31376b29db67
|
refs/heads/master
| 2021-09-11T23:25:05.209075
| 2018-04-12T22:00:58
| 2018-04-12T22:00:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,757
|
r
|
edit_data.R
|
library(shinydashboard)
library(leaflet)
library(dplyr)
library(curl)
library(ggmap)
library(rgdal)
library(readr)
library(data.table)
library(stringr)
library(rmapshaper)
# One-off data-preparation script: joins county/tract toxicity data to census
# shapes and caches the merged results for the Shiny app.
# NOTE(review): read_feather()/write_feather() are used below but neither
# library(feather) nor library(arrow) is attached here -- confirm the package
# is loaded elsewhere before this script runs.
data_county = read_feather("shapes/data_county.feather")
data_tract = as.data.table(read_feather("shapes/data_tract.feather"))
race_tract = as.data.table(read_feather("shapes/race_tract.feather"))
#1000 on readin
names(data_county) = c("county", "tox")
names(data_tract) = c("tract", "tox", "area")
# Zero-pad tract FIPS codes to the canonical 11 characters before joining.
race_tract$id = str_pad(race_tract$id, 11, "left", pad = "0")
data_tract = merge(data_tract, race_tract, by.x = "tract", by.y = "id", all = TRUE) #300 ms
# Tracts without a toxicity record get a small positive placeholder (1e-6),
# presumably so log-scale rendering does not break -- confirm.
data_tract[is.na(data_tract$tox), "tox"] = 1*10^-6
# NOTE(review): `states` is read here but never used below -- confirm needed.
states = readRDS("shapes/states.rds")
counties = readRDS("shapes/counties.rds")
# Drop Hawaii (15), Alaska (02) and Puerto Rico (72) from the county shapes.
counties = subset(counties, !(counties$STATEFP %in% c("15", "02", "72")))
counties@data = counties@data[, c(1, 5, 6, 8)]
counties@data$GEOID = as.character(counties@data$GEOID)
counties@data$STATEFP = as.character(counties@data$STATEFP)
# Attach county toxicity values to the shapefile attribute table by GEOID.
counties@data = data.frame(counties@data, data_county[match(counties@data$GEOID, data_county$county), ])
counties@data$tox[is.na(counties@data$tox)] = 1*(10^-6)
#counties = ms_simplify(counties, keep_shapes)
write_rds(counties, "counties_data.rds")
# Rebuild the tract-level table from the race-augmented feather file and
# repeat the same padding/merge/fill steps before caching it.
data_tract = as.data.table(read_feather("shapes/data_tract_race.feather"))
race_tract = as.data.table(read_feather("shapes/race_tract.feather"))
#1000 on readin
names(data_county) = c("county", "tox")
names(data_tract) = c("tract", "tox", "area")
race_tract$id = str_pad(race_tract$id, 11, "left", pad = "0")
data_tract = merge(data_tract, race_tract, by.x = "tract", by.y = "id", all = TRUE) #300 ms
data_tract[is.na(data_tract$tox), "tox"] = 1*10^-6
write_feather(data_tract, "data_tract_race.feather")
|
72aa91751e764ce57e739c96be5de70b95e6afff
|
11be48c2bb50eb77193ee1830a0d569d09079756
|
/R_microhaplotypes_diversity.R
|
20c8382c9b15cb560e471ba45b91366cbd997562
|
[] |
no_license
|
wangdang511/APG_salamanders_R_code
|
bd53026f5a4b1e32af8bafd8a4991077d834907f
|
3cd530bf27045c12afb7d03bb6eb924bb3d6a745
|
refs/heads/main
| 2023-06-12T17:08:59.862073
| 2021-07-01T10:30:29
| 2021-07-01T10:30:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,941
|
r
|
R_microhaplotypes_diversity.R
|
library(seqinr)
library(ape)
library(hillR)
library(tidyverse)
# The main function is get_div
# - gets rds produced by R_microhaplotypes.R
# - extracts binary genotypes, sequences of alleles, and calculates diversities for each segment
#
# The body makes filtering and calculated weighted means per gene
# The effect is:
# - dataframe of per segment diversities for segments, genes and species (saved as .rds and .txt)
# - dataframe of per species per gene diversites (saved as .rds and .txt)
#Inputs:
# - *seg_microhap*.rds produced by R_microhaplotypes.R
#IMPORTANT: this version of the script uses only "raw_MIP_seg" - code dealaing with "black" and "white" in in old/
# - gene_ids.txt - translation table for gene names as in reference files into gene symbols and gene class
#when we have data on codon frame, add functional measures
#load common functions
source("https://raw.githubusercontent.com/wbabik/R_functions/main/R_functions_WB_01.R")
#raw_MIP_seg - segments based on all MIPs
#raw_MIP_seg_min20cov - segments based on MIPs with median coverage >=20
#categ is a general purpose suffix, which can for example be used to analyse 15 in
#requires Grantham_Distance_table.csv in the same folder
#this version calculates both species wide alpha and gamma diversity and alpha diversity for each individual
#it also saves aa sequeneces of all segment haplotypes together with their sequences
get_div <- function(t, categ = "raw_MIP_seg_min20cov", fr = frame, stop.rm = TRUE){
#gets rds produced by R_microhaplotypes.R
#extracts binary genotypes, sequences of alleles, and calculates diversities
#returns list of named elements
#"sp_div"
#"aa_seq"
# Arguments:
#   t       - species code (e.g. "Amb_tex"), used in input/output file names
#   categ   - dataset suffix selecting which *_seg_microhap_*.rds to read
#   fr      - per-base reading-frame table (defaults to the global `frame`)
#   stop.rm - if TRUE, haplotypes containing stop codons are removed before
#             any (DNA, codon, AA) calculations
bin_gen <- function(se, df, categ, N = tot_N){
#for a segment (not collapsed - for collapsed look at the old code)
#filters by PAF and coverage and returns a list with named elements:
# "DNA", "codon", "AA" - each of these is a list of three elements:
#1) df with binary encoded genotypes (inds in rows, alleles in cols)
#2) a single row df with basic statistics of the segment
#3) dataframe with allele seuences
# add gene, info etc to df2
# NOTE(review): the `categ` argument of bin_gen is never used in its body.
# Coverage/PAF filters: polymorphic segments need per-locus coverage >=20 and
# per-allele frequency > 0.1; monomorphic ones only coverage >=3.
d <- df %>% select(-c(MIP, rank, full_haplo, seg_start_MIP, seg_end_MIP)) %>%
filter((seg_id == se & polymorphic == TRUE & id_loc_cov >=20 & PAF > 0.1)|
(seg_id == se & polymorphic == FALSE & id_loc_cov >=3)) %>%
group_by(species, gene, id, id_cov, seg_start_ref, seg_end_ref, seg_id,
polymorphic, seg_hap, len, cds_len, codon_hap, aa_hap, lencodon, lenAA) %>%
summarize(depth = sum(depth)) %>% ungroup() %>% distinct(id, seg_hap, .keep_all = TRUE) %>%
as.data.frame()
sp <- as.character(d[1, "species"])
gene <- as.character(d[1, "gene"])
cds_len <- as.numeric(d[1, "cds_len"])
#if stop.rm = TRUE then haplotypes with stop codons are filtered out early,
#so they don't even enter DNA-based calculations
if (stop.rm == TRUE) d <- d %>% filter(!grepl("\\*", aa_hap))
# One genotype table per representation: unique DNA / codon / AA haplotypes.
dDNA <- d %>% distinct(id, seg_hap, .keep_all = TRUE)
dcodon <- d %>% distinct(id, codon_hap, .keep_all = TRUE)
dAA <- d %>% distinct(id, aa_hap, .keep_all = TRUE)
if(nrow(dDNA) == 0){
summary <- data.frame("species" = sp, "gene" = gene, "segment" = se, "len" = NA, "cds_len_bp" = NA, "n_hap" = NA,
"N_typed" = 0, "fr_typed" = 0, "S" = NA, "S_unamb" = NA, "dmax" = NA)
r <- list("genotypes" = NULL, "summary" = summary, "seq" = NULL)
# NOTE(review): `<-` inside list() performs assignment rather than naming, so
# this empty-segment result is an UNNAMED list; downstream access such as
# bg[["DNA"]] would return NULL for it. Likely `=` was intended -- confirm.
res <- list("DNA" <- r, "codon" <- r, "AA" <- r, "AAGhm" <- r)
} else {
res <- NULL
for(s in c("DNA", "codon", "AA")){
if(s == "DNA"){
d <- dDNA
len <- d$len[1]
h <- "seg_hap"
} else if(s == "codon"){
d <- dcodon %>% filter(!is.na(codon_hap))
if(nrow(d) > 0){
len <- d$lencodon[1]
h <- "codon_hap"
}
} else if(s == "AA"){
#this removes also haplotypes with stop codons from AA alignent
d <- dAA %>% filter(!is.na(aa_hap))
if(nrow(d) > 0){
len <- d$lenAA[1]
h <- "aa_hap"
}
}
if(nrow(d) == 0){
summary <- data.frame("species" = sp, "gene" = gene, "segment" = se, "len" = NA, "cds_len_bp" = NA, "n_hap" = NA,
"N_typed" = 0, "fr_typed" = 0, "S" = NA, "S_unamb" = NA, "dmax" = NA)
r <- list("genotypes" = NULL, "summary" = summary, "seq" = NULL)
res[[s]] <- r
} else {
# Rank alleles by total read depth and label them all_01, all_02, ...
a <- d %>% group_by(across(h)) %>% summarize(tot_depth = sum(depth)) %>%
arrange(desc(tot_depth), desc(h)) %>% mutate(allele = paste0("all_", sprintf("%02d",row_number())))
seqdf <- a %>% select(allele, h) %>% as.data.frame()
# df2prot/df2DNA come from the sourced R_functions_WB_01.R helpers.
if (s == "AA") seq <- df2prot(seqdf) else seq <- df2DNA(seqdf)
# Wide 0/1 presence/absence matrix: individuals in rows, alleles in columns.
d_a <- left_join(d, a, by = h) %>% select(id, allele) %>% mutate(presence = 1)
d_a <- d_a %>% pivot_wider(names_from = allele, values_from = presence, values_fill = list(presence = 0)) %>%
as.data.frame()
n_hap <- ncol(d_a) - 1
n_ind <- nrow(d_a)
summary <- data.frame("species" = sp, "gene" = gene, "segment" = se, "len" = len, "cds_len_bp" = cds_len, "n_hap" = n_hap,
"N_typed" = n_ind, "fr_typed" = n_ind/N)
rownames(summary) <- c()
r <- list("genotypes" = d_a, "summary" = summary, "seq" = seq)
res[[s]] <- r
# AAGhm (Grantham-weighted AA) reuses the AA genotypes; only the
# downstream diversity calculation differs.
if(s == "AA") res[["AAGhm"]] <- r
}
}
}
return(res)
}
#takes binary genotypes for DNA, codons or AA -
#one element of the list produced by bin_gen
#vectorized function used to identify the beginning of the first codon in the segment
#handles also cases when the segment ends after the end of the cds
# Returns a str_sub-style start offset: NA when the segment has no cds overlap.
c_start <- Vectorize(function(g, fs, fe, ssr, sf = sp_frame){
if(is.na(fs)){
if(is.na(fe)){
cs <- NA
} else {
# Segment starts before the cds: first codon begins at the cds start.
gmin <- sf %>% filter(gene_sym == g) %>% pull(seg_start_ref) %>% min()
cs <- gmin - ssr + 1
}
} else {
cs <- case_when(fs == 1 ~ 1,
fs == 2 ~ 3,
fs == 3 ~ 2)
}
return(cs)
})
#vectorized function used to identify the end of the last codon in the segment
#handles also cases when the segment ends after the end of the cds
# Returns a negative str_sub-style end offset counted from the segment's end.
c_end <- Vectorize(function(g, fs, fe, ser, sf = sp_frame){
if(is.na(fe)){
if(is.na(fs)){
ce <- NA
} else {
# Segment runs past the cds end: last codon ends at the cds end.
gmax <- sf %>% filter(gene_sym == g) %>% pull(seg_end_ref) %>% max()
ce <- -(ser - gmax + 1)
}
} else {
ce <- case_when(fe == 1 ~ -2,
fe == 2 ~ -3,
fe == 3 ~ -1)
}
return(ce)
})
#vectorized function calculating the length of the segment that covers cds (not necessarily full codons)
cds_l <- Vectorize(function(g, fs, fe, ssr, ser, sf = sp_frame){
if (!is.na(fs) & !is.na(fe)) l = ser - ssr else {
gmin <- sf %>% filter(gene_sym == g) %>% pull(seg_start_ref) %>% min()
gmax <- sf %>% filter(gene_sym == g) %>% pull(seg_end_ref) %>% max()
if (is.na(fs)) {
if (is.na(fe)) l = 0 else {
l = ser - gmin
}
} else {
l = gmax - ssr
}
}
return(l)
})
#read df created by R_microhaplotypes,
#replaces Xs with Ns
print(paste0("Processing ", t))
gene_ids <- read.table("gene_ids.txt", sep = "\t", header = T, stringsAsFactors = F)
mh <- readRDS(paste0("rds/", t, "_seg_microhap_", categ, ".rds")) %>%
left_join(gene_ids, by = "gene") %>%
mutate(seg_hap = str_replace_all(seg_hap, "X", "N")) %>%
as.data.frame()
# Total number of individuals typed for this species (denominator of fr_typed).
tot_N <- mh %>% pull(id) %>% unique() %>% length()
#gets Grantham_table
Ghm <- read.csv("Grantham_Distance_table.csv", header = T, row.names = 1,encoding = "UTF-8")
#gets frame positions for the species
sp_frame <- fr %>% filter(species == t)
#gets df with distinct haplotypes and other info
haps <- mh %>% select(species, gene_sym, seg_id, seg_start_ref, seg_end_ref, seg_hap) %>% distinct()
#gets df with distinct segments and other info
seg_starts_ends <- haps %>% select(-seg_hap) %>% distinct()
#adds frame information to segments
seg_frame <- seg_starts_ends %>%
left_join(select(sp_frame,-c(seg_end_ref, frame_end)), by = c("species", "gene_sym", "seg_start_ref")) %>%
left_join(select(sp_frame, -c(seg_start_ref, frame_start)), by = c("species", "gene_sym", "seg_end_ref")) %>%
select(species, gene_sym, seg_id, frame_start, frame_end)
#print(seg_frame)
#joins frame infomation to haplotypes
#extracts full codons into new variable
#and their aa sequence into another variable
codon_haps <- haps %>% left_join(seg_frame, by = c("species", "gene_sym", "seg_id")) %>%
mutate(codon_start = c_start(gene_sym, frame_start, frame_end, seg_start_ref),
codon_end = c_end(gene_sym, frame_start, frame_end, seg_end_ref),
trim_hap = str_sub(seg_hap, codon_start, codon_end),
trim_len = str_length(trim_hap),
cds_len = cds_l(gene_sym, frame_start, frame_end, seg_start_ref, seg_end_ref),
codon_hap = ifelse(trim_len >=3, trim_hap, NA),
aa_hap = v_translate(codon_hap),
lencodon = nchar(codon_hap)/3,
lenAA = nchar(aa_hap))
#joins codon and aa haplotypes to the rest
#saveRDS(codon_haps, paste0("codon_haps", t, ".rds"))
mh <- mh %>% left_join(select(codon_haps, species, gene_sym, seg_id, seg_hap, lencodon, cds_len, codon_hap, lenAA, aa_hap),
by = c("species", "gene_sym", "seg_id", "seg_hap"))
seg_info <- mh %>% distinct(species, gene_sym, seg_id, seg_start_ref, seg_end_ref, cds_len)
segs <- unique(mh$seg_id)
segs <- segs[!is.na(segs)]
# Per-segment results: dd for diversities, aa_sequences for AA haplotypes.
dd <- vector("list", length(segs))
aa_sequences <- vector("list", length(segs))
for(i in seq_along(segs)){
seg <- segs[i]
print(paste0("segment ", seg))
bg <- bin_gen(seg, mh, categ)
if(!is.null(bg[["DNA"]][[1]])){
#save protein haplotypes with additional info
if(!is.null(bg[["AA"]][["seq"]])){
fr_typed <- bg[["AA"]][["summary"]][1, "fr_typed"]
N_typed <- bg[["AA"]][["summary"]][1, "N_typed"]
# Per-allele carrier counts from the 0/1 genotype matrix.
all_counts <- bg[["AA"]][["genotypes"]] %>%
pivot_longer(-id, names_to = "label", values_to = "pres_abs" ) %>%
group_by(label) %>% summarise(count = sum(pres_abs))
# bin2df comes from the sourced R_functions_WB_01.R helpers.
aa_seq <- bg[["AA"]][["seq"]] %>% bin2df() %>% mutate(species = t,
seg_id = seg) %>% select(3, 4, 1, 2) %>%
left_join(seg_info, by = c("species", "seg_id")) %>%
left_join(all_counts, by = "label") %>%
mutate(fr_typed = fr_typed,
N_typed = N_typed) %>%
select(species, gene_sym, seg_id, seg_start_ref, seg_end_ref, label, seq, count, fr_typed, N_typed)
aa_sequences[[i]] <- aa_seq
}
for(z in c("DNA", "codon", "AA", "AAGhm")){
if(!is.null(bg[[z]][["genotypes"]])){
#d will contain two lists: alpha_gamma and individial_alpha
# calc_div comes from the sourced R_functions_WB_01.R helpers.
d <- calc_div(bg[[z]], sp = t, typ = z, Ghm_table = Ghm)
dd[[i]][[z]] <- d
}
}
}
}
#for alpha_gamma
alpha_gamma <- NULL
#for individual_alpha
individual_alpha <- NULL
# Collect the per-segment results into one table per sequence type.
for(z in c("DNA", "codon", "AA", "AAGhm")){
#b will contain two lists: alpha_gamma and individial_alpha
b <- lapply(dd, "[[", z)
seq_type <- data.frame(seq_type = z)
a_g <- lapply(b, "[[", "alpha_gamma") %>% bind_rows()
a_g <- bind_cols(seq_type, a_g)
print(a_g)
i_a <- lapply(b, "[[", "individual_alpha") %>% bind_rows()
i_a <- bind_cols(seq_type, i_a)
print(i_a)
alpha_gamma[[z]] <- a_g
individual_alpha[[z]] <- i_a
}
sp_div <- bind_rows(alpha_gamma)
ind_div <- bind_rows(individual_alpha)
dir.create("out", showWarnings = FALSE)
saveRDS(sp_div, paste0("out/", t, "_sp_div_", categ, ".rds"))
saveRDS(ind_div, paste0("out/", t, "_individual_alphadiv_", categ, ".rds"))
saveRDS(aa_sequences, paste0("out/", t, "_aa_seq_segments_", categ, ".rds"))
r <- list("sp_div" = sp_div,
"ind_div" = ind_div,
"aa_seq" = aa_sequences)
return(r)
}
# Translate a nucleotide string (already trimmed to whole codons) into its
# amino-acid sequence using seqinr::translate with ambiguous bases allowed.
# NA input yields NA. Vectorized over `x`.
v_translate <- Vectorize(function(x){
  if (is.na(x)) {
    return(NA)
  }
  bases <- strsplit(tolower(x), split = "")[[1]]
  paste0(translate(bases, ambiguous = TRUE), collapse = "")
})
# BODY #
#Read frame position of each base in the refseq ####
#this is done only once
#IMPORTANT - coordinates here are 1-based,
#while segments have 0-based, half open
#mutate deals with that
#plus two frame columns are created for convenience
#pull(frame, gene_sym) %>% unique()
frame <- read_delim("frame/frame_table.txt", delim = " ") %>% filter(!is.na(frame)) %>%
mutate(seg_start_ref = Npos - 1,
seg_end_ref = Npos,
frame_end = frame) %>% rename(frame_start = frame) %>%
select(species, gene_sym, seg_start_ref, seg_end_ref, frame_start, frame_end)
#check whether each frame ends at 3rd codon position
frame_check <- frame %>% group_by(species, gene_sym) %>%
mutate(max = max(seg_end_ref)) %>%
filter(seg_end_ref == max)
#Calculate and save cds length for gene/species ####
#calculate cds length for each gene in each species
cdses <- frame %>% select(-c(frame_start, frame_end)) %>% group_by(species, gene_sym) %>%
summarise(cds_length = n()) %>% as.data.frame()
saveRDS(cdses, "CDS_lengths.rds")
# write_lnx_head comes from the sourced R_functions_WB_01.R helpers.
write_lnx_head(cdses, "CDS_lengths.txt")
#Calculate diversities ####
# All 30 salamander species/taxon codes analyzed.
taxa <- c("Amb_tex", "Amb_tig", "And_dav", "Bat_att", "Bat_nig", "Des_fus", "Eur_bis", "Hyd_ita", "Hyd_stri",
"Hyn_lee", "Hyn_ret", "Hyn_tok", "Ich_alp", "Kar_kor", "Lis_bos", "Lis_hel", "Lis_ita", "Omm_nes", "Omm_oph",
"Plet_cin", "Pleu_wal", "Prot_ang", "Sal_sal", "Tri_cri", "Tri_dob", "Tri_iva", "Tri_kar", "Tri_mac", "Tri_mar", "Tri_pyg")
ds <- c("raw_MIP_seg_min20cov", "raw_MIP_seg_min20cov_15_best_covered")
for (i in ds) {
# NOTE(review): `categ = i` is NOT passed to get_div here, so both loop
# iterations process get_div's default dataset ("raw_MIP_seg_min20cov")
# while saving under different suffixes -- likely a bug; confirm.
x <- lapply(taxa, get_div, stop.rm = TRUE)
sp_div <- lapply(x, "[[", "sp_div")
ind_div <- lapply(x, "[[", "ind_div")
suff <- str_replace(i, "raw_MIP_seg_", "")
saveRDS(bind_rows(sp_div), paste0("All_sp_div_excluding_stop_", suff, ".rds"))
saveRDS(bind_rows(ind_div), paste0("All_individual_diversities_excluding_stop_", suff, ".rds"))
if (suff == "min20cov") {
# Rerun with stop codons retained to record haplotypes carrying stops.
y <- lapply(taxa, get_div, stop.rm = FALSE)
aa_seq <- bind_rows(lapply(y, "[[", "aa_seq"))
aa_seq_STOP <- aa_seq %>% filter(grepl("\\*", seq))
sp_div_STOP<- bind_rows(lapply(y, "[[", "sp_div"))
saveRDS(aa_seq, "All_aa_seg_including_stop_min20cov.rds")
saveRDS(aa_seq_STOP, "All_aa_seg_WITH_stop_codons_min20cov.rds")
saveRDS(sp_div_STOP, "All_sp_div_including_stop_min20cov.rds")
}
}
|
560b532083acb03f6632a6b32e3d070cc5de670a
|
2b16b69d6ed4e31a6796bf90d5b1dbe70269d506
|
/R/MTWhoPrimaryOutcomes.R
|
8094113fa97282fef36571d19e10d700cf664535
|
[
"MIT"
] |
permissive
|
andrewbrownphd/MetaTurkR
|
4e35da8fcdae742a4f85e3ba6223f46b062763e4
|
e1d6d81029ff08a5b4a0313f3498515f285882bb
|
refs/heads/master
| 2021-01-17T06:48:21.649056
| 2019-11-22T22:43:29
| 2019-11-22T22:43:29
| 53,510,936
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,339
|
r
|
MTWhoPrimaryOutcomes.R
|
#' Extract Primary Outcomes from WHO trial database
#'
#' This method extracts the HTML elements associated with primary outcomes in the
#' WHO trial database, which also pulls in records from additional registries.
#' Primary outcomes are denoted by \code{DataList12} in the HTML table. Multiple
#' entries are tab delimited. \cr
#' Although the WHO search allows searching multiple registries at once,
#' it is slow, and often times out. Use of \code{MTClinicalTrials} is
#' much faster when needed.
#' @param registryNumber A registry number. Various registry string types work, e.g.:
#' \itemize{
#' \item{NCT01110447}
#' \item{ISRCTN30870177}
#' }
# @param unique Logical. Default is \code{TRUE}.
# Removes multiple primary outcomes that are exact duplicates.
#' @param quiet Logical. If \code{FALSE}, additional messages should print.
#'
#' @details More details to come.
#'
#' @return Returns a named vector the length of and with names derived from
#' \code{registryNumber}. Multiple primary outcomes are tab-delimited.
MTWhoPrimaryOutcomes <- function(registryNumber = NULL,
                                 #unique = TRUE,
                                 quiet = TRUE)
{
  warning("This function will be deprecated in future versions.")
  if (is.null(registryNumber)) stop("Must specify at least one registry number.")
  whoURL <- "http://apps.who.int/trialsearch/Trial2.aspx?TrialID="
  # Output pre-filled with NA, one named slot per registry number.
  regOut <- rep(NA, length(registryNumber))
  names(regOut) <- registryNumber
  # Indices still to fetch; the WHO registry connection is flaky, so failed
  # fetches can be retried interactively below.
  todo <- c(1:length(registryNumber))
  cont <- ""
  # Until told to stop
  while (cont != "N") {
    # Iterate through registryNumbers
    for (r in todo)
    {
      if (!quiet) message(paste("Starting registration", registryNumber[r]))
      # Use the whoURL prefix defined above (previously an unused local).
      con <- paste0(whoURL, registryNumber[r])
      reg <- tryCatch(readLines(con),
                      error = function(e) "Connection Error")
      unlink(con)
      # Fix: on success `reg` is a multi-line character vector, so the old
      # vectorized comparison `reg != "Connection Error"` produced a
      # condition of length > 1 inside `if` (an error since R 4.2).
      if (!identical(reg, "Connection Error")) {
        regP <- XML::htmlParse(reg)
        # Primary outcomes live in the HTML table with id "DataList12".
        tmpNode <- XML::getNodeSet(regP, "//table[@id='DataList12']/*")
        tmpVal <- NULL
        if (length(tmpNode) > 1)
        {
          # Iterate through each primary outcome; first returned row is not an outcome
          for (i in 2:length(tmpNode)) {
            tmpVal[i - 1] <- MTXPath(XML::xmlDoc(tmpNode[[i]]), "//span[1]")
          }
        } else {
          tmpVal <- ""
        }
        # Multiple primary outcomes are collapsed into one tab-delimited string.
        regOut[r] <- paste(unique(tmpVal), collapse = "\t")
      } else {
        warning(paste("Connection Error for reg", r, ": ", registryNumber[r]))
        regOut[r] <- "Connection Error"
      }
    }
    if ("Connection Error" %in% regOut)
    {
      # Ask the user whether the failed registries should be retried.
      while (!cont %in% c("Y", "N"))
      {
        cont <- readline(paste(length(which(regOut == "Connection Error")),
"registries had errors. Try these again?
(N=stop; Y=continue rest of set)"))
      }
      if (cont == "N") next # End redo loop
      cont <- ""            # begin new redo loop with only the redos todo
      todo <- which(regOut == "Connection Error")
    } else {
      cont <- "N"           # End redo loop
    }
  } # Close while loop
  return(regOut)
}
|
8cb46366e6089fb868642911c312aee4b821c3cc
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/oro.nifti/examples/afni-class.Rd.R
|
5f8b2956655ab9f96a5621107631f796cabdc672
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 169
|
r
|
afni-class.Rd.R
|
library(oro.nifti)
### Name: afni-class
### Title: Class "afni"
### Aliases: afni-class show,afni-method
### Keywords: classes
### ** Examples
# Display the formal (S4) class definition of "afni" exported by oro.nifti.
showClass("afni")
|
bbf3f301d8700422005b7a65532a3beae2214296
|
84b5ea895d1b31c59130ac0ce07d09a34f1abb24
|
/plot4.R
|
96c5a19bcd3534f7223e45f89365692c4218dd05
|
[] |
no_license
|
chrisfmontes/ExData_Plotting1
|
ad68d69e96843d8e7911f06de0eb9b40fc929105
|
4973d5acac1e110da0276b135c50c8faf13d9321
|
refs/heads/master
| 2021-05-28T22:28:42.777472
| 2015-06-05T03:59:38
| 2015-06-05T03:59:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,235
|
r
|
plot4.R
|
# Plot 4: 2x2 panel of household power measurements for 2007-02-01..2007-02-02,
# written to plot4.png (Coursera Exploratory Data Analysis assignment).
power <- read.table("household_power_consumption.txt", sep = ";", header = TRUE,
                    na.strings = "?")
# Fix: call the as.Date() generic instead of the factor method directly;
# since R 4.0 read.table() returns character (not factor) columns by default,
# so as.Date.factor() was dispatching on the wrong class. Also removed the
# stray trailing comma in the read.table() call above.
power$Date <- as.Date(power$Date, "%d/%m/%Y")
power <- subset(power, Date >= as.Date("2007-02-01") & Date <= as.Date("2007-02-02"))
# Combine date and time into a POSIXlt timestamp for the x axis.
power$Time <- paste(power$Date, power$Time, sep = " ")
power$Time <- strptime(power$Time, "%Y-%m-%d %H:%M:%S")
png(filename = "plot4.png", width = 480, height = 480, bg = "transparent")
par(mfcol = c(2, 2))
# Top-left: global active power over time.
plot(power$Time, power$Global_active_power, type = "n",
     ylab = "Global Active Power (kilowatts)", xlab = "")
lines(power$Time, power$Global_active_power)
# Bottom-left: the three sub-metering series with legend.
plot(power$Time, power$Sub_metering_1, type = "n",
     ylab = "Energy sub metering", xlab = "")
lines(power$Time, power$Sub_metering_1)
lines(power$Time, power$Sub_metering_2, col = "red")
lines(power$Time, power$Sub_metering_3, col = "blue")
legend("topright",
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = 1, col = c("black", "red", "blue"))
# Top-right: voltage.
plot(power$Time, power$Voltage, type = "n", ylab = "Voltage", xlab = "datetime")
lines(power$Time, power$Voltage)
# Bottom-right: global reactive power.
plot(power$Time, power$Global_reactive_power, type = "n",
     ylab = "Global_reactive_power", xlab = "datetime")
lines(power$Time, power$Global_reactive_power)
dev.off()
|
8c3e392dcab15f64400598b055df66f19291afee
|
647e39424fe0ec8b784c6285be1cbc9e929c8e17
|
/AI/R/MachineLearning/Unsupervised/Clustering/PAM.R
|
d6cefc9063ba6346b90b19cefd1fdc09a7c265d5
|
[
"Unlicense"
] |
permissive
|
FedeScience/myrepo
|
765b92add2ea245bc626d5b81fedc726005adde4
|
cd2834fa37cf94d3b21208387adc1bb61d7ec243
|
refs/heads/main
| 2023-02-06T15:42:31.496965
| 2020-12-24T14:42:59
| 2020-12-24T14:42:59
| 322,318,564
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 274
|
r
|
PAM.R
|
#@ https://www.datanovia.com/en/blog/types-of-clustering-methods-overview-and-quick-start-r-code/
# Compute PAM (partitioning around medoids) clustering on the USArrests data.
library(cluster)
library(factoextra)
library(magrittr)
# Load and prepare the data
data("USArrests")
# Fix: `my_data` was used below without ever being defined; build it from
# USArrests as in the referenced tutorial (drop NAs, then standardize columns).
my_data <- USArrests %>%
  na.omit() %>%
  scale()
# Partition the standardized data into k = 3 clusters around medoids.
pam.res <- pam(my_data, 3)
# Visualize
fviz_cluster(pam.res)
|
cbdaeaaa1ecd0fe3c4b8a6228b32f21ec949d4c7
|
72908a67604889444952b56ba9cb570bdc29c426
|
/easy_r/Scripts/sc05-2.R
|
692ee6f2a110941cf1b59e69eb93600f2c155a59
|
[] |
no_license
|
ckiekim/BigDataWithR-Lecture
|
01c37469725892373cb288ec22cbd230774e6c2d
|
85ad5ed2043e4db2ea3784fc74a6e3635c051cde
|
refs/heads/master
| 2020-04-30T13:11:49.089291
| 2019-03-21T07:47:56
| 2019-03-21T07:47:56
| 176,849,315
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,562
|
r
|
sc05-2.R
|
# Iris data
# Exploratory console transcript: inspect several built-in datasets, then
# demonstrate apply(), doBy helpers, sampling, subsetting and merging.
head(iris)
tail(iris)
View(iris)
dim(iris)
str(iris)
summary(iris)
iris3
data(mtcars)
head(mtcars)
?mtcars
dim(mtcars)
View(mtcars)
str(mtcars)
summary(mtcars)
# AirPassengers: a monthly time series (ts), not a data frame.
data(AirPassengers)
ap <- AirPassengers
head(ap)
View(ap)
head(AirPassengers)
View(AirPassengers)
?AirPassengers
summary(ap)
data(airquality)
head(airquality)
tail(airquality)
View(airquality)
str(airquality)
summary(airquality)
?airquality
# Titanic: a 4-dimensional contingency table (class x sex x age x survived).
data(Titanic)
head(Titanic)
View(Titanic)
dim(Titanic)
str(Titanic)
summary(Titanic)
?Titanic
require(graphics)
mosaicplot(Titanic, main = "Survival on the Titanic")
## Higher survival rates in children?
apply(Titanic, c(3, 4), sum)
## Higher survival rates in females?
apply(Titanic, c(2, 4), sum)
## Use loglm() in package 'MASS' for further analysis ...
data(InsectSprays)
head(InsectSprays)
View(InsectSprays)
?InsectSprays
dim(InsectSprays)
str(InsectSprays)
summary(InsectSprays)
data(Orange)
head(Orange)
?Orange
View(Orange)
str(Orange)
summary(Orange)
data(swiss)
dim(swiss)
View(swiss)
str(swiss)
summary(swiss)
# Building data frames row- and column-wise.
rbind(c(1, 2, 3), c(4, 5, 6))
(x <- data.frame(id=c(1, 2), name=c("a", "b"), stringsAsFactors=F))
str(x)
(y <- rbind(x, c(3, "c")))
(y <- cbind(x, greek=c("alpha", "beta"), stringsAsFactors=F))
str(y)
(x$greek = c("alpha", "beta"))
str(x)
# apply() over a matrix: row sums, column sums, element-wise identity.
sum(1:10)
d <- matrix(1:9, ncol=3)
d
apply(d, 1, sum)
apply(d, 2, sum)
apply(d, c(1, 2), sum)
# NOTE(review): the next two calls error (missing/out-of-range MARGIN);
# presumably left in deliberately as classroom demonstrations -- confirm.
apply(d, sum)
apply(d, 3, sum)
?apply
apply(d, c(1, 2), sum)
head(iris)
apply(iris[, 1:4], 2, sum)
apply(iris[, 1:4], 2, mean)
colSums(iris[, 1:4])
colMeans(iris[, 1:4])
# NOTE(review): install.packages() inside a script is an interactive-session
# leftover; normally the package would be installed once, outside the script.
install.packages("doBy")
library(doBy)
summary(iris)
summaryBy(Sepal.Width + Sepal.Length ~ Species, iris)
# Sorting rows: base order() vs doBy::orderBy().
order(iris$Sepal.Width)
iris[order(iris$Sepal.Width),]
iris[order(iris$Sepal.Length, iris$Sepal.Width), ]
orderBy(~ Sepal.Width, iris)
orderBy(~ Species + Sepal.Width, iris)
# Random sampling with and without replacement, and stratified sampling.
sample(1:10, 5)
sample(1:10, 5, replace = T)
sample(1:3, 2, prob = c(3, 1, 6))
sample(1:10, 10)
iris[sample(NROW(iris), NROW(iris)), ]
sampleBy(~ Species, frac=0.1, data=iris)
# Splitting, group-wise means, and subset()-based row/column selection.
split(iris, iris$Species)
lapply(split(iris$Sepal.Length, iris$Species), mean)
subset(iris, Species == "setosa")
subset(iris, Species == "setosa" & Sepal.Length > 5.0)
subset(iris, select = c(Species, Sepal.Width))
subset(iris, select = -c(Species, Sepal.Width))
iris[, !names(iris) %in% c("Sepal.Width", "Species")]
# Joining data frames: merge() matches on the shared "name" column.
x <- data.frame(name=c("a", "b", "c"), math=c(1, 2, 3))
y <- data.frame(name=c("c", "b", "a"), english=c(4, 5, 6))
x
y
merge(x, y)
cbind(x, y)
y <- data.frame(name=c("d", "b", "a"), english=c(4, 5, 6))
merge(x, y)
merge(x, y, all = T)
|
cf30392b1ea83220229e5f06bde1bb4eed9be333
|
5740ab7010175765df3d4a5aac84cddad11e5c2d
|
/src/R/get_cpu_cores.R
|
b309e9bb39283032cdc643209b41f445b1c1ad86
|
[] |
no_license
|
howl-anderson/sdmengine.common
|
4cb53e3391ff1fe9c3182fabc85bd7354e9915d7
|
727c2eab56771297d2d6fe2475482a52aca3de4d
|
refs/heads/master
| 2020-06-13T23:04:25.026311
| 2015-09-04T07:00:55
| 2015-09-04T07:00:55
| 41,902,305
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 647
|
r
|
get_cpu_cores.R
|
#!/usr/bin/env Rscript
#' get_cpu_cores
#'
#' Determine how many CPU cores a workshop run may use, based on the `cpu`
#' entry of its configure file:
#'   * missing / NULL -> treated as -1 (reserve one core)
#'   * positive value -> use exactly that many cores
#'   * zero/negative  -> use all detected cores minus abs(setting)
#'
#' @param workshop_dir Directory of workshop
#' @return CPU cores that can be used (a positive number)
#'
#' @export
get_cpu_cores <- function(workshop_dir) {
  configure <- load_configure_file(workshop_dir)
  cpu_setting <- configure$cpu
  if (is.null(cpu_setting)) {
    # default reserved one CPU core
    cpu_setting <- -1
  }
  if (cpu_setting > 0) {
    cpu_number <- cpu_setting
  } else {
    # parallel::detectCores() is documented to return NA when the core count
    # cannot be determined; the original unqualified call would then fail with
    # an obscure "missing value where TRUE/FALSE needed" in the check below.
    total_cores <- parallel::detectCores()
    if (is.na(total_cores)) {
      stop('Error: unable to detect the number of CPU cores on this machine.')
    }
    cpu_number <- total_cores - abs(cpu_setting)
    if (cpu_number <= 0) {
      stop('Error: CPU reserved too much, there are no CPU to use.')
    }
  }
  return(cpu_number)
}
|
724237d526805b29d1b16944eace00dc69be39f2
|
38373485330e50b09d27ea265ee0535b368f0579
|
/code/soccer-preprocessor.R
|
6988bd53cbd87dcff3079b4afb55881d166cc7cc
|
[] |
no_license
|
s81320/vis
|
5300e346349acd568cd7ff4ad06751960aeb42b8
|
b96755388ebdbd50c42d145e9e6fc26b2c1c45c4
|
refs/heads/master
| 2022-11-18T03:34:05.794807
| 2020-07-21T17:25:05
| 2020-07-21T17:25:05
| 270,222,860
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,178
|
r
|
soccer-preprocessor.R
|
#' Pre-process a raw soccer (FIFA-style) player CSV into an analysis-ready CSV.
#'
#' Reads `input_csv_name` from `io_dir`, drops identifier/image columns,
#' converts money strings, heights, weights and positional scores to numbers,
#' factorises categorical columns, removes/imputes missing values, and writes
#' "soccer-preprocessed.csv" back into `io_dir`. Called for its side effect
#' (the written file); no useful return value.
preprocess <- function(io_dir, input_csv_name) {
# directory and file exists check
if (!dir.exists(io_dir)) {
stop(
paste0(
"The provided input-output directory does not exist! (", io_dir, ")"
)
)
}
input_csv <- file.path(io_dir, input_csv_name, fsep=.Platform$file.sep)
if (!file.exists(input_csv)) {
stop(
paste0(
"The provided input_csv does not exist! (", input_csv, ")"
)
)
}
# installing and loading required packages
# NOTE(review): installing packages inside a function is a side effect on the
# library; prefer requireNamespace("stringr") and failing with a clear error.
if (!("stringr" %in% installed.packages())) {
install.packages("stringr")
}
require(stringr)
# reading unprocessed soccer data
soccer.raw <- read.csv(
file=input_csv,
encoding = "UTF-8",
stringsAsFactors=FALSE
)
# pre-processing
# step-1 [features_removal]
# remove columns -
# 1. serial ("X.U.FEFF"),
# 2. "ID",
# 3. "Photo",
# 4. "Flag",
# 5. "Club.Logo",
# 6. "Real.Face",
# 7. "Joined",
# 8. "Loaned.From",
# 9. "Contract.Valid.Until"
# listing column names of unprocessed soccer data
columns <- colnames(soccer.raw)
removable_columns_indices <- c(
which(columns=="X.U.FEFF."),
which(columns=="ID"),
which(columns=="Photo"),
which(columns=="Flag"),
which(columns=="Club.Logo"),
which(columns=="Real.Face"),
which(columns=="Joined"),
which(columns=="Loaned.From"),
which(columns=="Contract.Valid.Until")
)
# NOTE(review): if NONE of the columns above exist, this index vector is
# integer(0) and `[, -integer(0)]` selects ZERO columns (drops everything) —
# TODO confirm the expected input always contains these columns.
soccer.preprocessed <- soccer.raw[, -removable_columns_indices]
# remaining_columns=80
# step-2 [conversion]
# convert amount strings of "Value", "Wage", "Release.Clause" to numeric,
# e.g. €1M -> 1000000
# Parses "€<int>[.<frac>][M|K]"; the M/K suffix becomes a 1e6/1e3 multiplier
# (1 when absent). Unparseable strings become NA via as.numeric.
euro_string_to_euro_numeric <- function(euro_strings) {
result <- str_match(euro_strings, "(€)(\\d+)(\\.)?(\\d+)?(M|K)?")[, c(3:6)]
part1 <- result[, 1]
part2 <- result[, 2]
part3 <- result[, 3]
part4 <- ifelse(result[, 4]=="M", 1e6, 1e3)
part4[is.na(part4)] <- 1
part0 <- paste0(
ifelse(is.na(part1), "", part1),
ifelse(is.na(part2), "", part2),
ifelse(is.na(part3), "", part3)
)
part0 <- sapply(part0, as.numeric, USE.NAMES = FALSE)
part0 * part4
}
columns <- colnames(soccer.preprocessed)
amount_columns_indices <- c(
which(columns=="Value"),
which(columns=="Wage"),
which(columns=="Release.Clause")
)
soccer.preprocessed[amount_columns_indices] <- apply(
X=soccer.preprocessed[amount_columns_indices],
MARGIN=2,
FUN=euro_string_to_euro_numeric
)
# step-3 [conversion]
# convert "Height" feet'inch values to cm
# e.g. 5'7 (5 feet 7 inch) -> 170 (cm)
feet_inches_to_cm <- function(heights_in_fi) {
result <- apply(str_split(heights_in_fi, "'", simplify=TRUE), 2, as.numeric)
result[, 1] <- sapply(result[, 1], function(x) x * 30.48)
result[, 2] <- sapply(result[, 2], function(x) x * 2.54)
result <- round(apply(result, 1, sum), 0)
}
soccer.preprocessed$Height <- feet_inches_to_cm(soccer.preprocessed$Height)
# step-4 [conversion]
# convert "Weight" lbs values to kg
# e.g. 150lbs (150 pound) -> 68 (kg)
pounds_to_kg <- function(weights_in_pound) {
result <- str_replace_all(weights_in_pound, "lbs", "")
result <- sapply(result, as.numeric, USE.NAMES=FALSE)
result <- round(result * 0.4535924, 0)
}
soccer.preprocessed$Weight <- pounds_to_kg(soccer.preprocessed$Weight)
# step-5 [conversion]
# position-wise scores in columns (e.g. "LS", "ST", "RS" ...) are in format x+y,
# transformed to use only x value, removed +y, parsed into numerical value
# e.g. "80+2" -> 80
scores_to_flat_numeric <- function(scores) {
sapply(str_match(scores, "(\\d{1,2})(?:\\+\\d{1})")[, 2], as.numeric, USE.NAMES=FALSE)
}
position_columns_indices <- c(
which(columns=="LS"),
which(columns=="ST"),
which(columns=="RS"),
which(columns=="LW"),
which(columns=="LF"),
which(columns=="CF"),
which(columns=="RF"),
which(columns=="RW"),
which(columns=="LAM"),
which(columns=="CAM"),
which(columns=="RAM"),
which(columns=="LM"),
which(columns=="LCM"),
which(columns=="CM"),
which(columns=="RCM"),
which(columns=="RM"),
which(columns=="LWB"),
which(columns=="LDM"),
which(columns=="CDM"),
which(columns=="RDM"),
which(columns=="RWB"),
which(columns=="LB"),
which(columns=="LCB"),
which(columns=="CB"),
which(columns=="RCB"),
which(columns=="RB")
)
soccer.preprocessed[position_columns_indices] <- apply(
X=soccer.preprocessed[position_columns_indices],
MARGIN=2,
FUN=scores_to_flat_numeric
)
# step-6 [conversion]
# converting to factor datatype for columns with catergorical values
# 6a. setting the values with empty string in category type columns to NA
columns <- colnames(soccer.preprocessed)
factor_columns_indices <- c(
which(columns=="Nationality"),
which(columns=="Club"),
which(columns=="Preferred.Foot"),
which(columns=="Work.Rate"),
which(columns=="Body.Type"),
which(columns=="Position")
)
soccer.preprocessed[, factor_columns_indices][soccer.preprocessed[, factor_columns_indices] == ""] <- NA
# 6b. converting to factor datatype
soccer.preprocessed$Nationality <- as.factor(soccer.preprocessed$Nationality)
soccer.preprocessed$Club <- as.factor(soccer.preprocessed$Club)
soccer.preprocessed$Preferred.Foot <- as.factor(soccer.preprocessed$Preferred.Foot)
soccer.preprocessed$Work.Rate <- as.factor(soccer.preprocessed$Work.Rate)
soccer.preprocessed$Body.Type <- as.factor(soccer.preprocessed$Body.Type)
soccer.preprocessed$Position <- as.factor(soccer.preprocessed$Position)
# step-7 [missing_value_handling]
# removing rows with missing value (NA) step wise
# 7a. removing all rows missing "Height", "Weight", "Body.Type" etc.
soccer.preprocessed <- soccer.preprocessed[!is.na(soccer.preprocessed$Height), ]
# nrow(soccer.preprocessed)
# remaining_rows = 18159
# 7b. removing all rows missing "Position"
soccer.preprocessed <- soccer.preprocessed[!is.na(soccer.preprocessed$Position), ]
# nrow(soccer.preprocessed)
# remaining_rows = 18147
# 7c. removing all rows missing "Club"
soccer.preprocessed <- soccer.preprocessed[!is.na(soccer.preprocessed$Club), ]
# nrow(soccer.preprocessed)
# remaining_rows = 17918
# 7d. imputing positional columns values to zero (0) for players with "Position"=GK
soccer.preprocessed[which(soccer.preprocessed$Position == "GK"), position_columns_indices] <- 0
# 7e. imputing missing "Release.Clause" values with zero (0)
# (the `columns` vector is still valid here: steps 6-7 dropped rows, not columns)
soccer.preprocessed[which(is.na(soccer.preprocessed$Release.Clause)), which(columns=="Release.Clause")] <- 0
# writing preprocessed soccer data to output
output_csv_name <- "soccer-preprocessed.csv"
output_csv <- file.path(io_dir, output_csv_name, fsep=.Platform$file.sep)
write.csv(
x=soccer.preprocessed,
file=output_csv,
fileEncoding="UTF-8",
row.names=FALSE
)
}
# Script entry point: run the pre-processing on the Kaggle soccer export.
# NOTE(review): the absolute Windows path only works on the author's machine.
preprocess(
io_dir="D:/msc-ds/course-resource/data-visualization/project",
input_csv_name="soccer-data-kaggle.csv"
)
|
1448e6876cec530e03ca09172d26b1d4e94c2729
|
815906cb89ebcf9683dd355a27025638c8a0850a
|
/man/Barplots.Rd
|
3d5ae84fd6fcce04613edd55b7f0b306422f1d19
|
[] |
no_license
|
mathiaskalxdorf/IceR
|
7510690cc7e8784e36d2adb30f04e68d8f8566a4
|
ebf1a670e9f64007a85352f9ba0c925bd5dfb949
|
refs/heads/master
| 2023-04-08T04:27:42.515154
| 2022-07-30T04:39:34
| 2022-07-30T04:39:34
| 271,002,267
| 16
| 5
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,319
|
rd
|
Barplots.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/General.R
\name{Barplots}
\alias{Barplots}
\title{Generate barplots}
\usage{
Barplots(
Data,
ErrbarData = NA,
Name = "",
xlab = "X-axis",
ylab = "Y-axis",
main = "Titel",
col = "lightblue",
AvgLine = T,
digits_average = 0,
Legends = NA,
Legendscol = NA,
Legendtitle = "Legend",
Legendpos = "topright",
shownumbers = T,
shownumbers_digits = 1,
ylim = NA,
logy = F,
margins = c(10.1, 4.1, 4.1, 4.1),
inset = c(-0.1, 0)
)
}
\arguments{
\item{Data}{Numeric vector or table of samples in columns}
\item{ErrbarData}{Data for errorbars}
\item{Name}{Names}
\item{xlab}{X-Axis label}
\item{ylab}{Y-Axis label}
\item{main}{Plot main title}
\item{col}{Color}
\item{AvgLine}{Show average line?}
\item{digits_average}{Number of digits of average indication}
\item{Legends}{Legends}
\item{Legendscol}{Color of legends}
\item{Legendtitle}{Title for the legend}
\item{Legendpos}{Legend position}
\item{shownumbers}{Show numbers on top of bars}
\item{shownumbers_digits}{Number of digits for shown numbers}
\item{ylim}{y-axis limits}
\item{logy}{Y-Axis in log-scale?}
\item{margins}{Margins}
\item{inset}{Inset for legend}
}
\value{
Plot.
}
\description{
Generate barplots
}
\details{
Generate barplots
}
|
fe2254e279d807bf045cd4ec3710ecd1c51667ac
|
0e92c0b362b230341f9cc31207df8139dbc3ac18
|
/man/boundaries.Rd
|
e2ce1914328dff0d8028f5472864bcbf4a5e01b3
|
[] |
no_license
|
cran/raster
|
b08740e15a19ad3af5e0ec128d656853e3f4d3c6
|
dec20262815cf92b3124e8973aeb9ccf1a1a2fda
|
refs/heads/master
| 2023-07-09T20:03:45.126382
| 2023-07-04T10:40:02
| 2023-07-04T10:40:02
| 17,699,044
| 29
| 35
| null | 2015-12-05T19:06:17
| 2014-03-13T06:02:19
|
R
|
UTF-8
|
R
| false
| false
| 1,521
|
rd
|
boundaries.Rd
|
\name{boundaries}
\alias{boundaries}
\alias{boundaries,RasterLayer-method}
\title{boundaries (edges) detection}
\description{
Detect boundaries (edges). boundaries are cells that have more than one class in the 4 or 8 cells surrounding it, or, if \code{classes=FALSE}, cells with values and cells with \code{NA}.
}
\usage{
\S4method{boundaries}{RasterLayer}(x, type='inner', classes=FALSE, directions=8, asNA=FALSE, filename="", ...)
}
\arguments{
\item{x}{RasterLayer object}
\item{type}{character. 'inner' or 'outer'}
\item{classes}{Logical. If \code{TRUE} all different values are (after rounding) distinguished, as well as \code{NA}. If \code{FALSE} (the default) only edges between \code{NA} and non-\code{NA} cells are considered}
\item{directions}{integer. Which cells are considered adjacent? Should be 8 (Queen's case) or 4 (Rook's case)}
\item{asNA}{logical. If \code{TRUE}, non-edges are returned as \code{NA} instead of zero}
\item{filename}{character. Filename for the output RasterLayer (optional)}
\item{...}{additional arguments as for \code{\link{writeRaster}}}
}
\value{
RasterLayer. Cell values are either 1 (a border) or 0 (not a border), or \code{NA}
}
\seealso{ \code{\link{focal}}, \code{\link{clump}} }
\examples{
r <- raster(nrow=18, ncol=36, xmn=0)
r[150:250] <- 1
r[251:450] <- 2
plot( boundaries(r, type='inner') )
plot( boundaries(r, type='outer') )
plot( boundaries(r, classes=TRUE) )
}
\keyword{methods}
\keyword{spatial}
|
0959664ad8cc6ee40b50ad49d5ca006bb20d7569
|
6b629e8bc4bb0b1c93bb217cb218af5ae5e587c8
|
/gender_differences/old/associate_ext_factors_to_phenotypes.R
|
86d23fb63f652b298c05ab0cf6ccd5728e5a8cc1
|
[] |
no_license
|
DashaZhernakova/umcg_scripts
|
91b9cbffea06b179c72683145236c39f5ab7f8c2
|
1846b5fc4ae613bec67b2a4dd914733094efdb23
|
refs/heads/master
| 2023-08-31T10:45:17.057703
| 2023-08-23T14:47:43
| 2023-08-23T14:47:43
| 237,212,133
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,737
|
r
|
associate_ext_factors_to_phenotypes.R
|
# Load shared preprocessing / GAM-fitting helper functions used below.
source("/groups/umcg-lifelines/tmp01/users/umcg-dzhernakova/scripts/umcg_scripts/gender_differences/preprocessing_gam_fitting_functions.R")
# NOTE(review): setwd() in a script is fragile — all relative paths below
# resolve against this directory.
setwd("/groups/umcg-lifelines/tmp01/users/umcg-dzhernakova/gender_difs/factors")
#' Drop per-sex outliers in the first column of a merged phenotype table.
#'
#' Rows with any NA are removed first; then, separately for women
#' (gender_F1M2 == 1) and men (gender_F1M2 == 2), rows whose first-column
#' value falls outside [Q1 - 1.5*IQR, Q3 + 1.5*IQR] are discarded
#' (Tukey fence, strict inequalities, default type-7 quantiles).
#'
#' @param merged_tab data.frame with the phenotype in column 1 and a
#'   gender_F1M2 column coded 1 = female, 2 = male.
#' @return The filtered data.frame, female rows first, then male rows
#'   (same row order as the original within each group).
rm_outliers <- function(merged_tab){
  merged_tab <- na.omit(merged_tab)
  # One IQR-fence filter instead of the previous copy-pasted per-sex blocks.
  drop_iqr_outliers <- function(d) {
    q <- quantile(d[, 1], probs = c(0.25, 0.75))
    iqr <- q[2] - q[1]
    d[d[, 1] < q[2] + 1.5 * iqr & d[, 1] > q[1] - 1.5 * iqr, ]
  }
  w_clean <- drop_iqr_outliers(merged_tab[merged_tab$gender_F1M2 == 1, ])
  m_clean <- drop_iqr_outliers(merged_tab[merged_tab$gender_F1M2 == 2, ])
  rbind(w_clean, m_clean)
}
# --- Associate external factors (lifestyle/diet/medication) with phenotypes
# --- via a sex- and age-aware GAM. Appears to be a work-in-progress console
# --- script: the per-phenotype loop is commented out (see `idx` note below).
traits_path <- "../v4/data/LL_phenotypes_merged_all.log_some.v5.txt"
pheno_path <- "factors+diet+med.txt"
cat("Data paths:\nphenotype traits:", traits_path, "\r\ncovariates:", pheno_path, "\n")
#pheno_to_log: "LEU,LY,MO,MOP,GR,BA,BAP,EO,EOP,TGL,HAL1,HALB,AST,ALT,AF,GGT,LCRP,TSH,UKRO,UKR24,LLDS_T1A,total_scor_VAL"
# read phenotype traits of interest
traits <- read.delim(traits_path, header = T, row.names = 1, sep = "\t", as.is = T, check.names = F)
# read age, gender and other covariate phenotypes
pheno <- read.table(pheno_path, header = T, row.names = 1, sep = "\t", as.is = T, check.names = F)
#order samples in the two tables
traits_m <- traits[match(row.names(pheno), row.names(traits), nomatch = 0 ), , drop = F]
pheno_m <- pheno[match(row.names(traits_m), row.names(pheno), nomatch = 0), ]
# sanity check: sample alignment (result is printed interactively, not asserted)
all(row.names(traits_m) == row.names(pheno_m))
num_traits <- ncol(traits_m)
pheno_m <- subset(pheno_m, select = -c(TEVREDEN, total_mwk_VAL, MVPA_mwk_VAL))
indices = c(5,10,13,15,20,21,22,23,27,28,29,42)
#for (idx in indices){
#med="HT_med"
# NOTE(review): `idx` is undefined unless the commented-out
# `for (idx in indices)` loop above is restored, or idx is set manually.
merged_tab <- cbind(traits_m[, idx], pheno_m[,1:30])
pheno_name <- colnames(merged_tab)[1]
print(pheno_name)
colnames(merged_tab)[1] <- "phenotype"
#colnames(merged_tab)[ncol(merged_tab)] <- "med"
merged_tab <- rm_outliers(merged_tab)
preds <- colnames(merged_tab)[c(4,5,8:ncol(merged_tab))]
# Build formula fragments: tensor interactions with age by sex, smooth
# age interactions, and sex-specific smooths for each predictor.
zterms_inter3 <- paste0(" + ti(", paste(preds, collapse = ", age, by = gender_F1M2)+ ti("), ", age, by = gender_F1M2)")
terms_inter_age <- paste0(" + s(", paste(preds, collapse = ", age)+ s("), ", age)")
terms_inter_sex <- paste0(" + s(", paste(preds, collapse = ", by = gender_F1M2)+ s("), ", by = gender_F1M2)")
terms_binary <- " + SMK1 + SMK3 + s(age, by = SMK1) + s(age, by = SMK3) + interaction(SMK1,gender_F1M2) + interaction(SMK3, gender_F1M2) + s(age, by = interaction(SMK1, gender_F1M2)) + s(age, by = interaction(SMK3, gender_F1M2))"
# NOTE(review): this line will error as written — `terms` here resolves to the
# stats::terms function (no such variable is defined), and `terms_inter3` does
# not exist (the variable above is named `zterms_inter3`). Verify intent.
full_formula <- as.formula(paste("phenotype ~ gender_F1M2 + s(age) + s(age, by = gender_F1M2) ", terms, terms_inter3, terms_inter_age, terms_inter_sex, terms_binary, sep = " "))
#full_formula <- as.formula("phenotype ~ gender_F1M2 + s(age) +
#                      s(LTE_SUM)+ s(LDI_SUM)+
#                      s(total_mwk_VAL)+ s(total_scor_VAL)+ s(MVPA_mwk_VAL)+
#                      s(MVPA_scor_VAL)+
#                      s(LLDS_T1A)+ s(SumOfalcohol)+ s(med)+
#                      ti(LTE_SUM, age, by = gender_F1M2)+ ti(LDI_SUM, age, by = gender_F1M2)+
#                      ti(total_mwk_VAL, age, by = gender_F1M2)+ ti(total_scor_VAL, age, by = gender_F1M2)+
#                      ti(MVPA_mwk_VAL, age, by = gender_F1M2)+ ti(MVPA_scor_VAL, age, by = gender_F1M2)+
#                      ti(LLDS_T1A, age, by = gender_F1M2)+ ti(SumOfalcohol, age, by = gender_F1M2)+
#                      ti(med, age, by = gender_F1M2) +
#                      s(LTE_SUM, age)+ s(LDI_SUM, age)+
#                      s(total_mwk_VAL, age)+ s(total_scor_VAL, age)+ s(MVPA_mwk_VAL, age)+ s(MVPA_scor_VAL, age)+
#                      s(LLDS_T1A, age)+ s(SumOfalcohol, age)+ s(med, age) +
#                      s(LTE_SUM, by = gender_F1M2)+ s(LDI_SUM, by = gender_F1M2)+
#                      s(total_mwk_VAL, by = gender_F1M2)+ s(total_scor_VAL, by = gender_F1M2)+ s(MVPA_mwk_VAL, by = gender_F1M2)+ s(MVPA_scor_VAL, by = gender_F1M2)+
#                      s(LLDS_T1A, by = gender_F1M2)+ s(SumOfalcohol, by = gender_F1M2)+ s(med, by = gender_F1M2) +
#                      SMK1 + SMK3 + s(age, by = SMK1) + s(age, by = SMK3) + interaction(SMK1,gender_F1M2) + interaction(SMK3, gender_F1M2) ")
# gam() presumably comes from mgcv, loaded by the sourced helper file — verify.
full_fit <- gam(full_formula, data = merged_tab, method = "REML", select=T)
#fit <- gam(phenotype ~ SMK1 + SMK3 + gender_F1M2 + s(age) + s(LTE_SUM) + s(LDI_SUM) + s(total_mwk_VAL) + s( total_scor_VAL) + s(MVPA_mwk_VAL) + s(MVPA_scor_VAL) + s(LLDS_T1A) + s(SumOfalcohol),data = merged_tab, method = "REML", select=T)
s <- summary(full_fit)
# NOTE(review): `out_path` is not defined anywhere in this script — it must
# come from the sourced helper file or be set manually before these writes.
write.table(s$p.table, file = paste0(out_path, pheno_name, ".p.table.txt"), sep = "\t", quote = F, col.names = NA)
write.table(s$s.table, file = paste0(out_path, pheno_name, ".s.table.txt"), sep = "\t", quote = F, col.names = NA)
#}
|
7df25efe5be0105d853a83fc749fb7e040602f46
|
7b102f9c8f2e3f9240090d1d67af50333a2ba98d
|
/gbd_2019/nonfatal_code/resp_asthma/crosswalk/run_MR_BRT_asthma_ild_clean.R
|
d0af6863ab56ed394ec100806bb3c4975290a2c3
|
[] |
no_license
|
Nermin-Ghith/ihme-modeling
|
9c8ec56b249cb0c417361102724fef1e6e0bcebd
|
746ea5fb76a9c049c37a8c15aa089c041a90a6d5
|
refs/heads/main
| 2023-04-13T00:26:55.363986
| 2020-10-28T19:51:51
| 2020-10-28T19:51:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,515
|
r
|
run_MR_BRT_asthma_ild_clean.R
|
##########################################################################################################################################
## Purpose: Prep and run MR-BRT
## Created by: USERNAME, column re-arrangement based on script by USERNAME
## Date: March 2019
##
## Step 1: Create master file of all crosswalks for a given cause - keep and standardize order of needed columns, bind together
## Step 2: Run MR-BRT
##
#########################################################################################################################################
# --- Step 1: build the master crosswalk file from per-covariate pair CSVs. ---
# NOTE(review): rm(list = ls()) at the top of a script wipes the workspace of
# whoever sources it; kept as-is but flagged.
rm(list=ls())
cause_path <- "FILEPATH"
cause_name <- "asthma_"
main_dir <- "FILEPATH"
##Load csv files with crosswalk pairs for each covariate to be included in network analysis
df1 <- as.data.table(read.csv(paste0("FILEPATH")))
df2 <- as.data.table(read.csv(paste0("FILEPATH")))
df3 <- as.data.table(read.csv(paste0("FILEPATH")))
df4 <- as.data.table(read.csv(paste0("FILEPATH")))
df5 <- as.data.table(read.csv(paste0("FILEPATH")))
df6 <- as.data.table(read.csv(paste0("FILEPATH")))
df7 <- as.data.table(read.csv(paste0("FILEPATH")))
# NOTE(review): the next two lines silently OVERWRITE the df1/df2 loaded
# above (paths are redacted to "FILEPATH", so the intent is unclear — verify
# which source files df1/df2 should actually hold).
df1 <- as.data.table(read.csv(paste0("FILEPATH")))
df2 <- as.data.table(read.csv(paste0("FILEPATH")))
#add nid.x and nid.y to WHS datasets
df1$nid.x <- df1$nid
df1$nid.y <- df1$nid
df2$nid.x <- df2$nid
df2$nid.y <- df2$nid
# Reference (.x) side of df1/df2 is gold standard: all case-definition
# covariates set to 0.
df1$cv_self_report_current.x <- 0
df1$cv_self_report_ever.x <- 0
df2$cv_self_report_current.x <- 0
df2$cv_self_report_ever.x <- 0
df1$cv_self_report_current.y <- 0
df1$cv_self_report_ever.y <- 0
df2$cv_self_report_current.y <- 0
df2$cv_self_report_ever.y <- 0
df1$cv_diagnosis.x <- 0
df1$cv_wheezing.x <- 0
df2$cv_diagnosis.x <- 0
df2$cv_wheezing.x <- 0
# Alternative (.y) definition flags: df1 = wheezing, df2 = diagnosis.
df1$cv_diagnosis.y <- 0
df1$cv_wheezing.y <- 1
df2$cv_diagnosis.y <- 1
df2$cv_wheezing.y <- 0
#safety check for all covariates
df3$cv_self_report_current.y <- 1
df3$cv_self_report_ever.y <- 0
df3$cv_diagnosis.y <- 0
df3$cv_wheezing.y <- 0
df4$cv_self_report_current.y <- 0
df4$cv_self_report_ever.y <- 1
df4$cv_diagnosis.y <- 0
df4$cv_wheezing.y <- 0
df5$cv_self_report_current.y <- 0
df5$cv_self_report_ever.y <- 0
df5$cv_diagnosis.y <- 0
df5$cv_wheezing.y <- 1
df6$cv_self_report_current.y <- 0
df6$cv_self_report_ever.y <- 0
df6$cv_diagnosis.y <- 1
df6$cv_wheezing.y <- 0
df7$cv_self_report_ever.x <- 1
df7$cv_self_report_current.y <- 0
df7$cv_self_report_ever.y <- 0
df7$cv_diagnosis.y <- 0
df7$cv_wheezing.y <- 1
#Order columns of crosswalk csv files consistently and drop unneeded columns
#' Conform a crosswalk sheet to the master column template.
#'
#' Adds any template columns missing from `datasheet` (filled with NA),
#' drops columns not in the template, and orders columns to match it.
#'
#' @param datasheet One crosswalk pair table (data.frame or data.table).
#' @return A data.table with exactly the template columns, in template order.
reorder_columns <- function(datasheet){
  ## set ordered list of columns for master crosswalk csv
  template_cols <- c(
    "nid.x", "nid.y", "location_match", "region_name.x", "super_region_name.x",
    "age_start", "age_end", "sex", "year_start", "year_end",
    "mean", "standard_error", "ratio", "se", "log_ratio", "delta_log_se",
    "diff_logit", "se_diff_logit", "cv_wheezing.x", "cv_diagnosis.x",
    "cv_self_report_ever.x", "cv_self_report_current.x", "cv_wheezing.y",
    "cv_diagnosis.y", "cv_self_report_ever.y", "cv_self_report_current.y",
    "cv_gold_std.x"
  )
  datasheet <- as.data.table(datasheet)
  ## add template columns missing from this sheet, filled with NA
  ## (replaces the previous cbind-in-a-loop growth, which also relied on
  ## `datasheet$nid.x` existing just to get the row count)
  missing_cols <- setdiff(template_cols, names(datasheet))
  for (column in missing_cols) {
    datasheet[[column]] <- NA
  }
  ## drop columns that are not part of the template
  extra_cols <- setdiff(names(datasheet), template_cols)
  if (length(extra_cols) > 0) {
    datasheet[, (extra_cols) := NULL]
  }
  ## reorder columns to match the template and return
  setcolorder(datasheet, template_cols)
  return(datasheet)
}
#Create master file with all crosswalks for cause, remove duplicate rows, write csv
df_vector <- list(df1, df2, df3, df4, df5, df6, df7)
master_xwalk <- lapply(df_vector, reorder_columns) %>% rbindlist()
master_xwalk <- unique(master_xwalk)
#Create matrix for mr-brt
# Design-matrix encoding: each cv_* column is (alternative flag - reference
# flag), i.e. -1/0/+1 per comparison.
master_xwalk$cv_wheezing <- master_xwalk$cv_wheezing.y-master_xwalk$cv_wheezing.x
master_xwalk$cv_diagnosis <- master_xwalk$cv_diagnosis.y-master_xwalk$cv_diagnosis.x
master_xwalk$cv_self_report_current <- master_xwalk$cv_self_report_current.y-master_xwalk$cv_self_report_current.x
master_xwalk$cv_self_report_ever <- master_xwalk$cv_self_report_ever.y - master_xwalk$cv_self_report_ever.x
#Add study ID
master_xwalk[, id := .GRP, by = c("nid.x", "nid.y")]
write.csv(master_xwalk, paste0("FILEPATH"), row.names = F)
#########################################################################################################################################
## Link with launching and loading an MR-BRT model ##
library(dplyr)
library(data.table)
library(metafor, lib.loc = "FILEPATH")
library(msm, lib.loc = "FILEPATH")
library(readxl)
# MR-BRT wrapper functions are sourced from the shared repo, not a package.
repo_dir <- "FILEPATH"
source(paste0(repo_dir, "run_mr_brt_function.R"))
source(paste0(repo_dir, "cov_info_function.R"))
source(paste0(repo_dir, "check_for_outputs_function.R"))
source(paste0(repo_dir, "load_mr_brt_outputs_function.R"))
source(paste0(repo_dir, "predict_mr_brt_function.R"))
source(paste0(repo_dir, "check_for_preds_function.R"))
source(paste0(repo_dir, "load_mr_brt_preds_function.R"))
## Plotting function ----------------------------------------------------------------
#Verify model names
covariate_name <- "master_xwalk"
cause_path <- "FILEPATH/"
cause_name <- "ILD_"
#Need to use/create column to specify if within study comparison or between
# Fit MR-BRT with 10% trimming; the asthma covariates are commented out and
# ILD covariates (cv_IPF.y, cv_sarc.y) are active — this copy of the script
# appears to be mid-switch between the two causes.
fit1 <- run_mr_brt(
output_dir = paste0("FILEPATH"),
model_label = paste0(cause_name, covariate_name, "log"),
data = paste0("FILEPATH"),
mean_var = "log_ratio",
se_var = "delta_log_se",
overwrite_previous = TRUE,
remove_x_intercept = TRUE,
method = "trim_maxL",
trim_pct = 0.10,
study_id = "id",
#lasso=FALSE,
covs = list(
#cov_info("cv_wheezing", "X"),
#cov_info("cv_diagnosis", "X"),
#cov_info("cv_self_report_current", "X"),
#cov_info("cv_self_report_ever", "X")
#cov_info("age_start", "X", degree = 3, n_i_knots = 4, r_linear = T, l_linear = T))
cov_info("cv_IPF.y", "X"),
cov_info("cv_sarc.y","X"))
)
plot_mr_brt(fit1)
## CREATE A RATIO PREDICTION FOR EACH OBSERVATION IN THE ORIGINAL DATA
#########################################################################################################################################
#Prep original data
actual_data <- as.data.table(read.csv("FILEPATH"))
#Logit transform original data
actual_data$mean_logit <- logit(actual_data$mean)
actual_data$se_logit <- sapply(1:nrow(actual_data), function(i) {
mean_i <- actual_data[i, mean]
se_i <- actual_data[i, standard_error]
deltamethod(~log(x1/(1-x1)), mean_i, se_i^2)
})
#Predict MR-BRT --------------------------------------------------------------------------------------------------------------
# Check for outputs from model
check_for_outputs(fit1, wait_seconds = 15)
# Read raw outputs from model
results1 <- load_mr_brt_outputs(fit1)
names(results1)
coefs <- results1$model_coefs
metadata <- results1$input_metadata
train <- results1$train_data
# NOTE(review): the second df_pred immediately overwrites the first —
# asthma prediction grid vs. ILD grid; only the ILD grid is used below.
df_pred <- data.table("cv_diagnosis"=c(0, 1,0,0,0), "cv_wheezing"=c(0, 0,1,0,0), "cv_self_report_current"=c(0, 0,0,1,0), "cv_self_report_ever"=c(0, 0,0,0,1))
df_pred <- data.table("cv_IPF.y"=c(1,0), "cv_sarc.y"=c(0,1))
pred <- predict_mr_brt(fit1, newdata = df_pred, z_newdata = df_pred, write_draws = T)
check_for_preds(pred)
pred_object <- load_mr_brt_preds(pred)
predicted <- pred_object$model_summaries
#If already ran MR-Brt - open pred_object
# NOTE(review): this read.csv overwrites the `predicted` just loaded above —
# run EITHER the predict path OR this reload path, not both.
predicted <- as.data.table(read.csv(paste0("FILEPATH")))
predicted <- unique(predicted)
predicted <- predicted[2:5, ]
# NOTE(review): the asthma setnames (4 lines) and ILD setnames (2 lines)
# cannot both succeed on one file; setnames() errors on a missing column.
setnames(predicted, "X_cv_wheezing", "cv_wheezing")
setnames(predicted, "X_cv_diagnosis", "cv_diagnosis")
setnames(predicted, "X_cv_self_report_current", "cv_self_report_current")
setnames(predicted, "X_cv_self_report_ever", "cv_self_report_ever")
setnames(predicted, "X_cv_IPF.y", "cv_IPF")
setnames(predicted, "X_cv_sarc.y", "cv_sarc")
##: APPLY RATIOS TO THE ORIGINAL DATA AND CREATE THE FINAL DATASET USED FOR NONFATAL MODELING
#########################################################################################################################################
#USING DAMIAN'S CODE
predicted <- as.data.table(predicted)
names(predicted) <- gsub("model_summaries.", "", names(predicted))
names(predicted) <- gsub("X_d_", "cv_", names(predicted))
# SE from the 95% UI width; then delta-method SE on the inverse-logit scale.
predicted[, `:=` (Y_se = (Y_mean_hi - Y_mean_lo)/(2*qnorm(0.975,0,1)))]
predicted[, `:=` (Y_se_norm = (deltamethod(~exp(x1)/(1+exp(x1)), Y_mean, Y_se^2)))]
# Row-by-row recomputation of Y_se_norm (deltamethod is not vectorized over
# rows, hence the four copies).
pred1 <- predicted[1,]
pred2 <- predicted[2,]
pred3 <- predicted[3,]
pred4 <- predicted[4,]
pred1[, `:=` (Y_se_norm = (deltamethod(~exp(x1)/(1+exp(x1)), Y_mean, Y_se^2)))]
pred1<- pred1[,Y_se_norm]
pred2[, `:=` (Y_se_norm = (deltamethod(~exp(x1)/(1+exp(x1)), Y_mean, Y_se^2)))]
pred2<- pred2[,Y_se_norm]
pred3[, `:=` (Y_se_norm = (deltamethod(~exp(x1)/(1+exp(x1)), Y_mean, Y_se^2)))]
pred3<- pred3[,Y_se_norm]
pred4[, `:=` (Y_se_norm = (deltamethod(~exp(x1)/(1+exp(x1)), Y_mean, Y_se^2)))]
pred4<- pred4[,Y_se_norm]
Y_se_norm <- c(pred1,pred2,pred3,pred4)
# NOTE(review): cbind() here adds a second Y_se_norm column alongside the
# one created above — verify which is intended downstream.
predicted <- cbind(predicted,Y_se_norm)
crosswalk_reporting <- copy(predicted) # for reporting later
predicted[, (c("Z_intercept", "Y_negp", "Y_mean_lo", "Y_mean_hi", "Y_mean_fe", "Y_negp_fe", "Y_mean_lo_fe", "Y_mean_hi_fe")) := NULL]
# Reference (gold standard) row: no adjustment.
no_cv <- data.frame("cv_wheezing" = 0,"cv_diagnosis" =0, "cv_self_report_current"=0, "cv_self_report_ever"=0, "Y_mean"=0, "Y_se"=0, "Y_se_norm"=0)
predicted <- rbind(predicted, no_cv)
# NOTE(review): again, the asthma merge is immediately overwritten by the
# ILD merge — only one of these two lines should be active per cause.
review_sheet_final <- merge(actual_data, predicted, by=c("cv_wheezing", "cv_diagnosis", "cv_self_report_ever", "cv_self_report_current"))
review_sheet_final <- merge(actual_data, predicted, by=c("cv_IPF","cv_sarc"))
review_sheet_final <-as.data.table(review_sheet_final)
setnames(review_sheet_final, "mean", "mean_orig")
# Log-space adjustment branch (used when ratios were modeled in log space).
review_sheet_final[, `:=` (log_mean = log(mean_orig), log_se = deltamethod(~log(x1), mean_orig, standard_error^2)), by = c("mean_orig", "standard_error")]
review_sheet_final[Y_mean != predicted[3,Y_mean], `:=` (log_mean = log_mean - Y_mean, log_se = sqrt(log_se^2 + Y_se^2))]
review_sheet_final[Y_mean != predicted[3,Y_mean], `:=` (mean_new = exp(log_mean), standard_error_new = deltamethod(~exp(x1), log_mean, log_se^2)), by = c("log_mean", "log_se")]
review_sheet_final[Y_mean != predicted[3,Y_mean], `:=` (cases_new = NA, lower_new = NA, upper_new = NA)]
review_sheet_final[Y_mean == predicted[3,Y_mean], `:=` (mean_new = mean_orig, standard_error_new = standard_error)]
review_sheet_final[standard_error_new == "NA", `:=` (standard_error_new = standard_error)]
review_sheet_final[, (c("Y_mean", "Y_se", "log_mean", "log_se")) := NULL]
# Logit-space adjustment branch (duplicate setnames below errors if the
# log-space branch above already ran — these are alternatives, not a sequence).
setnames(review_sheet_final, "mean", "mean_orig")
review_sheet_final[Y_mean != predicted[5,Y_mean], `:=` (mean_logit = mean_logit - Y_mean, se_logit = sqrt(se_logit^2 + Y_se^2))]
review_sheet_final[Y_mean != predicted[5,Y_mean], `:=` (mean_new = inv.logit(mean_logit), standard_error_new = deltamethod(~exp(x1)/(1+exp(x1)), mean_logit, se_logit^2)), by = c("mean_logit", "se_logit")]
review_sheet_final[Y_mean != predicted[5,Y_mean], `:=` (lower_new = NA, upper_new = NA)]
review_sheet_final[Y_mean == predicted[5,Y_mean], `:=` (mean_new = mean_orig, standard_error_new = standard_error)]
review_sheet_final[standard_error_new == "NA", `:=` (standard_error_new = sqrt(standard_error^2 + Y_se_norm^2))]
review_sheet_final[, (c("Y_mean", "Y_se", "mean_logit", "se_logit")) := NULL]
# For upload validation #
setnames(review_sheet_final, "lower", "lower_orig")
setnames(review_sheet_final, "upper", "upper_orig")
setnames(review_sheet_final, "standard_error", "standard_error_orig")
setnames(review_sheet_final, "lower_new", "lower")
setnames(review_sheet_final, "upper_new", "upper")
setnames(review_sheet_final, "standard_error_new", "standard_error")
setnames(review_sheet_final, "mean_new", "mean")
review_sheet_final[is.na(lower), uncertainty_type_value := NA]
#THIS IS THE DATASET THAT WILL BE USED FOR NONFATAL MODELING
write.csv(review_sheet_final, paste0("FILEPATH"), row.names = F)
|
225af4d81e0ebe3c83a2829d9dd2d754002dabf0
|
c2d9b62b1fff20d16c3f425c981b05a4398aef55
|
/2_R_Programming/r_programming_week2.R
|
8dedb53f687339e7f2cdabca2fb26713a92c29a4
|
[] |
no_license
|
Ailuropoda1864/coursera-jhu-ds
|
214808800cd4cbb5f587821c990f3b32af47bb1a
|
a210076085f6953a7a149596339fb7f4b2061756
|
refs/heads/master
| 2018-12-09T18:49:22.695056
| 2018-09-12T01:56:08
| 2018-09-12T01:56:08
| 120,560,640
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,295
|
r
|
r_programming_week2.R
|
# Location of the downloaded 'specdata' monitor CSV files (001.csv .. 332.csv).
DIR <- '/home/fay/code/r_coursera/2_R_Programming/data/week2/specdata'
# Part 1
#' Calculate the mean of a pollutant (sulfate or nitrate) across a specified
#' list of monitors.
#'
#' @param directory A character vector of length 1 indicating the location of
#'   the CSV files.
#' @param pollutant A character vector of length 1 indicating the name of the
#'   pollutant for which the mean will be calculated; either 'sulfate' or
#'   'nitrate'.
#' @param id An integer vector indicating the monitor ID numbers to be used.
#'
#' @return The mean of the pollutant across all monitors listed in \code{id}
#'   (ignoring NA values).
#' @export
#'
#' @examples
#' pollutantmean(DIR, "sulfate", 1:10)
pollutantmean <- function(directory, pollutant, id = 1:332) {
  files <- file.path(directory, sprintf('%03d.csv', id))
  # Collect the pollutant column from every monitor file, then average once.
  # (Replaces growing a vector with c() inside a loop, which is O(n^2).)
  values <- unlist(lapply(files, function(f) read.csv(f)[[pollutant]]))
  mean(values, na.rm = TRUE)
}
# Part 2
#' Read a directory full of files and reports the number of completely observed
#' cases in each data file.
#'
#' @param directory A character vector of length 1 indicating the location of
#'   the CSV files.
#' @param id An integer vector indicating the monitor ID numbers to be used.
#'
#' @return A data frame where the first column is the name of the file ('id')
#'   and the second column is the number of complete cases ('nobs').
#' @export
#'
#' @examples
#' complete(DIR, c(2, 4, 8, 10, 12))
complete <- function(directory, id = 1:332) {
  # BUG FIX: default was 1:322 (typo) — monitor files run 001..332, matching
  # the default used by pollutantmean().
  files <- file.path(directory, sprintf('%03d.csv', id))
  # Complete-case count per file; building the result in one shot avoids
  # growing a data.frame with rbind() inside a loop.
  nobs <- vapply(
    files,
    function(f) sum(complete.cases(read.csv(f))),
    integer(1),
    USE.NAMES = FALSE
  )
  data.frame(id = id, nobs = nobs)
}
# Part 3
#' Compute the sulfate/nitrate correlation for every monitor file whose number
#' of complete cases exceeds a threshold.
#'
#' @param directory A character vector of length 1 indicating the location of
#'   the CSV files.
#' @param threshold Minimum number of complete cases (exclusive) a file must
#'   have for its correlation to be included.
#'
#' @return A numeric vector of correlations; \code{numeric(0)} when no file
#'   meets the threshold.
corr <- function(directory, threshold = 0) {
  files <- file.path(directory, dir(directory))
  # One correlation (or NULL) per file; unlist() drops the NULLs. This avoids
  # growing a vector with c() inside a loop (O(n^2)).
  cors <- lapply(files, function(f) {
    df <- read.csv(f)
    if (sum(complete.cases(df)) > threshold) {
      cor(x = df$sulfate, y = df$nitrate, use = "pairwise.complete.obs")
    } else {
      NULL
    }
  })
  result <- unlist(cors)
  # unlist() of an all-NULL list is NULL; preserve the original numeric(0) return.
  if (is.null(result)) numeric(0) else result
}
# quiz
# NOTE(review): `DIR` is assumed to be defined elsewhere (the path to the
# specdata CSV directory) -- it is never assigned in this file; confirm.
# Q1: mean sulfate across monitors 1-10
pollutantmean(DIR, "sulfate", 1:10)
# Q2: mean nitrate across monitors 70-72
pollutantmean(DIR, "nitrate", 70:72)
# Q3: mean sulfate for monitor 34 only
pollutantmean(DIR, "sulfate", 34)
# Q4: mean nitrate across all monitors (default id)
pollutantmean(DIR, "nitrate")
# Q5: complete-case counts for a hand-picked set of monitors
cc <- complete(DIR, c(6, 10, 20, 34, 100, 200, 310))
print(cc$nobs)
# Q6: complete-case count for monitor 54
cc <- complete(DIR, 54)
print(cc$nobs)
# Q7: sample 10 monitors (reversed order) and report their counts
set.seed(42)
cc <- complete(DIR, 332:1)
use <- sample(332, 10)
print(cc[use, "nobs"])
# Q8: correlations with the default threshold, sampled after sorting
cr <- corr(DIR)
cr <- sort(cr)
set.seed(868)
out <- round(cr[sample(length(cr), 5)], 4)
print(out)
# Q9: correlations for monitors with more than 129 complete cases
cr <- corr(DIR, 129)
cr <- sort(cr)
n <- length(cr)
set.seed(197)
out <- c(n, round(cr[sample(n, 5)], 4))
print(out)
# Q10: count at threshold 2000, then report correlations at threshold 1000
cr <- corr(DIR, 2000)
n <- length(cr)
cr <- corr(DIR, 1000)
cr <- sort(cr)
print(c(n, round(cr, 4)))
|
42d5197627503de9df388bd92a0efd89aed3d465
|
09f4710323cae92f8af5d96f54b070bbf93bc4df
|
/Biomass/R_scripts/archives/overlay_cropped.R
|
acb1378a31d68c4cac5398ae98c9b72967d4c369
|
[
"MIT"
] |
permissive
|
WoodResourcesGroup/EPIC_AllPowerLabs
|
0a7885345d2228aedb66ec9355f80ae38fef5545
|
bf3240672f02fa93243cb2241e9c49249ce710aa
|
refs/heads/master
| 2021-01-11T19:48:03.613416
| 2018-04-11T23:53:07
| 2018-04-11T23:53:07
| 79,397,605
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,469
|
r
|
overlay_cropped.R
|
### ATTEMPTS TO ACTUALLY CALCULATE BY PARK
### SOME OF THIS CODE IS PASTED FROM units_overlay.R on Carmen's PC
## Open LEMMA
# Pick the Box Sync data directory by platform.
# NOTE(review): setwd() in scripts is fragile and machine-specific; prefer
# building absolute paths with file.path() instead.
if( Sys.info()['sysname'] == "Windows" ) {
  setwd("C:/Users/Battles Lab/Box Sync/EPIC-Biomass/GIS Data/LEMMA_gnn_sppsz_2014_08_28/")
} else {
  setwd("~/Documents/Box Sync/EPIC-Biomass/GIS Data/LEMMA_gnn_sppsz_2014_08_28/")
}
### Open GNN LEMMA data (see script crop_LEMMA.R for where LEMMA.gri comes from)
# NOTE(review): raster() requires the raster package, which is not loaded in
# this chunk -- presumably library(raster) happens elsewhere; confirm.
LEMMA <- raster("LEMMA.gri")
## Open shapefile for each park
library(rgdal)
setwd("C:/Users/Battles Lab/Box Sync/EPIC-Biomass/R Results")
LNP_1215 <- readOGR(dsn="Results_1215_crop", layer = "LNP_1215")
## Crop down to just 2014 and 2015 and check it out
LNP_1415 <- subset(LNP_1215, LNP_1215$RPT_YR > 2013)
plot(LNP_1415, pch=".")
# NOTE(review): `lnp` is not defined anywhere in this script; presumably it is
# loaded by an earlier script in the workflow -- confirm.
plot(lnp, add=T, border = "pink")
## Check that results look OK - compare NO_TREES, biomass per tree, biomass per pixel
max(LNP_1415$D_BM_kg)
max(LNP_1415$D_BM_Mg)
# Max Mg per pixel of dead biomass is 96,000,000. That's way too high! Investigate below.
hist(LNP_1415$D_BM_Mg)
hist(LNP_1415$D_BM_Mg, xlim=c(0,1000), breaks=10000000, ylim=c(0,100000))
# How do dead trees per polygon and relative number of dead trees per pixel look?
hist(LNP_1415$relNO)
# It looks like my results are showing relNO of trees per pixel as high as 1,000,000, which is awfully high
# Investigate how high that is by comparing to THA
hist(LNP_1415$THA*.09)
# NOTE(review): LEMMA_LNP is only created further down this script, so running
# top-to-bottom this next line fails on first execution -- confirm intended order.
hist(LEMMA_LNP@data@attributes[[1]]$TPH_GE_3) # total TPH should not exceed 10,000, so the above results are definitely wrong
# Check average dead biomass per pixel averaged across all pixels in each polygon to see if they look ok
plot(unique(LNP_1415$P_NO_TR/LNP_1415$Pl_Sh_A), main="number of dead trees per sq m in polygon")
# Compare to that of the original drought polygon layer
# NOTE(review): `drought` is not loaded until later in this script (see the
# readOGR("tempdir", "drought") call below) -- same ordering concern as above.
plot(drought$NO_TREE/drought$Shap_Ar, main="number of dead trees per sq m in polygon from original drought data")
### THESE TWO NUMBERS ARE DRASTICALLY DIFFERENT. SOMETHING IS WRONG
### LOOK AT JUST THE ORIGINAL DROUGHT POLYGONS THAT FALL WITHIN LASSEN
library(rgeos)
drought.lnp <- crop(drought, extent(LNP_1415))
drought.lnp.1415 <- subset(drought.lnp, drought.lnp$RPT_YR>2013)
plot(drought.lnp.1415)
plot(LNP_1415, add=T, pch=".", col="orange")
plot(unique(LNP_1415$P_NO_TR/LNP_1415$Pl_Sh_A), main="number of dead trees per sq m in polygon")
plot(drought.lnp.1415$NO_TREE/drought.lnp.1415$Shap_Ar, main="number of dead trees per sq m in polygon from original drought data")
plot(sort(drought.lnp.1415$NO_TREE))
plot(sort(LNP_1415$P_NO_TR))
# Check number of dead trees in polygon against relNO * number of pixels in polygon
LNP_1415$D_BM_Mgha <- (LNP_1415$D_BM_kg/1000)/.09
hist(LNP_1415$D_BM_Mgha)
LNP_1415_D_BM_sum_kg <- sum(LNP_1415$D_BM_kg)
## Overlay with LEMMA
LEMMA_LNP <- crop(LEMMA, extent(lnp)) # crop LEMMA GLN data to the size of that polygon
LEMMA_LNP <- mask(LEMMA_LNP, lnp) # fit the cropped LEMMA data to the shape of the polygon
# FIX: this line was `plot(LEMMA_LNP@data$, add=T)` -- an incomplete `$`
# extraction that is a parse error and broke the whole script. Plot the
# masked raster itself instead.
plot(LEMMA_LNP, add = TRUE)
length(LEMMA_LNP)
# Use area of LNP to calculate dead biomass density
# NOTE(review): LNP_D_BM_sum_kg is never assigned in this script (only
# LNP_1415_D_BM_sum_kg is); confirm where it is supposed to come from.
area(lnp)
LNP_DBM_kgha <- LNP_D_BM_sum_kg/area(lnp)
LNP_DBM_kgha
LNP_DBM_Mgha <- LNP_D_BM_sum_kg/area(lnp)/1000
LNP_1415_DBM_Mgha <- LNP_1415_D_BM_sum_kg/area(lnp)/1000
# Check against drought mortality polygons
# FIX: the `+` continuation prompts pasted in from the R console made this
# if/else a parse error; they have been removed.
if( Sys.info()['sysname'] == "Windows" ) {
  setwd("C:/Users/Battles Lab/Box Sync/EPIC-Biomass/GIS Data/")
} else {
  setwd("~/Documents/Box Sync/EPIC-Biomass/GIS Data/")
}
drought <- readOGR("tempdir", "drought")
plot(drought, add=T, col="green")
# To find biomass from LEMMA, need to repeat some of the steps from the original analysis
### Repeat for other units
### Open Results
# Crop and resave for faster opening in the future
# NOTE(review): result_16, result_1215, results_1215, units, MH, FS, kc,
# Mtn_hm and st_p are all undefined in this chunk -- presumably created by an
# earlier script; confirm before running.
result_16_crop <- crop(result_16, extent(units))
result_1215_crop <- crop(result_1215, extent(units))
# NOTE(review): this writeOGR() call is incomplete (empty dsn, no layer or
# driver) and will error -- looks like an unfinished edit.
writeOGR(obj=result_16_crop, dsn = "")
writeOGR(obj=spdf, dsn="Results_2012-2015",layer = "Results_2012-2015", driver="ESRI Shapefile")
### Crop and mask results once for each unit spdf
library(sp)
results_1215_MH <- crop(results_1215, extent(MH))
results_1215_MH <- spTransform(results_1215_MH, crs(Mtn_hm))
results_1215_FS <- crop(results_1215, extent(FS))
results_1215_kc <- crop(results_1215, extent(kc))
results_1215_lnp <- crop(results_1215, extent(lnp))
### Trying with gIntersect
# First find which points in results fall within MH
MH.intersect <- gIntersection(Mtn_hm, results_1215_MH, byid=T)
plot(Mtn_hm, add=T, border="orange")
# gIntersection(byid=TRUE) encodes "polygonID pointID" in the coordinate row
# names; split out the point IDs so the source points can be subset.
MH.pts.intersect <- strsplit(dimnames(MH.intersect@coords)[[1]], " ")
MH.pts.intersect.id <- as.numeric(sapply(MH.pts.intersect,"[[",2))
MH.pts.extract <- results_1215_MH[MH.pts.intersect.id, ]
results_1215_MH_ex <- subset(results_1215_MH, results_1215_MH$key %in% MH.pts.intersect.id)
plot(results_1215_MH_ex)
plot(Mtn_hm, add=T, border="orange")
# Repeat for st_p
results_1215_SP <- crop(results_1215, extent(st_p))
results_1215_SP <- spTransform(results_1215_SP, crs(st_p))
### Divide into the two parks
CSP <- st_p[1,]
ESP <- st_p[2,]
### Calculate separately for each park
# Intersect the first park polygon (CSP) with the cropped results points.
CSP.intersect <- gIntersection(CSP, results_1215_SP, byid=T)
# gIntersection(byid=TRUE) encodes "polygonID pointID" in the coordinate row
# names; split out the point IDs so the source points can be subset.
CSP.pts.intersect <- strsplit(dimnames(CSP.intersect@coords)[[1]], " ")
CSP.pts.intersect.id <- as.numeric(sapply(CSP.pts.intersect,"[[",2))
# FIX: this originally read `results_1215_CSP[CSP.pts.intersect.id, ]`, but
# results_1215_CSP is not created until the next line, so the script failed
# with "object not found". The MH block above indexes the cropped source
# layer at this step, so mirror that pattern with results_1215_SP.
CSP.pts.extract <- results_1215_SP[CSP.pts.intersect.id, ]
results_1215_CSP <- subset(results_1215_SP, results_1215_SP$key %in% CSP.pts.intersect.id)
plot(results_1215_CSP)
plot(st_p, add=T, border = "orange")
|
b26c70e6aa3831ae7bc37e46dd2435d190df53a8
|
33e13418d80d2a094071bf41b94aff10c0e3204a
|
/metrics/Compartments/rpgms/genset.r
|
710f953ed1b4a55a71689cf2e6bfc4ff88c011b6
|
[] |
no_license
|
jpbida/FKS
|
97f5102ef311924c215d4dac11485da0ec507366
|
2587c06adc8ed52a02a4be68d84afbdfd8672040
|
refs/heads/master
| 2020-04-24T14:36:38.318250
| 2012-07-25T19:43:34
| 2012-07-25T19:43:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 852
|
r
|
genset.r
|
### For each of the datasets ###
datasets<-c(
"allr4_75.px",
"allr4_50.px",
"allr4_25.px",
"allr3_75.px",
"allr3_50.px",
"allr3_25.px",
"allr2_75.px",
"allr2_50.px",
"allr2_25.px",
"allr1_75.px",
"allr1_50.px",
"allr1_25.px"
)
#### Break the space into cross sections squares ####
# NOTE(review): only the first dataset is processed (datasets[1]); this looks
# like a debugging leftover -- confirm whether the full vector should be looped.
for(dataset in datasets[1])
{
  print(dataset)
  # Each dataset has a companion "allpixs_<dataset>" table of pixel records
  # with six parameters plus x/y/z coordinates.
  fin <- paste("allpixs_", dataset, sep="")
  dat <- read.table(file=fin, header=F)
  names(dat) <- c("p1","p2","p3","p4","p5","p6","x","y","z")
  # Slice the space into overlapping cross-section slabs of width 2 along x.
  for(x in seq(0,100,by=5)){
    print(x)
    slab_file <- paste(dataset, "_d", x, sep="")
    d1 <- dat[dat$x<x+2 & dat$x>=x,]
    write.table(d1, file=slab_file, row.names=F, col.names=F)
    #### Calculate all compartments in the space ###
    # Seed file t1.x with the first pixel of the slab (consumed by gset.pl).
    pix <- tryCatch(read.table(file=slab_file, nrows=1),
                    error = function(e) data.frame())
    if (nrow(pix) > 0) {
      write.table(pix[1,], file="t1.x", row.names=F, col.names=F)
    }
    # FIX: the original loop never updated `pix` after the initial read, so
    # the condition `dim(pix)[1] > 0` could never change and the loop ran
    # forever. gset.pl is presumed to consume lines from the slab file, so
    # re-read it after each pass and stop once it is empty or unreadable.
    while (nrow(pix) > 0) {
      system(paste("./gset.pl ", slab_file, sep=""))
      pix <- tryCatch(read.table(file=slab_file, nrows=1),
                      error = function(e) data.frame())
    }
  }
}
|
bf3d45d30a5263255858c265923ba9335a3366fa
|
ead04fbe576b37496435e375b904100381b6fa08
|
/comparison_lda_qda_mda.R
|
b5d5a0d6a498da11fef8737be8c0841aa635a9f1
|
[] |
no_license
|
akshayvkale/LDA
|
e313b0b3a9ba4b1ff40cf9db9577ae0f47afeb28
|
35238c72d220cb8c9810768252742f78c338afa0
|
refs/heads/master
| 2022-11-27T09:47:41.669489
| 2020-08-04T10:30:54
| 2020-08-04T10:30:54
| 284,947,529
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,849
|
r
|
comparison_lda_qda_mda.R
|
# Compare LDA, QDA and MDA decision boundaries on a simulated 3-class,
# 9-cluster mixture (each class is the union of three Gaussian clusters --
# exactly the setting where MDA should outperform LDA/QDA).
library(MASS)
library(mvtnorm)
library(mda)
library(ggplot2)
?rmvnorm
# 500 points per cluster; identity covariance (rmvnorm default).
n<-500
x11<-rmvnorm(n,mean = c(-4,-4))
x12<-rmvnorm(n,mean = c(0,4))
x13<-rmvnorm(n,mean = c(4,-4))
x21<-rmvnorm(n,mean=c(-4,4))
x22<-rmvnorm(n,mean = c(4,4))
x23<-rmvnorm(n,mean = c(0,0))
x31<-rmvnorm(n,mean = c(-4,0))
x32<-rmvnorm(n,mean = c(0,-4))
x33<-rmvnorm(n,mean = c(4,0))
# Rows are grouped by class: clusters x1* -> class 1, x2* -> class 2, x3* -> class 3.
x<-rbind(x11,x12,x13,x21,x22,x23,x31,x32,x33)
x
?gl()
# gl(3, n*3) labels each run of 3n rows with one of the three class levels.
train<-data.frame(x,y=gl(3,n*3))
View(train)
lda_out<-lda(y~.,train)
qda_out<-qda(y~.,train)
mda_out<-mda(y~.,train)
# Generate a dense grid of test points used to draw the decision boundaries
contour_data<-expand.grid(X1=seq(-8,8,length=300),X2=seq(-8,8,length=300))
contour_data
dim(contour_data)
dim(train)
# classifying test data
set.seed(123)
lda_prdict<- data.frame(contour_data,
                        y=as.numeric(predict(lda_out,contour_data)$class))
lda_prdict
qda_predict<-data.frame(contour_data,
                        y=as.numeric(predict(qda_out,contour_data)$class))
p <- ggplot(train,aes(x=X1,y=X2,color=y))+geom_point()
p
colnames(train)
# Overlay each classifier's boundary (contours of the predicted class label).
p+stat_contour(aes(x=X1,y=X2,z=y),data=lda_prdict)
p+stat_contour(aes(x=X1,y=X2,z=y),data=qda_predict)
lda_prdict
# NOTE(review): predict.mda returns the class factor directly (no $class slot),
# hence the different extraction here -- confirm against the mda docs.
mda_predict <- data.frame(contour_data,
                          y = as.numeric(predict(mda_out, contour_data)))
p+stat_contour(aes(x=X1,y=X2,z=y),data=mda_predict)
ggplot(data=mda_predict,aes(x=X1,y=X2,color=y))+geom_point()+
  stat_contour(aes(x=X1,y=X2,z=y),data=mda_predict)
ggplot(data=qda_predict,aes(x=X1,y=X2,color=y))+geom_point()+
  stat_contour(aes(x=X1,y=X2,z=y),data=qda_predict)
ggplot(data=lda_prdict,aes(x=X1,y=X2,color=y))+geom_point()
View(lda_prdict)
#________________________________________________________________
|
a04708c161590a3ed57094a6f0bfbb3d67c4179b
|
71e7a3518e75dba5226c7c5224068910c60bfb7e
|
/R/aoi_map.R
|
1c747db8b6e735a20aef73fbbfff9a6fe8898fc6
|
[
"MIT"
] |
permissive
|
mikejohnson51/AOI
|
f430078cdae4aeb0720e78bbf6a6987af8b5f677
|
a7d54a0f6951a8e61f8f7a5f09056d8069fbe1f6
|
refs/heads/master
| 2023-08-04T22:39:28.531795
| 2023-07-26T18:21:38
| 2023-07-26T18:21:38
| 139,353,238
| 31
| 2
|
MIT
| 2021-04-02T04:28:39
| 2018-07-01T18:47:25
|
R
|
UTF-8
|
R
| false
| false
| 3,348
|
r
|
aoi_map.R
|
#' @title Generate Leaflet map and tool set for AOI
#' @description
#' Provides a precanned leaflet layout for checking, and refining AOI queries.
#' Useful \code{leaflet} tools allow for the marking of points, measuring of
#' distances, and panning and zooming.
#' @param AOI any spatial object (\code{raster}, \code{sf}, \code{sp}).
#' Can be piped (\%>\%) from \code{\link{aoi_get}}.
#' If \code{AOI = NULL}, base map of CONUS will be returned.
#' @param returnMap \code{logical}. If \code{FALSE} (default) the input
#' AOI is returned and the leaflet map printed.
#' If \code{TRUE} the leaflet map is returned and printed.
#' @return a \code{leaflet} html object
#' @examples
#' \dontrun{
#' ## Generate an empty map:
#' aoi_map()
#'
#' ## Check a defined AOI:
#' AOI <- getAOI(clip = list("UCSB", 10, 10))
#' aoi_map(AOI)
#'
#' ## Chain to AOI calls:
#' getAOI(clip = list("UCSB", 10, 10)) %>% aoi_map()
#'
#' ## Add layers with standard leaflet functions:
#' r <- getAOI("UCSB") %>% # get AOI
#'   HydroData::findNWIS() # get SpatialPointsDataframe of local USGS gages
#'
#' aoi_map(r$AOI) %>%
#'   addMarkers(data = r$nwis, popup = r$nwis$site_no)
#'
#' ## Save map for reference:
#' m <- getAOI("Kansas City") %>% aoi_map()
#' htmlwidgets::saveWidget(m, file = paste0(getwd(), "/myMap.html"))
#' }
#' @export
#' @importFrom sf st_geometry_type
aoi_map <- function(AOI = NULL, returnMap = FALSE) {
  check_pkg('leaflet')
  p <- "+proj=longlat +datum=WGS84"
  m <- NULL
  bb <- NULL
  pts <- NULL
  type <- NULL
  out <- NULL
  orig <- AOI
  if (!inherits(AOI, "list")) {
    AOI <- list(AOI = AOI)
  }
  # Normalise every list element to sf in lon/lat, recording its geometry type.
  for (i in seq_along(AOI)) {
    tmp <- make_sf(AOI[[i]])
    if (!is.null(tmp)) {
      out[[length(out) + 1]] <- st_transform(tmp, p)
      type[length(type) + 1] <- as.character(unique(st_geometry_type(tmp)[1]))
    }
  }
  # FIX: the original indexed `out` with which(grepl("POLYGON", type)), but
  # grepl("POLYGON", ...) also matches "MULTIPOLYGON" (and "POINT" matches
  # "MULTIPOINT"). With both present, which() returned a length-2 vector and
  # out[[c(i, j)]] performed recursive indexing and failed. Use exact
  # matching; match() returns the first hit only. MULTIPOLYGON still
  # overrides POLYGON for bb, preserving the original precedence.
  if ("POINT" %in% type) {
    pts <- out[[match("POINT", type)]]
  }
  if ("POLYGON" %in% type) {
    bb <- out[[match("POLYGON", type)]]
  }
  if ("MULTIPOLYGON" %in% type) {
    bb <- out[[match("MULTIPOLYGON", type)]]
  }
  # Base map: three switchable tile layers plus scale bar, minimap and a
  # measuring tool.
  m <- leaflet::leaflet() %>%
    leaflet::addProviderTiles("Esri.NatGeoWorldMap", group = "Terrain") %>%
    leaflet::addProviderTiles("CartoDB.Positron", group = "Grayscale") %>%
    leaflet::addProviderTiles("Esri.WorldImagery", group = "Imagery") %>%
    leaflet::addScaleBar("bottomleft") %>%
    leaflet::addMiniMap(
      toggleDisplay = TRUE,
      minimized = TRUE
    ) %>%
    leaflet::addMeasure(
      position = "bottomleft",
      primaryLengthUnit = "feet",
      primaryAreaUnit = "sqmiles",
      activeColor = "red",
      completedColor = "green"
    ) %>%
    leaflet::addLayersControl(
      baseGroups = c("Terrain", "Grayscale", "Imagery"),
      options = leaflet::layersControlOptions(collapsed = TRUE)
    )
  if (is.null(orig)) {
    # No AOI supplied: centre on CONUS.
    m <- leaflet::setView(m, lat = 39.311825, lng = -101.275972, zoom = 4)
  } else {
    if (!is.null(pts)) {
      m <- leaflet::addMarkers(m, data = pts)
    }
    if (!is.null(bb)) {
      m <- leaflet::addPolygons(m,
        data = bb,
        stroke = TRUE,
        fillColor = "transparent",
        color = "red",
        opacity = 1
      )
    }
  }
  if (returnMap) {
    m
  } else {
    print(m)
    orig
  }
}
|
ce0501ed09eda06bf4578ce503ec10342fafde32
|
b03b4b9bb4ff8a48f39d3bad19e24509a463369c
|
/DEseq.R
|
6aae3a95ad651fdae9cff32c21993796c0019905
|
[] |
no_license
|
rkweku/Additional-Scripts
|
9d7a5082e800b11558b410a900520a32749037c4
|
bf2ad15db661bd3ad3b687b27286ca5c259ae44f
|
refs/heads/master
| 2020-04-18T01:09:18.660966
| 2018-11-27T05:18:22
| 2018-11-27T05:18:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,469
|
r
|
DEseq.R
|
## RNA-seq analysis with DESeq2
# Import & pre-process ----------------------------------------------------
library("DESeq2")
# Import data from featureCounts
## Previously ran at command line something like this:
## featureCounts -a genes.gtf -o counts.txt -T 12 -t exon -g gene_id GSM*.sam
# countdata <- read.table("static_clustering.txt", header=TRUE, row.names=1)
countdata <- read.table("ExpressionTable_DESEQ.txt", header=TRUE, row.names=1)
# Build one count table per contrast: columns 1-3 are the controls, the next
# three columns are that contrast's treatment replicates. ceiling() is used
# because some values are not integers and the counts must be rounded.
countdata1 <- ceiling(countdata[c(1,2,3,4,5,6)])      # control vs OX
countdata2 <- ceiling(countdata[c(1,2,3,7,8,9)])      # control vs m1_4
countdata3 <- ceiling(countdata[c(1,2,3,10,11,12)])   # control vs m123
countdata4 <- ceiling(countdata[c(1,2,3,13,14,15)])   # control vs m134
countdata5 <- ceiling(countdata[c(1,2,3,16,17,18)])   # control vs m3_4
countdata6 <- ceiling(countdata[c(1,2,3,19,20,21)])   # control vs m4
#print (countdata1)
# # Sample condition factors for each contrast (3 controls + 3 treatments),
# # aligned column-for-column with the matching countdata table above.
condition1 <- factor(c("control", "control", "control", "OX","OX","OX"))
condition2 <- factor(c("control", "control", "control", "m1_4","m1_4","m1_4"))
condition3 <- factor(c("control", "control", "control", "m123","m123","m123"))
condition4 <- factor(c("control", "control", "control", "m134","m134","m134"))
condition5 <- factor(c("control", "control", "control", "m3_4","m3_4","m3_4"))
condition6 <- factor(c("control", "control", "control", "m4","m4","m4"))
# # Convert to matrix (DESeqDataSetFromMatrix expects a count matrix)
countdata1 <- as.matrix(countdata1)
countdata2 <- as.matrix(countdata2)
countdata3 <- as.matrix(countdata3)
countdata4 <- as.matrix(countdata4)
countdata5 <- as.matrix(countdata5)
countdata6 <- as.matrix(countdata6)
# # Analysis with DESeq2 ----------------------------------------------------
# # Create a coldata frame per contrast and instantiate each DESeqDataSet.
# # See ?DESeqDataSetFromMatrix
(coldata1 <- data.frame(row.names=colnames(countdata1), condition1))
(coldata2 <- data.frame(row.names=colnames(countdata2), condition2))
(coldata3 <- data.frame(row.names=colnames(countdata3), condition3))
(coldata4 <- data.frame(row.names=colnames(countdata4), condition4))
(coldata5 <- data.frame(row.names=colnames(countdata5), condition5))
(coldata6 <- data.frame(row.names=colnames(countdata6), condition6))
dds1 <- DESeqDataSetFromMatrix(countData=countdata1, colData=coldata1, design=~condition1)
dds2 <- DESeqDataSetFromMatrix(countData=countdata2, colData=coldata2, design=~condition2)
dds3 <- DESeqDataSetFromMatrix(countData=countdata3, colData=coldata3, design=~condition3)
dds4 <- DESeqDataSetFromMatrix(countData=countdata4, colData=coldata4, design=~condition4)
dds5 <- DESeqDataSetFromMatrix(countData=countdata5, colData=coldata5, design=~condition5)
dds6 <- DESeqDataSetFromMatrix(countData=countdata6, colData=coldata6, design=~condition6)
# # Make "control" the reference level so fold changes are treatment vs control.
dds1$condition1 <- relevel(dds1$condition1, ref="control")
dds2$condition2 <- relevel(dds2$condition2, ref="control")
dds3$condition3 <- relevel(dds3$condition3, ref="control")
dds4$condition4 <- relevel(dds4$condition4, ref="control")
dds5$condition5 <- relevel(dds5$condition5, ref="control")
dds6$condition6 <- relevel(dds6$condition6, ref="control")
# # Run the DESeq pipeline
dds1 <- DESeq(dds1)
dds2 <- DESeq(dds2)
dds3 <- DESeq(dds3)
dds4 <- DESeq(dds4)
dds5 <- DESeq(dds5)
dds6 <- DESeq(dds6)
# # Remove genes with fewer than 1 read
# NOTE(review): this filter runs AFTER DESeq(), so the fit included the
# low-count genes; filtering before DESeq() is the usual order -- confirm
# this ordering was intentional.
dds1 <- dds1[ rowSums(counts(dds1)) > 1, ]
dds2 <- dds2[ rowSums(counts(dds2)) > 1, ]
dds3 <- dds3[ rowSums(counts(dds3)) > 1, ]
dds4 <- dds4[ rowSums(counts(dds4)) > 1, ]
dds5 <- dds5[ rowSums(counts(dds5)) > 1, ]
dds6 <- dds6[ rowSums(counts(dds6)) > 1, ]
# # Create one MA plot per contrast, then one dispersion plot per contrast.
# # Filenames are kept byte-identical to the original script (including the
# # stray "MAplot" infix in the first dispersion filename).
dds_objects <- list(dds1, dds2, dds3, dds4, dds5, dds6)
ma_files <- c('MAplot_control_OX.png',
              'MAplot_control_m1_4.png',
              'MAplot_control_m123.png',
              'MAplot_control_m134.png',
              'MAplot_control_m3_4.png',
              'MAplot_control_m4.png')
for (k in seq_along(dds_objects)) {
  png(ma_files[k])
  plotMA(dds_objects[[k]], ylim=c(-2,2), main='DESeq2')
  dev.off()
}
# # Plot dispersions
disp_files <- c("qc-dispersions_MAplot_control_OX.png",
                "qc-dispersions_control_m1_4.png",
                "qc-dispersions_control_m123.png",
                "qc-dispersions_control_m134.png",
                "qc-dispersions_control_m3_4.png",
                "qc-dispersions_control_m4.png")
for (k in seq_along(dds_objects)) {
  png(disp_files[k], 1000, 1000, pointsize=20)
  plotDispEsts(dds_objects[[k]], main="Dispersion plot")
  dev.off()
}
# Get differential expression results for each contrast; table(padj < 0.05)
# prints a quick count of significant genes, then each table is ordered by
# adjusted p-value.
res1 <- results(dds1)
table(res1$padj<0.05)
res1 <- res1[order(res1$padj), ]
res2 <- results(dds2)
table(res2$padj<0.05)
res2 <- res2[order(res2$padj), ]
res3 <- results(dds3)
table(res3$padj<0.05)
res3 <- res3[order(res3$padj), ]
res4 <- results(dds4)
table(res4$padj<0.05)
res4 <- res4[order(res4$padj), ]
res5 <- results(dds5)
table(res5$padj<0.05)
res5 <- res5[order(res5$padj), ]
res6 <- results(dds6)
table(res6$padj<0.05)
res6 <- res6[order(res6$padj), ]
# Merge each (already padj-ordered) results table with the normalized count
# data; sort=FALSE preserves the padj ordering through the merge.
res1data <- merge(as.data.frame(res1), as.data.frame(counts(dds1, normalized=TRUE)), by="row.names", sort=FALSE)
res2data <- merge(as.data.frame(res2), as.data.frame(counts(dds2, normalized=TRUE)), by="row.names", sort=FALSE)
res3data <- merge(as.data.frame(res3), as.data.frame(counts(dds3, normalized=TRUE)), by="row.names", sort=FALSE)
res4data <- merge(as.data.frame(res4), as.data.frame(counts(dds4, normalized=TRUE)), by="row.names", sort=FALSE)
res5data <- merge(as.data.frame(res5), as.data.frame(counts(dds5, normalized=TRUE)), by="row.names", sort=FALSE)
res6data <- merge(as.data.frame(res6), as.data.frame(counts(dds6, normalized=TRUE)), by="row.names", sort=FALSE)
# Write results, one CSV per contrast.
# FIX: the original wrote res2data twice (to both the m1_4 and m123 files),
# shifted res3data-res5data onto the wrong contrasts, and never wrote
# res6data at all. Each results table now goes to the file matching its own
# contrast, and the two "diffexpr-result_" filenames are normalised to the
# "diffexpr-results_" form used by the other files.
write.csv(res1data, file="diffexpr-results_control_OX.csv")
write.csv(res2data, file="diffexpr-results_control_m1_4.csv")
write.csv(res3data, file="diffexpr-results_control_m123.csv")
write.csv(res4data, file="diffexpr-results_control_m134.csv")
write.csv(res5data, file="diffexpr-results_control_m3_4.csv")
write.csv(res6data, file="diffexpr-results_control_m4.csv")
|
9173975e2a4044d5538ac7d5f15fa5bb3274fece
|
c77938ab77375bd8a524daba269e83a201c22cdf
|
/modules/predictBirds/R/fitModel.R
|
ac1866966490ff87caf1fb81776ecc5e54f6191f
|
[] |
no_license
|
tati-micheletti/borealBirdsAndForestry
|
dbe385d5129770caec12e986371fc76f2317801e
|
27d11df65759ed720d2a6646132597a117c3fc39
|
refs/heads/master
| 2021-03-19T17:13:20.851222
| 2020-09-17T19:07:29
| 2020-09-17T19:07:29
| 121,669,535
| 0
| 4
| null | 2018-08-02T21:52:30
| 2018-02-15T19:03:31
|
HTML
|
UTF-8
|
R
| false
| false
| 1,521
|
r
|
fitModel.R
|
fitModel <- function(inRas,
                     inputModel,
                     x,
                     tileYear){
  # Data-frame based prediction (this used to be a raster predict).
  # Dispatch on the fitted model's class: mixed models (glmerMod) and plain
  # glms need different predict() arguments.
  if ("glmerMod" %in% class(inputModel)) {
    # re.form = NA drops the random effects from the model; "response"
    # returns densities rather than the log-scale linear predictor.
    prediction <- predict(object = inputModel,
                          newdata = inRas,
                          re.form = NA,
                          type = "response")
    attr(prediction, "prediction") <- paste0(x, tileYear)
  } else if ("glm" %in% class(inputModel)) {
    # The glm was fit with an offset column (named "OF...<x>..."); recreate
    # it as zeros in the new data so predict() can find it.
    modelVars <- names(inputModel$data)
    off <- modelVars[grepl(pattern = "OF", x = modelVars) &
                       grepl(pattern = x, x = modelVars)]
    zeroOffset <- rep(0, times = nrow(inRas))
    keepNames <- names(inRas)
    inRas <- cbind(inRas, zeroOffset)
    names(inRas) <- c(keepNames, off)
    prediction <- predict(newdata = inRas,
                          object = inputModel,
                          type = "response") # no re.form argument for glm
    attr(prediction, "prediction") <- paste0(x, tileYear)
  }
  # Results are already on the density scale; the original *1000 integer
  # packing is intentionally left disabled:
  #prediction[] <- prediction[]*1000
  #suppressWarnings(storage.mode(prediction[]) <- "integer")
  message(crayon::green(paste0(x, " prediction finalized for year ", tileYear)))
  return(prediction)
}
|
41dc9363ff32c8737af3dc6b1ba54267fd8446a5
|
5d7c787375367158323d48bea255f1422b522ef9
|
/SF360/server.R
|
649c5566705f1c70b00ce88acf055fc29ae7c4d9
|
[] |
no_license
|
nightowl21/Extra-Projects
|
21087d0a6aae4486b9dfa85d893d25a9f3f064f9
|
82be6e6b47f60ff169e1985aec78f61fae4c8df2
|
refs/heads/master
| 2021-06-19T09:05:03.201565
| 2017-06-14T05:46:21
| 2017-06-14T05:46:21
| 50,462,583
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,721
|
r
|
server.R
|
# Load shared helpers (defines makepath(), used below).
source("helper_file.R")
# Marker icons keyed by venue category; indexed as icons[category] when
# adding venue markers in the server function below.
icons <- iconList(
  active = makeIcon(makepath("active.png"), iconWidth=18, iconHeight=18),
  arts = makeIcon(makepath("art-and-culture.png"), iconWidth=24, iconHeight=24),
  restaurants = makeIcon(makepath("dinner-eat-restaurant-icon.png"),
                         iconWidth=24, iconHeight=24),
  nightlife = makeIcon(makepath("glass_icon1.png"), iconWidth=24, iconHeight=24)
)
shinyServer(function(input, output, session) {

  ## Interactive Map ###########################################

  # Create the base map once; the observers below update it via leafletProxy().
  output$map <- renderLeaflet({
    leaflet() %>%
      addTiles(
        urlTemplate = "//{s}.tiles.mapbox.com/v3/jcheng.map-5ebohr46/{z}/{x}/{y}.png",
        attribution = 'Maps by <a href="http://www.mapbox.com/">Mapbox</a>'
      ) %>%
      setView(-122.42, 37.78, zoom = 13)
  })

  # Redraw venue markers when the user presses "go": venues are filtered by
  # weekday (input$var), category (input$choices), and whether they are open
  # at the selected hour (input$bins).
  observeEvent(input$go, {
    data1 = restaurants[restaurants$day == input$var, ]
    data <- subset(data1, category %in% input$choices)
    mytime <- input$bins
    data <- subset(data, (data$opening_1_hours<= mytime) &
                     (data$closing_1_hours> mytime))
    if (nrow(data)==0){
      # FIX: this was clearMarers() (typo), which errored at runtime; the
      # intent is to clear any existing markers when nothing matches.
      leafletProxy("map", data = data) %>%
        clearMarkers()
    } else {
      leafletProxy("map", data = data) %>%
        clearMarkers() %>%
        addMarkers(icon = ~icons[category],
                   popup = ~paste0("<b><h4>", name, "</b></h4>",
                                   "<b>Category:</b> ", category, "<br>",
                                   "<b>Timings:</b> ", opening_1, ' - ', closing_1,
                                   "<br>",
                                   "<b>Price:</b> ", price_range, "<br>",
                                   "<b>Ratings:</b> ", rating,
                                   " (", reviews, " reviews)", "<br>",
                                   "<a href=", url, ">Yelp Link</a>")
        )
    }
  })

  # Overlay clustered crime markers for the same weekday/hour selection.
  observeEvent(input$go, {
    data2 <- subset(crime.data, subset = ((Date..Time == floor(input$bins)) &
                                            Weekday == input$var))
    # FIX: markerClusterOptions() takes no lng/lat arguments -- and the
    # original passed Latitude as lng and Longitude as lat. Coordinates are
    # now given to addCircleMarkers() directly with the correct mapping.
    # (An unused toJSONArray2() call building `crime_array` was removed.)
    leafletProxy("map", data = data2) %>%
      clearMarkerClusters() %>%
      addCircleMarkers(lng = data2$Longitude,
                       lat = data2$Latitude,
                       popup = data2$Crime.Type,
                       clusterOptions = markerClusterOptions())
  })
})
|
11fb68e71d490608003d34df90be9544a876558e
|
5177b6787faf6aa0975a14e716502d395c42d5f1
|
/tidymodels.R
|
1476c8659fde0e957309f64113aa859c46310fc8
|
[] |
no_license
|
rpodcast/renv_learning
|
d50465c9c40e561f1975220cb054744e7eb7434b
|
b6ac53fb8d7beb69f5b67f73871f27ea7984c87c
|
refs/heads/master
| 2023-08-22T19:19:31.579084
| 2021-10-08T03:27:55
| 2021-10-08T03:27:55
| 414,778,030
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 32
|
r
|
tidymodels.R
|
# Load the pak installation helper and the tidymodels meta-package.
# NOTE(review): pak is installation tooling; confirm it is needed at runtime.
library(pak)
library(tidymodels)
|
3336f9f8919fa702e298971733e01303275e53f0
|
0116fa27069272135b3cb53efa60c1d5e8fc5bfc
|
/man/make_qsubfile.Rd
|
e63d091d1cbbe3d89f5e41961dbb452fb4e4dfb9
|
[] |
no_license
|
sinnhazime/jobwatcher
|
540496726f828d50e3e24ec83b69b19f541b39e6
|
2e4c3e7be4985484c25270f9fdaf85fab41b1d55
|
refs/heads/master
| 2020-04-13T14:07:00.888210
| 2019-08-29T04:20:00
| 2019-08-29T04:20:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,094
|
rd
|
make_qsubfile.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qsub.R
\name{make_qsubfile}
\alias{make_qsubfile}
\title{make a file suitable for \emph{qsub}}
\usage{
make_qsubfile(..., name = NA_character_, first_line = binbash(),
parallel = parallel_option(), arrayjob = arrayjob_option(),
directory = directory_option(), use_bash_profile = TRUE,
other_req = character(0))
}
\arguments{
\item{...}{Your codes (default: \emph{bash} codes). Each argument should be a character vector. Multiple arguments and multiple elements will be separated with a line break.}
\item{name}{A character}
\item{first_line}{A character. It is written in the first line.}
\item{parallel}{A character}
\item{arrayjob}{A character}
\item{directory}{A character}
\item{use_bash_profile}{A logical. Whether \emph{source ~/.bash_profile} or not.}
\item{other_req}{A character. Other requirements for \emph{qsub}}
}
\value{
qsub script as a character. In order to write this in a file, use \code{write} or \code{\link{write_qsubfile}}.
}
\description{
make a file suitable for \emph{qsub}
}
|
6376af6b14e4b91cf68288adf9c3c20aa081b4cf
|
f06e99784403917490dc25e848c26d9f49410027
|
/lecture 8.R
|
a868af47c2d0726e9952a4bbf4dea3be9baffaed
|
[] |
no_license
|
prl907/st503
|
4220122927ba3b29d620027b38978fa7845d826c
|
53a83740206e4ad813d9930f1766c430ed7f2df8
|
refs/heads/master
| 2020-03-27T12:06:46.089698
| 2018-12-20T18:43:31
| 2018-12-20T18:43:31
| 146,526,853
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,286
|
r
|
lecture 8.R
|
# Regression diagnostics on the teengamb data: partial residual plots,
# leverage, standardized residuals and Cook's distance.
library(faraway)
data(teengamb)
# Full model: gambling expenditure on sex, status, income and verbal score.
o <- lm(gamble ~ sex + status + income + verbal, data=teengamb)
summary(o)
e <- residuals(o)
r <- rstandard(o)
id <- row.names(teengamb)
# partial regression/residual plots
# method 1: added-variable plot -- residuals of gamble and of income, each
# regressed on the remaining predictors.
d <- residuals(lm(gamble ~ sex + status + verbal, data=teengamb))
g <- residuals(lm(income ~ sex + status + verbal, data=teengamb))
# our initial thought is that income should be included on its own and no transformation
plot(g, d, xlab="income - effect of others", ylab="gamble - effect of others")
# method 2: partial residuals = residuals + income's fitted contribution.
p <- e + o$coefficients[4] * teengamb$income
plot(teengamb$income, p, xlab="income", ylab="partial resisuals")
termplot(o, partial.resid=TRUE, terms=3) # centers the income variable first
# both methods come to the same conclusion
# checking for outliers
# extract the leverages
h <- hatvalues(o)
# half norm plot, 5 tells how many extreme points to label
# id is the row numbers
halfnorm(h, 5, labs=id, ylab="Leverages")
qqnorm(r); qqline(r)
# influential cases
D <- cooks.distance(o)
halfnorm(D, 5, labs=id, ylab="Cook's distance")
# NOTE(review): `p` holds partial residuals here; which.max(h) (leverages)
# may have been intended at this step -- confirm against the lecture notes.
which.max(p)
which.max(r)
# Refit with the single most influential observation removed and compare
# coefficients side by side.
oo <- lm(gamble ~ sex + status + income + verbal, data=teengamb, subset=(D < max(D)))
summary(oo)
cbind(all=o$coefficients, removed=oo$coefficients)
# built-in lm diagnostic plots in R
plot(o)
|
c360916501a9e765bc89b0eb074efe9ad3733b38
|
14870a84eaf1f692d7a7f6212ae1e4a31f2458e2
|
/completeApp/app/global.R
|
86ae5acd2dea75d6c39e4ae8cf8fe5c5d178b172
|
[] |
no_license
|
MathieuMarauri/shinyApps
|
2476bd0199deb7eaa5e315fbb90d08f577f9c1d4
|
f7c787a4f974db481b98048d44813d3d730b4c39
|
refs/heads/master
| 2021-07-09T18:15:28.796178
| 2017-10-12T08:44:40
| 2017-10-12T08:44:40
| 104,908,802
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,744
|
r
|
global.R
|
# Packages ----------------------------------------------------------------
library('shiny')
library('shinydashboard')
library('shinyjs')
library('shinyBS')
library('shinyWidgets') # the dev version 0.3.4.930
library('highcharter')
library('DT')
library('data.table')
# Function ----------------------------------------------------------------
#'
#' This function builds the data used to create one serie and its associated drilldown serie for a grouped-stacked bar chart
#'
#' @param data a data.table with the data to structure; it must contain the
#'   columns 'date', 'source' and 'model' plus the column named by `kpi`
#' @param kpi the name of the kpi to analyze (a column of `data`)
#' @param daterange a character vector of length 2 with the date limits
#'   (both ends inclusive)
#' @param stack the name of the stack (used in the drilldown ids and in the
#'   'stack' field of each level-2 serie)
#'
#' @return a list with the 2 levels: `level1` (one entry per source, in
#'   highcharter list form) and `level2` (one drilldown serie per source)
#'
barchartData <- function(data, kpi, daterange, stack){
# Sum the kpi per source/model inside the requested date window.
data <- data[date >= daterange[1] & date <= daterange[2], .(y = sum(get(kpi))), by = list(source, model)]
# Level 1: one bar per source, with a drilldown id of the form
# '<source>_<stack>_<kpi>', sorted by decreasing value.
data_level1 <- data[, .(y = sum(y)), by = source]
data_level1 <- data_level1[, .(name = source, y, drilldown = paste0(tolower(source), '_', stack, '_', kpi))]
setorderv(data_level1, cols = 'y', order = -1L)
# Level 2: for every source, a drilldown serie of its models, sorted by
# decreasing value and converted with highcharter's list_parse2().
data_level2 <- lapply(X = data_level1$name,
FUN = function(x){
id <- paste0(tolower(x), '_', stack, '_', kpi)
name <- paste0('source_', stack, '_', kpi)
data <- data[source == x, .(name = model, y)]
setorderv(data, cols = 'y', order = -1L)
data <- list_parse2(data)
# Self-assignment presumably forces the promise so each closure keeps its
# own copy of `stack` -- behavior unchanged; confirm before removing.
stack <- stack
return(list(id = id, name = name, data = data, stack = stack))
})
return(list(level1 = list_parse(data_level1), level2 = data_level2))
}
# Highchart theme ---------------------------------------------------------
# Shared colour palette for every chart in the app.
# NOTE(review): 'hearder_color' is a typo for 'header_color'; the name is
# kept unchanged because the sourced module files may reference it.
hearder_color <- '#17C6A3'
danger_color <- '#FF5555'
warning_color <- '#FABB3D'
dark_grey <- '#374649'
soft_grey <- '#5F6B6D'
info_color <- '#67C2EF'
element_color <- soft_grey  # single accent colour for axes, labels and tooltips
# Global highcharter theme: transparent background, dotted grid lines, and
# element_color applied to every axis/label/tooltip element.
theme <- hc_theme(
  colors = c(hearder_color, dark_grey, danger_color, warning_color, info_color, soft_grey),
  chart = list(
    backgroundColor = "transparent",
    style = list(
      fontFamily = 'Century Gothic, sans-serif'
    )
  ),
  xAxis = list(
    gridLineColor = element_color,
    gridLineWidth = 1,
    gridLineDashStyle = 'dot',
    lineColor = element_color,
    lineWidth = 1,
    tickColor = element_color,
    labels = list(
      style = list(
        color = element_color
      )
    ),
    title = list(
      style = list(
        color = element_color
      )
    )
  ),
  yAxis = list(
    gridLineColor = element_color,
    gridLineWidth = 1,
    gridLineDashStyle = 'dot',
    lineColor = element_color,
    lineWidth = 1,
    tickColor = element_color,
    labels = list(
      style = list(
        color = element_color
      )
    ),
    title = list(
      style = list(
        color = element_color
      )
    )
  ),
  tooltip = list(
    # Fixed typo: was 'backgroudColor', which Highcharts silently ignored,
    # leaving the tooltip with its default background.
    backgroundColor = element_color,
    borderColor = element_color,
    borderRadius = 0,
    style = list(
      color = element_color
    )
  ),
  drilldown = list(
    activeAxisLabelStyle = list(
      textDecoration = 'none',
      fontStyle = 'italic',
      color = element_color
    )
  ),
  title = list(
    style = list(
      color = element_color,
      fontSize = '23px',
      fontWeight = 'bold'
    )
  ),
  legend = list(
    itemStyle = list(
      color = element_color
    ),
    itemHiddenStyle = list(
      color = '#222222'
    )
  )
)
# Modules -----------------------------------------------------------------
# tab global_view
# Paths are relative to the app directory; these files presumably define the
# shiny module UI/server pairs used by the global-view tab -- confirm there.
source('modules/global view/valuebox.R')
source('modules/global view/drilldownchart.R')
|
c1cdf738d94bce0c5a37b3b5d52628915b1b9f61
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.database/man/redshiftserverless_list_snapshots.Rd
|
ebb413d51d7e4dd1667546b671fcc4abc3f1c471
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 1,404
|
rd
|
redshiftserverless_list_snapshots.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/redshiftserverless_operations.R
\name{redshiftserverless_list_snapshots}
\alias{redshiftserverless_list_snapshots}
\title{Returns a list of snapshots}
\usage{
redshiftserverless_list_snapshots(
endTime = NULL,
maxResults = NULL,
namespaceArn = NULL,
namespaceName = NULL,
nextToken = NULL,
ownerAccount = NULL,
startTime = NULL
)
}
\arguments{
\item{endTime}{The timestamp showing when the snapshot creation finished.}
\item{maxResults}{An optional parameter that specifies the maximum number of results to
return. You can use \code{nextToken} to display the next page of results.}
\item{namespaceArn}{The Amazon Resource Name (ARN) of the namespace from which to list all
snapshots.}
\item{namespaceName}{The namespace from which to list all snapshots.}
\item{nextToken}{If \code{nextToken} is returned, there are more results available. The value
of \code{nextToken} is a unique pagination token for each page. Make the call
again using the returned token to retrieve the next page.}
\item{ownerAccount}{The owner Amazon Web Services account of the snapshot.}
\item{startTime}{The time when the creation of the snapshot was initiated.}
}
\description{
Returns a list of snapshots.
See \url{https://www.paws-r-sdk.com/docs/redshiftserverless_list_snapshots/} for full documentation.
}
\keyword{internal}
|
97bf8953ba012fcd4f1a7638de128d8401563a7a
|
c743e20bebbaf6f59056e697c23b29e291877915
|
/man/node_preprocess.Rd
|
7729b35f62725639f0185668c447e15c9c910665
|
[] |
no_license
|
cran/netregR
|
f22457d928c27c4b54280f97e1d0bdd2ff86c815
|
c831e31a8a6bed1093ecc1e43669ea73c982a435
|
refs/heads/master
| 2020-03-08T20:40:55.031462
| 2018-08-01T22:10:02
| 2018-08-01T22:10:02
| 128,388,283
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 318
|
rd
|
node_preprocess.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helpers.R
\name{node_preprocess}
\alias{node_preprocess}
\title{Pre-processes data for ordering etc.}
\usage{
node_preprocess(Y, X, directed, nodes, subtract = NULL)
}
\description{
Pre-processes data for ordering etc.
}
\keyword{internal}
|
e0508fc8800d01f447274708bb3aabc1eacd0924
|
ce828d49e40d96aa975b792e23d0ed4172828ef7
|
/Pass-through Cambio Inflacao/R/scripts/BACEN-SelectMerge.r
|
4080e9224b9ac11373ce196e5053f78b87bc568b
|
[] |
no_license
|
btebaldi/MetodosQuantitativos
|
bd5ee54a088707f0408636c3955f3087ee3f7ae2
|
c38ae095064e0b021b3ebdd0427bd3d383742b80
|
refs/heads/master
| 2021-01-20T06:26:20.750079
| 2020-07-12T16:54:38
| 2020-07-12T16:54:38
| 89,879,517
| 0
| 0
| null | null | null | null |
MacCentralEurope
|
R
| false
| false
| 969
|
r
|
BACEN-SelectMerge.r
|
## --- R libraries
library(RODBC);
## --- Main program
databasefile = "../database/MQA-PassThrough-Database.accdb"
## Open the connection to the database
conn = odbcConnectAccess2007(databasefile);
# Pull the IPCA/IGPM/IGPDI inflation series observed strictly between
# 2010-01-01 and 2012-01-01 (Access-style #date# literals).
query = "select CODE, DT_REFERENCIA, VALOR from SERIES_DEFINICAO, SERIES_DADOS_ECONOMIA where SERIES_DEFINICAO.ID = SERIES_DADOS_ECONOMIA.ID_SERIE and SERIES_DEFINICAO.CODE IN ('IPCA','IGPM','IGPDI') and DT_REFERENCIA>#2010-01-01# and DT_REFERENCIA<#2012-01-01#";
result = sqlQuery(conn,query,stringsAsFactors=FALSE);
## Close the connection to the database
odbcClose(conn);
print(result);
#result = result[-32,]
# Reshape from long to wide: one DT_REFERENCIA column plus one column per
# series code, outer-merged so missing observations become NA.
# NOTE(review): 1:length(codes) misbehaves if the query returns no rows;
# seq_along(codes) would be safer.
data = data.frame(DT_REFERENCIA=unique(result[['DT_REFERENCIA']]));
codes = unique(result[['CODE']]);
for (i in 1:length(codes)){
df = result[result[["CODE"]]==codes[i],c('DT_REFERENCIA','VALOR')];
colnames(df) = c('DT_REFERENCIA',codes[i]);
data = merge(data,df,by='DT_REFERENCIA',all=TRUE);
}
|
ad5d6cf7b0205b8f881c3c3f94018c33aba21bdf
|
ec71dc1a7ade785d3cab60c9878b939969636760
|
/R/data.R
|
98da6fc9fb6a709184cff8a87c75d9c0ea60180d
|
[] |
no_license
|
Elsa-Yang98/ConformalSmallest
|
a800255f55446f173575a6367ca193bc5acb89b0
|
adcc98543289600952d80eccbe278d387ff55295
|
refs/heads/main
| 2023-07-07T14:44:30.746429
| 2021-08-10T08:10:52
| 2021-08-10T08:10:52
| 373,796,629
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 711
|
r
|
data.R
|
# NOTE(review): the \describe{} items under 'Blog data' appear to be roxygen
# template placeholders (Mechanical Turk worker IDs / trial numbers are
# unrelated to blog-post features); verify against blogData_train.csv.
# The "dimension p+1" wording below presumably means p predictor columns plus
# one response column -- confirm with the source CSVs.
#' Blog data
#'
#'@format A dataset of dimension 280+1:
#' \describe{
#' \item{subject}{Anonymized Mechanical Turk Worker ID}
#' \item{trial}{Trial number, from 1..NNN}
#' ...
#' }
#'
#'@source blogData_train.csv
"blog"
#' Concrete data
#'
#'@format A dataset of dimension 8+1
#'
#'@source concrete.csv
"concrete"
#' Protein data
#'
#'@format A dataset of dimension 8+1
#'
#'@source CASP.csv
"protein"
#' News data
#'
#'@format A dataset of dimension 59+1
#'
#'@source OnlineNewsPopularity.csv
"news"
#' Kernel data
#'
#'@format A dataset of dimension 14+1
#'
#'@source sgemm_product.csv
"kernel"
#' Superconduct data
#'
#'@format A dataset of dimension 81+1
#'
#'@source train.csv
"superconduct"
|
ff4826cc5c850a131bcbaaa8fd0442ec24692cf2
|
a34c74086329dfd2aa7f8ad588e07f5bc7c05870
|
/functions and packages/gmes_calc_tobacco.R
|
0e90a4b1a601e869ea88c455204f2186c161ed2f
|
[] |
no_license
|
CourtneyCampany/WTC3_tree
|
cbc1a0813edf337eba367d428974588f85bc440a
|
500c06a5c134fb6b419901b8708b26f22232c733
|
refs/heads/master
| 2020-04-09T18:37:49.332701
| 2018-09-12T14:49:24
| 2018-09-12T14:49:24
| 30,519,484
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,397
|
r
|
gmes_calc_tobacco.R
|
#### gmes calculation --------------------------------------------------------
# Adds derived gas-exchange columns (discrimination terms and mesophyll
# conductance gm) to a data frame of licor-style measurements. All default
# coefficients are kept exactly as in the original analysis.
# NOTE(review): del_growth, delR and k25g are accepted but unused by the
# active formulas (k25g only appears in the commented-out alternative
# temperature parameterisation).
gmcalc_tob_func <- function(x, a=4.4, ab= 2.9, e=30, b=29, f=16.2,del_growth = -8 , delR=-38,
                            k25r=0.718, k25g=38.89, Ea_r = 46.39, Ea_g = 24.46,Rgc=8.314472, c_r=18.72, c_g=13.49){
  leaf_temp_k <- 273 + x$Tleaf                        # leaf temperature in Kelvin
  x$CiCa <- x$Ci / x$CO2R                             # intercellular / reference CO2 ratio
  # Weighted fractionation factor across the boundary layer and stomata.
  x$a_prime <- (ab * (x$CO2S - x$C2sfc) + a * (x$C2sfc - x$Ci)) / (x$CO2S - x$Ci)
  # Exponential temperature responses for day respiration and Gamma-star.
  x$Rd <- k25r * exp(c_r - Ea_r * 1000 / (Rgc * leaf_temp_k))
  x$Gstar <- exp(c_g - Ea_g * 1000 / (Rgc * leaf_temp_k)) * x$O2 / 21
  # x$Rd <- k25r * exp(Ea_r*((x$Tleaf+273.15)-298)/(298*Rgc*(x$Tleaf+273.15)))
  # x$Gstar <- k25g * exp(Ea_g*((x$Tleaf+273.15)-298)/(298*Rgc*(x$Tleaf+273.15)))
  x$rd_term <- e * x$Rd * (x$Ci - x$Gstar) / ((x$Photo + x$Rd) * x$CO2S)
  x$f_term <- f * x$Gstar / x$CO2S
  # Simple bookkeeping columns.
  x$TleafminusTair <- x$Tleaf - x$Tair
  x$TblockminusTair <- x$TBlk - x$Tair
  x$CO2Rdry <- x$CO2R / (1 - x$H2OR / 1000)
  x$CO2Sdry <- x$CO2S / (1 - x$H2OS / 1000)
  # Ternary correction terms.
  x$t <- (1 + x$a_prime / 1000) * x$Trmmol / x$CndCO2 / 1000 / 2
  x$t2 <- 1 / (1 - x$t)
  x$t3 <- (1 + x$t) / (1 - x$t)
  # Predicted discrimination (Di) and its gap to the observed DELTA.
  x$Di <- x$a_prime * x$t2 + (x$t3 * b - x$a_prime * x$t2) * x$CiCa
  x$DiminusDo <- x$Di - x$DELTA
  x$rd_term2 <- x$t3 - x$rd_term
  x$f_term2 <- x$t3 - x$f_term
  # Mesophyll conductance and its pressure-normalised version.
  x$gm <- x$t3 * (b - 1.8 - x$Rd * e / (x$Rd + x$Photo)) * x$Photo / x$CO2S / (x$DiminusDo - x$rd_term2 - x$f_term2)
  x$gm_bar <- x$gm * 100 / x$Press
  return(x)
}
|
835e0b4f19ce3bcff8cf8ad57afb46bf49c5006d
|
2c5ad7606ebf6c29ef24d15aa7947177a207225d
|
/helper.r
|
b118fee8a59251716a256ba0a8cd66687df9e577
|
[] |
no_license
|
WillemVervoort/RainfallCS
|
3013070a50a502674348c201534b74105f0c5c95
|
c20541d8aa8d353876404bb15993b626575ed98c
|
refs/heads/master
| 2021-01-15T18:01:06.889053
| 2014-12-11T11:14:10
| 2014-12-11T11:14:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,625
|
r
|
helper.r
|
## Helper functions for Rainfall CS project
# 1. multiplot function from http://www.cookbook-r.com/Graphs/Multiple_graphs_on_one_page_%28ggplot2%29/
# Multiple plot function
#
# ggplot objects can be passed in ..., or to plotlist (as a list of ggplot objects)
# - cols: Number of columns in layout
# - layout: A matrix specifying the layout. If present, 'cols' is ignored.
#
# If the layout is something like matrix(c(1,2,3,3), nrow=2, byrow=TRUE),
# then plot 1 will go in the upper left, 2 will go in the upper right, and
# 3 will go all the way across the bottom.
#
# Arrange several plot objects on one page using a grid layout.
# (Adapted from the cookbook-r "multiple graphs on one page" recipe.)
#
# Plot objects can be passed in ... or as a list via 'plotlist'.
# - cols: number of columns in the grid layout
# - layout: optional matrix of plot indices; when supplied, 'cols' is ignored.
#   e.g. matrix(c(1, 2, 3, 3), nrow = 2, byrow = TRUE) places plot 1 top-left,
#   plot 2 top-right and plot 3 across the whole bottom row.
# NOTE(review): the 'file' argument is accepted but never used; it is kept
# only so existing calls do not break.
multiplot <- function(..., plotlist = NULL, file, cols = 1, layout = NULL) {
  require(grid)
  all_plots <- c(list(...), plotlist)
  n_plots <- length(all_plots)
  if (is.null(layout)) {
    # Default layout: a cols-wide grid, filled column by column.
    n_rows <- ceiling(n_plots / cols)
    layout <- matrix(seq(1, cols * n_rows), ncol = cols, nrow = n_rows)
  }
  if (n_plots == 1) {
    # A single plot needs no grid machinery.
    print(all_plots[[1]])
  } else {
    grid.newpage()
    pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
    for (k in seq_len(n_plots)) {
      # The cells of 'layout' containing k give the rows/columns plot k spans.
      cell <- as.data.frame(which(layout == k, arr.ind = TRUE))
      print(all_plots[[k]],
            vp = viewport(layout.pos.row = cell$row,
                          layout.pos.col = cell$col))
    }
  }
}
# 2. read in ozdata and stations Rdata
# Paths are relative to the app's working directory. 'stations.Rdata' was
# produced by the commented-out code at the bottom of this file and supplies
# the 'stations' data frame.
oz.map <- read.csv("ozdata.csv")
load("stations.Rdata")
# 3. Some old code
# # load the different tables into a data frame
# df_main <- sqlQuery(db, "select * from main_table",as.is = c(1,2,6,7),
# stringsAsFactors=F)
# df_regr_results <- sqlQuery(db, "select * from regr_results")
# df_regr_stats <- sqlQuery(db, "select * from regr_stats")
# here code that runs when app is launched
# This is some old code just to get the BOM stations into a Rdata file
# stations <- read.fwf("20140617_AllBOMstations.txt",
# widths=c(7,6,43,7,7,9,10,16,4,10,9,7),
# skip = 3,
# na.strings = c("..",".....","null"))
# colnames(stations) <-c("Site","Dist","Site_name","Start","End","Lat",
# "Lon","Source","STA","Height_m","Bar_ht","WMO")
# save(stations, file="Stations.Rdata")
# this could be moved to a helper script
|
3fa097e0c6bbcb7a3fcb670fc68c6925f1f5d514
|
bbbf014b7675036df4ca5d7039cf338375ac89a7
|
/Part3/workflow/scripts/plot_integrated_data_by_species.R
|
b13468e39a1c1677ea87b69d75889c0a7a11e6b6
|
[
"MIT"
] |
permissive
|
Woodformation1136/SingleCell
|
17c0a1bf9bf134178a2675f0ae06819417ea744c
|
cb4a463a7278b37c90c357ac8d4aaa0d57a8e774
|
refs/heads/main
| 2023-04-18T08:19:10.542089
| 2022-12-23T17:40:44
| 2022-12-23T17:40:44
| 385,837,434
| 0
| 1
| null | 2022-12-23T17:40:45
| 2021-07-14T06:29:16
|
R
|
UTF-8
|
R
| false
| false
| 4,428
|
r
|
plot_integrated_data_by_species.R
|
library(Seurat)
library(scales)
library(magrittr)
# Snapshot of the default graphics parameters, taken before any plotting so
# output_png_figure() can restore the caller's margins afterwards.
ori_par <- par(no.readonly = TRUE)
# Define functions =============================================================
# Output figure
# Render a plotting function, optionally into a PNG file.
#
# plotting_function      function that draws the figure; it is called with
#                        output_figure / output_path / output_without_margin
#                        plus any extra arguments in `...`.
# output_figure          if TRUE, open a png() device writing to output_path.
# output_path            file written when output_figure is TRUE.
# output_without_margin  if TRUE, draw with zero margins; otherwise the
#                        margins recorded in the global `ori_par` are used.
#
# Fix: the device close and margin restore now run via on.exit(), so an
# error inside plotting_function() no longer leaks an open png device or
# leaves par() in a modified state (the original only cleaned up on success).
output_png_figure <- function(
  plotting_function,
  output_figure = FALSE,
  output_path = "temp.png",
  output_without_margin = FALSE,
  ...
) {
  if (output_figure) {
    png(output_path,
        pointsize = 10, res = 300,
        width = 20, height = 15, units = "cm")
    on.exit(dev.off(), add = TRUE)
  }
  # Restore the caller's margins on exit; after = FALSE registers this to run
  # BEFORE dev.off(), so it applies to the device we modified.
  on.exit(par(mai = ori_par$mai), add = TRUE, after = FALSE)
  if (output_without_margin) {
    par(mai = c(0, 0, 0, 0))
  } else {
    par(mai = ori_par$mai)
  }
  plotting_function(
    output_figure = output_figure,
    output_path = output_path,
    output_without_margin = output_without_margin,
    ...
  )
  invisible(NULL)
}
# Plot seurat UMAP colored on each sample
# Scatter the UMAP embedding, colouring each point by its species.
#
# MS_plotting_df         data frame with columns UMAP.1, UMAP.2 and Species.
# output_without_margin  if TRUE, suppress the axes and the title.
# output_without_legend  if TRUE, suppress the species legend.
# col_species            named colour vector, indexed by species name.
# ...                    ignored; accepted so extra arguments forwarded by
#                        output_png_figure() are absorbed harmlessly.
plot_on_sample <- function(
  MS_plotting_df,
  output_without_margin,
  output_without_legend,
  col_species,
  ...
) {
  xs <- MS_plotting_df$UMAP.1
  ys <- MS_plotting_df$UMAP.2
  point_col <- col_species[MS_plotting_df$Species]
  # Randomise drawing order so no species systematically overplots the others.
  ord <- sample(length(xs))
  main_title <- if (output_without_margin) "" else "main"
  plot(
    xs[ord], ys[ord],
    col = point_col[ord], pch = 20, cex = 0.3,
    xlab = "UMAP_1", ylab = "UMAP_2",
    main = main_title,
    axes = !output_without_margin, las = 1
  )
  if (!output_without_legend) {
    legend(
      "bottomleft",
      pch = 20, col = col_species, legend = names(col_species)
    )
  }
}
# Set parameters ===============================================================
# Get input parameters from command line
# (this script is run by Snakemake, which injects the `snakemake` S4 object)
input_MS_plotting_csv <- snakemake@input$MS_plotting_csv
output_figure_folder <- snakemake@output$figure_folder
# Implementation ===============================================================
# Create output directory
if (!dir.exists(output_figure_folder)) {
dir.create(output_figure_folder, recursive = TRUE)
}
# Input MS plotting information
MS_plotting_df <- read.csv(input_MS_plotting_csv)
n_species <- length(unique(MS_plotting_df$Species))
# Wrap function of plotting seurat UMAP colored on each sample.
# Extra arguments passed by output_png_figure() (output_figure, output_path)
# land in `...` and are absorbed by plot_on_sample()'s own `...`.
plotting_function <- function(
output_without_margin,
output_without_legend,
col_species,
...
) {
plot_on_sample(
MS_plotting_df,
output_without_margin,
output_without_legend,
col_species,
...
)
}
# Build one colour row per species plus (for >1 species) a final row showing
# every species. Appending "00" to a hex colour sets its alpha channel to
# fully transparent, so row i renders only species i visibly.
## Multiples samples: colorful
if (n_species > 2) {
col_matrix <- matrix(
hue_pal()(n_species),
ncol = n_species, nrow = n_species + 1,
byrow = TRUE
)
for (i in seq(n_species)) {
col_matrix[i, -i] <- paste0(col_matrix[i, -i], "00")
}
}
## Two samples: (1st: black; 2nd: gold)
if (n_species == 2) {
col_matrix <- matrix(
c("#000000", "#C59739"),
ncol = 2, nrow = 3,
byrow = TRUE
)
for (i in seq(n_species)) {
col_matrix[i, -i] <- paste0(col_matrix[i, i], "00")
}
}
## One samples: black
if (n_species == 1) {
col_matrix <- matrix("#000000", ncol = 1, nrow = 1)
}
## Output figure
# For each colour row, emit three PNG variants: full, legend-free, and
# "clear" (margin- and legend-free). Row n_species + 1 (when present) maps
# to the "All" file name.
# NOTE(review): `species <- unique(...)` is loop-invariant and could be
# hoisted above the loop.
for (i in seq(nrow(col_matrix))) {
species <- unique(MS_plotting_df$Species)
output_png_figure(
plotting_function,
output_figure = TRUE,
output_path =
paste0(
output_figure_folder,
"/BySpecies_", c(species, "All")[i], ".png"
),
output_without_margin = FALSE,
output_without_legend = FALSE,
col_species = col_matrix[i, ] %>%
set_names(species)
)
output_png_figure(
plotting_function,
output_figure = TRUE,
output_path =
paste0(
output_figure_folder,
"/BySpecies_", c(species, "All")[i], "_NoLegend.png"
),
output_without_margin = FALSE,
output_without_legend = TRUE,
col_species = col_matrix[i, ] %>%
set_names(species)
)
output_png_figure(
plotting_function,
output_figure = TRUE,
output_path =
paste0(
output_figure_folder,
"/BySpecies_", c(species, "All")[i], "_Clear.png"
),
output_without_margin = TRUE,
output_without_legend = TRUE,
col_species = col_matrix[i, ] %>%
set_names(species)
)
}
|
ce76b33f40348d102f0d59067d36e7c3a5dc3c81
|
efc373c2ecb0fc0b00db2f05808f2b50de3af5f1
|
/BasePower.r
|
d1cb4aee6b309753b869457c3f6d937788a00eb6
|
[] |
no_license
|
obaidpervaizgill/Fun-Shorts
|
ff27bccac7ac215bf2f32e11d7f89ff3a77040a7
|
dd074fba2c79d07bf320359ef94d7531b02208ac
|
refs/heads/master
| 2020-12-25T14:49:24.671557
| 2017-06-24T15:10:25
| 2017-06-24T15:10:25
| 62,437,825
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 423
|
r
|
BasePower.r
|
# base power - function
# num is number you are trying to find the x for where x is some power to the base 10
# e.g. 100 == 10^x the function should return 2, here 100 is num and x is what we are trying to find
# Prints every i in seq(1, 100000, 0.01) for which as.integer(10^i) == num.
findNum <- function(num){
  x <- seq(1, 100000, 0.01)
  for (i in x) {
    # as.integer() overflows to NA once 10^i exceeds .Machine$integer.max.
    # The original code dropped the NA with y[!is.na(y)] and then crashed on
    # `if (logical(0))` ("argument is of length zero") around i ~ 9.34.
    y <- suppressWarnings(as.integer(10^i))
    if (is.na(y)) {
      # 10^i is monotonically increasing, so once past the integer range
      # no further match is possible -- stop scanning.
      break
    }
    if (y == num) {
      print(i)
    }
  }
}
findNum(100)
|
c6e03d1bb567c398c9f1189743b8093ec8347760
|
1ff3a51b463c951aa02ef40a89c5a884c94f9516
|
/man/overlaidSimpleRegressionPlot.Rd
|
87ecfa1e1f4115f1a9a8b16a8798e7a5f6900426
|
[] |
no_license
|
cran/fit.models
|
3a250a89603637cfd2296b4cf25f6bcc8e38eda6
|
2548545703702dbc11c8a2b9ceda8da77777386e
|
refs/heads/master
| 2021-01-10T01:00:23.547075
| 2020-08-02T13:30:02
| 2020-08-02T13:30:02
| 17,696,066
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 817
|
rd
|
overlaidSimpleRegressionPlot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/overlaidSimpleRegressionPlot.R
\name{overlaidSimpleRegressionPlot}
\alias{overlaidSimpleRegressionPlot}
\title{Scatter Plot with Overlaid Fits}
\usage{
overlaidSimpleRegressionPlot(x, lwd.reg, col.reg, ...)
}
\arguments{
\item{x}{a \code{fit.models} object.}
\item{lwd.reg}{a vector with length equal to the number of models in
\code{x} specifying the line widths used in the plot.}
\item{col.reg}{a vector with length equal to the number of models in
\code{x} specifying the line colors used in the plot.}
\item{\dots}{additional arguments are passed to
\code{\link[lattice]{xyplot}}.}
}
\value{
the \code{trellis} object is invisibly returned.
}
\description{
Produces a scatter plot of the data with overlaid fits.
}
\keyword{hplot}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.