blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7a80908c2b2e6b54f11a2737668f11a49e1df469
|
f3843622adeae8d70b9ea87047379ebedf0a0193
|
/R/kmr_histogram.R
|
65347e7270935d92c68f66492b68eafc72a77c0f
|
[] |
no_license
|
c5sire/kmerize
|
0dda2ed25444d8c54c6b09459f81dd6a5c971bfb
|
0f6aa30504faa3c2cdd9ac3bf5dde64d2297af53
|
refs/heads/master
| 2021-01-14T17:17:21.430902
| 2020-06-03T14:29:38
| 2020-06-03T14:29:38
| 242,692,612
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 504
|
r
|
kmr_histogram.R
|
#' kmr_histogram
#'
#' Runs the external kmc_tools binary (command template supplied by the
#' project-level \code{histogram()} helper) to extract a k-mer histogram
#' from a kmc3 database.
#'
#' @param db path to kmc3 database directory; the database files inside are
#'   addressed as \code{<db>/<basename(db)>}
#' @param out filename for histogram data (default: \code{hist_<db>.txt})
#'
#' @return The path of the histogram output file (\code{out}).
#' @export
#'
# @examples
kmr_histogram <- function(db, out = paste0("hist_", basename(db), ".txt")) {
  out_dir <- dirname(out)
  # recursive = TRUE so nested output paths work; showWarnings = FALSE keeps
  # a directory created between the exists-check and the create from warning
  if (!dir.exists(out_dir)) dir.create(out_dir, recursive = TRUE, showWarnings = FALSE)
  bn <- basename(db)
  # kmc3 databases are addressed as <dir>/<basename>
  db <- file.path(db, bn)
  #base_cmd <- "usr/local/bin/kmerize/kmc_tools transform %s histogram %s"
  base_cmd <- histogram()  # sprintf template with two %s slots: db, out
  cmd <- sprintf(base_cmd, db, out)
  #message(out)
  system(cmd)
  return(out)
}
|
53e680b67eea3f64078152cc544b4715913e7035
|
ea805d721a3cdc2db7a75e38a9b212e4e1885778
|
/ribiosArg/man/parseStrings.Rd
|
3feb80bc728bd354f6bbad5d2aa3e209194ee465
|
[] |
no_license
|
grst/ribios
|
28c02c1f89180f79f71f21a00ba8ad8c22be3251
|
430056c85f3365e1bcb5e565153a68489c1dc7b3
|
refs/heads/master
| 2023-06-01T04:48:20.792749
| 2017-04-10T14:28:23
| 2017-04-10T14:28:23
| 68,606,477
| 0
| 0
| null | 2016-09-19T13:04:00
| 2016-09-19T13:04:00
| null |
UTF-8
|
R
| false
| false
| 1,228
|
rd
|
parseStrings.Rd
|
\name{parseStrings}
\alias{parseStrings}
\title{Parse collapsed multiple options into a vector of character strings}
\description{
This function parses collapsed multiple options into a vector of
character strings. Each option is optionally trimmed of leading and trailing empty
spaces given by \code{trim}. See examples.
}
\usage{
parseStrings(str, collapse = ",", trim=TRUE, ...)
}
\arguments{
\item{str}{Character, input string.}
\item{collapse}{Character, separators used between multiple options}
\item{trim}{Logical, whether individual options should be trimmed}
\item{\dots}{Parameters passed on to \code{trim}}
}
\details{
In case of multiple separators, they can be given by concatenating
with pipe signs, e.g. \code{,|\\t}.
If input string is \code{NULL}, the function returns \code{NULL}. This
can be useful in case the parameter is optional and not specified.
}
\value{A vector of character strings}
\author{Jitao David Zhang}
\seealso{\code{\link{strsplit}}, \code{\link{trim}}}
\examples{
parseStrings("a,b,c")
## options are trimmed
parseStrings("a,b,\tc,d\n")
## it works also with only one option
parseStrings("a")
## more than one separators
parseStrings("a,b,c;d", collapse=",|;")
}
|
4471545ebd73c11d8d4746464f4c06b88d8f6aad
|
ff25dc05d9be0f35bf3be844116c130197d93ef1
|
/man/alpha.aci.Rd
|
a3e0452962d1bcdfa37c508eee67b06176d59c5d
|
[] |
no_license
|
Justin8428/multicon
|
a7389643ea7b3fa36b3d3197c2eb16d34dd2644f
|
d01cc51f116e165aabd181ac1df355db35e57b4c
|
refs/heads/master
| 2023-03-16T12:50:33.453390
| 2015-01-28T00:00:00
| 2015-01-28T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,822
|
rd
|
alpha.aci.Rd
|
\name{alpha.aci}
\alias{alpha.aci}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Alpha Confidence Interval
}
\description{
Computes the asymptotic confidence interval for Cronbach's alpha following the method outlined by Koning & Franses (2003). }
\usage{
alpha.aci(x, k, n, CI = 0.95)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
An alpha coefficient to compute a confidence interval around.
}
\item{k}{
The number of items on which alpha was computed.
}
\item{n}{
The number of sampling units (observations) on which alpha was computed.
}
\item{CI}{
A numeric element between .00 and 1.00 indicating the desired confidence level.
}
}
\details{
Koning & Franses (2003) describe several methods for computing confidence intervals around Cronbach's alpha coefficient. This function returns what Koning and Franses (2003) refer to as the asymptotic confidence interval for alpha. The confidence interval is asymptotic and not necessarily symmetrical. For more info, see Koning and Franses (2003).
}
\value{
\item{Lower Limit }{Lower limit of confidence interval}
\item{Upper Limit }{Upper limit of confidence interval }
}
\references{
Koning, A. J. & Franses, P. H. (2003). Confidence Intervals for Cronbach's Alpha Coefficient values. ERIM Report Series Reference No. ERS-2003-041-MKT. Available at SSRN: http://ssrn.com/abstract=423658 }
\author{
Ryne A. Sherman
}
\seealso{
\code{\link{alpha.xci}}
\code{\link{vector.alpha}}
}
\examples{
#Compute the asymptotic CI for an observed Cronbach's alpha
#of .7 on 200 observations from a 10 item scale
alpha.aci(.7,10,200)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{alpha}
\keyword{confidence interval}% __ONLY ONE__ keyword per line
|
8c5331a2afa187b9367c56486861b115cb6a9363
|
17afbc057e8fba98bb687d12a3fe3dd017e99e86
|
/man/HEQueryCountWorker.Rd
|
ca973ddb866a6a4c73a3eeae4c31f23c5e4771e6
|
[] |
no_license
|
cran/distcomp
|
4fcb588c4210c0cd1309055a5946181d479b1d22
|
f87e29d541054abb52404f194f2cfe6358babb76
|
refs/heads/master
| 2022-09-17T21:48:41.342751
| 2022-09-01T20:00:02
| 2022-09-01T20:00:02
| 36,823,221
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 4,875
|
rd
|
HEQueryCountWorker.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/querycount.R
\name{HEQueryCountWorker}
\alias{HEQueryCountWorker}
\title{Create a homomorphic computation query count worker object for use with master objects generated by \code{\link[=HEQueryCountMaster]{HEQueryCountMaster()}}}
\description{
\code{HEQueryCountWorker} objects are worker objects at each site of
a distributed query count model computation using homomorphic encryption
}
\seealso{
\code{\link[=HEQueryCountMaster]{HEQueryCountMaster()}} which goes hand-in-hand with this object
}
\section{Super class}{
\code{distcomp::QueryCountWorker} -> \code{HEQueryCountWorker}
}
\section{Public fields}{
\if{html}{\out{<div class="r6-fields">}}
\describe{
\item{\code{pubkey}}{the master's public key visible to everyone}
\item{\code{den}}{the denominator for rational arithmetic}
}
\if{html}{\out{</div>}}
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-HEQueryCountWorker-new}{\code{HEQueryCountWorker$new()}}
\item \href{#method-HEQueryCountWorker-setParams}{\code{HEQueryCountWorker$setParams()}}
\item \href{#method-HEQueryCountWorker-queryCount}{\code{HEQueryCountWorker$queryCount()}}
\item \href{#method-HEQueryCountWorker-clone}{\code{HEQueryCountWorker$clone()}}
}
}
\if{html}{\out{
<details open><summary>Inherited methods</summary>
<ul>
<li><span class="pkg-link" data-pkg="distcomp" data-topic="QueryCountWorker" data-id="getStateful"><a href='../../distcomp/html/QueryCountWorker.html#method-QueryCountWorker-getStateful'><code>distcomp::QueryCountWorker$getStateful()</code></a></span></li>
<li><span class="pkg-link" data-pkg="distcomp" data-topic="QueryCountWorker" data-id="kosher"><a href='../../distcomp/html/QueryCountWorker.html#method-QueryCountWorker-kosher'><code>distcomp::QueryCountWorker$kosher()</code></a></span></li>
</ul>
</details>
}}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-HEQueryCountWorker-new"></a>}}
\if{latex}{\out{\hypertarget{method-HEQueryCountWorker-new}{}}}
\subsection{Method \code{new()}}{
Create a new \code{HEQueryCountWorker} object.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{HEQueryCountWorker$new(
defn,
data,
pubkey_bits = NULL,
pubkey_n = NULL,
den_bits = NULL
)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{defn}}{the computation definition}
\item{\code{data}}{the data which is usually the list of sites}
\item{\code{pubkey_bits}}{the number of bits in public key}
\item{\code{pubkey_n}}{the \code{n} for the public key}
\item{\code{den_bits}}{the number of bits in the denominator (power of 2) used in rational approximations}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
a new \code{HEQueryCountWorker} object
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-HEQueryCountWorker-setParams"></a>}}
\if{latex}{\out{\hypertarget{method-HEQueryCountWorker-setParams}{}}}
\subsection{Method \code{setParams()}}{
Set some parameters for homomorphic computations
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{HEQueryCountWorker$setParams(pubkey_bits, pubkey_n, den_bits)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{pubkey_bits}}{the number of bits in public key}
\item{\code{pubkey_n}}{the \code{n} for the public key}
\item{\code{den_bits}}{the number of bits in the denominator (power of 2) used in rational approximations}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-HEQueryCountWorker-queryCount"></a>}}
\if{latex}{\out{\hypertarget{method-HEQueryCountWorker-queryCount}{}}}
\subsection{Method \code{queryCount()}}{
Run the query count on local data and return the appropriate encrypted result to the party
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{HEQueryCountWorker$queryCount(partyNumber, token)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{partyNumber}}{the NCP party number (1 or 2)}
\item{\code{token}}{a token to use for identifying parts of the same computation for NCP1 and NCP2}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
the count as a list of encrypted items with components \code{int} and \code{frac}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-HEQueryCountWorker-clone"></a>}}
\if{latex}{\out{\hypertarget{method-HEQueryCountWorker-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{HEQueryCountWorker$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
|
ec657ceb12a61e0f6309775f0611286cb891a560
|
587285694e7c0dab9acb9fab2f457726f4583bb1
|
/R/extr_leafArea_fct.r
|
47eae6b7b6c4fd04885dc693a7f74afcea6038f8
|
[] |
no_license
|
hjkluo/RapidACi0927
|
76f4c3628d5aad35466b1104deb8e9d02995c41e
|
7f9ea9790169f828183ebd021d0f8f57ab65ccc2
|
refs/heads/master
| 2022-12-22T07:20:14.409532
| 2020-07-20T13:28:52
| 2020-08-08T14:13:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 569
|
r
|
extr_leafArea_fct.r
|
#' extr_leafArea function
#'
#' @description Hidden function. Retrieves a dataframe of leaf surface area by sample from
#' WinSEEDLE software output files. Reads the tab-delimited export (skipping
#' the 4-line header), keeps only the per-sample "GLOBAL" summary rows, and
#' selects the sample ID, date, and leaf-area columns.
#'
#' @param WinSEEDLE_filepath path for a winSEEDLE file (from the working directory)
#'
#' @return A dataframe of sample_ID with date and leaf area
extr_leafArea <- function(WinSEEDLE_filepath) {
  # Namespace-qualify select() as well as filter(): bare select() is easily
  # masked (e.g. by MASS::select) which silently breaks this pipeline.
  x <- read_delim(WinSEEDLE_filepath, delim = "\t", skip = 4, col_names = FALSE) %>%
    dplyr::filter(X2 == "GLOBAL") %>%
    # Positional columns per WinSEEDLE export layout — X1 = sample, X6 = date,
    # X19 = leaf area in mm^2 (assumed from original code; confirm on a sample file)
    dplyr::select(sample_ID = "X1", date = "X6", leafArea_mm2 = "X19")
  return(x)
}
|
1c3744d667c8140890d2ff138b752ffb19edad07
|
3031e443423a08fe9e33e369fb0e92fc8f67816a
|
/man/graphs3.Rd
|
b130331692765ba3e2d37074d53652d879532c09
|
[] |
no_license
|
HRDAG/DGA
|
e441b0e3638d536be245bcb6af87c45638760cf7
|
c3a684dca92dc42977969c9d80b69eadb7e7f579
|
refs/heads/master
| 2021-07-08T06:27:23.494429
| 2021-05-04T20:21:02
| 2021-05-04T20:21:02
| 20,732,790
| 2
| 2
| null | 2021-05-04T20:01:33
| 2014-06-11T16:40:09
|
R
|
UTF-8
|
R
| false
| true
| 597
|
rd
|
graphs3.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{graphs3}
\alias{graphs3}
\title{All Decomposable Graphical Models on Three Lists}
\format{
A list of lists. graphs3[[i]] is the \code{i}th model under
consideration. This consists of graphs3[[i]]$C, all of the cliques in that
model, and graphs3[[i]]$S, the separators.
}
\usage{
data(graphs3)
}
\description{
This dataset contains all of the cliques and separators for each of the
decomposable graphical models on three lists. On three lists, this is all of
the models.
}
\keyword{datasets}
|
a4dc09b2534bf9abb33caaca1483569735088ac9
|
cac8e9de4fb2fba4caa7a626bd58c82abf118fd5
|
/R_code_concepts_Illustration/R_sq_adj_R_saga.R
|
59ac79579bd4a8d5546a0a9a64138a2187f7f52a
|
[] |
no_license
|
rheasukthanker/Computational_Statistics_ETH
|
63ed3b680573223bd52691b526e93b571b97d272
|
ad29f3cd2f3744386da361c44e8eec4357f597b3
|
refs/heads/main
| 2023-06-26T16:03:29.502249
| 2021-07-31T06:33:51
| 2021-07-31T06:33:51
| 391,268,900
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 531
|
r
|
R_sq_adj_R_saga.R
|
# RSS, R squared and adjusted R squared
# Demonstrates R^2 vs adjusted R^2 as the number of predictors grows:
# with pure-noise predictors, plain R^2 climbs toward 1 while adjusted
# R^2 eventually drops sharply.
n <- 500
R_sq <- rep(0, n - 3)     # plain R^2 for fits with p = 2, ..., n - 2 predictors
Adj_Rsq <- rep(0, n - 3)  # adjusted R^2 for the same fits
for (i in 2:(n - 2)) {
  # n * i standard-normal draws arranged as an n x i design matrix
  X <- matrix(rnorm(n * i), nrow = n, ncol = i)
  # response depends only on the first two columns; the remaining i - 2 are noise
  y <- 2 * X[, 1] + 4 * X[, 2] + rnorm(n)
  fit_diffnp <- lm(y ~ X)
  R_sq[i - 1] <- summary(fit_diffnp)$r.squared
  Adj_Rsq[i - 1] <- summary(fit_diffnp)$adj.r.squared
}
# Side-by-side plots of R^2 and adjusted R^2 against model size
par(mfrow = c(1, 2))
plot(seq_len(n - 3), R_sq)
plot(seq_len(n - 3), Adj_Rsq)
length(seq_len(n - 3))
# See the significant drop in adj_Rsq when p is very large; justifies intuition.
# Ask: when, in general, can you trust a high adjusted R squared value?
|
2aa2647f7378d8c3c8938a1c5c3a81788dfbd45b
|
216b7cbdcd61f0cdfc5a8f74e8a24d68b56c3057
|
/R_scripts/007_gca_property_xfer.R
|
838180090c5f7c1a586e332d00d070c249096161
|
[] |
no_license
|
Preetis17/CapstoneProject
|
7e0ebb9e02958ea100cc8153e625b4fe27644997
|
c026bc5369b77d9d46ffffb2f5f7c629d62ea4ad
|
refs/heads/master
| 2020-03-23T01:08:46.791202
| 2018-12-13T01:26:08
| 2018-12-13T01:26:08
| 140,902,835
| 0
| 2
| null | 2018-12-12T04:50:34
| 2018-07-14T00:07:43
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 11,819
|
r
|
007_gca_property_xfer.R
|
# ... -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# ... file : grid_cell_assignment.R
# ...
# ... organizer to prep data sets for submit to which_grdi_cell_function()
# ...
# ... ref : https://stackoverflow.com/questions/21977720/
# ... r-finding-closest-neighboring-point-and-number-of-neighbors-within-a-given-rad
# ... -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# ... -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# ... 21-sep-2018
# ...
# ... patrick.mcdevitt@smu.edu
# ...
# ... -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
rm(list=ls())
library(sp)
library(rgeos)
library(geosphere)
library(dplyr)
library(tictoc)
library(ggplot2)
library(viridis)
library(wesanderson)
library(ggrepel)
library(rgdal)
library(rgeos)
library(maptools)
printf <- function(...) invisible(cat(sprintf(...)))
# ... -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# ... define some directory locations (all except home_dir are relative to it)
# ... -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
home_dir <- ("/home/mcdevitt/_ds/_smu/_src/CapstoneProject/")
data_dir <- ("./data/")
grid_mapped_dir <- ("./data/grid_mapped")
plot_dir <- ("./plots/")
src_dir <- ("./R_scripts")
zillow_dir <- ("./data/ZillowNeighborhoods-OH")
# ... -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# ... define some utility functions (sourced scripts provide
# ... which_grid_cell(), which_grid_cell_big(), cincy_zip_code, etc.)
# ... -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
setwd(home_dir)
setwd(src_dir)
source("./which_grid_cell_function.R")
source("./which_grid_cell_function_big.R")
source("./cincy_zip_codes.R")
source("./clean_fire_incident.R")
# ... -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# ... read in some data sets
# ... -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
setwd(home_dir)
setwd(data_dir)
# ... -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# 250 m grid-cell centroids, pre-tagged with neighborhood names
grid_file <- "grid_points_250m_w_neighborhood"
grid_centroid <- read.csv(paste0('./', grid_file, '.csv'), stringsAsFactors = FALSE, header = TRUE)
# ... -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# geocoded lat/long per property id — pipe-delimited export
infile <- "hamilton_county_property_xfer_2008t2018_geocoded"
property_id_geocode <- read.table(paste0('./', infile, '.txt'),
sep = "|",
stringsAsFactors = FALSE, header = TRUE)
# raw property-transfer records, 2008-2018
infile <- "hamilton_county_property_xfer_2008t2018"
property <- read.csv(paste0('./', infile, '.csv'),
stringsAsFactors = FALSE, header = TRUE)
#> names(property)
# [1] "book" "plat" "parcel" "parcel_id" "tax_district"
# [6] "owner_name_1" "owner_name_2" "land_value" "building_value" "property_class"
#[11] "house_number" "street_name" "street_suffix" "zip_code" "month_of_sale"
#[16] "day_of_sale" "year_of_sale" "number_parcels_sold" "sale_price" "valid_sale"
#[21] "conveyance_number" "deed_type" "appreaisal_area" "prior_owner" "property_number"
# keep only Cincinnati zip codes (cincy_zip_code comes from sourced script)
property$zip_five <- substr(property$zip_code, 1, 5)
property <- property[property$zip_five %in% cincy_zip_code, ]
cols_2_keep <- c("property_class", "zip_five", "month_of_sale", "day_of_sale", "year_of_sale",
"number_parcels_sold", "sale_price", "valid_sale", "deed_type",
"property_number")
property <- property[cols_2_keep]
# attach lat/long; merge joins on the shared key column(s) — presumably
# property_number; verify against the geocoded file's header
property <- merge(property, property_id_geocode)
# coarse bounding box around Cincinnati to drop geocoding outliers
cincy_min_latitude <- 39.0
cincy_max_latitude <- 39.3
cincy_max_longitude <- -84.3
cincy_min_longitude <- -84.72
property <- property[property$lat > cincy_min_latitude & property$lat < cincy_max_latitude,]
property <- property[property$long > cincy_min_longitude & property$long < cincy_max_longitude,]
# ... -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# ... read in shapefile of neighborhoods for plotting
# ... -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
setwd(home_dir)
setwd(zillow_dir)
oh_shapefile <- readOGR("ZillowNeighborhoods-OH.shp", layer="ZillowNeighborhoods-OH")
cvg_shapefile <- oh_shapefile[oh_shapefile$City == "Cincinnati", ]
# ... drop 2 neighborhoods which are not in Cincinnati
cvg_shapefile <- cvg_shapefile[cvg_shapefile$Name != "Fruit Hill", ]
cvg_shapefile <- cvg_shapefile[cvg_shapefile$Name != "Forestville", ]
# ... -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# ... eliminate points that are outside city boundaries
# ... (point-in-polygon via sp::over; rows with no containing polygon get NA)
# ... -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
df_overlap <- property
df_overlap$Longitude <- df_overlap$long
df_overlap$Latitude <- df_overlap$lat
coordinates(df_overlap) <- ~Longitude + Latitude
# assume the points share the shapefile's CRS; no reprojection is done here
proj4string(df_overlap) <- proj4string(cvg_shapefile)
df_in_city <- over(df_overlap, cvg_shapefile)
df_prop_city <- cbind(property, df_in_city)
# NA RegionID means the point fell outside every neighborhood polygon
df_prop_city <- df_prop_city[!is.na(df_prop_city$RegionID),]
property <- df_prop_city
# ... -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# ... do a little present value adjustment
# ... (scale each sale price by a per-year market appreciation factor)
# ... -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
setwd(home_dir)
setwd(data_dir)
infile <- "zillow_market_appreciation"
value_adj <- read.csv(paste0('./', infile, '.csv'),
stringsAsFactors = FALSE, header = TRUE)
names(property)[names(property) == "year_of_sale"] <- "year"
names(property)[names(property) == "Name"] <- "neighborhood"
property <- (merge(value_adj, property, by = 'year'))
property$sale_price_adj <- property$sale_price * property$appreciation
# raw price and join helper columns are no longer needed
cols_2_drop <- c("median", "appreciation", "month_of_sale", "day_of_sale", "sale_price",
"geo_accuracy", "State", "County", "City", "RegionID")
property[, cols_2_drop] <- list(NULL)
# ... -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# ... add property category description from the county land-use code dictionary
# ... -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
setwd(home_dir)
setwd(grid_mapped_dir)
infile <- "../dictionaries/hamilton_county_land_use_codes.csv"
property_codes <- read.csv(infile,
stringsAsFactors = FALSE, header = TRUE)
property <- merge(property, property_codes, by = "property_class", all.x = TRUE)
property$category <- tolower(property$category)
# ... reduce category types to 4 (rare/unknown categories folded into "other")
property$category[property$category == "agricultural"] <- "other"
property$category[property$category == "public utilities"] <- "other"
property$category[is.na(property$category)] <- "other"
# NOTE(review): "U" (presumably Unknown) sales are treated as valid here — confirm
property$valid_sale[property$valid_sale == "U"] <- "Y"
# ... -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# ... neighborhood characteristics: per-neighborhood sale-price summaries
# ... -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
property_hoods <- as.data.frame(property %>%
group_by(neighborhood, category, valid_sale) %>%
summarize(median_sale_price = median(sale_price_adj),
max_sale_price = max(sale_price_adj),
num_prop_sales = n()))
property_hood_valid <- property_hoods[property_hoods$valid_sale == "Y", ]
property_hood_valid_resid <- property_hood_valid[property_hood_valid$category == "residential", ]
# assign each property record to its nearest 250 m grid cell
# (which_grid_cell_big comes from the sourced helper script)
df_mapped <- which_grid_cell_big(grid_centroid, property)
# ... -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# ... accumulate sum of costs in each grid cell
# ... (per-cell, per-category, per-validity sale-price summaries)
# ... -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
property_agg <- as.data.frame(df_mapped %>%
group_by(cell_id, lat_cell, long_cell, category, valid_sale) %>%
summarize(median_sale_price = median(sale_price_adj),
max_sale_price = max(sale_price_adj),
num_prop_sales = n()))
# ... make a plot to visualize result
setwd(home_dir)
setwd(plot_dir)
# base layer: city neighborhood outlines drawn as small points
hoods <- ggplot() + geom_point(data=cvg_shapefile, aes(x=long, y=lat, group=group), size = 0.1, alpha = 0.4)
# ... Basic map of event severity
# ... !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# ... change : data = xxx ; color = yyy for each new data set / variable to map
# ... !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# ... plot 1 : median adjusted sale price per grid cell (log color scale)
png(filename = "property_xfers_median_sale_price.png",
units = "in",
width = 15,
height = 9,
pointsize = 12,
res = 72)
# only plot cells with median price above $50k to reduce clutter
prop_sub <- property_agg[property_agg$median_sale_price > 50000,]
hoods +
geom_point(data = prop_sub, aes(x = long_cell, y = lat_cell, color = log10(median_sale_price+1)), shape = 19, size = 2.5, alpha = 0.8) +
# geom_point(data = df_mapped, aes(x = long, y = lat), color = "firebrick2", shape = 5, size = 0.1, alpha = 0.1) +
geom_point(data = grid_centroid, aes(x = long, y = lat), color = "forestgreen", size = 0.2, alpha = 0.2) +
ggtitle("Cincinnati - Property Transfers (2008 - 2018)") +
xlab("Longitude") + ylab("Latitude") +
# theme_void() +
scale_color_gradientn(colors = rev(rainbow(8))[3:9], name = "Median Sale Price (log)") +
coord_cartesian(xlim = c(-84.35, -84.72), ylim = c(39.04, 39.23)) +
theme(legend.position = c(0.01, 0.95),
legend.justification = c(0, 1))
dev.off()
# ... plot 2 : number of transfers per grid cell (log color scale)
png(filename = "property_xfers_num_xfers_price.png",
units = "in",
width = 15,
height = 9,
pointsize = 12,
res = 72)
# drop extreme-count cells so the color scale stays readable
prop_sub2 <- property_agg[property_agg$num_prop_sales < 1000,]
hoods +
geom_point(data = prop_sub2, aes(x = long_cell, y = lat_cell, color = log10(num_prop_sales)), shape = 19, size = 2.5, alpha = 0.8) +
# geom_point(data = df_mapped, aes(x = long, y = lat), color = "firebrick2", shape = 5, size = 0.1, alpha = 0.1) +
geom_point(data = grid_centroid, aes(x = long, y = lat), color = "forestgreen", size = 0.2, alpha = 0.2) +
ggtitle("Cincinnati - Property Transfers (2008 - 2018)") +
xlab("Longitude") + ylab("Latitude") +
# theme_void() +
scale_color_gradientn(colors = rev(rainbow(8))[3:9], name = "Number Xfers (log)") +
coord_cartesian(xlim = c(-84.35, -84.72), ylim = c(39.04, 39.23)) +
theme(legend.position = c(0.01, 0.95),
legend.justification = c(0, 1))
dev.off()
# ... -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# ... save file to csv : per-record mapping, per-cell aggregates,
# ... and per-neighborhood residential medians
# ... -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
setwd(home_dir)
setwd(grid_mapped_dir)
base_name <- "cvg_property_xfers"
file_name <- paste0(base_name, "_mapped_to_grid_cells.csv")
write.table(df_mapped, file = file_name, sep = ",",
row.names = FALSE,
col.names = TRUE)
base_name <- "cvg_property_xfers"
file_name <- paste0(base_name, "_aggregated_to_cell.csv")
write.table(property_agg, file = file_name, sep = ",",
row.names = FALSE,
col.names = TRUE)
base_name <- "cvg_property_xfers"
file_name <- paste0(base_name, "_neighborhood_medians.csv")
write.table(property_hood_valid_resid, file = file_name, sep = ",",
row.names = FALSE,
col.names = TRUE)
# ... -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# ... end_of_file
# ... -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
|
4966101834815d41caa84f44aad663215454c6cc
|
712f63922a9477ce44541aa97fb26e75a77e2420
|
/man/plot.FCVAR_grid.Rd
|
94537fd3a26a7c443abc179ad1c2dbdb72123fc7
|
[] |
no_license
|
LeeMorinUCF/FCVAR
|
caacba62754044a5ff318144ac97d2c10e31521f
|
3a7684ade7d27dbaad0907229c11ed8ed8d8ad85
|
refs/heads/master
| 2022-06-04T08:09:57.346547
| 2022-05-04T18:54:02
| 2022-05-04T18:54:02
| 217,621,323
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,273
|
rd
|
plot.FCVAR_grid.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FCVAR_aux.R
\name{plot.FCVAR_grid}
\alias{plot.FCVAR_grid}
\title{Plot the Likelihood Function for the FCVAR Model}
\usage{
\method{plot}{FCVAR_grid}(x, y = NULL, ...)
}
\arguments{
\item{x}{An S3 object of type \code{FCVAR_grid} output from \code{FCVARlikeGrid}.}
\item{y}{An argument for generic method \code{plot} that is not used in \code{plot.FCVAR_grid}.}
\item{...}{Arguments to be passed to methods, such as graphical parameters
for the generic plot function.}
}
\description{
\code{plot.FCVAR_grid} plots the likelihood function from \code{FCVARlikeGrid}.
\code{FCVARlikeGrid} performs a grid-search optimization
by calculating the likelihood function
on a grid of candidate parameter values.
This function evaluates the likelihood over a grid of values
for \code{c(d,b)} (or \code{phi}, when there are constraints on \code{c(d,b)}).
It can be used when parameter estimates are sensitive to
starting values to give an approximation of the global max which can
then be used as the starting value in the numerical optimization in
\code{FCVARestn}.
}
\note{
Calls \code{graphics::persp} when \code{x$Grid2d == TRUE} and
calls \code{graphics::plot} when \code{x$Grid2d == FALSE}.
}
\examples{
\donttest{
opt <- FCVARoptions()
opt$dbStep1D <- 0.1 # Coarser grid for plotting example.
opt$dbStep2D <- 0.2 # Coarser grid for plotting example.
opt$gridSearch <- 0 # Disable grid search in optimization.
opt$dbMin <- c(0.01, 0.01) # Set lower bound for d,b.
opt$dbMax <- c(2.00, 2.00) # Set upper bound for d,b.
opt$constrained <- 0 # Impose restriction dbMax >= d >= b >= dbMin ? 1 <- yes, 0 <- no.
x <- votingJNP2014[, c("lib", "ir_can", "un_can")]
opt$progress <- 2 # Show progress report on each value of b.
likeGrid_params <- FCVARlikeGrid(x, k = 2, r = 1, opt)
graphics::plot(likeGrid_params)
}
}
\seealso{
\code{FCVARoptions} to set default estimation options.
\code{plot.FCVAR_grid} plots the likelihood function from \code{FCVARlikeGrid}.
Other FCVAR auxiliary functions:
\code{\link{FCVARforecast}()},
\code{\link{FCVARlikeGrid}()},
\code{\link{FCVARsimBS}()},
\code{\link{FCVARsim}()},
\code{\link{FracDiff}()}
}
\concept{FCVAR auxiliary functions}
|
cc0b2a6d449f5ca326aafcdd946daaa0291c3989
|
c2728bfe5bf2230eca3b3c7069b78c97b2a2c6bc
|
/proyecto_clustering/script_clustering.R
|
e9f93180f25d17f75d39e08c6e54e0c760b63751
|
[] |
no_license
|
pedrohserrano/topological-data-analysis
|
4d1a37269d7b2458c4cfe52b00d67989b28892ac
|
049346a32708a87814c24e1de002254095eeb709
|
refs/heads/master
| 2021-06-18T12:19:06.153829
| 2017-06-17T03:45:35
| 2017-06-17T03:45:35
| 55,701,407
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,225
|
r
|
script_clustering.R
|
#CLUSTERING
#----------------------------- LIBRARIES -----------------------------
#install.packages("fpc")
#install.packages("kernlab")
#install.packages("dbscan")
#install.packages("jsonlite")
#install.pachkages("qrage")
library(fpc)
library(dbscan)
library(kernlab)
library(jsonlite)
library(igraph)
library(RColorBrewer)
library(qrage)
#----------------------------- LOAD DATA -----------------------------
rm(list = ls())
rutawork = ('/home/denny/itam/topologia/proyecto_clustering/')
datos <- read.csv(paste(rutawork,'ecobici_preprocessed.csv',sep = ""), header = TRUE, sep = ",", quote="\"", dec=".", fill = TRUE)
str(datos)
#por lo pronto, me quedo con las variables int
datos <- datos[c("Edad_Usuario", "Distancia_km", "Duracion_viaje")]
#----------------------------- PCA ----------------------------------
#Esta parte nos ayuda a seleccionar la variable mas significativa que explica la dinamica de los datos
#PCA
# datos.pca <- prcomp(datos ,center = TRUE,scale. = TRUE)
# summary(datos.pca)
# str(datos.pca)
# datos.pca$rotation
#en este caso observarmos que la variable mas importante es distancia_km
#KERNEL PCA
variables <- length(datos)
sigma <- 1
kres <- kpca(~., data=datos,features=variables,kernel="rbfdot",kpar = list(sigma = sigma))
data_kpca <- as.data.frame(kres@rotated)
#ordena de mayor a menor (por la que tiene mayor varianza)
datos_prueba <- data_kpca[c("V1","V2")]
#----------------------------- GENERATE INTERVALS----------------------------------
# Mapper-style cover: split the filter variable 'var_o' into n_int intervals
# that overlap their neighbours by proportion p of one interval length.
df <- datos_prueba #choose a dataset
#----------------------------- NECESSARY PARAMETERS -----------------------------
#var_o <- data$x1 #variable we will use to make the overlapping subsets
var_o <- datos_prueba$V1 #if we want to use kernel pca variable to cut
n_int <- 6 #number of intervals we want
p <- 0.2 #proportion of each interval that should overlap with the next
#----------------------------- CREATING THE INTERVALS -----------------------------
#this section will create a data frame in which we will construct overlapping intervals
intervals_centers <- seq(min(var_o),max(var_o),length=n_int) #basic partition = centers
interval_length <- intervals_centers[2]-intervals_centers[1] #to create the overlaps of p% of this length
intervals <- data.frame(centers=intervals_centers) #create a data frame
#create the overlapping intervals (each extends 0.5+p interval-lengths on each side)
intervals$min <- intervals_centers - (0.5+p)*interval_length
intervals$max <- intervals_centers + (0.5+p)*interval_length
intervals$interval <- seq(1,n_int)
intervals$name <- with(intervals, sprintf("[%.2f;%.2f)",min,max))
#function that will split the variable according to the intervals
res <- lapply(split(intervals,intervals$interval), function(x){
  return(df[var_o> x$min & var_o <= x$max,]) #res will be a list with each element res[i]
})                                           #being the points on the i'th subset
#res
#----------------------------- CLUSTERING FOR EACH INTERVAL -----------------------------
###-------------------------FUNCTIONS------------------------------------------------
## We obtain a clustering that attemps to not depend on the eps parameter
## we give a maximum eps for attempting clustering and evaluate on the percentage of
## noise obtained
# noClust(data, eps, eps_cl, np): repeatedly run dbscan::optics, growing 'eps'
# and shrinking 'eps_cl', until the noise fraction drops below 'np'.
# Returns list(cluster, noise_perc[, num_clust]).
noClust<-function(data, eps=0.7, eps_cl=6.5, np=.1){
  #Default parameters for dbscan
  p_noise <- 0.05 # we use this as a rule of thumb
  ##Number of clusters detected
  numClust<-0
  ##Noise percentage
  noise_perc<-1
  # minPts scales with the subset size (5% of the points)
  MinPts <- p_noise*dim(data)[1]
  # We iterate eps through an geometric function starting on the given value
  for(j in 0:10){
    # NOTE(review): eps compounds each outer pass (eps <- eps*(1+j)), so it
    # grows much faster than geometrically — confirm this is intended.
    eps<-eps+j*eps
    ## We iterate also on the eps_cl parameter with an exponential function
    for(i in 0:3 ){
      # optics() comes from the dbscan package; cluster id 0 marks noise
      result<-optics(data,eps=eps,minPts=MinPts,eps_cl = eps_cl*10**-i)
      noise_perc=length(result$cluster[result$cluster==0])/length(result$cluster[result$cluster!=0])
      if (noise_perc < np) {
        numClust<-max(result$cluster)
        return(list(cluster=result$cluster, noise_perc=noise_perc, num_clust=numClust))
      }
    }
  }
  # Fallback when the noise target was never met: return the last attempt.
  # NOTE(review): this branch omits 'num_clust' — callers below only use
  # $cluster, but the asymmetric shape is worth confirming/fixing upstream.
  list(cluster=result$cluster, noise_perc=noise_perc)
}
p_noise <- 0.05
#ITERATE EVERY ELEMENT OF THE LIST (res[i]) AND CLUSTERIZE INSIDE
# For each pair of consecutive overlapping intervals: cluster both subsets,
# write the labels back into 'df' (one new column per interval), and collect
# the pairs of cluster ids that share points in the overlap.
ints<-list()
counter1<-1;counter2<-1  # NOTE(review): these counters appear unused below
for(i in 1:(n_int-1)){
  df1<-as.data.frame(res[[i]])
  df2<-as.data.frame(res[[i+1]])
  if(i==1){
    MinPts <- p_noise*dim(df1)[1]
    result1<-(noClust(df1))
    df1$cluster1 <- result1$cluster
    #create columns in the original matrix to show which cluster they belong to
    df[dim(df)[2]+i]<-rep(0,dim(df)[1])
    df[row.names(df1),dim(df)[2]]<-result1$cluster
  }else{result1 <- result2 #use the results for the last iteration
    df1$cluster1 <- result1$cluster #this ensures that the cluster labels will be correct for the adj. matrix
  }
  MinPts <- p_noise*dim(df2)[1]
  result2<-(noClust(df2))
  df2$cluster2 <- result2$cluster
  #create columns in the original matrix to show which cluster they belong to
  df[dim(df)[2]+1]<-rep(0,dim(df)[1])
  df[row.names(df2),dim(df)[2]]<-result2$cluster
  # rows present in both intervals = points of the overlap region
  intersection <- merge(df1,df2,all=TRUE) #points in the intersection
  intersection[is.na(intersection)] <- 0
  ints[[i]]<-as.data.frame(unique(intersection[3:4])) #list of all the clusters that intersect
}
#plot(df$V1,df$V2)
#---------------------DISTANCE BETWEEN CLUSTERS AND ADJ_MATRIX -----------------------------
# Assuming we read a table that has one column per interval
# Read the table with the intervals
base <- df
# Create a column for the clusters
base$clusters<-0
# Column where interval 1 starts:
int_ini <- 3
# Column holding the last interval:
int_fin <- 8
# Column where the "clusters" column was created:
col_cluster <- 9
# Build, per observation, a comma-separated string of every non-zero cluster
# id it was assigned to across the intervals.
for(i in seq(nrow(base[,int_ini:int_fin]))){
  temp<-c()
  for(m in seq(int_ini,int_fin)){
    if (base[i,m] > 0){
      temp<-c(paste0(temp,base[i,m],sep = ","))
    }
  }
  if(length((temp))>0){
    # deduplicate ids before storing them back as "a,b,c"
    aux<-unlist(strsplit(temp,","))
    aux2<-unique(aux)
    aux3<-paste(aux2,collapse=",")
    base[i,col_cluster]<-aux3
  }
}
# Reduce 'base' to the cluster-membership string, then expand it into one
# 0/1 dummy column per cluster id.
base <- data.frame(base$clusters)
names(base) <- c("clusters")
# Create a variable numbering each observation
base$obs <- paste("obs",seq(1,length(base$clusters)))
# Detect the distinct cluster ids present anywhere
num_clusters <- sort(unique(strsplit(paste0(base$clusters, collapse=","),",")[[1]]))
clusters <- length((num_clusters))
# Create a zero-filled column for each cluster
for(x in num_clusters){
  base[[paste("c",x,sep="_")]] <- rep(0,nrow(base))
}
# For each created column, set a 1 according to the cluster the obs belongs to
base$clusters<- as.character(base$clusters)
# The dummy columns start at column 3 (cols 1-2 are 'clusters'/'obs'), so
# cluster id x is written to column x+3 (id 0, noise, lands in column 3).
for(i in seq(nrow(base))){
  vector <- strsplit(base$clusters[i], ",")[[1]]
  vector <- sort(as.numeric(vector))
  for(x in vector){
    base[i,(x+3)] <- 1
  }
}
# Pairwise overlap between clusters: for each pair, count shared observations
# and normalise by the smaller cluster size, giving a symmetric adjacency matrix.
dummy_mat<-base[,3:ncol(base)]
n_clusters<-ncol(dummy_mat)
# cluster_list[[k]] = row indices of the observations in cluster k
cluster_list<-lapply(1:n_clusters,function (col) {which(dummy_mat[,col]==1)})
adj_matrix<-matrix(0,nrow=n_clusters,ncol=n_clusters)
for(i in 1:(n_clusters-1)){
  for(j in (i+1):n_clusters){
    distancia<-setdiff(cluster_list[[i]], cluster_list[[j]])
    cercania<-length(cluster_list[[i]])-length(distancia)  # = size of intersection i,j
    adj_matrix[i,j]<-round(cercania/min(length(cluster_list[[i]]),length(cluster_list[[j]])),2)
    adj_matrix[j,i]<-adj_matrix[i,j]
  }
}
# summary_cluster[1,k] = size of cluster k (used later for node sizes)
summary_cluster<-matrix(0,nrow=1,ncol=n_clusters)
for(i in 1:n_clusters){
  summary_cluster[1,i]<-length(cluster_list[[i]])
}
#KEPLER
# Build node metadata and serialise the graph to the JSON shape expected by
# the D3 force-directed template in www/index.html.
nodes.n <- clusters
nodes.size<- as.numeric(summary_cluster)/100  # scale cluster sizes for display
nodes.tooltips <- paste("Grupo:", 1:nodes.n)
nodes.names <- 1:nodes.n
nodes.color <- as.character(1:nodes.n)
# ------- Now we have to build a JSON out of that -----------------------------
adj.matrix <- adj_matrix
aux_mat <- data.frame()
# one link row per non-zero adjacency entry; ids are 0-based for d3
for(i in 1:nodes.n) for(j in 1:nodes.n) if(adj.matrix[i, j]!=0) aux_mat <- rbind(aux_mat, data.frame(source=i-1, target=j-1, value=adj.matrix[i, j]))
linksJSON <- toJSON(aux_mat)
nodesJSON <- toJSON(data.frame(color=nodes.color, group=nodes.size, name=nodes.names, tooltip=nodes.tooltips))
graphJSON <- sprintf("{\"nodes\": %s, \"links\": %s}", nodesJSON, linksJSON)
#head(graphJSON)
# ------------ Create the HTML ----------------------------------------------------------
# Inject the freshly-built graph JSON into the template's "graph =" line and
# open the result in a browser. Paths are machine-specific; the commented
# lines are the relative-path variants.
htmlFile <- readLines('/home/denny/itam/topologia/ManifoldLearning/www/index.html')
#htmlFile <- readLines("www/index.html")
graph_def_line <- which(grepl("graph =", htmlFile))
#htmlFile[graph_def_line] <- sprintf("graph = %s;", graphJSON)
htmlFile[graph_def_line] <- sprintf("graph = %s;", graphJSON)
#writeLines(htmlFile, "www/index.html")
writeLines(htmlFile, '/home/denny/itam/topologia/ManifoldLearning/www/index.html')
browseURL("file:////home/denny/itam/topologia/ManifoldLearning/www/index.html")
#---------------------------- OTHER VISUALIZATION: IGRAPH ---------------------------------------
# Qualitative palette for the cluster nodes (RColorBrewer)
colores <- brewer.pal(12,"Set3")

# Plot the Mapper graph from an adjacency matrix using igraph.
#   mat     : (weighted) adjacency matrix
#   colores : vertex colors
#   tam     : vertex sizes; the default c(1) means "uniform size"
#   lay     : layout name; only 'kamada' (Kamada-Kawai) is implemented
grafica <- function(mat, colores, tam = c(1), lay = 'kamada') {
  # BUG FIX: the original 'if(tam==c(1))' compared element-wise and made if()
  # fail (R >= 4.2) or warn whenever a real size vector was passed; test the
  # scalar default explicitly instead.
  if (length(tam) == 1 && all(tam == 1)) {
    tam <- rep(1, dim(mat)[1])
  }
  g <- graph.adjacency(mat, mode = 'undirected', weighted = TRUE)
  g <- simplify(g)  # drop self-loops and duplicate edges
  if (lay == 'kamada') {
    plot(g, vertex.color = colores, vertex.size = (tam / sum(mat)),
         edge.arrow.size = .3, rescale = FALSE, layout = layout.kamada.kawai)
  }
}
grafica(adj_matrix, colores, summary_cluster, 'kamada')
#------------------------ OTHER VISUALIZATION: QRAGE ------------------------
# Convert an adjacency matrix into the three-column edge list (source, target,
# weight) expected by force-directed-graph widgets such as qrage. Only the
# lower triangle including the diagonal is emitted, so each undirected edge
# appears exactly once.
tofdg <- function(matriz){
  n <- dim(matriz)[1]
  # Exact number of lower-triangle cells incl. diagonal: n*(n+1)/2.
  # (The previous n^2/2 preallocation was too small — and non-integer for odd
  # n — forcing the data frame to grow row by row.)
  a <- as.data.frame(matrix(0, nrow = n * (n + 1) / 2, ncol = 3))
  contador <- 0
  for (i in seq_len(ncol(matriz))) {
    for (j in 1:i) {
      a[contador + 1, 1] <- i                         # source node (1-based)
      a[contador + 1, 2] <- j                         # target node (1-based)
      a[contador + 1, 3] <- as.numeric(matriz[i, j])  # edge weight
      contador <- contador + 1
    }
  }
  a
}
# Edge list, node sizes and node colors for the qrage htmlwidget
x <- tofdg(adj.matrix)
z <- as.data.frame(cbind(seq(1,clusters,1),t(summary_cluster)))
colores2 <- as.data.frame(cbind(seq(1,clusters,1),t(colores)[1:clusters]))
# cut=0.01 hides links with weight below that threshold
qrage(links=x, width = 1000, height = 800,distance=8000,nodeValue=z
,nodeColor=colores2,linkColor='#00f',arrowColor='#f00'
,cut=0.01,textSize=12
,linkWidth=c(1,8),linkOpacity=c(0.6,1))
|
d2da2544d822e9b8fb94673fb6fb58ac9c766149
|
2f5ed17ace2ae9c7a1102617ca1dcc91ae1f2466
|
/man/qqnormsim.Rd
|
7c7c1fa80afd225f2df06a1fe6fab04e4966d117
|
[] |
no_license
|
jbryer/DATA606
|
0b9f79590d257040e997b48a78c3b0c9ce0b006c
|
3c702d4b08af2e2258d54dc31b13ae61a8e29bcd
|
refs/heads/master
| 2023-08-17T04:27:03.710532
| 2023-08-11T14:59:38
| 2023-08-11T14:59:38
| 39,025,976
| 6
| 15
| null | 2022-11-11T22:27:03
| 2015-07-13T17:09:52
|
HTML
|
UTF-8
|
R
| false
| true
| 275
|
rd
|
qqnormsim.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qqnormsim.R
\name{qqnormsim}
\alias{qqnormsim}
\title{Simulates QQ-plots with the given data.}
\usage{
qqnormsim(dat)
}
\description{
Simulates QQ-plots with the given data.
}
\author{
OpenIntro
}
|
80b9475072c205f98fbbab396008a783256ea2db
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/PROscorer/examples/fsfi.Rd.R
|
f4e69b69ba1d7a0cff487367eae9143fa2db2920
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 490
|
r
|
fsfi.Rd.R
|
# Auto-extracted example script for PROscorer::fsfi (scores the Female
# Sexual Function Index from 19 item responses named with prefix 'f').
library(PROscorer)
### Name: fsfi
### Title: Score the Female Sexual Function Index (FSFI)
### Aliases: fsfi
### ** Examples
# Creating data frame of fake FSFI responses
dat <- PROscorerTools::makeFakeData(n = 10, nitems = 19, values = 0:5,
prefix = 'f')
# Items 1, 2, 15, 16 are regenerated on the 1-5 range (no zero) —
# presumably these FSFI items have no "0" response option; verify in docs.
dat1 <- PROscorerTools::makeFakeData(n = 10, nitems = 4, values = 1:5)
names(dat1) <- c('f1', 'f2', 'f15', 'f16')
dat[c(1, 2, 15, 16)] <- dat1
# Scoring the fake FSFI responses
fsfi(dat, 'f')
|
a6d4cdfcba65d62b62bd1f0721b25e10d6ae65b8
|
c194c5236006a758b29bd4d530ad563dc9ecab7e
|
/inst/apps/diagram_both/server.R
|
8d522c4c9f0a068823d91b5857f1b910dc1e8a62
|
[] |
no_license
|
Auburngrads/teachingApps
|
1087c20a21992433a2f8451db7b1eaa7d1d2cb89
|
b79c192e5f74c5e8376674d4fb9e0b95a426fe03
|
refs/heads/master
| 2021-03-16T07:49:56.579527
| 2020-06-14T12:10:12
| 2020-06-14T12:10:12
| 51,677,745
| 15
| 7
| null | 2018-03-01T03:44:58
| 2016-02-14T03:22:47
|
R
|
UTF-8
|
R
| false
| false
| 204
|
r
|
server.R
|
# Shiny server: renders a base-R plot built from user-supplied code text.
server = function(input, output, session) {
  output$plotreal <- renderPlot({
    par(oma = c(0,0,0,0), mar = c(4,4,2,2))
    # Reactive dependency on the "evaluate" trigger: the plot refreshes only
    # when this input fires; isolate() below blocks re-runs on every keystroke.
    input$evalreal
    # NOTE(review): eval(parse(...)) executes arbitrary user code — fine for a
    # local teaching app, unsafe if ever hosted publicly.
    return(isolate(eval(parse(text=input$realplot))))
  })
}
|
ee5f0cd526a79dcf3256b5e52d9e2df2a35858a6
|
3976972fec0a7e5002b741e9d7d55ee522370f13
|
/ctseq/rFunctions.R
|
aa8f4d871e2c2077caebebdefc2d3e714c3bb837
|
[
"MIT"
] |
permissive
|
jzfarmer/KailosProject
|
92a7581ca96930b5b6573bb32574e8fda35d0208
|
42d70d6470955ea47ee40aed2d66bd9981f1cfef
|
refs/heads/main
| 2023-06-23T21:55:42.840363
| 2021-07-27T18:55:22
| 2021-07-27T18:55:22
| 374,232,234
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,368
|
r
|
rFunctions.R
|
# make sure a list of names is in the correct format
# Normalise a character vector of sample names the same way R's
# make.names()-style mangling does when reading headers:
#   - prefix names that start with a digit with "X"
#   - replace '-' with '.'
# Returns a character vector of the same length (character(0) for empty input,
# which the previous seq(1, length(x), 1) version mishandled).
formatNames <- function(listOfNames){
  newListOfNames <- character(length(listOfNames))
  for (i in seq_along(listOfNames)) {
    currName <- listOfNames[i]
    # first character is a digit -> R would have prefixed the column with "X"
    if (grepl("^[0-9]", currName)) {
      currName <- paste0("X", currName)
    }
    currName <- gsub("-", ".", currName, fixed = TRUE)  # '-' -> '.'
    newListOfNames[i] <- currName
  }
  return(newListOfNames)
}
# merge two data frames
# Full outer join of two data frames on their row names. The helper
# 'Row.names' column that merge() creates is promoted back to row names and
# dropped, so the result looks like a plain row-named data frame.
mergeDF <- function(df1, df2){
  finalDF <- merge(df1, df2, by = "row.names", all = TRUE)  # outer join on row names
  rownames(finalDF) <- finalDF$Row.names
  finalDF$Row.names <- NULL  # drop the helper column merge() added
  return(finalDF)
}
# load and assemble all the data necessary to plot
# Inputs (all tab-separated, first column = row names):
#   fragInfoFileName    : per-fragment annotation (incl. a 'fragOrder' column)
#   sampleStatsFileName : per-sample stats
#   totalMolFileName / methMolFileName / methRatioFileName : fragment x sample tables
#   sampleInfoFileName  : optional sample annotation; pass "NOSAMPLEINFO" to skip
# Returns a list with the fragment order for the molecule-depth plot plus the
# three data tables and sample stats, consistently restricted and relabelled.
loadFiles <- function(fragInfoFileName, sampleStatsFileName, totalMolFileName,
                      methMolFileName, methRatioFileName, sampleInfoFileName){
  # info about fragments
  fragInfo <- read.table(fragInfoFileName, header = TRUE, row.names = 1, sep = '\t')
  # get order of frags for molecule depth plot
  # assuming this is the order set by K-T's lab
  fragOrder_molDepthPlot <- fragInfo[with(fragInfo, order(fragOrder)), ]
  fragOrder_molDepthPlot <- row.names(fragOrder_molDepthPlot)
  fragOrder_heatmaps <- rownames(fragInfo)

  # info about each sample; R prefixes names starting with a digit with "X"
  # and turns '-' into '.', so normalise our row names the same way
  sampleStats <- read.table(file = sampleStatsFileName, header = TRUE, row.names = 1, sep = "\t")
  rownames(sampleStats) <- formatNames(rownames(sampleStats))

  # data tables, restricted to the fragments listed in the info file
  totalMolFile <- read.table(totalMolFileName, header = TRUE, row.names = 1, sep = "\t")
  totalMolFile <- totalMolFile[fragOrder_heatmaps, ]
  methMolFile <- read.table(methMolFileName, header = TRUE, row.names = 1, sep = "\t")
  methMolFile <- methMolFile[fragOrder_heatmaps, ]
  # BUG FIX: this previously read the global 'ratioFileName' instead of the
  # 'methRatioFileName' argument passed to this function.
  methRatioFile <- read.table(methRatioFileName, header = TRUE, row.names = 1, sep = "\t")
  methRatioFile <- methRatioFile[fragOrder_heatmaps, ]

  if (sampleInfoFileName != "NOSAMPLEINFO"){
    sampleInfoFile <- read.table(sampleInfoFileName, header = TRUE, row.names = 1, sep = "\t")
    rownames(sampleInfoFile) <- formatNames(rownames(sampleInfoFile))
    # "<sample>.<first annotation column>" labels, normalised like the rest
    annotatedNames <- formatNames(paste(rownames(sampleInfoFile),
                                        as.character(sampleInfoFile[, 1]), sep = '.'))
    # put data file cols in the same order as sampleInfoFile rows and relabel
    relabel <- function(dat){
      dat <- dat[, rownames(sampleInfoFile)]
      colnames(dat) <- annotatedNames
      dat
    }
    totalMolFile <- relabel(totalMolFile)
    methMolFile <- relabel(methMolFile)
    methRatioFile <- relabel(methRatioFile)
    sampleStats <- sampleStats[rownames(sampleInfoFile), ]
    rownames(sampleStats) <- annotatedNames
  }

  list(fragOrder_molDepthPlot = fragOrder_molDepthPlot,
       totalMolFile = totalMolFile,
       methMolFile = methMolFile,
       methRatioFile = methRatioFile,
       sampleStats = sampleStats)
}
# molecule depth plot and heatmaps
# Writes four PDFs next to the working directory, all prefixed with runName:
# a per-fragment molecule dot plot, and heatmaps of total molecules (log2),
# methylated molecules (log2) and methylation ratio.
plotData = function(runName,fragInfoFileName,sampleStats,totalMolFile,methMolFile,methRatioFile){
  library(ggplot2)
  library(reshape)
  library(pheatmap)
  # info about fragments
  fragInfo = read.table(fragInfoFileName, header = T, row.names = 1, sep = '\t')
  # get order of frags for molecule depth plot
  fragOrder_molDepthPlot = fragInfo[with(fragInfo, order(fragOrder)),]
  fragOrder_molDepthPlot = row.names(fragOrder_molDepthPlot)
  fragOrder_heatmaps = rownames(fragInfo)
  ##### create color palette for all annotations we will use in the heatmaps #####
  annotationFeatures = c(colnames(fragInfo),colnames(sampleStats))
  # colors to pick from
  colorList = c("aquamarine4","blue4","brown4","burlywood4","cadetblue4","chartreuse4","chocolate4","coral4","darkgoldenrod4","darkgreen",
                "darkmagenta","darkorange3","darkorchid4","darkred","darkslateblue","darkslategray","deeppink4","deepskyblue4","dodgerblue4", "firebrick3",
                "gray9","indianred3","hotpink3","purple4","navy","olivedrab4","magenta3","sienna","wheat4","turquoise4",
                "palevioletred3","salmon","cornflowerblue","palegreen4","maroon","green4","royalblue3","rosybrown4","orange","purple")
  set.seed(1) # set seed to keep order of colors the same
  colorList = sample(colorList) # shuffle colors
  # each annotation feature gets a two-color ramp: grey95 -> its own color
  my_ann_colors = list()
  for(i in seq(1,length(annotationFeatures),1)){
    feature = annotationFeatures[i]
    color = colorList[i]
    my_ann_colors[[feature]] = c("grey95",color)
  }
  ####### total molecules plot #######
  # load total molecules file
  # totalMolFile=read.table(totalMolFileName, header = T, row.names = 1, sep = "\t")
  # totalMolFile=totalMolFile[fragOrder_heatmaps,] # only grab frags in info file - we only want to plot these
  # move the fragment ids into a 'Locus' column so melt() keeps them
  colNamesTotalMol = colnames(totalMolFile)
  colNamesTotalMol = c("Locus",colNamesTotalMol)
  totalMolFile$Locus = rownames(totalMolFile)
  totalMolFile = totalMolFile[,colNamesTotalMol]
  rownames(totalMolFile) = NULL
  meltedMol = melt(totalMolFile) # melt is from the reshape package. turns wide data to long data
  colnames(meltedMol) = c("Fragment","Sample","Molecules")
  # change all NA's to 1 because will be taking log
  meltedMol[is.na(meltedMol) ] = 1
  totalMolPlotName=paste(runName,"_totalMoleculesPlot.pdf",sep="")
  moleculePlot = ggplot(meltedMol, aes(x=factor(Fragment), y=Molecules, fill=factor(Sample))) +
    ggtitle(paste(runName,"Unique Molecules")) +
    xlab("Fragment") +
    scale_fill_discrete(name = "Sample") +
    geom_dotplot(binaxis = "y", dotsize=0.4) +
    scale_x_discrete(limits=fragOrder_molDepthPlot) +
    theme(axis.text.x = element_text(angle = 90, hjust = 1, size = 0.01, color = "white")) +
    scale_y_log10(limits = c(1,10000)) # scales the y axis by log10
  pdf(totalMolPlotName)
  print(moleculePlot)
  dev.off()
  ####### HEATMAPS ########
  # font sizes shrink as the number of samples/fragments grows
  rowFontSize=3
  colFontSize=6
  fontSize=6
  if(ncol(methMolFile)>25 && ncol(methMolFile)<40){
    colFontSize=4
  }
  else if(ncol(methMolFile)>=40){
    colFontSize=3
  }
  if(nrow(fragInfo)>160){
    rowFontSize=2
  }
  ### total molecule heatmap ###
  # restore row names and drop helper columns before plotting
  rownames(totalMolFile)=totalMolFile$Locus
  totalMolFile$Locus=NULL
  fragInfo$fragOrder=NULL
  totalMolHeatmapName=paste(runName,"_totalMoleculesHeatmap.pdf",sep = "")
  pdf(totalMolHeatmapName)
  pheatmap(log2(totalMolFile[fragOrder_heatmaps,]+1),
           main = paste(runName,"Aligned Molecules (log2)"),
           fontsize_row = rowFontSize,
           fontsize_col = colFontSize,
           fontsize = fontSize,
           show_rownames = T,
           cluster_cols = F,
           cluster_rows = F,
           color = colorRampPalette(c("navy","yellow","forestgreen","darkgreen"))(50),
           annotation_col = sampleStats,
           annotation_row = fragInfo,
           na_col = "snow4", annotation_colors = my_ann_colors)
  dev.off()
  ### meth mol heatmap ###
  # methMolFile=read.table(methMolFileName,header = T, row.names = 1, sep = "\t")
  methMolHeatmapName=paste(runName,"_methylatedMoleculesHeatmap.pdf",sep = "")
  pdf(methMolHeatmapName)
  pheatmap(log2(methMolFile[fragOrder_heatmaps,]+1),
           main=paste(runName,"Methylated Molecules (log2)"),
           fontsize_row=rowFontSize,
           fontsize_col=colFontSize,
           fontsize = fontSize,
           show_rownames = T,
           cluster_cols=F,
           cluster_rows=F,
           color=colorRampPalette(c("navy","white","firebrick3","firebrick4"))(50),
           annotation_col = sampleStats,
           annotation_row = fragInfo,
           na_col = "snow4",
           annotation_colors = my_ann_colors)
  dev.off()
  ### meth ratio heatmap ###
  # methRatioFile=read.table(ratioFileName,header = T, row.names = 1, sep = "\t")
  methRatioHeatmapName=paste(runName,"_methylationRatioHeatmap.pdf",sep = "")
  pdf(methRatioHeatmapName)
  pheatmap(methRatioFile[fragOrder_heatmaps,],
           main=paste(runName,"Methylation Ratio"),
           fontsize_row=rowFontSize,
           fontsize_col=colFontSize, fontsize = fontSize,
           show_rownames = T,
           cluster_cols=F,
           cluster_rows=F,
           color=colorRampPalette(c("navy","white","sienna1","sienna2","sienna3","sienna","sienna4"))(50),
           annotation_col = sampleStats,
           annotation_row = fragInfo,
           na_col = "snow4",
           annotation_colors = my_ann_colors)
  dev.off()
}
|
7f18b56c4284e8edcaeec404f3e4abb0a3b0f5c3
|
25ea70b6f8151a5994edf7afc9c65653eaf1cbc2
|
/day1.R
|
e5b993eeecb28fc371abb4836a4bde84bad284c0
|
[] |
no_license
|
yoooongd/test2
|
4c665d8033c8d6f821668ced4b14cd958ffc7932
|
6a36c1da161315b863876fdecd6b876bf6028cad
|
refs/heads/master
| 2020-04-22T14:39:49.946131
| 2019-02-13T12:26:49
| 2019-02-13T12:26:49
| 170,451,352
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 205
|
r
|
day1.R
|
# Build a small data frame from three parallel vectors, then grow it
# column-by-column two different ways (direct assignment vs. transform()).
xNum <- c(1, 3, 5, 7)
xLog <- c(TRUE, TRUE, FALSE, TRUE)  # spell out TRUE/FALSE; T/F are reassignable
xChar <- c("a", "b", "c", "d")
df <- data.frame(xNum, xLog, xChar)
df
str(df)

# add a column by direct $-assignment ...
df$Name <- c("Kim", "Park", "Tom", "Joe")
df

# ... and another via transform(), which returns a modified copy
df <- transform(df, Age = c(10, 20, 30, 40))
df
|
81f06071cbd25a4955353165d0b526201e006b87
|
9ea744d0e28fe4fc4d3e1e00f7ec53ea054b8cd0
|
/R/nextstrain.json.R
|
c6acba9d0ed589fcfbdcc2cab53bcd9bc6b9bc4f
|
[] |
no_license
|
YuLab-SMU/treeio
|
8d434454f25336859e0e0c12fc65029a310b638b
|
c3f7b8e6df5f768f53e33b46b3e13dd529bb4f56
|
refs/heads/devel
| 2023-09-01T19:44:13.166325
| 2023-08-25T04:27:18
| 2023-08-25T04:27:18
| 75,700,092
| 56
| 17
| null | 2023-08-25T04:25:14
| 2016-12-06T06:05:56
|
R
|
UTF-8
|
R
| false
| false
| 2,760
|
r
|
nextstrain.json.R
|
#' @title read.nextstrain.json
#' @param x the json tree file of auspice from nextstrain.
#' @return treedata object
#' @export
#' @author Shuangbin Xu
#' @examples
#' file1 <- system.file("extdata/nextstrain.json", "minimal_v2.json", package="treeio")
#' tr <- read.nextstrain.json(file1)
#' tr
read.nextstrain.json <- function(x){
    x <- jsonlite::read_json(x)
    # v2 auspice files wrap the tree in {meta, tree}; v1 files are the tree itself
    if (all(c('meta', 'tree') %in% names(x))){
        dt <- parser_children(x$tree)
    }else{
        dt <- parser_children(x)
    }
    # edge table: parent id, node id and (when divergence was present) length
    if ('branch.length' %in% colnames(dt)){
        rmclnm <- c("parentID", "NodeID", "branch.length")
        edgedf <- dt[, rmclnm]
    }else{
        rmclnm <- c("parentID", "NodeID")
        edgedf <- dt[, rmclnm]
    }
    dd <- as.phylo(edgedf, "branch.length")
    # join the remaining per-node attributes back on the node label
    dt$label <- as.character(dt$NodeID)
    dt <- dt[, !colnames(dt) %in% rmclnm, drop=FALSE]
    dd <- dd |> tidytree::as_tibble() |> dplyr::full_join(dt, by='label')
    # restore the original node names as labels when available
    if ("name" %in% colnames(dd)){
        dd$label <- dd$name
        dd$name <- NULL
    }
    tr <- dd |> as.treedata()
    return(tr)
}
# Recursively walk the auspice tree, flattening each node into a one-row data
# frame. 'id' is an environment used as mutable state shared across the
# recursion: id$id is the running node counter, id$data accumulates the rows.
parser_children <- function(x, id=list2env(list(id = 0L)), parent = 1){
    id[["id"]] <- id[["id"]] + 1L
    id[["data"]][[id[["id"]]]] <- extract_node_attrs(x, id=id[["id"]], isTip=FALSE, parent=parent)
    # branch length = child divergence minus parent divergence (when 'div' exists)
    if ('div' %in% colnames(id[['data']][[id[['id']]]])){
        parent.index <- id[['data']][[id[['id']]]][['parentID']]
        id[['data']][[id[['id']]]][['branch.length']] <- as.numeric(id[['data']][[id[['id']]]][['div']]) -
                                            as.numeric(id[['data']][[parent.index]][['div']])
    }
    if ('children' %in% names(x)){
        # NOTE(review): 'parent' is a lazy promise evaluated inside each child
        # call, after id$id has advanced — confirm it resolves to the intended
        # parent node id for every child.
        lapply(x$children,
               parser_children,
               id = id,
               parent = ifelse(id[['id']]>=2, id[["data"]][[id[["id"]]-1L]][["NodeID"]], 1)
        )
    }else{
        # no children: this node is a tip
        id[["data"]][[id[["id"]]]][["isTip"]] <- TRUE
    }
    # bind all accumulated rows; coerce numeric-looking character cols to numeric
    dat <- dplyr::bind_rows(as.list(id[["data"]])) %>% dplyr::mutate_if(check_num, as.numeric)
    return(dat)
}
# TRUE for character columns whose values look numeric (used above to coerce
# attribute columns back to numeric after unlist() stringified them).
# NOTE(review): 'is_numeric' is not base R — presumably a helper from this
# package's imports that tests whether a string parses as a number; confirm.
check_num <- function(x){
    is_numeric(x) && is.character(x)
}
# Pull the attribute table out of a single auspice tree node and attach the
# bookkeeping columns: node name, parent id, own id and tip flag.
extract_node_attrs <- function(x, id, isTip, parent){
    # v2 auspice JSON keeps attributes under 'node_attrs'; v1 used 'attr'
    attr_key <- if ('node_attrs' %in% names(x)) 'node_attrs'
                else if ('attr' %in% names(x)) 'attr'
                else NULL
    res <- if (is.null(attr_key)) data.frame() else build_node_attrs(x[[attr_key]])
    # node label: v2 stores it as 'name', v1 as 'strain'
    name_key <- if ('name' %in% names(x)) 'name'
                else if ('strain' %in% names(x)) 'strain'
                else NULL
    if (!is.null(name_key)){
        res$name <- x[[name_key]]
    }
    res$parentID <- parent
    res$NodeID <- id
    res$isTip <- isTip
    return(res)
}
# Flatten a node's attribute list into a one-row tibble. unlist() turns
# nested 'key$value' entries into names like 'key.value'; strip that suffix
# so the columns carry the plain attribute names.
build_node_attrs <- function(x){
    flat <- unlist(x)
    value_cols <- grepl('\\.value$', names(flat))
    names(flat)[value_cols] <- gsub('\\.value$', '', names(flat)[value_cols])
    tibble::as_tibble(t(flat))
}
|
4f01d34671d95156c2d77927244dbf60bb297f8a
|
9570c514cd5e90c04c3cf902c4a3e120afa97069
|
/Paper_plots/fig3_all_model_intersects_per_pop_plus_tables4_5.R
|
b6b6693add974b70576b606c76a71de9b669665f
|
[] |
no_license
|
WheelerLab/ML-PredictDB
|
983ce8652bff0818ad35e7cd29d662012e611c3e
|
8110036ec7b746aab977e38b7fcc1ce8bd63adda
|
refs/heads/master
| 2021-03-09T17:09:59.212863
| 2020-11-30T02:45:01
| 2020-11-30T02:45:01
| 246,360,407
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,340
|
r
|
fig3_all_model_intersects_per_pop_plus_tables4_5.R
|
#Make another Figure 3 Boxplot. Where the gene intersects per MESA training subpopulation is used per algorithm.
#e.g In AFA, all gene intersects of EN, RF, SVR, KNN
library(data.table)
library(dplyr)
library(tidyverse)
library(ggplot2)
# infix string-concatenation helper: "a" %&% "b" == "ab"
"%&%" = function(a,b) paste (a,b,sep="")
#df <- NULL
# algorithm order used when stacking the per-model data frames further below
algs <- c("en", "knn", "rf", "svr")
#pops <- c("AFA", "AFHI", "ALL", "CAU", "HIS")
# For each MESA training population, read the four per-algorithm METS
# Spearman tables and keep only the genes shared by all four algorithms.
# Each iteration assigns a data frame named after the population (AFA, CAU,
# HIS, AFHI, ALL) with columns gene/en/rf/svr/knn — the exact layout the
# plotting and table code below expects. (Replaces 20 copy-pasted
# fread/inner_join blocks that differed only in algorithm and population.)
fig3_base_dir <- "Z:/data/ml_paper_reviewers_corrections/mets_spearman/used_pcair_10pc_10peer_obs_exp/"
read_corr <- function(alg, pop) {
  # one algorithm/population Spearman table, reduced to gene + spearman
  fread(fig3_base_dir %&% alg %&% "_" %&% pop %&% "_2_METS_corr_filt.txt",
        header = TRUE, sep = "\t", stringsAsFactors = FALSE) %>%
    select(c("gene", "spearman"))
}
for (pop_i in c("AFA", "CAU", "HIS", "AFHI", "ALL")) {
  # same join order as before: en, then rf, svr, knn
  merged <- read_corr("en", pop_i)
  for (alg_i in c("rf", "svr", "knn")) {
    merged <- inner_join(merged, read_corr(alg_i, pop_i), by = "gene")
  }
  colnames(merged) <- c("gene", "en", "rf", "svr", "knn")
  assign(pop_i, merged)
}
# Stack one row-block per (algorithm, population) pair holding that pair's
# intersect-gene Spearman values, then draw grouped boxplots.
df <- NULL
for (alg in algs){
  pop1 <- data.frame(spearman=AFA[[alg]], Model=toupper(alg), mesa="AFA")
  pop2 <- data.frame(spearman=CAU[[alg]], Model=toupper(alg), mesa="CAU")
  pop3 <- data.frame(spearman=HIS[[alg]], Model=toupper(alg), mesa="HIS")
  pop4 <- data.frame(spearman=AFHI[[alg]], Model=toupper(alg), mesa="AFHI")
  pop5 <- data.frame(spearman=ALL[[alg]], Model=toupper(alg), mesa="ALL")
  df <- rbind(df, pop1, pop2, pop3, pop4, pop5)
}
# explicit factor levels fix the x-axis and legend ordering
mesa2mets <- mutate(df,mesa=factor(mesa,levels=c("AFA","HIS","CAU","AFHI", "ALL")),
                    Model=factor(Model,levels=c("EN","RF","SVR","KNN")))
fig <- ggplot(mesa2mets, aes(x=mesa, y=spearman, fill=Model)) + geom_boxplot() + theme_classic(18) +
  xlab("Population") + scale_y_continuous(breaks=seq(-1.0, 1.0, 0.25), limits=c(-1.0, 1.0)) +
  ylab(expression(paste("Spearman Correlation ", rho)))
#print(fig)
# write the figure as a 300 dpi, LZW-compressed TIFF
tiff("Z:/data/ml_paper_reviewers_corrections/paper_figs/used_pcair_10pc_10peer_obs_exp/Fig3_all_model_intersects_per_pop.tiff",
     width = 18, height = 14, units = 'cm', res = 300, compression = 'lzw')
fig
dev.off()
# means <- aggregate(spearman ~ Model, mesa2mets, mean)
#
# library(ggpubr)
# ggboxplot(mesa2mets, "mesa", "spearman", fill="Model", bxp.errorbar = T, repel=T) + theme_classic(18) +
#   xlab("Population") + scale_y_continuous(breaks=seq(-1.0, 1.0, 0.25), limits=c(-1.0, 1.0)) +
#   ylab(expression(paste("Spearman Correlation ", rho))) + stat_summary(fun.y="mean") +
#   geom_text(data=means, aes(y=spearman))
#Make a table for the mean prediction performance of each algorithm per population. Note, this is on their intersect genes
algs <- c("en", "rf", "svr", "knn")
pops <- c("ALL","AFHI", "AFA", "CAU", "HIS")
# tabut: paired t-test p-values, EN vs each other model (Table 5 in paper)
tabut <- matrix(nrow=5, ncol=3) #ttest pvalue # Table 5 in Paper
colnames(tabut) <- c("RF", "SVR", "KNN")
rownames(tabut) <- pops
# tabum: mean Spearman per model and population (Table 4 in paper)
tabum <- matrix(nrow=5, ncol=4) #Mean # Table 4 in paper
colnames(tabum) <- algs
rownames(tabum) <- pops
# tag each population's table so the rows can be stacked and subset later
ALL <- mutate(ALL, pop="ALL")
AFHI <- mutate(AFHI, pop="AFHI")
AFA <- mutate(AFA, pop="AFA")
CAU <- mutate(CAU, pop="CAU")
HIS <- mutate(HIS, pop="HIS")
popbox <- rbind(ALL, AFHI, AFA, CAU, HIS)
for (i in 1:length(pops)){
  for (j in 2:length(algs)){
    df <- subset(popbox, pop==pops[i])
    # paired t-test: EN vs algs[j] on the same intersect genes
    tt <- t.test(df[["en"]], df[[algs[j]]], paired=TRUE)
    tabut[i,j-1] <- tt$p.value
  }
  # drop 'gene' (col 1) and 'pop' (col 6), keeping the four model columns
  df[,c(1,6)] <- NULL
  tabum[i,] <- colMeans(df)
}
|
00c4cf7919a7ab7a9306181f7ed4484cdb8b8cc2
|
a30c029a2e6dc3a621b20712d32b1233afc0eb02
|
/connection/RMySQL.R
|
eb16a47b57212ea25bf85f48f4ca8842e5e0dced
|
[] |
no_license
|
floss-for-fun/r-for-fun
|
141af0c89a35daedbc82e22f1fa7134ac1549658
|
011862312c66b47e6ce49c1ba38159e4e9df01ef
|
refs/heads/master
| 2020-04-13T21:14:03.459744
| 2017-07-05T01:29:24
| 2017-07-05T01:29:24
| 20,584,410
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 500
|
r
|
RMySQL.R
|
# Demo: querying a local MySQL database via the DBI-compliant RMySQL driver.
library(RMySQL)

# Connect to database 'test' on localhost, as user 'root' with no password.
mydb <- dbConnect(MySQL(), user='root', password='',
                  dbname='test', host='localhost')

# List all tables in the database.
dbListTables(mydb)

# List all columns in a table.
dbListFields(mydb, 'nama_table')

# Send the query and obtain a result-set handle.
rs <- dbSendQuery(mydb, 'SELECT * FROM nama_table')

# Fetch every pending row (n = -1) into a data frame.
# FIX: fetch() is deprecated in DBI — use dbFetch() instead.
data <- dbFetch(rs, n=-1)

# FIX: release server-side resources. The original leaked both the open
# result set and the connection.
dbClearResult(rs)
dbDisconnect(mydb)

# Summarize the retrieved data.
summary(data)
|
48e1373c26248561e351c0b3b1cc7fa54130a3da
|
db23803c56eb7f0f7e0239ba45ef5aa5f1e26fc6
|
/scripts/figure_scripts/Supp_EC_spearman_w_NSTI_and_scrambled.R
|
0589f5dc034e6c6c1f436c68b81bd14ef270d70c
|
[] |
no_license
|
weibokong27/picrust2_manuscript
|
ed362146df5f917e3f92aa012ba5a864dc2928fa
|
dd4e2daa0b7058fa0ef46bc8f02c052f226c34ed
|
refs/heads/master
| 2022-07-02T14:47:02.757223
| 2020-05-16T12:42:29
| 2020-05-16T12:42:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,485
|
r
|
Supp_EC_spearman_w_NSTI_and_scrambled.R
|
### Code to make figure contrasting EC correlations on each 16S validation dataset.
### Include all NSTI cut-offs in these plots.

# NOTE(review): clearing the global environment and setwd() are anti-patterns
# that tie this script to one machine; consider a project-relative workflow.
rm(list=ls(all.names=TRUE))

library(ggplot2)
library(reshape2)
library(ggpubr)
library(cowplot)
library(ggbeeswarm)

setwd("/home/gavin/gavin_backup/projects/picrust2_manuscript/data/saved_RDS/16S_vs_MGS_metrics/")
# Provides parse_rho_rds_and_calc_wilcoxon() used below.
source("/home/gavin/gavin_backup/projects/picrust2_manuscript/scripts/picrust2_ms_functions.R")

# NSTI cut-off categories to exclude from the Wilcoxon comparisons.
extra_nsti_categories <- c("NSTI=1.5", "NSTI=1", "NSTI=0.5", "NSTI=0.25", "NSTI=0.1", "NSTI=0.05")

# Dataset file prefixes and their human-readable panel names (kept in parallel order).
datasets <- c("hmp", "mammal", "ocean", "blueberry", "indian", "cameroon", "primate")
dataset_names <- c("HMP", "Mammal", "Ocean", "Soil (Blueberry)", "Indian", "Cameroonian", "Primate")

ec_rho_outlist <- list()
ec_rho <- list()
ec_rho_wilcoxon <- list()

# For each dataset, parse its saved Spearman-rho RDS and compute pairwise
# Wilcoxon tests; element [[1]] holds the rho data frame, [[2]] the test table.
# NOTE(review): return structure inferred from the indexing below — confirm
# against picrust2_ms_functions.R.
for(i in 1:length(datasets)) {
  ec_rho_outlist[[datasets[i]]] <- parse_rho_rds_and_calc_wilcoxon(rho_rds = paste(datasets[i], "_ec_spearman_df.rds", sep=""),
                                                                   dataset_name = dataset_names[i],
                                                                   wilcox_cat2ignore = extra_nsti_categories,
                                                                   y_pos_start = 0.9)
  ec_rho[[datasets[i]]] <- ec_rho_outlist[[datasets[i]]][[1]]
  ec_rho_wilcoxon[[datasets[i]]] <- ec_rho_outlist[[datasets[i]]][[2]]
  # Rename the scrambled-sequence category for plot labels.
  ec_rho_wilcoxon[[datasets[i]]][which(ec_rho_wilcoxon[[datasets[i]]]$group2 == "Scrambled"), "group2"] <- "Shuffled\nASVs"
}

# Make plot for each dataset.
EC_spearman_boxplots <- list()

for(j in 1:length(datasets)) {
  dataset_ec_rho <- ec_rho[[datasets[j]]]
  dataset_ec_rho$cat <- as.character(dataset_ec_rho$cat)
  dataset_ec_rho[which(dataset_ec_rho$cat == "Scrambled"), "cat"] <- "Shuffled\nASVs"
  # Drop intermediate NSTI cut-offs to keep panels readable; fix category order.
  dataset_ec_rho <- dataset_ec_rho[-which(dataset_ec_rho$cat %in% c("NSTI=1.5", "NSTI=0.5", "NSTI=0.25", "NSTI=0.1")), ]
  dataset_ec_rho$cat <- factor(dataset_ec_rho$cat,
                               levels=c("Null", "PAPRICA", "Shuffled\nASVs", "NSTI=2", "NSTI=1", "NSTI=0.05"))

  dataset_ec_rho_melt <- melt(dataset_ec_rho)

  # Relabel the fill categories: "Other" is the PAPRICA tool, and shuffled
  # ASVs are a PICRUSt2 negative control.
  dataset_ec_rho_melt[which(dataset_ec_rho_melt$Database == "Other"), "Database"] <- "PAPRICA"
  dataset_ec_rho_melt[which(dataset_ec_rho_melt$cat == "Shuffled\nASVs"), "Database"] <- "PICRUSt2"

  # Boxplot + jittered points per category, with Wilcoxon significance bars.
  EC_spearman_boxplots[[datasets[j]]] <- ggplot(dataset_ec_rho_melt, aes(x=cat, y=value, fill=Database)) +
    geom_boxplot(outlier.shape = NA) +
    geom_quasirandom(size=0.1) +
    scale_y_continuous(breaks=c(0.4, 0.6, 0.8, 1.0), limits=c(0.4, 1.05)) +
    ylab(c("Spearman Correlation Coefficient")) +
    xlab("") +
    facet_grid(. ~ dataset, scales = "free", space = "free", switch="x") +
    theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
          panel.background = element_blank(), axis.line = element_line(colour = "black"),
          axis.text.x=element_text(angle=45, hjust=1),
          legend.position = c(0.6, 0.2), legend.background = element_rect(color = "black",
                                                                          fill = "white", size = 0.3, linetype = "solid"),
          legend.title = element_text(colour="black", size=8, face="bold"),
          legend.text = element_text(colour="black", size=8)) +
    scale_fill_manual(values=c("light grey", "#F8766D", "#00BFC4")) +
    stat_pvalue_manual(ec_rho_wilcoxon[[datasets[j]]], label = "p_symbol")
}

# Assemble the seven per-dataset panels into one supplementary figure.
pdf(file = "../../../figures/Supp_EC_spearman.pdf", width=15, height=9)

plot_grid(EC_spearman_boxplots[["cameroon"]],
          EC_spearman_boxplots[["hmp"]],
          EC_spearman_boxplots[["indian"]],
          EC_spearman_boxplots[["mammal"]],
          EC_spearman_boxplots[["ocean"]],
          EC_spearman_boxplots[["primate"]],
          EC_spearman_boxplots[["blueberry"]],
          labels=c("a", "b", "c", "d", "e", "f", "g"),
          nrow=2,
          ncol=4)

dev.off()
|
68cb869aa00d9c1e37e213bc62d13414a94d05bc
|
a8ace6bed475a0017e4ecc91c4cfc6e42477892b
|
/man/get.best.templates.Rd
|
b403dbe5721e2f8ee2b615475906b38834c6d6a3
|
[] |
no_license
|
adbelazim/carSignal
|
34993c617c92519eb35aa5a7d02e9202e879bb90
|
935e6fbd6c8d25ca514e1ed118f16342197116e3
|
refs/heads/master
| 2021-03-22T00:33:09.748942
| 2016-10-02T03:19:52
| 2016-10-02T03:19:52
| 69,776,080
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,260
|
rd
|
get.best.templates.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/carSignal-v15.9.10.R
\name{get.best.templates}
\alias{get.best.templates}
\title{Busca la curva que mejor se ajuste a los templates entregados como entrada, con el propósito de calcular
el índice ARI.}
\usage{
get.best.templates(time.instants, signal, templates,
referential.time.instant = 0, delta.time.before.ref = 0,
delta.time.after.ref = 20 * 0.8, comparison.function = get.MSE,
keep.details = TRUE, time.tol = min(diff(time.instants))/100, ...)
}
\arguments{
\item{time.instants}{lista con los tiempos de muestreo de la señal}
\item{signal}{señal con la que se desea comparar los templates}
\item{templates}{templates a comparar con la señal}
\item{comparison.function}{función con la cual se evalúa la curva con mejor ajuste a los templates.}
}
\value{
El mejor valor de ajuste al template y una lista con los valores del template.
}
\description{
Busca la curva que mejor se ajuste a los templates entregados como entrada, con el propósito de calcular
el índice ARI.
}
\references{
Frank P. Tiecks, Arthur M. Lam, Rune Aaslid, David W. Newell. Comparison of Static and Dynamic Cerebral Autoregulation Measurements.
Stroke, 1995, 26, pages 1014-1019.
}
|
d8f52b69c39dacf1fdb7cc7e8690e864f03a111f
|
024b07663d349d5f2c6fd04db563307754a61ca4
|
/SimulationChooseIVs/test/test_ChooseInstruments.R
|
a1a2144d91a348f66477e80dda8b43c6f34115a0
|
[] |
no_license
|
fditraglia/fmsc
|
b794059959705b68ea3ec992b48b8c0bf2affc73
|
5c913e7912a05064bc0d093ca7037bc6e32e8aae
|
refs/heads/master
| 2021-06-30T14:32:59.409420
| 2016-09-02T15:30:06
| 2016-09-02T15:30:06
| 14,152,577
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,380
|
r
|
test_ChooseInstruments.R
|
# Test harness comparing the C++ (RcppArmadillo) TSLS/GMM implementations
# against reference implementations in R (sem::tsls and hand-rolled algebra).
# NOTE(review): setwd() ties this script to one machine's layout.
setwd("~/fmsc/ChooseInstruments")
library(Rcpp)
library(RcppArmadillo)
# Compile and load the C++ functions under test (tsls_est_cpp, dgp_cpp, ...).
sourceCpp("simulation_functions_ChooseInstruments.cpp")
library(sem)
# Simulate one dataset from a simple linear IV design: a single endogenous
# regressor x1 generated from two instruments (z1, z2) plus noise, and an
# outcome y linear in x1. Returns intercept columns (x0, z0), instruments,
# the regressor and the outcome as a named list of length-100 vectors.
simple_dgp <- function() {
  n_obs <- 100
  # First-stage coefficients: intercept and the two instrument loadings.
  fs_const <- 0.5
  fs_z1 <- 1
  fs_z2 <- 1
  # Structural-equation coefficients: intercept and slope on x1.
  beta0 <- 0.75
  beta1 <- 1.5
  z0 <- rep(1, n_obs)
  # Draw order matters for seed reproducibility: z1, z2, x1 noise, y noise.
  z1 <- rnorm(n_obs)
  z2 <- rnorm(n_obs)
  x1 <- fs_const + fs_z1 * z1 + fs_z2 * z2 + rnorm(n_obs)
  x0 <- rep(1, n_obs)
  y <- beta0 * x0 + beta1 * x1 + rnorm(n_obs)
  list(x0 = x0, x1 = x1, z0 = z0, z1 = z1, z2 = z2, y = y)
}
#Test C++ estimates against tsls

# Simulate one dataset and estimate the TSLS coefficients with the C++
# implementation (tsls_est_cpp from the sourced .cpp file).
cpp_est_sim <- function(){
  sim_data <- simple_dgp()
  X <- with(sim_data, cbind(x0, x1))
  Z <- with(sim_data, cbind(z0, z1, z2))
  y <- sim_data$y
  out <- as.vector(tsls_est_cpp(X, y, Z))
  return(out)
}

# Reference: same simulation estimated with sem::tsls; names stripped so the
# result is directly comparable to the C++ vector.
r_est_sim <- function(){
  sim_data <- simple_dgp()
  out <- tsls(y ~ x1, ~ z1 + z2, data = sim_data)$coef
  names(out) <- NULL
  return(out)
}

# Re-seed before each run so both implementations see identical simulated
# data across the 1000 replications; estimates should then agree exactly.
set.seed(7436)
system.time(foo <- replicate(1000, r_est_sim()))
set.seed(7436)
system.time(bar <- replicate(1000, cpp_est_sim()))
all.equal(foo, bar)
#Test C++ textbook standard errors against tsls

# Simulate one dataset and compute textbook TSLS standard errors via the
# C++ implementation.
cpp_se_sim <- function(){
  sim_data <- simple_dgp()
  X <- with(sim_data, cbind(x0, x1))
  Z <- with(sim_data, cbind(z0, z1, z2))
  y <- sim_data$y
  out <- as.vector(tsls_SE_textbook_cpp(X, y, Z))
  return(out)
}

# Reference: standard errors from sem::tsls (square root of the diagonal of
# the estimated covariance matrix V), names stripped for comparison.
r_se_sim <- function(){
  sim_data <- simple_dgp()
  out <- tsls(y ~ x1, ~ z1 + z2, data = sim_data)$V
  out <- sqrt(diag(out))
  names(out) <- NULL
  return(out)
}

# Same seeding trick as above: identical data for both implementations.
set.seed(7436)
system.time(foo <- replicate(1000, r_se_sim()))
set.seed(7436)
system.time(bar <- replicate(1000, cpp_se_sim()))
all.equal(foo, bar)

#tsls doesn't seem to have an option for robust or centered standard errors so we can't test the C++ against it. However, for this DGP the robust and centered standard errors should be very close to the textbook ones.
# Eyeball comparison of the three C++ SE variants on one homoskedastic dataset.
set.seed(821)
sim_data <- simple_dgp()
X <- with(sim_data, cbind(x0, x1))
Z <- with(sim_data, cbind(z0, z1, z2))
y <- sim_data$y
tsls_SE_textbook_cpp(X, y, Z)
tsls_SE_robust_cpp(X, y, Z)
tsls_SE_center_cpp(X, y, Z)
#Test the dgp function
set.seed(352)
test_dgp(0.2, 0.1, 100)

#Test CCIC class
# Compute the canonical-correlation information criteria with the C++ class,
# then reconstruct them by hand in R from the same simulated data.
set.seed(389)
baz <- CCIC_test(1,0.1)
set.seed(389)
testy <- dgp_cpp(1,0.1)

# Canonical correlation between the endogenous regressor and the instruments;
# with a single x this squared correlation equals the first-stage R^2.
cc <- cancor(testy$x, cbind(testy$z1, testy$z2))
n <- length(testy$x)
r <- cc$cor
bar <- lm(testy$x ~ testy$z1 + testy$z2 - 1)
all.equal(r^2, summary(bar)$r.squared)

# CCIC = n*log(1 - r^2) + (degree of overidentification) * penalty,
# with BIC, AIC and Hannan-Quinn penalties respectively.
first.term <- n * log(1 - r^2)
overid <- ncol(cbind(testy$z1, testy$z2)) - ncol(testy$x)
CC.BIC <- first.term + overid * log(n)
CC.AIC <- first.term + overid * 2
CC.HQ <- first.term + overid * 2.01 * log(log(n))

# Hand-computed criteria should match the C++ output exactly.
foo <- matrix(c(CC.BIC, CC.AIC, CC.HQ), 3, 1)
foo
baz
all.equal(foo, baz)
#Test linearGMM_msc class
# Compare the C++ two-step GMM output against a hand-rolled R computation
# on identically seeded data.
set.seed(389)
baz <- Andrews_test(1,0)
baz #1-step est, 2-step est, J-stat, J-pvalue, AIC, BIC, HQ
set.seed(389)
testy <- dgp_cpp(1,0)
overid <- ncol(cbind(testy$z1, testy$z2)) - ncol(testy$x)

# NOTE(review): this checks baz[4] against the chi-squared CDF at the J-stat,
# not 1 - CDF; confirm how the C++ class defines its "p-value".
all.equal(pchisq(baz[3], 3), baz[4]) #Check chi-squared p-value

#Check first-step estimator
# First step is plain TSLS (no intercept on either side).
step1 <- tsls(testy$y ~ testy$x - 1, ~ testy$z1 + testy$z2 - 1)
all.equal(baz[1], step1$coef, check.attributes = FALSE)

#check second-step estimator
# Build the centered heteroskedasticity-robust weight matrix Omega1 from the
# first-step residuals, then the efficient two-step GMM estimator.
e1 <- testy$y - step1$coef * testy$x
n <- length(e1)
D1 <- diag(as.vector(e1)^2, n, n)
e1.outer <- e1 %*% t(e1)
Z <- cbind(testy$z1, testy$z2)
X <- testy$x
y <- testy$y
Omega1 <- t(Z) %*% (D1 / n - e1.outer / (n^2)) %*% Z
Omega1.inv <- solve(Omega1)
step2 <- solve(t(X) %*% Z %*% Omega1.inv %*% t(Z) %*% X) %*% t(X) %*% Z %*% Omega1.inv %*% t(Z) %*% y
all.equal(as.vector(step2), baz[2], check.attributes = FALSE)

#Check J-statistic
# Recompute the weight matrix from second-step residuals and form Hansen's J.
e2 <- y - as.vector(step2) * X
n <- length(e2)
D2 <- diag(as.vector(e2)^2, n, n)
e2.outer <- e2 %*% t(e2)
Omega2 <- t(Z) %*% (D2 / n - e2.outer / (n^2)) %*% Z
J <- t(e2) %*% Z %*% solve(Omega2) %*% t(Z) %*% e2 / n
all.equal(as.vector(J), baz[3], check.attributes = FALSE)

#Check GMM-AIC, BIC and HQ
# Each criterion is J minus an overidentification penalty (AIC: 2, BIC: log n,
# HQ: 2.01 log log n per overidentifying restriction).
bonus <- overid * c(2, log(n), 2.01 * log(log(n)))
all.equal(J - bonus, baz[5:7])
#Test linearGMM_select
# Run the combined moment-selection routine and compare its pieces against
# the standalone Andrews and CCIC outputs on identically seeded data.
r <- 0.4
set.seed(389)
results <- GMMselect_test(1, r)
names(results)
set.seed(389)
baz <- Andrews_test(1, r) #1-step est, 2-step est, J-stat, J-pvalue, AIC, BIC, HQ
set.seed(389)
baz2 <- CCIC_test(1, r) #CCIC-BIC, CCIC-AIC, CCIC-HQ

# One-step estimates and J-statistics should match Andrews_test.
results$onestep
baz
results$J

#Test AIC
# Selected criterion value, the moment set it picks, and the resulting estimate.
results$AIC
results$momentsAIC
results$estAIC
#Test BIC
results$BIC
results$momentsBIC
results$estBIC
#Test HQ
results$HQ
results$momentsHQ
results$estHQ
|
a56e306b2fe4bfdf34ee36cd1612548dc3eeea6d
|
10d6629bf46a82c0f87bea8456d36e7b0e66df82
|
/plot2.R
|
514e3c9b8f2f3a4051d422ef89a62346c4349ac9
|
[] |
no_license
|
cosetta-lodovici/ExData_Plotting1
|
e563c5c86bd7aba7210b7786ebc5d7a9180157fd
|
e5ecd6b0fc67922e8cf70b11413f64c73d8c0933
|
refs/heads/master
| 2021-01-21T18:10:47.502177
| 2017-05-22T12:48:52
| 2017-05-22T12:48:52
| 92,021,000
| 0
| 0
| null | 2017-05-22T06:52:14
| 2017-05-22T06:52:13
| null |
UTF-8
|
R
| false
| false
| 460
|
r
|
plot2.R
|
# Plot 2: Global Active Power over 2007-02-01 and 2007-02-02, saved as a
# 480x480 PNG (plot2.png). Input file is expected one directory up.
library(sqldf)
Sys.setlocale("LC_ALL", "English")

# Read only the rows for the two target dates, filtering with SQL at load
# time instead of reading the whole file into memory first.
source_file <- "../household_power_consumption.txt"
power <- read.csv.sql(
  source_file,
  sep = ";",
  sql = "select * from file where Date in ('1/2/2007','2/2/2007')"
)

# Combine the separate Date and Time columns into POSIXct timestamps.
timestamps <- as.POSIXct(paste(power$Date, power$Time), format = "%d/%m/%Y %H:%M:%S")

# Draw the time series straight into the PNG device, then close it.
png(file = "plot2.png", width = 480, height = 480)
plot(timestamps, power$Global_active_power,
     type = "o", pch = ".",
     xlab = "", ylab = "Global Active Power (kilowatts)")
dev.off()
|
0ed47ce6a6305d7f9db7f04de900093bbf438c4c
|
af681784e683a9ff5b0e9b773504a934cc73cd7b
|
/Lab2_keane.R
|
10a69a94ad81765ad09d586dc64ccf68a43cc062
|
[] |
no_license
|
jakeane/qss17_assignments
|
0cf9850f07b0b538af801c416fbd24c54f8cdde4
|
778b6386a497be7281eed2c5c03d19b916cc26f4
|
refs/heads/master
| 2023-01-21T23:29:42.776236
| 2020-12-04T19:40:01
| 2020-12-04T19:40:01
| 295,878,104
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,318
|
r
|
Lab2_keane.R
|
## QSS 17
## John Keane

# Set up environment
library(tidyverse)
library(USAboundaries)
library(USAboundariesData)
library(gganimate)
library(sf)
library(transformr)
library(lubridate)

# NYT county-level cumulative COVID-19 case counts (pulled live from GitHub).
covid <- read_csv("https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv")
# Census 2019 county population estimates (local file).
county_pop <- read_csv("projectData/co-est2019-alldata.csv")
# County and state boundary geometries from USAboundaries.
county_data <- us_counties()
state_data <- us_states()
# County geometries keyed by 5-digit FIPS (state FIPS + county FIPS).
county_gis <- county_data %>%
  mutate(fips = paste(statefp, countyfp, sep = "")) %>%
  select(fips, name, state_name, state_abbr, geometry)

# Population estimates keyed the same way.
county_popest <- county_pop %>%
  mutate(fips = paste(STATE, COUNTY, sep = "")) %>%
  select(fips, POPESTIMATE2019)

# Build a complete county-by-date panel of cumulative cases: pivot wide so
# every county has every date, zero-fill the gaps, pivot back to long, then
# compute each county's change over the trailing 7 days.
# NOTE(review): replace_na() here zero-fills every column of the wide frame,
# not just the date columns — harmless only if fips is never NA (it is
# filtered above); confirm intent.
covid_agg <- covid %>%
  filter(!is.na(fips)) %>%
  select(date, fips, cases) %>%
  mutate(date = as.character(date)) %>%
  pivot_wider(
    names_from = "date",
    values_from = "cases"
  ) %>%
  replace_na(set_names(as.list(rep(0, length(.))), names(.))) %>%
  pivot_longer(names_to = "date", values_to = "cases", cols = `2020-01-21`:`2020-11-08`) %>%
  group_by(fips) %>%
  mutate(change = cases - lag(cases, n = 7, default = cases[1]))

# Attach geometries, then population, and express the weekly change as a
# percentage of county population.
combine_long <- inner_join(county_gis, covid_agg, by = "fips")
combine_pop <- inner_join(combine_long, county_popest, by = "fips") %>%
  mutate(change_rate = change / POPESTIMATE2019 * 100)
# State outlines for the mid-Atlantic/Northeast region shown on the maps.
states <- state_data %>%
  filter(state_abbr %in% c("NY", "CT", "NJ", "PA", "MA", "VT", "NH", "DE", "MD"))

# Animated choropleth of weekly case growth (% of population) around the
# NYC-area epicenter, mid-March to mid-May 2020, sampled every 3rd day to
# keep the frame count down.
covid_ts <- combine_pop %>%
  filter(
    state_abbr %in% c("NY", "CT", "NJ", "PA", "MA", "VT", "NH", "DE", "MD"),
    date >= as.Date("2020-03-15"),
    date <= as.Date("2020-05-15"),
    as.numeric(as.Date(date) - as.Date("2020-03-01")) %% 3 == 0
  ) %>%
  ggplot() +
  geom_sf(aes(fill = change_rate), color = alpha("white", 0.01)) +
  geom_sf(data = states, fill = alpha("white", 0), size = 1.5) +
  coord_sf(xlim = c(-76.5, -71), ylim = c(38.5, 43)) +
  # gganimate: one animation state per date; {closest_state} in the subtitle
  # is interpolated per frame.
  transition_states(date, transition_length = 10, state_length = 1) +
  scale_fill_viridis_c() +
  labs(
    title = "Effect of First COVID-19 Epicenter in US",
    subtitle = "Date: {closest_state}",
    caption = "NYT Coronavirus Data in the United States",
    fill = "% Increase of COVID-19"
  ) +
  theme_minimal() +
  theme(
    legend.position = c(.75, .2)
  )

# Render the animation (30 s at 5 fps).
animate(covid_ts, duration = 30, fps = 5)
# Static snapshot of the same map on a single date (2020-04-15).
# NOTE(review): the %% 3 filter is redundant once date is fixed; kept as-is.
combine_pop %>%
  filter(
    state_abbr %in% c("NY", "CT", "NJ", "PA", "MA", "VT", "NH", "DE", "MD"),
    date == as.Date("2020-04-15"),
    as.numeric(as.Date(date) - as.Date("2020-03-01")) %% 3 == 0
  ) %>%
  ggplot() +
  geom_sf(aes(fill = change_rate), color = alpha("white", 0.01)) +
  geom_sf(data = states, fill = alpha("white", 0), size = 1.5) +
  coord_sf(xlim = c(-76.5, -71), ylim = c(38.5, 43)) +
  scale_fill_viridis_c() +
  labs(
    title = "Effect of First COVID-19 Epicenter in US",
    # FIX: this plot is not animated, so the gganimate placeholder
    # "{closest_state}" copied from the animation above would have been
    # printed literally; show the actual snapshot date instead.
    subtitle = "Date: 2020-04-15",
    caption = "NYT Coronavirus Data in the United States",
    fill = "% Increase of COVID-19"
  ) +
  theme_minimal() +
  theme(
    legend.position = c(.75, .2)
  )
# Animated "bar race": the 10 counties with the largest weekly case growth
# rate on each date from 2020-07-10 onward.
covid_anim <- combine_pop %>%
  group_by(date) %>%
  top_n(10, change_rate) %>%
  # Break ties randomly so exactly one county occupies each rank 1-10.
  mutate(rank = rank(-change_rate, ties.method = "random")) %>%
  filter(rank <= 10, date >= as.Date("2020-07-10")) %>%
  mutate(county_name = paste(name, state_abbr, sep = ", ")) %>%
  select(fips, date, change_rate, rank, county_name) %>%
  ggplot(aes(x = rank, y = change_rate, fill = fips)) +
  geom_bar(stat = "identity") +
  # County label placed just left of the bar's base.
  geom_text(aes(label = county_name, y = -0.2), color = "black", hjust = 1) +
  # Bars fly in/out below the visible range when a county enters/leaves the top 10.
  enter_fly(x_loc = -11) +
  exit_fly(x_loc = -11) +
  transition_states(date, transition_length = 100, state_length = 1) +
  # Horizontal bars, rank 1 at the top.
  coord_flip() +
  scale_x_reverse() +
  ylim(c(-1.5, 12)) +
  theme_minimal() +
  theme(
    legend.position = "none",
    axis.text.y = element_blank(),
    axis.ticks.y = element_blank(),
    axis.title.y = element_blank()
  ) +
  labs(
    title = "Breakouts during {closest_state}",
    y = "Change over past week"
  )

# Render the animation (80 s at 10 fps).
animate(covid_anim, duration = 80, fps = 10)
# Same top-10-per-date table as the animation, kept as data for inspection.
covid_rank <- combine_pop %>%
  group_by(date) %>%
  top_n(10, change_rate) %>%
  mutate(rank = rank(-change_rate, ties.method = "random")) %>%
  filter(rank <= 10, date >= as.Date("2020-07-10")) %>%
  mutate(county_name = paste(name, state_abbr, sep = ", ")) %>%
  select(fips, date, change_rate, rank, county_name)

# Sanity check: no missing FIPS codes should survive the joins.
sum(is.na(covid_rank$fips))
|
26a2059c23c76cb6044790c617b0c3e81481fea1
|
07f2ed7b3565c8d6679da4084bbb39930221da20
|
/src/R/HSROC/man/HSROC.Rd
|
0890738fe77d482286625f903a4144ebc98ebcfb
|
[] |
no_license
|
bwallace/OpenMeta-analyst-
|
0fbc19f77018a72ce293e1c72e9b2c0a7eb3b615
|
e3147cab25e773251e7052f3bf27852ea41d522e
|
refs/heads/master
| 2021-01-21T04:37:15.296676
| 2020-07-28T00:52:29
| 2020-07-28T00:52:29
| 293,390
| 26
| 13
| null | 2020-07-28T00:52:30
| 2009-08-31T16:25:12
|
Python
|
UTF-8
|
R
| false
| false
| 13,289
|
rd
|
HSROC.Rd
|
\name{HSROC}
\alias{HSROC}
\title{A function for joint meta-analysis of sensitivity and specificity of a diagnostic test.}
\description{ This function is used to estimate the parameters of a hierarchical summary receiver operating characteristic (HSROC) model allowing for the reference standard to be possibly imperfect, and assuming it is conditionally independent from the test under evaluation. The estimation is carried out using a Gibbs sampler. }
\usage{
HSROC(data, iter.num, init = NULL, sub_rs=NULL,
first.run = TRUE, path=getwd(), refresh = 100,
prior.SEref=NULL, prior.SPref=NULL, prior_PI=c(0,1),
prior_LAMBDA = c(-3,3), prior_THETA = c(-1.5,1.5),
prior_sd_alpha = list(0,2,"sd"), prior_sd_theta = list(0,2,"sd"),
prior_beta = c(-0.75,0.75))
}
\arguments{
\item{data}{a matrix with the number of rows equal to the number of studies and 4 columns. Each row consists of the entries of the 2x2 table of the index test (i.e. test under evaluation) vs. the reference test reported in each study. The ordering of the columns is ++, +-, -+, --, where the first entry refers to the result of the test under evaluation and the second entry refers to the result of the reference test. }
\item{iter.num}{the number of iterations of the Gibbs sampler.}
\item{init}{a list of initial values. See details for further explanation.}
\item{sub_rs}{a list that describes the partition of the reference standards among the studies, if any. The first element of the list is the number of different reference standards used. Element 2 to \code{sub_rs[1]} are vectors of integers specifying the study numbers that used each reference test. See details.}
\item{prior.SEref}{a vector of values specifying the lower and upper limits of the sensitivity of the reference test(s) based on prior information. If there are multiple reference standards, the lower and upper limits are each specified as a vector of length \code{sub_rs[1]}. The default value is NULL, implying a single reference standard with perfect sensitivity. }
\item{prior.SPref}{a vector of values specifying the lower and upper limits of the specificity of the reference test(s) based on prior information. If there are multiple reference standards, the lower and upper limits are each specified as a vector of length \code{sub_rs[1]}. The default value is NULL, implying a single reference standard with perfect specificity. }
\item{first.run}{logical. If TRUE (default), the \code{gibbs.sampler} function is run for the first time. If FALSE, the function assumes k iterations where already run and it continues from where it left off, that is from iteration k+1.}
\item{path}{a character string pointing to the directory where the sample drawn at each iteration of the Gibbs sampler are to be stored. }
\item{refresh}{A positive integer defining the frequency at which the Gibbs sampler's progress will be displayed on the R GUI. The default is 100. }
\item{prior_PI}{a vector with 2 components specifying the minimum and maximum values for the prevalence in each study based on prior information. If unspecified, \eqn{Beta(1,1)}{Beta(1,1)} priors are used. }
\item{prior_LAMBDA}{a vector with 2 components specifying the minimum and maximum values for the difference in mean values (diagnostic accuracy) among disease positive and negative groups based on prior information. The default value is \code{c(-3,3)} implying a \eqn{U(-3,3)}{U(-3,3)}.}
\item{prior_THETA}{a vector with 2 components specifying the minimum and maximum values for the overall mean cut-off value for defining a positive test based on prior information. The default value is \code{c(-1.5,1.5)} implying a \eqn{U(-1.5,1.5)}{U(-1.5,1.5)}.}
\item{prior_sd_alpha}{a list with 3 components. The first 2 components are specifying the minimum and maximum values for the between study standard deviation in the difference in mean values of the disease positive and negative groups in the \eqn{i^{th}}{ith} study, \eqn{\alpha_i}{alpha_i}, based on prior information. The third component determine whether we want the prior to be on the standard deviation (sd) or the variance (v). The default value is \code{list(0,2,"sd")} implying a \eqn{U(0,2)}{U(0,2)} prior. }
\item{prior_sd_theta}{a list with 3 components. The first 2 components are specifying the minimum and maximum values for the between study standard deviation in the cut-off, \eqn{\theta_i}{theta_i}, in the \eqn{i^{th}}{ith} study based on prior information. The third component determine whether we want the prior to be on the standard deviation (s) or the variance (v). The default value is \code{list(0,2,"sd")} implying a \eqn{U(0,2)}{U(0,2)} prior. }
\item{prior_beta}{a vector with 2 components specifying the minimum and maximum values for the logarithm of the ratio of the standard deviation of test results among patients with and without the disease, based on prior belief. This parameter is assumed to be constant across studies. The default value is \code{c(-0.75,0.75)} implying a \eqn{U(-0.75,0.75)}{U(-0.75,0.75)}. If the argument is \code{NULL}, the function assumes a range of (-log( (LAMBDA.up/3) + 1 ) , log( (LAMBDA.up/3) + 1 ) ), where LAMBDA.up is the upper limit of \code{prior.LAMBDA}. }
}
\details{
Our HSROC model uses a probit link function and not the logit link function used in the HSROC model developed by Rutter and Gatsonis (2001).
The probability of a positive result on the index test for the \eqn{j^{th}}{jth} individual in the \eqn{i^{th}}{ith} study is given by :
\deqn{1 - \Phi( ( \theta_i - \alpha_i D_{ij} )exp(-\beta D_{ij}) ),}{1 - PHI( (theta_i - alpha_i D_{ij})*exp(-beta D_{ij})),}
while the probability of a negative result on the index test for the \eqn{j^{th}}{jth} individual in the \eqn{i^{th}}{ith} study is given by :
\deqn{\Phi( ( \theta_i - \alpha_i D_{ij} )exp(-\beta D_{ij}) ),}{PHI( (theta_i - alpha_i D_{ij})*exp(-beta D_{ij})),}
where \eqn{D_{ij}}{D_ij} = 0.5 if the \eqn{j^{th}}{jth} individual in the \eqn{i^{th}}{ith} study is disease positive and -0.5 if the \eqn{j^{th}}{jth} individual in the \eqn{i^{th}}{ith} study is disease negative.
When ranges are provided for \code{prior.SEref}, \code{prior.SPref} and \code{prior_PI} they are transformed into Beta prior distributions using the method described in \code{beta.parameter}.
If the argument \code{init} = \code{NULL}, the function will randomly generate initial values based on the prior information. Otherwise, the user can provide his own initial values for the within-study and between-study parameters and for the reference standards through the \code{init} argument, with the within-study, between study and reference standard initial values as the first, second and third element of \code{init}, respectively. Furthermore,
the within-study parameters must be a matrix-like object with each column being initial values for \eqn{\alpha_i}{alpha_i}, \eqn{\theta_i}{theta_i}, sensitivity of the test under evaluation \eqn{S_{1i}}{S1_i}, specificity of the test under evaluation \eqn{C_{1i}}{C1_i} and prevalence \eqn{\pi_i}{pi_i}.
The between-study parameters must be a vector of the following initial values : \eqn{\Theta}{THETA}, \eqn{\sigma_{\theta}}{sigma_theta}, \eqn{\Lambda}{LAMBDA}, \eqn{\sigma_{\alpha}}{sigma_alpha} and \eqn{\beta}{beta}. The initial values for the reference standard must be a 2 X \code{sub_rs[[1]]} matrix-like object.
The first row must be the initial values of the sensitivity of the reference standard, while the second row must correspond to initial values of the specificity of the reference standard. The ordering described above in the within-study, between-study and reference standard parameters must be preserved.
The first element of the list-object \code{sub_rs} corresponds to the number of different reference standards. The default value is 1. The number of additional elements will depend on the value of the first element. There must be as many additional elements in \code{sub_rs} as there are different reference standards. Assuming the studies are labelled 1, ..., N,
each of these additional elements must be a vector (possibly of length one) taking as their values the labelling of the corresponding studies sharing the same reference standard. For example, if we have 2 reference tests, the first one applied over studies 1-10 and the second one applied over studies 11-15, then the \code{sub_rs} list-argument should be of length 3 with the following elements : 2, 1:10, 11:15
The \code{path} argument points to the working directory where files written at each iteration of the Gibbs sampler will be saved. If no path is provided, the current working directory will be used
}
\value{
Text files with samples from the joint posterior distribution of the between-study parameters, within-study parameters and performance parameters of the reference standard(s) are created in the \code{path} directory. These results can be summarized using the \code{HSROCSummary} function.
The following files are also created and saved in the \code{path} directory :
\dQuote{Prior.information.txt}, lists the prior distributions.
\dQuote{Initial values.txt}, lists the initial values used. If the argument \code{init} = \code{NULL}, the initial value file is called \dQuote{Random Initial values.txt}.
A series of files listing the values of various parameters sampled in the last iteration of the Gibbs sampler as follows :
\dQuote{REstarting values.txt}, contains values of the within-study parameters (\eqn{\alpha_i}{alpha_i}, \eqn{\theta_i}{theta_i}, sensitivity of test under evaluation ( \eqn{S_{1i}}{S1_i} ), specificity of test under evaluation ( \eqn{C_{1i}}{C1_i} ) and prevalence ( \eqn{\pi_i}{pi_i} ) ).
\dQuote{REstarting values 2.txt}, contains values of the between-study parameters (\eqn{\Lambda}{LAMBDA}, standard deviation of \eqn{\alpha_i}{alpha_i}, ( \eqn{\sigma_{\alpha}}{sigma_alpha} ), \eqn{\Theta}{THETA}, standard deviation of \eqn{\theta_i}{theta_i} ( \eqn{\sigma_{\theta}}{sigma_theta }) and \eqn{\beta}{beta} ).
\dQuote{REstarting REFSTD.txt}, contains values of the sensitivity and specificity of the reference test.
\dQuote{REstart values index.txt}, lists the variable names in the 3 files described above.
}
\references{
N. Dendukuri, I. Schiller, L. Joseph and M. Pai \emph{Bayesian meta-analysis of the accuracy of a test for tuberculosis pleuritis in the absence of a gold-standard reference}. (Under review).
C. M. Rutter and C. A. Gatsonis. \emph{A hierarchical regression approach to meta-analysis of diagnostic accuracy evaluations}. Statistics in Medicine, 20(19):2865-2884, 2001.
}
\examples{
#===============================================================
#TO SET UP THE REFERENCE STANDARD
#===============================================================
#There were three different reference standards for the In.house dataset.
#The first reference standard was used in study 1 and 2,
#the second was used in studies 3 and 4 and the third in study 5 to 12.
REFSTD = list(3, 1:2, 3:4, 5:11)
#===============================================================
#TO SET UP DATA AND INITIAL VALUES
#===============================================================
data(In.house)
M = length(In.house[,1])
#Initial values for the within-study parameters
init.alpha = rep(2.5, M) ; init.theta = rep(1, M) ;
init.s1 = rep(0.5, M) ; init.c1 = rep(0.5, M) ;
init.pi = rep(0.5, M)
#Initial values for the between-study parameters
init.THETA = 1 ; init.sd.theta = 0.5 ;
init.LAMBDA = 2.5 ; init.sd.alpha = 0.5 ;
init.beta = 0 ;
#Initial values for the reference standard sensitivities and specificities
init.s2 = rep(0.5, REFSTD[[1]]) ; init.c2 = rep(0.5, REFSTD[[1]])
#The ordering of the initial values is important!
init1 = cbind(init.alpha, init.theta, init.s1, init.c1, init.pi)
init2 = c(init.THETA, init.sd.theta, init.LAMBDA, init.sd.alpha, init.beta)
init3 = rbind(init.s2, init.c2)
init = list(init1, init2, init3)
#===============================================================
#TO PROVIDE PRIOR INFORMATION
#===============================================================
S2.a = c(0.2, 0.2, 0.7) ; S2.b = c(0.6, 0.7, 0.9)
C2.a = rep(0.9, 3) ; C2.b = rep(1, 3)
#===============================================================
#TO RUN GIBBS SAMPLER
#===============================================================
\dontrun{
set.seed(10)
estimates = HSROC(data=In.house, init=init, iter.num=5000,
prior.SEref=c(S2.a,S2.b), prior.SPref=c (C2.a,C2.b), sub_rs=REFSTD)
#Putting prior information on sigma_alpha^2 (sigma_alphs squared)
#instead of sigma_alpha
set.seed(10)
estimates = HSROC(data=In.house, init=init, iter.num=5000,
prior.SEref=c(S2.a,S2.b), prior.SPref=c (C2.a,C2.b),
sub_rs=REFSTD, prior_sd_alpha = list(0,2,"v"))
#Letting the function select randomly its own initial values
set.seed(10)
estimates = HSROC(data=In.house, iter.num=5000,
prior.SEref=c(S2.a,S2.b), prior.SPref=c (C2.a,C2.b),
sub_rs=REFSTD)
}
}
\keyword{models}
|
72977d25a4cd1ab10f336a376869e94d705de4b7
|
2ae4d59d4dfb7d0cd96f729aa03dba089f1fe024
|
/titanic_analysis.R
|
f815bdd9dc18535da18179ded8434baaea5bf75d
|
[] |
no_license
|
data-better/statistics
|
7f854fc702611238a08db95fb4a124da521ea30a
|
154ef9f62d8fcf4a2221db501ba01a496200a0a6
|
refs/heads/main
| 2023-05-31T08:18:13.406203
| 2023-05-10T06:45:29
| 2023-05-10T06:45:29
| 356,442,068
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,157
|
r
|
titanic_analysis.R
|
# Load required packages
library(shiny)
library(ggplot2)
library(dplyr)
# Load the Titanic dataset from a public GitHub mirror
titanic <- read.csv("https://raw.githubusercontent.com/datasciencedojo/datasets/master/titanic.csv")
# Build the Shiny app: UI definition
ui <- fluidPage(
# App title
titlePanel("타이타닉 데이터 분석"),
# Sidebar layout with input controls on the left, plot on the right
sidebarLayout(
# Sidebar panel with the three filter controls
sidebarPanel(
# Slider input: select the age range to keep
sliderInput("age", "나이 범위:", min = 0, max = 80, value = c(0, 80)),
# Radio buttons: select sex (options are "all" / "male" / "female" in Korean)
radioButtons("sex", "성별:", choices = c("전체", "남성", "여성"), selected = "전체"),
# Checkboxes: select survival status ("survived" / "died" in Korean)
checkboxGroupInput("survived", "생존 여부:", choices = c("생존", "사망"), selected = c("생존", "사망"))
),
# Main panel
mainPanel(
# Plot output: the filtered boxplot is rendered here
plotOutput("plot")
)
)
)
# Server logic: reactively filter the dataset and render the plot
server <- function(input, output) {
# Reactive expression that recodes columns to Korean labels and applies
# all three filters; re-evaluates whenever an input changes.
filtered_data <- reactive({
data <- titanic
# Recode Sex to Korean labels. "female" must be replaced before "male"
# because the substring "male" occurs inside "female".
data$Sex <- gsub("female", "여성", data$Sex)
data$Sex <- gsub("male", "남성", data$Sex)
# Recode Survived (0/1) to Korean "died"/"survived" labels
data$Survived <- gsub(0, "사망", data$Survived)
data$Survived <- gsub(1, "생존", data$Survived)
data$Pclass <- as.factor(data$Pclass)
# Filter by age range. NOTE: rows with NA Age are dropped here, since
# dplyr::filter() keeps only rows where the condition is TRUE.
data <- data %>% filter(Age >= input$age[1] & Age <= input$age[2])
# Filter by sex unless "전체" (all) is selected
if (input$sex != "전체") {
data <- data %>% filter(Sex == input$sex)
}
# Filter by survival status (empty selection yields an empty data set)
data <- data %>% filter(Survived %in% input$survived)
data
})
# Render a boxplot of Fare by passenger class, colored by survival status
output$plot <- renderPlot({
# Use the filtered data
data <- filtered_data()
# Build the ggplot object
p <- ggplot(data, aes(x = Pclass, y = Fare, color = Survived)) + geom_boxplot()
# Add the (Korean) plot title
p <- p + labs(title = "타이타닉 데이터: 객실 등급과 운임에 따른 생존 여부")
p
})
}
# Run the app
shinyApp(ui = ui, server = server)
|
8902f0030ddf165f77382226079615fed586e021
|
b1d2dba7dac3185e68282576542fe13e67018254
|
/rCode/scanExport.R
|
06db8d24ec46324368a3ab12ce26ba501f668482
|
[] |
no_license
|
hsuanyuchen1/closestPoint
|
4b78d6ef8f02809145ffa28b75930439e43caeb6
|
b23529487ff24bd6688d5daa4a9b8023aa4848dc
|
refs/heads/main
| 2023-01-03T12:48:13.104999
| 2020-10-27T08:02:47
| 2020-10-27T08:02:47
| 307,617,470
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,898
|
r
|
scanExport.R
|
library(data.table)
library(magrittr)
library(geosphere)
library(sf)
library(dplyr)
source("closestPoint/rCode/findMinDist.R")
#sourceData is the 25mx25m CSV with whole taiwan data
# Process one exported scan CSV: compute nearest-point results against the
# 25m x 25m whole-Taiwan grid (sourceData), style them by RSRP signal level,
# write CSV + MapInfo TAB outputs, zip them, and track success/failure by
# moving the input file between directories.
#
# Args:
#   targetDir:  path to the input CSV to process.
#   sourceData: the 25mx25m CSV with whole-Taiwan data (see header comment).
#   tempDir:    scratch directory for intermediate outputs (emptied at the end).
#   outDir:     destination for the zipped result.
#   successDir: where the input file ends up if processing completed.
#   failDir:    where the input file sits while processing (and stays on error).
#
# NOTE(review): paste(failDir, basename(targetDir)) uses the default sep = " ",
# so the destination path contains a literal space between directory and file
# name. It is used consistently for copy/remove, so the flow still works, but
# file.path()/paste0() was probably intended — confirm before changing, since
# downstream tooling may expect the current on-disk names.
# NOTE(review): 1:nrow(target) breaks on an empty input file (yields c(1, 0));
# seq_len(nrow(target)) would be the safe form.
scanExport = function(targetDir, sourceData, tempDir, outDir, successDir, failDir){
target <- fread(targetDir)
# Move the input file to failDir first; it is only promoted to successDir
# at the very end, so a crash mid-way leaves it marked as failed.
file.copy(targetDir, paste(failDir, basename(targetDir)))
file.remove(targetDir)
# One findMinDist() result row per input row, bound into a single data.table
results <- lapply(1:nrow(target), function(x) findMinDist(sourceData, target[x]) ) %>% rbindlist()
# Promote to an sf object (WGS84) so it can be written as a MapInfo file
resultSF <- st_as_sf(results, coords = c("lon", "lat"), crs=4326)
# Map average RSRP (dBm) bands to MapInfo point-symbol styles:
# >= -85 blue, >= -95 green, >= -105 yellow, >= -115 orange, else red
resultSF$OGR_STYLE <- case_when(resultSF$AVGRSRP >= -85 ~ "SYMBOL(a:0,c:#0000FF,s:12pt,id:ogr-sym-4)",
resultSF$AVGRSRP >= -95 ~ "SYMBOL(a:0,c:#008000,s:12pt,id:ogr-sym-4)",
resultSF$AVGRSRP >= -105 ~ "SYMBOL(a:0,c:#D2D250,s:12pt,id:ogr-sym-4)",
resultSF$AVGRSRP >= -115 ~ "SYMBOL(a:0,c:#FFBE78,s:12pt,id:ogr-sym-4)",
resultSF$AVGRSRP < -115 ~ "SYMBOL(a:0,c:#FF0000,s:12pt,id:ogr-sym-4)")
fileName <- tools::file_path_sans_ext(basename(targetDir))
# Write intermediate files to the temp folder
fwrite(results, paste0(tempDir, fileName, "_result.csv"))
st_write(resultSF, paste0(tempDir, fileName, "_result.TAB"), driver = "MapInfo File", quiet=T)
# Zip CSV and TAB to outDir (requires 7-Zip at the hard-coded Windows path)
zip(paste0(outDir, fileName,"_result.zip"),list.files(tempDir, full.names = T),
flags = " a -tzip", zip = "C:\\Program Files\\7-Zip\\7z")
# Remove all intermediate files from the temp folder
file.remove(list.files(tempDir, full.names = T))
# Move the input file to the success folder if everything went well
file.copy(paste(failDir, basename(targetDir)), paste(successDir, basename(targetDir)))
file.remove(paste(failDir, basename(targetDir)))
}
|
93cb3b25b369a9da268c8c511c7eb8591e12ea6a
|
91966e8ddddb3d5b542330ed6232eb89a83ea91a
|
/plot4.R
|
fcf7b5888b074436bc5505a8c5da84cd5757baea
|
[] |
no_license
|
owl775/ExData_Plotting1
|
b29dbe9914d14bc6cb05c0d1bb4c82f01b3e6b34
|
9ac69516d3b2e7ac1f6c340cc5fdc2f80631dda8
|
refs/heads/master
| 2021-05-17T01:45:26.969324
| 2020-03-27T20:34:33
| 2020-03-27T20:34:33
| 250,562,390
| 0
| 0
| null | 2020-03-27T14:55:13
| 2020-03-27T14:55:12
| null |
UTF-8
|
R
| false
| false
| 1,566
|
r
|
plot4.R
|
library(dplyr)
library(lubridate)
# Read the household power consumption data; '?' marks missing values
data_file = 'household_power_consumption.txt'
power_consumption <- read.csv(data_file,sep=';', header=TRUE, na.strings = "?")
# Parse the Date column (day/month/year format)
power_consumption$Date <- as.Date(power_consumption$Date, format="%d/%m/%Y")
# Keep only the two days of interest (1-2 Feb 2007)
start_date <- '2007-02-01'
stop_date <-'2007-02-02'
pc_subset <- filter(power_consumption, Date==start_date | Date==stop_date)
# Combine Date and Time into a single datetime column for the x-axes
pc_subset$DT <- ymd(pc_subset$Date) + hms(pc_subset$Time)
pc_subset$which_day <- wday(pc_subset$DT, label=TRUE)
# Output file name
plot_name = 'plot4.png'
# Open the PNG device at 480x480 pixels; all four panels are drawn into it
png(plot_name, width = 480, height = 480)
# 2x2 grid of panels
par(mfrow = c(2, 2))
# Panel 1 (top-left): Global Active Power over time
s1_y_label <- 'Global Active Power'
plot(pc_subset$DT , pc_subset$Global_active_power, type="l", xlab=" ", ylab=s1_y_label)
# Panel 2 (top-right): Voltage over time
s2_y_label <- 'Voltage'
plot(pc_subset$DT , pc_subset$Voltage, type="l", ylab=s2_y_label, xlab="datetime")
# Panel 3 (bottom-left): all three sub-metering series on one axis
s3_y_label <- 'Energy sub metering'
with(pc_subset, plot(DT, Sub_metering_1, type="l", xlab=" ", ylab=s3_y_label, col="black"))
with(pc_subset, lines(DT, Sub_metering_2, col="red"))
with(pc_subset, lines(DT, Sub_metering_3, col="slateblue"))
legend("topright",lty=1, col = c("black","red","slateblue") , legend= c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), box.lty=0)
# Panel 4 (bottom-right): Global reactive power over time
s4_y_label <- 'Global_reactive_power'
plot(pc_subset$DT , pc_subset$Global_reactive_power, type="l", ylab=s4_y_label, xlab="datetime")
# Close the device so the PNG is written to disk
dev.off()
|
1d6b270304fbb4ad716425da0f0cc2a90e777b22
|
c8c610879e88ba7d6dd92c98ad13877b9d1979f3
|
/sql_r/ar/coverage/insertIndirectPolygons.R
|
c09d8c0242eb9492deb5c46b51b84609aa4ce89d
|
[] |
no_license
|
Telefonica/rural-planner
|
7c14481cec0ba17de9d6d1a3637d41de92875cfe
|
c70226691ebeb2cec9591baf0df09b2f5f8e162e
|
refs/heads/master
| 2022-11-27T08:38:50.751388
| 2020-08-13T15:56:34
| 2020-08-13T15:56:34
| 274,978,867
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,362
|
r
|
insertIndirectPolygons.R
|
insertIndirectPolygons <- function(schema, indirect_polygons_table, infrastructure_table){
  # Aggregate per-operator 2G/3G/4G coverage polygons from the infrastructure
  # table and insert one row per operator (Claro, Personal, Movistar) into the
  # indirect-polygons table, unioning and validating geometries with PostGIS.
  #
  # Args:
  #   schema: database schema holding both tables.
  #   indirect_polygons_table: destination table name.
  #   infrastructure_table: source table with per-site coverage geometries.
  #
  # Returns: invisibly, the result of executing the INSERT statement.
  #
  # NOTE(review): connection settings (dbname, host, port, user, pwd) are read
  # from the enclosing/global environment, as in the original code — confirm
  # they are always defined before this is called.
  # NOTE(review): schema/table names are pasted directly into the SQL string,
  # so only trusted identifiers must be passed in.
  drv <- dbDriver("PostgreSQL")
  con <- dbConnect(drv, dbname = dbname,
                   host = host, port = port,
                   user = user, password = pwd)
  # Fix: register the disconnect as an exit handler so the connection is
  # released even when the query fails (the original only disconnected on
  # the success path, leaking the connection on error).
  on.exit(dbDisconnect(con), add = TRUE)
  query <- paste0("INSERT INTO ",schema,".", indirect_polygons_table, "
(SELECT source as operator, (ST_Union(ST_MakeValid(coverage_area_2g))) as geom_2g,
(ST_Union(ST_MakeValid(coverage_area_3g))) as geom_3g, (ST_Union(ST_MakeValid(coverage_area_4g))) as geom_4g
FROM (SELECT coverage_area_2g, coverage_area_3g, coverage_area_4g,
CASE WHEN source IN ('CLARO','PERSONAL') THEN 'IN SERVICE' else in_service END AS in_service,
CASE WHEN source='CLARO' THEN 'Claro'
WHEN source='PERSONAL' THEN 'Personal'
WHEN source IN ('TASA','TASA_FIXED') THEN 'Movistar' else source END AS source
FROM ",schema,".", infrastructure_table, " ) i
WHERE in_service IN ('IN SERVICE', 'AVAILABLE') AND source IN ('Claro', 'Personal', 'Movistar')
GROUP BY source)")
  # dbGetQuery is kept for compatibility with the surrounding codebase even
  # though this is a statement, not a query (DBI's dbExecute would be the
  # modern choice).
  invisible(dbGetQuery(con, query))
}
|
76e8c49b800e27a336d340d71527b7e0fb6da12b
|
22d316e4dda53044ae7a6389c699f06baf673adf
|
/learning/MasterBlaster.R
|
3d0bf8203ce0e5ed1275f10456e791e77a9e0351
|
[] |
no_license
|
sujaykhandekar/R_check
|
e454b396c8b354ce89086a3dde8e6f1a29179b13
|
00fa4571b93a0ffe63f0d685d2163d815aedb0ca
|
refs/heads/master
| 2023-07-02T01:10:46.521824
| 2021-08-11T03:16:39
| 2021-08-11T03:16:39
| 390,935,115
| 0
| 1
| null | 2021-08-05T17:25:37
| 2021-07-30T04:55:02
|
R
|
UTF-8
|
R
| false
| false
| 888
|
r
|
MasterBlaster.R
|
# Author: Jitender Aswani, Co-Founder @datadolph.in
# Date: 3/15/2013
# Copyright (c) 2011, under the Creative Commons Attribution-NonCommercial 3.0 Unported (CC BY-NC 3.0) License
# For more information see: https://creativecommons.org/licenses/by-nc/3.0/
# All rights reserved.
#
# Reads the Sachin Tendulkar test-cricket records CSV and serializes basic
# table metadata (dimensions plus per-column descriptors) to a JSON string.
library("RJSONIO")

# NOTE: the original script called rm(list = ls()) here; wiping the caller's
# global environment is a surprising side effect for a script and has been
# removed.

# Read SachinTestRecords.csv
mb <- read.csv("datasets/SachinTestRecords.csv", as.is = TRUE, header = TRUE,
               stringsAsFactors = FALSE, strip.white = TRUE)

# Basic table dimensions and per-column classes
nRows <- nrow(mb)
nCols <- ncol(mb)
cNames <- colnames(mb)
cClass <- sapply(mb, class)

# One descriptor (name, class, sample values) per column, for up to the first
# five columns. Fix: the original accumulated these with
#   iCols <- list(iCols, list(...))
# which nests each new descriptor one level deeper (a pair chain) instead of
# appending to a flat list; lapply() builds the intended flat list directly.
# min(5L, nCols) also keeps the script from indexing past narrow tables.
iCols <- lapply(seq_len(min(5L, nCols)), function(i) {
  list(name = cNames[i], class = class(mb[, i]), sample = head(mb[, i]))
})

metaData <- list(nRows = nRows, nCols = nCols, iCols)
jsonString <- toJSON(metaData)
|
42d08f8b8e862526f4bb913162759bc8db57ba09
|
a9729df11f1bf01a18d8a339a2915ce758118bc8
|
/man/box_read.Rd
|
058edd68022c26a6abd7ad2e0555c19e7f6919d2
|
[
"MIT"
] |
permissive
|
jilldwright56/boxr
|
ce5a651911d1aee50367aac9c9fa882ab4c91d7d
|
b81a6f7e821cb968788ccbbe45fbde6ba6e81705
|
refs/heads/master
| 2022-12-30T05:30:35.581815
| 2020-10-14T00:10:00
| 2020-10-14T00:10:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 4,260
|
rd
|
box_read.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/boxr_read.R
\name{box_read}
\alias{box_read}
\alias{box_read_csv}
\alias{box_read_tsv}
\alias{box_read_json}
\alias{box_read_excel}
\alias{box_read_rds}
\title{Read an R object from a Box file}
\usage{
box_read(
file_id,
type = NULL,
version_id = NULL,
version_no = NULL,
read_fun = rio::import,
...
)
box_read_csv(file_id, ...)
box_read_tsv(file_id, ...)
box_read_json(file_id, ...)
box_read_excel(file_id, ...)
box_read_rds(file_id, ...)
}
\arguments{
\item{file_id}{\code{numeric} or \code{character}, file ID at Box.}
\item{type}{\code{character},
\href{http://en.wikipedia.org/wiki/Internet_media_type}{MIME type}
used to override the content type returned by the server.}
\item{version_id}{\code{character} or \code{numeric}, the \code{version_id} of the file.}
\item{version_no}{\code{numeric}, version of the file you'd like to download
(starting at 1).}
\item{read_fun}{\code{function}, used to read (parse) the content into R; for \code{box_read()}
the default function is \code{\link[rio:import]{rio::import()}}; the specific helpers
each use a different function directly.}
\item{...}{Other arguments passed to \code{read_fun}.}
}
\value{
Object returned by function \code{read_fun}.
}
\description{
These functions are used to download a Box file, specified by \code{file_id}, then
attempt to parse its contents into memory as an R object. For
example, you may wish to read a Box CSV file as a \code{data.frame}.
}
\details{
This is a two-step process. The first is to download the contents
of the file, the second is to parse those contents into an R object.
The default parsing-function is \code{\link[rio:import]{rio::import()}}.
In addition to \code{box_read()}, some specific helpers are
provided:
\describe{
\item{\code{box_read_csv()}}{parse a remote CSV file into a \code{data.frame}. Default
read-function is \code{\link[rio:import]{rio::import()}} with \code{format = "csv"}, which uses \code{\link[data.table:fread]{data.table::fread()}} if available,
and \code{utils::read.csv()} if not. Pass the argument \code{fread = FALSE} to \code{...}
to always use \code{utils::read.csv()}.}
\item{\code{box_read_tsv()}}{parse a remote TSV file into a \code{data.frame}. Default
read-function is \code{\link[rio:import]{rio::import()}} with \code{format = "tsv"}, which uses \code{\link[data.table:fread]{data.table::fread()}} if available,
and \code{utils::read.delim()} if not. Pass the argument \code{fread = FALSE} to \code{...}
to always use \code{utils::read.delim()}.}
\item{\code{box_read_json()}}{parse a remote JSON file into a R object. Default
read-function is \code{\link[jsonlite:fromJSON]{jsonlite::fromJSON()}}.}
\item{\code{box_read_excel()}}{parse a remote Microsoft Excel file into a \code{data.frame}. Default
read-function is \code{\link[rio:import]{rio::import()}} with \code{format = "excel"}, which uses \code{\link[readxl:read_excel]{readxl::read_excel()}} by default.
Pass the argument \code{readxl = FALSE} to \code{...} to use \code{\link[openxlsx:read.xlsx]{openxlsx::read.xlsx()}} instead.}
\item{\code{box_read_rds()}}{parse an RDS file into a R object. Uses \code{\link[=readRDS]{readRDS()}}.}
}
}
\section{rio's import() and JSON files}{
In rio (0.5.18) there was a change in how JSON files are processed by
\code{\link[rio:import]{rio::import()}}, a non-\code{data.frame} object stored in JSON is no longer coerced
into a \code{data.frame}. The old behavior would produce unexpected results or fatal errors
if the stored object was not a \code{data.frame}. The new behavior is closer to that
of the underlying function \code{\link[jsonlite:fromJSON]{jsonlite::fromJSON()}} and similar to the behavior for RDS files.
In keeping with the spirit of \code{jsonlite}, \code{box_read_json()} has been
modified to call \code{jsonlite::fromJSON()} directly, which by-passes the old
"undesirable" behavior of \code{rio} (< 0.5.18). If you are using the current CRAN
release of \code{rio} (0.5.16) you should use \code{\link[jsonlite:read_json]{jsonlite::read_json()}} to avoid these issues.
}
\seealso{
\code{\link[=box_dl]{box_dl()}}, \code{\link[=box_save]{box_save()}}, \code{\link[=box_source]{box_source()}}
}
|
5f7fc9c06d9ca364e8c53afcca5454424b94c422
|
fce81b75022c9153389ea279477c323e15d12926
|
/datacommons/tests/testthat/test-popobs.R
|
1aac5bad27d9b31fb46b0bb652caf3eb34a2f1e4
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
datacommonsorg/api-r
|
5e9feea3efbbdd7333a84edc525156cae1ca7403
|
f1c835dd3d8fe57183d3f441c04682b0e2f20a21
|
refs/heads/master
| 2021-08-17T17:39:32.734867
| 2020-07-06T05:17:19
| 2020-07-06T05:17:19
| 201,317,967
| 2
| 4
|
Apache-2.0
| 2020-07-06T05:17:21
| 2019-08-08T18:46:32
|
HTML
|
UTF-8
|
R
| false
| false
| 5,794
|
r
|
test-popobs.R
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
context("Data Commons Node API (Populations and Observations Extension) - R Client")
# Integration test: get_populations() should resolve population dcids for
# atomic-vector, tibble, and data.frame inputs, and agree across input types.
# Hits the live Data Commons API, so it is skipped when the Python client
# ('dcpy') is unavailable.
test_that("get_populations gets populations", {
skip_if_no_dcpy()
# INPUT atomic vector of the dcids of California, Kentucky, and Maryland.
stateDcids <- c('geoId/06', 'geoId/21', 'geoId/24')
# Get the population dcids for each state.
femalePops <- get_populations(stateDcids, 'Person', list(gender = 'Female'))
malePops <- get_populations(stateDcids, 'Person', list(gender = 'Male'))
# One entry per state, keyed by state dcid; each value is a single
# population dcid of the form "dc/p/...".
expect_equal(length(femalePops), 3)
expect_setequal(names(femalePops), list("geoId/06", "geoId/21", "geoId/24"))
expect_equal(length(femalePops[[1]]), 1)
expect_match(femalePops[[1]], "dc/p/.*")
expect_equal(length(malePops), 3)
expect_setequal(names(malePops), list("geoId/06", "geoId/21", "geoId/24"))
expect_equal(length(malePops[[1]]), 1)
expect_match(malePops[[1]], "dc/p/.*")
# INPUT tibble of the dcids of California, Kentucky, and Maryland; and random column.
# The extra 'rand' column verifies that select() isolates the dcid column.
df <- tibble(countyDcid = c('geoId/06', 'geoId/21', 'geoId/24'),
rand = c(1, 2, 3))
# Get the population dcids for each state.
femalePopsTibble <- get_populations(select(df, countyDcid), 'Person',
list(gender = 'Female'))
malePopsTibble <- get_populations(select(df, countyDcid), 'Person',
list(gender = 'Male'))
# Tibble input must yield the same populations as the atomic-vector input.
expect_setequal(femalePopsTibble, femalePops)
expect_setequal(malePopsTibble, malePops)
# INPUT data frame
df <- data.frame(countyDcid = c('geoId/06', 'geoId/21', 'geoId/24'),
rand = c(1, 2, 3))
# Using df$col as input to get_populations will error for data frames.
# While it will work for tibbles, we encourage using select(df, col).
expect_error(get_populations(df$countyDcid, 'Person',
list(gender = 'Female')))
# Correct way to select column
expect_setequal(get_populations(select(df, countyDcid), 'Person',
list(gender = 'Female')),
as.array(unlist(femalePopsTibble)))
expect_setequal(get_populations(select(df, countyDcid), 'Person',
list(gender = 'Male')),
as.array(unlist(malePopsTibble)))
})
# Negative test: with an invalid API key, get_populations() must surface the
# server's HTTP 400 as an error. The real key is stashed and restored so
# subsequent tests keep working.
test_that("get_populations fails with fake API key", {
skip_if_no_dcpy()
stateDcids <- c('geoId/06', 'geoId/21', 'geoId/24')
# Stash the real key before swapping in the bogus one.
tmp <- Sys.getenv("DC_API_KEY")
set_api_key("fakekey")
expect_error(get_populations(stateDcids, 'Person', list(gender = 'Female')),
".*Response error: An HTTP 400 code.*")
# Restore the original key.
set_api_key(tmp)
})
# Integration test: get_observations() should return 2016 ACS 5-year counts
# for populations resolved via get_populations(), for both scalar and
# tibble-column inputs. Hits the live Data Commons API, so it is skipped when
# the Python client ('dcpy') is unavailable.
test_that("get_observations gets data", {
  skip_if_no_dcpy()
  # INPUT character vector
  # Set the dcid to be that of Santa Clara County.
  sccDcid <- 'geoId/06085'
  # Get the population dcids for Santa Clara County.
  femalePops <- get_populations(sccDcid, 'Person', list(gender = 'Female'))
  malePops <- get_populations(sccDcid, 'Person', list(gender = 'Male'))
  # Each result is a single "dc/p/..." population dcid keyed by the county.
  expect_equal(length(femalePops), 1)
  expect_identical(names(femalePops), "geoId/06085")
  expect_equal(length(femalePops[[1]]), 1)
  expect_match(femalePops[[1]], "dc/p/.*")
  expect_equal(length(malePops), 1)
  expect_identical(names(malePops), "geoId/06085")
  expect_equal(length(malePops[[1]]), 1)
  expect_match(malePops[[1]], "dc/p/.*")
  # Fetch the 2016 measured counts for those populations.
  femaleCount <- get_observations(unlist(femalePops), 'count', 'measured_value',
                                  '2016', measurement_method = 'CensusACS5yrSurvey')
  maleCount <- get_observations(unlist(malePops), 'count', 'measured_value',
                                '2016', measurement_method = 'CensusACS5yrSurvey')
  # Sanity bounds rather than exact values, since the API data can be revised.
  expect_gt(as.numeric(femaleCount), 500000)
  expect_gt(as.numeric(maleCount), 500000)
  # INPUT tibble with the dcids of California, Kentucky, and Maryland.
  df <- tibble(countyDcid = c('geoId/06', 'geoId/21', 'geoId/24'),
               rand = c(1, 2, 3))
  # Get the population dcids for each state.
  df$femalePops <- get_populations(select(df, countyDcid), 'Person',
                                   list(gender = 'Female'))
  df$malePops <- get_populations(select(df, countyDcid), 'Person',
                                 list(gender = 'Male'))
  # Flatten the list-columns produced above.
  # (Fix: use <- for assignment, consistent with the rest of the file.)
  df <- unnest(df)
  # Get observations
  df$femaleCount <- get_observations(select(df, femalePops), 'count',
                                     'measured_value', '2016',
                                     measurement_method = 'CensusACS5yrSurvey')
  df$maleCount <- get_observations(select(df, malePops), 'count',
                                   'measured_value', '2016',
                                   measurement_method = 'CensusACS5yrSurvey')
  # Row 2 is Kentucky; both sexes should exceed two million people.
  expect_gt(as.numeric(df$femaleCount[2]), 2000000)
  expect_gt(as.numeric(df$maleCount[2]), 2000000)
})
# Negative test: with an invalid API key, get_observations() must surface the
# server's HTTP 400 as an error. The real key is stashed and restored so
# subsequent tests keep working.
test_that("get_observations fails with fake API key", {
skip_if_no_dcpy()
femalePops <- get_populations('geoId/06085', 'Person', list(gender = 'Female'))
# Stash the real key before swapping in the bogus one.
tmp <- Sys.getenv("DC_API_KEY")
set_api_key("pseudokey")
expect_error(get_observations(unlist(femalePops), 'count', 'measured_value',
'2016', measurement_method = 'CensusACS5yrSurvey'),
".*Response error: An HTTP 400 code.*")
# Restore the original key.
set_api_key(tmp)
})
|
a30a6d7430bd6a92758449d6112f64d5dde6d22a
|
1d5128b54975c29b64d6f89ab2addf0573119d58
|
/RegionPlot/demo/plotMCV.R
|
f31bb0963b634015726699faa57a100777a13fe9
|
[] |
no_license
|
Feng-Zhang/RegionPlot
|
46d184eeef1fd0e096ae1409101f3740b84d54c1
|
f959b882d80e58ef271ffe7a006714a6329f9927
|
refs/heads/master
| 2021-01-23T00:15:53.799614
| 2014-10-11T01:53:55
| 2014-10-11T01:53:55
| 23,922,954
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 95
|
r
|
plotMCV.R
|
# Demo: render the regional association plot for the MCV_pval dataset shipped
# with the package (presumably mean-corpuscular-volume GWAS p-values — confirm
# against the package's data documentation).
data(MCV_pval)
# High-resolution output: 3200x2200 px at 300 dpi.
png("MCV.png", width=3200, height=2200,res=300)
plotRegion(MCV_pval)
# Close the device so the PNG is flushed to disk.
dev.off()
|
647a25c70ab4fb885e7a5660250c832e489e07ea
|
e489df05b30305693071d9a070bfa954c4b63608
|
/man/xbrlDoAllFun.Rd
|
2d6065201e439401a16a05910e61269862b99b87
|
[] |
no_license
|
kossal/fundfi
|
fb2053925d8f6a7a2efce15c0a1c351247cc11b3
|
5905a9f5cd92af05af7242c951092b21b8d775bb
|
refs/heads/master
| 2020-07-15T00:46:20.893168
| 2020-04-05T21:35:04
| 2020-04-05T21:35:04
| 205,440,019
| 0
| 0
| null | 2020-04-05T21:35:05
| 2019-08-30T18:53:04
|
R
|
UTF-8
|
R
| false
| true
| 868
|
rd
|
xbrlDoAllFun.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xbrlDoAllFun.R
\name{xbrlDoAllFun}
\alias{xbrlDoAllFun}
\title{A mirror function of XBRL}
\usage{
xbrlDoAllFun(
file.inst,
cache.dir = "xbrl.Cache",
prefix.out = NULL,
verbose = FALSE,
delete.cached.inst = TRUE
)
}
\value{
A XBRL list of data.frames
}
\description{
A mirror function to package XBRL function xbrlDoAll
}
\details{
This function is almost exactly equal to the function xbrlDoAll of the
XBRL package. The reason it exists is that xbrlDoAll does not work with
https-type links; this is fixed here by editing the fixFileName function
inside the XBRL function.
On Linux I was able to build it, but it seems I am not able to do so on
Windows because I do not know how to make libxml accessible to Rtools. For
the time being, this is a workaround.
}
\keyword{XBRL}
|
8bae8e476c813cb4b093127e38007360578ee6e9
|
3a5a3b4c51213b711e76a73c8cefae1b84c201ed
|
/R/gpHistPredict.R
|
91b2e99ea89a9f4f7e53583b8d8e27e561cf8ec9
|
[] |
no_license
|
dennisthemenace2/gpHist
|
88c616f9e3f47cd0cdeee30a9b67ed5865e2c27f
|
51cae73bc157ac52eb48674d90d36adb44b0de62
|
refs/heads/master
| 2022-02-08T16:26:49.452782
| 2019-08-10T12:31:03
| 2019-08-10T12:31:03
| 72,182,422
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,177
|
r
|
gpHistPredict.R
|
## gpHistPredict
## Predict responses for new inputs from a fitted histogram-intersection GP.
##
## Arguments:
##   GP     - fitted model list; must carry $orders and $alpha (one alpha
##            coefficient per training row).
##   X      - training input matrix the GP was fitted on.
##   x_pred - matrix of new inputs (same number of columns as X).
##
## On any invalid input a diagnostic is printed and NaN is returned, matching
## the package's error convention. Otherwise returns the numeric vector of
## predictions computed by the compiled routine "CgpHistPredict".
gpHistPredict <- function(GP, X, x_pred) {
  # Guard: the model object must be a list carrying the fitted pieces.
  if (!is.list(GP) || is.null(GP$orders) || is.null(GP$alpha)) {
    print("gpHistPredict(): Object appears not to be a GP!")
    return(NaN)
  }
  # Guard: both data arguments must be matrices.
  if (!(is.matrix(X) && is.matrix(x_pred))) {
    print("gpHistPredict(): input must be matrices!")
    return(NaN)
  }
  # Guard: training and prediction data need identical feature counts.
  if (ncol(X) != ncol(x_pred)) {
    print("gpHistPredict(): GP data and prediction data does not match!")
    return(NaN)
  }
  # Guard: one alpha coefficient per training sample.
  if (nrow(GP$alpha) != nrow(X)) {
    print("gpHistPredict(): GP object and data X dimension missmatch!")
    return(NaN)
  }

  # Zero-filled column vector the C routine overwrites with predictions.
  n_pred <- nrow(x_pred)
  pred_buf <- matrix(0, nrow = n_pred, ncol = 1)

  c_out <- .C("CgpHistPredict",
              result = as.double(pred_buf),
              mat1 = as.double(X),
              numRows = as.integer(nrow(X)),
              numCols = as.integer(ncol(X)),
              mat2 = as.double(x_pred),
              numRows2 = as.integer(nrow(x_pred)),
              numCols2 = as.integer(ncol(x_pred)),
              mat3 = as.double(GP$alpha),
              orders = as.double(GP$orders))

  c_out$result
}
|
97e8d113c6371cc52e4d4437355061b4d5eaec95
|
a9fb5a228b2316e5b43f58e4b8d6c858cb7784f7
|
/man/getCiceroGeneActivities-DsATAC-method.Rd
|
f041b3dc5c5ea6a2ab6144240ef9447e41a3fc0b
|
[] |
no_license
|
GreenleafLab/ChrAccR
|
f94232d5ac15caff2c5b2c364090bfb30b63e61a
|
43d010896dc95cedac3a8ea69aae3f67b2ced910
|
refs/heads/master
| 2023-06-24T05:29:29.804920
| 2023-03-17T13:01:49
| 2023-03-17T13:01:49
| 239,655,070
| 17
| 7
| null | 2023-05-05T09:51:23
| 2020-02-11T02:01:37
|
R
|
UTF-8
|
R
| false
| true
| 1,256
|
rd
|
getCiceroGeneActivities-DsATAC-method.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DsATAC-class.R
\docType{methods}
\name{getCiceroGeneActivities,DsATAC-method}
\alias{getCiceroGeneActivities,DsATAC-method}
\alias{getCiceroGeneActivities}
\title{getCiceroGeneActivities-methods}
\usage{
\S4method{getCiceroGeneActivities}{DsATAC}(
.object,
regionType,
promoterGr = NULL,
maxDist = 250000L,
corCutOff = 0.35,
dimRedCoord = NULL,
knn.k = 50
)
}
\arguments{
\item{.object}{\code{\linkS4class{DsATAC}} object}
\item{regionType}{region type of regions that will be linked to the promoter (typical some sort of peak annotation)}
\item{promoterGr}{\code{GRanges} object of promoter coordinates}
\item{maxDist}{maximum distance to consider for region-region interactions}
\item{corCutOff}{cutoff of correlation coefficients (Pearson) to consider for region-region interactions}
\item{dimRedCoord}{matrix of reduced dimension coordinates. must have coordinates for all samples/cells in the dataset}
\item{knn.k}{parameter k for Cicero's k-nearest-neighbor method}
}
\value{
an \code{SummarizedExperiment} object containing gene activities for all cells/samples in the dataset
}
\description{
Obtain Cicero gene activities
}
\author{
Fabian Mueller
}
|
105860969d2a36d59f6d49aaa6c0f584f1b73432
|
e625a2afab25b28cc2a280b1cca965794b9ab78e
|
/plot4.R
|
a03dcd34bbc9d90904d6b8497a2f55e52a2a0435
|
[] |
no_license
|
rumbaugh/ExData_Plotting1
|
1ee5cbf1b43834565cddba2a4fe1099e69878cb5
|
0d30a76d92518483acc716a1d69f8e99a2c90f36
|
refs/heads/master
| 2021-08-22T20:52:48.801364
| 2017-12-01T08:13:46
| 2017-12-01T08:13:46
| 112,700,157
| 0
| 0
| null | 2017-12-01T05:47:14
| 2017-12-01T05:47:14
| null |
UTF-8
|
R
| false
| false
| 1,762
|
r
|
plot4.R
|
plot4 <- function(datapath = '.', outfile = 'plot4.png') {
## Creates a PNG with a 2x2 grid of plots versus time: Global Active Power,
## Voltage, Energy Sub Metering, and Global Reactive Power.
## Requires the file "household_power_consumption.txt"; the directory that
## contains it must be given by `datapath`. The PNG is written into the same
## directory under the name `outfile`.
##
## NOTE(review): dev.new() sizes are in inches, so width=480/height=480 is
## almost certainly pixels-thinking carried over from png(); confirm the
## intended device size. dev.copy(png, ...) below also uses png()'s default
## dimensions, not the on-screen device's.
library(sqldf) # Loads library for performing SQL code to load subset of file
infile = paste(datapath, 'household_power_consumption.txt', sep = '/')
# Loads rows from file only for the two selected dates (1-2 Feb 2007)
df = read.csv.sql(infile, sep=';', header = T, sql = "select * from file where Date in ('1/2/2007','2/2/2007')", eol = "\n")
# Create POSIX vector for dates and times (day/month/year in the raw file)
Dates = strptime(paste(df$Date, df$Time), "%d/%m/%Y %H:%M:%S")
# Open an on-screen device and lay out a 2x2 grid
dev.new(x11, width=480, height=480)
par(mfrow=c(2,2)) # Plot in 2x2 grid
# First plot: Global Active Power over time
plot(Dates, df$Global_active_power, xlab = '', ylab = 'Global Active Power (kilowatts)', type = 'l')
# Second plot: Voltage over time
plot(Dates, df$Voltage, xlab = 'datetime', ylab = 'Voltage', type = 'l')
# Third plot: the three sub-metering series overlaid on one axis.
# An empty frame is drawn first (type = 'n'), then each series is added.
plot(Dates, df$Sub_metering_1, xlab = '', ylab = 'Energy sub metering', type = 'n') # Initiate
colors = c('black','red','blue')
for (i in 1:3) { # Loop through the three Sub_metering_* columns
column = paste('Sub_metering_', i, sep = '')
points(Dates, df[,column], col = colors[[i]], type = 'l')
}
# Legend without a box (bty = 'n'), matching the series colors
legend("topright", paste("Sub_metering_", 1:3, sep=''), col = colors, lty = 1, bty = 'n')
# Fourth plot: Global Reactive Power over time
plot(Dates, df$Global_reactive_power, xlab = 'datetime', ylab = 'Global_reactive_power (kilowatts)', type = 'l')
# Copy the on-screen device to a PNG next to the data file
dev.copy(png, paste(datapath, outfile, sep = '/'))
dev.off()
}
|
86ed5915f5b0d9ebd614ed5a9d29a1c563ebb0d9
|
754d42490126fb8f2b0f505a92fff58972f59bbd
|
/R/classifyByGenesList.R
|
4c41051d5d305731b003e76b0135ac81a213e3e1
|
[] |
no_license
|
federicocozza/geneticApproach
|
dac4b4ab3ef311cc1af1bbfa09ff77f5faf193fa
|
c276140e601b54009a0ce11385caccc1d581ff73
|
refs/heads/master
| 2020-04-10T02:29:40.662943
| 2018-03-21T12:29:28
| 2018-03-21T12:29:28
| 124,255,907
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,313
|
r
|
classifyByGenesList.R
|
# Train/evaluate the stacking classifier on a fixed gene list for one binary
# class comparison (e.g. "AvsB"), by writing the data to a scratch directory,
# shelling out to the Python stacking script, and collecting its outputs.
#
# Args:
#   pythonPath:  path to the Python interpreter used to run the script.
#   classType:   comparison label of the form "<classA>vs<classB>"; split on
#                "vs" to select the samples belonging to either class.
#   genes:       gene identifiers (row names of `dataset`) to use as features.
#   dataset:     expression matrix, genes in rows, patients in columns.
#   classLabels: data frame with a 'Classes' column, row names = patient ids.
#   core, outer_fold, inner_fold: parallelism / CV settings forwarded to the
#                Python script.
#
# Returns: list(accuracy, svmWeights, probabilities) gathered from the
# script's result files. All scratch directories are deleted before return.
#
# NOTE(review): the scratch path is derived from getwd(), and the system()
# command is built with paste() — a working directory containing spaces would
# break the command line; confirm callers control the working directory.
classifyByGenesList <- function(pythonPath,classType,genes,dataset,classLabels,core = 8, outer_fold = 3 ,inner_fold = 2){
# Scratch layout: <cwd>/ImportantGenes/ImportantGenes-<classType>/ holds the
# inputs; the Python script writes results to <cwd>/ImportantGenes_res/.
temporanyDirectory <- getwd()
temporanyDirectory <- paste(temporanyDirectory,"/ImportantGenes",sep="")
temporanyDirectory_res <- paste(temporanyDirectory,"_res/",sep="")
dir.create(temporanyDirectory)
newPath <- paste(temporanyDirectory,"/ImportantGenes-",classType,sep="")
# Select the patients belonging to either side of the comparison, e.g. for
# "AvsB" keep rows labeled "A" or "B".
idx <- c(which(classLabels$Classes == strsplit(classType,split="vs")[[1]][1]),which(classLabels$Classes == strsplit(classType,split="vs")[[1]][2]))
labels2classes <- classLabels[idx,"Classes"]
labels2classes <- as.data.frame(labels2classes)
rownames(labels2classes) <- rownames(classLabels)[idx]
colnames(labels2classes) <- "x"
patients <- rownames(labels2classes)
dir.create(newPath)
# Transpose to patients-in-rows, genes-in-columns, as the script expects.
dataToWrite <- t(dataset[genes,patients])
# Single-gene edge case: indexing with one gene drops to a vector, so the
# matrix shape (and the gene name) must be rebuilt before transposing.
if(length(genes) == 1){
dataToWrite <- as.data.frame(dataToWrite)
rownames(dataToWrite) <- genes
dataToWrite <- t(dataToWrite)
}
# Write the feature matrix and labels where the Python script will find them.
write.table(dataToWrite,file = paste(newPath,"/","ImportantGenes-",classType,".txt",sep=""),quote=F)
write.table(labels2classes, file = paste(newPath,"/labels",sep="") ,quote=F)
# Run the stacking classifier (blocking call).
system(paste(pythonPath,'Python/stacking_level1_multi.py',temporanyDirectory,core,outer_fold,inner_fold,"C"))
# Collect accuracies, SVM weights and class probabilities from the result
# files (helpers defined elsewhere in this file/package).
accuracies <- getPathwaysAccuraciesWithoutPlots(temporanyDirectory)
svmWeights <- getSVMweights(temporanyDirectory)
probabilities <- getSVMProbabilities(temporanyDirectory)
# Clean up both scratch trees before returning.
unlink(temporanyDirectory,recursive=T)
unlink(temporanyDirectory_res,recursive = T)
return(list(accuracy = accuracies$accuraciesMatrix$accuracy,svmWeights = svmWeights,probabilities = probabilities))
}
getPathwaysAccuraciesWithoutPlots <- function(path){
  # Collect the average test accuracy for every pathway produced by the
  # stacking classifier, without generating any plots.
  #
  # Expected directory layout (as produced by classifyByGenesList):
  #   <path>/<pathway>/<pathway>.txt              feature matrix (size = ncol)
  #   <path>_res/<pathway>/test_avg_accuracy.txt  CSV with an 'avg_acc' column
  # Pathway names are of the form "<prefix>-<classType>"; the token after the
  # first "-" is reported as the class.
  #
  # Args:
  #   path: base directory; results are read from paste0(path, "_res/").
  # Returns:
  #   list(accuraciesMatrix = data.frame(pathway, class, accuracy, size)).
  path_res <- paste(path, "_res/", sep = "")
  pathways <- list.files(path_res)
  p1 <- paste(paste(path, pathways, sep = "/"), "/", pathways, ".txt", sep = "")
  # Robustness: with no pathway directories, return an empty but correctly
  # named frame instead of failing on the colnames() assignment below.
  if (length(pathways) == 0) {
    res <- data.frame(pathway = character(), class = character(),
                      accuracy = numeric(), size = integer())
    return(list(accuraciesMatrix = res))
  }
  # Build one row per pathway, then bind once at the end. (The original grew
  # the data frame with rbind() inside the loop, which is O(n^2).)
  rows <- lapply(seq_along(pathways), function(i) {
    x <- read.table(p1[i])
    p <- pathways[i]
    a <- read.table(paste(path_res, p, "/", "test_avg_accuracy.txt", sep = ""),
                    sep = ",", header = TRUE)
    psplit <- strsplit(p, split = "-")
    data.frame(p, psplit[[1]][2], a[["avg_acc"]], ncol(x))
  })
  res <- do.call(rbind, rows)
  colnames(res) <- c("pathway", "class", "accuracy", "size")
  return(list(accuraciesMatrix = res))
}
|
9f8440c1100a3c1bd5a6bb615c2f7711645ea027
|
08b4eaf203fbbe87b09fdb2dc96b5d11fff2c171
|
/man/seurat_sample_tms_liver.Rd
|
ab541345033a7b3168e9505211893a0376f4ad21
|
[] |
no_license
|
cran/scDiffCom
|
a8f28d7f92acfba6b84e123707c437300a9adfd9
|
26fbcb29d53a04e49208cb38f3e515f4a59827aa
|
refs/heads/master
| 2023-07-09T07:30:59.085372
| 2021-08-17T06:20:05
| 2021-08-17T06:20:05
| 397,309,543
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 913
|
rd
|
seurat_sample_tms_liver.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{seurat_sample_tms_liver}
\alias{seurat_sample_tms_liver}
\title{A down-sampled Seurat object to use for testing and benchmarking}
\format{
An object of class Seurat.
}
\usage{
data(seurat_sample_tms_liver)
}
\description{
This Seurat object has been down-sampled from the original
Tabula Muris Senis liver object. Pre-processing and normalization has
been performed before down-sampling. It contains 726 features (genes) and
468 samples (cells). It is only intended to be used for testing and
benchmarking and does not contain meaningful biological information.
}
\references{
\emph{A single-cell transcriptomic atlas characterizes
ageing tissues in the mouse}, Tabula Muris Consortium (2020)
(\href{https://pubmed.ncbi.nlm.nih.gov/32669714/}{PMID: 32669714})
}
\keyword{datasets}
|
246cfedc64e4aae7ddf942efe4448e3322bd0da5
|
c2e7ea15f6cae6b46b6a008423ff978ed474ce1c
|
/man/general.bar_plot_by.Rd
|
4d748273bfd08f5e542da70236b08d11ca56239e
|
[] |
no_license
|
jhooge/BioViz
|
690b08804849438bfb3bdde29277c56cb89f5f9e
|
1458724e59d4283ca9de9d0cd66543fb94422846
|
refs/heads/master
| 2021-01-23T07:56:03.244205
| 2017-08-10T09:59:17
| 2017-08-10T09:59:17
| 86,466,292
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 4,184
|
rd
|
general.bar_plot_by.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/general.R
\name{general.bar_plot_by}
\alias{general.bar_plot_by}
\title{Function to create a barplot, with bars split by a second variable}
\usage{
general.bar_plot_by(freq, labels = NULL, labels.ab = NULL, file.name,
col = "tomato3", cex.lab = 1, cex.lab.ab = 1, cex.x = 1, xlab = "",
ylab = "Frequency", add.legend = FALSE, leg.lab = NULL,
leg.col = NULL, leg.title = NULL, mar = c(5, 4, 3, 1), png.width = 6,
png.height = 5, png.out = TRUE)
}
\arguments{
\item{freq}{vector or matrix with the bar heights, e.g. a frequency table ('table()' output),
names will be used for bar labels if 'labels' is not specified, if 'freq' is a matrix,
entries of each column are represented by separate bars with no space in between}
\item{labels}{bar labels (default=NULL)}
\item{labels.ab}{additional labels, which are placed above the bars (default=NULL), should
have the same dimension as 'freq'}
\item{file.name}{name for the PNG output file (without '.png' ending)}
\item{col}{color for the bars (default='tomato3'). can be a single value or an object of
the same dimension than freq, specifying the color for each bar}
\item{cex.lab}{size of the bar labels (default=1)}
\item{cex.lab.ab}{size of the bar labels above the bars (default=1)}
\item{cex.x}{size of the x-labels (default=1)}
\item{xlab}{label for the x-axis (default='')}
\item{ylab}{label for the y-axis (default='Frequency')}
\item{add.legend}{TRUE/FALSE should a legend be added underneath the plot (default=FALSE),
only works if legend parameters are specified}
\item{leg.lab}{labels for the legend (default=NULL)}
\item{leg.col}{colors for the legend (default=NULL)}
\item{leg.title}{title for the legend (default=NULL)}
\item{mar}{plot margin (default=c(5,4,3,1))}
\item{png.width}{width of the PNG-file in inches (default=6)}
\item{png.height}{height of the PNG-file in inches (default=5)}
\item{png.out}{TRUE/FALSE if true, a png file is created as output, otherwise the plot
is created in the R graphic device}
}
\value{
PNG file with the barplot ('file.name.png'), and a numeric vector with the size of the
PNG file in inches for rescaling in RTF
}
\description{
Function to create a barplot, with bars split by a second variable
}
\examples{
\dontrun{
set.seed(42)
x <- sample(1:15, size=500, replace=TRUE)
by <- sample(c('A','B'), size=500, replace=TRUE)
freq <- table(by, x)
col <- matrix(ncol=ncol(freq), nrow=nrow(freq))
col1 <- colorRampPalette(c('tomato3','grey95'))
col[1,] <- col1(9)[1]
col[2,] <- col1(9)[7]
col2 <- colorRampPalette(c('skyblue2','grey95'))
col[1,c(5,11)] <- col2(9)[1]
col[2,c(5,11)] <- col2(9)[7]
cex.lab <- 0.7
cex.lab.ab <- 0.5
cex.x <- 0.9
labels <- NULL
labels.ab <- rep(paste0('X', 1:15), each=2)
labels.ab[which(1:length(labels.ab) \%\% 2 == 0)] <- ''
mar <- c(7,4,3,1)
xlab <- 'numbers'
ylab <- 'frequencies'
file.name <- 'test'
png.width <- 7
png.height <- 6
add.legend <- TRUE
leg.title <- 'color'
leg.col <- c(col1(9)[1],col1(9)[7],col2(9)[1],col2(9)[7])
leg.lab <- c('red','faded red','blue','faded blue')
general.bar_plot_by(freq=freq, labels=NULL, labels.ab=labels.ab,
file.name='test1_barplot', col=col, cex.lab=0.7, cex.lab.ab=0.5, cex.x=0.8,
xlab='Number', ylab='Frequency', add.legend=TRUE,
leg.lab=leg.lab, leg.col=leg.col, leg.title='color',
mar=c(7,4,3,1), png.width=7, png.height=6)
general.bar_plot_by(freq=freq, labels=paste0('X', 1:15), labels.ab=NULL,
file.name='test2_barplot',
col=col, cex.lab=0.7, cex.lab.ab=0.5, cex.x=0.8,
xlab='Variable', ylab='Frequency', add.legend=TRUE,
leg.lab=leg.lab, leg.col=leg.col, leg.title='color',
mar=c(7,4,3,1), png.width=7, png.height=6)
general.bar_plot_by(freq=table(x), labels.ab=paste0('X', 1:15),
file.name='test3_barplot', col=col[2,5],
xlab='Number', ylab='Frequency')
general.bar_plot_by(freq=table(x), labels.ab=paste0('X', 1:15),
file.name='test4_barplot', col=col[2,5], mar=c(7,4,3,1),
xlab='Number', ylab='Frequency', add.legend=TRUE,
leg.title='color', leg.col=leg.col[4], leg.lab=leg.lab[4])
}
}
\author{
Sebastian Voss, Adam Skubala
}
|
3db4b1827a891cc9e7a67d96d8eb348b18121bfa
|
3df348376d12e0c3d2d070c5a980809741639c5d
|
/R/tr_causal.R
|
8b12b016a5796cfa8b84f739811c0465f180bb4a
|
[
"MIT"
] |
permissive
|
bnicenboim/pangoling
|
a8d47e3c2677ce660b5d3917be8d5934a6598377
|
2a6855d5c51fa6e1ed2a723f309440a3df9379e6
|
refs/heads/main
| 2023-06-07T18:43:16.305506
| 2023-05-14T04:19:35
| 2023-05-14T04:19:35
| 497,831,295
| 4
| 0
|
NOASSERTION
| 2023-05-14T04:19:37
| 2022-05-30T07:17:10
|
R
|
UTF-8
|
R
| false
| false
| 15,811
|
r
|
tr_causal.R
|
#' Preloads a causal language model
#'
#' Preloads a causal language model to speed up next runs.
#'
#' A causal language model (also called GPT-like, auto-regressive, or decoder
#' model) is a type of large language model usually used for text-generation
#' that can predict the next word (or more accurately in fact token) based
#' on a preceding context.
#'
#' If not specified, the causal model that will be used is the one set in
#' specified in the global option `pangoling.causal.default`, this can be
#' accessed via `getOption("pangoling.causal.default")` (by default
#' "`r getOption("pangoling.causal.default")`"). To change the default option
#' use `options(pangoling.causal.default = "newcausalmodel")`.
#'
#' A list of possible causal models can be found in
#' [Hugging Face website](https://huggingface.co/models?pipeline_tag=text-generation).
#'
#' Using the `config_model` and `config_tokenizer` arguments, it's possible to
#' control how the model and tokenizer from Hugging Face is accessed, see the
#' Python method
#' [`from_pretrained`](https://huggingface.co/docs/transformers/v4.25.1/en/model_doc/auto#transformers.AutoProcessor.from_pretrained)
#' for details.
#'
#' In case of errors when a new model is run, check the status of
#' [https://status.huggingface.co/](https://status.huggingface.co/)
#'
#' @param model Name of a pre-trained model or folder.
#' @param checkpoint Folder of a checkpoint.
#' @param add_special_tokens Whether to include special tokens. It has the
#' same default as the
#' [AutoTokenizer](https://huggingface.co/docs/transformers/v4.25.1/en/model_doc/auto#transformers.AutoTokenizer) method in Python.
#' @param config_model List with other arguments that control how the
#' model from Hugging Face is accessed.
#' @param config_tokenizer List with other arguments that control how the tokenizer from Hugging Face is accessed.
#'
#' @return Nothing.
#'
#' @examplesIf interactive()
#' causal_preload(model = "gpt2")
#'
#' @family causal model functions
#' @export
#'
causal_preload <- function(model = getOption("pangoling.causal.default"),
                           checkpoint = NULL,
                           add_special_tokens = NULL,
                           config_model = NULL, config_tokenizer = NULL) {
  message_verbose("Preloading causal model ", model, "...")
  # Name the config arguments explicitly: every other call site in this
  # file (e.g. causal_config(), causal_next_tokens_tbl()) passes them by
  # name, and positional matching here could bind them to the wrong
  # parameter of the helpers if their signatures gain arguments.
  lang_model(model,
    checkpoint = checkpoint,
    task = "causal",
    config_model = config_model
  )
  tokenizer(model,
    add_special_tokens = add_special_tokens,
    config_tokenizer = config_tokenizer
  )
  # Called for the side effect of warming the model/tokenizer caches.
  invisible()
}
#' Returns the configuration of a causal model
#'
#' @inheritParams causal_preload
#' @inherit causal_preload details
#' @return A list with the configuration of the model.
#' @examplesIf interactive()
#' causal_config(model = "gpt2")
#'
#' @family causal model functions
#' @export
causal_config <- function(model = getOption("pangoling.causal.default"),
                          checkpoint = NULL, config_model = NULL) {
  # Load (or fetch from cache) the causal model, then expose its
  # Hugging Face configuration as a plain R list.
  trf <- lang_model(
    model = model,
    checkpoint = checkpoint,
    task = "causal",
    config_model = config_model
  )
  trf$config$to_dict()
}
#' Get the possible next tokens and their log probabilities its previous context using a causal transformer
#'
#' Get the possible next tokens and their log probabilities based on its
#' previous context using a causal transformer model from [Hugging Face](https://huggingface.co).
#'
#' @section More examples:
#' See the
#' [online article](https://bruno.nicenboim.me/pangoling/articles/intro-gpt2.html)
#' in pangoling website for more examples.
#'
#' @param context The context.
#' @inheritParams causal_preload
#' @inherit causal_preload details
#' @return A table with possible next tokens and their log-probabilities.
#' @examplesIf interactive()
#' causal_next_tokens_tbl(
#' context = "The apple doesn't fall far from the",
#' model = "gpt2"
#' )
#'
#' @family causal model functions
#' @export
causal_next_tokens_tbl <- function(context,
                                   model = getOption("pangoling.causal.default"),
                                   checkpoint = NULL,
                                   add_special_tokens = NULL,
                                   config_model = NULL,
                                   config_tokenizer = NULL) {
  # This function scores exactly one context string; use causal_lp() /
  # causal_lp_mats() for multiple texts.
  if (length(unlist(context)) > 1) stop2("Only one context is allowed in this function.")
  message_verbose("Processing using causal model '", file.path(model, checkpoint), "'...")
  trf <- lang_model(model,
    checkpoint = checkpoint,
    task = "causal",
    config_model = config_model
  )
  tkzr <- tokenizer(model,
    add_special_tokens = add_special_tokens,
    config_tokenizer = config_tokenizer
  )
  # no batches allowed
  context_tensor <- encode(list(unlist(context)),
    tkzr,
    add_special_tokens = add_special_tokens
  )$input_ids
  # Forward pass through the transformer; logits have shape
  # (batch, tokens, vocab) on the Python side.
  generated_outputs <- trf(context_tensor)
  # NOTE(review): the `[0]` indexing below relies on these being Python
  # objects via reticulate (0-based); `[0]` selects the first (only)
  # batch element -- confirm tolist() returns a Python list here.
  n_tokens <- length(context_tensor$tolist()[0])
  # Logits at the last context position predict the *next* token.
  logits_next_word <- generated_outputs$logits[0][n_tokens - 1]
  # Normalize to log-probabilities over the whole vocabulary.
  l_softmax <- torch$log_softmax(logits_next_word, dim = -1L)$tolist()
  lp <- reticulate::py_to_r(l_softmax) |>
    unlist()
  vocab <- get_vocab(tkzr)
  # One row per vocabulary token, most likely continuation first.
  tidytable::tidytable(token = vocab, lp = lp) |>
    tidytable::arrange(-lp)
}
#' Get the log probability of each element of a vector of words (or phrases) using a causal transformer
#'
#' Get the log probability of each element of a vector of words (or phrases) using a causal transformer model. See the
#' [online article](https://bruno.nicenboim.me/pangoling/articles/intro-gpt2.html)
#' in pangoling website for more examples.
#'
#'
#' @param x Vector of words, phrases or texts.
#' @param .by Vector that indicates how the text should be split.
#' @param l_contexts Left context for each word in `x`. If `l_contexts` is used,
#' `.by` is ignored. Set `.by = NULL` to avoid a message notifying that.
#' @inheritParams causal_preload
#' @param ignore_regex Can ignore certain characters when calculates the log
#' probabilities. For example `^[[:punct:]]$` will ignore
#' all punctuation that stands alone in a token.
#' @param batch_size Maximum size of the batch. Larges batches speedup
#' processing but take more memory.
#' @inherit causal_preload details
#' @inheritSection causal_next_tokens_tbl More examples
#' @return A named vector of log probabilities.
#'
#' @examplesIf interactive()
#' causal_lp(
#' x = c("The", "apple", "doesn't", "fall", "far", "from", "the", "tree."),
#' model = "gpt2"
#' )
#'
#'causal_lp(
#' x = "tree.",
#' l_contexts = "The apple doesn't fall far from the tree.",
#' .by = NULL, # it's ignored anyways
#' model = "gpt2"
#' )
#' @family causal model functions
#' @export
causal_lp <- function(x,
                      .by = rep(1, length(x)),
                      l_contexts = NULL,
                      ignore_regex = "",
                      model = getOption("pangoling.causal.default"),
                      checkpoint = NULL,
                      add_special_tokens = NULL,
                      config_model = NULL,
                      config_tokenizer = NULL,
                      batch_size = 1) {
  stride <- 1 # fixed for now
  message_verbose("Processing using causal model '", file.path(model, checkpoint), "'...")
  if(!is.null(l_contexts)){
    # When explicit left contexts are given, `.by` is ignored: each
    # (context, target) pair becomes its own group below.
    if(all(!is.null(.by))) message_verbose("Ignoring `.by` argument")
    # Interleave contexts and targets: c(rbind(a, b)) yields
    # context_1, x_1, context_2, x_2, ...
    x <- c(rbind(l_contexts, x))
    # Group each context with its target (pairs of consecutive elements).
    .by <- rep(seq_len(length(x)/2), each = 2)
  }
  # Split the words into one character vector per text/group.
  word_by_word_texts <- get_word_by_word_texts(x, .by)
  # Reassemble each group into a single string for tokenization.
  pasted_texts <- lapply(
    word_by_word_texts,
    function(word) paste0(word, collapse = " ")
  )
  tkzr <- tokenizer(model,
    add_special_tokens = add_special_tokens,
    config_tokenizer = config_tokenizer
  )
  trf <- lang_model(model,
    checkpoint = checkpoint,
    task = "causal",
    config_model = config_model
  )
  # Batch the texts into tensors for the forward pass.
  tensors <- create_tensor_lst(
    texts = unname(pasted_texts),
    tkzr = tkzr,
    add_special_tokens = add_special_tokens,
    stride = stride,
    batch_size = batch_size
  )
  # One vocab-by-token log-probability matrix per text (see causal_mat()).
  lmats <- lapply(tensors, function(tensor) {
    causal_mat(tensor,
      trf,
      tkzr,
      add_special_tokens = add_special_tokens,
      stride = stride
    )
  }) |>
    unlist(recursive = FALSE)
  # For each text, map token-level matrices back to word-level
  # log-probabilities.
  out <- tidytable::pmap(
    list(
      word_by_word_texts,
      names(word_by_word_texts),
      lmats
    ),
    function(words, item, mat) {
      # words <- word_by_word_texts[[1]]
      # item <- names(word_by_word_texts)
      # mat <- lmats[[1]]
      message_verbose(
        "Text id: ", item, "\n`",
        paste(words, collapse = " "),
        "`"
      )
      word_lp(words,
        mat = mat,
        ignore_regex = ignore_regex,
        model = model,
        add_special_tokens = add_special_tokens,
        config_tokenizer = config_tokenizer
      )
    }
  )
  if(!is.null(l_contexts)) {
    # remove the contexts
    # Recycled logical mask: drops every odd element (the contexts),
    # keeping every even one (the targets) from the interleaving above.
    keep <- c(FALSE, TRUE)
  } else {
    keep <- TRUE
  }
  unlist(out, recursive = FALSE)[keep]
}
#' Get the log probability of each token in a sentence (or group of sentences) using a causal transformer
#'
#' Get the log probability of each token in a sentence (or group of sentences) using a causal transformer model.
#'
#'
#' @param texts Vector or list of texts.
#' @param .id Name of the column with the sentence id.
#' @inheritParams causal_preload
#' @inheritParams causal_lp
#' @inherit causal_preload details
#' @inheritSection causal_next_tokens_tbl More examples
#' @return A table with token names (`token`), log-probability (`lp`) and optionally sentence id.
#'
#' @examplesIf interactive()
#' causal_tokens_lp_tbl(
#' texts = c("The apple doesn't fall far from the tree."),
#' model = "gpt2"
#' )
#'
#' @family causal model functions
#' @export
causal_tokens_lp_tbl <- function(texts,
                                 model = getOption("pangoling.causal.default"),
                                 checkpoint = NULL,
                                 add_special_tokens = NULL,
                                 config_model = NULL,
                                 config_tokenizer = NULL,
                                 batch_size = 1,
                                 .id = NULL) {
  stride <- 1
  message_verbose("Processing using causal model '", file.path(model, checkpoint), "'...")
  # Flatten nested input so each element is one text.
  ltexts <- as.list(unlist(texts, recursive = TRUE))
  tkzr <- tokenizer(model,
    add_special_tokens = add_special_tokens,
    config_tokenizer = config_tokenizer
  )
  trf <- lang_model(model,
    checkpoint = checkpoint,
    task = "causal",
    config_model = config_model
  )
  # Tokenize and batch the texts into tensors.
  tensors <- create_tensor_lst(ltexts,
    tkzr,
    add_special_tokens = add_special_tokens,
    stride = stride,
    batch_size = batch_size
  )
  # One vocab-by-token log-probability matrix per text.
  ls_mat <- tidytable::map(tensors, function(tensor) {
    causal_mat(tensor,
      trf,
      tkzr,
      add_special_tokens = add_special_tokens,
      stride = stride
    )
  }) |>
    unlist(recursive = FALSE)
  # Collapse each matrix to one row per token: look up each token's own
  # log-probability on the matrix diagonal-by-name.
  tidytable::map_dfr(ls_mat, function(mat) {
    if (ncol(mat) == 1 && colnames(mat) == "") {
      # Empty text sentinel produced by causal_mat() when no tokens
      # were found.
      tidytable::tidytable(
        token = "",
        lp = NA_real_
      )
    } else {
      tidytable::tidytable(
        token = colnames(mat),
        # mat[token_name, position]: log-probability assigned to the
        # observed token at each position (first is NA by construction).
        lp = tidytable::map2_dbl(colnames(mat), seq_len(ncol(mat)), ~ mat[.x, .y])
      )
    }
  }, .id = .id)
}
#' @noRd
# Runs one batch of input tensors through the model and returns, for each
# text in the batch, a matrix of log-probabilities with the model's
# vocabulary in the rows and the text's observed tokens in the columns.
# Column j holds the log-probabilities predicted for position j given
# positions 1..j-1; the first column is all NA (nothing predicts the
# first token).
causal_mat <- function(tensor,
                       trf,
                       tkzr,
                       add_special_tokens = NULL,
                       stride = 1) {
  message_verbose(
    "Processing a batch of size ",
    tensor$input_ids$shape[0],
    " with ",
    tensor$input_ids$shape[1], " tokens."
  )
  # Degenerate case: tokenizer produced no tokens at all. Return a
  # single all-NA column so downstream code still sees a matrix.
  if (tensor$input_ids$shape[1] == 0) {
    warning("No tokens found.", call. = FALSE)
    vocab <- get_vocab(tkzr)
    mat <- matrix(rep(NA, length(vocab)), ncol = 1)
    rownames(mat) <- vocab
    colnames(mat) <- ""
    return(list(mat))
  }
  # Forward pass; logits shape is (batch, tokens, vocab) on the Python
  # side (0-based indexing below via reticulate).
  logits_b <- trf$forward(
    input_ids = tensor$input_ids,
    attention_mask = tensor$attention_mask
  )$logits
  # if (logits_b$shape[0] > 1) {
  #   stop2("Input is too long")
  #   # if there is a sliding window, because
  #   # max_tokens was exceeded:
  #   final_words <- lapply(1:(logits_b$shape[0] - 1), function(x) logits_b[x][seq(stride, max_length - 1)])
  #   logits <- torch$row_stack(c(logits_b[0], final_words))
  #
  #   first_tokens <- tkzr$convert_ids_to_tokens(tensor[0])
  #   final_tokens <- tidytable::map(0:(logits_b$shape[0] - 1), function(n) {
  #     t <- tensor[n][seq(stride, max_length - 1)]
  #     # in case the tensor is of size 1 and lost a dimension:
  #     if (t$shape$numel() == 1L) t <- t$reshape(1L)
  #     tkzr$convert_ids_to_tokens(t)
  #   }) |>
  #     unlist()
  #
  #   tokens <- c(first_tokens, final_tokens)
  # }
  # One matrix per batch element (0-based Python indices).
  lmat <- lapply(seq_len(logits_b$shape[0]) - 1, function(i) {
    # Keep only the non-padding positions (attention mask == 1).
    real_token_pos <- seq_len(sum(tensor$attention_mask[i]$tolist())) - 1
    logits <- logits_b[i][real_token_pos]
    # in case it's only one token, it needs to be unsqueezed
    ids <- tensor$input_ids[i]$unsqueeze(1L)
    tokens <- tkzr$convert_ids_to_tokens(ids[real_token_pos])
    # Log-softmax over the vocabulary dimension, converted to R.
    lp <- reticulate::py_to_r(torch$log_softmax(logits, dim = -1L))$tolist()
    # Free the per-text logits eagerly; these tensors can be large.
    rm(logits)
    gc(full = TRUE)
    if (is.list(lp)) {
      mat <- do.call("cbind", lp)
    } else {
      # In case it's only one token, lp won't be a list
      mat <- matrix(lp, ncol = 1)
    }
    # remove the last prediction, and the first is NA
    # (shift right: column j now holds the prediction *for* token j).
    mat <- cbind(rep(NA, nrow(mat)), mat[, -ncol(mat)])
    rownames(mat) <- get_vocab(tkzr)
    colnames(mat) <- unlist(tokens)
    mat
  })
  rm(logits_b)
  lmat
}
#' Get a list of matrices with the log probabilities of possible word given its previous context using a causal transformer
#'
#' Get a list of matrices with the log probabilities of possible word given
#' its previous context using a causal transformer model.
#'
#' @inheritParams causal_lp
#' @inheritParams causal_preload
#' @inherit causal_preload details
#' @inheritSection causal_next_tokens_tbl More examples
#' @return A list of matrices with tokens in their columns and the vocabulary of the model in their rows
#'
#' @examplesIf interactive()
#' causal_lp_mats(
#' x = c("The", "apple", "doesn't", "fall", "far", "from", "the", "tree."),
#' model = "gpt2"
#' )
#'
#' @family causal model functions
#' @export
#'
causal_lp_mats <- function(x,
                           .by = rep(1, length(x)),
                           model = getOption("pangoling.causal.default"),
                           checkpoint = NULL,
                           add_special_tokens = NULL,
                           config_model = NULL,
                           config_tokenizer = NULL,
                           batch_size = 1) {
  stride <- 1
  message_verbose("Processing using causal model '", file.path(model, checkpoint), "'...")
  tkzr <- tokenizer(model,
    add_special_tokens = add_special_tokens,
    config_tokenizer = config_tokenizer
  )
  trf <- lang_model(model,
    checkpoint = checkpoint,
    task = "causal",
    config_model = config_model
  )
  # Strip leading/trailing spaces and tabs (but not newlines) from each
  # word before grouping.
  # NOTE(review): unlike causal_lp(), this trims whitespace and groups
  # with split() directly instead of get_word_by_word_texts() -- confirm
  # the two paths produce identical groupings.
  x <- trimws(x, whitespace = "[ \t]")
  word_by_word_texts <- split(x, .by)
  # Reassemble each group into one string for tokenization.
  pasted_texts <- lapply(
    word_by_word_texts,
    function(word) paste0(word, collapse = " ")
  )
  tensors <- create_tensor_lst(unname(pasted_texts),
    tkzr,
    add_special_tokens = add_special_tokens,
    stride = stride,
    batch_size = batch_size
  )
  # One vocab-by-token log-probability matrix per text (see causal_mat()).
  lmat <- tidytable::map(
    tensors,
    function(tensor) {
      causal_mat(tensor,
        trf,
        tkzr,
        add_special_tokens = add_special_tokens,
        stride = stride
      )
    }
  )
  # Flatten the per-batch nesting to a flat list of matrices.
  lmat |>
    unlist(recursive = FALSE)
}
|
78b69689c7140634de499bfbfc8fa31d33133908
|
2d34708b03cdf802018f17d0ba150df6772b6897
|
/googlegamesv1.auto/man/RevisionCheckResponse.Rd
|
7fa214c3079ef0789525875ce63088928d3e8964
|
[
"MIT"
] |
permissive
|
GVersteeg/autoGoogleAPI
|
8b3dda19fae2f012e11b3a18a330a4d0da474921
|
f4850822230ef2f5552c9a5f42e397d9ae027a18
|
refs/heads/master
| 2020-09-28T20:20:58.023495
| 2017-03-05T19:50:39
| 2017-03-05T19:50:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 661
|
rd
|
RevisionCheckResponse.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/games_objects.R
\name{RevisionCheckResponse}
\alias{RevisionCheckResponse}
\title{RevisionCheckResponse Object}
\usage{
RevisionCheckResponse(apiVersion = NULL, revisionStatus = NULL)
}
\arguments{
\item{apiVersion}{The version of the API this client revision should use when calling API methods}
\item{revisionStatus}{The result of the revision check}
}
\value{
RevisionCheckResponse object
}
\description{
RevisionCheckResponse Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
This is a JSON template for the result of checking a revision.
}
|
ded1cfb2ef2e580738b12ab4c75861eb6b38c641
|
8c7d64d8a7519c8636de1448a52d8d2c74856665
|
/cachematrix.R
|
5bfe5d1a864959834cbf840c778159e7e3e96041
|
[] |
no_license
|
rwarrier4/ProgrammingAssignment2
|
44650bee0b85dd5cc457d51ff6fa200f95cead86
|
c2048269ff2e12b6cd465c60399965eb0921216d
|
refs/heads/master
| 2021-01-22T00:36:17.201739
| 2014-09-20T15:27:38
| 2014-09-20T15:27:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,400
|
r
|
cachematrix.R
|
## MakeCacheMatrix function creates a matrix and returns a list of functions
## to access and manipulate the matrix that you've created.
## Build a special "matrix" object that can cache its own inverse.
## Returns a list of accessor closures sharing the stored matrix and
## the cached inverse via their enclosing environment.
makeCacheMatrix <- function(x = matrix()) {
  cached_inv <- NULL

  ## Return the cached inverse (kept for backward compatibility;
  ## identical to getInverse()).
  geti <- function() cached_inv

  ## Replace the stored matrix and invalidate any cached inverse.
  set <- function(y = matrix()) {
    x <<- y
    cached_inv <<- NULL
  }

  ## Return the stored matrix.
  get <- function() x

  ## Store a precomputed inverse in the cache. This does not compute
  ## anything; it only records the value supplied by the caller.
  setInverse <- function(inverse = matrix()) {
    cached_inv <<- inverse
  }

  ## Return the cached inverse, or NULL if it has not been set.
  getInverse <- function() cached_inv

  ## Expose the accessors by name.
  list(
    geti = geti,
    get = get,
    set = set,
    setInverse = setInverse,
    getInverse = getInverse
  )
}
## Return the inverse of the special "matrix" created by
## makeCacheMatrix(). If an inverse is already cached, return it
## directly (announcing the cache hit); otherwise compute it with
## solve(), store it via setInverse(), and return it. Extra arguments
## in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getInverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  result <- solve(x$get(), ...)
  x$setInverse(result)
  result
}
|
c64a103894f6cdc0e9ed0cde15e02781710dbbe3
|
6ad337e2b26380a4ebf1ac301bb3e8aff19b846b
|
/R/CIT.R
|
e0d63013b36c09a1f434749e6aeef46309a9db3b
|
[] |
no_license
|
kaseyriver11/k3d3
|
2824f2c078c2f0ba0659333b0bd68909442c4270
|
85c21f7725f6afe06a95d773716ddadff4386622
|
refs/heads/master
| 2020-12-29T02:44:20.607587
| 2017-06-04T22:56:11
| 2017-06-04T22:56:11
| 38,123,059
| 5
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,145
|
r
|
CIT.R
|
#' D3 Visualization: Collapsible Indented Tree
#'
#' Creates a collapsible indented tree.
#'
#' @param data the json file being used for the visualizations.
#' @param width width for the graph's frame area (in pixels) - default is null.
#' @param height height for the graph's frame area (in pixels) - default is null.
#' @param color1 the color of the bars when the branch is collapsed
#' @param color2 the color of the bars when the branch is not collapsed
#' @param color3 the color of the bars that represent the children
#'
#' @examples
#' \dontrun{
#' # load in an appropriate json file.
#' # Such as \url{https://gist.github.com/mbostock/1093025#file-flare-json}
#' # we will call this json.json
#' CIT(json.json) # This should reproduce the Mike Bostock's Example
#' CIT(json.json, color1 = "blue", color2 = "red", color3 = "green")
#' # Here we change around the colors of the visualization.
#' }
#'
#' @source
#' D3.js was created by Michael Bostock. See \url{http://d3js.org/}
#'
#' @import htmlwidgets
#'
#' @export
CIT <- function(data,
                height = NULL,
                width = NULL,
                color1 = "#bd3182", # This is a purple color
                color2 = "#31bd6c", # This is a green/blue color
                color3 = "#c6dbef") # This is a lighter green/blue color
{
  # Bundle the color settings that the JavaScript binding reads.
  opts <- list(color1 = color1, color2 = color2, color3 = color3)

  # Hand the data and options to htmlwidgets, which renders the D3
  # collapsible indented tree in the browser.
  htmlwidgets::createWidget(
    name = "CIT",
    x = list(data = data, options = opts),
    width = width,
    height = height,
    htmlwidgets::sizingPolicy(padding = 0, browser.fill = TRUE),
    package = "k3d3"
  )
}
#' @rdname k3d3-shiny
#' @export
CITOutput <- function(outputId, width = "100%", height = "500px") {
  # Shiny UI placeholder into which renderCIT() draws the widget.
  shinyWidgetOutput(outputId, "CIT", width, height, package = "k3d3")
}
#' @rdname k3d3-shiny
#' @export
renderCIT <- function(expr, env = parent.frame(), quoted = FALSE) {
  # Capture the unevaluated expression unless the caller already
  # quoted it, then delegate to the htmlwidgets shiny render helper.
  if (!quoted) {
    expr <- substitute(expr)
  }
  shinyRenderWidget(expr, CITOutput, env, quoted = TRUE)
}
|
5c5a6ca9ca3de1d392f37d37b364e7e3baa37194
|
a7cba5bed6b27f1c67f779a87a9b6f37462d761c
|
/man/plot_net_country.Rd
|
27be11e4c742dc36c7620db8f928d765575ba04c
|
[] |
no_license
|
ropensci/refsplitr
|
f75889011b59e24d736fda6492a6955c11f45860
|
38c01e609cbdf1ad28bc7c6a89de1e433892eb10
|
refs/heads/master
| 2023-05-22T20:59:33.245494
| 2022-02-07T17:25:38
| 2022-02-07T17:25:38
| 114,401,756
| 29
| 4
| null | 2022-02-07T14:26:19
| 2017-12-15T18:51:00
|
R
|
UTF-8
|
R
| false
| true
| 1,877
|
rd
|
plot_net_country.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_net_country.R
\name{plot_net_country}
\alias{plot_net_country}
\title{Creates a network diagram of coauthors' countries linked by reference, and
with nodes arranged geographically}
\usage{
plot_net_country(
data,
lineResolution = 10,
mapRegion = "world",
lineAlpha = 0.5
)
}
\arguments{
\item{data}{the \code{address} element from the list outputted from the
\code{authors_georef()} function, containing geocoded address latitude and
longitude locations.}
\item{lineResolution}{the resolution of the lines drawn, higher numbers will
make smoother curves default is 10.}
\item{mapRegion}{what portion of the world map to show. possible values
include \code{"world"}, \code{"North America"}, \code{"South America"}, \code{"Australia"},
\code{"Africa"}, \code{"Antarctica"}, and \code{"Eurasia"}}
\item{lineAlpha}{transparency of the lines, fed into ggplots alpha value.
Number between 0 - 1.}
}
\description{
This function takes an addresses data.frame, links it to an
authors_references dataset and plots a network diagram generated for
countries of co-authorship.
}
\examples{
## Using the output of authors_georef (e.g., BITR_geocode)
data(BITR_geocode)
## Plots the whole world
output <- plot_net_country(BITR_geocode)
## Mapping only North America
output <- plot_net_country(BITR_geocode, mapRegion = 'North America')
## Change the transparency of lines by modifying the lineAlpha parameter
output <- plot_net_country(BITR_geocode, lineAlpha = 0.2)
## Change the curvature of lines by modifying the lineResolution paramater
output <- plot_net_country(BITR_geocode, lineResolution = 30 )
## With all arguments:
output <- plot_net_country(BITR_geocode, mapRegion = 'North America', lineAlpha = 0.2,
lineResolution = 30)
}
|
7eea4094f99baf55dd9f99f5067fb53885366027
|
d8fb86933d4bbe05a74f33b744686e92c479c420
|
/R/rhub.R
|
db779ebf54c3f3d75f58ff0d45162f4c3e4e6cba
|
[] |
no_license
|
cran/packager
|
3bcb53591cc93ccff83578f0ad5f706caa253a49
|
c41ba71a5517cd9b9e242bc29fd5fe9ef8795c69
|
refs/heads/master
| 2023-07-23T13:12:01.844282
| 2023-07-07T14:10:02
| 2023-07-07T14:10:02
| 242,503,192
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,788
|
r
|
rhub.R
|
## #' Check on \code{rhub}
## #'
## #' Create rhub logs that will be queried using
## #' \code{get_local_rhub}, a helper function for
## #' \code{\link{provide_cran_comments}}.
## #' @template package_path
## #' @param os Character string specifying the operation systems to test on, stick
## #' with the default.
## #' @return A list of rhub package checks (which is incomplete if not called
## #' interactively. But called for the side effect of starting the rhub processes.
## #' @family maintenance functions
## #' @keywords internal
## #' @export
## #' @examples
## #' \dontrun{
## #' res <- check_rhub(".")
## #' str(res)
## #' cat(capture.output(print(res)), file = "log/rhub.log", sep = "\n")
## #' get_local_rhub(".")
## #' }
check_rhub <- function(path = ".", os = c("m1", "solaris", "windows")) {
  root <- rprojroot::find_root(path = path, rprojroot::is_r_package)
  platforms <- as.data.frame(rhub::platforms())
  res <- list()

  # Helper: submit a CRAN-style check for one rhub platform name.
  submit <- function(platform) {
    rhub::check_for_cran(path = root, platform = platform,
                         show_status = TRUE)
  }

  if ("m1" %in% os) {
    platform <- platforms[platforms$queue == "m1", "name"]
    res[["m1"]] <- submit(platform)
  }
  if ("solaris" %in% os) {
    index <- platforms[["os-type"]] == "Solaris"
    if (sum(index) < 1) throw("Can't get solaris for rhub")
    if (sum(index) > 1) {
      # Prefer the Developer Studio compiler variant when several
      # Solaris images are offered.
      jndex <- grepl("Developer Studio", platforms[["compilers"]],
                     ignore.case = TRUE)
      index <- index & jndex
    }
    # BUGFIX: take the *position* of the first match. The previous code
    # did `index <- index[1]`, which collapsed the logical mask to its
    # first element and therefore selected either all rows or none.
    row <- which(index)[1]
    if (is.na(row)) throw("Can't get solaris for rhub")
    res[["solaris"]] <- submit(platforms[row, "name"])
  }
  if ("windows" %in% os) {
    index <- platforms[["os-type"]] == "Windows"
    if (sum(index) < 1) throw("Can't get windows for rhub")
    if (sum(index) > 1) {
      # Prefer the r-devel image when several Windows images exist.
      jndex <- platforms[["rversion"]] == "r-devel"
      index <- index & jndex
    }
    # BUGFIX: same mask-collapse defect as in the Solaris branch.
    row <- which(index)[1]
    if (is.na(row)) throw("Can't get windows for rhub")
    res[["windows"]] <- submit(platforms[row, "name"])
  }
  if (!interactive()) message("The return value is meaningless.",
                              "Use get_rhub_latest()")
  return(invisible(res))
}
get_rhub_latest <- function(path = ".") {
  # List all rhub checks for the package and keep only the successful
  # ones that match the package's current version.
  checks <- rhub::list_package_checks(package = path)
  pkg_version <- as.package(path)$version
  ok <- checks$status == "ok" & checks$version == pkg_version
  matching <- checks[ok, TRUE]
  # Fetch and print the corresponding check results.
  print(rhub::get_check(as.character(matching[["id"]])))
  invisible(TRUE)
}
|
65f4cab4db8214ca195fb768d642ea46758c2f9b
|
4fe0f7d1d82290ba670fa0208436ff03f9159968
|
/run_analysis.R
|
6ccce0c244c1fdb07cdd3ea2f320ac58cd5df7ed
|
[] |
no_license
|
twheelock1/Getting-and-Cleaning-Data-Course-Project
|
e9b729260fb8d6a118e3baa3d78eac09a676ab22
|
1391a670ebba6c81001bf621fc9f02063a07d036
|
refs/heads/master
| 2021-01-10T02:26:27.815038
| 2015-10-27T16:35:28
| 2015-10-27T16:35:28
| 44,923,953
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,913
|
r
|
run_analysis.R
|
library(plyr)

## Load the test and train data sets (UCI HAR Dataset layout).
## NOTE(review): file-name casing is inconsistent below ("x_test.txt"
## vs "X_train.txt"); the UCI archive ships "X_test.txt"/"X_train.txt"
## -- confirm against the extracted files (only matters on
## case-sensitive file systems).
subject_test <- read.table("UCI HAR Dataset/test/subject_test.txt")
x_test <- read.table("UCI HAR Dataset/test/x_test.txt")
y_test <- read.table("UCI HAR Dataset/test/y_test.txt")
subject_train <- read.table("UCI HAR Dataset/train/subject_train.txt")
x_train <- read.table("UCI HAR Dataset/train/X_train.txt")
y_train <- read.table("UCI HAR Dataset/train/y_train.txt")
features <- read.table("UCI HAR Dataset/features.txt")

## Label the measurement columns with the feature names.
colnames(x_test) <- features[, 2]
colnames(x_train) <- features[, 2]

## Replace the numeric activity codes with descriptive labels.
activity_labels <- read.table("UCI HAR Dataset/activity_labels.txt")
for (i in seq_len(nrow(y_test))) {
  y_test[i, ] <- as.character(activity_labels[as.numeric(y_test[i, ]), 2])
}
for (i in seq_len(nrow(y_train))) {
  y_train[i, ] <- as.character(activity_labels[as.numeric(y_train[i, ]), 2])
}

## Keep only the mean and standard-deviation measurements.
extract_columns <- features[grepl("-mean()|-std()", features[, 2]), 2]
extract_x_test <- x_test[, extract_columns]
extract_x_train <- x_train[, extract_columns]

## Assign subject and activity column names.
colnames(y_test) <- "activity"
colnames(y_train) <- "activity"
colnames(subject_test) <- "subject"
colnames(subject_train) <- "subject"

## Combine subject, activity and measurements for each set.
## BUGFIX: bind the labelled y data frames directly. The original
## wrapped them in as.character(), which deparses a data frame into a
## single string, recycling it down the rows and losing the "activity"
## column name that ddply() needs below.
test_data <- cbind(subject_test, y_test, extract_x_test)
train_data <- cbind(subject_train, y_train, extract_x_train)

## Merge test and train data into one data set.
merged_data <- rbind(test_data, train_data)

## Average each measurement (columns 3 onward) per subject/activity.
averages_data <- ddply(merged_data, .(subject, activity),
                       function(x) colMeans(x[, 3:81]))

## Write the tidy summary data set.
write.table(averages_data, "averages_data.txt", row.name = FALSE)
|
1cea4e5821f8746afeeb349bf090240d775afa78
|
d68578f6fc83ac88e2e41a0e735479e68796c1c9
|
/R/utils.R
|
f2722a7554efb6d7dc6453d8b55b7a559230a9d3
|
[] |
no_license
|
anatolydryga/qiimeMap
|
a4413c4b036282a36e9b9af6f3cb0dcab30ac778
|
716e884d9bcd709bbf632a46e2bd908263439295
|
refs/heads/master
| 2021-01-20T10:35:55.170765
| 2014-11-13T17:17:06
| 2014-11-13T17:17:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 178
|
r
|
utils.R
|
# Return TRUE when every non-NA id in `data` (a list / data frame of id
# columns) occurs exactly once across all columns; NA entries are
# ignored.
is_unique_ids <- function(data) {
  ids <- unlist(data)
  ids <- ids[!is.na(ids)]
  !any(duplicated(ids))
}
|
3b732589e6019db529c83c109604f459470eedac
|
b5e1392db0834e30f9ec914e5b757605f08d8a7f
|
/Chapter3/Redes/zcript_Fig3A.R
|
85888095c9a49979a8aed9f0b0b3dfeb7094853a
|
[
"Unlicense"
] |
permissive
|
aaleta/thesis_plots
|
6226b180962cf3457b268496d0f1515d0c15364f
|
58f2f2f7fc592234628e39ab90fff89763a19679
|
refs/heads/master
| 2022-12-17T03:50:56.104445
| 2020-09-25T00:21:07
| 2020-09-25T00:21:07
| 298,241,760
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,767
|
r
|
zcript_Fig3A.R
|
library(ggplot2)
library(gridExtra)
library(grid)
library(ggthemes)
library(ggsci)
source("../../theme.R")
# Build the ER-network phase-diagram panel (Fig. 3A): critical beta vs
# gamma for the UUU and DUD models at mean degrees <k>=6 and <k>=12,
# read from the precomputed diagram files under Data/.
# If print.plot is TRUE, also writes the panel to Plots/Fig3A.pdf.
# Returns the ggplot object.
plotUER = function(print.plot=F)
{
	# UUU model: load both mean-degree variants and tag them with an id
	# used for shape/color mapping below.
	#UUU
	a = read.table("Data/UUU/diagramC_k6_mu0.10_ER.txt",header=T)
	data = data.frame(gamma=a$gamma,beta=a$beta,id="sU6")
	a = read.table("Data/UUU/diagramC_k12_mu0.10_ER.txt",header=T)
	data = rbind(data,data.frame(gamma=a$gamma,beta=a$beta,id="sU12"))
	# DUD model: same two mean degrees, appended to the same frame.
	#DUD
	a = read.table("Data/DUD/diagramC_k6_mu0.10_ER.txt",header=T)
	data = rbind(data,data.frame(gamma=a$gamma,beta=a$beta,id="sD6"))
	a = read.table("Data/DUD/diagramC_k12_mu0.10_ER.txt",header=T)
	data = rbind(data,data.frame(gamma=a$gamma,beta=a$beta,id="sD12"))

	# Restrict to the gamma range shown in the figure.
	data = data[data$gamma<0.20,]

	p = ggplot() +
	    theme_thesis() +
	    geom_point(data = data, aes(x = gamma, y = beta, color = id, shape = id), size=4, stroke=2) +
	    # Fixed shape per series; the shape legend is suppressed and
	    # merged into the color legend via override.aes below.
	    scale_shape_manual(values=c("sU6" = 0, "sU12" = 2, "sD6" = 23, "sD12" = 25),guide=FALSE) +
	    scale_color_jama(breaks = c("sU6", "sU12", "sD6", "sD12"),
			     labels = c("sU6" = "UUU <k>=6",
					"sU12" = "UUU <k>=12",
					"sD6" = "DUD <k>=6",
					"sD12" = "DUD <k>=12"),
			     guide = guide_legend(override.aes = list(
							shape = c(0,2,23,25)))
			     ) +
	    labs(title="ER") +
	    # Unicode escapes: \u03b2 is beta (y axis, critical value) and
	    # \u03b3 is gamma (x axis).
	    ylab(expression(bold("\u03b2"[c]))) +
	    xlab(expression(bold("\u03b3"))) +
	    theme(legend.position = c(0.75,0.9),
		  plot.title = element_text(size=30,hjust = 0.5),
		  legend.direction = "vertical",
		  legend.title = element_blank(),
		  legend.key.size = unit(0.8,"cm"),
		  ) +
	    scale_x_continuous(limits=c(0.0,0.20),expand=c(0,0)) +
	    scale_y_continuous(limits=c(0.000,0.02),expand=c(0,0),breaks=seq(.00,.02,by=.004))

	# Optionally write the panel to disk before returning it.
	if(print.plot){
		pdf(paste0("Plots/Fig3A.pdf"),width=8,height=6)
		plot(p)
		dev.off()
	}

	return(p)
}
|
36b9b1872cd2ddbf6b40af03dfbd85a88e5d73ff
|
3aef5a679c390d1f2c7ecba35eca09864164c5a5
|
/R/tparams_mean.R
|
f470f1c99c4fba42e45a8320db70718b9c65ee2c
|
[] |
no_license
|
jeff-m-sullivan/hesim
|
576edfd8c943c62315890528039366fe20cf7844
|
fa14d0257f0d6d4fc7d344594b2c4bf73417aaf3
|
refs/heads/master
| 2022-11-14T07:35:15.780960
| 2022-09-02T03:13:49
| 2022-09-02T03:13:49
| 140,300,858
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,695
|
r
|
tparams_mean.R
|
# tparams_mean -----------------------------------------------------------------
#' Predicted means
#'
#' Create a list containing means predicted from a statistical model.
#'
#' @param value Matrix of samples from the distribution of the
#' mean. Columns denote random samples and rows denote means for different
#' observations.
#' @param ... Arguments to pass to [id_attributes]. Each row in
#' `value` must be a prediction for a `strategy_id`,
#' `patient_id`, `state_id`, and optionally `time_id` combination.
#'
#' @note The `tparams_mean()` constructor would not normally be used by users; instead,
#' a `tparams_mean` object is typically created automatically as part of the
#' [`StateVals`] class with [create_StateVals()].
#'
#' @return An object of class `tparams_mean`, which is a list containing `value`,
#' `n_samples`, and the ID attributes passed to [id_attributes].
#'
#' @seealso A `tparams_mean` object is a type of [transformed parameter][tparams]
#' object and is a supported class type of the `params` field of the [`StateVals`]
#' class. See the documentation for [create_StateVals()] and [stateval_tbl()]
#' for examples of how to create `StateVals` objects. Predicted means can be
#' summarized across parameter samples using [summary.tparams_mean()].
#'
#' @example man-roxygen/example-tparams_mean.R
#'
#' @export
tparams_mean <- function(value, ...){
  # One sample per column of `value`; rows are observations
  stopifnot(is.matrix(value))
  # Construct first, then validate against the ID attributes in `...`
  object <- new_tparams_mean(value, n_samples = ncol(value), ...)
  check(object, ...)
}
# Internal low-level constructor: assembles the list and stamps the class.
# Validation is done separately by check.tparams_mean().
new_tparams_mean <- function(value, n_samples, ...){
  id_attrs <- do.call("new_id_attributes", list(...))
  out <- c(list(value = value, n_samples = n_samples), id_attrs)
  structure(out, class = "tparams_mean")
}
#' @rdname check
check.tparams_mean <- function(object, ...){
  # Validate the ID attributes themselves, then their lengths against `value`
  id_args <- list(...)
  check(do.call("new_id_attributes", id_args))
  n_rows <- nrow(object$value)
  for (v in c("strategy_id", "patient_id", "state_id")){
    if (length(id_args[[v]]) != n_rows){
      stop("The length of each ID variable must equal the number of rows in 'value'.",
           call. = FALSE)
    }
  }
  object
}
# summary.tparams_mean ---------------------------------------------------------
#' Summarize `tparams_mean` object
#'
#' The `summary()` method summarizes a [`tparams_mean`] object containing
#' predicted means; summary statistics are computed for each
#' combination of the ID variables. The `print()` method
#' summarizes the object using `summary.tparams_mean()` and prints it to the
#' console.
#'
#' @inheritParams summary.params
#' @param object,x A [`tparams_mean`] object.
#' @param ... Currently unused.
#'
#' @return A `data.table` with columns for (i) the ID variables,
#' (ii) the mean of each parameter across parameter samples (`mean`),
#' (iii) the standard deviation of the parameter samples (`sd`), and
#' (iv) quantiles of the parameter samples corresponding to the `probs` argument.
#'
#' @seealso See [`tparams_mean`] for an example use of the summary and
#' print methods.
#'
#' @export
summary.tparams_mean <- function(object, probs = c(0.025, 0.975), ...) {
  # Row-wise quantiles; apply() returns a matrix (probs x rows) when
  # length(probs) > 1 and a plain vector otherwise, so normalize both cases
  # to a rows x probs matrix with percentage column names.
  qs <- apply(object$value, 1, stats::quantile, probs = probs)
  if (is.matrix(qs)) {
    qs <- t(qs)
  } else{
    qs <- as.matrix(qs)
    colnames(qs) <- paste0(probs * 100, "%")
  }
  data.table(
    make_id_data_table(object),
    mean = rowMeans(object$value),
    sd = apply(object$value, 1, stats::sd),
    qs
  )
}
# print.tparams_mean -----------------------------------------------------------
#' @rdname summary.tparams_mean
#' @export
print.tparams_mean <- function(x, ...) {
  # Header then the summary table; byte-identical output to the original
  cat("A \"tparams_mean\" object \n\n", "Summary of means:\n", sep = "")
  print(summary(x, ...))
  invisible(x)
}
|
63c0f017432a0012022f63307ecca413db818fed
|
8c0faaeda764de080239adf03ba3e17192df48da
|
/02a-Utils.R
|
4644b1e29d9be614c5dc2e9376925dc9f8739d11
|
[] |
no_license
|
HaydenMcT/Stat447-PokemonEggSteps
|
8e409c3a6e6ed613dfd3559ff66914313430f00d
|
9c3fcc9a420b3e7f2f20b39fa8d1cf719d3091be
|
refs/heads/main
| 2023-04-10T20:02:17.484177
| 2021-04-15T18:36:23
| 2021-04-15T18:36:23
| 357,062,470
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,957
|
r
|
02a-Utils.R
|
## CODE FILE 2a: Creates new Rdata object containing all utility functions needed for Phase B,
## and some for Phase C.
#' @description
#' Find category with modal probability
#' @param predMatrix ncases x J matrix; J is number of response categories
#' Each row of predMatrix is a probability mass function.
#' @return vector of length ncases, each entry in {1,...,J}
CatModalProb=function(predMatrix)
{
  # Index of the (first) largest entry of one pmf; ties resolve to the
  # smallest category index.
  pick_mode = function(pmf) {
    hits = which(pmf == max(pmf))
    hits[1]
  }
  apply(predMatrix, 1, pick_mode)
}
#' @description
#' Prediction intervals for a categorical response
#' @param ProbMatrix of dimension nxJ, J = # categories,
#' each row is a probability mass function
#' @param labels vector of length J, with short names for categories
#' @param level1 numeric decimal representing first level of pred interval
#' (default 0.50, matching OrdinalPredInterval)
#' @param level2 numeric decimal representing second level of pred interval
#' (default 0.80, matching OrdinalPredInterval)
#'
#' @details
#' Categories are included in decreasing order of probability until the
#' total probability of the excluded categories is at most 1 - level.
#' level1 and level2 as params allows this to be a more general function for any
#' level(s) of prediction intervals.
#'
#' @return list with two string vectors of length n:
#' pred1 has level1 prediction intervals
#' pred2 has level2 prediction intervals
#'
CategoryPredInterval = function(ProbMatrix, labels, level1 = 0.50, level2 = 0.80)
{ ncases=nrow(ProbMatrix)
  pred1=rep(NA,ncases); pred2=rep(NA,ncases)
  for(i in 1:ncases)
  { p=ProbMatrix[i,]
    ip=order(p)
    pOrdered=p[ip] # increasing order
    labelsOrdered=labels[rev(ip)] # decreasing order
    G=rev(cumsum(c(0,pOrdered))) # cumulative sum from smallest
    # k = number of top categories needed so that the remaining mass <= 1-level
    k1=min(which(G<=(1-level1)))-1
    k2=min(which(G<=(1-level2)))-1
    pred1[i]=paste(labelsOrdered[1:k1],collapse="")
    pred2[i]=paste(labelsOrdered[1:k2],collapse="")
  }
  list(pred1=pred1, pred2=pred2)
}
#' @description
#' Prediction intervals for an ordinal response
#' @param ProbMatrix of dimension nxJ, J = # categories,
#' each row is a probability mass function
#' @param labels vector of length J, with short names for categories
#' @param level1 numeric decimal representing first level of pred interval
#' @param level2 numeric decimal representing second level of pred interval
#'
#' @details
#' For each case, the shortest contiguous run of categories whose total
#' probability exceeds the requested level is returned (intervals respect
#' the category ordering). level1 and level2 as params allows this to be a
#' more general function for any level(s) of prediction intervals.
#'
#' @return list with two string vectors of length n:
#' pred1 has level1 prediction intervals
#' pred2 has level2 prediction intervals
#' Predicted categories are NOT ordered by decreasing probability
#'
OrdinalPredInterval = function(ProbMatrix,labels, level1=.50, level2=.80)
{
  nlabels = ncol(ProbMatrix)
  ncases = nrow(ProbMatrix)
  # Labels of the smallest contiguous run whose probability exceeds `level`.
  # Falls back to the full label set if nothing qualifies (only possible
  # when level >= 1); the previous code left the result undefined there.
  smallest_covering_run = function(p, level) {
    for (interval_size in 1:nlabels) {
      candidate = FindMaxContiguousSubset(p, interval_size)
      if (candidate$sum > level) {
        return(labels[candidate$indices])
      }
    }
    labels
  }
  pred1=rep(NA,ncases); pred2=rep(NA,ncases)
  for(i in 1:ncases)
  { p=ProbMatrix[i,]
    pred1[i] = paste(smallest_covering_run(p, level1), collapse="")
    pred2[i] = paste(smallest_covering_run(p, level2), collapse="")
  }
  list(pred1=pred1, pred2=pred2)
}
#' @description
#' Given a vector p and a length len, find the maximal contiguous interval of length len.
#' @param p a vector for which to find the maximal contiguous subset (in some applications, this will be a probability)
#' @param len the length of the contiguous subset to find. Must be <= length(p) and >= 1
#'
#' @return A list with `sum` (the maximal window sum) and `indices` (the
#' positions forming that window). If every window sum is <= 0, returns
#' sum = 0 and empty indices (matching the original behavior, which assumed
#' the maximal interval is > 0). Invalid inputs print a message and return
#' list(sum=0, indices=0).
#'
FindMaxContiguousSubset = function(p, len)
{
  num_entries = length(p)
  # check for errors in input values (messages kept identical to original)
  if (num_entries < len){
    print("Error! number of entries in provided array p is less than requested length of interval.")
    return(list(sum=0,indices=0))
  }
  if (len < 1){
    print("Error! requested length of interval must be an integer >= 1.")
    return(list(sum=0,indices=0))
  }
  # Sum of every length-`len` window, computed the same way as the original
  # loop (direct sums, so floating-point results are unchanged).
  starts = seq_len(num_entries - len + 1)
  window_sums = vapply(starts, function(i) sum(p[i:(i + len - 1)]), numeric(1))
  best_start = which.max(window_sums)   # first maximum, as in the original
  best_sum = window_sums[best_start]
  if (best_sum <= 0) {
    # No strictly positive window: preserve the original's empty result
    return(list(sum = 0, indices = c()))
  }
  list(sum = best_sum, indices = best_start:(best_start + len - 1))
}
#' @description
#' Create contingency table showing the frequencies some given prediction intervals: contain the true class, miss the true class by 1 class,
#' miss the true class by 2 classes, or miss the true class by 3 classes.
#' @param prediction_intervals vector of length n, giving prediction intervals for each data point;
#' each entry is a string of single-character class labels (as produced by
#' pasting interval labels with collapse=""), e.g. "SM"
#' @param true_labels vector of length n, giving true labels for each holdout data point. If having k ordered levels, vector should have been encoded to
#' k factored AND ORDERED single-character labels.
#'
#' @details
#' A miss distance is the smallest ordinal distance between the true level
#' and any level contained in the interval. Distances greater than 3
#' (possible only with more than 4 levels) are counted in "missed_by_3".
#' Rows are named after the actual factor levels; the previous version
#' hard-coded "S","M","L","E", which silently mislabeled rows for any
#' other level set.
#'
#' @return Contingency table showing numbers of correct and 'off' (by how many classes) classifications.
#'
PredIntMisclassTab = function(prediction_intervals,true_labels){
  level_vector = levels(true_labels)
  klevels = length(level_vector)
  tab <- matrix(0, nrow = klevels, ncol = 4,
                dimnames = list(level_vector,
                                c("good", "missed_by_1", "missed_by_2", "missed_by_3")))
  tab <- as.table(tab)
  n = length(prediction_intervals)
  for (i in seq_len(n)){
    true_idx <- match(as.character(true_labels[i]), level_vector)
    # split e.g. "SM" into c("S","M"); assumes single-character labels
    interval_chars <- strsplit(prediction_intervals[i], "")[[1]]
    pred_idx <- match(interval_chars, level_vector)
    pred_idx <- pred_idx[!is.na(pred_idx)]
    # skip entries whose truth or interval contains no recognized level
    if (is.na(true_idx) || length(pred_idx) == 0) next
    # ordinal distance from the truth to the closest predicted level
    dist <- min(abs(pred_idx - true_idx))
    col <- if (dist == 0) "good" else paste0("missed_by_", min(dist, 3))
    tab[true_idx, col] <- tab[true_idx, col] + 1
  }
  return(tab)
}
#' @description
#' Encode length-n vector of K ORDERED factor levels, to K ordered single-character factor labels.
#' @param ordFact vector of length n with K ORDERED factor levels
#' @param newLabels K-length vector of ORDERED single-character labels for each ordered factor
#'
#' @details
#' newLabels as params allows this to be a more general function for any chosen factor labels.
#' The i-th factor level maps to newLabels[i]; NA inputs stay NA.
#'
#' @return encod_labels vector of length n, with new labels for each ordered factor
#'
EncodeToChar = function(ordFact, newLabels)
{
  # A factor's integer codes index its levels directly, so the replacement
  # label for each observation is a single vectorized lookup.
  as.character(newLabels)[as.integer(ordFact)]
}
#' @description
#' Calculate loss for prediction intervals with a categorical response that has 4 categories
#' @param prediction_intervals vector of length n, giving prediction intervals for each data point
#' @param true_labels vector of length n, giving true labels for each data point
#' @param costs_correct vector giving costs for having the correct letter in the prediction interval, for intervals containing 1,2, 3, or 4 categories respectively
#' (note that costs_correct[1] is always assumed to be 0, because that corresponds to correctly predicting the exact category)
#' @param costs_incorrect vector giving costs for not having the correct letter in the interval, for intervals containing 1,2, 3, or 4 categories respectively
#' (note that we will never have an incorrect prediction if we include all 4 categories in the interval, so costs_incorrect[4] is meaningless)
#'
#' @return The loss for the provided prediction interval (a higher value means the prediction interval is worse)
#'
PredIntervalLoss = function(prediction_intervals,true_labels,
                            costs_correct = c(0, 1/4, 2/4, 3/4) * 1/length(true_labels),
                            costs_incorrect = c(1, 1, 1, 0) * 1/length(true_labels)){
  # Interval width = number of single-character categories it contains
  interval_sizes <- nchar(prediction_intervals)
  # Element-wise containment check: does interval i contain true label i?
  contains_truth <- mapply(grepl, as.character(true_labels), prediction_intervals)
  # Pick the per-case cost by interval width and correctness, then total
  per_case <- ifelse(contains_truth,
                     costs_correct[interval_sizes],
                     costs_incorrect[interval_sizes])
  sum(per_case)
}
#' @description Coverage rate of prediction intervals for a categorical response
#' @param Table table with true class labels as row names, pred intervals as column names
#' @return list with average length, #misses, miss rate, coverage rate by class
Coverage=function(Table)
{
  labels <- rownames(Table)
  predLabels <- colnames(Table)
  rowFreq <- rowSums(Table)
  intervalSizes <- nchar(predLabels)
  # Per true class: frequency-weighted count of intervals containing the class
  cover <- vapply(seq_along(labels), function(irow) {
    sum(Table[irow, ] * grepl(labels[irow], predLabels))
  }, numeric(1))
  # Per true class: frequency-weighted total interval length
  totalLen <- vapply(seq_along(labels), function(irow) {
    sum(Table[irow, ] * intervalSizes)
  }, numeric(1))
  miss <- rowFreq - cover
  avgLen <- totalLen / rowFreq
  list(avgLen=avgLen, miss=miss, missRate=miss/rowFreq, coverRate=cover/rowFreq)
}
#' @description Returns True Coverage rate of prediction intervals for a categorical response
#' not coverage rate per class
#' @param Table table with true class labels as row names, pred intervals as column names
#' @return coverage rate and average length of prediction interval across all classes
CoverageAcrossClasses =function(Table)
{
  labels <- rownames(Table)
  predLabels <- colnames(Table)
  n <- sum(Table)
  # Total interval length only depends on the column (interval) of each case
  avgLen <- sum(colSums(Table) * nchar(predLabels)) / n
  # Count of cases whose interval string contains the true class label
  coverCount <- sum(vapply(seq_along(labels), function(irow) {
    sum(Table[irow, ] * grepl(labels[irow], predLabels))
  }, numeric(1)))
  list(avgLen = avgLen, coverRate = coverCount / n)
}
#' @description
#' (wrapper for predInterval loss) - calculates loss for a prediction interval made from the provided predictions
#' @param predictions matrix of length n x 3, giving predictions for each data point,
#' where a prediction is a probability for each possible letter
#' @param true_labels vector of length n, giving true labels for each data point
#' @param use_pred50 true if loss is to be based on the 50% prediction intervals,
#' false if loss is to be based on the 80% prediction intervals
#'
#' @return loss based on a prediction interval formed from predictions (higher value means a worse model)
#'
GetLoss = function(predictions, true_labels, use_pred50=TRUE){
  # Build both interval levels, then score the requested one
  intervals <- OrdinalPredInterval(predictions, labels = c("S", "M", "L", "E"))
  chosen <- if (use_pred50) intervals$pred1 else intervals$pred2
  PredIntervalLoss(chosen, true_labels)
}
#' @description
#' (consolidates the logic of evaluating each potential tree/multinomial model)
#' given a model, fits that model to the training set and creates predictions such that each category has a predicted probability
#' @param model one of "random_forest", "multinomial", "polr", "ctree", corresponding to the model to be used
#' @param train_set training set, possibly with some features removed (so that we can test different feature selection models)
#' @param train_y response variable for training set
#' @param predict_set set to evaluate predictions on. May be the same as training set, or could match a validation/holdout set
#'
#' @return predictions of the newly fit model on predict_set, where the prediction for each example are formatted as a probability
#' of that example matching each class, respectively
#'
GetModelPreds = function(model, train_set, train_y, predict_set){
  if (model == "multinomial"){
    fit = vglm(train_y ~ ., multinomial(), data=train_set)
    preds = predict(fit, type="response", newdata=predict_set)
  } else if (model == "random_forest"){
    fit = randomForest(train_y ~ ., data=train_set, importance=TRUE, proximity=TRUE)
    preds = predict(fit, type="prob", newdata=predict_set)
  } else if (model == "ctree"){
    fit = rpart(train_y ~ ., data=train_set)
    preds = predict(fit, newdata=predict_set)
  } else if (model=='polr'){
    fit = polr(train_y ~ ., data=train_set)
    preds = predict(fit, type="probs", newdata=predict_set)
  } else {
    # Unknown model: report and return a sentinel (original behavior)
    print("model not recognized. Should be one of \"multinomial\", \"random_forest\", \"polr\", or \"ctree\". ")
    return(0)
  }
  return(preds)
}
#' @description
#' Performs forward selection: at each step, it adds the one variable
#' which decreases loss the most for this multinomial logistic classifier's prediction interval
#' given the previously selected variables. Stops adding variables when
#' validation/holdout set accuracy stops improving.
#'
#' @param train the dataset on which to fit the data. CANNOT INCLUDE RESPONSE VBL
#' (all features will be considered in the model)
#' @param validation the dataset on which to test the model
#' @param y a vector representing the response variable for this dataset
#' @param y_tilde a vector representing the response variable for the validation set
#' @param required_improvement a vector representing how much training set improvement is required to warrant considering a new variable
#' @param use_pred50 if true, use a 50% prediction interval. Else use 80%
#' @param model one of "random_forest", "multinomial", "ctree", corresponding to the model to be used for feature selection
#' @param always_include a vector of variable names to always select as part of the model
#' (it is assumed that always_include is a subset of the variable names in train)
#' @return the variables selected by the model
ForwardSelect = function(train, validation, y, y_tilde, required_improvement = 0.00, use_pred50 = TRUE, model= "multinomial", always_include = c()){
  var_names=names(train)
  # Best training-set score seen so far (score = 1 - loss, so higher is better)
  max_score = -Inf
  # Best validation-set score seen so far; used as the stopping criterion
  max_validation_score = -Inf
  variables = always_include
  var_to_add = var_names[1]
  add_new = TRUE
  # Greedy loop: each pass picks the single unselected variable whose addition
  # most improves the training score, then keeps it only if the validation
  # score also improves. Statement order matters: max_score carries over
  # between passes, so each pass must beat the all-time best by
  # required_improvement.
  while(add_new == TRUE){
    add_new = FALSE
    for (variable in var_names[!(var_names %in% variables)]){
      # Fit on the candidate + already-selected columns; score on the
      # full training set
      predictions = GetModelPreds(model, subset(train, select = c(variable, variables)), y, train)
      loss = GetLoss(predictions, y, use_pred50)
      model_score = 1 - loss
      if (model_score > max_score + required_improvement){
        add_new = TRUE
        max_score = model_score
        var_to_add = variable
      }
    }
    if (add_new){
      # Re-fit with the winning candidate and confirm it helps on the
      # validation set; otherwise stop the search entirely
      predictions_validation = GetModelPreds(model, subset(train, select = c(var_to_add, variables)), y, validation)
      loss = GetLoss(predictions_validation, y_tilde, use_pred50)
      new_validation_score = 1 - loss
      if (new_validation_score > max_validation_score){
        variables = c(variables, var_to_add)
        max_validation_score = new_validation_score
      } else {
        add_new = FALSE
      }
    }
  }
  return(variables)
}
# needed for Phase C
#' @description
#' Produces an nxn table with averages of values from some given nxn tables.
#' All tables must share the same dimensions; the result keeps the dimnames
#' of the input tables (the previous version hard-coded 4x4 "S","M","L","E"
#' names and therefore errored for any other size).
#' @param tables k-length list of nxn tables (k >= 1).
#'
#' @return table of averaged values from all given tables.
#'
AverageTables = function(tables){
  ktables = length(tables)
  stopifnot(ktables >= 1)
  # Element-wise sum of all tables, then divide once by the count
  total <- Reduce(`+`, tables)
  as.table(total / ktables)
}
# Persist all utility functions for the later phase scripts to load().
# Assumes the RDataFiles/ directory already exists -- TODO confirm.
save(file="RDataFiles/Utils.RData", CoverageAcrossClasses, CatModalProb, CategoryPredInterval, OrdinalPredInterval, FindMaxContiguousSubset, PredIntervalLoss, Coverage,
EncodeToChar, ForwardSelect, GetModelPreds, GetLoss, PredIntMisclassTab, AverageTables)
|
18e3db360b5ea9e0680e97332567bf17fcf3d78f
|
3b41fce22a52127830f1dd69a56a6485d8ee0cd8
|
/man/FindAlpha.Rd
|
639117b11995bae03e3441408ed15014241bfb35
|
[
"Apache-2.0"
] |
permissive
|
cran/RegSDC
|
165bbee727fc53149128a3e01e298cd535f25816
|
ea50d6d5a35efd334df582d2a301c6a120bf1014
|
refs/heads/master
| 2022-08-29T08:12:12.653287
| 2022-08-19T07:30:02
| 2022-08-19T07:30:02
| 166,078,001
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 811
|
rd
|
FindAlpha.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CalculateC.R
\name{FindAlpha}
\alias{FindAlpha}
\alias{FindAlphaSimple}
\title{Calculation of alpha}
\usage{
FindAlpha(a, b, tryViaQR = TRUE)
FindAlphaSimple(a, b)
}
\arguments{
\item{a}{matrix E in paper}
\item{b}{matrix Eg in paper}
\item{tryViaQR}{When TRUE QR transformation used (to handle collinearity)
when ordinary calculations fail.}
}
\value{
alpha
}
\description{
Function to find the largest alpha that makes equation 10 in the paper solvable.
}
\note{
FindAlphaSimple performs the calculations by a simple/direct method.
FindAlpha is made to handle problematic special cases.
}
\seealso{
See examples in the documentation of \code{\link{CalculateC}}
}
\author{
Øyvind Langsrud
}
|
8984a6820890f5ec2f5dcd8ba83c09ac8243d6f5
|
fe3a2d4d2303c1302ab219a23d00828997cd5d12
|
/rDNA/man/dna_dendrogram.Rd
|
590a0f780ca1ebef342d85175540ced0ccfe9de4
|
[] |
no_license
|
njatel/dna
|
2dfbb9e7b7d08c4e22d89191b5d726ca96e3b77c
|
09a3e7aa3e13b5bf382365c7dee6d9a10d5e4ee9
|
refs/heads/master
| 2023-01-11T10:01:16.784668
| 2020-11-18T08:50:48
| 2020-11-18T08:50:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 18,317
|
rd
|
dna_dendrogram.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rDNA.R
\name{dna_dendrogram}
\alias{dna_dendrogram}
\title{Create a cluster dendrogram for a DNA database}
\usage{
dna_dendrogram(
connection,
statementType = "DNA Statement",
variable1 = "organization",
variable1Document = FALSE,
variable2 = "concept",
variable2Document = FALSE,
qualifier = "agreement",
duplicates = "include",
start.date = "01.01.1900",
stop.date = "31.12.2099",
start.time = "00:00:00",
stop.time = "23:59:59",
excludeValues = list(),
excludeAuthors = character(),
excludeSources = character(),
excludeSections = character(),
excludeTypes = character(),
invertValues = FALSE,
invertAuthors = FALSE,
invertSources = FALSE,
invertSections = FALSE,
invertTypes = FALSE,
method = "best",
k = 0,
k.max = 5,
rectangle.colors = NULL,
labels = "value",
label.colors = "color",
label.size = 12,
label.truncate = 30,
leaf.shape = "elbow",
leaf.colors = label.colors,
leaf.width = 1,
leaf.alpha = 1,
symbol.shapes = 19,
symbol.colors = label.colors,
symbol.sizes = 5,
circular = FALSE,
theme = "bw",
caption = TRUE,
return.multiclust = FALSE
)
}
\arguments{
\item{connection}{A \code{dna_connection} object created by the
\code{dna_connection} function.}
\item{statementType}{The name of the statement type in which the variable
of interest is nested. For example, \code{"DNA Statement"}.}
\item{variable1}{The first variable for network construction. In a one-mode
network, this is the variable for both the rows and columns. In a
two-mode network, this is the variable for the rows only. In an event
list, this variable is only used to check for duplicates (depending on
the setting of the \code{duplicate} argument).}
\item{variable1Document}{A boolean value indicating whether the first
variable is at the document level (i.e., \code{"author"},
\code{"source"}, \code{"section"}, \code{"type"}, \code{"id"}, or
\code{"title"}).}
\item{variable2}{The second variable for network construction. In a one-mode
network, this is the variable over which the ties are created. For
example, if an organization x organization network is created, and ties
in this network indicate co-reference to a concept, then the second
variable is the \code{"concept"}. In a two-mode network, this is the
variable used for the columns of the network matrix. In an event list,
this variable is only used to check for duplicates (depending on the
setting of the \code{duplicate} argument).}
\item{variable2Document}{A boolean value indicating whether the second
variable is at the document level (i.e., \code{"author"},
\code{"source"}, \code{"section"}, \code{"type"}, \code{"id"}, or
\code{"title"}).}
\item{qualifier}{The qualifier variable. In a one-mode network, this
variable can be used to count only congruence or conflict ties. For
example, in an organization x organization network via common concepts,
a binary \code{"agreement"} qualifier could be used to record only ties
where both organizations have a positive stance on the concept or where
both organizations have a negative stance on the concept. With an
integer qualifier, the tie weight between the organizations would be
proportional to the similarity or distance between the two organizations
on the scale of the integer variable.
In a two-mode network, the qualifier variable can be used to retain only
positive or only negative statements or subtract negative from positive
mentions. All of this depends on the setting of the
\code{qualifierAggregation} argument. For event lists, the qualifier
variable is only used for filtering out duplicates (depending on the
setting of the \code{duplicate} argument.
The qualifier can also be \code{NULL}, in which case it is ignored, meaning
that values in \code{variable1} and \code{variable2} are unconditionally
associated with each other in the network when they co-occur. This is
identical to selecting a qualifier variable and setting
\code{qualifierAggregation = "ignore"}.}
\item{duplicates}{Setting for excluding duplicate statements before network
construction. Valid settings are \code{"include"} (for including all
statements in network construction), \code{"document"} (for counting
only one identical statement per document), \code{"week"} (for counting
only one identical statement per calendar week), \code{"month"} (for
counting only one identical statement per calendar month), \code{"year"}
(for counting only one identical statement per calendar year), and
\code{"acrossrange"} (for counting only one identical statement across
the whole time range).}
\item{start.date}{The start date for network construction in the format
"dd.mm.yyyy". All statements before this date will be excluded.}
\item{stop.date}{The stop date for network construction in the format
"dd.mm.yyyy". All statements after this date will be excluded.}
\item{start.time}{The start time for network construction on the specified
\code{start.date}. All statements before this time on the specified date
will be excluded.}
\item{stop.time}{The stop time for network construction on the specified
\code{stop.date}. All statements after this time on the specified date
will be excluded.}
\item{excludeValues}{A list of named character vectors that contains entries
which should be excluded during network construction. For example,
\code{list(concept = c("A", "B"), organization = c("org A", "org B"))}
would exclude all statements containing concepts "A" or "B" or
organizations "org A" or "org B" when the network is constructed. This
is irrespective of whether these values appear in \code{variable1},
\code{variable2}, or the \code{qualifier}. Note that only variables at
the statement level can be used here. There are separate arguments for
excluding statements nested in documents with certain meta-data.}
\item{excludeAuthors}{A character vector of authors. If a statement is
nested in a document where one of these authors is set in the "Author"
meta-data field, the statement is excluded from network construction.}
\item{excludeSources}{A character vector of sources. If a statement is
nested in a document where one of these sources is set in the "Source"
meta-data field, the statement is excluded from network construction.}
\item{excludeSections}{A character vector of sections. If a statement is
nested in a document where one of these sections is set in the "Section"
meta-data field, the statement is excluded from network construction.}
\item{excludeTypes}{A character vector of types. If a statement is
nested in a document where one of these types is set in the "Type"
meta-data field, the statement is excluded from network construction.}
\item{invertValues}{A boolean value indicating whether the entries provided
by the \code{excludeValues} argument should be excluded from network
construction (\code{invertValues = FALSE}) or if they should be the only
values that should be included during network construction
(\code{invertValues = TRUE}).}
\item{invertAuthors}{A boolean value indicating whether the entries provided
by the \code{excludeAuthors} argument should be excluded from network
construction (\code{invertAuthors = FALSE}) or if they should be the
only values that should be included during network construction
(\code{invertAuthors = TRUE}).}
\item{invertSources}{A boolean value indicating whether the entries provided
by the \code{excludeSources} argument should be excluded from network
construction (\code{invertSources = FALSE}) or if they should be the
only values that should be included during network construction
(\code{invertSources = TRUE}).}
\item{invertSections}{A boolean value indicating whether the entries
provided by the \code{excludeSections} argument should be excluded from
network construction (\code{invertSections = FALSE}) or if they should
be the only values that should be included during network construction
(\code{invertSections = TRUE}).}
\item{invertTypes}{A boolean value indicating whether the entries provided
by the \code{excludeTypes} argument should be excluded from network
construction (\code{invertTypes = FALSE}) or if they should be the
only values that should be included during network construction
(\code{invertTypes = TRUE}).}
\item{method}{This argument represents the clustering method to be used for
the dendrogram. Only hierarchical clustering methods are compatible with
dendrograms. The following values are permitted:
\describe{
\item{"best"}{Automatically choose the best clustering method with a given
number of clusters \code{k} (or between 2 and \code{k.max} if
\code{k = 0}). The selection is based on network modularity of a given
cluster solution in the subtract network.}
\item{"single"}{Hierarchical clustering with single linkage.}
\item{"average"}{Hierarchical clustering with average linkage.}
\item{"complete"}{Hierarchical clustering with complete linkage.}
\item{"ward"}{Hierarchical clustering with Ward's algorithm.}
\item{"fastgreedy"}{Fast & greedy community detection.}
\item{"walktrap"}{Walktrap community detection.}
\item{"leading_eigen"}{Leading eigenvector community detection.}
\item{"edge_betweenness"}{Edge betweenness community detection
(Girvan-Newman algorithm).}
}}
\item{k}{If \code{method = "best"} is selected, \code{k} is used to determine
at which level the respective clustering method works best. For example, if
\code{k = 3} is supplied, the algorithm will compare the modularity of the
different cluster solutions with three clusters each time to determine the
best-fitting cluster solution. If \code{k = 0} is supplied (the default),
all solutions between one and \code{k.max} clusters will be attempted. The
\code{k} argument also determines the number of rectangles that are drawn
if the \code{rectangle.colors} argument is used. If \code{k = 0}, the
actual number of clusters that works best will be used instead.}
\item{k.max}{The maximal number of clusters to try if \code{method = "best"}
is used.}
\item{rectangle.colors}{If \code{NULL}, no rectangles are drawn. If a single
color is provided (for example, \code{"purple"} or \code{"#AA9900"}),
\code{k} rectangles (one per cluster) will be drawn, and they will all be
in the same color. If \code{k} colors are provided as a vector, each
rectangle will be in a separate color. If \code{rectangle.colors =
"cluster"}, different colors will be picked for the rectangles based on
cluster membership.}
\item{labels}{Which labels to use for variable 1. These can be:
\describe{
\item{"value"}{The actual variable values for \code{variable1} as extracted
from the "value" column in a \code{\link{dna_getAttributes}} call.}
\item{"color"}{The color designated for \code{variable1} in DNA, as
extracted from the "color" column in a \code{\link{dna_getAttributes}}
call.}
\item{"type"}{The type designated for \code{variable1} in DNA, as
extracted from the "type" column in a \code{\link{dna_getAttributes}}
call.}
\item{"alias"}{The alias designated for \code{variable1} in DNA, as
extracted from the "alias" column in a \code{\link{dna_getAttributes}}
call.}
\item{"notes"}{The notes designated for \code{variable1} in DNA, as
extracted from the "notes" column in a \code{\link{dna_getAttributes}}
call.}
\item{a vector of \code{character} objects}{A vector with as many labels
as there are rows in a \code{\link{dna_getAttributes}} call, in the
same order (i.e., replacing the original labels that are in
alphabetical order).}
}}
\item{label.colors}{Colors for the labels. Numbers, string colors, or
hexadecimal strings are allowed. The following values are permitted:
\describe{
\item{a single color}{A single color for all labels.}
\item{a vector of \code{k} colors}{A separate color for the labels in
each cluster.}
\item{a vector of as many colors as labels}{A separate color for each
single label.}
\item{"cluster"}{A separate color is chosen automatically for the labels
in each cluster. There are \code{k} different colors.}
\item{"color"}{The colors stored in the "color" column in the attribute
manager of the DNA database are used, as extracted using the
\code{\link{dna_getAttributes}} function.}
\item{"type"}{A separate color for each value in the "type" column in the
attribute manager of DNA is used.}
\item{"alias"}{A separate color for each value in the "alias" column in
the attribute manager of DNA is used.}
\item{"notes"}{A separate color for each value in the "notes" column in
the attribute manager of DNA is used.}
}}
\item{label.size}{Font size for the labels.}
\item{label.truncate}{Number of characters to retain for each label. If all
characters should be kept, \code{Inf} can be set.}
\item{leaf.shape}{The way the dendrogram leaves are drawn. The following
values are permitted:
\itemize{
\item "elbow"
\item "link"
\item "diagonal"
\item "arc"
\item "fan"
}}
\item{leaf.colors}{The colors of the leaves in the dendrogram. The same
values as in the \code{label.colors} argument are permitted (see the
description above for details).}
\item{leaf.width}{The line width of the leaves.}
\item{leaf.alpha}{The opacity of the leaves and symbols (between 0 and 1).}
\item{symbol.shapes}{The shapes of the leaf end symbols for each leaf. The
following values are permitted:
\describe{
\item{a single integer}{A single \code{pch} symbol to use for all leaves,
for example \code{2} or \code{19}.}
\item{an integer vector with \code{k} elements}{A vector of separate
\code{pch} symbol integer values for the labels in each cluster. For
example, if there are three clusters, this could be \code{1:3}.}
\item{an integer vector with as many elements as symbols}{A vector of
separate \code{pch} values for each value in \code{variable1}, in the
order as the rows produced by \code{\link{dna_getAttributes}} (i.e.,
alphabetical label order).}
\item{"cluster"}{A separate symbol is automatically selected for each
cluster.}
\item{"color"}{A separate symbol is automatically selected for each color
saved in the "color" column of the output of
\code{\link{dna_getAttributes}}, which represents the colors in the
attribute manager in DNA.}
\item{"type"}{A separate symbol is automatically selected for each type
saved in the "type" column of the output of
\code{\link{dna_getAttributes}}, which represents the types in the
attribute manager in DNA.}
\item{"alias"}{A separate symbol is automatically selected for each alias
saved in the "alias" column of the output of
\code{\link{dna_getAttributes}}, which represents the aliases in the
attribute manager in DNA.}
\item{"notes"}{A separate symbol is automatically selected for each note
saved in the "notes" column of the output of
\code{\link{dna_getAttributes}}, which represents the notes in the
attribute manager in DNA.}
}}
\item{symbol.colors}{The colors of the symbols at the leaf ends. The same
values are permitted as in the \code{label.colors} argument (see the
description there).}
\item{symbol.sizes}{The sizes of the symbols at the leaf ends. The default
value is \code{5} for all symbols. Instead of a single number, a vector of
\code{k} values (one for each cluster) or a vector with as many values
 as there are leaves or labels can be supplied.}
\item{circular}{Draw a dendrogram with a circular layout?}
\item{theme}{The theme to be used. See \code{\link[ggplot2]{ggtheme}} for
details. Permitted values are:
\itemize{
\item "bw"
\item "classic"
\item "gray"
\item "dark"
\item "light"
\item "minimal"
}}
\item{caption}{Add a caption with details at the bottom of the plot? The
details include the clustering method, the number of clusters, and the
modularity value given the number of clusters with this method.}
\item{return.multiclust}{Instead of returning a \code{ggplot2} plot, return
the \code{dna_multiclust} object upon which the dendrogram is based?}
}
\value{
A \code{ggplot2} plot.
}
\description{
Create a cluster dendrogram based on a DNA database.
}
\details{
This function serves to conduct a cluster analysis of a DNA dataset and
visualize the results as a dendrogram. The user can either select a specific
clustering method or let the \code{\link{dna_multiclust}} function determine
the best cluster solution. The following clustering methods are available for
creating dendrograms:
\itemize{
\item Hierarchical clustering with single linkage.
\item Hierarchical clustering with average linkage.
\item Hierarchical clustering with complete linkage.
\item Hierarchical clustering with Ward's algorithm.
\item Fast & greedy community detection.
\item Walktrap community detection.
\item Leading eigenvector community detection.
\item Edge betweenness community detection (Girvan-Newman algorithm).
}
The resulting dendrograms can have different label, leaf, and symbol
properties as well as rectangles for the clusters and other customization
options. It is possible to return the underlying \code{\link{dna_multiclust}}
object instead of the plot by using the \code{return.multiclust} argument.
}
\examples{
\dontrun{
library("rDNA")
dna_init()
samp <- dna_sample()
conn <- dna_connection(samp)
# Single-linkage with k = 2 is chosen automatically based on modularity:
dna_dendrogram(conn, method = "best", k = 0)
# Walktrap community detection with three clusters and rectangles:
dna_dendrogram(conn, method = "walktrap", k = 3, rectangle.colors = "purple")
# Custom colors and shapes:
dna_dendrogram(conn,
label.colors = "color",
leaf.colors = "cluster",
rectangle.colors = c("steelblue", "orange"),
symbol.shapes = 17:18,
symbol.colors = 3:4)
# Circular dendrogram:
dna_dendrogram(conn, circular = TRUE, label.truncate = 12)
# Modifying the underlying network, e.g., leaving out concepts:
dna_dendrogram(conn, excludeValues = list(concept =
"There should be legislation to regulate emissions."))
# Return the dna_multiclust object
mc <- dna_dendrogram(conn, k = 0, method = "best", return.multiclust = TRUE)
mc
}
}
\author{
Philip Leifeld, Johannes B. Gruber
}
|
b7b41917ce155dce6d65bae29d097500e873d502
|
8f5bd879605797d1399cf4fb7bd15b8dff617649
|
/src/1json_import.R
|
271d1d841167e420a554ed75ef9cccf4f4a60e17
|
[] |
no_license
|
ryasmin/DHS_Data_Analysis
|
40c45f2e12db24fb79bd3d209c2f710e1f742c4e
|
cd2291668d546dd748aeac1709b9b0a5def79f8f
|
refs/heads/main
| 2023-03-10T04:55:39.666697
| 2021-02-16T20:08:00
| 2021-02-16T20:08:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 410
|
r
|
1json_import.R
|
library(rjson)
jsonRawPilot <- fromJSON(file= "json_data/PilotTest.json")
jsonRaw2_1 <- fromJSON(file = "json_data/Test2-1.json")
jsonRaw2_2 <- fromJSON(file = "json_data/Test2-2.json")
jsonRaw2_3 <- fromJSON(file = "json_data/Test2-3.json")
jsonRaw3_1 <- fromJSON(file = "json_data/Test3-1.json")
jsonRaw3_2 <- fromJSON(file = "json_data/Test3-2.json")
jsonRaw3_3 <- fromJSON(file = "json_data/Test3-3.json")
|
eab4549e141ff32ebaafc305b91d3c47ca5f29b2
|
2b0e7454e2c87076f4f97d35000bf3426b7d9aaa
|
/R/pub03_DatabaseOperationFuncs.R
|
ea09f40c75997079e421aefd53d2cf4a06a61113
|
[] |
no_license
|
raphael210/QDataGet
|
52df9d791d7d1d8933555dbdfa9d81e42558a5ee
|
83531020e180fe8d07fdfa4a75413fd2b95cd6b4
|
refs/heads/master
| 2020-04-12T06:29:33.198718
| 2019-02-01T07:50:14
| 2019-02-01T07:50:14
| 64,194,185
| 0
| 5
| null | 2017-03-16T03:29:45
| 2016-07-26T06:00:12
|
R
|
UTF-8
|
R
| false
| false
| 77,474
|
r
|
pub03_DatabaseOperationFuncs.R
|
# ===================== xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx ======================
# ===================== Database Operation ===========================
# ===================== xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx ======================
#' defaultDataSRC
#'
#' get the default datasrc. You can reset the default value by eg. \code{options(datasrc="quant")}
#' @return a character string, the value of the default datasrc.
#' @export
#' @examples
#' # -- get the default datasrc
#' defaultDataSRC()
#' # -- reset
#' options(datasrc="quant")
#' # -- reget
#' defaultDataSRC()
defaultDataSRC <- function(){
getOption("datasrc",default="local")
}
#' origin_sql
#' @export
origin_sql <- function(){
return("1970-01-01")
}
#' Database connection
#'
#' connect database wind, quant, cs, jy, local, ...
#' @rdname db.connection
#' @return a database connection.
#' @export
db.local <- function(dbname = "QTlite",dbpath="D:/sqlitedb"){
driver = DBI::dbDriver("SQLite")
dbname <- paste(paste(dbpath,dbname,sep = "/"),".db",sep="")
dbConnect(driver, dbname = dbname)
}
#' @rdname db.connection
#' @export
db.quant <- function(uid = "wsread",pwd = "wsread"){
odbcConnect("jyquant", uid = uid, pwd = pwd)
}
#' @rdname db.connection
#' @export
db.cs <- function(){
odbcConnect("csdb", uid = "wsread",pwd = "wsread")
}
#' @rdname db.connection
#' @export
db.jy <- function(){
odbcConnect("jy", uid = "jyread",pwd = "jyread")
}
#' @rdname db.connection
#' @export
db.wind <- function(){
odbcConnect("wind", uid = "wsread",pwd = "wsread")
}
# db.lite <- function(){ # connect SQLite by ODBC
# odbcConnect("lite")
# }
#' queryAndClose.odbc
#'
#' read data from a ODBC data source with a query
#' @param db a ODBC database object
#' @param query a character string,indicating the query to execute
#' @return a dataframe
#' @author Ruifei.Yin
#' @export
#' @examples
#' queryAndClose.odbc(db.quant(),"select top 10 * from QT_DailyQuote")
queryAndClose.odbc <- function (db, query, as.is=FALSE, ...) {
table = sqlQuery(db, query, as.is = as.is, ...)
odbcClose(db)
return(table)
}
#' queryAndClose.dbi
#'
#' read data from a DBI data source with a query
#' @param db a DBI data source object
#' @param query a character string,indicating the query to execute
#' @return a dataframe
#' @author Ruifei.Yin
#' @export
#' @examples
#' queryAndClose.dbi(db.local("qt"),"select * from QT_DailyQuote limit 10")
queryAndClose.dbi <- function (db, query, ...) {
table = dbGetQuery(db, query, ...)
dbDisconnect(db)
return(table)
}
# ===================== xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx ======================
# ===================== Database Updating ===========================
# ===================== xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx ======================
# -------------------- ~~ from 'jyquant' ----------------
#' lcdb.updatetime
#'
#' get the updatetime of tables in lcdb
#' @return a datafrme, with cols: "table", "updatetime".
#' @export
lcdb.updatetime <- function () {
con_main <- db.local("main")
con_fs <- db.local("fs")
con_fs_r <- db.local("fs_r")
con_qt <- db.local("qt")
updatetime <- c(
dbGetQuery(con_main,"select max(EndDate) from LC_IndexComponentsWeight")[[1]],
dbGetQuery(con_main,"select max(TradingDay) from QT_IndexQuote")[[1]] ,
dbGetQuery(con_qt,"select max(TradingDay) from QT_DailyQuote")[[1]] ,
dbGetQuery(con_fs,"select max(TradingDay) from QT_FactorScore")[[1]] ,
dbGetQuery(con_fs_r,"select max(TradingDay) from QT_FactorScore_R")[[1]] ,
dbGetQuery(con_main,"select max(PublDate) from LC_RptDate")[[1]] ,
dbGetQuery(con_main,"select max(InfoPublDate) from LC_PerformanceGrowth")[[1]],
dbGetQuery(con_main,"select max(date) from QT_FreeShares")[[1]],
dbGetQuery(con_qt,"select max(updateDate) from QT_sus_res")[[1]],
dbGetQuery(con_fs_r,"select max(date) from QT_FactorReturn")[[1]],
dbGetQuery(con_fs_r,"select max(date)from QT_Cov")[[1]]
)
table <- c(
"LC_IndexComponentsWeight",
"QT_IndexQuote",
"QT_DailyQuote",
"QT_FactorScore",
"QT_FactorScore_R",
"LC_RptDate",
"LC_PerformanceGrowth",
"QT_FreeShares",
"QT_sus_res",
"QT_FactorReturn",
"QT_Cov"
)
dbDisconnect(con_main)
dbDisconnect(con_fs)
dbDisconnect(con_qt)
return(data.frame(table,updatetime))
}
#' update the local database
#' @return NULL
#' @export
lcdb.update <- function(){
lcdb.update.SecuMain() ; message("lcdb.update.SecuMain()... Done ");
lcdb.update.QT_DailyQuote() ; message("lcdb.update.QT_DailyQuote()... Done ");
lcdb.update.QT_TradingDay() ; message("lcdb.update.QT_TradingDay()... Done");
lcdb.update.QT_sus_res() ; message("lcdb.update.QT_sus_res()... Done");
lcdb.update.CT_SystemConst() ; message("lcdb.update.CT_SystemConst()... Done");
lcdb.update.CT_IndustryList() ; message("lcdb.update.CT_IndustryList()... Done");
lcdb.update.LC_ExgIndustry() ; message("lcdb.update.LC_ExgIndustry()... Done");
lcdb.fix.swindustry() ; message("lcdb.fix.swindustry()... Done");
lcdb.fix.ezindustry() ; message("lcdb.fix.ezindustry()... Done");
lcdb.update.LC_IndexComponent() ; message("lcdb.update.LC_IndexComponent()... Done ");
lcdb.add.LC_IndexComponent("EI000985") ; message("lcdb.add.LC_IndexComponent('EI000985')... Done ");
lcdb.update.LC_IndexComponentsWeight() ; message("lcdb.update.LC_IndexComponentsWeight()... Done");
lcdb.update.QT_IndexQuote() ; message("lcdb.update.QT_IndexQuote()... Done ");
lcdb.update.IndexQuote_000985E() ; message("lcdb.update.IndexQuote_000985E()... Done ");
lcdb.update.LC_RptDate() ; message("lcdb.update.LC_RptDate()... Done ");
lcdb.update.LC_PerformanceGrowth() ; message("lcdb.update.LC_PerformanceGrowth()... Done ");
lcdb.update.QT_FreeShares() ; message("lcdb.update.QT_FreeShares()... Done ");
lcdb.update.QT_Size() ; message("lcdb.update.QT_Size()... Done ");
# lcdb.update.QT_Rf() ; message("lcdb.update.QT_Rf()... Done ");
lcdb.update.QT_FactorScore(type = "alpha") ; message("lcdb.update.QT_FactorScore(alpha)... Done ");
lcdb.update.QT_FactorScore(type = "risk") ; message("lcdb.update.QT_FactorScore(risk)... Done ");
lcdb.update.barra_basic() ; message("lcdb.update.QT_barra_basic()... Done ");
lcdb.update.barra_adv() ; message("lcdb.update.QT_barra_adv()... Done ");
}
#' @rdname lcdb.update
#' @export
lcdb.update.SecuMain <- function(){
tb.from <- queryAndClose.odbc(db.quant(),query="select * from SecuMain", as.is=4)
con <- db.local("main")
dbExecute(con,"delete from SecuMain")
dbWriteTable(con,"SecuMain",tb.from,overwrite=FALSE,append=TRUE,row.names=FALSE)
dbDisconnect(con)
}
#' @rdname lcdb.update
#' @export
lcdb.update.QT_TradingDay <- function(){
tb.from <- queryAndClose.odbc(db.quant(),query="select * from QT_TradingDay")
con <- db.local("main")
dbExecute(con,"delete from QT_TradingDay")
dbWriteTable(con,"QT_TradingDay",tb.from,overwrite=FALSE,append=TRUE,row.names=FALSE)
dbDisconnect(con)
}
#' @rdname lcdb.update
#' @export
lcdb.update.CT_SystemConst <- function(){
tb.from <- queryAndClose.odbc(db.quant(),query="select * from CT_SystemConst")
con <- db.local("main")
dbExecute(con,"delete from CT_SystemConst")
dbWriteTable(con,"CT_SystemConst",tb.from,overwrite=FALSE,append=TRUE,row.names=FALSE)
dbDisconnect(con)
}
#' @rdname lcdb.update
#' @export
lcdb.update.CT_IndustryList <- function(){
tb.from <- queryAndClose.odbc(db.quant(),query="select * from CT_IndustryList")
con <- db.local("main")
dbExecute(con,"delete from CT_IndustryList")
dbWriteTable(con,"CT_IndustryList",tb.from,overwrite=FALSE,append=TRUE,row.names=FALSE)
dbDisconnect(con)
}
#' @rdname lcdb.update
#' @export
lcdb.update.LC_ExgIndustry <- function(){
tb.from <- queryAndClose.odbc(db.quant(),query="select * from LC_ExgIndustry")
con <- db.local("main")
dbExecute(con,"delete from LC_ExgIndustry")
dbWriteTable(con,"LC_ExgIndustry",tb.from,overwrite=FALSE,append=TRUE,row.names=FALSE)
dbDisconnect(con)
}
#' @rdname lcdb.update
#' @export
lcdb.update.LC_IndexComponent <- function(){
tb.from <- queryAndClose.odbc(db.quant(),query="select * from LC_IndexComponent")
con <- db.local("main")
dbExecute(con,"delete from LC_IndexComponent")
dbWriteTable(con,"LC_IndexComponent",tb.from,overwrite=FALSE,append=TRUE,row.names=FALSE)
dbDisconnect(con)
}
#' @rdname lcdb.update
#' @export
#' @param begT a numeric date. if missing, '\code{max(EndDate)}' in the lcdb.
#' @param endT a numeric date. if missing, 99990101.
#' @aliases indexID a vector of indexID. if missing, all the index in server database.
#' @examples
#' lcdb.update.LC_IndexComponentsWeight() # update all the indexs up to date
#' lcdb.update.LC_IndexComponentsWeight(20060101,20060330) # update all the indexs in given period
#' lcdb.update.LC_IndexComponentsWeight(19000101,99990101,"EI000905") # update all the data of given index
#' lcdb.update.LC_IndexComponentsWeight(20060101,20060330,"EI000905")
lcdb.update.LC_IndexComponentsWeight <- function(begT,endT,IndexID){
con <- db.local("main")
if(TRUE){
if(missing(begT)){
begT <- dbGetQuery(con,"select max(EndDate) from LC_IndexComponentsWeight")[[1]]
}
begT_filt <- paste("EndDate >=",begT)
if(missing(endT)){
endT <- 99990101
}
endT_filt <- paste("EndDate < ",endT)
if(missing(IndexID)){
pool_filt <- "1>0"
} else{
pool_filt <- paste("IndexID in",brkQT(IndexID))
}
}
tb.from <- queryAndClose.odbc(db.quant(),query=paste("select * from LC_IndexComponentsWeight where",begT_filt,"and",endT_filt,"and",pool_filt))
if(NROW(tb.from)==0){
return()
}
dbExecute(con,paste("delete from LC_IndexComponentsWeight where",begT_filt,"and",endT_filt,"and",pool_filt))
dbWriteTable(con,"LC_IndexComponentsWeight",tb.from,overwrite=FALSE,append=TRUE,row.names=FALSE)
dbDisconnect(con)
}
#' @rdname lcdb.update
#' @export
lcdb.update.QT_IndexQuote <- function(begT,endT,IndexID,datasrc=c("quant","jy")){
datasrc <- match.arg(datasrc)
con <- db.local("main")
if(TRUE){
if(missing(begT)){
if(missing(IndexID)){
begT <- dbGetQuery(con,"select max(TradingDay) from QT_IndexQuote")[[1]]
} else {
begT <- dbGetQuery(con,"select min(TradingDay) from QT_IndexQuote")[[1]]
}
}
begT_filt <- paste("TradingDay >=",begT)
if(missing(endT)){
if(missing(IndexID)){
endT <- 99990101
} else {
endT <- dbGetQuery(con,"select max(TradingDay) from QT_IndexQuote")[[1]]
}
}
endT_filt <- paste("TradingDay < ",endT)
if(missing(IndexID)){
pool_filt <- "1>0"
} else{
pool_filt <- paste("ID in",brkQT(IndexID))
}
}
if(datasrc=='quant'){
tb.from <- queryAndClose.odbc(db.quant(),query=paste("select * from QT_IndexQuote where ",begT_filt,"and",endT_filt,"and",pool_filt))
}else if(datasrc=='jy'){
begT_filt_ <- paste("TradingDay >=",QT(intdate2r(begT)))
endT_filt_ <- paste("TradingDay < ",QT(intdate2r(endT)))
IndexID_ <- stringr::str_replace(IndexID,'EI','')
pool_filt_ <- paste("SecuCode in",brkQT(IndexID_))
qr <- paste("SELECT q.InnerCode,
year(TradingDay)*10000+month(TradingDay)*100+day(TradingDay) 'TradingDay',
PrevClosePrice,OpenPrice,HighPrice,LowPrice,ClosePrice,TurnoverVolume,
TurnoverValue,TurnoverDeals,ChangePCT,NegotiableMV,
(case when PrevClosePrice is not null and PrevClosePrice <> 0 then ClosePrice/PrevClosePrice-1 else null end) 'DailyReturn',
'EI'+s.SecuCode 'ID'
FROM QT_IndexQuote q,SecuMain s
where q.InnerCode=s.InnerCode and ",begT_filt_,"and",endT_filt_,"and",pool_filt_)
tb.from <- queryAndClose.odbc(db.jy(),query=qr)
}
if(NROW(tb.from)==0){
return()
}
dbExecute(con,paste("delete from QT_IndexQuote where",begT_filt,"and",endT_filt,"and",pool_filt))
dbWriteTable(con,"QT_IndexQuote",tb.from,overwrite=FALSE,append=TRUE,row.names=FALSE)
dbDisconnect(con)
}
#' @rdname lcdb.update
#' @export
lcdb.update.QT_DailyQuote <- function(begT,endT,stockID,loopFreq="100 year"){
con <- db.local("qt")
if(TRUE){
if(missing(begT)){
if(missing(stockID)){
begT <- dbGetQuery(con,"select max(TradingDay) from QT_DailyQuote")[[1]]
} else{
begT <- dbGetQuery(con,"select min(TradingDay) from QT_DailyQuote")[[1]]
}
}
if(missing(endT)){
if(missing(stockID)){
endT <- 99990101
} else {
endT <- dbGetQuery(con,"select max(TradingDay) from QT_DailyQuote")[[1]]
}
}
if(missing(stockID)){
pool_filt <- "1>0"
} else{
pool_filt <- paste("ID in",brkQT(stockID))
}
}
endT <- min(intdate2r(endT), Sys.Date())
dates <- c(seq(intdate2r(begT), endT ,by = loopFreq), endT)
dates <- rdate2int(dates)
for(ii in 1:(length(dates)-1)){
message(paste("lcdb.update.QT_DailyQuote: updating to ",dates[ii+1],"..."))
begT_filt <- paste("TradingDay >=",dates[ii])
endT_filt <- paste("TradingDay < ",dates[ii+1])
tb.from <- queryAndClose.odbc(db.quant(),query=paste("select * from QT_DailyQuote where ",begT_filt,"and",endT_filt,"and",pool_filt))
if(NROW(tb.from)==0){
return()
}
dbExecute(con,paste("delete from QT_DailyQuote where",begT_filt,"and",endT_filt,"and",pool_filt))
dbWriteTable(con,"QT_DailyQuote",tb.from,overwrite=FALSE,append=TRUE,row.names=FALSE)
gc()
}
dbDisconnect(con)
}
#' @rdname lcdb.update
#' @export
lcdb.update.LC_RptDate <- function(begT,endT,stockID){
con <- db.local("main")
if(missing(begT)){
begT <- 10000101
}
if(missing(endT)){
endT <- 99990101
}
if(missing(stockID)){
pool_filt <- ""
} else{
pool_filt <- paste(" and stockID in",brkQT(stockID))
}
qr <- paste("select * from LC_RptDate where PublDate >=",begT,"and PublDate <=",endT,pool_filt)
tb.from <- queryAndClose.odbc(db.quant(),qr)
if(NROW(tb.from)==0){
dbDisconnect(con)
return()
}
qr <- paste("delete from LC_RptDate where PublDate >=",begT,"and PublDate<=",endT,pool_filt)
dbExecute(con,qr)
dbWriteTable(con,"LC_RptDate",tb.from,overwrite=FALSE,append=TRUE,row.names=FALSE)
dbDisconnect(con)
}
#' @rdname lcdb.update
#' @export
lcdb.update.LC_PerformanceGrowth <- function(begT,endT){
con <- db.local("main")
if(missing(begT)){
begT <- dbGetQuery(con,"select max(InfoPublDate) from LC_PerformanceGrowth")[[1]]
}
if(missing(endT)){
endT <- 99990101
}
tb.from <- queryAndClose.odbc(db.quant(),query=paste("select * from LC_PerformanceGrowth where InfoPublDate>=",begT,"and InfoPublDate<=",endT))
if(NROW(tb.from)==0){
return()
}
dbExecute(con,paste("delete from LC_PerformanceGrowth where InfoPublDate >=",begT,"and InfoPublDate<=",endT))
dbWriteTable(con,"LC_PerformanceGrowth",tb.from,overwrite=FALSE,append=TRUE,row.names=FALSE)
dbDisconnect(con)
}
# -------------------- ~~ fixing ----------------
#' add a index to local database from JY database
#'
#'
#' @author Andrew Dow
#' @param indexID is index code,such as EI000300
#' @return nothing
#' @examples
#' lcdb.add.LC_IndexComponent(indexID="EI801003")
#' lcdb.add.LC_IndexComponent(indexID="EI000985")
#' @export
lcdb.add.LC_IndexComponent <- function(indexID){
qr1 <- paste("select ID,InnerCode,CompanyCode,'EI'+SecuCode 'SecuCode',SecuAbbr,
SecuMarket,ListedSector,ListedState,JSID 'UpdateTime',
SecuCode 'StockID_TS',SecuCategory,
convert(varchar,ListedDate,112) 'ListedDate',SecuCode 'StockID_wind'
from SecuMain WHERE SecuCode=",QT(substr(indexID,3,8)),
" and SecuCategory=4",sep='')
indexInfo <- queryAndClose.odbc(db.jy(),qr1,stringsAsFactors=FALSE)
qr2 <- paste("SELECT 'EI'+s1.SecuCode 'IndexID','EQ'+s2.SecuCode 'SecuID',
convert(varchar(8),l.InDate,112) 'InDate',
convert(varchar(8),l.OutDate,112) 'OutDate',l.Flag,l.XGRQ 'UpdateTime'
FROM LC_IndexComponent l inner join SecuMain s1
on l.IndexInnerCode=s1.InnerCode and s1.SecuCode=",QT(substr(indexID,3,8)),
" left join SecuMain s2 on l.SecuInnerCode=s2.InnerCode",
" where s2.SecuCode like '3%' or s2.SecuCode like '6%' or s2.SecuCode like '0%'")
indexComp <- queryAndClose.odbc(db.jy(),qr2,stringsAsFactors=FALSE)
if(indexID=='EI000985'){
changeDate <- as.Date('2011-08-02')
indexInfo <- transform(indexInfo,ID=indexID,
SecuCode=substr(SecuCode,3,8),
StockID_TS='SH000985',
StockID_wind='000985.SH')
#part 2 update local LC_IndexComponent
qr <- paste("SELECT 'EI'+s1.SecuCode 'IndexID','EQ'+s2.SecuCode 'SecuID',
convert(varchar(8),l.InDate,112) 'InDate',
convert(varchar(8),l.OutDate,112) 'OutDate',
convert(varchar(8),s2.ListedDate,112) 'IPODate'
FROM LC_IndexComponent l
inner join SecuMain s1 on l.IndexInnerCode=s1.InnerCode and s1.SecuCode='801003'
left join SecuMain s2 on l.SecuInnerCode=s2.InnerCode
where (s2.SecuCode like '3%' or s2.SecuCode like '6%' or s2.SecuCode like '0%')
and l.InDate<",QT(changeDate))
re <- queryAndClose.odbc(db.jy(),qr,stringsAsFactors=FALSE)
if(TRUE){ # -- 801003
tmp <- transform(re,InDate=intdate2r(InDate),
OutDate=intdate2r(OutDate),
IPODate=intdate2r(IPODate)+90)
tmp[tmp$InDate<tmp$IPODate,'InDate'] <- tmp[tmp$InDate<tmp$IPODate,'IPODate']
tmp <- tmp[tmp$InDate < changeDate,c("SecuID","InDate","OutDate")]
tmp[is.na(tmp$OutDate),'OutDate'] <- changeDate
tmp[tmp$OutDate>changeDate,'OutDate'] <- changeDate
qr <- paste("select 'EQ'+s.SecuCode 'SecuID',
case when st.SpecialTradeType in(2,4,6) then convert(varchar(8),st.SpecialTradeTime,112)
else NULL end 'InDate',
case when st.SpecialTradeType in(1,3,5) then convert(varchar(8),st.SpecialTradeTime,112)
else NULL end 'OutDate'
from LC_SpecialTrade st,SecuMain s
where st.InnerCode=s.InnerCode and s.SecuCategory=1
and st.SpecialTradeTime<",QT(changeDate),
" and st.SpecialTradeType in(1,2,3,4,5,6)
and (s.SecuCode like '3%' or s.SecuCode like '6%' or s.SecuCode like '0%')
order by s.SecuCode,st.SpecialTradeTime")
st <- queryAndClose.odbc(db.jy(),qr,stringsAsFactors=FALSE)
st <- transform(st,InDate=intdate2r(InDate),
OutDate=intdate2r(OutDate))
st[is.na(st$OutDate),'OutDate'] <- changeDate
tmp <- rbind(tmp[,c("SecuID","InDate","OutDate")],st)
tmp <- reshape2::melt(tmp,id=c('SecuID'))
tmp <- na.omit(tmp)
tmp <- unique(tmp)
tmp <- dplyr::arrange(tmp,SecuID,value)
tmp$flag <- c(1)
for(i in 2: nrow(tmp)){
if(tmp$SecuID[i]==tmp$SecuID[i-1] && tmp$variable[i-1]=='InDate' && tmp$variable[i]=='InDate'){
tmp$flag[i-1] <- 0
}else if(tmp$SecuID[i]==tmp$SecuID[i-1] && tmp$variable[i-1]=='OutDate' && tmp$variable[i]=='OutDate'){
tmp$flag[i] <- 0
}else{
next
}
}
tmp <- tmp[tmp$flag==1,c("SecuID","variable","value")]
tmp <- cbind(tmp[tmp$variable=='InDate',c("SecuID","value")],
tmp[tmp$variable=='OutDate',"value"])
colnames(tmp) <- c("SecuID","InDate","OutDate")
tmp <- transform(tmp,IndexID='EI000985',
Flag=0,
UpdateTime=Sys.time(),
InDate=rdate2int(InDate),
OutDate=rdate2int(OutDate))
tmp <- tmp[,c("IndexID","SecuID","InDate","OutDate","Flag","UpdateTime")]
}
indexComp <- rbind(indexComp,tmp)
}else{
#part 1 update local SecuMain
indexInfo <- transform(indexInfo,ID=indexID,
SecuCode=substr(SecuCode,3,8),
StockID_TS=ifelse(is.na(stockID2stockID(indexID,'local','ts')),substr(indexID,3,8),
stockID2stockID(indexID,'local','ts')),
StockID_wind=ifelse(is.na(stockID2stockID(indexID,'local','wind')),substr(indexID,3,8),
stockID2stockID(indexID,'local','wind')))
}
con <- db.local("main")
dbExecute(con,paste("delete from SecuMain where ID=",QT(indexID),sep=''))
dbExecute(con,paste("delete from LC_IndexComponent where IndexID=",QT(indexID),sep=''))
dbWriteTable(con,"SecuMain",indexInfo,overwrite=FALSE,append=TRUE,row.names=FALSE)
dbWriteTable(con,"LC_IndexComponent",indexComp,overwrite=FALSE,append=TRUE,row.names=FALSE)
dbDisconnect(con)
}
#' fix shenwan new industry rule
#'
#' Due to the SHENWAN inustry standard changing in 2014, fix local database's shenwan industry rule's bug and make the rule keep consistent. The new industry standard is 33. Update 3 related local tables:CT_SystemConst,CT_IndustryList and LC_ExgIndusry.
#' @rdname lcdb.update
#' @author Andrew Dow
#' @return nothing.
#' @examples
#' lcdb.fix.swindustry()
#' @export
lcdb.fix.swindustry <- function(){
#get raw data
qr <- "SELECT 'EQ'+s.SecuCode 'stockID',l.CompanyCode,l.FirstIndustryCode 'Code1',l.FirstIndustryName 'Name1',
l.SecondIndustryCode 'Code2',l.SecondIndustryName 'Name2',l.ThirdIndustryCode 'Code3',
l.ThirdIndustryName 'Name3',convert(varchar, l.InfoPublDate, 112) 'InDate',
convert(varchar, l.CancelDate, 112) 'OutDate',l.InfoSource,l.Standard,l.Industry,
l.IfPerformed 'Flag',convert(float,l.XGRQ) 'UpdateTime',
convert(varchar, s.ListedDate, 112) 'IPODate'
FROM LC_ExgIndustry l,SecuMain s
where l.CompanyCode=s.CompanyCode and s.SecuCategory=1
and s.SecuMarket in(83,90) and l.Standard in(9,24)
order by l.Standard,l.InfoPublDate"
re <- queryAndClose.odbc(db.jy(),qr,as.is = TRUE)
re <- re %>% filter(substr(stockID,1,3) %in% c('EQ6','EQ3','EQ0'),!is.na(IPODate)) %>%
mutate(InDate=as.integer(InDate),OutDate=as.integer(OutDate),UpdateTime=as.double(UpdateTime),IPODate=as.integer(IPODate))
#use standard 24 data directly
sw24use <- re %>% filter(Standard==24) %>% dplyr::select(-IPODate)
#use standard 9 data before standard 24 published date
sw9use <- re %>% filter(Standard==9,InDate<20140101,IPODate<20140101)
sw9use[is.na(sw9use$OutDate) | sw9use$OutDate>20140101,'OutDate'] <- 20140101
sw9use <- sw9use %>% mutate(Flag=2,unlistDate=trday.unlist(stockID)) %>% mutate(unlistDate=rdate2int(unlistDate))
sw9use <- sw9use[is.na(sw9use$unlistDate) | sw9use$InDate<sw9use$unlistDate,] # remove Indate> unlistdate
# remove outdate> unlistdate
sw9use <- sw9use %>% mutate(OutDate=ifelse(!is.na(sw9use$unlistDate) & sw9use$OutDate>sw9use$unlistDate,unlistDate,OutDate)) %>%
dplyr::select(-IPODate,-unlistDate) %>%
dplyr::rename(OldCode1=Code1,OldName1=Name1,OldCode2=Code2,OldName2=Name2,OldCode3=Code3,OldName3=Name3)
#convert old industry to new industry
sw24tmp <- sw24use[sw24use$InDate==20140101,c("stockID","Code1","Name1","Code2","Name2","Code3","Name3")]
sw9part1 <- sw9use[sw9use$OutDate==20140101,]
sw9part1 <- dplyr::left_join(sw9part1,sw24tmp,by='stockID')
#get industry match table
indmatch <- unique(sw9part1[,c("Code1","Name1","Code2","Name2","Code3","Name3","OldCode1","OldName1","OldCode2","OldName2","OldCode3","OldName3")])
indmatch <- plyr::ddply(indmatch,~OldName3,plyr::mutate,n=length(OldName3))
indmatch <- indmatch[indmatch$n==1,c("Code1","Name1","Code2","Name2","Code3","Name3","OldCode1","OldName1","OldCode2","OldName2","OldCode3","OldName3")]
sw9part1 <- sw9part1[,colnames(sw24use)]
sw9part2 <- sw9use[sw9use$OutDate<20140101,]
sw9part2 <- dplyr::left_join(sw9part2,indmatch,by=c("OldCode1","OldName1",
"OldCode2","OldName2",
"OldCode3","OldName3"))
sw9part3 <- sw9part2 %>% filter(is.na(Code1)) %>% dplyr::select(-Code1,-Name1,-Code2,-Name2,-Code3,-Name3)
sw9part2 <- sw9part2[!is.na(sw9part2$Code1),colnames(sw24use)]
sw9part3 <- dplyr::left_join(sw9part3,sw24tmp,by='stockID')
sw9part3 <- sw9part3[,colnames(sw24use)]
sw9use <- rbind(sw9part1,sw9part2,sw9part3)
#fill na to zonghe industry
zhcn <- unique(sw24use[sw24use$Code1==510000,'Name1'])
sw9use[is.na(sw9use$Code1),c("Name1","Name2","Name3")] <- zhcn
sw9use[is.na(sw9use$Code1),"Code1"] <-510000
sw9use[is.na(sw9use$Code2),"Code2"] <-510100
sw9use[is.na(sw9use$Code3),"Code3"] <-510101
sw33 <- rbind(sw9use,sw24use)
sw33 <- transform(sw33,Standard=33,
Code1=paste('ES33',Code1,sep = ''),
Code2=paste('ES33',Code2,sep = ''),
Code3=paste('ES33',Code3,sep = ''),
Code99=c(NA),
Name99=c(NA),
Code98=c(NA),
Name98=c(NA))
sw33 <- dplyr::arrange(sw33,stockID,InDate)
#deal with abnormal condition
#1 outdate<=indate
sw33 <- sw33[ifelse(is.na(sw33$OutDate),TRUE,sw33$OutDate>sw33$InDate),]
#2 one stock has two null outdate
tmp <- sw33 %>% dplyr::group_by(stockID) %>%
dplyr::summarise(NANum=sum(is.na(OutDate))) %>%
dplyr::ungroup() %>% dplyr::filter(NANum>1)
if(nrow(tmp)>0){
tmp <- tmp$stockID
sw33tmp <- sw33[(sw33$stockID %in% tmp) & is.na(sw33$OutDate),]
sw33 <- sw33[!((sw33$stockID %in% tmp) & is.na(sw33$OutDate)),]
sw33tmp <- sw33tmp %>% dplyr::group_by(stockID) %>% dplyr::filter(InDate==min(InDate)) %>% dplyr::ungroup()
sw33 <- rbind(sw33,sw33tmp)
sw33 <- dplyr::arrange(sw33,stockID,InDate)
}
#3 indate[i+1]!=outdate[i]
sw33$tmpstockID <- c(sw33$stockID[1],sw33$stockID[1:(nrow(sw33)-1)])
sw33$tmpOutDate <- c(NA,sw33$OutDate[1:(nrow(sw33)-1)])
sw33$InDate <- ifelse(ifelse(is.na(sw33$tmpOutDate),FALSE,sw33$stockID==sw33$tmpstockID & sw33$InDate!=sw33$tmpOutDate),
sw33$tmpOutDate,sw33$InDate)
sw33 <- subset(sw33,select=-c(tmpstockID,tmpOutDate))
# 4 duplicate indate
sw33 <- sw33[ifelse(is.na(sw33$OutDate),TRUE,sw33$OutDate>sw33$InDate),]
sw33[!is.na(sw33$OutDate) & sw33$Flag==1,'Flag'] <- 2
# update local database CT_IndustryList
qr <- "SELECT Standard,Classification 'Level','ES33'+IndustryCode 'IndustryID'
,IndustryName,SectorCode 'Alias','ES33'+FirstIndustryCode 'Code1'
,FirstIndustryName 'Name1','ES33'+SecondIndustryCode 'Code2'
,SecondIndustryName 'Name2','ES33'+ThirdIndustryCode 'Code3'
,ThirdIndustryName 'Name3',convert(float,UpdateTime) 'UpdateTime'
FROM CT_IndustryType where Standard=24"
indCon <- queryAndClose.odbc(db.jy(),qr,as.is = TRUE)
indCon <- transform(indCon,Standard=33,UpdateTime=as.double(UpdateTime))
indCon[is.na(indCon$Name2),'Code2'] <- NA
indCon[is.na(indCon$Name3),'Code3'] <- NA
# update local database CT_SystemConst
syscon <- queryAndClose.odbc(db.jy(),"select top 1 LB, LBMC, DM ,MS from CT_SystemConst where LB=1081",as.is = TRUE)
syscon <- transform(syscon,DM=33, MS="SHENWAN2014fixed")
# update...
con <- db.local("main")
res <- dbSendQuery(con,"delete from LC_ExgIndustry where Standard=33")
dbClearResult(res)
res <- dbSendQuery(con,"delete from CT_IndustryList where Standard=33")
dbClearResult(res)
res <- dbSendQuery(con,"delete from CT_SystemConst where LB=1081 and DM=33")
dbClearResult(res)
dbWriteTable(con,'LC_ExgIndustry',sw33,overwrite=FALSE,append=TRUE,row.names=FALSE)
dbWriteTable(con,'CT_IndustryList',indCon,overwrite=FALSE,append=TRUE,row.names=FALSE)
dbWriteTable(con,'CT_SystemConst',syscon,overwrite=FALSE,append=TRUE,row.names=FALSE)
dbDisconnect(con)
# return('Done!')
}
#' @rdname lcdb.update
#' @author Qian Han
#' @return nothing.
#' @examples
#' lcdb.fix.ezindustry()
#' @export
lcdb.fix.ezindustry <- function(){
  # Build the "6 easy industry" classification (Standard = 336) by collapsing
  # the Shenwan-based level-1 sectors (Standard = 33) into 6 broad groups,
  # then rewrite the three related tables in the local db.
  # Returns 'Done!' on success; side effect: rewrites LC_ExgIndustry,
  # CT_SystemConst and CT_IndustryList rows for Standard/DM = 336.
  con <- db.local("main")
  # Sec: level-1 industry codes (Standard 33) belonging to each broad group
  seclist <- list()
  namelist <- list()
  seclist[[1]] <- c("ES33110000","ES33210000","ES33220000","ES33230000","ES33240000")
  namelist[[1]] <- "BigCycle"
  seclist[[2]] <- c("ES33480000","ES33490000","ES33430000")
  namelist[[2]] <- "FinRealEstate"
  seclist[[3]] <- c("ES33710000","ES33720000","ES33730000","ES33270000")
  namelist[[3]] <- "TMT"
  seclist[[4]] <- c("ES33280000","ES33330000","ES33340000","ES33350000","ES33460000","ES33370000","ES33450000")
  namelist[[4]] <- "Comsumption"
  seclist[[5]] <- c("ES33360000","ES33630000","ES33640000","ES33610000","ES33620000","ES33650000")
  namelist[[5]] <- "Manufacturing"
  seclist[[6]] <- c("ES33420000","ES33410000","ES33510000")
  namelist[[6]] <- "Others"
  # LC_ExgIndustry: remap Code1/Name1 onto the 6 broad groups
  qr <- paste(" select * from LC_ExgIndustry
               where Standard = 33")
  tmpdat <- DBI::dbGetQuery(con, qr)
  # Vectorized lookup (replaces the original O(nrow x 6) row-by-row loop):
  # map each level-1 code to its group index, then rewrite the hit rows.
  sec_map <- data.frame(code = unlist(seclist),
                        grp  = rep(seq_along(seclist), lengths(seclist)),
                        stringsAsFactors = FALSE)
  grp <- sec_map$grp[match(tmpdat$Code1, sec_map$code)]
  hit <- !is.na(grp)
  tmpdat$Name1[hit] <- unlist(namelist)[grp[hit]]
  tmpdat$Code1[hit] <- paste0("ES", grp[hit])
  tmpdat$Standard <- 336
  # CT_SystemConst: clone the Standard-33 record under DM = 336
  tmpdat2 <- DBI::dbReadTable(con, "CT_SystemConst")
  tmpdat2 <- subset(tmpdat2, LB == 1081 & DM == 33)
  tmpdat2$DM <- 336
  tmpdat2$MS <- "6EasyIndustryCategory"
  # CT_IndustryList: 6 level-1 rows, one per broad group
  qr3 <- " select * from CT_IndustryList
          where Standard = 33
          and Level = 1"
  tmpdat3 <- DBI::dbGetQuery(con, qr3)
  tmpdat3 <- tmpdat3[1:6,]
  tmpdat3$Standard <- 336
  tmpdat3$IndustryID <- paste0("ES",1:6)
  tmpdat3$IndustryName <- unlist(namelist)
  tmpdat3$Code1 <- tmpdat3$IndustryID
  tmpdat3$Name1 <- tmpdat3$IndustryName
  # Update into LCDB: delete-then-append so the rewrite is idempotent
  dbExecute(con,"delete from LC_ExgIndustry where Standard=336")
  dbExecute(con,"delete from CT_IndustryList where Standard=336")
  dbExecute(con,"delete from CT_SystemConst where LB=1081 and DM=336")
  dbWriteTable(con,'LC_ExgIndustry',tmpdat,overwrite=FALSE,append=TRUE,row.names=FALSE)
  dbWriteTable(con,'CT_SystemConst',tmpdat2,overwrite=FALSE,append=TRUE,row.names=FALSE)
  dbWriteTable(con,'CT_IndustryList',tmpdat3,overwrite=FALSE,append=TRUE,row.names=FALSE)
  dbDisconnect(con)
  return('Done!')
}
#' lcdb.update.IndexQuote_000985E
#'
#' @examples
#' lcdb.update.IndexQuote_000985E()
#' @export
lcdb.update.IndexQuote_000985E <- function(begT,endT){
  # Incrementally update the equal-weighted CSI-all-A index series
  # ('EI000985E') in the local QT_IndexQuote table. The equal-weighted daily
  # return is the plain mean of the EI000985 constituents' daily returns;
  # close prices are chained forward from the last stored close.
  # @param begT int date (yyyymmdd); default: last stored 'EI000985E' day.
  # @param endT int date (yyyymmdd); default: last day in QT_IndexQuote.
  con <- db.local("main")
  con_qt <- db.local("qt")
  # release both connections on every exit path (early return / stop / error);
  # the original leaked them on the early-return and stop branches
  on.exit({dbDisconnect(con_qt); dbDisconnect(con)}, add = TRUE)
  if(missing(begT)){
    begT <- dbGetQuery(con,"select max(TradingDay) from QT_IndexQuote where ID='EI000985E'")[[1]]
  }
  if(missing(endT)){
    endT <- dbGetQuery(con,"select max(TradingDay) from QT_IndexQuote")[[1]]
  }
  if(begT==endT){
    return('Already up to date!')
  } else if(begT>endT){
    stop("please update the 'QT_IndexQuote' table firstly!")
  } else {
    # last stored row: supplies the close price to chain from
    init_data <- dbGetQuery(con,paste("select * from QT_IndexQuote where ID='EI000985E' and TradingDay=",begT))
    begT <- intdate2r(begT)
    endT <- intdate2r(endT)
    begT <- trday.nearby(begT,by=1) # -- one day after
    # constituents are frozen at begT for the whole update window
    TS <- getIndexComp(indexID = 'EI000985',endT = begT,drop=FALSE)
    tmp.dates <- trday.get(begT,endT)
    message('calculating',rdate2int(min(tmp.dates)),"~",rdate2int(max(tmp.dates)),'...')
    qr <- paste("select TradingDay,ID,DailyReturn from QT_DailyQuote
                where TradingDay>=",rdate2int(min(tmp.dates))," and TradingDay<=",rdate2int(max(tmp.dates)))
    quotedf <- dbGetQuery(con_qt,qr)
    quotedf$TradingDay <- intdate2r(quotedf$TradingDay)
    quotedf <- quotedf[quotedf$ID %in% TS$stockID,]
    # equal-weighted index return = cross-sectional mean of member returns
    index <- quotedf %>% dplyr::group_by(TradingDay) %>%
      dplyr::summarise(DailyReturn = mean(DailyReturn, na.rm = TRUE))
    # chain a wealth index and rebase it onto the last stored close
    tmp <- xts::xts(index$DailyReturn, order.by = index$TradingDay)
    tmp <- WealthIndex(tmp)
    close <- data.frame(TradingDay=zoo::index(tmp),close=zoo::coredata(tmp)*init_data$ClosePrice,row.names =NULL)
    colnames(close) <- c('TradingDay','ClosePrice')
    index <- merge(index,close,by='TradingDay')
    # fields without a meaningful equal-weighted analogue stay NA
    index <- transform(index,TradingDay=rdate2int(TradingDay),
                       InnerCode=c(1000985),
                       PrevClosePrice=c(NA,index$ClosePrice[-(nrow(index))]),
                       OpenPrice=c(NA),
                       HighPrice=c(NA),
                       LowPrice=c(NA),
                       TurnoverVolume=c(NA),
                       TurnoverValue=c(NA),
                       TurnoverDeals=c(NA),
                       ChangePCT=DailyReturn*100,
                       NegotiableMV=c(NA),
                       UpdateTime=c(Sys.Date()),
                       ID=c('EI000985E'))
    index <- index[,c("InnerCode","TradingDay","PrevClosePrice","OpenPrice","HighPrice",
                      "LowPrice","ClosePrice","TurnoverVolume","TurnoverValue","TurnoverDeals",
                      "ChangePCT","NegotiableMV","UpdateTime","DailyReturn","ID")]
    index$PrevClosePrice[1] <- init_data$ClosePrice
    # delete-then-append so re-running the same window is idempotent
    dbExecute(con,paste("delete from QT_IndexQuote where ID='EI000985E' and TradingDay >=",rdate2int(begT),"and TradingDay <=",rdate2int(endT)))
    dbWriteTable(con,'QT_IndexQuote',index,overwrite=FALSE,append=TRUE,row.names=FALSE)
  }
  return('Done!')
}
#' lcdb.update.QT_FreeShares
#'
#' update QT_FreeShares through Wind API.
#' @examples
#' lcdb.update.QT_FreeShares()
#' @export
lcdb.update.QT_FreeShares <- function(begT,endT,Freq='week') {
  # Update the local QT_FreeShares table (free-float share counts, stored in
  # units of 1e8 shares) through the Wind API. Only the dates on which a
  # stock's free-float count actually CHANGED are kept, so the table stores
  # change points rather than a full panel.
  # @param begT int date (yyyymmdd); default: trading day after the last
  #   stored date.
  # @param endT int date (yyyymmdd); default: nearest trading day before today.
  # @param Freq sampling frequency passed to getRebDates (default 'week').
  con <- db.local("main")
  re <- dbReadTable(con,'QT_FreeShares')
  if(missing(begT)){
    begT <- intdate2r(max(re$date))
    begT <- rdate2int(trday.nearby(begT,1))
  }
  if(missing(endT)){
    endT <- rdate2int(trday.nearest(Sys.Date()-1))
  }
  if(begT<endT){
    dates <- getRebDates(intdate2r(begT),intdate2r(endT),rebFreq = Freq)
    require(WindR)
    WindR::w.start(showmenu = FALSE)
    # pull free-float shares of all A shares on each sample date; accumulate
    # in a pre-allocated list instead of growing a data.frame with rbind
    shares_list <- vector("list", length(dates))
    for(i in seq_along(dates)){
      TS <- w.wset('sectorconstituent',date=dates[i],sectorid='a001010100000000')[[2]]
      float_shares_ <- WindR::w.wss(TS$wind_code,'free_float_shares',tradeDate=dates[i])[[2]]
      shares_list[[i]] <- cbind(data.frame(date=dates[i]),float_shares_)
    }
    float_shares <- do.call(rbind, shares_list)
    colnames(float_shares) <- c("date","stockID","freeShares")
    float_shares <- transform(float_shares,
                              date=rdate2int(date),
                              stockID=stringr::str_c("EQ",substr(stockID,1,6)),
                              freeShares=freeShares/1e8)
    # merge with rows already stored inside the window, keep the earliest
    # date of each distinct (stockID, freeShares) value
    float_shares <- rbind(float_shares,re[re$date>=begT & re$date<=endT,])
    float_shares <- float_shares %>% group_by(stockID,freeShares) %>% summarise(date=min(date)) %>% dplyr::ungroup()
    float_shares <- float_shares[,c("date","stockID","freeShares")]
    # latest stored value outside the window, per stock: used to drop
    # "changes" that merely repeat the already-known value
    re_ <- re %>% dplyr::filter(date<begT | date>endT) %>% arrange(stockID,desc(date)) %>% group_by(stockID) %>% slice(1) %>% dplyr::ungroup()
    re_ <- dplyr::rename(re_,dateold=date,freeSharesold=freeShares)
    float_shares <- dplyr::left_join(float_shares,re_,by='stockID')
    float_shares <- rbind(float_shares %>% dplyr::filter(!is.na(freeSharesold)) %>% dplyr::filter(date!=dateold & freeShares!=freeSharesold),
                          float_shares %>% dplyr::filter(is.na(freeSharesold)))
    float_shares <- float_shares[,c("date","stockID","freeShares")]
    float_shares <- arrange(float_shares,date,stockID)
    # delete-then-append so re-running the same window is idempotent
    dbExecute(con,paste("delete from QT_FreeShares where date >=",begT,"and date<=",endT))
    dbWriteTable(con,'QT_FreeShares',float_shares,overwrite=FALSE,append=TRUE,row.names=FALSE)
  }
  dbDisconnect(con)
}
# -------------------- ~~ QT_sus_res ----------------
#' lcdb.update.QT_sus_res
#'
#' @export
#' @rdname lcdb.update.QT_sus_res
#' @examples
#' #-- initiate:
#' lcdb.init.QT_sus_res(19901231,19950630)
#' #-- update:
#' dates <- c(seq(as.Date("1998-12-31"),to = Sys.Date(),by = "year"),Sys.Date())
#' dates <- rdate2int(dates)
#' for(date in dates){
#' message(paste("updating to ",date,"..."))
#' lcdb.update.QT_sus_res(endT=date)
#' }
#' #-- fix the bugs
#' bugs <- lcdb.update.QT_sus_res_bugsFinding()
#' lcdb.update.QT_sus_res(stockID=bugs)
lcdb.init.QT_sus_res <- function(begT=19901231,endT=99990101){
  # (Re)build the QT_sus_res table from scratch: one row per suspension
  # interval of each stock, detected from QT_DailyQuote turnover volume.
  # 'sus' = first zero-volume day, 'res' = first trading day after the
  # suspension (NA if still suspended at endT).
  con <- db.local("qt")
  if(dbExistsTable(con,"QT_sus_res")){dbRemoveTable(con,"QT_sus_res")}
  message("lcdb.init QT_sus_res ... ")
  dbExecute(con,'CREATE TABLE QT_sus_res
            ( "stockID" TEXT,
            "sus" INTEGER,
            "res" INTEGER,
            "updateDate" INTEGER
            );')
  dbExecute(con,'CREATE UNIQUE INDEX [IX_QT_sus_res] ON [QT_sus_res] ([stockID], [sus]);')
  begT_filt <- paste("TradingDay >=",begT)
  endT_filt <- paste("TradingDay <= ",endT)
  updateDate <- dbGetQuery(con,paste("select max(TradingDay) from QT_DailyQuote where",endT_filt))[[1]]
  loops <- dbGetQuery(con,"select distinct ID from QT_DailyQuote")[[1]]
  # accumulate per-stock results in a pre-allocated list (the original grew
  # a data.frame with rbind inside the loop, which is O(n^2))
  res_list <- vector("list", length(loops))
  for (ii in seq_along(loops)){
    stockID_ <- loops[ii]
    message(paste(stockID_," "),appendLF = FALSE)
    QTstock <- dbGetQuery(con,paste("select ID, TradingDay, TurnoverVolume from QT_DailyQuote where ID=",QT(stockID_),"and ",begT_filt,"and",endT_filt, "order by TradingDay"))
    QTstock <- dplyr::mutate(QTstock,Vol_lag=lag(TurnoverVolume))
    QTstock <- dplyr::mutate(QTstock,sus_res=ifelse(TurnoverVolume<1 & Vol_lag>=1, "s",   # - suspend
                                                    ifelse(Vol_lag<1 & TurnoverVolume>=1, "r",  # - resumption
                                                           NA))) # - nothing
    sus <- dplyr::filter(QTstock,sus_res=="s")$TradingDay
    res <- dplyr::filter(QTstock,sus_res=="r")$TradingDay
    # Bug fix vs. the original: when a stock starts the window suspended,
    # 'sus' is empty while 'res' is not, and the old `sus[1] > res[1]`
    # comparison evaluated to NA and crashed `if()`. With no 'sus' there is
    # no complete interval to record, so such stocks are simply skipped.
    if (length(sus) > 0){
      # a leading resumption has no matching suspension in-window: drop it
      if (length(res) > 0 && sus[1] > res[1]){
        res <- res[-1]
      }
      # an open (ongoing) suspension gets res = NA
      if (length(sus) > length(res)){
        res <- c(res,NA)
      }
      res_list[[ii]] <- data.frame(stockID=stockID_,sus=sus,res=res,updateDate=updateDate)
    }
  }
  TB_sus_res <- do.call(rbind, res_list)
  if (!is.null(TB_sus_res)){
    dbWriteTable(con,"QT_sus_res",TB_sus_res,overwrite=FALSE,append=TRUE,row.names=FALSE)
  }
  dbDisconnect(con)
}
#' @export
#' @rdname lcdb.update.QT_sus_res
lcdb.update.QT_sus_res <- function(endT,stockID){
  # Incrementally update QT_sus_res (one row per suspension interval:
  # 'sus' = first zero-volume day, 'res' = first trading day after, NA while
  # still suspended). Two calling modes:
  #   - stockID missing: append events occurring after the last updateDate;
  #   - stockID given: rebuild those stocks' full history up to the table's
  #     current updateDate (their old rows are deleted before the rewrite).
  con <- db.local("qt")
  # begT: start of the scan window. Incremental mode resumes from the last
  # update; per-stock rebuild mode scans from the beginning of history.
  if(missing(stockID)){
    begT <- dbGetQuery(con,"select max(updateDate) from QT_sus_res")[[1]]
  }else{
    begT <- 19901231
  }
  if(missing(endT)){
    if(missing(stockID)){
      endT <- 99990101
    } else {
      endT <- dbGetQuery(con,"select max(updateDate) from QT_sus_res")[[1]]
    }
  }
  begT_filt <- paste("TradingDay >=",begT)
  endT_filt <- paste("TradingDay <= ",endT)
  # pool_filt: "1>0" is an always-true SQL placeholder when no stock filter
  if(missing(stockID)){
    pool_filt <- "1>0"
  } else{
    pool_filt <- paste("ID in",brkQT(stockID))
  }
  # refuse windows that would rewrite the middle of already-updated history
  if(endT<=begT){
    stop("Can not update this table in the midst!")
  }
  QTdata <- dbGetQuery(con,paste("select ID, TradingDay, TurnoverVolume from QT_DailyQuote where ",begT_filt,"and",endT_filt,"and",pool_filt, "order by ID,TradingDay"))
  # NOTE(review): this early return leaves 'con' open (no dbDisconnect /
  # on.exit) — connection leak when there is nothing to update; confirm and fix.
  if(dim(QTdata)[1]<1){
    return()
  }
  updateDate <- max(QTdata$TradingDay)
  loops <- unique(QTdata$ID)
  TB_sus_res <- data.frame()
  for (ii in 1:length(loops)){
    # ii <- 1
    stockID_ <- loops[ii]
    # message(stockID_) # --
    QTstock <- dplyr::filter(QTdata,ID==stockID_)
    QTstock <- dplyr::mutate(QTstock,Vol_lag=lag(TurnoverVolume)) # this step will trim the QTdata of begT just right to avoid the overlapping
    # mark event days: "s" = volume dropped to 0 (suspension starts),
    # "r" = volume came back (resumption), NA = no state change
    QTstock <- dplyr::mutate(QTstock,sus_res=ifelse(TurnoverVolume<1 & Vol_lag>=1, "s",   # - suspend
                                                    ifelse(Vol_lag<1 & TurnoverVolume>=1, "r",  # - resumption
                                                           NA))) # - nothing
    sus <- dplyr::filter(QTstock,sus_res=="s")$TradingDay
    res <- dplyr::filter(QTstock,sus_res=="r")$TradingDay
    if(length(sus)==0 & length(res) == 0){
      next
    } else if(length(sus)==0){ # length(sus)==0 & length(res)==1
      # only a resumption in this window: it closes the stock's latest open
      # suspension interval already stored in the table
      dbExecute(con,paste("UPDATE QT_sus_res
                          SET res = ",res,
                          "WHERE stockID=",QT(stockID_),
                          "and sus=(select max(sus) from QT_sus_res where stockID=",QT(stockID_),")"))
    } else if(length(res)==0) {# length(sus)==1 & length(res)==0
      # only a suspension: record a new open interval (res = NA)
      res <- NA
      S_R <- data.frame(stockID=stockID_,sus=sus,res=res,updateDate=updateDate)
      TB_sus_res <- rbind(TB_sus_res,S_R)
    } else {
      # both kinds of events present; events alternate in time, so at most
      # one leading 'res' can precede the first 'sus'
      if(sus[1] > res[1]){
        # leading resumption closes the stored open interval, then is
        # removed so the remaining sus/res vectors pair up positionally
        res_lag <- res[1] # -- res_lag
        dbExecute(con,paste("UPDATE QT_sus_res
                            SET res = ",res_lag,
                            "WHERE stockID=",QT(stockID_),
                            "and sus=(select max(sus) from QT_sus_res where stockID=",QT(stockID_),")"))
        res <- res[-1]
      }
      # trailing open suspension gets res = NA
      if(length(sus)>length(res)){
        res <- c(res,NA)
      }
      S_R <- data.frame(stockID=stockID_,sus=sus,res=res,updateDate=updateDate)
      TB_sus_res <- rbind(TB_sus_res,S_R)
    }
  }
  # per-stock rebuild mode: wipe the stocks' old rows before appending
  if(!missing(stockID)){
    dbExecute(con,paste("delete from QT_sus_res where stockID in",brkQT(loops)))
  }
  dbWriteTable(con,"QT_sus_res",TB_sus_res,overwrite=FALSE,append=TRUE,row.names=FALSE)
  dbDisconnect(con)
}
#' @export
#' @rdname lcdb.update.QT_sus_res
lcdb.update.QT_sus_res_bugsFinding <- function(){
  # Scan QT_sus_res for inconsistent records: a row with an open suspension
  # (res IS NULL) should always be the stock's most recent one. Returns the
  # offending stockIDs so they can be rebuilt via
  # lcdb.update.QT_sus_res(stockID = bugs).
  conn <- db.local("qt")
  suspect <- dbGetQuery(conn,"select * from (select * from QT_sus_res where res is null) a ,
                           (select stockID as 'ID', max(sus) as 'sus_max' from QT_sus_res group by stockID) m
                           where a.stockID=m.ID")
  # keep only the open intervals that are NOT the stock's latest suspension
  bad_ids <- subset(suspect, sus != sus_max)$stockID
  dbDisconnect(conn)
  return(bad_ids)
}
# -------------------- ~~ QT_Size ----------------
#' @export
lcdb.init.QT_Size <- function(){
  # Create an empty QT_Size table (market-cap snapshots: total / float /
  # free-float capitalization per date and stock) in the local "qt" db,
  # dropping any existing copy first.
  conn <- db.local("qt")
  if (dbExistsTable(conn, "QT_Size")) dbRemoveTable(conn, "QT_Size")
  message("lcdb.init QT_Size ... ")
  # schema + unique key on (date, stockID)
  dbExecute(conn,'CREATE TABLE "QT_Size" (
           "date" INTEGER,
           "stockID" TEXT,
           "mkt_cap" REAL,
           "float_cap" REAL,
           "free_cap" REAL
           );')
  dbExecute(conn,'CREATE UNIQUE INDEX [IX_QT_size] ON [QT_Size] ([date], [stockID]);')
  dbDisconnect(conn)
}
#' @export
lcdb.update.QT_Size <- function(begT, endT){
  # Update QT_Size with weekly snapshots of total / float / free-float
  # market capitalization between begT and endT (int dates, yyyymmdd).
  # Defaults: begT = last stored date (or first QT_DailyQuote day when the
  # table is empty); endT = last QT_DailyQuote day.
  if(missing(begT)){
    begT <- queryAndClose.dbi(db.local("qt"),"select max(date) from QT_Size")[[1]]
    if(is.na(begT)){ # EMPTY TABLE
      begT <- queryAndClose.dbi(db.local("qt"),"select min(TradingDay) from QT_DailyQuote")[[1]]
    }
  }
  if(missing(endT)){
    endT <- queryAndClose.dbi(db.local("qt"),"select max(TradingDay) from QT_DailyQuote")[[1]]
  }
  begT <- intdate2r(begT)
  endT <- intdate2r(endT)
  if(begT >= endT){
    return("Done.")
  }else{
    # drop the first rebalance date when it falls in the same calendar week
    # as the second (it was already covered by the previous run).
    # Bug fix vs. the original: guard the length — with a single rebdate,
    # rebdates_index[2] is NA and `if(NA)` crashed.
    rebdates <- getRebDates(begT, endT, rebFreq = "week")
    rebdates_index <- cut.Date2(rebdates, breaks = "week")
    if(length(rebdates) >= 2 && rebdates_index[1] == rebdates_index[2]){
      rebdates <- rebdates[-1]
    }
    # get TS (all A shares on each rebdate), queried year by year to keep
    # the IN-list sizes manageable; accumulate in a pre-allocated list
    yearlist <- lubridate::year(rebdates)
    yearlist_unique <- unique(yearlist)
    ts_list <- vector("list", length(yearlist_unique))
    for(i in seq_along(yearlist_unique)){
      year_ <- yearlist_unique[i]
      message(year_)
      rebdates_ <- rebdates[yearlist == year_]
      rebdates_qr <- paste0("(",paste(rdate2int(rebdates_), collapse = ","),")")
      ts_list[[i]] <- queryAndClose.dbi(db.local("qt"),
                                        paste0("SELECT TradingDay, ID from QT_DailyQuote
                                               WHERE TradingDay in ", rebdates_qr))
    }
    ts <- do.call(rbind, ts_list)
    colnames(ts) <- c("date","stockID")
    ts <- dplyr::arrange(ts, date, stockID)
    ts$date <- intdate2r(ts$date)
    # attach the three capitalization measures (raw, no log / Box-Cox)
    tsf <- gf_cap(ts,log=FALSE,bc_lambda = NULL,
                  var="mkt_cap",na_fill=FALSE,varname="mkt_cap",
                  datasrc="local")
    tsf <- gf_cap(tsf,log=FALSE,bc_lambda = NULL,
                  var="float_cap",na_fill=FALSE,varname="float_cap",
                  datasrc="local")
    tsf <- gf_cap(tsf,log=FALSE,bc_lambda = NULL,
                  var="free_cap",na_fill=FALSE,varname="free_cap",
                  datasrc="local")
    # output: delete-then-append so re-running the same window is idempotent
    tsf$date <- rdate2int(tsf$date)
    con <- db.local("qt")
    dbExecute(con,paste("delete from QT_Size where date >=", rdate2int(min(rebdates)),"and date <=", rdate2int(max(rebdates))))
    dbWriteTable(con,'QT_Size',tsf,overwrite=FALSE,append=TRUE,row.names=FALSE)
    dbDisconnect(con)
    return("Done.")
  }
}
# -------------------- ~~ FactorScore ----------------
#' lcdb.update.QT_FactorScore
#'
#' update \bold{all} factorscores in table CT_FactorLists (see \code{CT_FactorLists()}).
#'
#' @param begT the begin date of the updating
#' @param endT the end date of the updating
#' @param stockID a vector of stockID
#' @export
#' @seealso \code{\link{lcfs.update}}
#' @examples
#' # update factors on certain time
#' lcdb.update.QT_FactorScore(20130322,20130330)
#' # update factors of certain stocks
#' lcdb.update.QT_FactorScore(20130322,20130330,c("EQ000001","EQ000002"))
#' # update factors on certain time, of certain stocks
#' lcdb.update.QT_FactorScore(20130322,20130330,c("EQ000001","EQ000002"))
lcdb.update.QT_FactorScore <- function(begT,endT,stockID,loopFreq="month",type = c("alpha","risk")){
  # Update ALL factor scores listed in CT_FactorLists for the chosen type:
  # "alpha" -> table QT_FactorScore (db "fs"), "risk" -> QT_FactorScore_R
  # (db "fs_r"). Works through [begT, endT) in loopFreq-sized chunks.
  # @param begT/endT int dates (yyyymmdd); defaults depend on whether a
  #   stockID subset is given (append-new vs. rebuild-subset semantics).
  # @param stockID optional vector restricting the update to some stocks.
  type <- match.arg(type)
  if(type == "alpha"){
    con_fs <- db.local("fs")
    tableName_char <- "QT_FactorScore"
  }else if(type == "risk"){
    con_fs <- db.local("fs_r")
    tableName_char <- "QT_FactorScore_R"
  }
  con_qt <- db.local("qt")
  # release both connections on every exit path; the original leaked them
  # on the early return inside the loop
  on.exit({dbDisconnect(con_fs); dbDisconnect(con_qt)}, add = TRUE)
  if(missing(begT)){
    if(missing(stockID)){
      begT <- dbGetQuery(con_fs,paste("select max(TradingDay) from",tableName_char))[[1]]
    } else {
      begT <- dbGetQuery(con_fs,paste("select min(TradingDay) from",tableName_char))[[1]]
    }
  }
  if(missing(endT)){
    if(missing(stockID)){
      endT <- 99990101
    } else {
      endT <- dbGetQuery(con_fs,paste("select max(TradingDay) from",tableName_char))[[1]]
    }
  }
  # "1>0" is an always-true SQL placeholder when no stock filter is given
  if(missing(stockID)){
    pool_filt <- "1>0"
  } else {
    pool_filt <- paste("ID in",brkQT(stockID))
  }
  endT <- min(intdate2r(endT), Sys.Date())
  dates <- c(seq(intdate2r(begT), endT ,by = loopFreq), endT)
  dates <- rdate2int(dates)
  # loop-invariant work hoisted out of the chunk loop (the original
  # re-loaded the package and re-read the factor list on every iteration)
  require(QFactorGet)
  factorLists <- CT_FactorLists(type = type)
  for(ii in seq_len(length(dates)-1)){
    message(paste("lcdb.update.",tableName_char,": updating to ",dates[ii+1],"...",sep = ""))
    begT_filt <- paste("TradingDay >=",dates[ii])
    endT_filt <- paste("TradingDay < ",dates[ii+1])
    TS <- dbGetQuery(con_qt, paste("select TradingDay as date, ID as stockID from QT_DailyQuote where ",begT_filt,"and",endT_filt,"and",pool_filt))
    if(NROW(TS)==0) {
      return()   # nothing left to update; connections closed by on.exit
    }
    TS <- transform(TS,date=intdate2r(date))
    TS <- dplyr::arrange(TS,date,stockID)
    # compute each factor on the chunk's TS and merge them column-wise
    for(i in seq_len(NROW(factorLists))){
      factorName <- factorLists[i,"factorName"]
      factorID <- factorLists[i,"factorID"]
      factorFun <- factorLists[i,"factorFun"]
      factorPar <- factorLists[i,"factorPar"]
      message("Factor ",factorName," getting ...")
      subTSF <- getRawFactor(TS=TS,factorFun=factorFun,factorPar=factorPar)
      subTSF <- renameCol(subTSF,src="factorscore",tgt=factorID)
      if(i==1L){
        re <- subTSF[,c("date","stockID",factorID)]
      } else {
        re <- merge(re,subTSF[,c("date","stockID",factorID)],by=c("date","stockID"))
      }
    }
    re <- renameCol(re,c("date","stockID"),c("TradingDay","ID"))
    re$TradingDay <- rdate2int(re$TradingDay)
    if(TRUE){ # add extra fields, and reorder the fields to fix the order of target table.
      targetfield <- dbListFields(con_fs,tableName_char)
      extrfield <- setdiff(targetfield,names(re))
      extrdat <- as.data.frame(matrix(NA,NROW(re),length(extrfield)))
      names(extrdat) <- extrfield
      re <- cbind(re,extrdat)
      re <- re[targetfield]
    }
    # delete-then-append so re-running the same chunk is idempotent
    dbExecute(con_fs,paste("delete from",tableName_char,"where",begT_filt,"and",endT_filt,"and",pool_filt))
    dbWriteTable(con_fs,name = tableName_char,value = re,overwrite=FALSE,append=TRUE,row.names=FALSE)
    gc()
  }
}
#' lcfs.update
#'
#' update \bold{one} specific factorscore.
#'
#' @param factorID a single charactor of factorID
#' @param begT the begin date of the updating
#' @param endT the end date of the updating
#' @param stockID a vector of stockID
#' @export
#' @seealso \code{\link{lcdb.update.QT_FactorScore}}, \code{\link{lcfs.add}}
#' @examples
#' # update a factorscore on all the time, of all the stocks
#' lcfs.update("F000008")
#' # update a factor on certain time
#' lcfs.update("F000008",20130322,20130330)
#' # update a factor of certain stocks
#' lcfs.update("F000008",20130322,20130330,c("EQ000001","EQ000002"))
#' # update a factorscore on certain time, of certain stocks
#' lcfs.update("F000008",20130322,20130330,c("EQ000001","EQ000002"))
lcfs.update <- function(factorID,begT,endT,stockID,
                        splitNbin="month"){
  # Recompute ONE factor column ("F..." -> QT_FactorScore in db "fs",
  # "R..." -> QT_FactorScore_R in db "fs_r") over [begT, endT], optionally
  # restricted to some stocks, working in splitNbin-sized time chunks and
  # writing each day back via an UPDATE through a temp table.
  if(substr(factorID,1,1) == "F"){
    con <- db.local("fs")
    tableName_char <- "QT_FactorScore"
  }else if(substr(factorID,1,1) == "R"){
    con <- db.local("fs_r")
    tableName_char <- "QT_FactorScore_R"
  }
  # release the connection even if a chunk errors (original leaked on error)
  on.exit(dbDisconnect(con), add = TRUE)
  if(missing(begT)){
    begT <- dbGetQuery(con,paste("select min(TradingDay) from", tableName_char))[[1]]
  }
  if(missing(endT)){
    endT <- dbGetQuery(con,paste("select max(TradingDay) from", tableName_char))[[1]]
  }
  # "1>0" is an always-true SQL placeholder when no stock filter is given
  if(missing(stockID)){
    pool_filt <- "1>0"
  } else{
    pool_filt <- paste("ID in",brkQT(stockID))
  }
  factorFun <- CT_FactorLists(factorID = factorID)$factorFun
  factorPar <- CT_FactorLists(factorID = factorID)$factorPar
  loopT <- rdate2int(trday.get(intdate2r(begT),intdate2r(endT)))
  loopT.L <- split(loopT,cut(intdate2r(loopT),splitNbin))
  # recompute and write back one chunk of trading days
  subfun <- function(Ti){
    message(paste("  ",min(Ti),"to",max(Ti)," ..."))
    dates <- paste(Ti,collapse=",")
    TS <- dbGetQuery(con,paste("select TradingDay as date, ID as stockID from",tableName_char,"where TradingDay in (",dates,") and",pool_filt))
    TS$date <- intdate2r(TS$date)
    TSF <- getRawFactor(TS,factorFun,factorPar)
    TSF$date <- rdate2int(TSF$date)
    TSF <- renameCol(TSF,src="factorscore",tgt=factorID)
    for(Tij in Ti){ # update the factorscore day by day.
      dbWriteTable(con,"temp_table",TSF[TSF$date==Tij,],overwrite=TRUE,append=FALSE,row.names=FALSE)
      if(tableName_char == "QT_FactorScore"){
        qr <- paste("UPDATE QT_FactorScore
                    SET ",factorID,"= (SELECT ",factorID," FROM temp_table WHERE temp_table.stockID =QT_FactorScore.ID)
                    WHERE QT_FactorScore.ID = (SELECT stockID FROM temp_table WHERE temp_table.stockID =QT_FactorScore.ID)
                    and QT_FactorScore.TradingDay =",Tij)
      }else if(tableName_char == "QT_FactorScore_R"){
        qr <- paste("UPDATE QT_FactorScore_R
                    SET ",factorID,"= (SELECT ",factorID," FROM temp_table WHERE temp_table.stockID =QT_FactorScore_R.ID)
                    WHERE QT_FactorScore_R.ID = (SELECT stockID FROM temp_table WHERE temp_table.stockID =QT_FactorScore_R.ID)
                    and QT_FactorScore_R.TradingDay =",Tij)
      }
      # dbExecute replaces the dbSendQuery/dbClearResult pair: it is the DBI
      # call intended for statements, and matches the rest of this file
      dbExecute(con,qr)
    }
    gc()
  }
  message(paste("Function lcfs.update: updating factor score of",factorID,".... "))
  plyr::l_ply(loopT.L, subfun, .progress = plyr::progress_text(style=3))
}
#' lcfs.add
#'
#' add/update \bold{one} factorscore column in local sqlite table \code{"QT_FactorScore"}. On the same time, correspondingly, add/update a record into table \code{"CT_FactorLists"} and table \code{"CT_TechVars"}.
#'
#' @param factorFun a character string naming the function to get the factor scores
#' @param factorPar a character string, containing the parameters of the \code{factorFun}. Note that unlike in \code{\link{getTSF}}, here the factorPar could not be a list, because it need to be written into database.
#' @param factorDir a integer,should be 1 or -1 (1 for the positive factor,-1 for the negative one). \bold{Note that} the \code{factorDir} here is only used to write a record into table \code{"CT_FactorLists"}, not used when getting \code{TSF}. So that the factorscore in table \code{"QT_FactorScore"} is kept \bold{"raw", without adding the dirrection infomation}.
#' @param factorID a character string
#' @param factorName a character string. IF missing, then take a default name by function \code{default.factorName}.
#' @param factorType a character string
#' @param factorDesc a character string
#' @param splitNbin a character of interval specification(see \code{\link{cut.Date}} for detail). Specify the time interval when looping of getting the \code{TSF} object.
#' @return Write data into the local sqlite database, returning NULL.
#' @seealso \code{\link{getTSF}},\code{\link{modelPar.factor}}, \code{\link{lcdb.update.QT_FactorScore}}
#' @author Ruifei.Yin
#' @export
#' @examples
#' system.time(lcfs.add(factorFun="gf.F_rank_chg",factorPar="lag=60,con_type=\"1,2\"", factorDir=1, factorID="F000999"))
lcfs.add <- function(factorFun,
                     factorPar="",
                     factorDir,
                     factorID,
                     factorName = default.factorName(factorFun,factorPar,factorDir),
                     factorType = "",
                     factorDesc = "",
                     splitNbin = "month"){
  # Register a new factor: add a record to the factor-list table and to
  # CT_TechVars, add a score column to the score table, then backfill the
  # scores via lcfs.update(). "F..." ids go to db "fs", "R..." ids to "fs_r".
  if(substr(factorID,1,1) == "F"){
    con_fs <- db.local("fs")
    tableName_char <- "QT_FactorScore"
    factorListTab_char <- "CT_FactorLists"
  }else if(substr(factorID,1,1) == "R"){
    con_fs <- db.local("fs_r")
    tableName_char <- "QT_FactorScore_R"
    factorListTab_char <- "CT_FactorLists_R"
  }
  #
  if(factorID %in% CT_FactorLists()$factorID) {
    is_overwrite <- select.list(choices=c("OK","CANCEL"),preselect="CANCEL",title=paste("Warning!\nThe factor",factorID,"has already exist!\nDo you want to overwrite it?"),graphics=FALSE)
    if(is_overwrite == "CANCEL") {
      dbDisconnect(con_fs)   # original leaked the connection on cancel
      return(invisible(NULL))
    }
  }
  con_main <- db.local("main")
  # insert or replace a row to the factor-list table.
  # NOTE(review): the original always wrote into CT_FactorLists_R, even for
  # "F" (alpha) factors whose db is "fs" — using the per-type table name
  # here; verify the table names against the two score databases.
  if(!is.character(factorPar)){
    stop("The 'factorPar' must be a character!")
  }
  qr1 <- paste("replace into",factorListTab_char,"
               (factorID, factorName, factorFun, factorPar, factorDir, factorType, factorDesc )
               values
               (
               ",QT(factorID),",
               ",QT(factorName),",
               ",QT(factorFun),",
               ",QT(factorPar),",
               ",QT(factorDir),",
               ",QT(factorType),",
               ",QT(factorDesc),"
               ) ")
  dbExecute(con_fs,qr1)
  # insert or replace a row to table 'CT_TechVars'.
  # NOTE(review): the original hard-coded 'QT_FactorScore' here even for
  # risk factors; recording the actual score table instead.
  qr2 <- paste("replace into CT_TechVars
               (datasrc, secuCate, varName, func, tableName)
               values
               (
               'local',
               'EQ',
               ",QT(factorID),",
               ",QT(factorID),",
               ",QT(tableName_char),"
               ) ")
  dbExecute(con_main,qr2)
  # add 1 colume to the score table; tolerate re-runs (duplicate column)
  tryCatch(dbExecute(con_fs,paste("ALTER TABLE",tableName_char,"ADD COLUMN ",factorID,"float(0, 4)")),
           error=function(e) { print("RS-DBI driver: (error in statement: duplicate column name)") })
  dbDisconnect(con_fs)
  dbDisconnect(con_main)
  # backfill the new column over the whole table
  lcfs.update(factorID = factorID,splitNbin = splitNbin)
}
# ===================== xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx ======================
# ===================== Database Initiation ===========================
# ===================== xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx ======================
# -------------------- ~~ main ----------------
#' @rdname lcdb.init
#' @export
lcdb.init.IndexQuote_000985E <- function(begT=20050101){
  # (Re)build the full history of the equal-weighted CSI-all-A index
  # ('EI000985E') in QT_IndexQuote, with base close = 1000 at begT.
  # Constituents are refreshed monthly; the equal-weighted daily return is
  # the cross-sectional mean of the month's constituents' returns.
  con <- db.local("main")
  dbExecute(con,"delete from QT_IndexQuote where ID='EI000985E'")
  endT <- dbGetQuery(con,"select max(TradingDay) from QT_IndexQuote")[[1]]
  endT <- intdate2r(endT)
  begT <- intdate2r(begT)
  # month-start grid over the window
  dates <- trday.get(begT,endT)
  dates <- as.Date(unique(cut.Date2(dates,"month")))
  TS <- getIndexComp(indexID = 'EI000985',endT = dates, drop = FALSE)
  con_qt <- db.local("qt")
  # accumulate per-month results in a pre-allocated list (the original grew
  # a data.frame via rbind in the loop); seq_len also guards the degenerate
  # single-month case where 1:(length(dates)-1) would yield c(1, 0)
  ret_list <- vector("list", length(dates) - 1L)
  for(i in seq_len(length(dates) - 1L)){
    tmp.dates <- trday.get(dates[i],dates[i+1])
    tmp.dates <- tmp.dates[-length(tmp.dates)]  # drop overlap with next month
    message(rdate2int(max(tmp.dates)))
    qr <- paste("select TradingDay,ID,DailyReturn from QT_DailyQuote
                where TradingDay>=",rdate2int(min(tmp.dates))," and TradingDay<=",rdate2int(max(tmp.dates)))
    quotedf <- dbGetQuery(con_qt,qr)
    quotedf$TradingDay <- intdate2r(quotedf$TradingDay)
    # restrict to this month's constituents
    tmp.TS <- TS[TS$date==dates[i],]
    quotedf <- quotedf[quotedf$ID %in% tmp.TS$stockID,]
    ret_list[[i]] <- quotedf %>% dplyr::group_by(TradingDay) %>%
      dplyr::summarise(DailyReturn = mean(DailyReturn, na.rm = TRUE))
  }
  index <- dplyr::bind_rows(ret_list)
  # chain a wealth index and scale to base value 1000
  tmp <- xts::xts(index$DailyReturn,order.by = index$TradingDay)
  tmp <- WealthIndex(tmp)
  close <- data.frame(TradingDay=zoo::index(tmp),close=zoo::coredata(tmp)*1000,row.names =NULL)
  colnames(close) <- c('TradingDay','ClosePrice')
  index <- merge(index,close,by='TradingDay')
  # fields without a meaningful equal-weighted analogue stay NA
  index <- transform(index,TradingDay=rdate2int(TradingDay),
                     InnerCode=c(1000985),
                     PrevClosePrice=c(NA,index$ClosePrice[-(nrow(index))]),
                     OpenPrice=c(NA),
                     HighPrice=c(NA),
                     LowPrice=c(NA),
                     TurnoverVolume=c(NA),
                     TurnoverValue=c(NA),
                     TurnoverDeals=c(NA),
                     ChangePCT=DailyReturn*100,
                     NegotiableMV=c(NA),
                     UpdateTime=c(Sys.Date()),
                     ID=c('EI000985E'))
  index <- index[,c("InnerCode","TradingDay","PrevClosePrice","OpenPrice","HighPrice",
                    "LowPrice","ClosePrice","TurnoverVolume","TurnoverValue","TurnoverDeals",
                    "ChangePCT","NegotiableMV","UpdateTime","DailyReturn","ID")]
  dbWriteTable(con,'QT_IndexQuote',index,overwrite=FALSE,append=TRUE,row.names=FALSE)
  dbDisconnect(con_qt)
  dbDisconnect(con)
}
#' @rdname lcdb.init
#' @export
lcdb.init.IndexQuote_000985 <- function(begT=19900101){
  # (Re)load the daily quote history of the CSI-all-A index ('EI000985')
  # from Wind into the local QT_IndexQuote table, covering begT up to the
  # last trading day already present in the table.
  con <- db.local("main")
  wind_code <- '000985.CSI'
  index_id <- paste0('EI', substr(wind_code, 1, 6))
  # wipe any existing rows of this index before reloading
  dbExecute(con, paste("delete from QT_IndexQuote where ID=",QT(index_id)))
  endT <- dbGetQuery(con,"select max(TradingDay) from QT_IndexQuote")[[1]]
  endT <- intdate2r(endT)
  begT <- intdate2r(begT)
  # pull the raw quote fields from Wind
  require(WindR)
  WindR::w.start(showmenu = FALSE)
  quote_df <- w.wsd(wind_code,"pre_close,open,high,low,close,volume,amt,dealnum,pct_chg",begT,endT)[[2]]
  colnames(quote_df) <- c("TradingDay","PrevClosePrice","OpenPrice","HighPrice", "LowPrice",
                          "ClosePrice","TurnoverVolume","TurnoverValue","TurnoverDeals","ChangePCT")
  # look up the JY inner code of the index
  qr <- paste("SELECT InnerCode FROM SecuMain
              where SecuCode=",QT(substr(wind_code,1,6))," and SecuCategory=4")
  inner_code <- queryAndClose.odbc(db.jy(),qr)[[1]]
  # add the identifier / derived columns expected by QT_IndexQuote
  quote_df <- transform(quote_df,TradingDay=rdate2int(TradingDay),
                        InnerCode=c(inner_code),
                        DailyReturn=ChangePCT/100,
                        NegotiableMV=c(NA),
                        UpdateTime=c(Sys.Date()),
                        ID=c(index_id))
  quote_df <- quote_df[,c("InnerCode","TradingDay","PrevClosePrice","OpenPrice","HighPrice",
                          "LowPrice","ClosePrice","TurnoverVolume","TurnoverValue","TurnoverDeals",
                          "ChangePCT","NegotiableMV","UpdateTime","DailyReturn","ID")]
  dbWriteTable(con,'QT_IndexQuote',quote_df,overwrite=FALSE,append=TRUE,row.names=FALSE)
  dbDisconnect(con)
}
#' @export
#' @rdname lcdb.init
lcdb.init.QT_FreeShares <- function(filename="D:/sqlitedb/QT_FreeShares.csv"){
  # Rebuild the local QT_FreeShares table from a CSV snapshot: drop any
  # existing copy, recreate the schema, and bulk-load the CSV rows.
  free_shares <- read.csv(filename, stringsAsFactors = FALSE)
  con <- db.local("main")
  if (dbExistsTable(con, "QT_FreeShares")) {
    dbRemoveTable(con, "QT_FreeShares")
  }
  message("lcdb.init QT_FreeShares ... ");
  dbExecute(con,'CREATE TABLE "QT_FreeShares" (
"date" INTEGER,
"stockID" TEXT,
"freeShares" REAL
);')
  # One row per (date, stockID).
  dbExecute(con,'CREATE UNIQUE INDEX [IX_QT_Freeshares] ON [QT_FreeShares] ([date], [stockID]);')
  dbWriteTable(con, 'QT_FreeShares', free_shares,
               overwrite = FALSE, append = TRUE, row.names = FALSE)
  dbDisconnect(con)
}
#' @export
#' @rdname lcdb.init
lcdb.init.CT_TechVars <- function(filename="D:/sqlitedb/CT_TechVars.csv"){
  # Rebuild the CT_TechVars configuration table from a CSV snapshot.
  tech_vars <- read.csv(filename, stringsAsFactors = FALSE)
  con <- db.local("main")
  if (dbExistsTable(con, "CT_TechVars")) {
    dbRemoveTable(con, "CT_TechVars")
  }
  message("lcdb.init CT_TechVars ... ");
  dbExecute(con,'CREATE TABLE [CT_TechVars] (
[datasrc] TEXT,
[secuCate] TEXT,
[varName] TEXT,
[func] TEXT,
[tableName] TEXT);')
  # Each variable is keyed by its data source, target table, security
  # category and name.
  dbExecute(con,'CREATE UNIQUE INDEX [IX_CT_TechVars] ON [CT_TechVars] ([datasrc], [tableName], [secuCate], [varName]);')
  dbWriteTable(con, 'CT_TechVars', tech_vars,
               overwrite = FALSE, append = TRUE, row.names = FALSE)
  dbDisconnect(con)
}
#' lcdb.init
#'
#' Initialize all the tables in the local sqlite databases. Each init
#' function drops and recreates its table(s), then refills them via the
#' corresponding lcdb.update.* helper.
#' @param begT integer date (yyyymmdd) from which history is loaded.
#' @param endT integer date (yyyymmdd) up to which history is loaded.
#' @rdname lcdb.init
#' @examples
#' # The correct end-to-end process for initializing the whole local database:
#' # 1. export 3 csv tables:
#' lcdb.export2csv("main","CT_TechVars")
#' lcdb.export2csv("main","QT_FreeShares")
#' lcdb.export2csv("fs","CT_FactorLists")
#' # 2. build 3 empty sqlite files: qt.db, fs.db, main.db
#' # 3. initialize 3 database files, in proper order:
#' lcdb.init_qt()
#' lcdb.init_main()
#' lcdb.init_fs()
#' @export
lcdb.init_main <- function(begT=19900101,endT=99990101){
con <- db.local("main")
# ---- SecuMain: security master ----
if(dbExistsTable(con,"SecuMain")){dbRemoveTable(con,"SecuMain")}
message("lcdb.init SecuMain ... ");
dbExecute(con,'CREATE TABLE "SecuMain" (
"ID" TEXT,
"InnerCode" INTEGER,
"CompanyCode" INTEGER,
"SecuCode" TEXT,
"SecuAbbr" TEXT,
"SecuMarket" INTEGER,
"ListedSector" INTEGER,
"ListedState" INTEGER,
"UpdateTime" REAL,
"StockID_TS" TEXT,
"SecuCategory" INTEGER,
"ListedDate" INTEGER,
"StockID_wind" TEXT
); ')
dbExecute(con,'CREATE UNIQUE INDEX IX_SecuMain ON SecuMain (ID);')
lcdb.update.SecuMain()
# ---- QT_TradingDay: trading calendar with period-end flags ----
if(dbExistsTable(con,"QT_TradingDay")){dbRemoveTable(con,"QT_TradingDay")}
message("lcdb.init QT_TradingDay ... ");
dbExecute(con,'CREATE TABLE "QT_TradingDay" (
"TradingDate" INTEGER,
"IfTradingDay" INTEGER,
"SecuMarket" INTEGER,
"IfWeekEnd" INTEGER,
"IfMonthEnd" INTEGER,
"IfQuarterEnd" INTEGER,
"IfYearEnd" INTEGER
);')
dbExecute(con,'CREATE UNIQUE INDEX IX_QT_TradingDay ON QT_TradingDay (TradingDate, SecuMarket);')
lcdb.update.QT_TradingDay()
# ---- CT_IndustryList: industry classification dictionary ----
if(dbExistsTable(con,"CT_IndustryList")){dbRemoveTable(con,"CT_IndustryList")}
message("lcdb.init CT_IndustryList ... ");
dbExecute(con,'CREATE TABLE "CT_IndustryList" (
"Standard" INTEGER,
"Level" INTEGER,
"IndustryID" TEXT,
"IndustryName" TEXT,
"Alias" INTEGER,
"Code1" TEXT,
"Name1" TEXT,
"Code2" TEXT,
"Name2" TEXT,
"Code3" TEXT,
"Name3" TEXT,
"UpdateTime" REAL
); ')
dbExecute(con,'CREATE UNIQUE INDEX IX_CT_IndustryList ON CT_IndustryList (Standard, IndustryID);')
lcdb.update.CT_IndustryList()
# ---- CT_SystemConst: system constant dictionary ----
if(dbExistsTable(con,"CT_SystemConst")){dbRemoveTable(con,"CT_SystemConst")}
message("lcdb.init CT_SystemConst ... ");
dbExecute(con,'CREATE TABLE "CT_SystemConst" (
"LB" INTEGER,
"LBMC" TEXT,
"DM" INTEGER,
"MS" TEXT
);')
dbExecute(con,'CREATE UNIQUE INDEX [IX_CT_SystemConst] ON [CT_SystemConst] ([LB], [DM]);')
lcdb.update.CT_SystemConst()
# ---- CT_TechVars: loaded from its CSV snapshot ----
lcdb.init.CT_TechVars()
# ---- LC_ExgIndustry: per-stock industry membership history ----
if(dbExistsTable(con,"LC_ExgIndustry")){dbRemoveTable(con,"LC_ExgIndustry")}
message("lcdb.init LC_ExgIndustry ... ");
dbExecute(con,'CREATE TABLE "LC_ExgIndustry" (
"stockID" TEXT,
"CompanyCode" INTEGER,
"Code1" TEXT,
"Name1" TEXT,
"Code2" TEXT,
"Name2" TEXT,
"Code3" TEXT,
"Name3" TEXT,
"InDate" INTEGER,
"OutDate" INTEGER,
"InfoSource" TEXT,
"Standard" INTEGER,
"Industry" INTEGER,
"Flag" INTEGER,
"UpdateTime" REAL,
"Code99" TEXT,
"Name99" TEXT,
"Code98" TEXT,
"Name98" TEXT
);')
dbExecute(con,'CREATE UNIQUE INDEX IX_LC_ExgIndustry ON LC_ExgIndustry (Standard, stockID, InDate);')
lcdb.update.LC_ExgIndustry()
# Post-load patches for the SW and EZ industry standards.
lcdb.fix.swindustry()
lcdb.fix.ezindustry()
# ---- LC_IndexComponent: index membership history ----
if(dbExistsTable(con,"LC_IndexComponent")){dbRemoveTable(con,"LC_IndexComponent")}
message("lcdb.init LC_IndexComponent ... ");
dbExecute(con,'CREATE TABLE "LC_IndexComponent" (
"IndexID" TEXT,
"SecuID" TEXT,
"InDate" INTEGER,
"OutDate" INTEGER,
"Flag" INTEGER,
"UpdateTime" REAL
);')
dbExecute(con,'CREATE UNIQUE INDEX IX_LC_IndexComponent ON LC_IndexComponent (IndexID, InDate,SecuID);')
lcdb.update.LC_IndexComponent()
# EI000985 (CSI All Share) components are added separately.
lcdb.add.LC_IndexComponent("EI000985")
# ---- LC_IndexComponentsWeight: index component weights ----
if(dbExistsTable(con,"LC_IndexComponentsWeight")){dbRemoveTable(con,"LC_IndexComponentsWeight")}
message("lcdb.init LC_IndexComponentsWeight ... ");
dbExecute(con,'CREATE TABLE LC_IndexComponentsWeight
( IndexID  TEXT,
SecuID  TEXT,
EndDate  INTEGER,
Weight  REAL,
UpdateTime  REAL
);')
dbExecute(con,'CREATE UNIQUE INDEX IX_LC_IndexComponentsWeight ON LC_IndexComponentsWeight ([IndexID], [EndDate], [SecuID]);')
lcdb.update.LC_IndexComponentsWeight(begT = begT, endT = endT)
# ---- LC_PerformanceGrowth: earnings growth / forecasts ----
if(dbExistsTable(con,"LC_PerformanceGrowth")){dbRemoveTable(con,"LC_PerformanceGrowth")}
message("lcdb.init LC_PerformanceGrowth ... ");
dbExecute(con,'CREATE TABLE [LC_PerformanceGrowth] (
[stockID] varchar(10) NOT NULL,
[CompanyCode] int NOT NULL,
[InfoPublDate] int NOT NULL,
[EndDate] int NOT NULL,
[PeriodMark] int NOT NULL,
[src] varchar(10) NOT NULL,
[NP] money,
[NP_LYCP] money,
[NP_YOY] decimal(18, 6),
[OperatingRevenue] money,
[OR_LYCP] money,
[OR_YOY] decimal(18, 6),
[ForcastType] INT,
[UpdateTime] datetime NOT NULL,
[id] NOT NULL,
[Mark] INT);')
dbExecute(con,'CREATE UNIQUE INDEX [IX_LC_PerformanceGrowth] ON [LC_PerformanceGrowth] ([stockID], [PeriodMark], [InfoPublDate], [EndDate], [src], [Mark]);')
dbExecute(con,'CREATE INDEX [IX_LC_PerformanceGrowth2] ON [LC_PerformanceGrowth] ([id]);')
lcdb.update.LC_PerformanceGrowth(begT = begT, endT = endT)
# ---- LC_RptDate: report publication dates ----
if(dbExistsTable(con,"LC_RptDate")){dbRemoveTable(con,"LC_RptDate")}
message("lcdb.init LC_RptDate ... ");
dbExecute(con,'CREATE TABLE LC_RptDate (
stockID varchar(10) NOT NULL,
CompanyCode int NOT NULL,
EndDate int NOT NULL,
PublDate int NOT NULL,
UpdateTime datetime NOT NULL
);')
dbExecute(con,'CREATE UNIQUE INDEX [IX_LC_RptDate] ON [LC_RptDate] ([stockID], [PublDate], [EndDate]);')
lcdb.update.LC_RptDate(begT = begT, endT = endT)
# ---- QT_FreeShares: loaded from its CSV snapshot, then updated ----
lcdb.init.QT_FreeShares()
lcdb.update.QT_FreeShares(endT = endT)
# ---- QT_IndexQuote: daily index quotes ----
if(dbExistsTable(con,"QT_IndexQuote")){dbRemoveTable(con,"QT_IndexQuote")}
message("lcdb.init QT_IndexQuote ... ");
dbExecute(con,'CREATE TABLE QT_IndexQuote
( InnerCode  INTEGER,
TradingDay  INTEGER,
PrevClosePrice  REAL,
OpenPrice  REAL,
HighPrice  REAL,
LowPrice  REAL,
ClosePrice  REAL,
TurnoverVolume  REAL,
TurnoverValue  REAL,
TurnoverDeals  INTEGER,
ChangePCT  REAL,
NegotiableMV  REAL,
UpdateTime  REAL,
DailyReturn  REAL,
ID  TEXT
);')
dbExecute(con,'CREATE UNIQUE INDEX IX_QT_IndexQuote ON QT_IndexQuote (ID,TradingDay);')
lcdb.update.QT_IndexQuote(begT = begT, endT = endT)
# Two synthetic/special index series are loaded separately; note these
# must run AFTER QT_IndexQuote and the component tables exist.
lcdb.init.IndexQuote_000985(begT = begT)
lcdb.init.IndexQuote_000985E(begT = begT)
dbDisconnect(con)
}
# -------------------- ~~ qt ----------------
#' @export
#' @rdname lcdb.init
lcdb.init_qt <- function() {
  # Rebuild every table of the local "qt" database, in dependency order.
  lcdb.init.QT_DailyQuote()
  # QT_sus_res: initialize, then re-pull any stocks flagged by the
  # consistency checker.
  lcdb.init.QT_sus_res()
  suspect_ids <- lcdb.update.QT_sus_res_bugsFinding()
  if (length(suspect_ids) > 0) {
    message("\n QT_sus_res_bugsFinding:", suspect_ids)
    lcdb.update.QT_sus_res(stockID = suspect_ids)
  }
  # QT_Size: create, then bring up to date.
  lcdb.init.QT_Size()
  lcdb.update.QT_Size()
}
#' Initialize the local QT_DailyQuote table: recreate the schema, then copy
#' daily rows one trading day at a time (day list taken from the remote
#' quant database between `begT` (inclusive) and `endT` (exclusive)).
#' @export
#' @rdname lcdb.init
lcdb.init.QT_DailyQuote <- function(begT=19900101,endT=99990101){
# Note the asymmetric bounds: begT inclusive, endT exclusive.
begT_filt <- paste("TradingDay >=",begT)
endT_filt <- paste("TradingDay < ",endT)
con <- db.local("qt")
if(dbExistsTable(con,"QT_DailyQuote")){dbRemoveTable(con,"QT_DailyQuote")}
message("lcdb.init QT_DailyQuote ... ");
dbExecute(con,"
CREATE TABLE QT_DailyQuote (
ID varchar(10) NOT NULL,
InnerCode int NOT NULL,
TradingDay int NOT NULL,
PrevClosePrice smallmoney NULL,
OpenPrice smallmoney NULL,
HighPrice smallmoney NULL,
LowPrice smallmoney NULL,
ClosePrice smallmoney NULL,
TurnoverVolume decimal(20, 0) NULL,
TurnoverValue money NULL,
DailyReturn float NULL,
STStatus smallint NULL,
SecuAbbr varchar(10) NULL,
RRFactor float NULL,
RRClosePrice smallmoney NULL,
TotalShares decimal(20, 4) NULL,
NonRestrictedShares decimal(20, 4) NULL
);
")
dbExecute(con,'CREATE UNIQUE INDEX IX_QT_DailyQuote ON QT_DailyQuote ([TradingDay] ,[InnerCode]);')
dbExecute(con,'CREATE INDEX [IX_QT_DailyQuote_2] ON [QT_DailyQuote]([TradingDay],[ID]);')
dbExecute(con,'CREATE INDEX [IX_QT_DailyQuote_3] ON [QT_DailyQuote]([ID] );')
# dbExecute(con,'CREATE INDEX [IX_QT_DailyQuote_4] on QT_DailyQuote ([InnerCode]);')
# Trading-day list comes from the remote quant database.
all.days <- queryAndClose.odbc(db.quant(),paste("select distinct TradingDay from QT_DailyQuote","where",begT_filt,"and",endT_filt))[[1]]
all.days <- all.days[order(all.days)]
# Copy one day at a time to bound memory use; gc() after each chunk.
subfun <- function(day0){
message(paste("  ",day0),appendLF = FALSE)
# NOTE(review): data0 is queried from `con` (the LOCAL database whose
# QT_DailyQuote was just recreated empty above), so this select returns
# no rows; presumably it should read from db.quant() like the day list
# does — confirm against the working original before changing.
data0 <- dbGetQuery(con,paste("select * from QT_DailyQuote where TradingDay=",day0))
dbWriteTable(con, "QT_DailyQuote", data0, overwrite = FALSE, append=TRUE,row.names = FALSE)
gc()
}
plyr::l_ply(all.days, subfun, .progress = plyr::progress_text(style=3))
dbDisconnect(con)
}
# -------------------- ~~ fs ----------------
#' @export
#' @rdname lcdb.init
lcdb.init.CT_FactorLists <- function(filename, type = c("alpha","risk")){
  # Rebuild the factor-list configuration table for either the alpha
  # ("fs") or the risk ("fs_r") factor-score database.
  type <- match.arg(type)
  csv_tab <- read.csv(filename, stringsAsFactors = FALSE)
  if (type == "alpha") {
    con <- db.local("fs")
    if (dbExistsTable(con, "CT_FactorLists")) {
      dbRemoveTable(con, "CT_FactorLists")
    }
    message("lcdb.init CT_FactorLists ... ");
    dbExecute(con,'CREATE TABLE [CT_FactorLists] (
[factorID] TEXT NOT NULL,
[factorName] TEXT NOT NULL,
[factorFun] TEXT,
[factorPar] TEXT,
[factorDir] INT DEFAULT 1,
[factorType] TEXT,
[factorDesc] TEXT);')
    dbExecute(con,'CREATE UNIQUE INDEX [IX_CT_FactorLists] ON [CT_FactorLists] ([factorID]);')
    dbWriteTable(con, 'CT_FactorLists', csv_tab,
                 overwrite = FALSE, append = TRUE, row.names = FALSE)
    dbDisconnect(con)
  } else if (type == "risk") {
    con <- db.local("fs_r")
    if (dbExistsTable(con, "CT_FactorLists_R")) {
      dbRemoveTable(con, "CT_FactorLists_R")
    }
    message("lcdb.init CT_FactorLists_R ... ");
    dbExecute(con,'CREATE TABLE [CT_FactorLists_R] (
[factorID] TEXT NOT NULL,
[factorName] TEXT NOT NULL,
[factorFun] TEXT,
[factorPar] TEXT,
[factorDir] INT DEFAULT 1,
[factorType] TEXT,
[factorDesc] TEXT);')
    dbExecute(con,'CREATE UNIQUE INDEX [IX_CT_FactorLists_R] ON [CT_FactorLists_R] ([factorID]);')
    # The CSV content is NOT used in this branch (same as the original):
    # the risk table is seeded with a single hard-coded size factor.
    seed_row <- data.frame("factorID" = "R000001",
                           "factorName" = "lncap",
                           "factorFun" = "gr.lncap",
                           "factorPar" = "",
                           "factorDir" = 1L,
                           "factorType" = NA,
                           "factorDesc" = NA)
    dbWriteTable(con, 'CT_FactorLists_R', seed_row,
                 overwrite = FALSE, append = TRUE, row.names = FALSE)
    dbDisconnect(con)
  }
  return("Done")
}
#' Initialize the factor-score table (QT_FactorScore / QT_FactorScore_R):
#' the table gets one float column per factorID registered in the matching
#' CT_FactorLists table, then is filled month-by-month via
#' lcdb.update.QT_FactorScore.
#' @export
#' @rdname lcdb.init
lcdb.init.QT_FactorScore <- function(begT=20050104,endT=99990101, type = c("alpha","risk")){
type <- match.arg(type)
if(type == "alpha"){
con <- db.local("fs")
if(dbExistsTable(con,"QT_FactorScore")){dbRemoveTable(con,"QT_FactorScore")}
message("lcdb.init QT_FactorScore ... ");
# One "<factorID> float(0, 4)" column per registered alpha factor.
IDs <- dbGetQuery(con,"select factorID from CT_FactorLists order by factorID")[[1]]
IDs <- paste(IDs," float(0, 4)")
IDs <- paste(IDs,collapse = ",")
# Build the CREATE TABLE statement dynamically from the factor list.
char_createtable <- paste('CREATE TABLE [QT_FactorScore] (
[ID] varchar(10) NOT NULL,
[TradingDay] int NOT NULL,',
IDs,
')'
)
dbExecute(con,char_createtable)
dbExecute(con,'CREATE UNIQUE INDEX [IX_QT_FactorScore] ON [QT_FactorScore] ([TradingDay] DESC, [ID]);')
dbExecute(con,'CREATE INDEX [IX_QT_FactorScore_ID] ON [QT_FactorScore] ([ID]);')
lcdb.update.QT_FactorScore(begT,endT,loopFreq = "month",type = "alpha")
dbDisconnect(con)
}else if(type == "risk"){
con <- db.local("fs_r")
if(dbExistsTable(con,"QT_FactorScore_R")){dbRemoveTable(con,"QT_FactorScore_R")}
message("lcdb.init QT_FactorScore_R ... ");
IDs <- dbGetQuery(con,"select factorID from CT_FactorLists_R order by factorID")[[1]]
if(length(IDs) == 0){
# No registered risk factors yet: create the key columns only and
# skip the update step (nothing to fill).
char_createtable <- paste('CREATE TABLE [QT_FactorScore_R] (
[ID] varchar(10) NOT NULL,
[TradingDay] int NOT NULL)')
dbExecute(con,char_createtable)
dbExecute(con,'CREATE UNIQUE INDEX [IX_QT_FactorScore_R] ON [QT_FactorScore_R] ([TradingDay] DESC, [ID]);')
dbExecute(con,'CREATE INDEX [IX_QT_FactorScore_R_ID] ON [QT_FactorScore_R] ([ID]);')
}else{
# Same dynamic-column scheme as the alpha branch.
IDs <- paste(IDs," float(0, 4)")
IDs <- paste(IDs,collapse = ",")
char_createtable <- paste('CREATE TABLE [QT_FactorScore_R] (
[ID] varchar(10) NOT NULL,
[TradingDay] int NOT NULL,',
IDs,
')'
)
dbExecute(con,char_createtable)
dbExecute(con,'CREATE UNIQUE INDEX [IX_QT_FactorScore_R] ON [QT_FactorScore_R] ([TradingDay] DESC, [ID]);')
dbExecute(con,'CREATE INDEX [IX_QT_FactorScore_R_ID] ON [QT_FactorScore_R] ([ID]);')
lcdb.update.QT_FactorScore(begT,endT,loopFreq = "month",type = "risk")
}
dbDisconnect(con)
}
return("Done.")
}
#' @export
#' @rdname lcdb.init
lcdb.init_fs <- function(type = c("alpha", "risk")) {
  # Initialize one factor-score database: first the factor list (from its
  # CSV snapshot), then the score table built from that list.
  type <- match.arg(type)
  switch(type,
    alpha = {
      lcdb.init.CT_FactorLists(filename = "D:/sqlitedb/CT_FactorLists.csv", type = "alpha")
      lcdb.init.QT_FactorScore(type = "alpha")
    },
    risk = {
      lcdb.init.CT_FactorLists(filename = "D:/sqlitedb/CT_FactorLists_r.csv", type = "risk")
      lcdb.init.QT_FactorScore(type = "risk")
    }
  )
}
# ===================== xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx ======================
# ===================== Table Exporting ===========================
# ===================== xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx ======================
#' lcdb.export2csv
#'
#' Export one table from a local sqlite database to a CSV file named
#' \code{<path>/<tablename>.csv}.
#'
#' @param dbname name of the local database, passed to \code{db.local}.
#' @param tablename name of the table to export.
#' @param path directory in which the CSV file is written.
#' @return invisibly \code{NULL}; called for its file-writing side effect.
#' @export
#' @examples
#' lcdb.export2csv("main","CT_TechVars")
#' lcdb.export2csv("main","QT_FreeShares")
#' lcdb.export2csv("fs","CT_FactorLists")
#' lcdb.export2csv("fs_r","CT_FactorLists_r")
lcdb.export2csv <- function(dbname, tablename, path = "D:/sqlitedb") {
  con <- db.local(dbname)
  # Fix: the connection was never closed; guarantee cleanup even if the
  # read or the write below fails.
  on.exit(dbDisconnect(con), add = TRUE)
  tb <- dbReadTable(con, tablename)
  filename <- file.path(path, paste0(tablename, ".csv"))
  write.csv(tb, filename, row.names = FALSE)
}
|
92afa28b195e622abf278625fb4f53913035469f
|
6e32987e92e9074939fea0d76f103b6a29df7f1f
|
/googleaiplatformv1.auto/man/GoogleCloudAiplatformV1Context.labels.Rd
|
a8384ddfb67c8d6130b9c58f9dbaddf21f14e0f8
|
[] |
no_license
|
justinjm/autoGoogleAPI
|
a8158acd9d5fa33eeafd9150079f66e7ae5f0668
|
6a26a543271916329606e5dbd42d11d8a1602aca
|
refs/heads/master
| 2023-09-03T02:00:51.433755
| 2023-08-09T21:29:35
| 2023-08-09T21:29:35
| 183,957,898
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,194
|
rd
|
GoogleCloudAiplatformV1Context.labels.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aiplatform_objects.R
\name{GoogleCloudAiplatformV1Context.labels}
\alias{GoogleCloudAiplatformV1Context.labels}
\title{GoogleCloudAiplatformV1Context.labels Object}
\usage{
GoogleCloudAiplatformV1Context.labels()
}
\value{
GoogleCloudAiplatformV1Context.labels object
}
\description{
GoogleCloudAiplatformV1Context.labels Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
The labels with user-defined metadata to organize your Contexts. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one Context (System labels are excluded).
}
\seealso{
Other GoogleCloudAiplatformV1Context functions:
\code{\link{GoogleCloudAiplatformV1Context.metadata}()},
\code{\link{GoogleCloudAiplatformV1Context}()},
\code{\link{projects.locations.metadataStores.contexts.create}()},
\code{\link{projects.locations.metadataStores.contexts.patch}()}
}
\concept{GoogleCloudAiplatformV1Context functions}
|
894b125498c45f94f68ea2b85fe1a5a51ba6b999
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/mgcViz/examples/check.gamViz.Rd.R
|
317a0e368c7705ae97b29f3953309711667f2726
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 529
|
r
|
check.gamViz.Rd.R
|
# Auto-extracted runnable example for mgcViz::check.gamViz (gam diagnostics).
library(mgcViz)
### Name: check.gamViz
### Title: Some diagnostics for a fitted gam model
### Aliases: check.gamViz
### ** Examples
library(mgcViz)  # loaded a second time by the extracted example; harmless
set.seed(0)
# Simulate Gu & Wahba example data and fit a 4-smooth additive model.
dat <- gamSim(1, n = 200)
b <- gam(y ~ s(x0) + s(x1) + s(x2) + s(x3), data = dat)
# Convert the fit to a gamViz object so check() dispatches to check.gamViz.
b <- getViz(b)
# Checks using default options
check(b)
# Change some algorithmic and graphical parameters
check(b,
      a.qq = list(method = "tnorm",
                  a.cipoly = list(fill = "light blue")),
      a.respoi = list(size = 0.2),
      a.hist = list(bins = 10))
|
067d78bb12d6730ee128c5fbf0ed42747ed9ff05
|
ed6dd1bb9c27ac987f12efbe59c784c1ae368bcb
|
/man/yhat.transform_from_example.Rd
|
33036abdc34ddf84ae0b9e6a6eb08c97f0467be6
|
[] |
no_license
|
apatil/yhatr
|
4efb1b3c155d96d39e0bca8b7a1f426218f0486c
|
3296e47a3fef2f1298eeda4ee50f51bc2b1b8fca
|
refs/heads/master
| 2020-12-25T21:24:06.805322
| 2015-03-12T18:20:34
| 2015-03-12T18:20:34
| 33,420,008
| 1
| 0
| null | 2015-04-04T20:52:54
| 2015-04-04T20:52:54
| null |
UTF-8
|
R
| false
| false
| 774
|
rd
|
yhat.transform_from_example.Rd
|
% Generated by roxygen2 (4.0.2.9000): do not edit by hand
% Please edit documentation in R/yhatR.R
\name{yhat.transform_from_example}
\alias{yhat.transform_from_example}
\title{Generates a model.transform function from an example input data.frame.
Handles columns which need to be type casted further after the initial JSON
to Robject such as factors and ints.}
\usage{
yhat.transform_from_example(df)
}
\arguments{
\item{df}{A data.frame object which mirrors the kind of input to the model.}
}
\description{
Generates a model.transform function from an example input data.frame.
Handles columns which need to be type casted further after the initial JSON
to Robject such as factors and ints.
}
\examples{
\dontrun{
model.transform <- yhat.transform_from_example(iris)
}
}
|
c340dde9eaf0f2b36afa3deb457ab99160c03ef9
|
3b04bdd3babe0ac4cb0afb015b31d6d11d506e6c
|
/ArabMAGIC/R/zip_data.R
|
9edcb130d67229bbc4c269ac57239332fdd9c867
|
[] |
no_license
|
kbroman/qtl2data
|
21f4d43579cf681a4406766d9e4c780808346eea
|
502f9627ecc72876d93de3b66439527ec3914536
|
refs/heads/main
| 2021-12-04T07:11:10.325235
| 2021-09-17T12:41:58
| 2021-09-17T12:41:58
| 43,719,321
| 0
| 0
| null | 2015-10-05T23:17:16
| 2015-10-05T23:17:16
| null |
UTF-8
|
R
| false
| false
| 1,258
|
r
|
zip_data.R
|
# zip the Arabidopsis MAGIC data, in two versions (TAIR8 and TAIR9)
# - first subset the _geno and _foundergeno data to the markers with positions in the corresponding map build
library(qtl2)
set.seed(20190215)
# Work inside a randomly-named temporary directory so repeated runs don't collide.
tmpdir <- file.path(tempdir(), paste(sample(letters, 20, replace=TRUE), collapse=""))
tmpdir <- sub("//", "/", tmpdir)
if(!dir.exists(tmpdir)) dir.create(tmpdir)
for(build in c(8, 9)) {
json_file <- paste0("arabmagic_tair", build, ".json")
# Copy the control file, phenotypes, and this build's physical map into tmpdir.
file.copy(file.path("..", json_file), tmpdir)
file.copy("../arabmagic_pheno.csv", tmpdir)
pmap_file <- paste0("../arabmagic_pmap_tair", build, ".csv")
file.copy(pmap_file, tmpdir)
# Markers with positions in this build; used to subset the genotype files.
markers <- rownames(read_csv(pmap_file))
for(g in c("geno", "foundergeno")) {
file <- paste0("../arabmagic_", g, ".csv")
# First line of each genotype file is a "# ..." description; preserve it
# as the comment line of the rewritten CSV.
desc <- sub("^# ", "", readLines(file, n=1))
dat <- read_csv(file)
dat <- dat[markers,]
qtl2convert::write2csv(cbind(marker=rownames(dat), dat),
                       file.path(tmpdir, basename(file)),
                       comment=desc, overwrite=TRUE)
}
# Bundle everything referenced by the control file into a single zip,
# then copy the zip back next to the source data.
zip_datafiles(file.path(tmpdir, json_file))
file.copy(file.path(tmpdir, sub(".json", ".zip", json_file)), "..")
}
unlink(tmpdir, recursive=TRUE)
|
386b2c055d7204da855a6a5511029b3cf0b8dbbe
|
0c6d2f47a296217105f38ae32ad5cf8e5deef1e2
|
/man/inverseTAPE.Rd
|
1cc5088ac2db4d6ff087a2b0a74a39bfec4889be
|
[] |
no_license
|
cran/RFOC
|
338616505b90e1f3c9511ae9b55a1d17f73d1bd6
|
b2c97a028b6cf9802439a74d5a6140a3ce9d07e4
|
refs/heads/master
| 2021-06-01T20:24:37.900822
| 2018-01-18T10:13:23
| 2018-01-18T10:13:23
| 17,713,811
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,130
|
rd
|
inverseTAPE.Rd
|
\name{inverseTAPE}
\alias{inverseTAPE}
\title{Inverse Moment Tensor
}
\description{Inverse moment tensor from
Tape angles.
}
\usage{
inverseTAPE(GAMMA, BETA)
}
\arguments{
\item{GAMMA}{Longitude, degrees
}
\item{BETA}{CoLatitude, degrees
}
}
\details{Uses Tape and Tape lune angles
to estimate the moment tensor. This function
is the inverse of the SourceType calculation.
There are two solutions to the
systems of equations.
Vectors are scaled by the maximum value.
}
\value{Moment tensor list:
\item{Va}{vector, First solution}
\item{Vb}{vector, Second solution}
}
\references{Tape, W.,and C.Tape(2012), A geometric comparison of source-type plots for moment tensors, Geophys. J. Int., 190, 499-510.
}
\author{
Jonathan M. Lees<jonathan.lees@unc.edu>
}
\note{The latitude is the CoLatitude.
Either vector can be used as a solution.
Orientation of the moment tensor is not preserved in the lune plots.
}
\seealso{SourceType
}
\examples{
lats = seq(from = -80, to = 80, by=10)
lons = seq(from=-30, to=30, by=10)
i = 3
j = 3
u = inverseTAPE( lons[i], 90-lats[j] )
}
\keyword{misc}
|
42f92d6c8d4ec6fd80ce43e95ae9a8c2f967ba0b
|
c0347fe541c2c95ea44ba8a69c595008b2f222d2
|
/02_Rprogramming/Assignment3/best.R
|
5820593af1fc9ebea0ca2732d4eefbeee5454955
|
[] |
no_license
|
MhAmine/DataScience_CourseraSpecialization
|
d0162e51459bf1e1daf4341d004d5546247a14eb
|
cd41c89915bba427870c73fe60fd80d02bbef6f0
|
refs/heads/master
| 2021-01-19T08:41:02.277035
| 2017-02-17T11:25:46
| 2017-02-17T11:25:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 926
|
r
|
best.R
|
# Find the hospital(s) with the lowest 30-day mortality rate for a given
# outcome in a given state.
#
# Args:
#   state:   state abbreviation, matched against the State column.
#   outcome: one of "heart attack", "heart failure", "pneumonia".
#
# Reads "outcome-of-care-measures.csv" from the working directory with all
# columns as character; the mortality rates are in columns 11 (heart
# attack), 17 (heart failure) and 23 (pneumonia).
#
# Returns: the Hospital.Name value(s) achieving the minimum rate.
# Raises:  "invalid state" / "invalid outcome" for bad arguments.
best <- function(state, outcome) {
  outcomes <- c("pneumonia", "heart attack", "heart failure")
  data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  if (!is.element(state, data$State)) {
    stop("invalid state")
  } else if (!is.element(outcome, outcomes)) {
    # Fix: the original tested is.element(outcome, outcome), which is
    # always TRUE, so invalid outcomes were never rejected here.
    stop("invalid outcome")
  } else {
    # Pick the column holding this outcome's mortality rate.
    if (identical(outcome, "heart attack")) {
      column <- colnames(data)[11]
    } else if (identical(outcome, "heart failure")) {
      column <- colnames(data)[17]
    } else if (identical(outcome, "pneumonia")) {
      column <- colnames(data)[23]
    }
    # Restrict to the requested state and drop rows without a numeric rate.
    data <- data[data$State == state, ]
    data[, column] <- as.numeric(data[, column])
    data <- data[!is.na(data[, column]), ]
    min_mort_rate <- min(data[, column], na.rm = TRUE)
    indices <- (data[, column] == min_mort_rate)
    hospitals <- data[indices, "Hospital.Name"]
    hospitals
  }
}
|
90e17ab4c4e1e89177139a14d1fd7f4ae7e98071
|
c604c9c916065e77196f041dad992886aa55b72a
|
/plot3.R
|
791b4585195f4995db41028835aa4b5ab3890925
|
[] |
no_license
|
astropic/ExData_Plotting1
|
cae3e42f06558f7af7f75a5f9c7db808089c5f15
|
9acef41c197e388f1f67bb1f49b80d2a70eccf7d
|
refs/heads/master
| 2021-01-22T06:58:30.888921
| 2016-04-09T13:23:30
| 2016-04-09T13:23:30
| 55,845,319
| 0
| 0
| null | 2016-04-09T13:20:04
| 2016-04-09T13:20:03
| null |
UTF-8
|
R
| false
| false
| 1,087
|
r
|
plot3.R
|
# Make sure time locale is English
Sys.setlocale("LC_TIME", "English")
# Read and subset data. NA is represented by "?".
data <- read.csv("household_power_consumption.txt", sep=";", na.strings="?")
sub_data <- subset(data, data$Date == "1/2/2007" | data$Date == "2/2/2007")
# Join Date and Time columns into a Date-Time List
datetime <- paste(sub_data$Date, sub_data$Time)
datetime <- strptime(datetime, "%d/%m/%Y %H:%M:%S")
# Make sure it is 1 Graph layout and default font size.
par(mfrow=c(1,1))
par(cex=1)
# Generate Graph
plot(datetime, sub_data$Sub_metering_1, xlab="", ylab="Energy sub metering", type="n")
lines(datetime, sub_data$Sub_metering_1, type="l", col="darkgrey")
lines(datetime, sub_data$Sub_metering_2, type="l", col="red")
lines(datetime, sub_data$Sub_metering_3, type="l", col="blue")
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=c(1,1,1), lwd=c(2.5, 2.5, 2.5), col=c("darkgrey", "red", "blue"))
# Save to a File
dev.copy(png, file = "plot3.png", bg = "white", width=480, height=480)
dev.off()
|
ffa29abb3fa77e74f3fa4a879afe49ba419e3f39
|
2d34708b03cdf802018f17d0ba150df6772b6897
|
/googledataprocv1alpha1.auto/man/JobScheduling.Rd
|
3785e95a03eb9e9db6ab005143a670588067da5a
|
[
"MIT"
] |
permissive
|
GVersteeg/autoGoogleAPI
|
8b3dda19fae2f012e11b3a18a330a4d0da474921
|
f4850822230ef2f5552c9a5f42e397d9ae027a18
|
refs/heads/master
| 2020-09-28T20:20:58.023495
| 2017-03-05T19:50:39
| 2017-03-05T19:50:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 694
|
rd
|
JobScheduling.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataproc_objects.R
\name{JobScheduling}
\alias{JobScheduling}
\title{JobScheduling Object}
\usage{
JobScheduling(maxFailuresPerHour = NULL)
}
\arguments{
\item{maxFailuresPerHour}{Optional Maximum number of times per hour a driver may be restarted as a result of driver terminating with non-zero code before job is reported failed}
}
\value{
JobScheduling object
}
\description{
JobScheduling Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Job scheduling options.Beta Feature: These options are available for testing purposes only. They may be changed before final release.
}
|
9c12cf2bb0965483f68716ae27fe5b5880113869
|
a153380b0bd7f9d10ac4e8d53e2a8238daf34b2a
|
/man/get_scores_and_T_by_score.Rd
|
100e631e6096c8b8632188dadff383b2926c9b47
|
[] |
no_license
|
gostevehoward/uniformrank
|
f31a915a25c39fd750af07dd4c5e0a60ca270e39
|
1f207c8abf1017d4d1e1c596304059d44c20d945
|
refs/heads/master
| 2022-09-02T00:34:45.562763
| 2020-05-31T04:01:45
| 2020-05-31T04:31:39
| 268,208,116
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,146
|
rd
|
get_scores_and_T_by_score.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/internal.R
\name{get_scores_and_T_by_score}
\alias{get_scores_and_T_by_score}
\title{Generate the score for each pair difference and the observed
value of the random walk T using the raw pair differences and
the score function. Not meant to be called directly by users.}
\usage{
get_scores_and_T_by_score(outcomes, score_fn)
}
\arguments{
\item{outcomes}{A vector of matched pair differences.}
\item{score_fn}{A string indicating which score function should be
used. Must be one of 'sign' (sign score function), 'wsrt'
(Wilcoxon signed rank score function), 'normal_scores'
(normal score function) and 'redescending'.}
}
\value{
A list containing two elements, \code{scores} and
\code{T}. Vector \code{scores} gives the score for each
pair difference in non-decreasing order. Vector \code{T}
gives the corresponding random walk for the observed data.
}
\description{
Generate the score for each pair difference and the observed
value of the random walk T using the raw pair differences and
the score function. Not meant to be called directly by users.
}
|
17223ad89f2408cb5afbedb5a207ff563eb571c0
|
d7471f659e9d6e2293f6a5b880bd8fad7ea653c0
|
/code/scripts/init.R
|
70f81312c7e291b4c3dcc65a3acb1c69c3fda8d5
|
[] |
no_license
|
mabafaba/mse_sr
|
113d792e9990ed98b1f2ee8de9e0e4c34d274ee5
|
2a4ff98545ae6b5525d939691441979d63bb7ff1
|
refs/heads/master
| 2021-01-18T23:10:57.819898
| 2017-11-27T13:00:12
| 2017-11-27T13:00:12
| 87,091,632
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,043
|
r
|
init.R
|
setwd("/Users/m/paper_january17")
src<-"./code/data/all_LU.shp"
require("rgdal")
require("raster")
require("abind")
require("gdalUtils")
require("MASS")
source("./code/scripts/load_data.R")
source("./code/scripts/make_data.R")
source("./code/scripts/prep_data.R")
source("./code/scripts/analysis.R")
source("./code/scripts/plots.R")
source("./code/scripts/gis.R")
layerNames<-c("lu_ 1875_all","lu_1895_all","lu_1915_all","lu_1935_all","lu_1960_all","lu_1985_all","lu_2005_all")
years<-c(1875,1895,1915,1935,1960,1985,2005)
# parameters all
runs=10
# parameters synthetic
# synthetic_names<-c("uniform random","segregated 32x32 fields","segregated 8x8 fields","segregated 2x2 fields","sorted","1/f noise","additive cascade")
synthetic_names<-c("uniform random","segregated 8x8 fields","full segregation","sorted","1/f noise","additive cascade")
synthetic_names_abb<-c("unif","segmed","halfhalf","sorted","1/f","casc")
synthfig_letters<-paste0(letters[1:length(synthetic_names_abb)],")")
names(synthfig_letters)<-synthetic_names_abb
syntheticsize<-512
geospace_aggregationfactor = 128
lags_synthetic=c(3,13,65)
synth_phasespace_plot_scales=c(2,3)
# parameters real
pixelWidth_real<-50
lagsINmeters_real<-c(50,150,450,1350,4050)
# lagsINmeters_real<-c(50,150,450)
lags_real=lagsINmeters_real/pixelWidth_real
lags_real=2*round((lags_real+1)/2)-1
# parameters sensitivity
runs_sens<-50
sens_resolution_pixelWidth<-pixelWidth_real*2
sens_resolution_lagsINmeters<-c(100,500,1300,4100)
sens_resolution_lags<-sens_resolution_lagsINmeters/sens_resolution_pixelWidth
sens_resolution_lags<-2*round((sens_resolution_lags+1)/2)-1
sens_neighbourhoods_pixelWidth<-pixelWidth_real
sens_neighbourhoods_lagsINmeters<-c(50,100,300,700,2000)
sens_neighbourhoods_lags<-sens_neighbourhoods_lagsINmeters/sens_neighbourhoods_pixelWidth
sens_neighbourhoods_lags<-2*round((sens_neighbourhoods_lags+1)/2)-1
sens_bins_pixelWidth<-pixelWidth_real
sens_bins_lagsINmeters<-lagsINmeters_real
sens_bins_lags<-lags_real
sens_bins_bins=6
|
6faf6e2a772a6a007363d9933df9d686e95ecd99
|
a225f28de5ac8e3c47fd7b522957400d725b0ce4
|
/R/general_seq_fun.R
|
d19cda5e7a01d0f70f512d8b6ee8680aa5a3796a
|
[] |
no_license
|
cran/SeqBayesDesign
|
5f1b73109b73befb620811ad7c417d9c1cac5825
|
ccc2ee8ac2d904ab9f84fc06d6e597385f3901c1
|
refs/heads/master
| 2021-01-25T11:39:14.257819
| 2018-03-01T08:32:38
| 2018-03-01T08:32:38
| 123,413,990
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 37,348
|
r
|
general_seq_fun.R
|
#library(MASS)
### data input ###
#' Read experiment data from a CSV file or accept a pre-loaded data frame.
#'
#' Columns are coded as c(stress level, failure time, censored, weight);
#' "CFT.EC" data additionally carry Rval/Frequency/Angle columns.
#'
#' @param filename Path to a CSV file, or a data.frame already in memory.
#' @param exp.type Experiment type: "ALT" (accelerated life test) or
#'   "CFT.EC" (constant-amplitude fatigue test, Epaarachchi-Clausen).
#' @param ... Further arguments passed on to read.csv() when `filename`
#'   is a file path.
#' @return A data.frame: for "ALT" the four coded columns coerced to
#'   numeric; for "CFT.EC" the Angle converted from degrees to radians
#'   and a UTS indicator column appended.
#' @export
data.read <- function(filename, exp.type = c("ALT", "CFT.EC"), ...) {
  ### the file is coded that c(stress levels, failure time, censored, weight)
  # match.arg() validates exp.type and resolves the default vector to "ALT".
  exp.type <- match.arg(exp.type)
  # is.character()/is.data.frame() instead of class(x) == "...", which
  # fails for objects with more than one class.
  if (is.character(filename)) {
    res <- read.csv(file = filename, ...)
  } else if (is.data.frame(filename)) {
    res <- filename
  } else {
    stop("'filename' must be a file path or a data.frame", call. = FALSE)
  }
  if (exp.type == "ALT") {
    colName <- colnames(res)
    # Coerce the four coded columns to numeric, keeping the original names.
    res[, 1] <- as.numeric(res[, 1])
    res[, 2] <- as.numeric(res[, 2])
    res[, 3] <- as.numeric(res[, 3])
    res[, 4] <- as.numeric(res[, 4])
    colnames(res) <- colName
    rownames(res) <- seq_len(nrow(res))
  } else if (exp.type == "CFT.EC") {
    colName <- colnames(res)
    # Angle arrives in degrees; the downstream model works in radians.
    res$Angle <- res$Angle * pi / 180
    # Rows with failure time 1 flag static-strength (UTS) tests
    # -- presumably; confirm against the data-coding convention.
    res <- cbind(res, as.numeric(res[, 2] == 1))
    colnames(res) <- c(colName, "UTS")
    rownames(res) <- seq_len(nrow(res))
  }
  return(res)
}
### set data ###
#' Assemble a raw data frame into the analysis list used by the
#' sequential-design routines in this file.
#'
#' @param dat Data frame as returned by \code{data.read}: columns coded
#'   as (stress level, failure time, censoring indicator, weight), plus
#'   Rval/Frequency/Angle/UTS columns for exp.type = "CFT.EC".
#' @param exp.type Experiment type: "ALT" or "CFT.EC".
#' @param use.level Use-condition stress level: either a single value, or
#'   a list/pair \code{(use, wts)} giving a weighted use-level profile.
#' @param max.level Highest (reference) stress level for standardization
#'   (ALT); for "CFT.EC" it is replaced by the mean UTS when UTS rows exist.
#' @param Cen.time Type-I censoring time of the experiment.
#' @param mu.fun Stress-life relationship used for standardization.
#' @param show If TRUE, print the assembled data and the max level.
#' @return (invisibly) A list with the renamed data, response/censoring/
#'   weight vectors, observation counts and experiment constants consumed
#'   by the other functions in this file.
#' @export
dat.setting <- function(dat, exp.type = c("ALT", "CFT.EC"),
                        use.level, max.level = NULL, Cen.time,
                        mu.fun = c("linear", "Arrhenius", "Inv.P", "E-C"),
                        show=TRUE) {
  res <- list()
  colName <- colnames(dat)
  if (exp.type == "ALT") {
    res$data <- dat
    rownames(res$data) <- 1:length(res$data[, 1])
    # Standardize the first two column names; the rest keep their names.
    colnames(res$data) <- c("x", "Y", colName[3:length(colName)])
    res$x <- as.numeric(dat[, 1])
    res$Y <- as.numeric(dat[, 2])
    res$Censored <- dat[, 3]
    res$wts <- dat[, 4]
    # NOTE(review): relies on the 3rd column being literally named
    # "Censored" (dat$Censored), while the lines above address columns by
    # position -- confirm the expected input naming.
    res$Ncen <- sum(dat$Censored*dat[, 4])
    # Total weighted observation count, skipping rows with missing weight.
    res$Nobs <- sum(dat[is.na(dat[, 4]) == 0, 4])
    res$max.level <- as.numeric(max.level)
    res$Cen.time <- as.numeric(Cen.time)
  } else if(exp.type == "CFT.EC") {
    if (sum(dat$UTS == 1) != 0) {
      # Mean static strength (UTS) per angle, from the rows flagged UTS == 1.
      sigmau <- as.numeric(tapply(X = dat[dat$UTS == 1, 1],
                                  INDEX = dat$Angle[dat$UTS == 1], FUN= mean))
    } else {
      sigmau <- max.level
    }
    # Drop the static-strength rows; only fatigue observations remain.
    dat <- dat[dat$UTS != 1,]
    res$data <- dat
    rownames(res$data) <- 1:length(res$data[, 1])
    colnames(res$data) <- c("x", "Y", colName[3:length(colName)])
    res$x <- dat[, 1]
    res$Y <- dat[, 2]
    res$Censored <- dat[, 3]
    res$wts <- dat[, 4]
    res$sigmau <- sigmau
    # Stress ratio R; as.character() first in case Rval arrived as a factor.
    res$Rval <- as.numeric(as.character(dat$Rval))[1]
    # Modal test frequency (most frequent value in the Frequency column).
    b <- table(dat$Frequency)
    res$Freq.mod <- as.numeric(names(which.max(b)))
    res$Angle <- dat$Angle[1]
    res$Freq <- dat$Frequency
    res$Ncen <- sum(res$Censored)
    res$Nobs <- sum((dat$UTS != 1)*dat[, 4])
    res$max.level <- sigmau
    res$Cen.time <- Cen.time
  }
  # Normalize use.level into a (use, wts) profile; a scalar gets weight 1.
  if (length(use.level) == 1) {
    res$use.level$use <- use.level
    res$use.level$wts <- 1
  } else {
    res$use.level$use <- use.level[[1]]
    res$use.level$wts <- use.level[[2]]
  }
  res$mu.fun <- mu.fun
  res$exp.type <- exp.type
  if (show == TRUE) {
    print(list(data = res$data, max.level = res$max.level))}
  invisible(res)
}
################################################################################
#' Map a test stress level onto the standardized scale implied by the use
#' level and the maximum level under the given stress-life model.
#'
#' @param use.level List with element \code{use}, the use-condition level.
#' @param max.level Maximum (reference) stress level.
#' @param test.level Level(s) to standardize (vectorized).
#' @param mu.fun Stress-life relationship.
#' @return Standardized level(s): 0 at the use level, 1 at the max level
#'   (for "E-C", simply the fraction of max.level).
#' @export
std.level <- function(use.level, max.level, test.level,
                      mu.fun = c("linear", "Arrhenius", "Inv.P", "E-C")) {
  base <- use.level$use
  if (mu.fun == "linear") {
    out <- (test.level - base) / (max.level - base)
  } else if (mu.fun == "Arrhenius") {
    # Interpolate on the reciprocal absolute-temperature (Kelvin) scale.
    kelvin <- 273.15
    inv.test <- 1 / (test.level + kelvin)
    inv.use <- 1 / (base + kelvin)
    inv.max <- 1 / (max.level + kelvin)
    out <- (inv.test - inv.use) / (inv.max - inv.use)
  } else if (mu.fun == "Inv.P") {
    # Inverse-power relationship: interpolate on the log scale.
    out <- (log(test.level) - log(base)) / (log(max.level) - log(base))
  } else if (mu.fun == "E-C") {
    out <- test.level / max.level
  }
  out
}
################################################################################
# Inverse of std.level(): recover the original-scale stress level that
# corresponds to a standardized level st.level under the given
# stress-life model.
ori.level <- function(use.level, max.level, st.level,
                      mu.fun = c("linear", "Arrhenius", "Inv.P", "E-C")) {
  base <- use.level$use
  if (mu.fun == "linear") {
    out <- base + st.level * (max.level - base)
  } else if (mu.fun == "Arrhenius") {
    # Interpolate on the reciprocal-Kelvin scale, then convert back to
    # degrees Celsius.
    kelvin <- 273.15
    inv.use <- 1 / (base + kelvin)
    inv.max <- 1 / (max.level + kelvin)
    out <- 1 / (st.level * (inv.max - inv.use) + inv.use) - kelvin
  } else if (mu.fun == "Inv.P") {
    out <- exp(log(base) + st.level * (log(max.level) - log(base)))
  } else if (mu.fun == "E-C") {
    out <- st.level * max.level
  }
  out
}
################################################################################
# Location parameter mu of the log-lifetime distribution at the given
# standardized stress level(s).
#
# For "linear", "Arrhenius" and "Inv.P" the relationship is a straight
# line in the standardized level: mu = A + B * s. For "E-C"
# (Epaarachchi-Clausen fatigue model) mu is derived from the model
# constants A, B and the test conditions stored in `dat` (Rval, Freq,
# Angle).
std.stress.fun <- function(A, B, stlevels,
                           mu.fun = c("linear", "Arrhenius", "Inv.P", "E-C"),
                           dat) {
  if (mu.fun == "E-C") {
    # Stress ratio R; as.character() first in case Rval is a factor.
    stress.ratio <- as.numeric(as.character(dat$Rval))
    psi <- ifelse(stress.ratio <= 1, stress.ratio, 1 / stress.ratio)
    g.alpha <- 1.6 - psi * abs(sin(dat$Angle))
    inv.q <- 1 / stlevels
    load.term <- (inv.q - 1) * inv.q^(g.alpha - 1) * (1 - psi)^(-g.alpha)
    DD <- B * dat$Freq^B * load.term + A
    mu <- (log(DD) - log(A)) / B
  } else {
    mu <- A + B * stlevels
  }
  mu
}
################################################################################
#' Expected time on test E[min(T, Cen.time)] at each stress level.
#'
#' @param pars c(A, B, nu): stress-life intercept/slope and scale.
#' @param stlevels Standardized stress levels.
#' @param Cen.time Type-I censoring time.
#' @param model Lifetime model: "lnor" (log-normal) or "wei" (Weibull).
#' @param mu.fun Stress-life relationship (passed to std.stress.fun).
#' @param data Experiment-constants list, required when mu.fun = "E-C".
#' @return Numeric vector of expected (censored) test times, one per level.
ExpT <- function(pars, stlevels, Cen.time, model = c("lnor","wei"),
                 mu.fun = c("linear", "Arrhenius", "Inv.P", "E-C"), data) {
  model <- match.arg(model)
  A <- pars[1]
  B <- pars[2]
  nu <- pars[3]
  mu <- std.stress.fun(A, B, stlevels, mu.fun, data)
  expTime <- rep(NA_real_, length(stlevels))
  for (i in seq_along(stlevels)) {
    if (model == "lnor") {
      # E[T; T <= C] by quadrature, plus C * P(T > C) for the censored mass.
      pdf.int <- function(y) {
        y * dlnorm(y, mu[i], nu)
      }
      Time <- integrate(pdf.int, lower = 0, upper = Cen.time,
                        stop.on.error = FALSE)[[1]]
      Time <- Time + Cen.time * (1 - plnorm(Cen.time, mu[i], nu))
    } else {
      # Weibull branch was previously missing, so ExpT() returned NA for
      # model = "wei". Parameterization matches gen.data.EC():
      # shape = 1/nu, scale = exp(mu).
      shape <- 1 / nu
      scale <- exp(mu[i])
      pdf.int <- function(y) {
        y * dweibull(y, shape = shape, scale = scale)
      }
      Time <- integrate(pdf.int, lower = 0, upper = Cen.time,
                        stop.on.error = FALSE)[[1]]
      Time <- Time + Cen.time * (1 - pweibull(Cen.time, shape = shape,
                                              scale = scale))
    }
    expTime[i] <- Time
  }
  return(expTime)
}
################################################################################
# First and second partial derivatives of mu with respect to the model
# parameters, evaluated at the given standardized stress levels.
# Naming: the digits index the differentiated parameter(s), 1 = A, 2 = B
# (so mu12 is the mixed second derivative).
#
# NOTE(review): here gamma.alpha uses sin(data$Angle) while
# std.stress.fun() uses abs(sin(...)) -- confirm which form is intended.
mu.ders <- function(A, B, stlevels,
                    mu.fun = c("linear", "Arrhenius", "Inv.P", "E-C"), data) {
  if (mu.fun == "E-C") {
    phi.R <- ifelse(data$Rval <= 1, data$Rval, 1 / data$Rval)
    gamma.alpha <- 1.6 - phi.R * sin(data$Angle)
    ss <- 1 / stlevels
    cc <- (ss - 1) * ss^(gamma.alpha - 1) * (1 - phi.R)^(-gamma.alpha)
    ff <- data$Freq
    # DD, EE, FF: the E-C damage term and its first/second B-derivatives.
    DD <- B * ff^B * cc + A
    EE <- B * ff^B * log(ff) * cc + ff^B * cc
    FF <- B * ff^B * (log(ff))^2 * cc + 2 * ff^B * log(ff) * cc
    res <- list(
      mu1  = 1 / (B * DD) - 1 / (A * B),
      mu2  = log(A) / (B^2) - log(DD) / (B^2) + EE / (B * DD),
      mu11 = 1 / (A^2 * B) - 1 / (B * DD^2),
      mu12 = 1 / (A * B^2) - 1 / (B^2 * DD) - EE / (B * DD^2),
      mu22 = -2 * log(A) / B^3 + 2 * log(DD) / B^3 - 2 * EE / (B^2 * DD) -
        EE^2 / (B * DD^2) + FF / (B * DD)
    )
  } else {
    # Linear-in-level relationships: mu = A + B * s, so the derivatives
    # are constant in the parameters and all second derivatives vanish.
    res <- list(mu1 = 1, mu2 = stlevels, mu11 = 0, mu12 = 0, mu22 = 0)
  }
  return(res)
}
################################################################################
#' Negative log-likelihood of the (possibly censored) lifetime data.
#'
#' Parameters are supplied on an unconstrained scale so optim() can
#' search all of R^3:
#'   * linear/Arrhenius/Inv.P: pars = c(A, log(-B), log(nu)), B < 0;
#'   * E-C:                    pars = log(c(A, B, nu)).
#'
#' @param dat Data list produced by dat.setting().
#' @param pars Transformed parameter vector (see above).
#' @param model "lnor" (log-normal) or "wei" (Weibull, i.e. SEV on the
#'   log scale).
#' @param mu.fun Stress-life relationship.
#' @return Minus the weighted log-likelihood (scalar, to be minimized).
mnius.loglikelihood <- function(dat, pars, model = c("lnor","wei"),
                                mu.fun = c("linear", "Arrhenius", "Inv.P",
                                           "E-C")) {
  if(mu.fun == "linear"| mu.fun == "Arrhenius" | mu.fun == "Inv.P") {
    A <- pars[1]
    B <- -exp(pars[2])   # slope constrained negative
    nu <- exp(pars[3])   # scale constrained positive
    stlevel <- std.level(dat$use.level, dat$max.level, dat$data[, 1], mu.fun)
    mu <- std.stress.fun(A, B, stlevel, mu.fun, dat)
    wts <- dat$data[, 4]
  } else if(mu.fun == "E-C") {
    A <- exp(pars[1])
    B <- exp(pars[2])
    nu <- exp(pars[3])
    stlevel <- std.level(dat$use.level, dat$max.level, dat$data[, 1], mu.fun)
    mu <- std.stress.fun(A, B, stlevel, mu.fun, dat)
    wts <- rep(1, dat$Nobs)
  }
  cen <- dat$Censored
  y <- dat$data[, 2]
  yy <- log(y)
  if (model=="lnor") {
    # Clamp the cdf away from 0/1 and the density away from 0 so the logs
    # below stay finite for extreme observations.
    normcdf <- ifelse(pnorm(yy, mu, nu) >= 0.99999999, 0.99999999,
                      pnorm(yy, mu, nu))
    normcdf <- ifelse(normcdf <= 1-0.99999999, 1-0.99999999, normcdf)
    normpdf <- ifelse(dnorm(yy, mu, nu)/(y) <= 1-0.99999999, 1-0.99999999,
                      dnorm(yy, mu, nu)/(y))
    # Exact observations contribute log density, censored ones log survival.
    ll <- wts*((log(normpdf))*(1-cen)+(log(1-normcdf))*cen)
    res <- (-1)*sum(ll)
  } else if(model=="wei") {
    zz <- (yy-mu)/nu
    # Simplified algebraically from log(exp(zz - exp(zz))) and
    # log(exp(-exp(zz))): the exp() could underflow to 0 and turn the
    # log-likelihood into log(0) = -Inf for extreme residuals.
    ll <- wts*((zz - exp(zz) - log(nu) - yy)*(1-cen) +
                 (-exp(zz))*cen)
    res <- (-1)*sum(ll)
  }
  return(res)
}
################################################################################
#' Maximum-likelihood estimation of (A, B, nu) via optim().
#'
#' Optimization runs on the unconstrained scale expected by
#' mnius.loglikelihood(); the returned estimates are back-transformed to
#' the natural scale.
#'
#' @param dat Data list from dat.setting().
#' @param model "lnor" or "wei".
#' @param mu.fun Stress-life relationship.
#' @param initial Natural-scale starting values c(A, B, nu): B < 0 for the
#'   linear-type relationships, all components positive for "E-C".
#' @param method,hessian,... Passed on to optim().
#' @return list(est.par, log.likelihood, vcov) where vcov is the
#'   generalized inverse of the numerical hessian (NULL if hessian = FALSE).
#' @export
par.MLE <- function(dat, model, mu.fun, initial, method = "BFGS",
                    hessian = TRUE,...) {
  neg.ll <- function(p) {
    mnius.loglikelihood(dat, p, model, mu.fun)
  }
  if (mu.fun == "linear" | mu.fun == "Arrhenius" | mu.fun == "Inv.P") {
    # Transform starting values to the unconstrained scale, optimize,
    # then map the optimum back: B = -exp(.), nu = exp(.).
    start <- c(initial[1], log(-initial[2]), log(initial[3]))
    fit <- optim(start, neg.ll, method = method, hessian = hessian,...)
    est <- c(fit$par[1], -exp(fit$par[2]), exp(fit$par[3]))
  } else if (mu.fun == "E-C") {
    # All three E-C parameters are positive; optimize on the log scale.
    start <- log(initial)
    fit <- optim(start, neg.ll, method = method, hessian = hessian,...)
    est <- exp(fit$par)
  }
  vcov <- if (hessian) ginv(fit$hessian) else NULL
  return(list(est.par = est, log.likelihood = -fit$value, vcov = vcov))
}
########## Choose responese madel ############
#' Pick the better-fitting lifetime model (log-normal vs Weibull) by
#' maximized log-likelihood and return the winning MLE fit.
#'
#' @param dat Data list from dat.setting().
#' @param initials Natural-scale starting values c(A, B, nu) for par.MLE().
#' @return list(lifetime.model, est.par, log.likelihood) of the winner.
#' @export
lifetime.model <- function(dat, initials) {
  fit.lnor <- par.MLE(dat, "lnor", mu.fun = dat$mu.fun, initials)
  fit.wei <- par.MLE(dat, "wei", mu.fun = dat$mu.fun, initials)
  # Strictly larger log-likelihood wins; Weibull takes ties.
  if (fit.lnor$log.likelihood > fit.wei$log.likelihood) {
    label <- "Log-normal"
    winner <- fit.lnor
  } else {
    label <- "Weibull"
    winner <- fit.wei
  }
  list(lifetime.model = label,
       est.par = winner$est.par,
       log.likelihood = winner$log.likelihood)
}
################################################################################
# Per-observation expected Fisher information contributions at a single
# standardized stress level under Type-I censoring at Cen.time.
#
# Returns the unique elements c(f11, f22, f33, f12, f13, f23) of the
# symmetric 3x3 information matrix for (A, B, nu), up to the common
# 1/nu^2 factor applied later in FImatrix(). Each element is the sum of
# an exact-failure part (integral over the standardized log-life z up to
# the censoring point zm; "a" suffix) and a censored part (mass beyond
# zm; "b" suffix).
ind.FIM <- function(pars, Cen.time=2000000, dat, stlevel, model, mu.fun) {
  # Distribution kernels for the standardized log-lifetime:
  # "lnor" -> standard normal, "wei" -> smallest extreme value.
  # phi1Phi is the hazard phi/(1-Phi); dphi1Phi its derivative;
  # dphiphi/ddphiphi are d/dz log phi(z) and its derivative.
  if(model == "lnor") {
    Phi <- pnorm
    phi <- dnorm
    phi1Phi <- function(z) {
      phi(z)/(1-Phi(z))
    }
    dphi1Phi <- function(z) {
      (-z*phi(z)*(1-Phi(z))+phi(z)*phi(z))/(1-Phi(z))^2
    }
    dphiphi <- function(z) {
      return(-z)
    }
    ddphiphi <- function(z) {
      return(-1)
    }
  }else if(model == "wei") {
    Phi <- function(z) {
      return(1-exp(-exp(z)))
    }
    phi <- function(z) {
      return(exp(z-exp(z)))
    }
    phi1Phi <- function(z) {
      return(exp(z))
    }
    dphi1Phi <- function(z) {
      return(exp(z))
    }
    dphiphi <- function(z) {
      return(1-exp(z))
    }
    ddphiphi <- function(z) {
      return(-exp(z))
    }
  }
  #############
  A <- pars[1]
  B <- pars[2]
  nu <- pars[3]
  mu <- std.stress.fun(A, B, stlevel, mu.fun, dat)
  mu.tmp <- mu.ders(A, B, stlevel, mu.fun, dat)
  mu1 <- mu.tmp$mu1
  mu2 <- mu.tmp$mu2
  mu11 <- mu.tmp$mu11
  mu12 <- mu.tmp$mu12
  mu22 <- mu.tmp$mu22
  ######
  # Standardized censoring point on the log scale.
  zm <- (log(Cen.time) - mu)/nu
  #print(zm)
  Phizm1 <- try(1 - Phi(zm))
  # Censored-part terms vanish when essentially no unit survives past
  # Cen.time (or when the survival probability could not be evaluated).
  # Fixed idiom: inherits() with short-circuit || instead of comparing
  # class() with == and evaluating both sides of an element-wise |.
  if (inherits(Phizm1, "try-error") || Phizm1 < 1e-8) {
    f11b <- 0
    f22b <- 0
    f33b <- 0
    f12b <- 0
    f13b <- 0
    f23b <- 0
  } else {
    f11b <- mu1^2*dphi1Phi(zm)*Phizm1-nu*mu11*phi(zm)
    f22b <- mu2^2*dphi1Phi(zm)*Phizm1-nu*mu22*phi(zm)
    f33b <- (2*zm*phi1Phi(zm)+zm^2*dphi1Phi(zm))*Phizm1
    f12b <- mu1*mu2*dphi1Phi(zm)*Phizm1-nu*mu12*phi(zm)
    f13b <- mu1*(phi1Phi(zm)+dphi1Phi(zm)*zm)*Phizm1
    f23b <- mu2*(phi1Phi(zm)+dphi1Phi(zm)*zm)*Phizm1
  }
  # Integrands of the exact-failure parts of each information element.
  f11a.int <- function(z) {
    res <- (-1)*mu1^2*ddphiphi(z)*phi(z)+nu*mu11*dphiphi(z)*phi(z)
    return(res)
  }
  f22a.int <- function(z) {
    res <- (-1)*mu2^2*ddphiphi(z)*phi(z)+nu*mu22*dphiphi(z)*phi(z)
    return(res)
  }
  f33a.int <- function(z) {
    res <- (-1)*(1+2*z*dphiphi(z)+z^2*ddphiphi(z))*phi(z)
    return(res)
  }
  f12a.int <- function(z) {
    res <- (-1)*mu1*mu2*ddphiphi(z)*phi(z)+nu*mu12*dphiphi(z)*phi(z)
    return(res)
  }
  f13a.int <- function(z) {
    res <- (-1)*mu1*(dphiphi(z)+z*ddphiphi(z))*phi(z)
    return(res)
  }
  f23a.int <- function(z) {
    res <- (-1)*mu2*(dphiphi(z)+z*ddphiphi(z))*phi(z)
    return(res)
  }
  f11a <- integrate(f11a.int, lower=-Inf, upper=zm,
                    rel.tol = .Machine$double.eps^0.5)[[1]]
  f22a <- integrate(f22a.int, lower=-Inf, upper=zm,
                    rel.tol = .Machine$double.eps^0.5)[[1]]
  f33a <- integrate(f33a.int, lower=-Inf, upper=zm,
                    rel.tol = .Machine$double.eps^0.5)[[1]]
  f12a <- integrate(f12a.int, lower=-Inf, upper=zm,
                    rel.tol = .Machine$double.eps^0.5)[[1]]
  f13a <- integrate(f13a.int, lower=-Inf, upper=zm,
                    rel.tol = .Machine$double.eps^0.5,
                    stop.on.error = FALSE)[[1]]
  f23a <- integrate(f23a.int, lower=-Inf, upper=zm,
                    rel.tol = .Machine$double.eps^0.5)[[1]]
  f11 <- f11a+f11b
  f22 <- f22a+f22b
  f33 <- f33a+f33b
  f12 <- f12a+f12b
  f13 <- f13a+f13b
  f23 <- f23a+f23b
  res <- c(f11, f22, f33, f12, f13, f23)
  return(res)
}
################################################################################
# Total Fisher information matrix for (A, B, nu), accumulated over the
# standardized stress levels with allocation weights pi_i = c(wts, 1) / N,
# where N counts the historical observations plus n.new planned runs.
#
# NOTE(review): the Cen.time and mu.fun arguments are ignored here -- the
# values carried inside `dat` are used instead. Confirm this is intended.
FImatrix <- function(pars, Cen.time, dat, stlevel, model, mu.fun, freq, n.new){
  nu <- pars[3]
  N <- dat$Nobs + n.new ### Sequential needed
  alloc <- c(dat$wts, 1)/N ### wts
  info <- matrix(0, 3, 3)
  dat.i <- dat
  for (i in seq_along(stlevel)) {
    dat.i$Freq <- freq[i]
    contrib <- ind.FIM(pars, dat.i$Cen.time, dat.i, stlevel[i], model,
                       dat.i$mu.fun)
    # Assemble the symmetric 3x3 matrix from its unique elements and apply
    # the common 1/nu^2 scaling.
    elem <- (1/nu^2)*matrix(c(contrib[1], contrib[4], contrib[5],
                              contrib[4], contrib[2], contrib[6],
                              contrib[5], contrib[6], contrib[3]), 3, 3)
    info <- info + alloc[i]*elem
  }
  N*info
}
###################avar.fun###################
# Monte-Carlo evaluation of the sequential design criteria.
#
# For each posterior draw of (A, B, nu) in `par.draw` and each candidate
# next stress level, computes
#   * the log-determinant of the expected Fisher information of the
#     augmented design (D-criterion), and
#   * the weighted asymptotic variance of the `quantile` log-lifetime over
#     the use-level pattern (C-criterion),
# then averages both over the draws. With an empty `candidate`, the
# current design is evaluated as-is (no augmentation).
#
# Sinv is the prior information matrix added to the data information.
# NOTE(review): when the data hold no observed level (dat$x[1] is NA), F1
# is set to Sinv and Sinv is added again when forming seq.Fmat, so the
# prior information counts twice on that path -- confirm intent.
Eval.seq.criteria <- function(candidate, par.draw, hist.stlevel, dat,
                              use.pattern, quantile, Sinv, model, mu.fun) {
  # par.draw may arrive flattened; reshape to 3 rows x `size` draws.
  size <- length(par.draw)/3
  par.draw <- matrix(par.draw, ncol=size)
  # One row per candidate (or a single row when evaluating the current
  # design), one column per posterior draw.
  if(length(candidate) == 0) {
    DETer <- matrix(rep(NA, 1*size), 1, size)
    TAVar <- matrix(rep(NA, 1*size), 1, size)
  } else {
    DETer <- matrix(rep(NA,length(candidate)*size), length(candidate), size)
    TAVar <- matrix(rep(NA,length(candidate)*size), length(candidate), size)
  }
  for (n.draw in 1:size) {
    A <- par.draw[1, n.draw]
    B <- par.draw[2, n.draw]
    if(length(candidate) == 0){
      # No augmentation: information of the historical design only.
      seq.Fmat <- FImatrix(par.draw[, n.draw], dat$Cen.time, dat, hist.stlevel,
                           model, mu.fun, dat$Freq, 0)
      DETer[1, n.draw] <- log(det(seq.Fmat))
      dat1 <- dat
      dat1$Freq <- dat1$Freq.mod
      der.mu <- mu.ders(A, B, use.pattern$use, mu.fun, dat1)
      # Gradient of the quantile log-life w.r.t. (A, B, nu), one row per
      # use level; the nu-component is the standard normal quantile.
      cvec <- matrix(c(der.mu$mu1,der.mu$mu2,
                       rep(qnorm(quantile, mean = 0, sd = 1),
                           length(use.pattern$use))), length(use.pattern$use), 3)
      app.var <- ginv(seq.Fmat)
      Vi <- c()
      for(row in 1:length(use.pattern$use)) {
        # Delta-method variance c' F^{-1} c at this use level.
        Vi <- c(Vi, sum(cvec[row,] %*% app.var*cvec[row,]))
      }
      wavar <- sum(use.pattern$wts*Vi)
      # A non-positive variance indicates a numerically bad draw; drop it.
      TAVar[1, n.draw] <- ifelse(wavar>0, wavar, NA)
    }else {
      if(is.na(dat$x[1]) == 1) {
        # No observed data yet: the prior supplies all information.
        F1=Sinv
      } else {
        F1 <- FImatrix(par.draw[, n.draw], dat$Cen.time, dat, hist.stlevel,
                       model, mu.fun, dat$Freq, 1)
        #F1=Seq.Fmat.CD(A, B, nu, Nm=2000000, dat1.NEW, q.vec=dat1.NEW$x/dat1.NEW$sigmau, dat1.NEW$Freq)
      }
      for (n.new in 1:length(candidate)) {
        seq.Fmat <- c()
        # Information added by one new run at this candidate level.
        F2 <- FImatrix(par.draw[, n.draw], dat$Cen.time, dat, candidate[n.new],
                       model, mu.fun, dat$Freq.mod, 1)
        #F2=Seq.Fmat.CD(A, B, nu, Nm=2000000, dat1.NEW, candidate[n.new], dat1.NEW$Freq.mod)
        #seq.Fmat <- F1+F2+ginv(Sinv)
        seq.Fmat=F1+F2+Sinv
        DETer[n.new, n.draw] <- log(det(seq.Fmat))
        dat1 <- dat
        dat1$Freq <- dat1$Freq.mod
        der.mu <- mu.ders(A, B, use.pattern$use, mu.fun, dat1)
        cvec <- matrix(c(der.mu$mu1,der.mu$mu2,
                         rep(qnorm(quantile, mean = 0, sd = 1),
                             length(use.pattern$use))),
                       length(use.pattern$use), 3)
        app.var <- ginv(seq.Fmat)
        Vi <- c()
        for(row in 1:length(use.pattern$use)) {
          Vi <- c(Vi, sum(cvec[row,] %*% app.var*cvec[row,]))
        }
        wavar <- sum(use.pattern$wts*Vi)
        TAVar[n.new, n.draw] <- ifelse(wavar>0, wavar, NA)
      }
    }
  }
  # Average the criteria over the posterior draws (NA draws excluded).
  res.D <- rowMeans(DETer, na.rm=T)
  res.AVar <- rowMeans(TAVar, na.rm=T)
  return(list(res.D = res.D, res.AVar = res.AVar, TAVar = TAVar))
}
### next point #####
# Choose the next test level from `candidate` by D-, C-, or dual-optimality.
#
# lambda = 1 gives pure D-optimality (maximize the average log-determinant
# of the augmented information matrix), lambda = 0 pure C-optimality
# (minimize the weighted asymptotic variance at the use conditions), and
# any other value a normalized compromise of the two.
next.point <- function(candidate, par.sample, dat, model, quantile = 0.1, lambda, prior, Sinv){
  #Sinv=diag(c(12/(prior[2]-prior[1])^2,12/(prior[4]-prior[3])^2,(prior[5]-1)^2*(prior[5]-2)/prior[6]^2))
  hist.stlevel <- as.vector(t(std.level(dat$use.level, dat$max.level,
                                        dat$data[1], dat$mu.fun)))
  crit <- Eval.seq.criteria(candidate, par.sample, hist.stlevel, dat,
                            dat$use.level, quantile, Sinv, model, dat$mu.fun)
  # Summary table: one column per candidate, one row per criterion.
  summary.tab <- matrix(rep(NA, 3*length(candidate)), 3, length(candidate))
  summary.tab[1,] <- crit$res.D
  summary.tab[2,] <- crit$res.AVar
  row.names(summary.tab) <- c("D-optimality", "C-optimality", "Dual-optimality")
  colnames(summary.tab) <- as.factor(candidate)
  best.D <- which(crit$res.D == max(crit$res.D))
  best.C <- which(crit$res.AVar == min(crit$res.AVar))
  cri.D <- crit$res.D[best.D]
  cri.C <- crit$res.AVar[best.C]
  # Both dual components are normalized so that larger is better.
  dual <- lambda*(crit$res.D)/cri.D + (1-lambda)*cri.C/(crit$res.AVar)
  summary.tab[3,] <- dual
  best.dual <- which(dual == max(dual))
  cri.dual <- dual[best.dual]
  if (lambda == 1) {
    pick <- data.frame(next.point = candidate[best.D], log.det = cri.D,
                       avar = crit$res.AVar[best.D],
                       row.names = "D-optimality")
  } else if (lambda == 0) {
    pick <- data.frame(next.point = candidate[best.C],
                       log.det = crit$res.D[best.C],
                       avar = cri.C,
                       row.names = "C-optimality")
  } else {
    pick <- data.frame(next.point = candidate[best.dual],
                       log.det = crit$res.D[best.dual],
                       avar = crit$res.AVar[best.dual],
                       row.names = "Dual-optimality")
  }
  list(next.point = pick, eval = summary.tab)
}
### Generate data #####
# Simulate one new observation at the chosen standardized stress level
# from the "true" parameters and append it to the historical data.
#
# partrue = c(A, B, nu). The lifetime is drawn from the log-normal
# (rlnorm(mu, nu)) or Weibull (shape 1/nu, scale exp(mu)) model, Type-I
# censored at hist.data$Cen.time, appended to the data, and the whole set
# is re-packaged with dat.setting(). The appended row is also stored in
# the $new.data element of the returned list.
gen.data.EC <- function(partrue, st.next.design, hist.data, model, mu.fun, exp.type)
{
  if(exp.type == "CFT.EC"){
    # Evaluate mu at the modal test frequency.
    hist.data$Freq <- hist.data$Freq.mod
    stress.fun.NEW <- std.stress.fun(partrue[1],partrue[2], st.next.design,
                                     mu.fun, hist.data)
    if(model == "lnor") {
      next.cycle.NEW <- rlnorm(1, stress.fun.NEW, partrue[3])
    }else if(model == "wei") {
      next.cycle.NEW <- rweibull(1, 1/partrue[3], exp(stress.fun.NEW))
    }
    # Apply Type-I censoring at the experiment's censoring time.
    next.cen.NEW <- ifelse(next.cycle.NEW > hist.data$Cen.time, 1, 0)
    next.cycle.NEW=ifelse(next.cycle.NEW > hist.data$Cen.time,
                          hist.data$Cen.time, next.cycle.NEW)
    # NOTE(review): the new row is appended by position as
    # (stress, cycles, censored, weight, Rval, Freq, Angle, UTS) -- confirm
    # this matches the column order of hist.data$data.
    data.NEW <- rbind(hist.data$data, c(st.next.design*hist.data$sigmau, next.cycle.NEW, next.cen.NEW, 1, hist.data$Rval, hist.data$Freq.mod, hist.data$Angle, 0))
    # Drop placeholder rows carrying NA stress levels.
    data.NEW <- data.NEW[!is.na(data.NEW[, 1]),]
    dat1.NEW <- dat.setting(data.NEW, exp.type, hist.data$use.level, hist.data$max.level, hist.data$Cen.time, mu.fun, show = FALSE)
    dat1.NEW$new.data <- data.NEW[length(hist.data$data[,1]) + 1,]
    cat("stlevel is ", st.next.design, "\n")
    cat("Stress level is ", st.next.design*hist.data$sigmau, "\n")
  }else if(exp.type == "ALT") {
    stress.fun.NEW <- std.stress.fun(partrue[1],partrue[2], st.next.design, mu.fun, hist.data)
    if(model == "lnor") {
      next.cycle.NEW <- rlnorm(1, stress.fun.NEW, partrue[3])
    }else if(model == "wei") {
      next.cycle.NEW <- rweibull(1, 1/partrue[3], exp(stress.fun.NEW))
    }
    next.cen.NEW <- ifelse(next.cycle.NEW > hist.data$Cen.time, 1, 0)
    next.cycle.NEW=ifelse(next.cycle.NEW > hist.data$Cen.time,
                          hist.data$Cen.time, next.cycle.NEW)
    # Convert the standardized design level back to the original scale.
    ori <- ori.level(hist.data$use.level, hist.data$max.level, st.next.design, mu.fun)
    data.NEW <- rbind(hist.data$data, c(ori, next.cycle.NEW, next.cen.NEW, 1))
    data.NEW <- data.NEW[!is.na(data.NEW[, 1]),]
    dat1.NEW <- dat.setting(data.NEW, exp.type, hist.data$use.level,
                            hist.data$max.level, hist.data$Cen.time,
                            mu.fun, show = FALSE)
    dat1.NEW$new.data <- data.NEW[length(hist.data$data[,1]) + 1,]
    cat("stlevel is ", st.next.design, "\n")
    cat("Stress level is ", ori, "\n")
  }
  return(dat1.NEW)
}
###################
# Draw (approximately independent) posterior samples of (A, B, nu) by
# MCMC and thin them by the empirical autocorrelation.
#
# The proposal covariance is built from the inverse Fisher information at
# `partrue`. Sampling is delegated to TMCMC (E-C model) or TMCMCALT
# (linear-type models) -- both presumably defined elsewhere in the
# package; not visible in this file.
#
# After discarding the first 10% as burn-in, the chain is thinned by the
# first lag at which the autocorrelations of all three components drop
# below the (hard-coded) threshold ci = 0.01.
# NOTE(review): the first component's acf is compared without abs() while
# the other two use abs() -- confirm whether that asymmetry is intended.
MCMC.draw.EC <- function(dat, partrue, n.int, model, mu.fun, prior, priorDis,
                         L.lag=200, transp=0.5, show = TRUE) {
  initials <- c(partrue[1], partrue[2], partrue[3])
  print(initials)
  stlevel <- std.level(dat$use.level, dat$max.level, dat$data[,1], mu.fun)
  if(mu.fun == "E-C"){
    FF <- FImatrix(initials, dat$Cen.time, dat, stlevel, model, mu.fun, dat$Freq, 0)
    # Jacobian of the (log A, B^transp) reparameterization used to map the
    # (A, B) information into the sampler's proposal scale.
    trans <- matrix(c(-1/initials[1], 0, 1/initials[2], transp*initials[2]^(transp-1)),2,2)
    COV <- ginv(FF)[1:2,1:2]
    Cov <- trans %*% COV %*% t(trans)
    draw1 <- TMCMC(dat, n.int, initials, model, mu.fun, Cov, prior, priorDis, transp)
    draws <- draw1$par
    acp <- c(mean(draw1$acp[1,]), mean(draw1$acp[2,]))
  }else {
    FF <- FImatrix(initials, dat$Cen.time, dat, stlevel, model, mu.fun,
                   rep(0, length(stlevel)), n.new = 0)
    Cov <- ginv(FF)
    draw1 <- TMCMCALT(dat, n.int, initials, model, mu.fun, Cov, prior, priorDis, 0.1)
    draws <- draw1$par
    acp <- c(mean(draw1$acp[1,]), mean(draw1$acp[2,]), mean(draw1$acp[3,]))
  }
  if(show == TRUE){
    par(mfrow=c(1,3))
  }
  # Autocorrelation of each component after a 10% burn-in.
  yy1 <- acf(draws[1, (n.int/10):n.int], L.lag, plot=show, main="A")
  yy2 <- acf(draws[2, (n.int/10):n.int], L.lag, plot=show, main="B")
  yy3 <- acf(draws[3, (n.int/10):n.int], L.lag, plot=show, main="nu")
  #ci=qnorm((1 + 0.95)/2)/sqrt(yy1$n.used)
  ci <- 0.01
  aa <- c(1:(L.lag+1))
  # Thinning lag: smallest lag at which every component's acf is below ci,
  # capped at L.lag.
  lag <- max(c(min(c(L.lag, aa[yy1$acf < ci])), min(c(L.lag, aa[abs(yy2$acf) < ci])),
               min(c(L.lag, aa[abs(yy3$acf) < ci]))))
  pts <- seq(n.int/10, n.int, lag)
  par.draw1 <- draws[, pts]
  if(show == TRUE){
    par(mfrow=c(1, 3))
  }
  # Diagnostic acf of the thinned chain.
  yy1 <- acf(par.draw1[1, ], L.lag/2, plot=show, main="A")
  yy2 <- acf(par.draw1[2, ], L.lag/2, plot=show, main="B")
  yy3 <- acf(par.draw1[3, ], L.lag/2, plot=show, main="nu")
  cov.matrix <- cov(t(par.draw1))
  invisible(list( lag = lag, ori.draws = draws, samples = par.draw1,
                  n.pts = length(pts), acp=acp, cov = cov.matrix))
}
###################
### Seq_Design ###
#' Simulate a full sequential Bayesian design.
#'
#' Starting from the historical data in `dat`, repeatedly (i) draws from
#' the posterior of (A, B, nu) by MCMC, (ii) picks the next test level by
#' the chosen optimality criterion (lambda = 1 D-, 0 C-, otherwise dual),
#' and (iii) simulates the response at that level from `partrue`, for
#' N.design added runs.
#'
#' @param N.design Number of sequential design points to add.
#' @param N.data Number of observations already in `dat` (0 = no data yet).
#' @param dat Data list from dat.setting().
#' @param prior Prior hyperparameters: (lo, hi) pairs for A and B under a
#'   uniform prior (or mean/sd under "normal"), and inverse-gamma
#'   (shape, scale) for nu^2.
#' @param priorDis Prior family for A and B: "uniform" or "normal".
#' @param n.int Number of MCMC iterations per stage.
#' @param n.sample Number of posterior draws used to evaluate criteria.
#' @param partrue True c(A, B, nu) used to simulate responses.
#' @param model Lifetime model: "lnor" or "wei".
#' @param mu.fun Stress-life relationship.
#' @param candidate Candidate standardized stress levels.
#' @param quantile Lifetime quantile targeted by the C-criterion.
#' @param lambda Criterion mixing weight (see above).
#' @param L.lag,transp,show.acf Passed to MCMC.draw.EC().
#' @return (invisibly) Bayesian estimates per stage, the criterion
#'   summary, the final data set, and (for N.design = 0) the draws.
#' @export
SBD.sim <- function(N.design, N.data, dat, prior, priorDis, n.int=100000,
                    n.sample=1000, partrue, model, mu.fun=dat$mu.fun,
                    candidate=seq(0.35, 0.75, 0.05), quantile=0.1, lambda=0,
                    L.lag=100, transp=0.5, show.acf = FALSE){
  use.pattern <- dat$use.level
  lag <- c()
  covInf <- list()
  # Prior information matrix: diag of the prior variances of (A, B) and of
  # nu (from the inverse-gamma on nu^2).
  if(priorDis == "uniform"){
    Sinv <- diag(c((prior[2]-prior[1])^2/12, (prior[4]-prior[3])^2/12,
                   prior[6]^2/((prior[5]-1)^2*(prior[5]-2))))
  }else if(priorDis == "normal"){
    Sinv <- diag(c(prior[2]^2, prior[4]^2, prior[6]^2/((prior[5]-1)^2*(prior[5]-2))))
  }
  Bayesian.NEW <- matrix(rep(NA,3*(N.design+1)),3,N.design+1) ### collecting Bayesian estimators
  datNEW <- dat
  hist.stlevel <- as.vector(t(std.level(datNEW$use.level, datNEW$max.level,
                                        datNEW$data[1], datNEW$mu.fun)))
  if (N.data != 0){
    # Posterior draws from the historical data; evaluate the criteria of
    # the current design as the stage-0 baseline.
    draws <- MCMC.draw.EC(datNEW, partrue, n.int, model, mu.fun, prior, priorDis,
                          L.lag, transp, show = show.acf)
    lag <- draws$lag
    covInf[[1]] <- draws$cov
    #print(lag)
    sample.draw <- sample(seq(1,length(draws$samples[1,])), n.sample)
    par.draw1 <- draws$samples[,sample.draw]
    hist.inf <- Eval.seq.criteria(c(), par.draw1, hist.stlevel, datNEW,
                                  use.pattern, quantile, Sinv, model, mu.fun)
    hist.D <- hist.inf$res.D
    hist.avar <- hist.inf$res.AVar
    Bayesian.NEW[,1] <- c(mean(par.draw1[1,]),mean(par.draw1[2,]),mean(par.draw1[3,]))
    print(hist.avar)
  }else{
    # No data yet: sample (A, B, nu) directly from the prior.
    # NOTE(review): hist.D / hist.avar are not set on this path, so the
    # final OPT.Summary[1, ] assignment below would fail -- confirm.
    par.draw1 <- matrix(rep(NA, 3*n.int), 3, n.int)
    par.draw1[1,] <- runif(n.int, prior[1], prior[2])
    par.draw1[2,] <- runif(n.int, prior[3], prior[4])
    par.draw1[3,] <- sqrt(1/rgamma(n.int, shape=prior[5], scale=prior[6]))
    Bayesian.NEW[,1] <- c(mean(par.draw1[1,]),mean(par.draw1[2,]),mean(par.draw1[3,]))
    draws <- par.draw1
    covInf[[1]] <- cov(t(par.draw1))
  }
  if(N.design == 0){
    invisible(list( hist.avar= hist.avar, ori.draw = draws, samples = par.draw1,
                    Bayes = Bayesian.NEW[,1], cov = covInf))
  }else {
    n.cand <- length(candidate)
    OPT.Summary <- matrix(rep(NA, (N.design+1)*3), N.design+1, 3)
    for (n.design in 1:N.design){
      cat('Design number: ', n.design, "\n")
      sample.draw <- sample(seq(1,length(par.draw1[1,])), n.sample)
      par.draw <- par.draw1[,sample.draw]
      if (N.data != 0) {
        nextpoint <- next.point(candidate, par.draw, datNEW, model, quantile,
                                lambda, prior, Sinv)
      }else {
        # NOTE(review): this no-data branch builds a placeholder row but
        # never assigns `nextpoint`, so the lines after the branch would
        # fail; it also passes data.NEW$exp.type (NULL) to dat.setting().
        # Appears unfinished -- confirm before relying on N.data = 0 here.
        if(dat$exp.type == "CFT.EC"){
          data.NEW <- matrix(c(NA, 0, datNEW$Rval, datNEW$Freq.mod, datNEW$Angle, NA, 0),1, 7)
          colnames(data.NEW) <- c("Stress", "Cycles", "Rval", "Freq", "Angle", "Censored" ,"UTS")
          data.NEW <- data.frame(data.NEW)
          dat1.NEW <- dat.setting(data.NEW, data.NEW$exp.type, datNEW$use.level,
                                  datNEW$sigmau, datNEW$Cen.time, datNEW$mu.fun)
          #Eval <- Eval_ObjFun(candidate, par.draw, datNEW, use.pattern, quantile, Sinv)
          #Eval <- Eval.seq.criteria(candidate, par.sample, hist.stlevel, dat1.NEW, dat$use.level, quantile, Sinv, dat$model, dat$mu.fun)
        }else{
          data.NEW <- matrix(c(NA, 0, datNEW$Rval, datNEW$Freq.mod, datNEW$Angle, NA, 0),1, 7)
        }
      }
      OPT.Summary[(n.design+1), 1:3] <- as.matrix(nextpoint$next.point)
      nextp <- as.numeric(nextpoint$next.point[1])
      cat(rownames(nextpoint$next.point), "\n")
      ########
      # Simulate the response at the chosen level and refit by MCMC.
      datNEW <- gen.data.EC(partrue, nextp, datNEW, model, datNEW$mu.fun, datNEW$exp.type)
      N.data <- N.data + 1
      ########
      draws <- MCMC.draw.EC(datNEW, partrue, n.int, model, mu.fun, prior, priorDis,
                            L.lag, transp, show = show.acf)
      covInf[[n.design+1]] = draws$cov
      lag <- c(lag, draws$lag)
      #print(draws$lag)
      par.draw1 <- draws$samples
      Bayesian.NEW[, n.design+1] <- c(mean(par.draw1[1,]), mean(par.draw1[2,]),
                                      mean(par.draw1[3,]))
      cat("Bayesian estimates:", round(Bayesian.NEW[, n.design+1], 4), "\n", "\n")
    }
    OPT.Summary[1,] <- c(NA, hist.D, hist.avar)
    row.names(OPT.Summary) <- 0:N.design
    colnames(OPT.Summary) <- c("next.point", "log.det", "avar")
    print(OPT.Summary)
    #cat("\n", "Data set:", "\n")
    #print(dat1.NEW$data)
    invisible(list(Bayes = Bayesian.NEW, Final.opt = OPT.Summary,
                   data = datNEW$data, hist.inf= c(hist.D, hist.avar)))
  }
}
### Experiment-type Seq_Design (no generate data)###
#' Recommend the next sequential design point for a real experiment
#' (no response simulation).
#'
#' Same machinery as SBD.sim(), but instead of generating a simulated
#' response it appends a placeholder row at the recommended level and
#' returns the augmented data for the experimenter to fill in.
#'
#' @param N.data Number of observations already in `dat` (0 = no data yet).
#' @param dat Data list from dat.setting().
#' @param prior,priorDis Prior hyperparameters and family (see SBD.sim).
#' @param n.int,n.sample MCMC iterations and number of evaluation draws.
#' @param initial Starting c(A, B, nu) for the MCMC.
#' @param model "lnor" or "wei"; mu.fun the stress-life relationship.
#' @param candidate Candidate standardized stress levels.
#' @param quantile,lambda Criterion settings (see SBD.sim).
#' @param L.lag,transp,show.acf Passed to MCMC.draw.EC().
#' @return (invisibly) Bayesian estimates, the criterion summary, and the
#'   data with the recommended (still unobserved) row appended.
#' @export
SBD.next.pt <- function(N.data, dat, prior, priorDis, n.int = 100000, n.sample = 1000,
                        initial, model, mu.fun=dat$mu.fun, candidate = seq( 0.35, 0.75, 0.05),
                        quantile=0.1, lambda=0, L.lag=100, transp=0.5, show.acf = FALSE) {
  use.pattern <- dat$use.level
  lag <- c()
  Bayesian.NEW <- matrix(rep(NA,3*1), 3, 1) ### collecting Bayesian estimators
  datNEW <- dat
  hist.stlevel <- as.vector(t(std.level(datNEW$use.level, datNEW$max.level,
                                        datNEW$data[1], datNEW$mu.fun)))
  if (N.data != 0){
    # Posterior draws from the existing data; the plug-in posterior mean
    # is used to score the current design as the baseline.
    draws <- MCMC.draw.EC(datNEW, initial, n.int, model, mu.fun, prior, priorDis,
                          L.lag, transp, show = show.acf)
    lag <- draws$lag
    par.draw <- draws$samples
    Bayesian.NEW <- c(mean(par.draw[1,]), mean(par.draw[2,]), mean(par.draw[3,]))
    # Eval.criteria's Sinv defaults to c(): no prior information added here.
    hist.inf <- Eval.criteria(Bayesian.NEW, hist.stlevel, datNEW, use.pattern,
                              quantile, model, mu.fun)
    hist.D = hist.inf$res.D
    hist.avar = hist.inf$res.AVar
  }else{
    # No data yet: draw (A, B, nu) from the prior.
    # NOTE(review): hist.D / hist.avar are not set on this path but are
    # used below in OPT.Summary[1, ] -- confirm the N.data = 0 path.
    par.draw <- matrix(rep(NA, 3*n.int), 3, n.int)
    par.draw[1,] <- runif(n.int, prior[1], prior[2])
    par.draw[2,] <- runif(n.int, prior[3], prior[4])
    par.draw[3,] <- sqrt(1/rgamma(n.int, shape=prior[5], scale=prior[6]))
    Bayesian.NEW <- c(mean(par.draw[1,]), mean(par.draw[2,]), mean(par.draw[3,]))
    draws <- par.draw
  }
  OPT.Summary <- matrix(rep(NA, 2*3), 2, 3)
  cat('Next Design:', "\n")
  sample.draw <- sample(seq(1,length(par.draw[1,])), n.sample)
  par.draw <- par.draw[, sample.draw]
  if (N.data != 0) {
    nextpoint <- next.point(candidate, par.draw, datNEW, model, quantile, lambda,
                            prior, draws$cov)
  }else{
    # NOTE(review): this branch never assigns `nextpoint`, so the lines
    # after it would fail; it also passes data.NEW$exp.type (NULL) to
    # dat.setting(). Appears unfinished -- confirm before use.
    data.NEW <- matrix(c(NA, 0, datNEW$Rval, datNEW$Freq.mod, datNEW$Angle, NA, 0), 1, 7)
    colnames(data.NEW) <- c("Stress", "Cycles", "Rval", "Freq", "Angle", "Censored" ,"UTS")
    data.NEW <- data.frame(data.NEW)
    datNEW <- dat.setting(data.NEW, data.NEW$exp.type, datNEW$use.level, datNEW$sigmau,
                          datNEW$Cen.time, datNEW$mu.fun)
    #Eval <- Eval_ObjFun(candidate, par.draw, datNEW, use.pattern, quantile, Sinv)
  }
  OPT.Summary[2, 1:3] <- as.matrix(nextpoint$next.point)
  nextp <- as.numeric(nextpoint$next.point[1])
  cat(rownames(nextpoint$next.point), "\n")
  # Append a placeholder row at the recommended level (response NA); the
  # columns are positional: stress, cycles, censored, weight, Rval, Freq,
  # Angle, UTS.
  data.NEW <- rbind(datNEW$data, c(nextp*dat$sigmau, NA, NA, 1, dat$Rval,
                                   dat$Freq.mod, dat$Angle, 0))
  data.NEW <- data.NEW[!is.na(data.NEW[, 1]),]
  datNEW <- dat.setting(data.NEW, dat$exp.type, dat$use.level, dat$max.level,
                        dat$Cen.time, dat$mu.fun, show = FALSE)
  OPT.Summary[1,] <- c(NA, hist.D, hist.avar)
  row.names(OPT.Summary) <- 0:1
  colnames(OPT.Summary) <- c("next.point", "log.det", "avar")
  print(OPT.Summary)
  #cat("\n", "Data set:", "\n")
  #print(dat1.NEW$data)
  invisible(list( Bayes = Bayesian.NEW, Final.opt = OPT.Summary, data = datNEW$data))
}
################################################################################
#' Discretized use-level profile on [quseL, quseU] with beta-shaped weights.
#'
#' @param quseL,quseU Lower/upper end of the use-level range.
#' @param pts Number of grid points in the profile.
#' @param pattern Weight shape: 1 = Beta(2,5) (mass near the low end),
#'   2 = Beta(5,2) (mass near the high end), 3 = Beta(3,3) (symmetric),
#'   4 = bimodal Beta(2,5) + Beta(10,5) mixture.
#' @return list(use, wts): the grid of use levels and weights normalized
#'   to sum to 1.
use.profile <- function(quseL=0.05, quseU=0.25, pts=20, pattern=1)
{
  # BUG FIX: seq(0, 1, pts) treated `pts` as the STEP size, collapsing
  # the grid to the single point 0 (and the normalized weights to NaN).
  # Use `pts` as the number of points, as the argument name implies.
  xx <- seq(0, 1, length.out = pts)
  quse <- quseL + (quseU - quseL) * xx
  if (pattern == 1)
  {
    quse.wts <- dbeta(xx, 2, 5)
  }
  if (pattern == 2)
  {
    quse.wts <- dbeta(xx, 5, 2)
  }
  if (pattern == 3)
  {
    quse.wts <- dbeta(xx, 3, 3)
  }
  if (pattern == 4)
  {
    quse.wts <- dbeta(xx, 2, 5) + dbeta(xx, 10, 5)
  }
  # Normalize so the discrete weights sum to 1.
  quse.wts <- quse.wts / sum(quse.wts)
  par(mai = c(0.2, 0.2, 0.2, 0.2))
  #barplot(quse.wts, width=(quseU-quseL)/pts, axes=F)
  res <- list(use = quse, wts = quse.wts)
  return(res)
}
### Plots ###
#' Plot the sequential design path and the evolution of the criterion.
#'
#' Left panel: standardized stress level of each run -- historical levels
#' as stars, sequentially chosen levels as red dots, separated by a
#' dashed vertical line at dat$Nobs. Right panel: the chosen criterion
#' value (avar for "C-optimality", log-det for "D-optimality") against
#' the growing sample size.
#'
#' @param dat Historical data list from dat.setting().
#' @param Obj Result list from SBD.sim() (uses $Final.opt and $data).
#' @param cri Which criterion trace to show in the right panel.
#' @param y.at Y-axis tick positions for the level plot.
#' @export
SBD.plot <- function(dat, Obj, cri = c("C-optimality", "D-optimality"),
                     y.at = seq(0.3, 1, 0.1)) {
  hist.stlevel = std.level(dat$use.level, dat$max.level, dat$x, dat$mu.fun)
  #par(mar=c(8, 5, 5, 1), oma=c(2, 0, 0, 0))
  # Two panels: the level path gets 2/3 of the width, the trace 1/3.
  layout(matrix(c(1,1,1,1,2,2), 2, 3))
  # Sequential design points (Final.opt column 1 holds the chosen levels;
  # the stage-0 row is NA and is simply not drawn).
  plot(length(dat$data[,1]):length(Obj$data[,1]), Obj$Final.opt[, 1], type='b', col=2,
       pch=19, ylim= c(0, 1), cex=1.5,
       xlim=c(0, length(Obj$data[,1]) + 1), cex.axis=2, xlab='', ylab='',
       xaxt = "n", yaxt = "n")
  axis(side=1, at = 1:length(Obj$data[,1]), cex.axis=1.2)
  axis(side=2, at = y.at, cex.axis=1.5)
  mtext('Number of samples in current data', side=1, line=2, cex=1)
  mtext('Standardized level', side=2, line=2, cex=1.5)
  # Dashed divider between historical and sequential observations.
  lines(c(dat$Nobs+0.5, dat$Nobs+0.5), c(-1,2), lty=5)
  #aa=sort(hist.stlevel)
  points(1:length(dat$data[,1]), hist.stlevel, pch=8, cex=1.5)
  legend("bottomleft", c("historical stress level", "sequential design"),
         pch=c(8, 19), cex=1.2, col=c(1,2))
  mtext(cri, side=3, cex=1.5, line = 1)
  # Right panel: criterion trace (column 3 = avar, column 2 = log-det).
  if(cri == "C-optimality"){y <- Obj$Final.opt[, 3]}else{y <- Obj$Final.opt[, 2]}
  Nnew <- length(Obj$data[, 1])
  plot(length(dat$data[,1]):length(Obj$data[,1]), y, type='b', col=1, pch=19,
       xlim=c(length(dat$data[,1])-1, Nnew+1), cex=1.5, cex.axis=1.2,
       xlab='', ylab='', main="")
  mtext('Avar', side=2, line=2, cex=1.5)
  mtext(cri, side=3, cex=1.5, line = 1)
  mtext('Sample size', side=1, line=2, cex=1)
}
### MLE non-sequential design ###
### C-optimality ###
### MLE non-sequential design C-opt ###
################################################################################
# D- and C-criterion values of an existing (fixed) design at a single
# parameter value.
#
# res.D is the log-determinant of the Fisher information; res.AVar is the
# weighted asymptotic variance of the `quantile` log-lifetime over the
# use-level profile (delta method, c' F^{-1} c). When a prior information
# matrix Sinv is supplied, it is added to the information before inversion.
# Note: the use-level profile is taken from dat$use.level; the
# use.pattern argument is kept only for call compatibility.
Eval.criteria <- function(pars, stlevel, dat, use.pattern, quantile, model, mu.fun, Sinv=c()) {
  A <- pars[1]
  B <- pars[2]
  info <- FImatrix(pars, dat$Cen.time, dat, stlevel, model, mu.fun, dat$Freq, 0)
  #F1=Fmat.CD(A, B, nu, Nm=2000000, dat, q.vec=dat$x/dat$sigmau, pi.vec=rep(1/length(dat$Y), length(dat$Y)), kk=length(dat$Y), ff=dat$Freq)
  log.det <- log(det(info))
  dat.use <- dat
  dat.use$Freq <- dat.use$Freq.mod
  grad <- mu.ders(A, B, dat$use.level$use, mu.fun, dat.use)
  n.use <- length(dat$use.level$use)
  # Gradient of the quantile log-life w.r.t. (A, B, nu), one row per use
  # level; the nu-component is the standard normal quantile.
  cvec <- matrix(c(grad$mu1, grad$mu2,
                   rep(qnorm(quantile, mean = 0, sd = 1), n.use)),
                 n.use, 3)
  app.var <- if (length(Sinv) == 0) ginv(info) else ginv(info + Sinv)
  avar.i <- c()
  for (k in 1:n.use) {
    avar.i <- c(avar.i, sum(cvec[k, ] %*% app.var*cvec[k, ]))
  }
  wavar <- sum(dat$use.level$wts*avar.i)
  # A non-positive variance indicates numerical trouble; report NA.
  list(res.D = log.det, res.AVar = ifelse(wavar > 0, wavar, NA))
}
########################################################
# Construct traditional (non-sequential) accelerated test plans.
#
# dat         : data list; reads dat$mu.fun, dat$use.level, dat$Freq.mod.
# partrue     : planning values of the model parameters (A, B).
# N.design    : total number of test units to allocate.
# design.type : "TOD" = grid search for C- and D-optimal two/three-point
#               plans; "EQD" = equal allocation over can.stress + stressH.
# mu.fun      : kept for interface compatibility; the search uses dat$mu.fun.
# can.stress  : candidate low (standardized) stress levels.
# stressH     : fixed high stress level.
# N.middle    : units pinned at the midpoint level (0 = two-point plan).
# quantile    : quantile of interest for the C-criterion.
#
# Returns list(C_opt, D_opt, EQD, planningValue); each design carries
# $opt (distinct levels), $pts (per-unit levels) and $n (allocation counts).
trad.design <- function(dat, partrue, N.design = 12, design.type = c("TOD", "EQD"), mu.fun,
                        can.stress = seq(0.35, 0.7, 0.05), stressH = 0.75, N.middle = 0, quantile) {
  # The length-2 default for design.type would make the `==` tests below an
  # error in current R; match.arg() collapses it to a single value ("TOD").
  design.type <- match.arg(design.type)
  # Assemble a two- or three-point plan from the chosen low level and the
  # number of units allocated to it.
  build.design <- function(nLow, stressL) {
    des <- list()
    nHigh <- N.design - N.middle - nLow
    stressM <- (stressL + stressH)/2
    des$pts <- c(rep(stressL, nLow), rep(stressM, N.middle), rep(stressH, nHigh))
    if (N.middle == 0) {
      des$opt <- c(stressL, stressH)
      des$n <- c(nLow, nHigh)
    } else {
      des$opt <- c(stressL, stressM, stressH)
      des$n <- c(nLow, N.middle, nHigh)
    }
    des
  }
  # Placeholder returned when the criterion search finds no finite optimum.
  na.design <- list(opt = rep(NA, 2), pts = rep(NA, N.design), n = rep(NA, 2))
  design.C <- list()
  design.D <- list()
  design.EQD <- list()
  par.est <- NA
  n.can <- length(can.stress)
  n.low.max <- N.design - N.middle - 1   # max units assignable to the low level
  if (design.type == "EQD") {
    # Spread the units evenly over all candidate levels plus the high level.
    design.EQD$opt <- c(can.stress, stressH)
    design.EQD$pts <- sort(rep(design.EQD$opt, N.design/length(design.EQD$opt)))
    design.EQD$n <- rep(N.design/length(design.EQD$opt), length(design.EQD$opt))
  } else if (design.type == "TOD") {
    par.est <- par.MLE(dat, mnius.loglikelihood, "lnor", dat$mu.fun, starts = log(partrue))$est.par
    # Criterion values: rows = candidate low levels, cols = low allocation.
    # (The old code built these with rep(NA, nrow, ncol), which silently used
    # ncol as rep()'s length.out; matrix(NA_real_, ...) says what is meant.)
    dq <- matrix(NA_real_, n.can, n.low.max)
    cq <- matrix(NA_real_, n.can, n.low.max)
    mdp <- mcp <- rep(NA_integer_, n.can)  # best low allocation per level
    mmdp <- mmcp <- rep(NA_real_, n.can)   # corresponding criterion values
    # Invariant over the grid: set once instead of on every iteration.
    dat$Freq <- rep(dat$Freq.mod, N.design)
    dat$Nobs <- N.design
    for (j in seq_len(n.can)) {
      lower <- can.stress[j]
      for (i in seq_len(n.low.max)) {
        # i units at the low level, N.middle at the midpoint, rest on top.
        stlevel <- c(rep(lower, i), rep((lower + stressH)/2, N.middle),
                     rep(stressH, N.design - i - N.middle))
        eval <- Eval.criteria(partrue, stlevel, dat, dat$use.level, quantile, dat$mu.fun)
        cq[j, i] <- eval$res.AVar
        dq[j, i] <- eval$res.D
      }
      # [1] takes the first optimum; previously a tie appended several
      # indices and misaligned mdp/mmdp. An all-NA row yields NA.
      mdp[j] <- which(dq[j, ] == max(dq[j, ]))[1]
      mmdp[j] <- dq[j, mdp[j]]
      mcp[j] <- which(cq[j, ] == min(cq[j, ]))[1]
      mmcp[j] <- cq[j, mcp[j]]
    }
    # First candidate level attaining the best positive criterion value;
    # Inf (empty which) routes to the NA placeholder design below.
    pos.D <- min(which(mmdp == max(mmdp[mmdp > 0], na.rm = TRUE)))
    pos.C <- min(which(mmcp == min(mmcp[mmcp > 0], na.rm = TRUE)))
    design.D <- if (is.finite(pos.D)) build.design(mdp[pos.D], can.stress[pos.D]) else na.design
    design.C <- if (is.finite(pos.C)) build.design(mcp[pos.C], can.stress[pos.C]) else na.design
  }
  return(list(C_opt = design.C, D_opt = design.D, EQD = design.EQD, planningValue = par.est))
}
|
ab13b252aa778b4e3f53fb9724d9a848ed8e695a
|
1e36964d5de4f8e472be681bad39fa0475d91491
|
/man/SDMXUtilityData.Rd
|
9d27f3d06f8c30bdf1d0004f0ba23797d0bfc74a
|
[] |
no_license
|
cran/rsdmx
|
ea299980a1e9e72c547b2cca9496b613dcf0d37f
|
d6ee966a0a94c5cfa242a58137676a512dce8762
|
refs/heads/master
| 2023-09-01T03:53:25.208357
| 2023-08-28T13:00:02
| 2023-08-28T13:30:55
| 23,386,192
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 929
|
rd
|
SDMXUtilityData.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Class-SDMXUtilityData.R,
% R/SDMXUtilityData-methods.R
\docType{class}
\name{SDMXUtilityData}
\alias{SDMXUtilityData}
\alias{SDMXUtilityData-class}
\alias{SDMXUtilityData,SDMXUtilityData-method}
\title{Class "SDMXUtilityData"}
\usage{
SDMXUtilityData(xmlObj, namespaces)
}
\arguments{
\item{xmlObj}{object of class "XMLInternalDocument" derived from the XML package}
\item{namespaces}{object of class "data.frame" given the list of namespace URIs}
}
\value{
an object of class "SDMXUtilityData"
}
\description{
A basic class to handle a SDMX-ML UtilityData data set
}
\section{Warning}{
This class is not useful in itself, but all SDMX non-abstract classes will
encapsulate it as slot, when parsing an SDMX-ML document.
}
\seealso{
\link{readSDMX}
}
\author{
Emmanuel Blondel, \email{emmanuel.blondel1@gmail.com}
}
|
0ac5c2344508ceffbda139ed9903445e3696c04a
|
ea9348a94cb36ae70c2737e49ae03f6301be8ba1
|
/data_analysis_course/videogames.R
|
9ad453ba84ea9b4e8a0f06c35f32fd0fd333b703
|
[] |
no_license
|
quantum-pie/r-scripts
|
84cb82abc034a40b82e36dc970b66fbc2b55dea2
|
d3ba08a1a10c5b7c626018d7c43727deb459f74d
|
refs/heads/master
| 2020-12-24T09:29:54.573394
| 2018-07-26T14:18:14
| 2018-07-26T14:18:14
| 73,291,113
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,687
|
r
|
videogames.R
|
# Video game sales analysis: PlayStation vs. Xbox console families.
library(ggplot2)
library(dplyr)
library(forcats)
library(tidyr)

# VGChartz-style sales data, one row per title/platform.
vg_sales <- read.csv("r-github/vgsales.csv")

# Annual global sales per console, restricted to the PS and Xbox families.
annual_sales <-
  vg_sales %>%
  mutate(Year = strtoi(Year)) %>%
  select(Year, Global_Sales, Platform) %>%
  filter(Platform %in% c("PS", "PS2", "PS3", "PS4", "XB", "X360", "XOne"),
         !is.na(Year)) %>%
  group_by(Year, Platform) %>%
  summarise(Total_Sales = sum(Global_Sales))

# Stacked bars of annual sales, coloured by platform.
ggplot(annual_sales, aes(Year, Total_Sales)) +
  ggtitle("Annual Video Games Sales Timeline") +
  geom_col(aes(fill = Platform)) +
  scale_x_continuous(breaks = seq(1990, 2017, 2)) +
  scale_y_continuous(breaks = seq(0, 350, 50), name = "Annual Sales, millions of copies") +
  theme_bw() +
  theme(plot.title = element_text(hjust = 0.5))

# PS share of the combined PS + Xbox market, per year (years where PS is the
# only seller are dropped by the Total_Sales != sum(Total_Sales) condition).
ps_share <-
  annual_sales %>%
  mutate(Platform = fct_collapse(Platform,
                                 PS = c("PS", "PS2", "PS3", "PS4"),
                                 XBOX = c("XB", "X360", "XOne"))) %>%
  group_by(Year, Platform) %>%
  summarise(Total_Sales = sum(Total_Sales)) %>%
  mutate(Portion = 100 * Total_Sales / sum(Total_Sales)) %>%
  filter(Platform == "PS" & Total_Sales != sum(Total_Sales))

# Smoothed trend of the PS market share with yearly observations overlaid.
ggplot(ps_share, aes(Year, Portion)) +
  geom_smooth(method = "loess", se = FALSE, color = "#ffa700", size = 1.4) +
  geom_point(color = "#0057e7", size = 2) +
  scale_x_continuous(breaks = seq(2000, 2017, 2)) +
  scale_y_continuous(breaks = seq(40, 100, 10), name = "PS Market Share, %") +
  ggtitle("PS against XBOX market share timeline") +
  theme_bw() +
  theme(plot.title = element_text(hjust = 0.5))

# Sales totals cross-classified by console family and hardware generation.
# The original "PS" is excluded: it has no Xbox contemporary.
gen_sales <-
  annual_sales %>%
  ungroup() %>%
  filter(Platform != "PS") %>%
  select(Total_Sales, Platform) %>%
  mutate(Generation = droplevels(
    fct_collapse(Platform,
                 First = c("PS2", "XB"),
                 Second = c("PS3", "X360"),
                 Third = c("PS4", "XOne"))
  )) %>%
  mutate(Platform = droplevels(
    fct_collapse(Platform,
                 PS = c("PS2", "PS3", "PS4"),
                 XBOX = c("XB", "X360", "XOne"))
  )) %>%
  group_by(Platform, Generation) %>%
  summarise(Total_Sales = sum(Total_Sales))

gen_table <- xtabs(Total_Sales ~ Generation + Platform, gen_sales)

# Mosaic plot of the console-by-generation sales table.
mosaicplot(gen_table,
           color = c("#c95719", "#128a94"),
           main = "Console-Gen Video Games Sales Table",
           cex.axis = 1.1)
|
c9a93daaf761c0d5681618492335ccc43275d2e7
|
97fdd3114a09ce6b6f87e58487c2e2ba06b287f6
|
/connect_to_Db.R
|
2e57758242367614de838095084864b8150f6233
|
[] |
no_license
|
NathanJablonski/Formulations
|
46c19efed98a488bb0159fa74647fac216e8dee0
|
0107de1112dc047993efa074d9e192570c3853c3
|
refs/heads/master
| 2022-11-17T08:09:27.410621
| 2020-07-14T19:46:49
| 2020-07-14T19:46:49
| 273,584,613
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 703
|
r
|
connect_to_Db.R
|
##------------------------------------------------------------------------------
# Function: connect_to_Db
# Desc: opens an ODBC connection to a SQL Server database
#       (the previous header was copied from another function and described
#       text processing; corrected here)
# Parameters:
#   In:  server   - SQL Server host name (default "L5500-NJAB-TF13")
#        database - database name        (default "Test")
#   Out: none
# Returns: a live DBI connection object; the caller is responsible for
#          closing it with dbDisconnect()
##-------------------------------------------------------------------------------
connect_to_Db <- function(server = "L5500-NJAB-TF13", database = "Test"){
  #connection= strsplit(decrypt_string(en_string, key = privkey, pkey = pubkey),",")
  conn <- dbConnect(
    odbc(),
    driver = "SQL Server",
    server = server,
    database = database
    # Uid/Pwd are omitted — presumably the connection relies on integrated
    # Windows authentication; confirm before deploying elsewhere.
    # NOTE(review): the disabled Uid above contained "GLB\njablonski", where
    # \n is an escape — it would need to be "GLB\\njablonski" if re-enabled.
  )
  return(conn)
}
|
0f1f5cb1e0ab92226eaa93d33f1eb5c5178e1616
|
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
|
/mosum/man/mosum.pValue.Rd
|
1d4805143feff3afdaee4048de23eb3d6aa67489
|
[] |
no_license
|
akhikolla/InformationHouse
|
4e45b11df18dee47519e917fcf0a869a77661fce
|
c0daab1e3f2827fd08aa5c31127fadae3f001948
|
refs/heads/master
| 2023-02-12T19:00:20.752555
| 2020-12-31T20:59:23
| 2020-12-31T20:59:23
| 325,589,503
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 624
|
rd
|
mosum.pValue.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mosum_test.R
\name{mosum.pValue}
\alias{mosum.pValue}
\title{MOSUM asymptotic p-value}
\usage{
mosum.pValue(z, n, G.left, G.right = G.left)
}
\arguments{
\item{z}{a numeric value for the observation}
\item{n}{an integer value for the length of the input data}
\item{G.left, G.right}{integer values for the left and right moving sum bandwidths (G.left, G.right); G.right defaults to G.left}
}
\value{
a numeric value for the asymptotic p-value for the asymmetric MOSUM test
}
\description{
Computes the asymptotic p-value for the MOSUM test.
}
\keyword{internal}
|
957bb655a7013041f3187dacc143efc5789ad55c
|
bd406d9bc18f29fc5397fffede48d621d91c3184
|
/cachematrix.R
|
cb94ac423524397718157db8d3ae61865ae57b93
|
[] |
no_license
|
XinyuZhengDeveloper/JHUcoursera_datascience
|
cf6af9053c19bd0213b9c9a9f179069dc646edde
|
5b9f4e788dfd625e9591508833471c196c608cc2
|
refs/heads/master
| 2022-09-09T12:11:11.158279
| 2020-05-27T15:09:17
| 2020-05-27T15:09:17
| 267,349,880
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 839
|
r
|
cachematrix.R
|
## Cache-aware matrix wrapper.
##
## Creates a special "matrix" object that can cache its inverse. The
## returned value is a list of four accessor functions closing over the
## matrix `x` and its cached inverse `i`:
##   set(y)           - replace the stored matrix and invalidate the cache
##   get()            - return the stored matrix
##   set_inverse(inv) - store a computed inverse in the cache
##   get_inverse()    - return the cached inverse (NULL until computed)
makeCacheMatrix <- function(x = matrix()) {
  i <- NULL
  set <- function(y){
    # BUG FIX: the original did `y <<- x`, which copied the old matrix over
    # the argument and never updated the stored matrix. The new matrix must
    # be written into `x`, and the stale cached inverse cleared.
    x <<- y
    i <<- NULL
  }
  get <- function() x
  set_inverse <- function(inverse) i <<- inverse
  get_inverse <- function() i
  list(set = set, get = get, set_inverse = set_inverse, get_inverse = get_inverse)
}
## this function compute the inverse of matrix returned above.
## If the inverse has already been calculated (and the matrix has
## not changed), then it should retrieve the inverse from the cache.
## Return the inverse of the special "matrix" object `x` created by
## makeCacheMatrix(). On the first call the inverse is computed with
## solve() and stored in the object's cache; later calls are served from
## the cache (announced via a "getting cached data" message).
## Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$get_inverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inv <- solve(x$get(), ...)
  x$set_inverse(inv)
  inv
}
|
5f3e7b08121ecfd4764cc261468b16151f917be4
|
cec9144da1e7568b8e3d474e75cbae5222fadb8e
|
/man/help.Rd
|
14aaca1933966a2b564412789586058c98cc8caf
|
[] |
no_license
|
jimhester/types
|
9055cd34389861af80b3c9cfc3417eb1a90303d4
|
7251e82d1d6a6996feef1566892e895f9ebfb2ab
|
refs/heads/master
| 2020-07-25T12:51:15.511091
| 2016-11-30T21:43:08
| 2016-11-30T21:43:08
| 66,026,087
| 90
| 2
| null | 2016-11-30T21:43:08
| 2016-08-18T19:49:22
|
R
|
UTF-8
|
R
| false
| false
| 239
|
rd
|
help.Rd
|
\name{?}
\alias{?}
\title{Documentation Shortcuts}
\usage{
"?"(e1, e2)
}
\arguments{
\item{e1}{The type of documentation}
\item{e2}{The topic of documentation}
}
\description{
Documentation Shortcuts
}
\seealso{
\code{\link[utils]{?}}
}
|
8d8b75034fdca33a687139488ec5f4964a0bf91a
|
bb275dc2cd2e7c722ac17938d2e5fa68af3bce97
|
/tests/testthat/test-summarize_across.R
|
dc9054d0e3bcb4c244c312f4a414aeffea5ea47b
|
[
"MIT"
] |
permissive
|
timjaya/tidytable
|
8ba648689162842fdb4aa7f5153d16d62895ece5
|
ea653f6260dafc9f1bda01ff40cf3b9eda3d72c0
|
refs/heads/master
| 2022-11-05T00:09:58.604231
| 2020-06-23T00:34:19
| 2020-06-23T00:34:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,166
|
r
|
test-summarize_across.R
|
# Tests for summarize_across.() / summarise_across.(): single functions,
# named/unnamed function lists, and grouped summaries via .by.
test_that("single function works", {
  df <- tidytable(a = 1:3, b = 4:6, z = c("a", "a", "b"))
  out <- summarize_across.(df, c(a, b), mean, na.rm = TRUE)
  expect_named(out, c("a", "b"))
  expect_equal(out$a, 2)
  expect_equal(out$b, 5)
})

test_that("summarise spelling works", {
  df <- tidytable(a = 1:3, b = 4:6, z = c("a", "a", "b"))
  out <- summarise_across.(df, c(a, b), mean, na.rm = TRUE)
  expect_named(out, c("a", "b"))
  expect_equal(out$a, 2)
  expect_equal(out$b, 5)
})

test_that("single function works with .by", {
  df <- tidytable(a = 1:3, b = 4:6, z = c("a", "a", "b"))
  out <- summarize_across.(df, c(a, b), mean, na.rm = TRUE, .by = z)
  expect_named(out, c("z", "a", "b"))
  expect_equal(out$a, c(1.5, 3))
  expect_equal(out$b, c(4.5, 6))
})

test_that("can pass list of named functions", {
  df <- tidytable(a = 1:3, b = 4:6, z = c("a", "a", "b"))
  out <- summarize_across.(df, c(a, b), list(avg = mean, max = max))
  expect_named(out, c("avg_a", "avg_b", "max_a", "max_b"))
  expect_equal(out$avg_a, 2)
  expect_equal(out$avg_b, 5)
  expect_equal(out$max_a, 3)
  expect_equal(out$max_b, 6)
})

test_that("can pass unnamed list of functions", {
  df <- tidytable(a = 1:3, b = 4:6, z = c("a", "a", "b"))
  out <- summarize_across.(df, c(a, b), list(avg = mean, max))
  # Unnamed entries get the generic "fn" prefix.
  expect_named(out, c("avg_a", "avg_b", "fn_a", "fn_b"))
  expect_equal(out$avg_a, 2)
  expect_equal(out$avg_b, 5)
  expect_equal(out$fn_a, 3)
  expect_equal(out$fn_b, 6)
})

test_that("can pass list of named functions with .by", {
  df <- tidytable(a = 1:3, b = 4:6, z = c("a", "a", "b"))
  out <- summarize_across.(df, c(a, b), list(avg = mean, max = max), .by = z)
  expect_named(out, c("z", "avg_a", "avg_b", "max_a", "max_b"))
  expect_equal(out$avg_a, c(1.5, 3))
  expect_equal(out$avg_b, c(4.5, 6))
  expect_equal(out$max_a, c(2, 3))
  expect_equal(out$max_b, c(5, 6))
})
|
a5e9b070264b9ebd5b10950ed86d6adcf63c4939
|
aa180339fe5d6c0b6bb3bac86a72c5ebf65c60a4
|
/rTTManApi/R/R/TradeHistory.R
|
e6c5c09e21658a2150898a7efda489c9ddb5e883
|
[] |
no_license
|
SoftFx/TTManagerAPI
|
7ad6f66fd269f89125ebd9757bb65fea10a3ff05
|
f4eab7d678e5fb27bbebc459409505d4526cc298
|
refs/heads/master
| 2023-07-20T10:08:55.744338
| 2023-07-05T16:03:36
| 2023-07-05T16:03:36
| 95,440,101
| 0
| 2
| null | 2017-07-24T17:59:42
| 2017-06-26T11:35:27
|
C#
|
UTF-8
|
R
| false
| false
| 22,572
|
r
|
TradeHistory.R
|
#' Gets the Trade reports as requested
#'
#' @param accId a numeric vector. Accounts ids.
#' @param from a POSIXct object. Start time. By default, from = ISOdatetime(1970,01,01,0,00,00, tz ="GMT").
#' @param to a POSIXct object. End time. By default, to = ISOdatetime(2017,08,01,0,00,00, tz ="GMT").
#' @param skipCancelled a logical. If TRUE (default), cancelled orders are not displayed.
#' @param transTypes a string. Values from set (OrderOpened, OrderCanceled, OrderExpired, OrderFilled, PositionClosed, Balance, Credit, PositionOpened, OrderActivated, TradeModified) are delimited by ' ', ',', '.', ':'.
#' @param reasons a string. Values from set (ClientRequest, PndOrdAct, StopOut, StopLossAct, TakeProfitAct, DealerDecision, Rollover, Delete, Expired, TransferMoney, Split, Dividend, OneCancelsTheOther) are delimited by ' ', ',', '.', ':'.
#' @param getStringPosId a logical. If TRUE, a StringPositionId column is appended to the result. FALSE by default.
#' @return a data.table with one row per trade report record (see GetTradeFrame).
#' @examples
#' ttmGetTradeReports(c(100181, 100182), ISOdatetime(1970,01,01,0,00,00, tz ="GMT"), ISOdatetime(2017,08,01,0,00,00, tz ="GMT"), TRUE)
#'
#' @export
ttmGetTradeReports <- function(accId, from = ISOdatetime(1970,01,01,0,00,00, tz ="GMT"), to = ISOdatetime(2017,08,01,0,00,00, tz ="GMT"), skipCancelled = TRUE, transTypes = "", reasons = "", getStringPosId = FALSE) {
  # Run the query on the .NET host; results are buffered host-side and then
  # pulled column-by-column via GetTradeFrame().
  # (Leftover debug print()s of from/to removed — one of them called str(),
  # which prints to the console and returns NULL, yielding garbled output.)
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeReports', accId, from, to, transTypes, reasons, skipCancelled)
  res <- GetTradeFrame()
  if (getStringPosId)
    res[, StringPositionId := GetStringPositionId()]
  return(res)
}
# Build the complete trade-report table.
#
# Every report column has a zero-argument accessor named "Get<column>"
# (defined below in this file) that marshals the column vector out of the
# .NET TTManager API host. This function invokes each accessor and binds
# the results, in the fixed report column order, into a single data.table.
GetTradeFrame<-function()
{
  # Output columns in order; "Get" + name resolves the matching accessor.
  col.names <- c(
    "TradeAccountId", "TradeId", "TradeDomain", "TradeGroup", "TradeOrderId",
    "TradeOrderActionNo", "TradeClientOrderId", "TradeTrType", "TradeTrReason",
    "TradeTrTime", "TradeSide", "TradeOrderType", "TradeParentOrderType",
    "TradeOrderCreated", "TradeOrderModified", "TradeSymbol", "TradeSymbolAlias",
    "TradeSymbolAliasOrName", "TradeSymbolFk", "TradeOrderAmount",
    "TradeOrderRemainingAmount", "TradeOrderHiddenAmount", "TradeOrderLastFillAmount",
    "TradeOrderPrice", "TradeOrderStopPrice", "TradeOrderFillPrice",
    "TradeReqOpenPrice", "TradeReqOpenAmount", "TradeReqClosePrice",
    "TradeReqCloseAmount", "TradeClientApp", "TradeRequestTime", "TradePosId",
    "TradePosById", "TradePosAmount", "TradePosRemainingAmount",
    "TradePosRemainingSide", "TradePosRemainingPrice", "TradePosLastAmount",
    "TradePosOpenPrice", "TradePosOpened", "TradePosClosePrice", "TradePosClosed",
    "TradeCommission", "TradeAgentCommission", "TradeSwap", "TradeProfitLoss",
    "TradeBalance", "TradeBalanceMovement", "TradeBalanceCurrency",
    "TradePlatformComment", "TradeUserComment", "TradeManagerComment",
    "TradeUserTag", "TradeManagerTag", "TradeMagic", "TradeMarginRateInitial",
    "TradeStopLoss", "TradeTakeProfit", "TradeOpenConversionRate",
    "TradeCloseConversionRate", "TradeExpired", "TradePosModified",
    "TradeProfitToUsdConversionRate", "TradeUsdToProfitConversionRate",
    "TradeBalanceToUsdConversionRate", "TradeUsdToBalanceConversionRate",
    "TradeMarginCurrencyToUsdConversionRate", "TradeUsdToMarginCurrencyConversionRate",
    "TradeMarginCurrency", "TradeProfitCurrencyToUsdConversionRate",
    "TradeUsdToProfitCurrencyConversionRate", "TradeProfitCurrency",
    "TradeSrcAssetToUsdConversionRate", "TradeUsdToSrcAssetConversionRate",
    "TradeDstAssetToUsdConversionRate", "TradeUsdToDstAssetConversionRate",
    "TradeSrcAssetCurrency", "TradeSrcAssetAmount", "TradeSrcAssetMovement",
    "TradeDstAssetCurrency", "TradeDstAssetAmount", "TradeDstAssetMovement",
    "TradeOptions", "TradeOrderMaxVisibleAmount", "TradeReducedOpenCommissionFlag",
    "TradeReducedCloseCommissionFlag", "TradeSymbolPrecision",
    "TradeProfitCurrencyToReportConversionRate", "TradeMarginCurrencyToReportConversionRate",
    "TradeDstAssetToReportConversionRate", "TradeSrcAssetToReportConversionRate",
    "TradeBalanceToReportConversionRate", "TradeProfitToReportConversionRate",
    "TradeReportToBalanceConversionRate"
  )
  col.values <- lapply(col.names, function(nm) match.fun(paste0("Get", nm))())
  # Equivalent to the literal data.table(TradeAccountId = ..., TradeId = ..., ...)
  # call: same column names, same order, same recycling rules.
  do.call(data.table, setNames(col.values, col.names))
}
## Per-column accessors for the trade report. Each one marshals a single
## column vector out of the .NET 'rTTManApi.rTTManApiHost' object; they are
## consumed collectively by GetTradeFrame() above.
# Accessor: TradeId column.
GetTradeId<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeId')
}
# Accessor: TradeDomain column.
GetTradeDomain<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeDomain')
}
# Accessor: TradeGroup column.
GetTradeGroup<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeGroup')
}
# Accessor: TradeOrderId column.
GetTradeOrderId<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeOrderId')
}
# Accessor: TradeOrderActionNo column.
GetTradeOrderActionNo<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeOrderActionNo')
}
# Accessor: TradeClientOrderId column.
GetTradeClientOrderId<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeClientOrderId')
}
# Accessor: TradeTrType column.
GetTradeTrType<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeTrType')
}
# Accessor: TradeTrReason column.
GetTradeTrReason<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeTrReason')
}
# Accessor: TradeTrTime column.
GetTradeTrTime<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeTrTime')
}
# Accessor: TradeSide column.
GetTradeSide<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeSide')
}
# Accessor: TradeOrderType column.
GetTradeOrderType<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeOrderType')
}
# Accessor: TradeParentOrderType column.
GetTradeParentOrderType<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeParentOrderType')
}
# Accessor: TradeOrderCreated column.
GetTradeOrderCreated<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeOrderCreated')
}
# Accessor: TradeOrderModified column.
GetTradeOrderModified<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeOrderModified')
}
# Accessor: TradeSymbol column.
GetTradeSymbol<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeSymbol')
}
# Accessor: TradeSymbolAlias column.
GetTradeSymbolAlias<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeSymbolAlias')
}
# Accessor: TradeSymbolAliasOrName column.
GetTradeSymbolAliasOrName<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeSymbolAliasOrName')
}
# Accessor: TradeSymbolFk column.
GetTradeSymbolFk<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeSymbolFk')
}
# Accessor: TradeOrderAmount column.
GetTradeOrderAmount<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeOrderAmount')
}
# Accessor: TradeOrderRemainingAmount column.
GetTradeOrderRemainingAmount<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeOrderRemainingAmount')
}
# Accessor: TradeOrderHiddenAmount column.
GetTradeOrderHiddenAmount<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeOrderHiddenAmount')
}
# Accessor: TradeOrderLastFillAmount column.
GetTradeOrderLastFillAmount<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeOrderLastFillAmount')
}
# Accessor: TradeOrderPrice column.
GetTradeOrderPrice<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeOrderPrice')
}
# Accessor: TradeOrderStopPrice column.
GetTradeOrderStopPrice<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeOrderStopPrice')
}
# Accessor: TradeOrderFillPrice column.
GetTradeOrderFillPrice<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeOrderFillPrice')
}
# Accessor: TradeReqOpenPrice column.
GetTradeReqOpenPrice<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeReqOpenPrice')
}
# Accessor: TradeReqOpenAmount column.
GetTradeReqOpenAmount<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeReqOpenAmount')
}
# Accessor: TradeReqClosePrice column.
GetTradeReqClosePrice<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeReqClosePrice')
}
# Accessor: TradeReqCloseAmount column.
GetTradeReqCloseAmount<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeReqCloseAmount')
}
# Accessor: TradeClientApp column.
GetTradeClientApp<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeClientApp')
}
# Accessor: TradeRequestTime column.
GetTradeRequestTime<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeRequestTime')
}
# Accessor: TradePosId column.
GetTradePosId<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradePosId')
}
# Accessor: TradePosById column.
GetTradePosById<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradePosById')
}
# Accessor: TradePosAmount column.
GetTradePosAmount<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradePosAmount')
}
# Accessor: TradePosRemainingAmount column.
GetTradePosRemainingAmount<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradePosRemainingAmount')
}
# Accessor: TradePosRemainingSide column.
GetTradePosRemainingSide<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradePosRemainingSide')
}
# Accessor: TradePosRemainingPrice column.
GetTradePosRemainingPrice<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradePosRemainingPrice')
}
# Accessor: TradePosLastAmount column.
GetTradePosLastAmount<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradePosLastAmount')
}
# Accessor: TradePosOpenPrice column.
GetTradePosOpenPrice<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradePosOpenPrice')
}
# Accessor: TradePosOpened column.
GetTradePosOpened<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradePosOpened')
}
# Accessor: TradePosClosePrice column.
GetTradePosClosePrice<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradePosClosePrice')
}
# Accessor: TradePosClosed column.
GetTradePosClosed<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradePosClosed')
}
# Accessor: TradeCommission column.
GetTradeCommission<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeCommission')
}
# Accessor: TradeAgentCommission column.
GetTradeAgentCommission<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeAgentCommission')
}
# Accessor: TradeSwap column.
GetTradeSwap<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeSwap')
}
# Accessor: TradeProfitLoss column.
GetTradeProfitLoss<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeProfitLoss')
}
# Accessor: TradeBalance column.
GetTradeBalance<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeBalance')
}
# Accessor: TradeBalanceMovement column.
GetTradeBalanceMovement<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeBalanceMovement')
}
# Accessor: TradeBalanceCurrency column.
GetTradeBalanceCurrency<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeBalanceCurrency')
}
# Accessor: TradePlatformComment column.
GetTradePlatformComment<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradePlatformComment')
}
# Accessor: TradeUserComment column.
GetTradeUserComment<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeUserComment')
}
# Accessor: TradeManagerComment column.
GetTradeManagerComment<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeManagerComment')
}
# Accessor: TradeUserTag column.
GetTradeUserTag<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeUserTag')
}
# Accessor: TradeManagerTag column.
GetTradeManagerTag<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeManagerTag')
}
# Accessor: TradeMagic column.
GetTradeMagic<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeMagic')
}
# Accessor: TradeMarginRateInitial column.
GetTradeMarginRateInitial<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeMarginRateInitial')
}
# Accessor: TradeStopLoss column.
GetTradeStopLoss<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeStopLoss')
}
# Accessor: TradeTakeProfit column.
GetTradeTakeProfit<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeTakeProfit')
}
# Accessor: TradeOpenConversionRate column.
GetTradeOpenConversionRate<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeOpenConversionRate')
}
# Accessor: TradeCloseConversionRate column.
GetTradeCloseConversionRate<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeCloseConversionRate')
}
# Accessor: TradeExpired column.
GetTradeExpired<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeExpired')
}
# Accessor: TradePosModified column.
GetTradePosModified<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradePosModified')
}
# Accessor: TradeProfitToUsdConversionRate column.
GetTradeProfitToUsdConversionRate<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeProfitToUsdConversionRate')
}
# Accessor: TradeUsdToProfitConversionRate column.
GetTradeUsdToProfitConversionRate<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeUsdToProfitConversionRate')
}
# Accessor: TradeBalanceToUsdConversionRate column.
GetTradeBalanceToUsdConversionRate<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeBalanceToUsdConversionRate')
}
# Accessor: TradeUsdToBalanceConversionRate column.
GetTradeUsdToBalanceConversionRate<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeUsdToBalanceConversionRate')
}
# Accessor: TradeMarginCurrencyToUsdConversionRate column.
GetTradeMarginCurrencyToUsdConversionRate<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeMarginCurrencyToUsdConversionRate')
}
# Accessor: TradeUsdToMarginCurrencyConversionRate column.
GetTradeUsdToMarginCurrencyConversionRate<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeUsdToMarginCurrencyConversionRate')
}
# Accessor: TradeMarginCurrency column.
GetTradeMarginCurrency<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeMarginCurrency')
}
# Accessor: TradeProfitCurrencyToUsdConversionRate column.
GetTradeProfitCurrencyToUsdConversionRate<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeProfitCurrencyToUsdConversionRate')
}
# Accessor: TradeUsdToProfitCurrencyConversionRate column.
GetTradeUsdToProfitCurrencyConversionRate<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeUsdToProfitCurrencyConversionRate')
}
# Accessor: TradeProfitCurrency column.
GetTradeProfitCurrency<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeProfitCurrency')
}
# Accessor: TradeSrcAssetToUsdConversionRate column.
GetTradeSrcAssetToUsdConversionRate<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeSrcAssetToUsdConversionRate')
}
# Accessor: TradeUsdToSrcAssetConversionRate column.
GetTradeUsdToSrcAssetConversionRate<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeUsdToSrcAssetConversionRate')
}
# Accessor: TradeDstAssetToUsdConversionRate column.
GetTradeDstAssetToUsdConversionRate<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeDstAssetToUsdConversionRate')
}
# Accessor: TradeUsdToDstAssetConversionRate column.
GetTradeUsdToDstAssetConversionRate<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeUsdToDstAssetConversionRate')
}
# Accessor: TradeSrcAssetCurrency column.
GetTradeSrcAssetCurrency<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeSrcAssetCurrency')
}
# Accessor: TradeSrcAssetAmount column.
GetTradeSrcAssetAmount<-function(){
  rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeSrcAssetAmount')
}
# Get Trade report field
GetTradeSrcAssetMovement<-function(){
rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeSrcAssetMovement')
}
# Get Trade report field
GetTradeDstAssetCurrency<-function(){
rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeDstAssetCurrency')
}
# Get Trade report field
GetTradeDstAssetAmount<-function(){
rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeDstAssetAmount')
}
# Get Trade report field
GetTradeDstAssetMovement<-function(){
rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeDstAssetMovement')
}
# Get Trade report field
GetTradeOptions<-function(){
rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeOptions')
}
# Get Trade report field
GetTradeOrderMaxVisibleAmount<-function(){
rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeOrderMaxVisibleAmount')
}
# Get Trade report field
GetTradeReducedOpenCommissionFlag<-function(){
rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeReducedOpenCommissionFlag')
}
# Get Trade report field
GetTradeReducedCloseCommissionFlag<-function(){
rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeReducedCloseCommissionFlag')
}
# Get Trade report field
GetTradeSymbolPrecision<-function(){
rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeSymbolPrecision')
}
# Get Trade report field
GetTradeAccountId<-function(){
rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeAccountId')
}
# Get Trade report field
GetTradeProfitCurrencyToReportConversionRate<-function(){
rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeProfitCurrencyToReportConversionRate')
}
# Get Trade report field
GetTradeMarginCurrencyToReportConversionRate<-function(){
rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeMarginCurrencyToReportConversionRate')
}
# Get Trade report field
GetTradeDstAssetToReportConversionRate<-function(){
rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeDstAssetToReportConversionRate')
}
# Get Trade report field
GetTradeSrcAssetToReportConversionRate<-function(){
rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeSrcAssetToReportConversionRate')
}
# Get Trade report field
GetTradeBalanceToReportConversionRate<-function(){
rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeBalanceToReportConversionRate')
}
# Get Trade report field
GetTradeProfitToReportConversionRate<-function(){
rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeProfitToReportConversionRate')
}
# Get Trade report field
GetStringPositionId<-function(){
rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetStringPositionId')
}
# Get Trade report field
GetTradeReportToBalanceConversionRate <- function(){
rClr::clrCallStatic('rTTManApi.rTTManApiHost', 'GetTradeReportToBalanceConversionRate')
}
|
b0436b6f88743b7e129b29f8b39796088bdc028a
|
ed0ffe8895fef2f342e333b5019f539a57e3cba6
|
/concepts/numeric_interactions_setup.R
|
f639ea2bd1149a5503330c64bd4056bdcc2ca71a
|
[] |
no_license
|
tjvananne/kaggle_zillow
|
7072a0ee83173ff7170da8a1824082e705754191
|
3151121fb8dd82a8642e1049fe7d836add5fe488
|
refs/heads/master
| 2021-01-23T21:45:54.131133
| 2017-10-16T04:34:32
| 2017-10-16T04:34:32
| 102,903,846
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,348
|
r
|
numeric_interactions_setup.R
|
# testing the setup of multiple interactions
library(gtools) # <-- combinations and permutations (for column name combos)
library(assertthat) # <-- for testing
library(xgboost) # <-- for testing the creation of DMatrices
library(caret) # <-- for easier scaling
# example with iris data:
df <- head(iris)
names(df) <- gsub("\\.", "_", tolower(names(df)))   # Sepal.Length -> sepal_length etc.
# isolate numeric fields
df_num <- df[, 1:4]
df_num
df_num_na <- data.frame(df_num) # <-- data.frame() constructor to create a copy of object
df_num_na[1, 3] <- NA   # inject one NA to test how model.matrix treats missing values
# don't do this, it won't line up with how model matrix handles the interactions
# # identify the interaction column names, combine those with the original column names
# combo_names_norep_mat <- gtools::combinations(n=ncol(df_num), r=2, v=names(df_num), set=T, repeats.allowed = F)
# combo_names <- apply(combo_names_norep_mat, 1, FUN=paste0, collapse="-")
# allnames <- c(names(df_num), combo_names)
# allnames
#' ideas for retaining column names while also leveraging the efficiency of sparse matrices...
#' take the initial dataframe of numeric values...
#' 1) limit it to only a single row (ideally one where
#' cases complete is equal to true, or at least test what happens with NA values).
#' 2) Run this 1-row version of the data.frame through stats::model.matrix to create a dense
#' matrix that will have the column names stored in "attr(<matrix name>, "dimnames")[[2]]"
#' 3) Save the column names somewhere (being sure to gsub out the ":" between interaction cols)
#' 4) generate the sparse matrix
# concise function ---------------------------------------------------------------------------------------------
#' one thing to keep in mind, model matrix with formula of (~ .^4) will make all of the
#' interactions UP THROUGH four variables. You'll be returned all of the raw numeric
#' variables, all of the two-way interactions, all of the three-way interactions, AND
#' finally all of the four-way interactions. So don't do a "^2" and a "^3" and a "^4"
#' because you'll have a significant amount of duplication. Just do the maximum amount
#' of interaction that you plan on having at all, then feature select from there. I would
#' suggest doing something high at first like "^7" (crazy, I know) and then remove all of
#' the non-interaction variables.
# write function ---------------------------------------------------------------------------
# Compute all multiplicative interactions among the columns of `p_df`, up
# through seven-way products (model.matrix's ~ .^7 expansion).
#
# p_df              data.frame of numeric predictors; no scaling/centering is
#                   done here -- do that beforehand if desired
# just_interactions if TRUE (default), drop the ncol(p_df) raw columns and
#                   keep only the interaction columns
# sparse            if TRUE (default), build a Matrix::sparse.model.matrix;
#                   otherwise a dense stats::model.matrix
#
# Returns a list of length 2: [[1]] the (sparse or dense) matrix of terms,
# [[2]] its column names with ":" replaced by "_i_".
#
# NOTE: rows containing NA are silently dropped by model.matrix under the
# default na.action ("na.omit").
calc_7_way_interaction <- function(p_df, just_interactions=TRUE, sparse=TRUE) {
  # "- 1" removes the intercept column, leaving raw terms + interactions.
  if(sparse) {
    mat <- Matrix::sparse.model.matrix(~ .^7 - 1, p_df)
  } else {
    mat <- model.matrix(~ .^7 - 1, p_df)
  }

  # model.matrix places the ncol(p_df) raw columns first, so everything after
  # them is an interaction.  drop = FALSE keeps the matrix shape (and its
  # dimnames) even when only one interaction column remains.
  if(just_interactions) {
    nbr_cols <- ncol(p_df)
    mat <- mat[, (nbr_cols + 1):ncol(mat), drop = FALSE]
  }

  # Column names live under "Dimnames" for Matrix sparse matrices and under
  # "dimnames" for dense base matrices.
  if(sparse) {
    col_names <- attr(mat, "Dimnames")[[2]]
  } else {
    col_names <- attr(mat, "dimnames")[[2]]
  }

  # Replace ":" so interaction names are safe to use as data.frame names.
  col_names <- gsub(":", "_i_", col_names)

  # BUG FIX: the original returned list(spmat, col_names); `spmat` was never
  # defined in this function, so every call failed with "object not found".
  return_list <- list(mat, col_names)
  return(return_list)
}
# --- caret::preProcess "range" scaling demo on airquality ------------------
pprange <- preProcess(airquality, method="range")
aq_ranged <- predict(pprange, airquality)
hist(aq_ranged$Ozone, col='light blue', breaks=20)
hist(airquality$Ozone, col='light green', breaks=20)
head(aq_ranged$Ozone)
summary(aq_ranged$Ozone)
summary(airquality$Ozone)
# Smoke-test the interaction helper above (default sparse=TRUE needs Matrix).
x <- calc_7_way_interaction(airquality)
head(x)
dim(x[[1]])
x[[2]]
# Cross validated question:
# taking a small sample of "airquality" data
set.seed(2)
my_aq <- data.frame(airquality[sample(1:nrow(airquality), 100), ])
# create a scaled/centered version
my_aq_pp_scaler <- caret::preProcess(my_aq, method=c("center", "scale"))
my_aq_scaled <- predict(my_aq_pp_scaler, my_aq)
# computing interactions with pre-scaled data
# (model.matrix drops NA rows under the default na.action, so the result can
# have fewer rows than my_aq_scaled)
denmat_prescaled <- as.data.frame(model.matrix(~ .^2 - 1, data=my_aq_scaled))
hist(denmat_prescaled$`Ozone:Solar.R`, col='light blue', main="Pre-interaction-scale: Not Rescaled")
# 1) do I need to scale/center again?
denmat_pp_scaler <- caret::preProcess(denmat_prescaled, method=c("center", "scale"))
denmat_prescaled_scaled <- predict(denmat_pp_scaler, denmat_prescaled)
hist(denmat_prescaled_scaled$`Ozone:Solar.R`, col='light pink', main="Pre-interaction-scale: Also Rescaled")
# postscaled - not scaling until AFTER interactions have been computed
denmat2 <- model.matrix(~ .^3 - 1, data=my_aq)
denmat3 <- denmat2[, (ncol(my_aq) + 1):ncol(denmat2)]
df3 <- as.data.frame(denmat3)  # NOTE(review): denmat3/df3 are never used below
denmat2_pp_scaler <- caret::preProcess(denmat2, method=c("center", "scale"))
denmat_postscaled <- as.data.frame(predict(denmat2_pp_scaler, denmat2))
hist(denmat_postscaled$`Ozone:Solar.R`, col='light green', main="No Pre-scale: Just Post-interaction-scale")
# examine difference
# NOTE(review): denmat_scaled / denmat2_scaled are not defined until further
# below, so the next six lines error when the script is run top to bottom.
denmat_scaled[1, 7:17]
denmat2_scaled[1, 7:17]
denmat_df <- as.data.frame(denmat_scaled)
denmat2_df <- as.data.frame(denmat2_scaled)
hist(denmat_df$`Ozone:Solar.R`, col='light blue', breaks=40)
hist(denmat2_df$`Ozone:Solar.R`, col='light blue', breaks=40)
# end cross validated question:
my_aq <- data.frame(airquality)
sapply(my_aq, function(x) sum(is.na(x)))
sum(complete.cases(my_aq)) # 111 complete cases; 153 rows total
my_aq_pp_scaler <- caret::preProcess(my_aq, method=c("center", "scale"))
my_aq_scaled <- predict(my_aq_pp_scaler, my_aq)
# prescaled
denmat <- model.matrix(~ .^4 - 1, data=my_aq_scaled)
denmat_pp_scaler <- caret::preProcess(denmat, method=c("center", "scale"))
denmat_scaled <- predict(denmat_pp_scaler, denmat)
# postscaled
denmat2 <- model.matrix(~ .^4 - 1, data=my_aq)
denmat2_pp_scaler <- caret::preProcess(denmat2, method=c("center", "scale"))
denmat2_scaled <- predict(denmat2_pp_scaler, denmat2)
# examine difference
denmat_scaled[1, 7:17]
denmat2_scaled[1, 7:17]
denmat_df <- as.data.frame(denmat_scaled)
denmat2_df <- as.data.frame(denmat2_scaled)
hist(denmat_df$`Ozone:Solar.R`, col='light blue', breaks=40)
hist(denmat2_df$`Ozone:Solar.R`, col='light blue', breaks=40)
denmat_scaled[1, 1:10] == denmat2_scaled[1, 1:10]
round(denmat_scaled, 3) == round(denmat2_scaled, 3)
# Clean Procedural code ----------------------------------------------------------------------------------------
# 1: limit data to single row
df_num_onerow <- df_num[1,]
df_num_onerow_na <- df_num_na[1, ]
# # 2: run the single row version through stats::model.matrix to create dense matrix with interactions
#
# # DEFAULT options()$na.action is "na.omit" -- I want to change it to "na.pass" to retain NAs in the resulting model matrix
# options()$na.action
# options(na.action="na.pass")
#
# # run the single row through to get dense version of matrix
# df_num_densemat <- stats::model.matrix(~ .^2 - 1, data=df_num_onerow)
# df_num_densemat_na <- stats::model.matrix(~ .^2 - 1, data=df_num_onerow_na)
#
# assert_that(all(dim(df_num_densemat) == dim(df_num_densemat)))
#
# # 3: save the names of the dense matrix to a character vector object
#
# df_num_densemat_names <- attr(df_num_densemat, "dimnames")[[2]]
# df_num_densemat_names <- gsub(":", "-", df_num_densemat_names) # <-- prefer dash over colon
# df_num_densemat_names_justinter <- df_num_densemat_names[(ncol(df_num) + 1):ncol(df_num_densemat)]
# df_num_densemat_names # <-- all names including the raw columns
# df_num_densemat_names_justinter # <-- just the names of the interaction columns
# 4: run the full data through the sparse model matrix
#' ok so steps 2 and 3 are not necessary... even sparse matrices capture their respective column names
df_num_sparmat <- Matrix::sparse.model.matrix(~ .^2 - 1, data=df_num)
df_num_sparmat_na <- Matrix::sparse.model.matrix(~ .^2 - 1, data=df_num_na)
# get the column names
attributes(df_num_sparmat)
attr(df_num_sparmat, "Dimnames")[[2]]
dim(df_num_sparmat)
assert_that(all(dim(df_num_sparmat) == dim(df_num_sparmat_na)))
# NOTE(review): df_num_densemat only exists in the commented-out step 2 above,
# so the next assertion errors when run as-is.
assert_that(all(ncol(df_num_sparmat) == ncol(df_num_densemat)))
# 5: generate xgb.DMatrix
df_num_dmat <- xgb.DMatrix(df_num_sparmat)
df_num_dmat
attributes(df_num_dmat)
#' oh wait,
# Dirty procedural stuff down here -------------------------------------------------------------
# dense matrix generation (minus 1 is to remove the intercept of all 1's)
mat_inter <- stats::model.matrix(~ .^3 - 1, data=df_num)
mat_just_inter <- mat_inter[, (ncol(df_num) + 1):ncol(mat_inter)]
mat_inter
attributes(mat_inter)
# NOTE(review): this shadows base::colnames for the rest of the session.
colnames <- attr(mat_inter, "dimnames")[[2]]
mat_just_inter
# convert to df
df_inter <- as.data.frame(mat_inter)
# NOTE(review): `allnames` (and `combo_names` below) only exist in the
# commented-out block near the top of the file; these lines error as-is.
names(df_inter) <- allnames
df_inter
# convert only the interactions to df
df_just_inter <- as.data.frame(mat_just_inter)
names(df_just_inter) <- combo_names
df_just_inter
# sparse matrix generation (minus 1 is to remove the intercept of all 1's)
spmat_inter <- Matrix::sparse.model.matrix(~ .^2 - 1, data=df_num)
spmat_inter
# equal dimensions; equal values
# NOTE(review): mat_inter used ^3 but spmat_inter uses ^2, so the dimension
# assertion below fails; both formulas need the same interaction order.
assert_that(all(dim(mat_inter) == dim(spmat_inter)))
assert_that(all(spmat_inter == mat_inter))
# four columns, six rows; I want multiplication interaction between all of these to start with
combo_indx_norep <- gtools::combinations(n=ncol(df), r=2, v=1:ncol(df), set=T, repeats.allowed=F)
combo_indx_norep
# NOTE(review): `mymat` is never defined anywhere in this file.
for(i in 1:nrow(mymat)) {
  print(i)
}
|
a856804067dee063c51966b3bb2d6c53f09984f6
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/qdap/examples/paste2.Rd.R
|
e40974cb5d9d8a374c77e0c08022910b78ac40ff
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,083
|
r
|
paste2.Rd.R
|
# Extracted example code for qdap::paste2 / qdap::colpaste2df (pulled from the
# package's paste2.Rd help page).  Everything below the library() call is the
# original "Not run" example block and is intentionally commented out (##D).
library(qdap)
### Name: paste2
### Title: Paste an Unspecified Number Of Text Columns
### Aliases: paste2 colpaste2df
### Keywords: paste
### ** Examples
## Not run:
##D ## paste2 examples
##D v <- rep(list(state.abb[1:8], month.abb[1:8]) , 5)
##D n <- sample(5:10, 1)
##D paste(v[1:n]) #odd looking return
##D paste2(v[1:n])
##D paste2(v[1:n], sep="|")
##D paste2(mtcars[1:10,], sep="|")
##D paste(mtcars[1:10,], sep="|") #odd looking return
##D paste2(CO2[1:10,], sep="|-|")
##D
##D ## colpaste2df examples
##D A <- list(
##D a = c(1, 2, 3),
##D b = qcv(mpg, hp),
##D c = c("disp", "am")
##D )
##D B <- list(
##D c(1, 2, 3),
##D new.col = qcv(mpg, hp),
##D c("disp", "am")
##D )
##D E <- list(
##D c(1, 2, 3, 4, 5),
##D qcv(mpg, hp),
##D c("disp", "am")
##D )
##D
##D colpaste2df(head(mtcars), A)
##D colpaste2df(head(mtcars), B)
##D colpaste2df(head(mtcars), E)
##D colpaste2df(head(mtcars), qcv(am, disp, drat), sep ="_", name.sep = "|")
##D colpaste2df(head(CO2), list(c(1, 2, 3, 4, 5), qcv("conc", "uptake")))
## End(Not run)
|
dc155b7ef202cace10bdb830b03092335e42418f
|
f245521e63b59e37470070092b7d1d38a87b2e48
|
/libs/plotAA.r
|
1f93740e03c91e8bfcd7862d7ffa61d20aa2c2c0
|
[] |
no_license
|
douglask3/UKESM-land-eval
|
3c10d10eba32bcef1e7b2a057db3b22fdf2fd621
|
aad3f6902e516590be02585ad926bfe1cf5770bf
|
refs/heads/master
| 2021-08-17T06:32:10.736606
| 2021-07-14T12:57:13
| 2021-07-14T12:57:13
| 242,747,830
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,063
|
r
|
plotAA.r
|
# Default breaks and sequential green palette for annual-average maps.
limits_aa = c(0, 0.1, 0.2, 0.5, 1, 2, 5)
aa_cols = c('#ffffe5', '#f7fcb9', '#d9f0a3',
            '#addd8e', '#78c679', '#41ab5d',
            '#238443', '#006837', '#004529')

# Plot the annual average of raster series `r` as a standard map panel.
# A list input is first collapsed layer-wise via layer.apply(mean).  When `r`
# has more than one layer, a per-cell uncertainty layer (eFromRange) is shown
# alongside the mean.  `lab` is the panel letter, `name` the y-axis label,
# `units` the legend units.  Returns `r` (possibly collapsed) so calls can be
# chained.  NOTE(review): `regions` is accepted but currently unused.
plotAA <- function(r, lab = '', name = '', units = '',
                   cols = aa_cols, limits = limits_aa, regions = NULL,
                   addLegend = FALSE) {
    if (is.list(r)) {
        r = layer.apply(r, mean)
    }
    annual_avg = mean(r)
    uncertainty = if (nlayers(r) == 1) NULL else eFromRange(r)
    plotStandardMap(annual_avg, e = uncertainty, limits = limits, cols = cols)
    mtext(name, side = 2, adj = 0.9, line = -0.2)
    addLetLab(lab)
    if (addLegend) {
        StandardLegend(annual_avg, limits = limits, cols = cols, units = units,
                       add = TRUE, oneSideLabels = FALSE)
    }
    return(r)
}
# Per-cell relative uncertainty derived from the layer-wise range of `r`.
# If the range straddles zero anywhere, uncertainty is half the span scaled
# by the largest absolute bound, forced to 1 where a cell's range crosses
# zero; otherwise it is 1 - min/max.
eFromRange <- function(r) {
    lo_hi = range(r)
    lo = lo_hi[[1]]
    hi = lo_hi[[2]]
    spans_zero = any(lo[] < 0, na.rm = TRUE) && any(hi[] > 0, na.rm = TRUE)
    if (spans_zero) {
        err = abs(hi - lo) / max(abs(lo_hi)) / 2
        err[hi > 0 & lo < 0] = 1
    } else {
        err = 1 - lo / hi
    }
    return(err)
}
|
4ef4cc4636df45cc46c32b387e1c519035507e68
|
68e1ac98bf1c17a77f1074bf190e1acb763df791
|
/R/plotGeneBarPlot.R
|
75845142dae6775cb9df24097299cfc78e03f8af
|
[
"MIT"
] |
permissive
|
mukundvarma/pledger
|
db4656ce07140354f2bc65c65e04962fe57f3013
|
c41a865a541dc5038d0ebe7b7f2766abf4ad1b7b
|
refs/heads/master
| 2021-05-15T22:28:26.458772
| 2017-10-12T21:31:14
| 2017-10-12T21:31:14
| 106,713,748
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,124
|
r
|
plotGeneBarPlot.R
|
#' Barplot for gene expression values
#'
#' plotGeneBarPlot takes as input a counts matrix (or dataframe), a metadata dataframe
#' and the names of two metadata variables to plot a barplot with error bars of expression
#' values grouped along the x axis and with different fill colors to show
#' summarized expression of a gene across multiple conditions
#'
#' @param counts.mat Counts matrix (or dataframe) containing expression values,
#' columns are samples and rows are genes.
#' @param metadata Metadata dataframe, rows are samples and columns are metadata
#' variables.
#' @param gene Gene whose expression variables need to be plotted (character).
#' @param fill.variable Metadata column name which serves as fill variable for
#' barplot
#' @param x.variable Metadata column according to which x-axis of plot is arranged
#' @param cols Colors to use for fill
#'
#' @examples
#'
#' plotGeneBarPlot(counts.mat = counts.tmm, metadata=md, gene="Actb",
#' fill.variable="Celltype", x.variable="Stimulation")
# Group-wise summary of `measurevar` in `data`: N, mean, sd, standard error
# of the mean, and a confidence-interval half-width (t-based, default 95%).
# Grouping columns are given in `groupvars`; `na.rm` controls NA handling in
# N/mean/sd; `.drop` is passed through to plyr::ddply.
summarySE <- function(data=NULL, measurevar, groupvars=NULL, na.rm=FALSE,
                      conf.interval=.95, .drop=TRUE) {

  # NA-aware count: with na.rm=TRUE, count only non-missing values.
  count_values <- function(x, na.rm = FALSE) {
    if (na.rm) {
      sum(!is.na(x))
    } else {
      length(x)
    }
  }

  # Per-group N / mean / sd of the measure column.
  summarised <- ddply(
    data, groupvars, .drop = .drop,
    .fun = function(chunk, col) {
      c(N = count_values(chunk[[col]], na.rm = na.rm),
        mean = mean(chunk[[col]], na.rm = na.rm),
        sd = sd(chunk[[col]], na.rm = na.rm))
    },
    measurevar
  )

  # Rename the generic "mean" column back to the measure's own name.
  summarised <- rename(summarised, c("mean" = measurevar))

  summarised$se <- summarised$sd / sqrt(summarised$N)

  # CI half-width: multiply the SE by the t quantile for the requested
  # confidence level (e.g. .975 for conf.interval = .95), df = N - 1.
  t_mult <- qt(conf.interval / 2 + .5, summarised$N - 1)
  summarised$ci <- summarised$se * t_mult

  summarised
}
# Bar plot (mean +/- SE) of one gene's expression, grouped along the x axis by
# `x.variable` and colored by `fill.variable`.
#
# counts.mat    expression matrix/data.frame, genes in rows, samples in columns
# metadata      data.frame of sample annotations; rows aligned with the
#               columns of counts.mat
# gene          row name of the gene to plot
# fill.variable metadata column name used as the fill/grouping variable
# x.variable    metadata column name used for the x axis
# cols          fill colors passed to scale_fill_manual
#
# NOTE(review): the roxygen above documents "plotGeneBarPlot", but the
# function has always been named plotGeneBoxPlot; the name is kept so
# existing callers keep working.
plotGeneBoxPlot <- function(counts.mat, metadata, gene, fill.variable,
    x.variable, cols = c("#e41a1c", "#377eb8", "#4daf4a", "#984ea3")) {
    # One row per sample: metadata plus this gene's expression values.
    pdata = cbind(metadata, unname(counts.mat[gene, ] %>% as.numeric))
    # BUG FIX: original `pdata[order(pdata[fill.variable,], ]` was a syntax
    # error (unbalanced parentheses) and indexed rows instead of a column.
    pdata = pdata[order(pdata[[fill.variable]]), ]
    pdata$sample = rownames(pdata)
    # After adding `sample`, the expression column is second-to-last.
    colnames(pdata)[ncol(pdata) - 1] = "expr"
    plotdf = summarySE(pdata, measurevar = "expr", groupvars = c(fill.variable, x.variable))
    # print(pdata)
    # BUG FIX: aes_string() takes strings; the bare symbol `expr` was not
    # defined in this scope, so y must be the string "expr".
    ggplot(plotdf, aes_string(x = x.variable, y = "expr", fill = fill.variable)) +
        geom_col(position = "dodge") +
        geom_errorbar(aes(ymin=expr-se, ymax=expr+se),position=position_dodge(0.9), width=0.2) +
        theme_bw() +
        theme(plot.title = element_text(size = 24)) +
        scale_fill_manual(values = cols) +
        labs(title = gene, y = paste(gene,
            "Expression"), fill = fill.variable, x = x.variable) +
        theme(strip.text.x = element_text(size = 16, face = "bold"))
}
|
d3599674b63c15837356037b7b59ca5e7ab06e97
|
08ea0442af72551490a4a8e5107ed0c752c3299f
|
/R/GC.adjust.R
|
dc5117c28458d4ac0d9364762538f3cd4f96fd08
|
[] |
no_license
|
cran/saasCNV
|
baaf78e61e70af648ffdf220f63e03771c53e7a6
|
2b74d199913425db6929decc04118ebbd55b1803
|
refs/heads/master
| 2020-12-31T04:56:39.453872
| 2016-05-18T02:04:56
| 2016-05-18T02:04:56
| 59,067,822
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,068
|
r
|
GC.adjust.R
|
GC.adjust <-
function(data, gc, maxNumDataPoints = 10000)
{
    ## Loess-based GC-content normalization of per-marker log2 ratios.
    ##
    ## data             data.frame with columns chr, position, log2ratio
    ## gc               data.frame with columns chr, position, GC, where
    ##                  positions fall on a 1-based 1-kb grid (k*1000 + 1)
    ## maxNumDataPoints maximum number of rows used to fit the loess curve
    ##
    ## Returns `data` restricted to usable rows, with log2ratio replaced by
    ## its GC-adjusted value and the original kept in log2ratio.woGCAdj.

    ## Match each data row to its GC bin: round the position to the nearest
    ## kb and shift by +1 so it lines up with the grid used in `gc`.
    data$marker <- paste0(data$chr, ":", round(data$position/1000,0)*1000+1)
    gc$marker <- paste0(gc$chr, ":", gc$position)
    data <- merge(x=data, y=gc[,c("marker","GC")], by="marker", all=FALSE)
    data <- data[order(data$chr, data$position),]
    data <- data[,-which(names(data)=="marker")]

    ## Keep the unadjusted signal for reference.
    data$log2ratio.woGCAdj <- data$log2ratio
    ratio <- 2^data$log2ratio

    ## Drop rows unusable for the fit (missing GC or non-positive ratio).
    idx <- which(!is.na(data$GC) & !is.na(ratio) & ratio > 0)
    data <- data[idx,]
    ratio <- ratio[idx]

    ## Subsample at most maxNumDataPoints rows for the loess fit.
    ## BUG FIX: the original reused the stale filtering `idx` whenever
    ## nrow(data) <= maxNumDataPoints, indexing past the end of the already
    ## filtered data and feeding NA rows into the loess fit.
    if (nrow(data) > maxNumDataPoints) {
        idx <- sample(seq_len(nrow(data)), maxNumDataPoints)
    } else {
        idx <- seq_len(nrow(data))
    }
    gcData <- data.frame(gc = data$GC[idx], ratio = ratio[idx])
    gc.fit <- loess(ratio ~ gc, gcData)

    ## Normalize by the fitted GC trend, rescaled to median 1 so the overall
    ## signal level is preserved.
    normCoef <- predict(gc.fit, data.frame(gc = data$GC))
    normCoef <- normCoef/median(normCoef, na.rm = TRUE)
    ratio <- ratio/normCoef
    data$log2ratio <- log2(ratio)

    ## Drop rows where normalization produced NA or infinite values.
    idx <- which(!is.na(data$log2ratio) & data$log2ratio < Inf & data$log2ratio > -Inf)
    data <- data[idx,]
    return(data)
}
|
ba83053f7b3b09c74e49dc0924f0664a324dfe17
|
9a8d950e524c52daf2baf77b84aaef20b3b64bdc
|
/Clean and Merge Waves 1-4.R
|
7e6b7520d8ffb9d6c2245c6b9625e31e4354acba
|
[] |
no_license
|
tylerleigh94/Prep-for-Wave-5
|
be4f20132a7e3eca7a1577691b68a0765f893426
|
db24204e30d49fc3046b41782d28a0276f945f0d
|
refs/heads/master
| 2020-09-08T23:07:51.476955
| 2019-11-23T16:44:17
| 2019-11-23T16:44:17
| 221,271,418
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,272
|
r
|
Clean and Merge Waves 1-4.R
|
#### Libraries ####
library(easypackages)
libs <- c("tidyr", "dplyr", "car", "haven")
libraries(libs)

#### Read in data ####
dat.1 <- read_sav("Data/wave1 clean.sav")
dat.2 <- read_sav("Data/wave 2 cleaned.sav")
dat.3 <- read_sav("Data/wave 3 cleaned.sav")
dat.4 <- read_sav("Data/wave 4 cleaned.sav")

#### Merge datasets together ####
dat.12 <- full_join(dat.1, dat.2, by = "CaseId")
dat.123 <- full_join(dat.12, dat.3, by = "CaseId")
dat.1234all <- full_join(dat.123, dat.4, by = "CaseId")

#### Fix wave variables ####
# wave.x / wave.y / wave / wave_4 hold the wave number (or NA after the full
# joins) contributed by each file.  Recode each to a 0/1 participation flag.
# Vectorized replacement for the original row-by-row loop -- identical
# results: NA -> 0, matching wave number -> 1, anything else -> 0.
dat.1234all$wave_1 <- as.numeric(!is.na(dat.1234all$wave.x) & dat.1234all$wave.x == 1)
dat.1234all$wave_2 <- as.numeric(!is.na(dat.1234all$wave.y) & dat.1234all$wave.y == 2)
dat.1234all$wave_3 <- as.numeric(!is.na(dat.1234all$wave) & dat.1234all$wave == 3)
dat.1234all$wave_4 <- as.numeric(!is.na(dat.1234all$wave_4) & dat.1234all$wave_4 == 4)

#### Save data frame ####
saveRDS(dat.1234all, file = "waves1234all.rds")
write_sav(dat.1234all, path = "waves1234all.sav")
|
7ea24f0d578b294da28fbc2f9426e977b28e59b0
|
17dc451c33b8726441d03b1d604d7e6ed4a984b8
|
/R/ConvertSexFormat.R
|
d7f33455c16ba1bf5491468f580cfcca66b4bf32
|
[] |
no_license
|
TWilliamBell/angler
|
ad8d57bb3b902a0e87b20b9b3f8e2844c7cefd1d
|
ffa59d8aa92e256b673423c5572c3136ae37e53f
|
refs/heads/master
| 2022-03-12T16:29:48.936809
| 2022-03-03T17:00:11
| 2022-03-03T17:00:11
| 140,886,555
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 846
|
r
|
ConvertSexFormat.R
|
#' Convert Sex Data to format used by our Functions
#'
#' Recodes a sex vector to the standardized lowercase codes "f"/"m" and
#' returns it as a factor.  Values matching neither coding (including NA)
#' are left unchanged and reported on the console.
#'
#' @param Sex A character or factor vector that includes the sex of each
#'   individual (the result is always returned as a factor).
#' @param female.string How sex for females is recorded currently (defaults to "F").
#' @param male.string How sex for males is recorded currently (defaults to "M").
#'
#' @export
#'
#' @examples
#' data(pupfish) ## In geomorph.
#' ConvertSexFormat(pupfish$Sex)
ConvertSexFormat <- function(Sex, female.string = "F", male.string = "M") {
  Sex <- as.character(Sex)
  # Vectorized recode.  This replaces the original element-by-element loop,
  # which broke on empty input (1:length(Sex) iterating over c(1, 0)) and
  # errored on NA values instead of reporting them.
  is.female <- !is.na(Sex) & Sex == female.string
  is.male <- !is.na(Sex) & Sex == male.string & !is.female
  # Report entries that match neither coding (same message as before).
  for (i in which(!is.female & !is.male)) {
    cat(paste("Cannot convert the ", i, "th term to the standardized format, \nconvert manually?"))
  }
  Sex[is.female] <- "f"
  Sex[is.male] <- "m"
  as.factor(Sex)
}
|
b74aba3d1b04a70e9d134cb8b8ff81b45a474a0b
|
8595bf1f5409b247a416e3d313f922e1c1425968
|
/code/CalculatePredictionMetrics.R
|
d5c016c65fc797686dc1abe7e23213a682f5619e
|
[] |
no_license
|
srp33/BCRiskPathways
|
29a88ff38ac1f50e559bcf0a835058d8abd2f0ce
|
fa9cb603373a196d1370bd8ffd4b5d7d883f41de
|
refs/heads/master
| 2021-01-19T03:10:14.984908
| 2015-12-29T14:55:05
| 2015-12-29T14:55:05
| 31,468,999
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,534
|
r
|
CalculatePredictionMetrics.R
|
# Command-line driver: computes the observed AUC for a predictions file plus
# an empirical p-value from a permutation null distribution.
# NOTE(review): argument positions 7-11 assume a specific invocation style
# (extra leading argv entries from R itself); confirm against the caller.
inFilePath = commandArgs()[7]
targetClass = commandArgs()[8]
numPermutations = as.integer(commandArgs()[9])
outActualFilePath = commandArgs()[10]
outEmpiricalFilePath = commandArgs()[11]
suppressPackageStartupMessages(library(ROCR))
#suppressPackageStartupMessages(library(rms))
# Area under the ROC curve for binary labels `actual` (0/1) against predicted
# probabilities `prob`, computed via ROCR.
calcAuc = function(actual, prob)
{
  pred = prediction(prob, actual)
  perf = performance(pred, measure="auc", x.measure="cutoff")
  # perf@y.values is a one-element list holding the AUC; extract it directly.
  # (The original deparse()/as.numeric() round-trip was redundant and could
  # lose floating-point precision.)
  auc = as.numeric(perf@y.values[[1]])
  return(auc)
}
#calcCStatistic = function(actual, prob)
#{
# return(val.prob(prob, actual, pl=FALSE)[2])
#}
data = read.table(inFilePath, sep="\t", stringsAsFactors=FALSE, header=TRUE, row.names=1, check.names=FALSE)

# Column 1: class label; column 3: predicted probability of the target class.
actualClasses = as.integer(data[,1]==targetClass)
actualProbabilities = as.numeric(data[,3])

actualAuc = calcAuc(actualClasses, actualProbabilities)

# Null distribution: AUCs after permuting the predicted probabilities.
# Preallocated instead of growing with c() inside the loop (which was O(n^2));
# seq_len also avoids the 1:0 trap when numPermutations is zero.
permutedAucs = numeric(numPermutations)
for (i in seq_len(numPermutations))
{
  set.seed(i)
  permutedProbabilities = sample(actualProbabilities, length(actualProbabilities))
  permutedAucs[i] = calcAuc(actualClasses, permutedProbabilities)
}

# Empirical p-value with the standard +1/n correction, capped at 1.
empiricalP = sum(permutedAucs >= actualAuc) / numPermutations
empiricalP = empiricalP + 1 / numPermutations
if (empiricalP > 1)
  empiricalP = 1

#cStatistic = calcCStatistic(actualClasses, actualProbabilities)

write(actualAuc, outActualFilePath)
write.table(empiricalP, outEmpiricalFilePath, col.names=FALSE, row.names=FALSE, quote=FALSE)

print("AUC:")
print(actualAuc)
print("Empirical p-value:")
print(empiricalP)
#print("C-statistic:")
#print(cStatistic)
|
c2696ef3f8f4c6938597d6f23583219ab137a9b7
|
04bf444bf40498ba6672d8558b6aac2e7b2c8031
|
/tidyabs.R
|
a116a31d9651d6ac02bac88716c8b60d3db4f314
|
[] |
no_license
|
triadicaxis/quickr
|
dea5bb836dce9ece41c614db002bf7477f0a70e2
|
784723c3ac9a43304257788abcd7d0a2dc2e066a
|
refs/heads/master
| 2020-03-11T11:10:19.392031
| 2019-10-21T14:54:12
| 2019-10-21T14:54:12
| 129,503,198
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,071
|
r
|
tidyabs.R
|
# install.packages("devtools")
# install.packages("readabs")
# devtools::install_github("ianmoran11/tidyABS")
#####################################################################
library(tidyABS) ## Ian Moran
library(readabs) ## Matt Cowgill
library(tidyverse) ## Hadley Wickham
#####################################################################
## Ian's example: tidy a messy ABS spreadsheet into a long data frame
df <-
  tidyABS_example("australian-industry.xlsx") %>%
  process_sheet(sheets = "Table_1") %>%
  assemble_table_components()
class(df$value)
#> [1] "character"
## values come back as character, not numeric -- see the issue link below
unique(df$value[duplicated(df$value)]) %>% head()
#> [1] "482" "485" "1005" "23643" "114" "104"
df %>% str()
## submitted github issue https://github.com/ianmoran11/tidyABS/issues/13
## more example datasets
tidyABS_example()
df <-
  tidyABS_example("consumer-price-index.xlsx") %>%
  # process_ABS_sheet(sheets = "Table_1") %>%
  # NOTE(review): the processing step above is commented out, so the raw
  # example path is piped straight into assemble_table_components().
  assemble_table_components()
#####################################################################
## Matt's example: download ABS catalogue tables directly by catalogue number
df <- read_abs("6345.0")
df <- read_abs("5204.0", tables = c("63", "64"))
|
f2257e9807bce5895ee4e946caa0e087eda1c9cb
|
ee8733c46c91949478b44143e4977ca0ca857968
|
/man/vim.norm.Rd
|
85cf4c11e2a7a614bb8e918579297bae543e7d52
|
[] |
no_license
|
holgerschw/logicFS
|
0a7919ef1012814b83a114dbc485e8be3d21e7ae
|
ed8b0be37da919754b39e1a46e793e253b06ddaf
|
refs/heads/master
| 2021-06-06T01:10:52.594711
| 2020-04-12T21:34:42
| 2020-04-12T21:34:42
| 148,649,928
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,278
|
rd
|
vim.norm.Rd
|
\name{vim.norm}
\alias{vim.norm}
\alias{vim.signperm}
\title{Standardized and Sign-Permutation Based Importance Measure}
\description{
Computes a standarized or a sign-permutation based version of either the Single Tree Measure,
the Quantitative Response Measure, or the Multiple Tree Measure.
}
\usage{
vim.norm(object, mu = 0)
vim.signperm(object, mu = 0, n.perm = 10000, n.subset = 1000,
version = 1, adjust = "bonferroni", rand = NA)
}
\arguments{
\item{object}{either the output of \code{\link{logicFS}} or \code{\link{vim.logicFS}}
with \code{addMatImp = TRUE}, or the output of \code{\link{logic.bagging}}
with \code{importance = TRUE} and \code{addMatImp = TRUE}.}
\item{mu}{a non-negative numeric value against which the importances are tested. See \code{Details}.}
\item{n.perm}{the number of sign permutations used in \code{vim.signperm}.}
\item{n.subset}{an integer specifying how many permutations should be considered at once.}
\item{version}{either \code{1} or \code{2}. If \code{1}, then the importance measure is computed
by 1 - padj, where padj is the adjusted p-value. If \code{2}, the importance measure is determined
by -log10(padj), where a raw p-value equal to 0 is set to 1 / (10 * \code{n.perm}) to avoid
  infinite importances.
}
\item{adjust}{character vector naming the method with which the raw permutation based
p-values are adjusted for multiplicity. If \code{"qvalue"}, the function \code{qvalue.cal}
from the package \code{siggenes} is used to compute q-values. Otherwise,
\code{p.adjust} is used to adjust for multiple comparisons. See \code{p.adjust} for all
other possible specifications of \code{adjust}. If \code{"none"}, the raw p-values will
be used. For more details, see \code{Details}.}
\item{rand}{an integer for setting the random number generator in a reproducible case.}
}
\details{
In both \code{vim.norm} and \code{vim.signperm}, a paired t-statistic is computed for each
prime implicant, where the numerator is given by \eqn{VIM - }\code{mu} with VIM being the
single or the multiple tree importance, and the denominator is the corresponding standard
error computed by employing the \code{B} improvements of the considered prime implicant
in the \code{B} logic regression models, where VIM is the mean over these
\code{B} improvements.
Note that in the case of a quantitative response, such a standardization is not necessary.
Thus, \code{vim.norm} returns a warning when the response is quantitative, and \code{vim.signperm}
does not divide \eqn{VIM - }\code{mu} by its sample standard error.
Using \code{mu = 0} might lead to calling a prime implicant important, even though it actually
shows only improvements of 1 or 0. When considering the prime implicants, it might therefore
be helpful to set \code{mu} to a value slightly larger than zero.
%A rule of thumb might be to set \code{mu} to about one third of \code{diff}, where a prime implicant
%should explain, i.e.\ be true for, at least \code{diff} more cases than controls to be considered
%as important.
In \code{vim.norm}, the value of this t-statistic is returned as the standardized importance
of a prime implicant. The larger this value, the more important is the prime implicant. (This applies
to all importance measures -- at least for those contained in this package.) Assuming normality,
a possible threshold for a prime implicant to be considered as important is the \eqn{1 - 0.05 / m} quantile
of the t-distribution with \eqn{B - 1} degrees of freedom, where \eqn{m} is the number of prime implicants.
In \code{vim.signperm}, the sign permutation is used to determine \code{n.perm} permuted values of the
one-sample t-statistic, and to compute the raw p-values for each of the prime implicants. Afterwards,
these p-values are adjusted for multiple comparisons using the method specified by \code{adjust}.
The permutation based importance of a prime implicant is then given by \eqn{1 -} these adjusted p-values.
Here, a possible threshold for calling a prime implicant important is 0.95.
}
\value{
An object of class \code{logicFS} containing
\item{primes}{the prime implicants,}
\item{vim}{the respective importance of the prime implicants,}
\item{prop}{NULL,}
\item{type}{the type of model (1: classification, 2: linear regression, 3: logistic regression),}
\item{param}{further parameters (if \code{addInfo = TRUE}),}
\item{mat.imp}{NULL,}
\item{measure}{the name of the used importance measure,}
\item{useN}{the value of \code{useN} from the original analysis with, e.g., \code{\link{logicFS}},}
\item{threshold}{the threshold suggested in \code{Details},}
\item{mu}{\code{mu}.}
}
\references{
Schwender, H., Ruczinski, I., Ickstadt, K. (2011). Testing SNPs and Sets of SNPs for Importance in Association Studies.
\emph{Biostatistics}, 12, 18-32.
}
\author{Holger Schwender, \email{holger.schwender@hhu.de}}
\seealso{
\code{\link{logic.bagging}}, \code{\link{logicFS}},
\code{\link{vim.logicFS}}, \code{\link{vim.chisq}}, \code{\link{vim.ebam}}
}
\keyword{logic}
\keyword{htest}
|
930dbb0ffff1f9b17d7a42703024af40464208a8
|
768270fa492cd7b2e18c6a333fdea9fecdc15fa8
|
/G4013/Lec7-multReg2.R
|
f1593aa167071af336bcb07ca5b65fcf20b9a51c
|
[] |
no_license
|
xw2239/QMSS-R-Code
|
2dc3ec24d92fc320f6f94b70b4d272210058c170
|
c7a62422a3724f870a9874ee7cf44b8776d5e2b0
|
refs/heads/master
| 2021-01-17T14:23:45.052722
| 2013-10-13T03:46:43
| 2013-10-13T03:46:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,859
|
r
|
Lec7-multReg2.R
|
# ---------------------------------------------------------
# Title: Multiple Regression 2
# R version: 3.0.0 (2013-04-03) -- "Masked Marvel"
# R Studio version: 0.97.336
# OS: Mac OS X 10.7.5
# Author: Eurry Kim <ek2758@columbia.edu>
# Maintainer: Eurry Kim <ek2758@columbia.edu>
# Description: G4013 Lecture 7
# ---------------------------------------------------------
# Load the saved workspace with cross-sectional GSS file
load("gssCS.RData")
# You can save table commands to objects to save as part of a workspace
home <- table(gss.cs$dwelown)
home
# ---------------------------------
# Predict vocab from home-ownership
# ---------------------------------
# Dummy-code home ownership: 1 = "own or is buying", 0 = otherwise (NA stays NA).
gss.cs$home1 <- ifelse(gss.cs$dwelown=="own or is buying", 1, 0)
# The third positional argument to lm() is `subset`; restricting to cases
# with a non-missing degree keeps this model comparable to the nested
# model (with degree) fitted below on the same observations.
wordHome.lm <- lm(wordsum ~ home1, data = gss.cs, !is.na(degree))
summary(wordHome.lm)
# Add degree
wordHomeD.lm <- update(wordHome.lm, ~ . + as.numeric(degree))
summary(wordHomeD.lm)
# ------------------------
# Categorical aggregations
# ------------------------
# Mean wordsum scores by home-ownership and degree
wordMean <- tapply(gss.cs$wordsum, list(gss.cs$home1, gss.cs$degree), mean, na.rm = TRUE)
wordMean
# The data needs to be re-structured; simple transpose is required
wordMean <- t(wordMean)
# Convert object to dataframe b/c plotting only takes dataframes as arguments
wordMean <- as.data.frame(wordMean)
# Change column names b/c not great to begin them with #s
colnames(wordMean) <- c("non.homeowner","homeowner")
# Add row with overall wordsum means per home-owner status
wordMean <- rbind(wordMean,
tapply(gss.cs$wordsum, gss.cs$home1, mean, na.rm = TRUE))
# Change rowname of overall mean to "overall"
# NOTE(review): hard-coded row index 9 breaks if the number of degree
# categories ever changes; rownames(wordMean)[nrow(wordMean)] would be safer.
rownames(wordMean)[9] <- "overall"
# Now we can calculate the differences between non-homeowners and homeowners
wordMean$diff <- wordMean$homeowner - wordMean$non.homeowner
# Create a column with the rownames
# This makes it easier to call in the plot
wordMean$degree <- rownames(wordMean)
# Remove the non-response degrees
wordMean <- subset(wordMean, !(degree %in% c("iap","dk","na")))
# Change the character degree variable to an ordered factor
class(wordMean$degree)
wordMean$degree <- factor(wordMean$degree, levels = c("overall",
"lt high school",
"high school",
"junior college",
"bachelor",
"graduate"),
ordered = TRUE)
# ----------
# Bar graphs
# ----------
# NOTE(review): interactive install.packages() calls do not belong in a
# script; they re-install on every run. Install once, then just library().
install.packages("ggplot2") # A graphing package for nicely formatted plots
library(ggplot2)
ggplot(wordMean, aes(x = degree, y = diff, fill = degree)) + geom_bar(stat="identity")
# ---------------------------------
# Visualize 3-dimension scatterplot
# ---------------------------------
prestg.lm <- lm(prestg80 ~ educ + male, data = gss.cs)
# Sample 5 random pairs of educ & male for prestg80 predictions using model
# NOTE(review): no set.seed(), so this sample is not reproducible.
s <- sample(nrow(gss.cs), 5)
prestg80.new <- gss.cs[s,]
# Predict prestg80 values of sample data
prestg80.new$prestg.pred <- predict(prestg.lm, newdata = prestg80.new)
install.packages("scatterplot3d")
library(scatterplot3d)
scatterplot3d(prestg80.new$educ, prestg80.new$prestg.pred, prestg80.new$male)
# 3-d plots are not usually useful -- I haven't used this command until now.
# -------------------------------
# How variables are held constant
# -------------------------------
# Need correlations between variables of interest
# NOTE(review): inside subset()'s select, column names evaluate to their
# positions, so as.numeric() here is a no-op wrapper around an index;
# select = c(prestg80, educ, male) would be clearer and equivalent.
prestg <- subset(gss.cs, select = c(as.numeric(prestg80), as.numeric(educ), as.numeric(male)))
prestg <- na.omit(prestg) # listwise deletion: drops every row with any missing value
cor(prestg)
# Need means and standard deviations
sapply(prestg, mean, na.rm = TRUE)
sapply(prestg, sd, na.rm = TRUE)
summary(prestg.lm)
# -------------------------------------------------------------
# For the sake of demonstration: purge education of "male-ness"
# -------------------------------------------------------------
educ.lm <- lm(educ ~ male, data = prestg)
summary(educ.lm)
# Extract residuals from model
prestg$educ.res <- residuals(educ.lm)
# educ.res is educ purged of maleness
head(prestg)
# educ.res and male are essentially not correlated
cor(prestg[,2:4])
# Now, estimate effect of education (purged of maleness) on prestige
educRes.lm <- lm(prestg80 ~ educ.res, data = prestg)
summary(educRes.lm) # 2.38 coefficient for educ.res
summary(prestg.lm) # 2.38 coefficient for educ
# Cool! Bias!
# -------------------------------------------------------------
# For the sake of demonstration: purge "male-ness" of education
# -------------------------------------------------------------
male.lm <- lm(male ~ educ, data = prestg)
summary(male.lm)
# Extract residuals from model
prestg$male.res <- residuals(male.lm)
# male.res is male purged of education
head(prestg)
# male-ness and educ are essentially not correlated
cor(prestg[,c(2:3,5)])
# Now, estimate effect of male-ness (purged of education) on prestige
maleRes.lm <- lm(prestg80 ~ male.res, data = prestg)
summary(maleRes.lm) # 0.37 coefficient for educ.res
summary(prestg.lm) # 0.37 coefficient for male
# Cool! Bias again!
# -----------------------------
# Do trust levels vary by race?
# -----------------------------
# Here, I employ the function, "within" to add multiple variables.
# You may run into a function called "attach" that enables one to "open up" a data frame to work on/from it.
# The problem with "attach" is that you have to "detach."
# Sure, it may be less typing (e.g., no need to do the $variable thing each time),
# but code is easier to follow and less likely to screw up.
# Besides, if you're using RStudio, just press the tab key after typing "gss.cs$"
# You will have a list of available variables in front of you. Niiice.
gss.cs <- within(gss.cs, {
newtrust <- factor(trust, levels = c("cannot trust","depends","can trust"), ordered = TRUE)
black <- ifelse(race == "black", 1, 0)
lnrealinc <- log(realinc)
})
# Third positional argument is lm()'s `subset`: keep cases complete on the
# later covariates so the nested models are fit to the same observations.
trust.lm <- lm(as.numeric(newtrust) ~ black, data = gss.cs, !is.na(educ) & !is.na(lnrealinc) & !is.na(region))
summary(trust.lm)
# Global F-test
anova(trust.lm)
# Add in more predictors
trust2.lm <- lm(as.numeric(newtrust) ~ black + as.numeric(educ) + lnrealinc + region, data = gss.cs)
summary(trust2.lm)
# Did the addition of region variable add sig info to the model?
trust3.lm <- update(trust2.lm, ~ . - region)
# Partial F-test
anova(trust3.lm, trust2.lm) # Keep region
# ------------
# Collinearity
# ------------
lm(tvhours ~ age + age, data = gss.cs) # 2nd age variable is just dropped
# ------------------
# Heteroskedasticity
# ------------------
tv.lm <- lm(tvhours ~ as.numeric(degree), data = gss.cs)
summary(tv.lm)
install.packages("lmtest")
library(lmtest)
# Breusch-Pagan test of constant error variance.
bptest(tv.lm) # Reject! We have heteroskedasticity
# Organize data to draw boxplots
tv <- subset(gss.cs, !is.na(degree) & !is.na(tvhours), select = c("tvhours","degree"))
tv <- cbind(tv, residuals(tv.lm))
colnames(tv)[3] <- "tvhours.res"
# Plot distributions of residuals for each degree category
ggplot(tv, aes(x = degree, y = tvhours.res)) + geom_boxplot()
# ----------------------
# Robust standard errors
# ----------------------
install.packages("sandwich")
library(sandwich)
# HAC (Newey-West) standard errors for the same point estimates.
tv.lmNW <- coeftest(tv.lm, vcov=NeweyWest(tv.lm, prewhite=FALSE))
tv.lmNW
# Histogram of the residuals
tv.res <- as.data.frame(residuals(tv.lm))
colnames(tv.res) <- "residuals"
# Density curve with overlaid normal curve in red
# NOTE(review): binwidth is not a geom_density() parameter (densities use
# bw=); ggplot2 will warn and ignore it.
ggplot(tv.res, aes(x = residuals)) + geom_density(binwidth = .5, alpha = .5) +
stat_function(fun = dnorm, colour = "red")
# Test residuals for normality
?shapiro.test # only takes numeric vectors between 3 and 5,000 observations
??nortest::sf.test # only takes numeric vectors between 5 and 5,000 observations
install.packages("e1071")
library(e1071)
# NOTE(review): kurtosis() returns a sample statistic, not a significance
# test; the "highly significant" conclusion below is not established here.
kurtosis(tv.res$residuals) # woah, highly significant. Definitely not normally distributed
# --------------------------------
# Observe influential observations
# --------------------------------
# Draw series of plots to gauge model fit
par(mfrow = c(2,2))
plot(tv.lm) # lower right-corner plot provides leverage plot
par(mfrow = c(1,1)) # reset plot window to default
# Draw influence plot to observe influential observations
install.packages("car")
library(car)
influencePlot(tv.lm)
# ---------------------
# Run robust regression
# ---------------------
install.packages("MASS")
library(MASS)
tv.rlm <- rlm(tvhours ~ as.numeric(degree), data = gss.cs)
summary(tv.rlm)
# -----------------------
# Run quantile regression
# -----------------------
install.packages("quantreg")
library(quantreg)
# Default tau = 0.5, i.e. median regression.
tv.rq <- rq(tvhours ~ as.numeric(degree), data = gss.cs)
summary(tv.rq)
# Save the workspace
save.image("gssCS.RData")
|
7ecf3125e2bc71197c58e07c8835f06a97c49856
|
91c17f2f5a3580c515df608a1531e5d258a6f9b4
|
/miso_rmats.R
|
1e37fbc60ac9efef40f680856be81dcf58b1b835
|
[] |
no_license
|
rnasys/PRMT_interactome
|
ea1e74c0d8e5dcbb54ea1e26684271e657d1cd11
|
66b45744e1f99b91ec890abd4fe50800f82ef581
|
refs/heads/master
| 2022-10-07T03:59:00.132026
| 2020-06-12T21:07:55
| 2020-06-12T21:07:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 21,666
|
r
|
miso_rmats.R
|
library(RMySQL)
library(stringr)
library(RColorBrewer)
library(ggplot2)
library(reshape2)
library(biomaRt)
library(future.apply)
library(pheatmap)
library(GeneOverlap)
library(venn)
library(Cairo)
library(plyr)
library(DBI)
library(pool)
# Color/histogram breaks at evenly spaced quantiles of `xs` (n points from
# the 0% to the 100% quantile), with repeated break values dropped so the
# returned break vector is strictly usable by pheatmap/hist.
quantile_breaks <- function(xs, n = 10) {
  probs <- seq(0, 1, length.out = n)
  qs <- quantile(xs, probs = probs)
  qs[!duplicated(qs)]
}
#read miso result
# NOTE(review): setwd() makes the script machine-specific; prefer passing
# the directory as a parameter.
setwd("~/whh/miso_summary")
# Sequencing library IDs and the condition/replicate each one maps to
# (Control plus PRMT knockdowns; PRMT6 and Control have a single replicate).
sample.l<-c('18R127','18R128','18R129','18R130','18R131','18R132','18R133','18R134','18R135','18R136','18R137','18R138')
sample_name.l<-c('Control_1','PRMT1_1','PRMT1_2','PRMT3_1','PRMT3_2','PRMT4_1','PRMT4_2','PRMT5_1','PRMT5_2','PRMT6_1','PRMT7_1','PRMT7_2')
# MISO alternative-splicing event classes: alt 3'/5' splice site, mutually
# exclusive exons, retained intron, skipped exon.
event.l<-c('A3SS','A5SS','MXE','RI','SE')
sample.n<-c('PRMT1','PRMT3','PRMT4','PRMT5','PRMT6','PRMT7')
# 1 = sample has replicates, 0 = single replicate (position 5 = PRMT6).
rep.l<-c(1,1,1,1,0,1)
#read miso summary file set path
# Read MISO summary files named "<libraryID>_<eventType>" for every
# sample/event-type combination, filter each record with filter_miso(), and
# return one long data frame: exp_name / event_type / event_name / psi.
# With replicate=T, replicate libraries ("PRMT1_2"-style names) are
# collapsed to one PSI per event via filter_rep().
# NOTE(review): sapply()/1:length() are fragile for empty inputs; prefer
# lapply()/seq_along(). TRUE/FALSE is safer than T.
read_miso_summary<-function(sample_list,event_list,sample_name_list,replicate=T){
all_data.l<-sapply(1:length(sample_list),function(i) {
data<-future_sapply(1:length(event_list),function(j) {
file_name<-paste(sample_list[i],event_list[j],sep='_')
print(file_name)
event_result<-read.delim(file_name,header=T)
# filter_miso() returns NULL for rejected rows; unlist() drops them.
event.m<-matrix(unlist(apply(event_result,1,filter_miso,sample_name=sample_name_list[i],event_name=event_list[j])),ncol=4,byrow=T)
colnames(event.m)<-c("sample_name","event_type","event_name","psi")
return(event.m)
})
# Stack the per-event-type matrices for this sample.
df<-Reduce(rbind,data)
return(df)
})
all_data.df<-Reduce(rbind,all_data.l)
all_data.df<-as.data.frame(all_data.df)
all_data.df$psi<-as.numeric(as.character(all_data.df$psi))
#filter replicate
if(replicate){
# Split "PRMT1_2"-style names into experiment name and replicate number.
exp_design<-matrix(unlist(lapply(as.character(all_data.df$sample_name),function(x) unlist(strsplit(x,split = '_')))),ncol=2,byrow=T)
new_data.df<-data.frame(exp_name=exp_design[,1],rep=exp_design[,2],subset(all_data.df,select = -sample_name))
new_data.l<-lapply(levels(new_data.df$exp_name),filter_rep,data_frame=new_data.df)
data.df<-Reduce(rbind,new_data.l)
}else{
data.df<-all_data.df
colnames(data.df)<-c('exp_name','event_type','event_name','psi')
}
return(data.df)
}
#replicate filter
# Collapse the replicate libraries of experiment `x` to one PSI per event:
# keep only events measured in every replicate, then average.
# NOTE(review): `idx` indexes rows of the aggregated frame `Var` (one row per
# event) but is applied to `rep_data.df` (one row per event x replicate), so
# the wrong rows may be dropped — confirm intent.
# NOTE(review): when `idx` is empty, rep_data.df[-idx,] with idx = integer(0)
# selects ZERO rows (negative indexing by an empty vector), silently dropping
# everything. Also, Var$psi<0.05 flags the LOW-variance (i.e. consistent)
# events for removal, which looks inverted — verify against the design.
filter_rep<-function(x,data_frame){
df=subset(data_frame,exp_name==x)
if(length(unique(df$rep))>1){
# Events present in every replicate of this experiment.
rep_event.l<-lapply(levels(df$rep),function(y,dt_frame) subset(dt_frame,rep==y)$event_name , dt_frame=df)
rep_event<-Reduce(intersect,rep_event.l)
rep_data.df<-subset(df,event_name%in%rep_event)
# Per-event variance across replicates.
Var<-aggregate( psi ~ .,data=subset(rep_data.df,select=-rep),FUN=var)
idx<-which(Var$psi<0.05)
re<-aggregate( psi ~ .,subset(rep_data.df[-idx,],select=-rep),mean)
}
else re=subset(df,select=-rep)
return(re)
}
#differential event filter
# For experiment `x`, return the events (restricted to those also present in
# the control event set `ct`) whose |PSI(x) - PSI(Control)| > cut_off.
filter_diff<-function(x,data_frame,ct,cut_off) {
event.idx<-intersect(subset(data_frame,exp_name==x)$event_name,ct)
# One delta-PSI per candidate event (parallelized via future.apply).
diff.l<-future_sapply(event.idx,function(e,dt_frame,s_name){
as.numeric(subset(dt_frame,exp_name==s_name&event_name==e)$psi)-as.numeric(subset(dt_frame,exp_name=='Control'&event_name==e)$psi)
},dt_frame=data_frame,s_name=x)
idx<-which(abs(diff.l)>cut_off)
return(event.idx[idx])
}
#filter miso confidence interval w/o read count
# First (CI-only) version of the record filter: keep a record when its 95%
# CI width (psi[4] - psi[3]) is at most 0.3.
# NOTE(review): this definition is dead code — it is immediately overwritten
# by the second filter_miso() defined below; delete one of the two.
filter_miso<-function(psi.l,sample_name,event_name) {
psi<-as.vector(psi.l)
if(as.numeric(psi[4])-as.numeric(psi[3])<=0.3)
return(c(sample_name,event_name,psi[1],psi[2]))
else
return(NULL)
}
# Keep a MISO record only when its 95% CI is narrow (width <= 0.3, fields 3/4)
# AND the supporting read count summed from the "iso:count,..." string in
# field 7 exceeds 50. Returns c(sample, event type, event name, psi)
# or NULL when the record is filtered out.
filter_miso<-function(psi.l,sample_name,event_name) {
  fields <- as.vector(psi.l)
  # "0:30,1:40" -> c("0:30","1:40") -> counts 30, 40.
  pairs <- unlist(strsplit(as.character(fields[7]), split = ",", fixed = T))
  counts <- vapply(pairs, function(p) strsplit(p, split = ":")[[1]][2], character(1))
  total_reads <- sum(as.numeric(counts))
  ci_width <- as.numeric(fields[4]) - as.numeric(fields[3])
  if ((ci_width <= 0.3) & total_reads > 50) {
    c(sample_name, event_name, fields[1], fields[2])
  } else {
    NULL
  }
}
# Build the filtered long PSI table, then call an event "differential" in a
# knockdown when |PSI - control PSI| > 0.1.
data.df<-read_miso_summary(sample_list = sample.l,event_list = event.l,sample_name_list = sample_name.l)
#find differential event list for each sample
control<-subset(data.df,exp_name=="Control")$event_name
# levels()[-1] drops "Control"; relies on "Control" sorting first among the
# factor levels — TODO confirm.
diff_event.l<-lapply(levels(data.df$exp_name)[-1],filter_diff,data_frame=data.df,ct=control,cut_off=0.1)
# Events differential in every knockdown.
inter_event<-Reduce(intersect,diff_event.l)
inter.df=subset(data.df,event_name%in%inter_event)
#venn plot
#venn(diff_event.l, ilabels = TRUE,zcolor = "style", snames="PRMT1,PRMT3,PRMT4,PRMT5,PRMT6,PRMT7",size = 25, cexil = 2, cexsn = 2.5);
#annotation
# Annotate one MISO event ID ("chr:coord|coord-...:strand") with its genomic
# span, the overlapping refGene symbol(s) and a delta-PSI value.
#
# e.m        event ID; used as-is when inter=TRUE, otherwise coerced with
#            as.character() first (preserves the original behavior for
#            factor input from sapply).
# dt.df      data frame with exp_name/event_type/event_name/... columns;
#            column 2 is the event type and column 4 the reported value.
# inter      TRUE: delta-PSI = mean PSI over the knockdowns listed in the
#            script-global `sample.n` minus the Control PSI;
#            FALSE: column 4 of the first matching row is reported directly.
# connection a live DBI connection/pool to the UCSC hg19 database.
#
# Returns c(chr, start, end, gene name, strand, event name, event type,
# delta_psi) as used by the CSV writers in this script.
annotate_event <- function(e.m, dt.df, inter = F, connection) {
  e.n <- if (inter) e.m else as.character(e.m)
  # Split "chr14:aaa|bbb-...:-" into chromosome, exon coordinates, strand.
  event.n <- unlist(strsplit(e.n, split = ":"))
  strand <- event.n[length(event.n)]
  event <- unlist(strsplit(event.n, split = "-"))
  eve <- unlist(strsplit(event, split = "|", fixed = T))
  chr <- event[1]
  s1 <- eve[2]
  s2 <- eve[length(eve) - 1]
  # BUGFIX: compare coordinates numerically. The original compared the
  # coordinate STRINGS lexicographically ("9" > "10"), which could swap
  # start/end for coordinates of different digit lengths.
  if (as.numeric(s1) > as.numeric(s2)) {
    start <- s2
    end <- s1
  } else {
    start <- s1
    end <- s2
  }
  name <- ucsc_query(chr, start, end, strand, connection)
  event.m <- subset(dt.df, event_name == e.n)
  event_type <- as.character(event.m[1, 2])
  if (inter) {
    # Mean knockdown PSI minus control PSI (sample.n is a script global).
    delta_psi <- mean(subset(dt.df, event_name == e.n & exp_name %in% sample.n)$psi) -
      subset(dt.df, event_name == e.n & exp_name == "Control")$psi
  } else {
    # Column 4 of the (single) matching row, as in the original.
    delta_psi <- event.m[1, 4]
  }
  return(c(chr, start, end, name, strand, e.n, event_type, delta_psi))
}
# Look up the gene symbol(s) (refGene.name2) overlapping [start, end] on
# `chr`/`strand` in the UCSC hg19 MySQL database through `connection`.
# Returns one symbol, a space-collapsed string of symbols, or "NA".
# NOTE(review): the SQL is built by string interpolation; acceptable for
# these internally generated coordinates, but use sqlInterpolate()/dbBind()
# if any input could ever be untrusted.
ucsc_query <- function(chr,start,end,strand,connection) {
refGene <- dbGetQuery(connection,
stringr::str_interp(
"SELECT DISTINCT name2
FROM refGene
WHERE chrom = '${chr}' AND txStart <= ${end} AND txEnd >= ${start} AND strand='${strand}'"))
if(nrow(refGene)==1){
refGene<-refGene$name2
}else if(nrow(refGene)>1){
# Multiple overlapping genes: collapse into one space-separated string.
refGene<-lapply(refGene,paste,collapse=" ")$name2
}else{
refGene<-"NA"
}
return(refGene)
}
# Pooled connection to the public UCSC MySQL mirror (hg19).
con_ucsc <- dbPool(drv = RMySQL::MySQL(), db = "hg19", user = "genome", host = "genome-mysql.soe.ucsc.edu")
# One annotated CSV of differential events per knockdown sample.
lapply(1:length(diff_event.l),function(i,e.l,df,con) {
data.raw<-future_sapply(e.l[[i]],annotate_event,inter=F,dt.df=subset(data.df,exp_name==sample.n[i]),connection=con)
data<-matrix(unlist(data.raw),ncol=8,byrow=T)
colnames(data)<-c('chr','start','end','gene name','strand','event name','event type','delta_psi')
filename<-paste(sample.n[i],"all_event","csv",sep = ".")
# NOTE(review): write.csv() ignores col.names (warns); the argument can go.
write.csv(data,filename,row.names = F,col.names = T)
} ,e.l=diff_event.l,df=data.df,con=con_ucsc)
# Annotate the events shared by all knockdowns, then close the pool.
data.raw<-future_sapply(inter_event,annotate_event,inter=T,dt.df=inter.df,connection=con_ucsc)
data<-matrix(unlist(data.raw),ncol=8,byrow=T)
colnames(data)<-c('chr','start','end','gene name','strand','event name','event type','delta_psi')
filename<-paste("inter_event","csv",sep = ".")
write.csv(data,filename,row.names = F,quote = F)
poolClose(con_ucsc)
# All events differential in at least one knockdown.
total_event.l<-unique(unlist(diff_event.l))
# Re-read the MISO summaries WITHOUT the CI/read-count filter, restricted to
# events in `total_event`, so every sample has a PSI for every event of
# interest. Replicate handling mirrors read_miso_summary().
read_raw_miso<-function(sample_list,event_list,sample_name_list,replicate=T,total_event){
all_data.l<-lapply(1:length(sample_list),function(i) {
data<-future_sapply(1:length(event_list),function(j) {
file_name<-paste(sample_list[i],event_list[j],sep='_')
event_result<-read.delim(file_name,header=T)
event.m<-matrix(unlist(apply(event_result,1,filter_raw_miso,sample_name=sample_name_list[i],event_name=event_list[j])),ncol=4,byrow=T)
colnames(event.m)<-c("sample_name","event_type","event_name","psi")
# Keep only whitelisted events.
event.df<-as.matrix(event.m[event.m[,3]%in%total_event,])
return(event.df)
})
df<-Reduce(rbind,data)
return(df)
})
all_data.df<-Reduce(rbind,all_data.l)
all_data.df<-as.data.frame(all_data.df)
all_data.df$psi<-as.numeric(as.character(all_data.df$psi))
#filter replicate
if(replicate){
exp_design<-matrix(unlist(lapply(as.character(all_data.df$sample_name),function(x) unlist(strsplit(x,split = '_')))),ncol=2,byrow=T)
new_data.df<-data.frame(exp_name=exp_design[,1],rep=exp_design[,2],subset(all_data.df,select = -sample_name))
new_data.l<-lapply(levels(new_data.df$exp_name),filter_rep,data_frame=new_data.df)
data.df<-Reduce(rbind,new_data.l)
}else{
data.df<-all_data.df
colnames(data.df)<-c('exp_name','event_type','event_name','psi')
}
return(data.df)
}
# Unfiltered MISO extractor: tag every record with its sample and event type,
# keeping the event name (field 1) and PSI (field 2) unchanged.
filter_raw_miso<-function(psi.l,sample_name,event_name) {
  fields <- as.vector(psi.l)
  c(sample_name, event_name, fields[1], fields[2])
}
plan(multiprocess)
# Unfiltered PSI values for every differential event in every sample.
all.df<-read_raw_miso(sample_list = sample.l,event_list = event.l,sample_name_list = sample_name.l,total_event = total_event.l)
colnames(all.df)<-c("exp_name","event_type","event_name","delta_psi")
# events x samples PSI matrix; column 1 is assumed to be Control (factor
# levels sort "Control" first — TODO confirm), subtracted to get delta-PSI.
all.m<-acast(all.df, event_name~exp_name, value.var="delta_psi")
all_psi.m<-apply(all.m[,-1],2,function(x) x-all.m[,1])
# Drop events whose |delta PSI| never exceeds 0.1 in any knockdown.
check<-apply(all_psi.m,1,function(x) max(abs(x))<0.1)
all<-all_psi.m[-which(check),]
# Per-sample lists of events passing the 0.1 cutoff.
re.l<-apply(all,2,function(x) names(x[abs(x)>0.1]))
venn(re.l,zcolor = "style",size = 20, cexil = 2, cexsn = 1,borders = F)
# Heatmap sort key: signed count of knockdowns passing the cutoff, plus a
# per-sample weighted fractional tie-breaker (weights c(6,5,1,3,4,2)/30).
all.sort<-apply(all,1,function(x){
a<-ifelse(abs(x)>0.1,sign(x),0)
s<-sign(sum(x))
n<-sum(abs(a))
re<-s*n+s*abs(sum(a*c(6,5,1,3,4,2))/30)
if(abs(re)<1){
# No sample passed the cutoff net; fall back to the sign of the largest effect.
idx<-which(abs(x)==max(abs(x)))
s<-sign(x[idx])
return(s*n+s*abs(sum(a*c(6,5,1,3,4,2))/30))
}
else
return(re)
})
names(all.sort)<-rownames(all)
row.idx<-names(sort(all.sort,decreasing = T))
# NOTE(review): duplicate definition — quantile_breaks() is already defined
# at the top of this script; remove one copy.
quantile_breaks <- function(xs, n = 10) {
breaks <- quantile(xs, probs = seq(0, 1, length.out = n))
breaks[!duplicated(breaks)]
}
# Quantile-spaced color breaks give better contrast than linear breaks.
mat_breaks <- quantile_breaks(all_psi.m, n = 100)
cc = colorRampPalette(rev(brewer.pal(n = 7,
name = "RdYlBu")))
# Row annotation: derived from the integer part of the heatmap sort key.
a<-sapply(floor(all.sort[row.idx]),function(x) {
ifelse(x < 0,abs(x)-1,x)})
row_anno<-data.frame(Var1 = factor(a))
col_anno<-c("#66c2a5","#fc8d62","#8da0cb","#e78ac3","#a6d854","#ffd92f")
names(col_anno)<-1:6
pheatmap(all_psi.m[row.idx,c(1,2,5,4,6,3)],show_rownames = F,cluster_rows = F,cluster_cols = F,breaks = mat_breaks,annotation_row = row_anno,annotation_colors=list(Var1=col_anno))
# Stacked bar chart: per-sample counts of differential events by event type.
plot.df<-Reduce(rbind,lapply(1:6,function(x){
count.df<-subset(all.df,exp_name==names(re.l)[x]&event_name%in%re.l[[x]])
count.l<-sapply(event.l,function(x)nrow(subset(count.df,event_type==x)))
df<-data.frame(exp_name=rep(names(re.l)[x],5),event_type=event.l,count=count.l)
return(df)
}))
# Label positions at the middle of each stacked segment.
plot.df <- ddply(plot.df,.(exp_name), transform, pos = (sum(count)-cumsum(count)+0.5*count))
ggplot(plot.df,aes(x=exp_name,y=count,fill=event_type))+geom_bar(stat="identity")+ geom_text(data=plot.df,aes(x=exp_name,y=pos,label = count), size = 4,show.legend = F)+scale_fill_manual(values=c("#8dd3c7","#ffffb3","#bebada","#80b1d3","#fb8072"))+theme_classic()
# NOTE(review): the line below is a free-standing ggplot layer never added
# to a plot (no `+`), so it has no effect — remove it or attach it to the
# ggplot call above.
scale_fill_manual(values=c("#fed9a6","#b3cde3","#ccebc5","#decbe4","#fbb4ae"))
# Read back the per-sample "<sample>.all_event.csv" annotation files written
# earlier and return a long table (exp_name, event_type, event_name,
# gene_name) restricted to events in `total_event`.
read_miso_raw<-function(sample_name_list,total_event){
all_data.l<-lapply(1:length(sample_name_list),function(i) {
file_name<-paste(sample_name_list[i],"all_event.csv",sep='.')
print(file_name)
event_result<-read.csv(file_name,header=T)
event.m<-matrix(unlist(apply(event_result,1,filter_miso_raw,sample_name=sample_name_list[i])),ncol=4,byrow=T)
colnames(event.m)<-c("sample_name","event_type","event_name","gene_name")
# Keep only whitelisted events (column 3 = event name).
event.df<-as.matrix(event.m[event.m[,3]%in%total_event,])
return(event.df)
})
all_data.df<-Reduce(rbind,all_data.l)
all_data.df<-as.data.frame(all_data.df)
colnames(all_data.df)<-c('exp_name','event_type','event_name','gene_name')
return(all_data.df)
}
# Pull (event type, event name, gene name) = fields 7, 6 and 4 of a row from
# a "*.all_event.csv" annotation file, tagged with the sample name.
filter_miso_raw<-function(psi.l,sample_name) {
  row_fields <- as.vector(psi.l)
  c(sample_name, row_fields[7], row_fields[6], row_fields[4])
}
# NOTE(review): miso.n is used here but only assigned two lines below — this
# only works if the script was previously run in the same session; reorder
# these statements so miso.n is defined before read_miso_raw() is called.
miso_gene.df<-read_miso_raw(sample.n,miso.n)
miso.m<-all
miso.n<-rownames(all)
# Per-event (event_type, gene_name, event_name) lookup table.
miso.df<-cbind(t(sapply(miso.n,function(x) c(as.character(unique(subset(miso_gene.df,event_name==x)$event_type)),as.character(unique(subset(miso_gene.df,event_name==x)$gene_name))))),miso.n)
colnames(miso.df)<-c("event_type","gene_name","event_name")
rownames(miso.df)<-NULL
# NOTE(review): another machine-specific setwd(); see note at the top.
setwd("~/whh/rMATs_diff")
# Read rMATS differential output files ("<sample>_<eventType>.diff"), filter
# each record with filter_rmats() (total reads > 50, |delta PSI| > 0.1) and
# return a long table: exp_name/event_type/event_name/gene_name/delta_psi.
# NOTE(review): the default rep_list=rep.l silently depends on a global.
read_rmats_diff<-function(sample_list,event_list,rep_list=rep.l){
all_data.l<-sapply(1:length(sample_list),function(i) {
data<-future_sapply(1:length(event_list),function(j) {
file.n<-paste(sample_list[i],event_list[j],sep='_')
file_name<-paste(file.n,"diff",sep=".")
print(file_name)
event_result<-read.delim(file_name,header=F)
# filter_rmats() returns NULL for rejected rows; unlist() drops them.
event.m<-matrix(unlist(apply(event_result,1,filter_rmats,sample_name=sample_list[i],event_type=event_list[j],ifrep=rep_list[i])),ncol=5,byrow=T)
colnames(event.m)<-c("sample_name","event_type","event_name","gene_name","delta_psi")
return(event.m)
})
df<-Reduce(rbind,data)
return(df)
})
all_data.df<-Reduce(rbind,all_data.l)
all_data.df<-as.data.frame(all_data.df)
all_data.df$delta_psi<-as.numeric(as.character(all_data.df$delta_psi))
data.df<-all_data.df
colnames(data.df)<-c('exp_name','event_type','event_name',"gene_name",'delta_psi')
return(data.df)
}
# Filter one rMATS record: require total supporting reads > 50 (fields
# 13-16; fields 15/16 are comma-separated per replicate when ifrep is
# truthy) AND |delta PSI| (field 23) > 0.1. Returns
# c(sample, event type, "chr:coords:strand" ID, gene, delta PSI) or NULL.
filter_rmats<-function(psi.l,sample_name,event_type,ifrep){
  rec <- as.vector(psi.l)
  if (ifrep) {
    rep1 <- as.numeric(unlist(strsplit(as.character(rec[15]), split = ',')))
    rep2 <- as.numeric(unlist(strsplit(as.character(rec[16]), split = ',')))
    total <- sum(as.numeric(rec[13]), as.numeric(rec[14]), rep1, rep2)
  } else {
    total <- sum(as.numeric(rec[13]), as.numeric(rec[14]), as.numeric(rec[15]), as.numeric(rec[16]))
  }
  delta <- as.numeric(rec[23])
  if ((total > 50) & (abs(delta) > 0.1)) {
    # Event ID: chromosome + six exon coordinates + strand, spaces removed.
    coords <- paste(as.character(rec[6:11]), collapse = ":")
    id <- paste(as.character(rec[4]), coords, as.character(rec[5]), sep = ":")
    return(c(sample_name, event_type, gsub(" ", "", id), rec[3], delta))
  }
  else
    return(NULL)
}
# rMATS analysis uses the per-knockdown names directly; note sample.l,
# rep.l and event.l are reassigned here (MXE is excluded for rMATS).
sample.l<-c('PRMT1','PRMT3','PRMT4','PRMT5','PRMT6','PRMT7')
rep.l<-c(1,1,1,1,0,1)
event.l<-c('A3SS','A5SS','RI','SE')
plan(multiprocess)
data.df<-read_rmats_diff(sample_list = sample.l,event_list = event.l,rep_list = rep.l)
# Differential events per sample and their intersection across samples.
diff_event.l<-lapply(levels(data.df$exp_name),function(x) subset(data.df,exp_name==x)$event_name)
inter_event<-Reduce(intersect,diff_event.l)
inter.df=subset(data.df,event_name%in%inter_event)
CairoJPEG(file="venn_rmats.jpeg",width=1000,height=1000)
venn(diff_event.l, ilabels = TRUE,zcolor = "style", snames="PRMT1,PRMT3,PRMT4,PRMT5,PRMT6,PRMT7",size = 25, cexil = 2, cexsn = 2.5);
dev.off()
# Per-sample annotated CSVs of differential rMATS events.
# NOTE(review): annotate_event() requires a `connection` argument (and the
# UCSC pool was closed above with poolClose) — this call omits it, so
# ucsc_query() will fail; re-open a connection and pass it through.
lapply(1:length(diff_event.l),function(i,e.l,df) {
data.raw<-future_sapply(e.l[[i]],annotate_event,inter=F,dt.df=subset(data.df,exp_name==sample.n[i]))
data<-matrix(unlist(data.raw),ncol=8,byrow=T)
colnames(data)<-c('chr','start','end','gene name','strand','event name','event type','delta_psi')
filename<-paste(sample.n[i],"event","csv",sep = ".")
write.csv(data,filename,row.names = F,col.names = T)
} ,e.l=diff_event.l,df=data.df)
#write unique event
# Events differential ONLY in this sample (absent from every other sample).
lapply(sample.l,function(x,df){
print(x)
uni.eve<-setdiff(subset(df,exp_name==x)$event_name,subset(df,exp_name!=x)$event_name)
print(length(uni.eve))
out.data<-subset(df,event_name %in% uni.eve)
filename<-paste(x,"event","csv",sep = ".")
# NOTE(review): this overwrites the "<sample>.event.csv" written just above;
# use a distinct filename (e.g. "<sample>.unique_event.csv").
write.csv(out.data,filename,row.names = F,col.names = T)
},df=data.df)
#write all event
lapply(sample.l,function(x,df){
print(x)
out.data<-subset(df,exp_name==x)
print(nrow(out.data))
filename<-paste(x,"all_event","csv",sep = ".")
write.csv(out.data,filename,row.names = F,col.names = T)
},df=data.df)
total_event.l<-as.character(unique(data.df$event_name))
## Read raw rMATS result tables ("<sample>_<event>.txt") for every sample/event
## combination, keep only rows whose event id appears in `total_event`, and
## stack everything into one long data frame.
##
## Args:
##   sample_list : character vector of sample names (used as file-name prefixes).
##   event_list  : character vector of event types (A3SS/A5SS/RI/SE).
##   rep_list    : per-sample replicate flags; default references the global
##                 `rep.l` -- NOTE(review): a global default is fragile, confirm
##                 `rep.l` exists when this is called.
##   total_event : whitelist of event names passed through to filter_raw_rmats().
## Returns: data.frame with columns exp_name, event_name, delta_psi (numeric).
read_rmats_raw<-function(sample_list,event_list,rep_list=rep.l,total_event){
all_data.l<-sapply(1:length(sample_list),function(i) {
## one 3-column matrix per event type, computed in parallel via future.apply
data<-future_sapply(1:length(event_list),function(j) {
file.n<-paste(sample_list[i],event_list[j],sep='_')
file_name<-paste(file.n,"txt",sep=".")
print(file_name)
event_result<-read.delim(file_name,header=T)
## filter_raw_rmats() returns NULL for non-whitelisted rows; unlist() drops them
event.m<-matrix(unlist(apply(event_result,1,filter_raw_rmats,sample_name=sample_list[i],event_type=event_list[j],ifrep=rep_list[i],event.t=total_event)),ncol=3,byrow=T)
colnames(event.m)<-c("sample_name","event_name","delta_psi")
return(event.m)
})
## stack the per-event matrices for this sample
df<-Reduce(rbind,data)
return(df)
})
## stack the per-sample matrices, then coerce delta_psi back to numeric
## (everything became character inside the matrices)
all_data.df<-Reduce(rbind,all_data.l)
all_data.df<-as.data.frame(all_data.df)
all_data.df$delta_psi<-as.numeric(as.character(all_data.df$delta_psi))
data.df<-all_data.df
colnames(data.df)<-c('exp_name','event_name','delta_psi')
return(data.df)
}
# Keep one rMATS record if its reconstructed event id is in a whitelist.
#
# psi.l is a single row of an rMATS table (delivered by apply(..., 1, ...), so
# every field is a character scalar): field 4 = chromosome, 5 = strand,
# 6:11 = exon coordinates, 23 = delta PSI. The event id is rebuilt as
# "chr:c1:c2:c3:c4:c5:c6:strand" with all whitespace stripped.
# event_type and ifrep are unused here; they are kept so existing
# apply(..., event_type=, ifrep=) call sites keep working.
# Returns c(sample_name, event_name, delta_psi) as a character vector when the
# event is whitelisted, otherwise NULL.
filter_raw_rmats <- function(psi.l, sample_name, event_type, ifrep, event.t) {
  fields <- as.vector(psi.l)
  coords <- paste(as.character(fields[6:11]), collapse = ":")
  raw_id <- paste(as.character(fields[4]), coords, as.character(fields[5]), sep = ":")
  clean_id <- gsub(" ", "", raw_id)
  if (!(clean_id %in% event.t)) {
    return(NULL)
  }
  # c() coerces the numeric delta PSI back to character, as before
  c(sample_name, clean_id, as.numeric(fields[23]))
}
## Build the events-by-samples delta-PSI matrix from the raw rMATS tables and
## merge it with the MISO-derived matrix (miso.m / miso.df, defined elsewhere).
plan(multiprocess)
all.df<-read_rmats_raw(sample_list = sample.l,event_list = event.l,rep_list = rep.l,total_event = total_event.l)
## reshape long -> wide: rows = event_name, cols = exp_name (reshape2::acast)
all.m<-acast(all.df, event_name~exp_name, value.var="delta_psi")
all.m[is.na(all.m)]<-0
## sign flip so rMATS delta PSI matches the MISO sign convention
rmats.m<- -all.m
rmats.n<-rownames(all.m)
## per-event annotation (event_type, gene_name) looked up from data.df
rmats.df<-cbind(t(sapply(rmats.n,function(x) c(as.character(unique(subset(data.df,event_name==x)$event_type)),as.character(unique(subset(data.df,event_name==x)$gene_name))))),rmats.n)
colnames(rmats.df)<-c("event_type","gene_name","event_name")
rownames(rmats.df)<-NULL
## translate MISO ids (all types except MXE) to rMATS-style ids so duplicates
## between the two tools can be detected.
## NOTE(review): miso2rmats() is defined further down in this script -- this
## chunk only works if the function was sourced/defined beforehand.
new_rmats_name<-matrix(unlist(apply(miso.df[miso.df[,1]!="MXE",],1,function(x) miso2rmats(x[3],x[1]))),ncol=2,byrow = T)
inter.n<-new_rmats_name[new_rmats_name[,2]%in%rmats.df[,3],]
event_name.df<-rbind(miso.df,rmats.df)
## keep MISO rows, then append only rMATS events not already present via MISO
merge.m<-rbind(miso.m,rmats.m[!rownames(rmats.m)%in%inter.n[,2],])
## Convert a MISO event id into the equivalent rMATS event id.
##
## Args:
##   name : MISO event id -- a ":"-separated string whose last field is the
##          strand and whose coordinate fields use "-" and "|" as internal
##          separators (split in that order below).
##   tp   : event type, one of "A3SS", "A5SS", "RI", "SE" (MXE is not handled;
##          callers filter it out).
## Returns: c(original_miso_name, rmats_name).
##
## The arithmetic converts MISO's 1-based starts to rMATS's 0-based starts
## (hence the repeated "- 1") and reorders the coordinate fields per event
## type and strand. NOTE(review): the exact field layout assumed in eve[]
## is inferred from this code only -- confirm against the MISO id format of
## the annotation used upstream before reusing elsewhere.
miso2rmats<-function(name,tp){
## split by ":", then "-", then "|" to get the flat coordinate vector `eve`
event.n<-unlist(strsplit(name,split=":"))
strand<-event.n[length(event.n)]
event<-unlist(strsplit(event.n,split="-"))
eve<-unlist(strsplit(event,split="|",fixed = T))
## alternative 3' splice site: rebuild as chr:s:e:s:e:s:e:strand
if(tp=="A3SS"){
if(strand=="-"){
s1<-as.character(as.numeric(eve[7])-1)
s5<-as.character(as.numeric(eve[2])-1)
re<-paste(eve[1],s1,eve[6],s1,eve[5],s5,eve[3],strand,sep = ":")
}
if(strand=="+"){
s1<-as.character(as.numeric(eve[5])-1)
s3<-as.character(as.numeric(eve[6])-1)
s5<-as.character(as.numeric(eve[2])-1)
re<-paste(eve[1],s1,eve[7],s3,eve[7],s5,eve[3],strand,sep = ":")
}
}
## alternative 5' splice site
if(tp=="A5SS"){
if(strand=="-"){
s1<-as.character(as.numeric(eve[3])-1)
s3<-as.character(as.numeric(eve[4])-1)
s5<-as.character(as.numeric(eve[6])-1)
re<-paste(eve[1],s1,eve[2],s3,eve[2],s5,eve[7],strand,sep = ":")
}
if(strand=="+"){
s1<-as.character(as.numeric(eve[2])-1)
s5<-as.character(as.numeric(eve[6])-1)
re<-paste(eve[1],s1,eve[4],s1,eve[3],s5,eve[7],strand,sep = ":")
}
}
## retained intron
if(tp=="RI"){
if(strand=="+"){
s1<-as.character(as.numeric(eve[2])-1)
s5<-as.character(as.numeric(eve[5])-1)
re<-paste(eve[1],s1,eve[6],s1,eve[3],s5,eve[6],strand,sep = ":")
}
if(strand=="-"){
s1<-as.character(as.numeric(eve[6])-1)
s5<-as.character(as.numeric(eve[3])-1)
re<-paste(eve[1],s1,eve[2],s1,eve[5],s5,eve[2],strand,sep = ":")
}
}
## skipped exon
if(tp=="SE"){
if(strand=="+"){
s1<-as.character(as.numeric(eve[5])-1)
s3<-as.character(as.numeric(eve[2])-1)
s5<-as.character(as.numeric(eve[8])-1)
re<-paste(eve[1],s1,eve[6],s3,eve[3],s5,eve[9],strand,sep = ":")
}
if(strand=="-"){
s1<-as.character(as.numeric(eve[5])-1)
s5<-as.character(as.numeric(eve[2])-1)
s3<-as.character(as.numeric(eve[8])-1)
re<-paste(eve[1],s1,eve[6],s3,eve[9],s5,eve[3],strand,sep = ":")
}
}
## NOTE(review): if tp is none of the four handled types, `re` is undefined
## and this errors -- callers must pre-filter (e.g. drop MXE).
return(c(name,re))
}
## Diagnostics + final plots on the merged MISO/rMATS matrix:
## print genes annotated with the same event type by both tools, draw a Venn
## of per-sample significant events, and render a sorted delta-PSI heatmap
## plus a stacked bar chart of event-type counts.
re<-sapply(intersect(rmats.df[,2],miso.df[,2]),function(x){
a=miso.df[miso.df[,2]==x,1]
b=rmats.df[rmats.df[,2]==x,1]
if(a%in%b){
print(x)
print(intersect(a,b))
print(miso.df[miso.df[,2]==x,3])
}
})
## TRUE for events whose |delta PSI| never exceeds 0.1 in any sample
check<-apply(merge.m,1,function(x) max(abs(x))<0.1)
## per sample: names of events with |delta PSI| > 0.1
re.l<-apply(merge.m,2,function(x) names(x[abs(x)>0.1]))
venn(re.l,zcolor = "style",size = 20, cexil = 2, cexsn = 1,borders = F)
## Sort key per event: integer part = signed count of samples passing the 0.1
## cutoff, fractional part = a fixed per-sample weighting (c(6,5,1,3,4,2)/30)
## used to group events with the same count by which samples drive them.
all.sort<-apply(merge.m,1,function(x){
a<-ifelse(abs(x)>0.1,sign(x),0)
s<-sign(sum(x))
n<-sum(abs(a))
re<-s*n+s*abs(sum(a*c(6,5,1,3,4,2))/30)
if(abs(re)<1){
idx<-which(abs(x)==max(abs(x)))
s<-sign(x[idx])
return(s*n+s*abs(sum(a*c(6,5,1,3,4,2))/30))
}
else
return(re)
})
names(all.sort)<-rownames(merge.m)
row.idx<-names(sort(all.sort,decreasing = T))
## NOTE(review): quantile_breaks() and the palette function cc() are not
## defined in this chunk -- they must come from elsewhere in the session.
mat_breaks <- quantile_breaks(merge.m, n = 100)
## row annotation: bucket by the (sign-folded) number of affected samples
a<-sapply(floor(all.sort[row.idx]),function(x) {
ifelse(x < 0,abs(x)-1,x)})
row_anno<-data.frame(Var1 = factor(a))
col_anno<-c("#66c2a5","#fc8d62","#8da0cb","#e78ac3","#a6d854","#ffd92f")
names(col_anno)<-1:6
pheatmap(merge.m[row.idx,c(1,2,5,4,6,3)],show_rownames = F,cluster_rows = F,cluster_cols = F,color=cc(100),breaks = mat_breaks,annotation_row = row_anno,annotation_colors=list(Var1=col_anno))
## per-sample counts of significant events by event type
## NOTE(review): rep(...,5) vs the 4-element event.l suggests event.l once had
## 5 types (likely including MXE); as written data.frame() will not recycle
## cleanly -- verify before rerunning.
plot.df<-Reduce(rbind,lapply(1:6,function(x){
count.df<-event_name.df[event_name.df[,3]%in%re.l[[x]],]
count.l<-sapply(event.l,function(x) length(which(count.df[,1]==x)))
df<-data.frame(exp_name=rep(names(re.l)[x],5),event_type=event.l,count=count.l)
return(df)
}))
## NOTE(review): geom_text() references a `pos` column that plot.df does not
## define, and the trailing scale_fill_manual() line below is a detached
## leftover (not added to the plot with `+`) -- it builds and discards a scale.
ggplot(plot.df,aes(x=exp_name,y=count,fill=event_type))+geom_bar(stat="identity")+ geom_text(data=plot.df,aes(x=exp_name,y=pos,label = count), size = 4,show.legend = F)+scale_fill_manual(values=c("#8dd3c7","#ffffb3","#bebada","#80b1d3","#fb8072"))+theme_classic()
scale_fill_manual(values=c("#fed9a6","#b3cde3","#ccebc5","#decbe4","#fbb4ae"))
|
8c9210898dc3d376ffc67a5a56f08d36aef9ea9d
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/pRolocdata/examples/groen2014.Rd.R
|
c25e03e77740cae6b0964840ddeefca76a21ce2c
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 731
|
r
|
groen2014.Rd.R
|
## Auto-extracted example code from the pRolocdata man page for the groen2014
## datasets (LOPIT experiments on Arabidopsis thaliana roots).
library(pRolocdata)
### Name: groen2014
### Title: LOPIT experiments on Arabidopsis thaliana roots, from Groen et
###   al. (2014)
### Aliases: groen2014 groen2014r1 groen2014r2 groen2014r3 groen2014cmb
###   groen2014r1goCC
### Keywords: datasets
### ** Examples
## the three individual replicates and the pre-combined dataset
data(groen2014r1)
data(groen2014r2)
data(groen2014r3)
data(groen2014cmb)
## The combine dataset can generated manually using
## (updateFvarLabels() renames feature variables so combine() does not clash;
## filterNA() drops proteins not quantified in every replicate)
cmb <- combine(groen2014r1, updateFvarLabels(groen2014r2))
cmb <- filterNA(cmb)
cmb <- combine(cmb, updateFvarLabels(groen2014r3))
cmb <- filterNA(cmb)
fData(cmb) <- fData(cmb)[, c(1,2,5)]
cmb
## or can simply be loaded directly
data(groen2014cmb)
## check datsets are the same
all.equal(cmb, groen2014cmb, check.attributes=FALSE)
|
3583bf8d44137bd50d60667099263f4985025fab
|
7a667a26aa13315da508003bc71b285a2032e00c
|
/man/cocoSetLogLevel.Rd
|
2529caa1ff1357e3d2e84e0d1d79344057db9d3e
|
[
"BSD-2-Clause"
] |
permissive
|
berndbischl/rcoco
|
973bf74aaf3899961eb7137479f92f836c0d7a6e
|
848a862c0cbe583831853a978de6adc932c20b5f
|
refs/heads/master
| 2021-01-23T03:13:30.287541
| 2017-06-22T12:55:04
| 2017-06-22T12:55:04
| 86,060,085
| 3
| 1
| null | 2017-06-22T09:54:07
| 2017-03-24T11:17:20
|
C
|
UTF-8
|
R
| false
| true
| 449
|
rd
|
cocoSetLogLevel.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cocoSetLogLevel.R
\name{cocoSetLogLevel}
\alias{cocoSetLogLevel}
\title{Set log level for coco platform.}
\usage{
cocoSetLogLevel(level = "info")
}
\arguments{
\item{level}{[\code{character(1)}]\cr
Log level, can be \dQuote{debug}, \dQuote{info}, \dQuote{warning}, \dQuote{error}.
Default is \dQuote{info}.}
}
\value{
[\code{invisible(NULL)}]
}
\description{
Sets the logging verbosity for the coco platform; messages below the chosen level are suppressed.
}
|
bd2c2f1b4437150ca292e2faf075f4139ce80bcc
|
46450b27a8693ebba08bdd88f46d9d9dee4c10d4
|
/CSB 2018 Final Project Chris Lee/get_AUC_of_Samples.R
|
d41b96d1eabaeaf22be5868a4c4e3060175664e6
|
[] |
no_license
|
ChrisLeeUchicago/CSB-2018-Project
|
ee17ddc66ff09cf03b8beac56c6903a437a47108
|
c7a0497cc2bf023bfbf277f3369fc163540e0998
|
refs/heads/master
| 2021-09-09T00:23:47.577374
| 2018-03-12T22:37:53
| 2018-03-12T22:37:53
| 124,698,683
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,911
|
r
|
get_AUC_of_Samples.R
|
#this portion of the code directly follows from the get carbonyl and get OH signal functions
#it assumes that the global environment contains the carbonyl and OH signal data
#for this part, we'll need the package zoo so we'll need to install and load it
install.packages("zoo")
#if zoo is already installed, we just need to load the library
library(zoo)
# Trapezoidal area under the curve for a carbonyl and an OH signal region.
#
# Each input is a data frame (or tibble) holding one spectral region with the
# columns `Wavenumbers` (x axis) and `Absorbance` (y axis). Rows are sorted by
# wavenumber and the area is computed with the trapezoid rule:
# sum(diff(x) * midpoint(y)).
#
# Fix vs. the original version: the old code called dplyr's select()/pull()
# without ever loading dplyr (only zoo was loaded), and used zoo::rollmean()
# for the width-2 running mean. Both are replaced with base-R equivalents that
# produce identical numbers, so the function has no undeclared dependencies.
#
# Args:
#   carbonyl : data frame with Wavenumbers/Absorbance for the carbonyl peak.
#   OH       : data frame with Wavenumbers/Absorbance for the OH peak.
# Returns: numeric vector c(AUC_carbonyl, AUC_OH); both values are also printed.
get_AUC <- function(carbonyl, OH){
  # Trapezoid-rule AUC of one (x, y) region; numerically equivalent to
  # sum(diff(x[order(x)]) * zoo::rollmean(y[order(x)], 2)).
  trapezoid_auc <- function(region) {
    x <- as.numeric(region[["Wavenumbers"]])
    y <- as.numeric(region[["Absorbance"]])
    id <- order(x)
    x <- x[id]
    y <- y[id]
    # midpoints of consecutive y values == rolling mean of width 2
    sum(diff(x) * (y[-1] + y[-length(y)]) / 2)
  }
  AUC_carbonyl <- trapezoid_auc(carbonyl)
  AUC_OH <- trapezoid_auc(OH)
  print(paste("The AUC for carbonyl is", AUC_carbonyl)) #print the results
  print(paste("The AUC for OH is", AUC_OH)) #print the results
  c(AUC_carbonyl, AUC_OH) #return the concatenated AUC values
}
## Example invocation: carbonyl_signal_1 and OH_signal_1 must already exist in
## the workspace (produced by the upstream carbonyl/OH signal extraction step).
#once the function is loaded, we can run it with our carbonyl and OH signal data and save the output
#for example:
sampleAUC <-get_AUC(carbonyl_signal_1, OH_signal_1)
|
662805faf3acc23d64853845aec6c1c60fd5bc7a
|
495ebf2ec08b9fabdaa689c7b9aa8bedd168a022
|
/man/ExploreModelMatrix.Rd
|
94fd132ee567a0a7ab2cdf136df24c8406779600
|
[
"MIT"
] |
permissive
|
csoneson/ExploreModelMatrix
|
bec8a9556beaa4bd906b5a8eb777fcc2f8470f50
|
5ec1ff318756631c4e1235457f5b41582dfe4580
|
refs/heads/devel
| 2023-05-25T18:55:09.510113
| 2023-05-13T07:39:31
| 2023-05-13T07:39:31
| 195,576,287
| 37
| 2
|
NOASSERTION
| 2023-04-22T18:53:05
| 2019-07-06T19:32:33
|
R
|
UTF-8
|
R
| false
| true
| 1,285
|
rd
|
ExploreModelMatrix.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ExploreModelMatrix.R
\name{ExploreModelMatrix}
\alias{ExploreModelMatrix}
\title{Explore model matrix}
\usage{
ExploreModelMatrix(sampleData = NULL, designFormula = NULL)
}
\arguments{
\item{sampleData}{(optional) A \code{data.frame} or \code{DataFrame}
with sample information. If set to \code{NULL}, the user can upload
the sample information from a tab-separated text file inside the app, or
choose among a collection of example designs provided in the app.}
\item{designFormula}{(optional) A \code{formula}. All components of
the terms must be present as columns in \code{sampleData}. If set to
\code{NULL}, the design formula can be specified after launching the app.}
}
\value{
A Shiny app object
}
\description{
Given a sample data table and a design formula, explore the
resulting design
matrix graphically in an interactive application.
}
\examples{
app <- ExploreModelMatrix(
sampleData = data.frame(genotype = rep(c("A", "B"), each = 4),
treatment = rep(c("treated", "untreated"), 4)),
designFormula = ~genotype + treatment
)
if (interactive()) shiny::runApp(app)
}
\author{
Charlotte Soneson, Federico Marini, Michael I Love, Florian Geier,
Michael B Stadler
}
|
37cd05f764b304e21401f5a1aa40932993502708
|
1a5910e469aaa076b8d8f59577b4be6afbf5a054
|
/plot1.R
|
95ed885c1d0c9268fbb60a633b143ce5f52866b1
|
[] |
no_license
|
sshariqrizvi/ExData_Plotting1
|
ac5e1ba6db6025e90632c014fe0163681629bce9
|
ab01ebff9cc80a6a82988a3ac5e5f9dda0e57e5c
|
refs/heads/master
| 2021-01-21T05:40:35.409819
| 2015-07-12T22:27:53
| 2015-07-12T22:27:53
| 38,972,410
| 0
| 0
| null | 2015-07-12T18:15:43
| 2015-07-12T18:15:43
| null |
UTF-8
|
R
| false
| false
| 650
|
r
|
plot1.R
|
# plot1.R -- histogram of Global Active Power for 2007-02-01..2007-02-02,
# copied to figure/plot1.png (480 x 480; assumes the 'figure' directory exists).
#
# The raw file uses ';' separators and '?' for missing values. The parsed data
# frame is cached in `mydata` so re-running the script skips the slow read;
# delete `mydata` to force a reload.
# Fixes vs. original: TRUE instead of the reassignable shorthand T, and <-
# instead of = for assignment.
if (!exists("mydata"))
{
  colClasses <- c("character", "character", rep("numeric", 7))
  mydata <- read.csv("household_power_consumption.txt", header = TRUE, sep = ";",
                     na.strings = "?", colClasses = colClasses)
  # Combine the character Date and Time columns into a single timestamp.
  mydata$DateTime <- strptime(paste(mydata$Date, mydata$Time), "%d/%m/%Y %H:%M")
  mydata$Date <- as.Date(mydata$Date, "%d/%m/%Y")
  # Keep only the two target days.
  mydata <- mydata[mydata$Date >= as.Date("2007/02/01") & mydata$Date <= as.Date("2007/02/02"), ]
}
hist(mydata$Global_active_power, freq = TRUE, col = "red",
     main = "Global Active Power", xlab = "Global Active Power (kilowatts)", ylab = "Frequency")
# Copy the on-screen plot to a PNG file.
dev.copy(png, "figure/plot1.png", width = 480, height = 480)
dev.off()
|
b9eec523ffdab6affdb781322862c40bff4805ba
|
e3837eaf2c65c74c0e01ad395dd39373ba5eafba
|
/src/gpdream/modules/Inferelator/src/main.R
|
c9cb4cd57da6a72dc5ab2d53cd4bb3afd26f6b74
|
[
"MIT"
] |
permissive
|
kevintee/Predicting-Gene-Networks
|
907df99b37e7de29db79ff041856e37fed06b949
|
bf415f2b11cd7289b13ab900752cf1f856ce4b47
|
refs/heads/master
| 2020-06-01T11:28:44.124804
| 2015-05-06T06:52:25
| 2015-05-06T06:52:25
| 31,395,188
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,037
|
r
|
main.R
|
## .-.-. .-.-. .-.-. .-.-. .-.-. .-.-. .-.-. .-.-.
## /|/ \|\ /|/ \|\ /|/ \|\ /|/ \|\ /|/ \|\ /|/ \|\ / / \ \ / / \ \
##`-' `-`-' `-`-' `-`-' `-`-' `-`-' `-`-' `-`-' ' '
## May th17
## Bonneau lab - "Aviv Madar" <am2654@nyu.edu>,
## NYU - Center for Genomics and Systems Biology
## .-.-. .-.-. .-.-. .-.-. .-.-. .-.-. .-.-. .-.-.
## /|/ \|\ /|/ \|\ /|/ \|\ /|/ \|\ /|/ \|\ /|/ \|\ / / \ \ / / \ \
##`-' `-`-' `-`-' `-`-' `-`-' `-`-' `-`-' `-`-' ' '
## .-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.
# 1- reads params, design and response matrices, found in PARAMS and INPUT list respectively
## .-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.
# init PARAMS and INPUT
#params for main (object PARAMS defined in init.R)
## Run-level parameters. PARAMS and INPUT are populated by an init script that
## is not part of this file (see header comment) -- they must exist before this
## point.
b = 1 # current iteration/bootstrap number
N_b = PARAMS$"general"$"numBoots" # number of bootstraps
btch_size = 10 # calculate this number of genes' MI scores against all predictors in batches (to avoid running out of memory when calculating MI); NOTE(review): only referenced again in the cleanup rm() below
percentCoverage <- PARAMS[["general"]][["percentCoverage"]] # (usually 100) percent of matrix that we want to resample
lambda = PARAMS[["lars"]][["lambda"]] # set of l2 norm regularization weights to try in elastic net
cleanUp <- FALSE # clear functions and other intermediate variables at end of run (leaves important variables more visible for end users)
# response and design matrices for clr
Y_clr = INPUT[["clr"]][["response_matrix"]]
###########
X_clr = INPUT[["clr"]][["design_matrix"]] # single predictors
# response and design matrices for lars
Y_lars = INPUT[["lars"]][["response_matrix"]]
###########
X_lars = INPUT[["lars"]][["design_matrix"]] # single predictors
# store results (ODEs, Z scores, and error for each model for each bootstrap run, respectively)
betaList = vector("list", N_b)
modelErrorList = vector("list", N_b)
#startTime <- date() #times how long a run takes
allResults <- list() #list for storing all models
# sanity checks: bail out early on datasets too small for a meaningful fit
# (conditions = columns, regulators = rows of the design matrix)
if(ncol(X_lars)<10){
stop("Too few conditions. Min number of conditions (experiments) required is 10! (30 or more for robust results). Bailing out...")
}
if(nrow(X_lars)<5){
stop("Too few regulators. Min number of regulators (TFs) required is 5! Bailing out...")
}
## .-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.
# 2- calculate Median corrected Zscores based on KO data
## .-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.
# skip for gene pattern version
## .-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.
# 3- setup for bootstrap: create Pi-perm_vector/matrix, Y^pi,X^pi
## .-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.
## Main bootstrap loop. Each iteration: (a) resample conditions via a
## permutation matrix (iteration 1 = original data), (b) compute dynamic and
## background mutual information, (c) turn MI into (mixed-)CLR z-scores,
## (d) optionally filter by knockout (MCZ) evidence, (e) fit the Inferelator
## elastic-net ODE models, and (f) combine the pipelines' confidence scores.
## Helper functions (createPermMatrix, permuteCols, calc_MI_one_by_one_parallel,
## mixed_clr*, clr, calc_ode_model_weights_parallel, add_*, combine_*, unsparse)
## are defined elsewhere in the package.
while (b <= N_b) {
#create permutation matrix
cat("bootstrap #: ",b,"\n")
if(b == 1){
#here we want the original permutation, ie. getOrigPerm = TRUE (i.e. first bootstrap is exact dataset, no resampling)
Pi_s_clr=createPermMatrix(cS=INPUT[["general"]][["clusterStack"]], allConds = colnames(Y_clr), getOrigPerm = TRUE, percentCoverage = percentCoverage)
Pi_s_lars=Pi_s_clr
} else {
Pi_s_clr=createPermMatrix(cS=INPUT[["general"]][["clusterStack"]], allConds = colnames(Y_clr), getOrigPerm = FALSE, percentCoverage = percentCoverage)
Pi_s_lars=Pi_s_clr
}
#create bicluster specific permutation matrix (ie. read from Pi_g, algorithm described in method comments)
#this should be changed to be general for both cases where we have only single genes and cases where we havee biclusters
Y_clr_p = permuteCols(Y_clr,Pi_s_clr)
X_clr_p = permuteCols(X_clr,Pi_s_clr)
#----added by Alex----9/19-----------#
#the code below speeds up the calculation of Ms and Ms_bg by
#making a smaller design matrix
## rIx1 = all TFs plus a random subsample of other genes, up to numGenes total
if( PARAMS$clr$speedUp){
if(PARAMS$clr$numGenes < nrow(Y_clr)){
tfIx <- which(rownames(X_clr_p) %in% INPUT$general$tf_names)
otherIx <- c(1:nrow(X_clr_p))[-tfIx]
rIx1 <- c(tfIx,sample( otherIx, PARAMS$clr$numGenes -length(tfIx),replace=F))
} else {
rIx1 <- 1:nrow(Y_clr)
}
}
## .-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.
# 4- pass one: fill M - mutual information matrix or correlation matrix
## .-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.
# dynamic MI scores stored here
cat("calculating dynamic MI ")
#if(PARAMS[["general"]][["processorsNumber"]] > 1){
if( PARAMS$clr$speedUp){
Ms <- calc_MI_one_by_one_parallel( Y_clr, X_clr[rIx1,], Pi_s_clr, processorsNumber = PARAMS[["general"]][["processorsNumber"]], n.bins=PARAMS[["clr"]][["n.bins"]])
}
else{
Ms <- calc_MI_one_by_one_parallel( Y_clr, X_clr, Pi_s_clr, processorsNumber = PARAMS[["general"]][["processorsNumber"]], n.bins=PARAMS[["clr"]][["n.bins"]])
}
## zero out self-MI; in speedUp mode the diagonal entries live at the mapped
## positions (row rIx1[k], column k) rather than on the matrix diagonal
if(PARAMS$clr$speedUp & ( PARAMS$clr$numGenes < nrow(Y_clr) )){
x <- cbind(rIx1,1:PARAMS$clr$numGenes)
for(i in 1:nrow(x)){
Ms[x[i,1],x[i,2]] <- 0
}
} else {
diag(Ms) = 0
}
cat("\n")
# static MI scores stored here
cat("calculating background MI ")
#if(PARAMS[["general"]][["processorsNumber"]] > 1){
if(PARAMS$clr$speedUp & ( PARAMS$clr$numGenes < nrow(Y_clr) ) ){
Ms_bg <- calc_MI_one_by_one_parallel( X_clr, X_clr[rIx1,], Pi_s_clr, processorsNumber = PARAMS[["general"]][["processorsNumber"]], n.bins=PARAMS[["clr"]][["n.bins"]])
}else{
Ms_bg <- calc_MI_one_by_one_parallel( X_clr, X_clr, Pi_s_clr, processorsNumber = PARAMS[["general"]][["processorsNumber"]], n.bins=PARAMS[["clr"]][["n.bins"]])
}
if(PARAMS$clr$speedUp & ( PARAMS$clr$numGenes < nrow(Y_clr) )){
x <- cbind(rIx1,1:PARAMS$clr$numGenes)
for(i in 1:nrow(x)){
Ms_bg[x[i,1],x[i,2]] <- 0
}
}else{
diag(Ms_bg) = 0
}
cat("\n")
## .-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.
# 5- calculate mixed-CLR (or clr) matrix
## .-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.
if(PARAMS[["general"]][["use_mixCLR"]]){
cat("running mix-CLR ")
# Z_nt_fltrd = mixed_clr(Ms_bg,Ms)
Z_nt_fltrd = mixed_clr_parallel(Ms_bg,Ms,processorsNumber=PARAMS[["general"]][["processorsNumber"]])
} else {
cat("running CLR ")
Z_nt_fltrd = clr(Ms)
}
cat("\n")
if(PARAMS$clr$speedUp){
colnames(Z_nt_fltrd) <- rownames(X_clr)[rIx1]
}else{
colnames(Z_nt_fltrd) <- rownames(X_clr)
}
rownames(Z_nt_fltrd) <- rownames(X_clr)
## keep only TF columns: Z is targets x TFs
Z <- Z_nt_fltrd[,INPUT[["general"]][["tf_names"]]]
## .-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.
# 6- apply MCZ filter -- i.e. remove unlikely reg inters from further consideration by mixedCLR (and thus from Inf)
## .-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.
# filter cutoff
# KOs first
ct = PARAMS[["general"]][["MCZ_fltr_prcntile"]]
if(!is.null(INPUT$general$knockOutFilterList)){
x <- INPUT$general$knockOutFilterList
tfs <- names(x)
if(!is.null(tfs))
for(i in 1:length(x)) {
## zero out targets whose knockout z-score falls below the ct percentile
bad.trgts <- names(x[[ tfs[i] ]])[which(x[[ tfs[i] ]] < quantile(x[[ tfs[i] ]],ct))]
Z[bad.trgts,tfs[i]] <- 0
}
}
# make sure Z has at least two non-zero predictors for each target (without allowing self regulation)
## targets left with <2 candidate TFs get minimal-score placeholders so the
## downstream elastic net always has something to select from
n.pred.per.trgt <- apply(Z,1,function(i) length(which(i!=0)))
ix <- which(n.pred.per.trgt<2)
if(length(ix)>0){
min.z <- min(Z[which(Z!=0)])
ix <- which(n.pred.per.trgt==1)
if(length(ix)>0){
for(k in 1:length(ix)){
ix.replacable <- which(Z[ix[k],]==0)
ix.bad <- which(names(ix.replacable) %in% names(ix[k]))
if(length(ix.bad)>0){
ix.replacable <- ix.replacable[-ix.bad]
}
Z[ix[k],which(Z[ix[k],]==0)[ix.replacable[1]]] <- min.z
}
}
ix <- which(n.pred.per.trgt==0)
if(length(ix)>0){
for(k in 1:length(ix)){
ix.replacable <- which(Z[ix[k],]==0)
ix.bad <- which(names(ix.replacable) %in% names(ix[k]))
if(length(ix.bad)>0){
ix.replacable <- ix.replacable[-ix.bad]
}
Z[ix[k],which(Z[ix[k],]==0)[ix.replacable[1:2]]] <- min.z
}
}
}
## .-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.
# 7- run Inferelator (elastic net with ODE based modifications to response and design matrices)
## .-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.
cat("running elastic net ")
nCv <- min(10,floor(ncol(Y_lars)/2))
x = calc_ode_model_weights_parallel(Xs = X_lars,Y = Y_lars, Pi = Pi_s_lars, M1 = Z, nS = PARAMS[["lars"]][["max_single_preds"]], nCv = nCv,
lambda=lambda, processorsNumber = PARAMS[["general"]][["processorsNumber"]], plot.it = FALSE,
plot.file.name = "",verbose = FALSE)
cat("\n")
betaList[[b]] = x[[1]]
modelErrorList[[b]] = t(x[[2]])
## NOTE(review): model_errors uses modelErrorList[[1]] (first bootstrap) here,
## not [[b]] -- looks like a possible bug; confirm against upstream sources.
betaList[[b]]=add_weight_beta(bL=betaList[[b]],model_errors=modelErrorList[[1]],n=nrow(Y_lars),pS=nrow(X_lars),pD=0,col=4,col_name = "prd_xpln_var" )
betaList[[b]]=add_zscore(bL=betaList[[b]],M1=Z,M2=NULL,col=5,col_name = "clr_zs")
betaList[[b]]=add_bias_term(bL=betaList[[b]],bT=t(x[[3]]),col=6,col_name = "bias")
rm(x)
## .-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.
# 8- run heuristic to combine results from different methods (MCZ, mixCLR, and Inf)--- i.e. results from different pipelines
## .-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.
# elastic net produces a model for each l2 norm weight (choose a model for each target from the l2 norm weight with minimum CV error)
beta.mat = combine_l2_net_res(betaList[[b]],modelErrorList[[b]],col="beta")
# beta list is a sparse matrix representation. Turn it into a matrix
beta.mat = unsparse(beta.mat ,matrix(0,dim(Z)[1],dim(Z)[2]) )
# same as beta.mat only instead of having beta weight as values it has predictive value for each reg inter
pred.mat.lnet = combine_l2_net_res(betaList[[b]],modelErrorList[[b]],col="prd_xpln_var")
pred.mat.lnet = unsparse(pred.mat.lnet,matrix(0,dim(Z)[1],dim(Z)[2]) )
# for each trgt get the bias term (needed to predict system's response to new perturbations)
pred.mat.bias = combine_l2_net_res(betaList[[b]],modelErrorList[[b]],col="bias")
# this is the heuristic described in DREAM3 and DREAM4 papers z = sqrt(z1^2+z2^2)^2
# first for DREAM3 pipeline (not additive with MCZ)
base.vec <- sort(Z,decreasing=T)
base.vec <- base.vec[which(base.vec>0)]
pred.mat.lnet.mixCLR = combine_mtrcs_new(Z,pred.mat.lnet,base.vec=base.vec)
# second for DREAM5 pipeline
# apply threshold to combine z-scores from annotated KOs, then use to push up scores
if(!is.null(INPUT$general$knockOutCombine)){
z.annot.ko <- INPUT$general$knockOutCombine
z.annot.ko[which(z.annot.ko < PARAMS[["general"]][["z_score_co"]])] <- 0
pred.mat.mixCLR.ko <- combine_mtrcs_new(pred.mat.lnet.mixCLR, z.annot.ko, base.vec=base.vec)
}else {
pred.mat.mixCLR.ko = NULL
}
## .-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.
# 10- store current re-sampling results
## .-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.
allResults[[b]] <- list()
# allResults[[b]][["betaMat"]] <- beta.mat
# allResults[[b]][["bias"]] <- pred.mat.bias
allResults[[b]][["MixCLR.Inf"]] <- pred.mat.lnet.mixCLR
allResults[[b]][["MixCLR.Inf.ko"]] <- pred.mat.mixCLR.ko
## .-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.
# 11- while b<N_b increament b by 1 adn repeat steps 2-6
## .-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.
b = b + 1
}
## Post-processing: take the element-wise median of the per-bootstrap
## confidence matrices (KO-augmented pipeline when knockout data was supplied,
## plain MixCLR+Inferelator otherwise) and write the ranked predictions file.
#save(allResults,file=paste(PARAMS$general$saveToDir,"/all_results_",b-1,".RData",sep=""))
if(is.null(allResults[[1]][["MixCLR.Inf.ko"]])){
median.conf.scores <- getMedianNetworkFromBootstraps(allResults, "MixCLR.Inf")
dimnames(median.conf.scores) <- dimnames(allResults[[1]][["MixCLR.Inf"]])
}else{
median.conf.scores <- getMedianNetworkFromBootstraps(allResults, "MixCLR.Inf.ko")
dimnames(median.conf.scores) <- dimnames(allResults[[1]][["MixCLR.Inf.ko"]])
}
#write.table(median.conf.scores, file=paste("mix_clr_inf_median_scores_",b-1,".xls",sep=""),sep="\t")
#formatting the filename
file.name = paste(formatOutFNameGP(PARAMS$general$d.path), "_InferelatorPipeline_predictions.txt", sep = "")
x <- save.predictions.dream5(median.conf.scores, file.name, PARAMS[["general"]][["num.inters.out"]])
## .-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.
# 13- cleanup tmp variables functions
## .-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.***.-.-.
## optional housekeeping (cleanUp defaults to FALSE above); removes helper
## functions and intermediates so end users see only the final results
if (cleanUp) {
rm(numGenesInNet,make_final_design_and_response_matrix,add_bias_term,add_weight_beta,add_zscore,calc_MI_inBatces,calc_ode_model_weights,
calcDblKoRmsd,calcFoldChange,calcZscores,create_Pi_g,create_Pi_s,create_Xpi,createPermMatrix,fName,get_all_perms,get_best_preds_idx,
get_usr_chosen_dataset,get_usr_chosen_design_matrix,get_usr_chosen_response,let_usr_choose_dataset,let_usr_choose_design_matrix,
let_usr_choose_response,load_gold_standard,load_predictions,make_sparse2,makePredictions,modelErrorList,percentCoverage,permuteCols,
Pi_s_clr,Pi_s_lars,saveInt,splitDreamDataByType,btch_size)
}
|
cb5f08405b699af0ca4f02a04fd15929a2643035
|
a85284bff5a7cb382e737d2aad40c1429c458741
|
/R/straf.R
|
d29bf60cbaa986e76ce6a9180a9d1134d5253975
|
[] |
no_license
|
asitav-sen/straf
|
0db902e187b78d0c17d570a866dbf6d5f407698e
|
aac57d4566624bd3408e85fb31aded0eaf6f4ed1
|
refs/heads/master
| 2023-04-19T23:39:03.689994
| 2021-05-14T18:24:37
| 2021-05-14T18:24:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,524
|
r
|
straf.R
|
#' straf: STR Analysis for Forensics
#'
#' straf is a Shiny application to perform Short Tandem Repeats (STRs, also
#' known as microsatellites) data analysis. The application allows one to
#' compute forensic parameters, population genetics indices, and investigate
#' population structure through various methods and generate relevant data
#' visualisations. It also implements file conversion to other popular formats.
#'
#' @section Running the app:
#' One simply needs to call the runStraf() function to start the application.
#'
#' @docType package
#' @name straf
#' @rawNamespace import(shiny, except = c(dataTableOutput, renderDataTable))
#' @importFrom ade4 dudi.pca
#' @importFrom adegenet as.genind df2genind pop<- locNames genind2genpop transp funky dist.genpop loadingplot makefreq pop
#' @importClassesFrom adegenet genind
#' @importFrom colourpicker colourInput
#' @importFrom DT dataTableOutput renderDataTable datatable
#' @importFrom ggplot2 ggplot geom_point labs theme_minimal aes
#' @importFrom ggrepel geom_text_repel
#' @importFrom graphics abline axis barplot hist image legend par
#' @importFrom hierfstat pairwise.WCfst genind2hierfstat
#' @importFrom magrittr "%>%"
#' @importFrom openxlsx write.xlsx
#' @importFrom pegas LD LD2 genind2loci hw.test
#' @importFrom reshape2 acast
#' @importFrom shinyWidgets awesomeCheckbox pickerInput
#' @importFrom stats as.dist cmdscale cov frequency ks.test qqplot qunif
#' @importFrom tidyr gather
#' @importFrom utils read.table write.table
NULL
#> NULL
|
f2d2b630dc143ee065560d504cf4493ce6de6407
|
1aff1c32b46fd0c88f97bbb787b49c46c4138c49
|
/R/emodnet_wfs.R
|
ccd6c8caa8f9dce655bf4c9d56a6cf668692302f
|
[
"MIT"
] |
permissive
|
EMODnet/EMODnetWFS
|
31d6a48709cbee11a332b83fb23c31d7f788330a
|
8b52b4de54ff9517da82932e6c46abc1ed0c48ba
|
refs/heads/main
| 2023-05-22T12:47:09.644761
| 2023-04-13T07:03:42
| 2023-04-13T07:03:42
| 270,789,361
| 6
| 4
|
NOASSERTION
| 2023-04-11T12:22:36
| 2020-06-08T18:57:56
|
R
|
UTF-8
|
R
| false
| false
| 335
|
r
|
emodnet_wfs.R
|
# Read the bundled services registry (inst/services.csv) fresh from disk;
# memoised below so repeated calls hit the cache instead of the file system.
.emodnet_wfs <- function() {
  services_csv <- system.file("services.csv", package = "EMODnetWFS")
  utils::read.csv(services_csv, stringsAsFactors = FALSE)
}
#' Available EMODnet Web Feature Services
#'
#' @return Tibble of available EMODnet Web Feature Services
#'
#' @examples
#' emodnet_wfs()
#' @export
emodnet_wfs <- memoise::memoise(.emodnet_wfs)
|
172c49b51444b1b8c7297a04b9ee86406e0c3c4d
|
d79f3401546deaa155f0788139b5b5175be13091
|
/plot4.R
|
15b15ab25fd42f4b02db1ce34d4b526f3c198533
|
[] |
no_license
|
q5deng/ExData_Plotting1
|
760648888a038f3dc1eae1e3747c1575fa83730f
|
718c530b6468c46e518d346ceaf079806aebbc16
|
refs/heads/master
| 2021-01-24T04:09:18.336813
| 2015-08-04T17:38:52
| 2015-08-04T17:38:52
| 40,198,493
| 0
| 0
| null | 2015-08-04T17:24:42
| 2015-08-04T17:24:41
| null |
UTF-8
|
R
| false
| false
| 1,322
|
r
|
plot4.R
|
# plot4.R -- 2x2 panel of household power consumption plots for
# 2007-02-01..2007-02-02, written to plot4.png (480 x 480).
#
# Fixes vs. original: `na = "?"` relied on partial argument matching for
# `na.strings` and is spelled out; the three attach() calls (which masked
# columns and left data on the search path) are removed in favour of explicit
# `dataset$` / `newdat$` references. The plots produced are identical.
## load data ##
dataset <- read.table("C:/Users/Deng/Documents/household_power_consumption.txt",
                      header = TRUE, sep = ";", na.strings = "?",
                      colClasses = c("character", "character", rep("numeric", 7)))
## subset the data: keep only the two target days ##
newdat <- dataset[dataset$Date == "1/2/2007" | dataset$Date == "2/2/2007", ]
## build a proper timestamp column from the character Date and Time fields ##
DT <- paste(newdat$Date, newdat$Time)
newdat$DateTime <- strptime(DT, "%d/%m/%Y %H:%M:%S")
rownames(newdat) <- seq_len(nrow(newdat))
## plot figure 4 ##
png(filename = "plot4.png", width = 480, height = 480)
par(mfrow = c(2, 2))
## 1. Top-left: global active power over time
plot(newdat$DateTime, newdat$Global_active_power, type = "l", xlab = "", ylab = "Global Active Power")
## 2. Top-right: voltage over time
plot(newdat$DateTime, newdat$Voltage, type = "l", xlab = "datetime", ylab = "Voltage")
## 3. Bottom-left: the three sub-metering series on one axis
plot(newdat$DateTime, newdat$Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering", col = "black")
lines(newdat$DateTime, newdat$Sub_metering_2, col = "red")
lines(newdat$DateTime, newdat$Sub_metering_3, col = "blue")
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       col = c("black", "red", "blue"), lwd = 1, bty = "n")
## 4. Bottom-right: global reactive power over time
## NOTE(review): ylab says "Global Active Power" although the data plotted is
## Global_reactive_power -- kept as-is to preserve the original output; likely
## a copy-paste label bug worth confirming.
plot(newdat$DateTime, newdat$Global_reactive_power, type = "l", xlab = "datetime", ylab = "Global Active Power", col = "black")
dev.off()
|
13fad7d669d94e4b477838d940e7b11eff622521
|
c750c1991c8d0ed18b174dc72f3014fd35e5bd8c
|
/pkgs/oce/man/argoGrid.Rd
|
4ef986099d66ad1aa65e829ba25d32154894875c
|
[] |
no_license
|
vaguiar/EDAV_Project_2017
|
4b190e66fe7a6b4078cfe1b875bccd9b5a594b25
|
288ffaeec1cfdd873fe7439c0fa0c46a90a16a4f
|
refs/heads/base
| 2021-01-23T02:39:36.272851
| 2017-05-01T23:21:03
| 2017-05-01T23:21:03
| 86,010,131
| 1
| 0
| null | 2017-05-01T23:43:04
| 2017-03-24T00:21:20
|
HTML
|
UTF-8
|
R
| false
| true
| 2,649
|
rd
|
argoGrid.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/argo.R
\name{argoGrid}
\alias{argoGrid}
\title{Grid Argo float data}
\usage{
argoGrid(argo, p, debug = getOption("oceDebug"), ...)
}
\arguments{
\item{argo}{A \code{argo} object to be gridded.}
\item{p}{Optional indication of the pressure levels to which interpolation
should be done. If this is not supplied, the pressure levels will be
calculated based on the existing values, using medians. If \code{p="levitus"},
then pressures will be set to be those of the Levitus atlas, given by
\code{\link{standardDepths}}, trimmed to the maximum pressure in \code{argo}.
If \code{p} is a single numerical value, it is taken as the number of
subdivisions to use in a call to \code{\link{seq}} that has range from 0 to the
maximum pressure in \code{argo}. Finally, if a vector numerical values is
provided, then it is used as is.}
\item{debug}{A flag that turns on debugging. Higher values provide deeper
debugging.}
\item{...}{Optional arguments to \code{\link{approx}}, which is used to do the
gridding.}
}
\value{
An object of \code{\link{argo-class}} that contains a pressure matrix
with constant values along the first index.
}
\description{
Grid an Argo float, by interpolating to fixed pressure levels.
The gridding is done with \code{\link{approx}}. If there is
sufficient user demand, other methods may be added, by analogy to
\code{\link{sectionGrid}}.
}
\section{A note about flags}{
Data-quality flags contained within the original object are ignored by this
function, and the returned value contains no such flags. This is because such
flags represent an assessment of the original data, not of quantities derived
from those data. This function produces a warning to this effect. The
recommended practice is to use \code{\link{handleFlags}} or some other means to
deal with flags before calling the present function.
}
\examples{
library(oce)
data(argo)
g <- argoGrid(argo, p=seq(0, 100, 1))
par(mfrow=c(2,1))
t <- g[["time"]]
z <- -g[["pressure"]][,1]
## Set zlim because of spurious temperatures.
imagep(t, z, t(g[['temperature']]), ylim=c(-100,0), zlim=c(0,20))
imagep(t, z, t(g[['salinity']]), ylim=c(-100,0))
}
\seealso{
Other things related to \code{argo} data: \code{\link{[[,argo-method}},
\code{\link{[[<-,argo-method}}, \code{\link{argo-class}},
\code{\link{argoNames2oceNames}}, \code{\link{argo}},
\code{\link{as.argo}},
\code{\link{handleFlags,argo-method}},
\code{\link{plot,argo-method}}, \code{\link{read.argo}},
\code{\link{subset,argo-method}},
\code{\link{summary,argo-method}}
}
\author{
Dan Kelley and Clark Richards
}
|
30056500c64c92e31647af4018706ad769b0ab1b
|
34f6961b79a5d283b50e6ba0792d8d9ddca796f4
|
/R/normalize.R
|
b21921419239cf3637338c3561d03f34a56b8918
|
[] |
no_license
|
ttdtrang/cdev-paper
|
f9b364d9e1a7c82d4eecdb1740088174bb97f8b0
|
5020b44cc95cd5fba8ff69572a9cb48b36ca55b2
|
refs/heads/main
| 2023-06-24T19:17:00.967612
| 2021-07-22T21:59:16
| 2021-07-22T21:59:16
| 342,710,467
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,596
|
r
|
normalize.R
|
#' normalize.by.qsmooth
#'
#' Smoothed quantile normalization via the qsmooth package. The input matrix
#' is transposed before the call (qsmooth expects genes x samples -- so X is
#' presumably samples x genes, matching normalize.by.tmm's documented
#' orientation; TODO confirm against callers) and the smoothed data are
#' transposed back to the input orientation.
#'
#' @param X read count matrix
#' @param group_factor factor identifying the group of each sample
#' @param ... further arguments forwarded to \code{qsmooth::qsmooth}
#' @import qsmooth
#' @export
normalize.by.qsmooth <- function(X, group_factor, ...) {
  fit <- qsmooth::qsmooth(t(X), group_factor = group_factor, ...)
  # qsmoothData is an S4 slot holding the normalized (genes x samples) matrix.
  t(fit@qsmoothData)
}
#' normalize.by.tmm
#'
#' TMM normalization via edgeR::calcNormFactors. Each sample's counts are
#' divided by its library size divided by its TMM normalization factor.
#' NOTE(review): edgeR defines the *effective* library size as
#' libsize * factor, while this code divides by the factor -- confirm the
#' intended formula with the original author.
#'
#' @param X read count matrix with samples in rows and genes in columns
#' @param ... further arguments forwarded to edgeR::calcNormFactors
#' @return matrix of the same dimensions as X with normalized counts
#' @import edgeR
#' @export
normalize.by.tmm <- function(X, ...) {
  # calcNormFactors expects genes x samples, hence t(X); the result is one
  # scaling factor per sample, i.e. per *row* of X.
  normfactors.tmm = apply(X, 1, sum) / edgeR::calcNormFactors(t(X), method = 'TMM', ...)
  # Bug fix: the factors are per row (length nrow(X)), but
  # normalize.by.scaleFactors scales columns and rejects a factor vector whose
  # length differs from ncol(X). Divide the rows directly, as the original
  # (commented-out) implementation did.
  sweep(X, MARGIN = 1, STATS = normfactors.tmm, FUN = '/')
}
#' normalize.by.deseq
#'
#' Median-of-ratios normalization: divide each sample (column) by the size
#' factor estimated by DESeq2::estimateSizeFactorsForMatrix.
#'
#' @param X read count matrix in the form genes x samples
#' @param ... unused; kept for interface compatibility with the other
#'   normalize.by.* functions
#' @return matrix of the same dimensions as X with normalized counts, or NULL
#'   if normalize.by.scaleFactors rejects the factors
#' @import DESeq2
#' @export
normalize.by.deseq <- function(X, ...) {
  # One size factor per column (sample) of X.
  normFactors = DESeq2::estimateSizeFactorsForMatrix(X)
  normalize.by.scaleFactors(X, normFactors)
}
#' normalize.by.poissonseq
#'
#' Normalize by the sequencing-depth estimates of PoissonSeq.
#'
#' @param X read count matrix; it is transposed before the call, so
#'   PS.Est.Depth receives t(X) -- presumably X has samples in rows, as in
#'   normalize.by.tmm (TODO confirm against callers)
#' @param ... further arguments forwarded to PoissonSeq::PS.Est.Depth
#' @import PoissonSeq
#' @export
normalize.by.poissonseq <- function(X, ...) {
  # The magrittr `.` placeholder routes the estimated depths into the second
  # argument, i.e. normalize.by.scaleFactors(X, depths), which divides the
  # *columns* of X by the depths.
  # NOTE(review): normalize.by.scaleFactors requires one factor per column of
  # X, while PS.Est.Depth returns one depth per sample -- verify that the
  # orientation of X is consistent with that requirement.
  PoissonSeq::PS.Est.Depth(t(X), ...) %>%
    normalize.by.scaleFactors(X, .) %>%
    return()
}
#' normalize.by.refs
#'
#' Normalize a read count matrix given a set of reference genes: each sample
#' (column) is divided by the total count of the reference genes in that
#' sample, optionally centered by the geometric mean of those totals.
#'
#' @param X a read-count matrix of the form genes x samples
#' @param ref.idx an integer vector specifying the row indices of X to be used
#'   as reference genes
#' @param scale if TRUE, divide the per-sample reference totals by their
#'   geometric mean so the factors are centered around 1
#' @return the normalized matrix (same dimensions and dimnames as X), or NULL
#'   if any sample has a zero reference total
#' @export
normalize.by.refs <- function(X, ref.idx, scale = TRUE) {
  # drop = FALSE keeps a matrix even when a single reference row is selected
  # (replaces the previous length-1 special case).
  Xref = X[ref.idx, , drop = FALSE]
  normFactors = colSums(Xref)
  if (scale) {
    normFactors = normFactors / geom.mean(normFactors)
  }
  # Sanity check: a zero factor would produce division by zero.
  idx.zero = which(normFactors == 0)
  if (length(idx.zero) > 0) {
    message(paste0("All reference transcripts are zero in the sample ", idx.zero, ". Please remove.\n"))
    return(NULL)
  }
  X.norm = sweep(X, MARGIN = 2, STATS = normFactors, FUN = '/')
  colnames(X.norm) = colnames(X)
  rownames(X.norm) = rownames(X)
  X.norm
}
#' normalize.by.scaleFactors
#'
#' Divide each sample (column) of a count matrix by its scaling factor.
#'
#' @param X count matrix in the form genes x samples
#' @param scaleFactors a vector of scaling factors, one per column of X
#' @return the scaled matrix with the dimnames of X preserved
#' @export
normalize.by.scaleFactors <- function(X, scaleFactors) {
  if (length(scaleFactors) != ncol(X)) {
    stop("scaleFactors should have the same length as number of samples in the input matrix.")
  }
  # sweep preserves both row and column names; the previous implementation
  # discarded the result of `colnames<-`(), so column names were silently lost.
  sweep(X, MARGIN = 2, STATS = scaleFactors, FUN = '/')
}
# Upper-quartile normalization, variant 1: divide each column (sample) by its
# effective library size (library size x upper-quartile normalization factor).
uq.v1 <- function(X, group, ...) {
  uq.factors <- edgeR::calcNormFactors(X, method = 'upperquartile', group = group, ...)
  eff.lib.sizes <- colSums(X) * uq.factors
  sweep(X, 2, eff.lib.sizes, "/")
}
# Upper-quartile normalization, variant 2: rescale each column (sample) by the
# ratio of the mean effective library size to its own effective library size,
# so the normalized counts stay on the original count scale.
uq.v2 <- function(X, group, ...) {
  uq.factors <- edgeR::calcNormFactors(X, method = 'upperquartile', group = group, ...)
  eff.lib.sizes <- colSums(X) * uq.factors
  sweep(X, 2, mean(eff.lib.sizes) / eff.lib.sizes, "*")
}
# RUVr normalization (remove unwanted variation using residuals).
# Fits an edgeR GLM on upper-quartile-normalized counts (common + tagwise
# dispersion), extracts deviance residuals, and passes them to RUVSeq::RUVr
# with k = 1 factor of unwanted variation, using every gene as a control.
ruv_r.1 <- function(X, group) {
  # edgeR's design matrix needs a factor for the group effect.
  if (!is.factor(group)) group <- factor(group)
  design <- model.matrix(~group, data=as.data.frame(X))
  y <- edgeR::DGEList(counts=X, group=group)
  y <- edgeR::calcNormFactors(y, method="upperquartile")
  y <- edgeR::estimateGLMCommonDisp(y, design)
  y <- edgeR::estimateGLMTagwiseDisp(y, design)
  fit <- edgeR::glmFit(y, design)
  res <- residuals(fit, type="deviance")
  # Diagnostic aid: if residuals could not be computed, dump the DGEList
  # structure before RUVr fails on the NULL input.
  if (is.null(res)) {
    message(str(y))
  }
  # cIdx = 1:nrow(X): treat all genes as negative controls; round = FALSE
  # keeps fractional normalized counts.
  X.normed <- RUVSeq::RUVr(X, 1:nrow(X), k=1, res, round = FALSE)$normalizedCounts
  return(X.normed)
}
# TMM normalization, variant 1: divide each column (sample) by its effective
# library size (library size x TMM normalization factor).
tmm.v1 <- function(X, group, ...) {
  tmm.factors <- edgeR::calcNormFactors(X, method = 'TMM', group = group, ...)
  eff.lib.sizes <- colSums(X) * tmm.factors
  sweep(X, 2, eff.lib.sizes, "/")
}
# TMM normalization, variant 2: rescale each column (sample) by the ratio of
# the mean effective library size to its own effective library size, keeping
# the normalized counts on the original count scale.
tmm.v2 <- function(X, group, ...) {
  tmm.factors <- edgeR::calcNormFactors(X, method = 'TMM', group = group, ...)
  eff.lib.sizes <- colSums(X) * tmm.factors
  sweep(X, 2, mean(eff.lib.sizes) / eff.lib.sizes, "*")
}
# PoissonSeq normalization: divide each column (sample) by its estimated
# sequencing depth. (In the original pipe, the `.` placeholder routed the
# depths into sweep's STATS argument; here the call is written out directly.)
pseq.v1 <- function(X, group = NA, ...) {
  depths <- PoissonSeq::PS.Est.Depth(X, ...)
  sweep(X, 2, depths, "/")
}
# DESeq normalization: divide each column (sample) by its median-of-ratios
# size factor. `group` is accepted for interface parity with the other
# normalizers but is not used by DESeq2's estimator.
deseq.v1 <- function(X, group) {
  size.factors <- DESeq2::estimateSizeFactorsForMatrix(X)
  sweep(X, 2, size.factors, "/")
}
# DEGES normalization via the TCC package: iteratively compute normalization
# factors (norm.method) with DEG removal (test.method), then return the
# normalized count matrix.
# Bug fix: norm.method and test.method were previously ignored because the
# calcNormFactors call hard-coded 'tmm' and 'edger'; they are now forwarded.
deges.3 <- function(X, group, norm.method = 'tmm', test.method = 'edger', iteration = 1) {
  TCC::TCC(X, group) %>%
    TCC::calcNormFactors(norm.method = norm.method, test.method = test.method, iteration = iteration) %>%
    TCC::getNormalizedData()
}
# Total-count normalization: divide each column (sample) by its total count,
# with the factors centered by their geometric mean so they average to 1.
tc <- function(X, ...) {
  lib.sizes <- colSums(X)
  scale.factors <- lib.sizes / geom.mean(lib.sizes)
  return(sweep(X, 2, scale.factors, "/"))
}
# Cyclic loess normalization (affy package) on log2(count + 1) values, fitted
# using only the rows in refs.idx; log.it = FALSE because the data are already
# log-transformed here.
affy.loess <- function(X, refs.idx, verbose = FALSE, ...) {
  log.counts <- log2(X + 1)
  affy::normalize.loess(log.counts, subset = refs.idx, log.it = FALSE, verbose = verbose, ...)
}
|
d97871a968e2c4558820bcc5e0bb72123221f594
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/Rmixmod/man/summary-methods.Rd
|
8bb782132ef73ebb6c706a5b0aae6010a5cf953d
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,250
|
rd
|
summary-methods.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/global.R, R/MultinomialParameter.R,
% R/GaussianParameter.R, R/CompositeParameter.R, R/MixmodResults.R,
% R/Mixmod.R, R/MixmodPredict.R
\docType{methods}
\name{summary}
\alias{summary}
\alias{summary,MultinomialParameter-method}
\alias{summary,GaussianParameter-method}
\alias{summary,CompositeParameter-method}
\alias{summary,MixmodResults-method}
\alias{summary,Mixmod-method}
\alias{summary,MixmodPredict-method}
\title{Produce result summaries of a Rmixmod class}
\usage{
\S4method{summary}{MultinomialParameter}(object, ...)
\S4method{summary}{GaussianParameter}(object, ...)
\S4method{summary}{CompositeParameter}(object, ...)
\S4method{summary}{MixmodResults}(object, ...)
\S4method{summary}{Mixmod}(object, ...)
\S4method{summary}{MixmodPredict}(object, ...)
}
\arguments{
\item{object}{An object of one of the Rmixmod classes listed above (e.g. \code{Mixmod}, \code{MixmodResults}, \code{MixmodPredict}, or a parameter class).}
\item{...}{further arguments passed to or from other methods}
}
\value{
NULL. Summaries to standard out.
}
\description{
Produce result summaries of a Rmixmod class
}
\examples{
data(geyser)
xem <- mixmodCluster(geyser,3)
summary(xem)
summary(xem["bestResult"])
summary(xem["bestResult"]["parameters"])
}
\seealso{
\code{\link{summary}}
}
|
ec1a5fde40d31d65d7551c78ec39f15dd22b3e6e
|
fe43291e58d0ba74929b0c25e257072923c356d3
|
/PCA_functions.R
|
17d520a98b54477d9a557f569d7ee765680a186a
|
[] |
no_license
|
Jing0831/Data-Mining-for-Business-Analytics
|
fad73dbb7d6218db64c0851af3012faebbfee055
|
e05119576749a1a5cee75e27c6a0fc64b7693150
|
refs/heads/main
| 2023-07-31T22:30:58.199933
| 2021-09-21T23:48:14
| 2021-09-21T23:48:14
| 409,006,559
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 986
|
r
|
PCA_functions.R
|
## Five Element Analytics
## Author: Alex Pelaez
## Date Revised: 01-02-2019
##
## TO USE THIS
## Run the PCA function from the FactoMineR package and
## pass the resulting pca object into the functions below.
## communality() returns the matrix of cumulative communalities.
#
# Example:
##
#Usage: communality(pca)
#  pca - the object returned by FactoMineR::PCA
#Example: p = PCA(houses)
#         communality(p)
# Returns the matrix of cumulative communalities: entry [i, j] is the share of
# variable i's variance explained by the first j principal components
# (cumulative sum of squared variable-component correlations), rounded to 4
# decimal places.
communality <- function(pca) {
  # Squared correlations between variables and components.
  sq <- pca$var$cor^2
  if (ncol(sq) > 1) {
    # Row-wise cumulative sums; apply() returns components x variables, so
    # transpose back. (The previous 2:p loop failed for a single component.)
    m <- t(apply(sq, 1, cumsum))
  } else {
    # A single component: the cumulative sum is the matrix itself.
    m <- sq
  }
  dimnames(m) <- dimnames(sq)
  round(m, 4)
}
#Usage: display_pc(pca, cutoff, rnd)
#  pca    - the object returned by FactoMineR::PCA
#  cutoff - loadings below this absolute value are blanked out (shown as ".")
#  rnd    - number of significant digits for rounding
#
#Example: p = PCA(houses)
#         display_pc(p, .5, 4)
display_pc <- function(pca, cutoff=.5, rnd=4) {
  loadings <- as.table(round(pca$var$cor, rnd))
  # Zero out small loadings; zero.print="." renders them as dots.
  loadings[abs(loadings) < cutoff] <- 0
  print(loadings, zero.print=".")
}
|
bac466054ff4fa075f7253197e0198edfa817f27
|
b39c8b303071e7aeaec8dd41a8bfffca3501b5f5
|
/cachematrix.R
|
f0f0083d81d0b84a700484aa4ca897a7974bdbda
|
[] |
no_license
|
PhilAIUK/ProgrammingAssignment2
|
3ef6da8fe5c9ed0979dd2a88adc142b614d72143
|
ab5329caa9e5e3361d94a804df472d21951a9841
|
refs/heads/master
| 2020-04-21T19:22:48.181699
| 2019-02-08T23:13:35
| 2019-02-08T23:13:35
| 169,804,208
| 0
| 0
| null | 2019-02-08T21:56:49
| 2019-02-08T21:56:49
| null |
UTF-8
|
R
| false
| false
| 1,565
|
r
|
cachematrix.R
|
## Put comments here that give an overall description of what your
## functions do
## These functions will show how we can retrieve data from the cache
## instead of re-calculating; which could be time consuming
## makeCacheMatrix: build a cache-enabled "matrix" object.
## Returns a list of four closures that share the matrix `x` and its cached
## inverse through their enclosing environment:
##   set(y)        - replace the matrix and invalidate the cached inverse
##   get()         - return the stored matrix
##   setinverse(i) - store a freshly computed inverse
##   getinverse()  - return the cached inverse (NULL until computed)
makeCacheMatrix <- function(x = matrix()) {
  cached.inverse <- NULL
  set <- function(y) {
    ## <<- writes into the enclosing environment shared by all four closures.
    x <<- y
    cached.inverse <<- NULL
  }
  get <- function() x
  setinverse <- function(inverse) {
    cached.inverse <<- inverse
  }
  getinverse <- function() cached.inverse
  ## Named list so callers can use the $ operator.
  list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
## cacheSolve: return the inverse of a cache-enabled matrix object created by
## makeCacheMatrix. The inverse is computed with solve() on the first call and
## stored; subsequent calls serve the cached copy (with a message).
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinverse()
  if (is.null(cached)) {
    ## Cache miss: compute, store, and return the inverse.
    computed <- solve(x$get(), ...)
    x$setinverse(computed)
    return(computed)
  }
  message("Retrieving the cached inverse of the matrix")
  cached
}
|
3a3b6bf6fc5296f931552b914a402bd0ac8375a1
|
b287d87a513892632473e8348c2c017410fb5fd1
|
/inst/doc/qtbase.R
|
80aa5b57ef0fa7853bff267abbad6557564f1037
|
[] |
no_license
|
cran/qtbase
|
ec4194483b608ecb055ce4fd1b14693d0a311eb0
|
d1e773fc79e5dae7148d1e52ef70be54795e9c95
|
refs/heads/master
| 2020-06-07T09:34:42.764850
| 2018-05-09T08:03:20
| 2018-05-09T08:03:20
| 17,698,852
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,223
|
r
|
qtbase.R
|
### R code from vignette source 'qtbase.Rnw'
### Extracted vignette code for the qtbase package (R bindings to Qt).
### Each numbered chunk comes in pairs: an "(eval = FALSE)" chunk holding the
### code exactly as displayed in the vignette text (kept as ## comments), and
### a "-real" chunk that actually runs the same code, guarded by `supported`
### so GUI calls are skipped where no display session is available.


###################################################
### code chunk number 1: setup
###################################################
options(width=72)
library(qtbase)
# GUI chunks run everywhere except on macOS without a window-server session
# (SECURITYSESSIONID is only set inside a GUI login session there).
supported <-
  !length(grep("darwin", R.version$platform)) ||
  nzchar(Sys.getenv("SECURITYSESSIONID"))


###################################################
### code chunk number 2: syntax (eval = FALSE)
###################################################
## button <- Qt$QPushButton("Press Me!")
## qconnect(button, "pressed", function() print("Pressed"))
## button$show()


###################################################
### code chunk number 3: syntax-real
###################################################
if (supported) {
  button <- Qt$QPushButton("Press Me!")
  qconnect(button, "pressed", function() print("Pressed"))
  button$show()
}


###################################################
### code chunk number 4: Qt (eval = FALSE)
###################################################
## Qt


###################################################
### code chunk number 5: Qt-real
###################################################
if (supported) {
  Qt
}


###################################################
### code chunk number 6: libraries-as-environments (eval = FALSE)
###################################################
## head(ls(Qt))
## Qt$QPushButton


###################################################
### code chunk number 7: libraries-as-environments-real
###################################################
if (supported) {
  head(ls(Qt))
  Qt$QPushButton
}


###################################################
### code chunk number 8: QWidget (eval = FALSE)
###################################################
## button <- Qt$QPushButton("Press Me!")


###################################################
### code chunk number 9: QWidget-real
###################################################
if (supported) {
  button <- Qt$QPushButton("Press Me!")
}


###################################################
### code chunk number 10: tr (eval = FALSE)
###################################################
## Qt$QPushButton$tr("Hello World")


###################################################
### code chunk number 11: tr-real
###################################################
if (supported) {
  Qt$QPushButton$tr("Hello World")
}


###################################################
### code chunk number 12: show (eval = FALSE)
###################################################
## button$show()


###################################################
### code chunk number 13: show-real
###################################################
if (supported) {
  button$show()
}


###################################################
### code chunk number 14: text (eval = FALSE)
###################################################
## button$text
## button$text <- "PUSH ME!"


###################################################
### code chunk number 15: text-real
###################################################
if (supported) {
  button$text
  button$text <- "PUSH ME!"
}


###################################################
### code chunk number 16: qconnect (eval = FALSE)
###################################################
## qconnect(button, "pressed", function() print("pushed"))


###################################################
### code chunk number 17: qconnect-real
###################################################
if (supported) {
  qconnect(button, "pressed", function() print("pushed"))
}


###################################################
### code chunk number 18: qsetClass (eval = FALSE)
###################################################
## qsetClass("PositiveValidator", Qt$QValidator)


###################################################
### code chunk number 19: qsetClass-real
###################################################
if (supported) {
  qsetClass("PositiveValidator", Qt$QValidator)
}


###################################################
### code chunk number 20: list-validator-class (eval = FALSE)
###################################################
## PositiveValidator


###################################################
### code chunk number 21: list-validator-class-real
###################################################
if (supported) {
  PositiveValidator
}


###################################################
### code chunk number 22: validate (eval = FALSE)
###################################################
## validatePositive <- function(input, pos) {
##   val <- suppressWarnings(as.integer(input))
##   if (!is.na(val)) {
##     if (val > 0)
##       Qt$QValidator$Acceptable
##     else Qt$QValidator$Invalid
##   } else {
##     if (input == "")
##       Qt$QValidator$Acceptable
##     else Qt$QValidator$Invalid
##   }
## }


###################################################
### code chunk number 23: validate-real
###################################################
# Accepts positive integers and the empty string; rejects everything else.
if (supported) {
  validatePositive <- function(input, pos) {
    val <- suppressWarnings(as.integer(input))
    if (!is.na(val)) {
      if (val > 0)
        Qt$QValidator$Acceptable
      else Qt$QValidator$Invalid
    } else {
      if (input == "")
        Qt$QValidator$Acceptable
      else Qt$QValidator$Invalid
    }
  }
}


###################################################
### code chunk number 24: qsetMethod (eval = FALSE)
###################################################
## qsetMethod("validate", PositiveValidator, validatePositive)


###################################################
### code chunk number 25: qsetMethod-real
###################################################
if (supported) {
  qsetMethod("validate", PositiveValidator, validatePositive)
}


###################################################
### code chunk number 26: construct-validator (eval = FALSE)
###################################################
## validator <- PositiveValidator()


###################################################
### code chunk number 27: construct-validator-real
###################################################
if (supported) {
  validator <- PositiveValidator()
}


###################################################
### code chunk number 28: text-entry (eval = FALSE)
###################################################
## e <- Qt$QLineEdit()
## v <- PositiveValidator(e)
## e$setValidator(v)
## e$show()


###################################################
### code chunk number 29: text-entry-real
###################################################
if (supported) {
  e <- Qt$QLineEdit()
  v <- PositiveValidator(e)
  e$setValidator(v)
  e$show()
}


###################################################
### code chunk number 30: extend-window-title (eval = FALSE)
###################################################
## qsetClass("SaveConfirmationDialog", Qt$QMessageBox,
##           function(filename = NULL, parent = NULL)
## {
##   super(icon = Qt$QMessageBox$Question, title = "Save confirmation",
##         text = "Save the current document?",
##         buttons = Qt$QMessageBox$Cancel | Qt$QMessageBox$Discard |
##           Qt$QMessageBox$Save,
##         parent = parent)
##   this$filename <- filename
## })


###################################################
### code chunk number 31: extend-window-title-real
###################################################
# Subclass with a custom constructor: super() invokes the QMessageBox
# constructor, and `this` refers to the instance being built.
if (supported) {
  qsetClass("SaveConfirmationDialog", Qt$QMessageBox,
            function(filename = NULL, parent = NULL)
  {
    super(icon = Qt$QMessageBox$Question, title = "Save confirmation",
          text = "Save the current document?",
          buttons = Qt$QMessageBox$Cancel | Qt$QMessageBox$Discard |
            Qt$QMessageBox$Save,
          parent = parent)
    this$filename <- filename
  })
}


###################################################
### code chunk number 32: accept-override (eval = FALSE)
###################################################
## qsetMethod("accept", SaveConfirmationDialog, function() {
##   saveDocument(filename)
##   super("accept")
## })


###################################################
### code chunk number 33: accept-override-real
###################################################
# NOTE(review): saveDocument() is not defined anywhere in this extracted
# script; defining the method succeeds, but invoking accept() would fail
# unless saveDocument exists in the session.
if (supported) {
  qsetMethod("accept", SaveConfirmationDialog, function() {
    saveDocument(filename)
    super("accept")
  })
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.