blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
68ea8f49c991f685beaa51ea27bbd1a6f5db78b7 | bbb3e575d2c00abeeccc88d560e8b0be7c98f3de | /Generate_PRISM_Graphs_and_Maps.R | 6a3d5d59542c68f8ada5083780fe65a4c3084939 | [] | no_license | michael-noel/PRISM-Crop-Health-Data-Filter-and-Visualize | 8cb4ebc40a718b543e17d0dcdbe87564b6ff6ed7 | 1070bbfac7e6184f2ad294879044b21883ed4a5e | refs/heads/master | 2020-12-26T01:38:15.096084 | 2014-11-20T03:43:11 | 2014-11-20T03:43:11 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 889 | r | Generate_PRISM_Graphs_and_Maps.R | ##############################################################################
# title : Generate_PRISM_Graphs_and_Maps.R;
# purpose : generate graphs and maps of PRISM data;
# producer : prepared by A. Sparks;
# last update : in Los Baños, Philippines, Nov. 2014;
# inputs : na;
# outputs : na;
# remarks 1 : simply sources other files to generate the output files;
# Licence: : GPL2;
##############################################################################
source("Other_graphs.R") # Graphs of visit numbers by location and site
source("Nonsystemic_disease_graphs.R")
source("Nonsystemic_disease_maps.R")
source("Pest_graphs.R")
source("Pest_maps.R")
source("Systemic_disease_graphs.R")
source("Systemic_disease_maps.R")
source("Weed_graphs.R")
source("Yield_graph.R")
source("Yield_map.R")
source("Cleaned_Data_for_Regions.R")
#eos
|
3a79aae0002a153758f132acba537e66a6d72216 | 63d50cbf64469abd6d4729ba0266496ced3433cf | /harsha1/check-number-positive-or-negative.R | 181b380d18eee09254cf6a2063c86448d73414fd | [] | no_license | tactlabs/r-samples | a391a9a07022ecd66f29e04d15b3d7abeca7ea7c | a5d7985fe815a87b31e4eeee739bc2b7c600c9dc | refs/heads/master | 2023-07-08T09:00:59.805757 | 2021-07-25T15:16:20 | 2021-07-25T15:16:20 | 381,659,818 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 317 | r | check-number-positive-or-negative.R | # Created on : 29-06-2021
# Course work:
# @author: Harsha Vardhan
# Source:
#check if the number is positive or negative or zero
# Classify the sign of a single numeric value.
#
# Args:
#   num: a length-one numeric (may be NA if the input could not be parsed).
# Returns:
#   One of "Positive number", "Zero", "Negative number", or "Not a number".
classify_number <- function(num) {
  if (is.na(num)) {
    # Guard: as.double() returns NA for non-numeric input, and a bare
    # if (num > 0) on NA would raise an error.
    "Not a number"
  } else if (num > 0) {
    "Positive number"
  } else if (num == 0) {
    "Zero"
  } else {
    "Negative number"
  }
}

# Read one number from the console and report its sign.
# Note: readline() only prompts in interactive sessions; otherwise it
# returns "" and the NA guard above reports "Not a number".
num <- as.double(readline(prompt = "Enter a number: "))
print(classify_number(num))
|
3891d63b7df22ac220b6424d42fc8cf4fefc5e62 | 283c7729e8642dcb2a3d75563b7b190df999ebfa | /man/dot-PermutedVar.Rd | 934f4de8ef23f839d4adefa4bd228984be10820c | [
"MIT"
] | permissive | vivekkohar/sRACIPE | 5960894521fc76749342e3e0db43e392ad3130f2 | 0b76e5981aa8fd2622ef36e307be6cd0bdf6c977 | refs/heads/master | 2021-06-17T11:38:56.541129 | 2021-03-17T16:19:14 | 2021-03-17T16:19:14 | 178,456,885 | 0 | 0 | MIT | 2021-03-17T16:19:14 | 2019-03-29T18:25:23 | R | UTF-8 | R | false | true | 631 | rd | dot-PermutedVar.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/heatmapSimilarity.R
\name{.PermutedVar}
\alias{.PermutedVar}
\title{Find variance of permutations}
\usage{
.PermutedVar(simulated.refCor, clusterCut, permutations, refClusterVar)
}
\arguments{
\item{simulated.refCor}{Correlation matrix of simulated and reference data}
\item{clusterCut}{The original cluster assignments}
\item{permutations}{The number of permutations}
\item{refClusterVar}{Reference Cluster Variance}
}
\value{
An array of dimension n.models by nClusters by permutations
}
\description{
A utility function to generate permutations
}
|
e4acfc363e2846abef915c4a57e380acf4454ae0 | 0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb | /cran/paws.database/man/rds_switchover_blue_green_deployment.Rd | 135fd55f7fa66165f81a667fcb79b406419b5334 | [
"Apache-2.0"
] | permissive | paws-r/paws | 196d42a2b9aca0e551a51ea5e6f34daca739591b | a689da2aee079391e100060524f6b973130f4e40 | refs/heads/main | 2023-08-18T00:33:48.538539 | 2023-08-09T09:31:24 | 2023-08-09T09:31:24 | 154,419,943 | 293 | 45 | NOASSERTION | 2023-09-14T15:31:32 | 2018-10-24T01:28:47 | R | UTF-8 | R | false | true | 968 | rd | rds_switchover_blue_green_deployment.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rds_operations.R
\name{rds_switchover_blue_green_deployment}
\alias{rds_switchover_blue_green_deployment}
\title{Switches over a blue/green deployment}
\usage{
rds_switchover_blue_green_deployment(
BlueGreenDeploymentIdentifier,
SwitchoverTimeout = NULL
)
}
\arguments{
\item{BlueGreenDeploymentIdentifier}{[required] The unique identifier of the blue/green deployment.
Constraints:
\itemize{
\item Must match an existing blue/green deployment identifier.
}}
\item{SwitchoverTimeout}{The amount of time, in seconds, for the switchover to complete.
Default: 300
If the switchover takes longer than the specified duration, then any
changes are rolled back, and no changes are made to the environments.}
}
\description{
Switches over a blue/green deployment.
See \url{https://www.paws-r-sdk.com/docs/rds_switchover_blue_green_deployment/} for full documentation.
}
\keyword{internal}
|
2322a74e9abac7dba57d0c21daf9be00d1c5806b | d0db39e156c1303c92aa76b1cb8bb4657e990947 | /plot2.R | 5798495e85edabad82efe09bbcddff916ce1e884 | [] | no_license | user8077/ExData_Plotting1 | b26be883078b78a1004ae811d8e1412c005c3592 | 99e02b7b49ae1e36187d9ae1eed52231fc5027e1 | refs/heads/master | 2021-01-18T08:49:55.522426 | 2015-03-09T01:28:09 | 2015-03-09T01:28:09 | 31,857,922 | 0 | 0 | null | 2015-03-08T17:10:01 | 2015-03-08T17:10:00 | null | UTF-8 | R | false | false | 478 | r | plot2.R | data <- read.table("household_power_consumption.txt", sep=';', header=T, na.strings="?")
# Keep only the two target days (1-2 Feb 2007); `data` is the full
# household power consumption table loaded above.
feb_days <- subset(data, Date %in% c("1/2/2007", "2/2/2007"))
# Combine date and time strings into POSIXlt timestamps for the x axis.
datetime <- strptime(paste(feb_days$Date, feb_days$Time),
                     "%d/%m/%Y %H:%M:%S")
# Blank plotting character so type = "o" renders as a plain line.
par(pch = " ")
plot(datetime, feb_days$Global_active_power, type = "o",
     ylab = "Global Active Power (kilowatts)", xlab = "")
# Copy the on-screen plot to a 480x480 PNG file, then close the device.
dev.copy(png, "plot2.png", width = 480, height = 480, units = "px")
dev.off()
|
b6d43b52c99570c6d4ff66d18cc454dd978fc01d | 07ff09cfd43fdd5874b0736bd8a67d727d7d5633 | /R/simExtraction.r | 8b5b7cdc8fe50a2de4ee9f4b0bb089c3527da2c8 | [] | no_license | OskarHansson/pcrsim | 11af8a891473bb1568dcabdd8d72b55880ca8295 | a4a05b21aaadfbda04524b7dbfb33d7bd333769a | refs/heads/master | 2022-01-28T05:51:10.443211 | 2022-01-16T11:59:32 | 2022-01-16T11:59:32 | 8,688,053 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 8,964 | r | simExtraction.r | ################################################################################
# TODO LIST
# TODO: Use truncated normal distributions?
################################################################################
# NOTES
# ...
################################################################################
# CHANGE LOG (10 last changes)
# 14.04.2016: Version 1.0.0 released.
#' @title DNA Extraction Simulator
#'
#' @description Simulates the DNA extraction process.
#'
#' @details Simulates the DNA extraction process by a series of normal
#' distributions. The number of molecules is taken from the required column
#' 'DNA' which is \code{floor}ed to avoid NAs in the \code{rbinom} function.
#'
#' @param data data.frame with simulated data. Preferably output from
#' \code{\link{simSample}}. Required columns are 'Marker', 'Allele', 'Sim', and 'DNA'.
#' @param vol.ex numeric for the final extraction volume (volume after extraction).
#' @param sd.vol numeric for the standard deviation of \code{vol.ex}.
#' @param prob.ex numeric for probability that an allele survives the extraction
#' (extraction efficiency).
#' @param sd.prob numeric for the standard deviation of \code{prob.ex}.
#' @param cell.dna numeric to indicate the DNA content of a diploid cell in nanograms (ng).
#' @param debug logical TRUE to indicate debug mode.
#'
#' @return data.frame with simulation results in columns 'Ex.Vol', 'Ex.Prob',
#' Ex.DNA', 'Ex.Conc', and updated 'DNA' and 'Volume' columns (added if needed).
#'
#' @importFrom utils head tail str
#' @importFrom stats rbinom rnorm
#'
#' @export
#'
#' @seealso \code{\link{simSample}}
#'
#' @examples
#' # Create a data frame with a DNA profile.
#' markers = rep(c("D3S1358","TH01","FGA"), each=2)
#' alleles = c(15,18,6,10,25,25)
#' df <- data.frame(Marker=markers, Allele=alleles)
#'
#' # Simulate profile.
#' res <- simProfile(data=df, sim=3, name="Test")
#'
#' # Simulate diploid sample.
#' res <- simSample(data=res, cells=100, sd.cells=20)
#'
#' # [OPTIONAL] Simulate degradation.
#' res <- simDegradation(data=res, kit="ESX17", deg=0.003, quant.target=80)
#'
#' # Simulate extraction.
#' res <- simExtraction(data=res, vol.ex=200, sd.vol=10, prob.ex=0.3, sd.prob=0.1)
simExtraction <- function(data=NULL, vol.ex=100, sd.vol=0,
                          prob.ex=0.3, sd.prob=0,
                          cell.dna=0.006, debug=FALSE) {

  # Debug info: dump the call and the incoming data frame.
  if(debug){
    print(paste(">>>>>> IN:", match.call()[[1]]))
    print("CALL:")
    print(match.call())
    print("###### PROVIDED ARGUMENTS")
    print("STRUCTURE data:")
    print(str(data))
    print("HEAD data:")
    print(head(data))
    print("TAIL data:")
    print(tail(data))
  }

  # CHECK PARAMETERS ##########################################################
  # Fail fast with an informative message if any argument is malformed.

  if(!is.data.frame(data)){
    stop(paste("'data' must be of type data.frame."))
  }

  if(!is.logical(debug)){
    stop(paste("'debug' must be logical."))
  }

  if(!"Marker" %in% names(data)){
    stop(paste("'data' must have a colum 'Marker'."))
  }

  if(!"Allele" %in% names(data)){
    stop(paste("'data' must have a colum 'Allele'."))
  }

  if(!"Sim" %in% names(data)){
    stop(paste("'data' must have a colum 'Sim'."))
  }

  if(!"DNA" %in% names(data)){
    stop(paste("'data' must have a colum 'DNA'."))
  }

  if(is.null(vol.ex) || !is.numeric(vol.ex) || vol.ex < 0){
    stop(paste("'vol.ex' must be a positive numeric giving the final",
               "extraction volume."))
  }

  if(is.null(sd.vol) || !is.numeric(sd.vol) || sd.vol < 0){
    stop(paste("'sd.vol' must be a positive numeric giving the standard",
               "deviation of 'vol.ex'."))
  }

  if(is.null(prob.ex) || !is.numeric(prob.ex) || prob.ex < 0 || prob.ex > 1){
    stop(paste("'prob.ex' must be a positive numeric {0,1} giving the final",
               "extraction probability."))
  }

  if(is.null(sd.prob) || !is.numeric(sd.prob) || sd.prob < 0){
    stop(paste("'sd.prob' must be a positive numeric giving the standard",
               "deviation of 'prob.ex'."))
  }

  # PREPARE ###################################################################

  message("SIMULATE EXTRACTION")

  # Get number of simulations (assumes 'Sim' ids are 1..max).
  .sim <- max(data$Sim)

  # Get number of rows per simulation.
  # NOTE(review): plyr::count() tabulates in sorted order of the ids; the
  # rep(..., times=.rows) calls below assume the rows of 'data' are grouped
  # and sorted by 'Sim' in the same order — TODO confirm with callers.
  .rows <- plyr::count(data$Sim)$freq

  # Get total number of observations.
  .obs <- nrow(data)

  if(debug){
    print(paste("Number of simulations:", .sim))
    print(paste("Number of rows per simulation:", paste(unique(.rows), collapse=",")))
    print(paste("Number of observations:", .obs))
  }

  # SIMULATE ##################################################################

  # VOLUME --------------------------------------------------------------------

  if(debug){
    print("PARAMETERS TO SIMULATE THE EXTRACTION VOLUME")
    print("rnorm(n, mean, sd)")
    print("n:")
    print(.sim)
    print("mean:")
    print(vol.ex)
    print("sd:")
    print(sd.vol)
  }

  # Draw random extraction volumes for each simulation
  # (one volume per simulation, replicated over that simulation's rows below).
  rvolume <- rnorm(n=.sim, mean=vol.ex, sd=sd.vol)

  # Extraction volume cannot be negative, so negative draws are clipped to 0.
  # TODO: use a truncated normal distribution?
  rvolume[rvolume < 0] <- 0

  # Check if column exist.
  # (Both branches reset the column to NA; they differ only in the message.)
  if("Ex.Vol" %in% names(data)){
    message("The 'Ex.Vol' column was overwritten!")
    data$Ex.Vol <- NA
  } else {
    data$Ex.Vol <- NA
    message("'Ex.Vol' column added.")
  }

  # Add a column indicating the extraction volume.
  data$Ex.Vol <- rep(rvolume, times=.rows)

  # PROBABILITY ---------------------------------------------------------------

  if(debug){
    print("PARAMETERS TO SIMULATE THE EXTRACTION PROBABILITY")
    print("rnorm(n, mean, sd)")
    print("n:")
    print(.sim)
    print("mean:")
    print(prob.ex)
    print("sd:")
    print(sd.prob)
  }

  # Draw random extraction probabilities for each simulation.
  rprob <- rnorm(n=.sim, mean=prob.ex, sd=sd.prob)

  # Extraction probability must be between 0 and 1, so draws are clamped.
  # TODO: USe a truncated normal distribution?
  rprob[rprob < 0] <- 0
  rprob[rprob > 1] <- 1

  if("Ex.Prob" %in% names(data)){
    message("The 'Ex.Prob' column was overwritten!")
    data$Ex.Prob <- NA
  } else {
    data$Ex.Prob <- NA
    message("'Ex.Prob' column added.")
  }

  # Add a column indicating the extraction volume.
  data$Ex.Prob <- rep(rprob, times=.rows)

  # EXTRACTION ----------------------------------------------------------------

  obs <- nrow(data)
  # floor() guarantees an integer 'size' for rbinom (avoids NAs, per the
  # roxygen details above).
  dnain <- floor(data$DNA)
  probin <- data$Ex.Prob

  if(debug){
    print("PARAMETERS TO SIMULATE THE EXTRACTION")
    print("rbinom(n, size, prob)")
    print("n:")
    print(obs)
    print("size:")
    print(head(dnain))
    print("prob:")
    print(head(probin))
  }

  # During extraction, there is a probability
  # 'probin' extraction (the extraction efficiency) that a given DNA
  # molecule will survive the process.
  # For diploid cells there are 1 of each allele copy per cell.
  # number of cells = number of each allele (molecules).
  dnaout <- rbinom(n=obs, size=dnain, prob=probin)

  if("Ex.DNA" %in% names(data)){
    message("The 'Ex.DNA' column was overwritten!")
    data$Ex.DNA <- NA
  } else {
    data$Ex.DNA <- NA
    message("'Ex.DNA' column added.")
  }

  # Add a column indicating the extraction volume.
  data$Ex.DNA <- dnaout

  # CONCENTRATION -------------------------------------------------------------

  exvol <- data$Ex.Vol
  exdna <- data$Ex.DNA

  # Calculate concentration per allele, hence use: cell.dna / 2
  # ('cell.dna' is the DNA content of a diploid cell; each allele is half).
  # NOTE(review): exvol can be 0 after the truncation above, which yields
  # NaN/Inf concentrations — confirm whether that is intended.
  dnaconc <- (exdna * (cell.dna / 2)) / exvol

  if("Ex.Conc" %in% names(data)){
    data$Ex.Conc <- NA
    message("The 'Ex.Conc' column was overwritten!")
  } else {
    data$Ex.Conc <- NA
    message("'Ex.Conc' column added.")
  }

  # Add a column indicating the DNA concentration.
  data$Ex.Conc <- dnaconc

  # Update Current Columns -----------------------------------------------------
  # 'Volume' and 'DNA' are refreshed so downstream simulation steps
  # (e.g. simPCR) read the post-extraction state from these columns.

  # Volume.
  if("Volume" %in% names(data)){
    data$Volume <- NULL # Remove first so that the column always appear to the right.
    data$Volume <- NA
    message("'Volume' column updated!")
  } else {
    data$Volume <- NA
    message("'Volume' column added.")
  }

  # Add number of cells/molecules to data.
  data$Volume <- data$Ex.Vol

  # DNA/Molecules.
  if("DNA" %in% names(data)){
    data$DNA <- NULL # Remove first so that the column always appear to the right.
    data$DNA <- NA
    message("'DNA' column updated!")
  } else {
    data$DNA <- NA
    message("'DNA' column added.")
  }

  # Add number of cells/molecules to data.
  data$DNA <- dnaout

  # RETURN ####################################################################

  # Debug info.
  if(debug){
    print("RETURN")
    print("STRUCTURE:")
    print(str(data))
    print("HEAD:")
    print(head(data))
    print("TAIL:")
    print(tail(data))
    print(paste("<<<<<< EXIT:", match.call()[[1]]))
  }

  # Return result.
  return(data)

}
4a08427fd13ca7e77bce065bab311b090c50bad8 | f22ee4eed9787b55a7a97413117576ece0cbdeb3 | /man/mvrnorm.Rd | cbb5c3eed68fa99b27d15a10a33443325c2ee181 | [] | no_license | xza666/StatComp20038 | 815191f3131fa18140c37a528d164b0c76eb2271 | a29e5f9777e83da1fc2310a06076f7b532129b11 | refs/heads/master | 2023-01-29T20:14:36.121823 | 2020-12-17T05:30:15 | 2020-12-17T05:30:15 | 321,563,449 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 365 | rd | mvrnorm.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/StatCompR.R
\name{mvrnorm}
\alias{mvrnorm}
\title{A function to generate a sample with Multivariate normal distribution}
\description{
A function to generate a sample with Multivariate normal distribution
}
\examples{
{
sigma1=matrix(c(1,0,0,1),2,2)
MASS::mvrnorm(1,rep(0,2),sigma1)
}
}
|
920115d0abb216e77e83936723443accf3607df8 | 3638e75fe8127096645dd0348e67668e034dc00b | /exp/chaotic-bank/bank.R | eaada76073eca70122d1dea30d93425a14f561d4 | [] | no_license | oleks/diku | dd0b7bc85f96d71b561be3180ff031f830f0b98c | 39c59bb9a202d417f905aada012f64221e4c9419 | refs/heads/master | 2021-06-13T07:38:38.952618 | 2017-01-25T20:47:31 | 2017-01-25T20:47:31 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 176 | r | bank.R | printf <- function(...) invisible(print(sprintf(...)))
# "Chaotic bank" floating-point demonstration. With exact arithmetic the
# balance a_n = n * a_{n-1} - 1, started at a_0 = e - 1, satisfies
# a_n = n! * (e - sum_{k=0}^{n} 1/k!) and stays small and positive; in
# double precision the rounding error of exp(1) - 1 is multiplied by n at
# every step, so the printed balances soon diverge wildly.
account <- exp(1) - 1
printf("%f", account)
for (i in 1:25) {
  account <- i * account - 1
  printf("%f", account)
}
|
7d953ebcb518b9bcfd816c693c80c237a24310a3 | db1ed89c9234364e43c56c9ef2dac5e1e3dd6b9d | /plot3.R | 2a01a2aeab56a86cec92505821fa01db4be21b75 | [] | no_license | pandabrowski/Exploratory-data-analysis---2nd-project | 648c7d7cac8ac06a4fd6e4991479d7386f9738ff | 5dc052cf1a39a7f3fc6d030db2ff8d6a4cd451b4 | refs/heads/master | 2020-06-02T04:13:59.498024 | 2014-10-26T22:29:24 | 2014-10-26T22:29:24 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 787 | r | plot3.R | #Of the four types of sources indicated by the type (point, nonpoint, onroad, nonroad) variable,
#which of these four sources have seen decreases in emissions from 1999-2008 for Baltimore City?
#Which have seen increases in emissions from 1999-2008?
#Use the ggplot2 plotting system to make a plot answer this question.
library('ggplot2')
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
#limit to baltimore city only
dataset_3 <- NEI[NEI$fips == "24510",]
maxs_3 <- aggregate( Emissions ~ year + type, data = dataset_3 , FUN = sum)
#split series by type of the variable
image = qplot(year, Emissions , color = type , data=maxs_3 , geom = c("point", "smooth") ) +
xlab('Years') + ylab("Emissions [tons]")
ggsave(file="plot3.png", plot=image) |
0d097e5161ce18937ff38a17aa89339932f98935 | 27cf2d56ebb117703873f45745a2da819b039467 | /man/datacggm.Rd | dbe57635d0fdefce3ded01f0b41e1fecfbedd72a | [] | no_license | cran/cglasso | 019589498b51044816bed94e93f2349fe588a236 | 4ccfb2d8b3eecd47905f5a50e556ba3ddebb496e | refs/heads/master | 2023-01-20T23:58:16.552881 | 2023-01-17T16:00:15 | 2023-01-17T16:00:15 | 148,851,182 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 7,194 | rd | datacggm.Rd | \name{datacggm}
\alias{datacggm}
\title{Create a Dataset from a Conditional Gaussian Graphical Model with Censored and/or Missing Values}
\description{
\sQuote{The \code{datacggm}} function is used to create a dataset from a conditional Gaussian graphical model with censored and/or missing values.
}
\usage{
datacggm(Y, lo = -Inf, up = +Inf, X = NULL, control = list(maxit = 1.0E+4,
thr = 1.0E-4))
}
\arguments{
\item{Y}{a \eqn{(n\times p)}{(n x p)}-dimensional matrix; each row is an observation from a conditional Gaussian graphical model with censoring vectors \code{lo} and \code{up}. Missing-at-random values are recorded as \sQuote{\code{NA}}.}
\item{lo}{the lower censoring vector; \code{lo[j]} is used to specify the lower censoring value for the random variable \eqn{Y_j}{Y_j}.}
\item{up}{the upper censoring vector; \code{up[j]} is used to specify the upper censoring value for the random variable \eqn{Y_j}{Y_j}.}
\item{X}{an optional \eqn{(n\times q)}{(n x q)}-dimensional data frame of predictors. If missing (default), a dataset from a Gaussian graphical model is returned otherwise a dataset from a conditional Gaussian graphical model is returned.}
\item{control}{a named list used to pass the arguments to the EM algorithm (see below for more details). The components are:
\itemize{
\item \code{maxit}: maximum number of iterations. Default is \code{1.0E+4}.
\item \code{thr}: threshold for the convergence. Default value is \code{1.0E-4}.}
}
}
\details{
The function \sQuote{\code{datacggm}} returns an \R object of class \sQuote{\code{datacggm}}, that is a named list containing the elements needed to fit a conditional graphical LASSO (cglasso) model to datasets with censored and/or missing values.
A set of specific method functions is provided to describe data with censored/missing values. For example, the method function \sQuote{\code{print.datacggm}} prints out the left and right-censored values using the following rules: a right-censored value is labeled by adding the symbol \sQuote{\code{+}} at the end of the value, whereas the symbol \sQuote{\code{-}} is used for the left-censored values (see examples below). The summary statistics can be obtained using the method function \sQuote{\code{\link{summary.datacggm}}}. The matrices \code{Y} and \code{X} are extracted from a \code{datacggm} object using the function \sQuote{\code{\link{getMatrix}}}.
For each column of the matrix \sQuote{\code{Y}}, mean and variance are estimated using a standard EM-algorithm based on the assumption of a Gaussian distribution. \sQuote{\code{maxit}} and \sQuote{\code{thr}} are used to set the number of iterations and the threshold for convergence, respectively. Marginal means and variances can be extracted using the accessor functions \sQuote{\code{\link{ColMeans}}} and \sQuote{\code{\link{ColVars}}}, respectively. Furthermore, the plotting functions \sQuote{\code{\link{hist.datacggm}}} and \sQuote{\code{\link{qqcnorm}}} can be used to inspect the marginal distribution of each column of the matrix \sQuote{\code{Y}}.
The status indicator matrix, denoted by \code{R}, can be extracted by using the function \code{\link{event}}. The entries of this matrix specify the status of an observation using the following code:
\itemize{
\item \sQuote{\code{R[i, j] = 0}} means that the \eqn{y_{ij}}{y_{ij}} is inside the open interval \code{(lo[j], up[j])};
\item \sQuote{\code{R[i, j] = -1}} means that the \eqn{y_{ij}}{y_{ij}} is a left-censored value;
\item \sQuote{\code{R[i, j] = +1}} means that the \eqn{y_{ij}}{y_{ij}} is a right-censored value;
\item \sQuote{\code{R[i, j] = +9}} means that the \eqn{y_{ij}}{y_{ij}} is a missing value.
}
See below for the other functions related to an object of class \sQuote{\code{datacggm}}.
}
\value{
\sQuote{\code{datacggm}} returns an \R object of S3 class \dQuote{\code{datacggm}}, that is, a nested named list containing the
following components:
\item{Y}{the \eqn{(n\times p)}{(n x p)}-dimensional matrix \code{Y}.}
\item{X}{the \eqn{(n\times q)}{(n x q)}-dimensional data frame \code{X}.}
\item{Info}{
\itemize{
\item \code{lo:} the lower censoring vector;
\item \code{up:} the upper censoring vector;
\item \code{R:} the status indicator matrix encoding the censored/missing values (mainly for internal purposes);
\item \code{order:} an integer vector used for the ordering of the matrices \code{Y} and \code{X} (for internal purposes only);
\item \code{Pattern:} a matrix encoding the information about the the patterns of censored/missing values (for internal purposes only);
\item \code{ym:} the estimated marginal means of the random variables \eqn{Y_j}{Y_j};
\item \code{yv:} the estimated marginal variances of the random variables \eqn{Y_j}{Y_j};
\item \code{n:} the sample size;
\item \code{p:} the number of response variables;
\item \code{q:} the number of columns of the data frame \code{X}.
}
}
}
\author{Luigi Augugliaro (\email{luigi.augugliaro@unipa.it})}
\references{
Augugliaro L., Sottile G., Wit E.C., and Vinciotti V. (2023) <\doi{10.18637/jss.v105.i01}>.
cglasso: An R Package for Conditional Graphical Lasso Inference with Censored and Missing Values.
\emph{Journal of Statistical Software} \bold{105}(1), 1--58.
Augugliaro, L., Sottile, G., and Vinciotti, V. (2020a) <\doi{10.1007/s11222-020-09945-7}>.
The conditional censored graphical lasso estimator.
\emph{Statistics and Computing} \bold{30}, 1273--1289.
Augugliaro, L., Abbruzzo, A., and Vinciotti, V. (2020b) <\doi{10.1093/biostatistics/kxy043}>.
\eqn{\ell_1}{l1}-Penalized censored Gaussian graphical model.
\emph{Biostatistics} \bold{21}, e1--e16.
}
\seealso{
Related to the \R objects of class \dQuote{\code{datacggm}} there are the accessor functions, \code{\link{rowNames}}, \code{\link{colNames}}, \code{\link{getMatrix}}, \code{\link{ColMeans}}, \code{\link{ColVars}}, \code{\link{upper}}, \code{\link{lower}}, \code{\link{event}}, \code{\link{qqcnorm}} and the method functions \code{\link{is.datacggm}}, \code{\link{dim.datacggm}}, \code{\link{summary.datacggm}} and \code{\link{hist.datacggm}}. The function \code{\link{rcggm}} can be used to simulate a dataset from a conditional Gaussian graphical model whereas \code{\link{cglasso}} is the model fitting function devoted to the l1-penalized censored Gaussian graphical model.
}
\examples{
set.seed(123)
# a dataset from a right-censored Gaussian graphical model
n <- 100L
p <- 3L
Y <- matrix(rnorm(n * p), n, p)
up <- 1
Y[Y >= up] <- up
Z <- datacggm(Y = Y, up = up)
Z
# a dataset from a conditional censored Gaussian graphical model
n <- 100L
p <- 3L
q <- 2
Y <- matrix(rnorm(n * p), n, p)
up <- 1
lo <- -1
Y[Y >= up] <- up
Y[Y <= lo] <- lo
X <- matrix(rnorm(n * q), n, q)
Z <- datacggm(Y = Y, lo = lo, up = up, X = X)
Z
# a dataset from a conditional censored Gaussian graphical model
# and with missing-at-random values
n <- 100L
p <- 3L
q <- 2
Y <- matrix(rnorm(n * p), n, p)
NA.id <- matrix(rbinom(n * p, 1L, 0.01), n, p)
Y[NA.id == 1L] <- NA
up <- 1
lo <- -1
Y[Y >= up] <- up
Y[Y <= lo] <- lo
X <- matrix(rnorm(n * q), n, q)
Z <- datacggm(Y = Y, lo = lo, up = up, X = X)
Z
}
\keyword{classes}
\keyword{methods}
|
a0d4837c1c463e76d3107e6588c7de2b16d365f1 | bc3a175d59dd615cd7435254496f5a1da08e42cf | /man/rich_pois.Rd | 588db386c80f1dfc215d3f1ca596f20ff6d7da65 | [
"MIT"
] | permissive | joon3216/funpark | 1dcf3a1d9893beb4fd90d85b79542406af58f40d | 61e05ded919fa530d936f15defc7899dec25f3da | refs/heads/master | 2020-06-01T04:17:01.712343 | 2019-06-17T18:44:13 | 2019-06-17T18:44:13 | 190,632,295 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 800 | rd | rich_pois.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/funpark_data.R
\docType{data}
\name{rich_pois}
\alias{rich_pois}
\title{rich_pois dataset}
\format{A data.table with 8 rows and 4 variables:
\describe{
\item{gender}{gender, either \code{M}ale or \code{F}emale}
\item{criminal_record}{indicating whether a person has a criminal record (\code{yes}) or not (\code{no})}
\item{is_rich}{factors, \code{rich} if a person is rich and \code{not_rich} otherwise}
\item{count}{the number of people that satisfies \code{(gender, criminal_record, is_rich)} combination}
}}
\usage{
data(rich_pois)
}
\description{
A sample dataset to test functions. This dataset has a count response.
}
\examples{
data(rich_pois)
binarize_pois(rich_pois, 'count')
}
\keyword{datasets}
|
6bceafe6b906287b19d96d465ae3a0dbe6e445f7 | b9114e35d530b9b4ddd5947fddb488edbf798be6 | /man/make_filename.Rd | 2f478d8cbf6ee104edcfd2c8bc9980fba3c50c58 | [
"MIT"
] | permissive | franc1995/Nuevo | d333ca56a9dbcf93dd6a72e812a8fe9ada0b1dee | 1a572ea8177b1520c7bb3c9a00b7b8fd75139634 | refs/heads/master | 2021-05-12T04:48:17.755022 | 2018-01-10T11:53:31 | 2018-01-10T11:53:31 | 117,174,338 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 543 | rd | make_filename.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fars_functions.R
\name{make_filename}
\alias{make_filename}
\title{Builds the name of a FARS file}
\usage{
make_filename(year)
}
\arguments{
\item{year}{The year to build the file name}
}
\value{
The name of the FARS file containing the data
}
\description{
Builds the file name of a FARS file. The file name is constructed based on the year
}
\examples{
# Make the filename for year 2001
make_filename(2001)
# Make the filename for year 2016
make_filename(2016)
}
|
af7d96f8a16a25009ccdf581ae5a7181dfb7aaff | 8cc3c330ccfdeaee44117ab31354a7f8302e8498 | /cachematrix.R | 739a1f4c95b8b72f42057c6840349edf1a11677f | [] | no_license | paultchatcha/ProgrammingAssignment2 | 2753dc09bb7a5402513c5534ea0bf0d929f38898 | b71c5b1e956373a4f82946dd4aca5c4516ccc0f1 | refs/heads/master | 2021-01-18T16:34:33.911099 | 2016-02-05T14:11:35 | 2016-02-05T14:11:35 | 51,145,151 | 0 | 0 | null | 2016-02-05T12:25:56 | 2016-02-05T12:25:55 | null | UTF-8 | R | false | false | 869 | r | cachematrix.R | ## These functions intend to invert a squarre invertible matrix
## using the cache
## This first function creates a special 'matrix'
## which is really a list containing pseudo-functions
# Build a special "matrix" object: a list of four closures sharing the
# matrix `x` and a cache slot for its inverse. Replacing the matrix via
# set() invalidates the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL  # new matrix -> stale inverse must be dropped
    },
    get = function() x,
    setinv = function(solve) cached_inverse <<- solve,
    getinv = function() cached_inverse
  )
}
## And this compute the inverted matrix of the matrix
## passed as argument of the first function if not yet in the cache
## otherwise, it just give us the inverted matrix already in the cache
# Return the inverse of the special "matrix" `x` (built by makeCacheMatrix).
# The inverse is computed at most once: subsequent calls reuse the cached
# value stored inside `x`. Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (is.null(cached)) {
    # Cache miss: invert the stored matrix and memoize the result.
    computed <- solve(x$get(), ...)
    x$setinv(computed)
    return(computed)
  }
  message("getting cached data")
  cached
}
|
e44982edcf881025b1a8dcc63f1b30e7d070c72b | d4ae1c925add14251620f7fa4b92d52573185829 | /SNU/R/Data_mining/170829 ridge regression.R | b9b200a590cddf44f41da065dc8c64a39b0c1cbd | [] | no_license | kookoowaa/Repository | eef9dce70f51696e35cec6dc6a5d4ce5ba28c6d7 | 26f9016e65dbbc6a9669a8a85d377d70ca8a9057 | refs/heads/master | 2023-03-08T01:36:29.524788 | 2023-02-21T16:11:43 | 2023-02-21T16:11:43 | 102,667,024 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 503 | r | 170829 ridge regression.R | library(glmnet)
# Credit data; predictors are standardized so the ridge penalty treats
# them on a common scale.
credit <- read.csv('../Data_Mining/Credit.csv')
head(credit)  # visible only when run interactively

# Explicit column access instead of attach(): attach() puts the data frame
# on the search path, where masked copies make it easy to modify the wrong
# object, and the attachment leaks past the end of the script.
Income <- scale(credit$Income)
Limit <- scale(credit$Limit)
Rating <- scale(credit$Rating)
Student <- credit$Student

# Grid of 100 candidate lambda values, from 1e10 (essentially the null
# model) down to 1e-2 (close to least squares).
grid <- 10^seq(10, -2, length = 100)

x <- cbind(Income, Limit, Rating, Student)
y <- credit$Balance

# alpha = 0 selects the ridge (L2) penalty in glmnet.
ridge.mod <- glmnet(x, y, alpha = 0, lambda = grid)
dim(coef(ridge.mod))
coef(ridge.mod)[, 50]

# Coefficient path of the first predictor against log(lambda); y limits
# span the full range of all coefficients so later lines() calls fit.
plot(log(grid), coef(ridge.mod)[2, ], type = 'l', col = 'red',
     ylim = c(min(coef(ridge.mod)), max(coef(ridge.mod))))
lines(log(grid), coef(ridge.mod)[3,], col='blue') |
be60c350833971b975fbdafeda23251aa71eb4af | 4b626cfff176e5520e60eb8e60d051090cecd15c | /src/GLFMR/init_default_params.R | 9e986552ad9df5d5c49d2ea429d3c5cee6a31ee0 | [] | no_license | ml-lab/GLFM | 366a876cc8772c3c56ed5deb772de616e33ad19d | 2dd55a64a62d54f46d0c7099905d80ded644a8d7 | refs/heads/master | 2021-01-15T22:08:35.292235 | 2017-08-04T12:10:16 | 2017-08-04T12:10:16 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,222 | r | init_default_params.R | #' Function to initialize or complete params structure with all
#' simulation parameters and hyperparameters for the GLFM
#' @param data is a list with X, Z, C in order to be able to initialize
#' @param params with data-based values
#' @param t: eventual external transform of obs. X = params.t{d}(Xraw)
#' @param t_1: inverse transform
#' @param dt_1:derivative of the inverse transform
init_default_params <- function(data, params_init) {
  # Every tunable parameter of the GLFM sampler, with its default value.
  # maxK defaults to the number of columns of the observation matrix X.
  param_names <- c("missing", "alpha", "bias", "s2u", "s2B", "Niter", "maxK",
                   "verbose", "numS", "t", "t_1", "dt_1", "transf_dummie",
                   "ext_datatype", "idx_transform")
  param_values <- list(-1, 1, 0, 0.01, 1, 1000, dim(data$X)[2], 1, 100,
                       c(), c(), c(), c(), c(), c())
  names(param_values) <- param_names

  if (length(params_init) > 0) {
    # Indices of the parameters the caller did NOT supply; only those get
    # filled in with defaults (names are carried over by the subsetting).
    idx_to_fill <- setdiff(seq_along(param_names),
                           which(param_names %in% names(params_init)))
    defaults_to_add <- param_values[idx_to_fill]
    # BUG FIX: the original called append(params, ...) on the undefined
    # name 'params' (the argument is 'params_init'), which only worked if
    # a global 'params' object happened to exist. A stray debug print of
    # Niter was also removed.
    return(append(params_init, defaults_to_add))
  }

  # No user-supplied values: return the full default list.
  param_values
}
|
cca0c4bc912c6832863dcecf497c4f59da799e67 | 527cff34256966e0c0798879da45e829d8d143c5 | /Hamilton/HamiltonDF.R | 85f3d0b90ebfe2d4afa31a38cc62fe36f19782eb | [] | no_license | DomiSchr/Hamilton | 92286095c08fb2631d297039268aac202a0d9269 | 5b5b05f51cef7279d0ec954312e02cbb98d5b08f | refs/heads/master | 2022-11-14T00:35:32.203949 | 2020-07-07T14:57:05 | 2020-07-07T14:57:05 | 248,734,286 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,644 | r | HamiltonDF.R | Hamilton <- function(p, h, q = 0) {
  # Largest remainder (Hamilton) method for the apportionment problem:
  # compute a quota, give each state floor(population / quota) seats, then
  # hand the remaining seats to the states with the largest fractional parts.
  #
  # Args:
  #   p: a data frame with the names of the states in the first column
  #      and the corresponding population size in the second column.
  #   h: the house size (total number of seats to apportion).
  #   q: the quota. Choose: 0 for Hare Quota (default)
  #                         1 for Droop Quota
  #                         2 for Hagenbach-Bischoff Quota
  #                         3 for Imperiali Quota
  #
  # Returns:
  #   The input data frame extended with intermediate columns (share,
  #   share.floor, fraction) and the seat allocation in the final `result`
  #   column (the result column is omitted when the floors already sum to h).
  psum <- sum(p[, 2])
  if(q == 0){
    #Hare Quota (default): total population / number of seats
    a <- floor(psum / h)
  } else if(q == 1){
    #Droop Quota
    a <- floor(1 + psum/(h + 1))
  } else if(q == 2){
    #Hagenbach-Bischoff Quota
    a <- floor(psum/(h + 1))
  } else if(q == 3){
    #Imperiali Quota
    a <- floor(psum/(h + 2))
  } else {
    # Any other value of q is rejected.
    stop("Chosen quota option not valid!")
  }
  # share = population / quota; each state is guaranteed floor(share) seats,
  # and `fraction` is the remainder used to break ties for the rest.
  p["share"] <- array(0, length(p[,1]))
  p["share.floor"] <- array(0, length(p[,1]))
  p["fraction"] <- array(0, length(p[,1]))
  for (i in 1:length(p[,2])) {
    p[i, "share"] <- p[i, 2] / a
    p[i, "share.floor"] <- floor(p[i, "share"])
    p[i, "fraction"] <- p[i, "share"] - p[i, "share.floor"]
  }
  # If the floors already use up every seat there is nothing left to assign.
  if (sum(p[, "share.floor"]) == h) {
    return(p)
  }
  p[["result"]] <- NA
  p["result"] <- replicate(1, p[["share.floor"]])
  # Hand one extra seat each to the states with the largest remainders.
  # NOTE(review): if the quota allocates MORE than h floor seats (possible
  # with the Imperiali quota), h - sum(share.floor) is negative and
  # 1:(negative) counts downward, corrupting the result -- confirm that
  # case cannot occur for the intended inputs.
  ranks <- order(p$fraction, decreasing = TRUE)
  for (i in 1:(h - sum(p[, "share.floor"]))) {
    p[[ranks[i], "result"]] <- p[[ranks[i], "result"]] + 1
  }
  return(p)
}
f781318cabd03ecd5d16800d3652281a9541b285 | 98d0a78eec76dd9ff4f11a230946096e9915a049 | /cachematrix.R | a3e3fd9713b1a7d935b72e2db688463b347b3067 | [] | no_license | ThurstonT/ProgrammingAssignment2 | e1484a38a884ecfbe54012bf0a3e892b41277f54 | b69e01920c688f431fc27e3cf935a387cc99a67d | refs/heads/master | 2021-01-18T16:11:25.909845 | 2015-10-24T05:52:07 | 2015-10-24T05:52:07 | 44,719,467 | 0 | 0 | null | 2015-10-22T03:18:23 | 2015-10-22T03:18:23 | null | UTF-8 | R | false | false | 2,338 | r | cachematrix.R | ## These functions will allow a matrix to be stored in the global environment.
## makeCacheMatrix builds a wrapper around a matrix whose closure environment
## can cache the matrix's inverse. cacheSolve computes the inverse of the
## wrapped matrix, reusing the cached copy when one is available.
## These functions work with the understanding that the matrix is a square,
## invertible matrix.
makeCacheMatrix <- function(x = matrix()) {
  ## Wrap a matrix together with a cache slot for its inverse.
  ## Returns a list of four accessors:
  ##   set(y)       -- replace the stored matrix and invalidate the cache
  ##   get()        -- return the stored matrix
  ##   setmatrix(m) -- store a computed inverse in the cache
  ##   getmatrix()  -- return the cached inverse (NULL until one is stored)
  ## State lives in this function's closure environment (updated via <<-).
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL  # the old inverse no longer applies
  }
  get <- function() {
    x
  }
  setmatrix <- function(matrix) {
    cached_inverse <<- matrix
  }
  getmatrix <- function() {
    cached_inverse
  }
  list(set = set,
       get = get,
       setmatrix = setmatrix,
       getmatrix = getmatrix)
}
## cacheSolve returns the inverse of the matrix wrapped by a
## makeCacheMatrix-style object `x`, computing it at most once.
##
## The first call inverts the matrix with solve() and stores the result
## through x$setmatrix(); later calls announce "getting cached data" and
## return the stored inverse without recomputing. Extra arguments are
## forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of the matrix held by 'x'.
  cached <- x$getmatrix()
  if (is.null(cached)) {
    # Cache miss: invert now and remember the result for next time.
    cached <- solve(x$get(), ...)
    x$setmatrix(cached)
    return(cached)
  }
  message("getting cached data")  # status note on the cached path
  cached
}
|
4fb7c6ef67ccc4dca7024748ebf8653d34f925ce | 27a252b1403e7dcbf239dcab428cf6ab3f9a75f2 | /blblm/man/confint.blblm.Rd | 9a52419387119c8e42ca73a81d2a0df44ccaf5f1 | [
"MIT"
] | permissive | AWayzy/blblmProject | e7a726ba93f86ea00662b0dd8fd846e91bd2a4ce | 8abe92677bc379fe25f487ed67d8a5d6ce1d63c5 | refs/heads/main | 2023-06-29T17:38:32.042667 | 2021-07-23T20:39:57 | 2021-07-23T20:39:57 | 348,892,661 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 591 | rd | confint.blblm.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/blblm.R
\name{confint.blblm}
\alias{confint.blblm}
\title{Confint blblm}
\usage{
\method{confint}{blblm}(object, parm = NULL, level = 0.95, ...)
}
\arguments{
\item{object}{A blblm object}
\item{parm}{The given parameters to be estimated}
\item{level}{The given level of confidence (default .95)}
\item{...}{...}
}
\description{
Confint blblm
}
\details{
Returns a confidence interval for the given parameters (if no parameters are given, gives a confidence interval for all parameters in the model formula)
}
|
fd107bf980ce7d2929ba9f2d445eaabe5068b437 | dbf154fc24d0361587fbb24c0e6f94858d659578 | /Data_Analyse_Park/R_file/day10_hojin.R | e4e3a4aaff5bdb20ad85266e4bbe6757404ec171 | [] | no_license | aporlo21/multicampus | 9a81f30f499aa56d2cb481733ac921ccc7d8d389 | 4d6781dde53f343fb6058b4efcc378d9c9f8897f | refs/heads/master | 2020-05-18T03:57:52.280718 | 2019-11-22T04:51:46 | 2019-11-22T04:51:46 | 184,158,913 | 0 | 0 | null | null | null | null | UHC | R | false | false | 8,936 | r | day10_hojin.R | install.packages("RWeka")
library(RWeka) # RWeka supplies NGramTokenizer and Weka_control
library(tm)
# Weka_control is used further below to set the min/max n-gram sizes.
mytext<-c("The United States comprises fifty states.", "In the United States, each state has its own laws.","However, federal law overrides state law in the United States.")
mytext # the raw character vector
# Build the corpus.
mytemp<-VCorpus(VectorSource(mytext)) # a corpus built directly from a vector via VectorSource
mytemp # mytext converted into a corpus
# VCorpus(DirSource("path")) would instead read every file of a directory.
ngram.tdm<-TermDocumentMatrix(mytemp) # unigram term-document matrix (17 terms x 3 docs here)
ngram.tdm # non-/sparse entries: counts of cells where a term does / does not occur in a document
inspect(ngram.tdm) # show the per-document term frequencies
# Tokenizer plugged into tm's TermDocumentMatrix: uses RWeka's built-in
# NGramTokenizer to emit 2-grams and 3-grams (min = 2, max = 3).
bigramTokenizer <- function(x) {
  NGramTokenizer(x, Weka_control(min = 2, max = 3))
}
ngram.tdm<-TermDocumentMatrix(mytemp, control=list(tokenize=bigramTokenizer)) # plug the custom tokenizer in via `control`
# Tokenization = how the text is split into terms; bigramTokenizer receives
# each document (x) and forwards it to RWeka's n-gram tokenizer.
str(ngram.tdm) # 18 terms x 3 docs = 54 cells
ngram.tdm$dimnames$Terms # inspect the terms: the sentences split into 2-/3-grams
# (the terms could also be analysed via pairwise distances)
ngram.tdm[,]
# Row sums: how often each n-gram occurs across the whole corpus.
bigramlist<-apply(ngram.tdm[,], 1, sum)
# apply() works on arrays (incl. matrices); sapply() takes a list and returns a vector.
sort(bigramlist,decreasing=TRUE)
# Homework: repeat the steps above on mycorpus and
# report the top-10 bigrams & trigrams.
# Korean-language processing
install.packages("KoNLP")
library(KoNLP)
library(stringr)
library(rJava)
library(tm)
mytextlocation<-"논문/" # folder of Korean papers ("논문" = "paper")
# Read every file in the folder into one corpus.
mypaper<-VCorpus(DirSource(mytextlocation))
mykorean<-mypaper[[19]]$content # grab the content of document 19 to clean by hand
mykorean
# Clean-up steps applied to mykorean below:
#  - strip Latin letters
#  - strip parentheses
#  - strip middle dots
#  - strip single quotes
#  - keep mostly-Korean text
library(stringr)
# Strip Latin letters.
mytext<-str_replace_all(mykorean, "[[:lower:]]","") # args: document, pattern ([:lower:] = lowercase Latin), replacement "" = delete
# A Python-style "a-z" character-class regex would work as well.
mykorean<-str_replace_all(mykorean, "\\(","") # a bare "(" is a regex metacharacter, so it must be escaped
mykorean<-str_replace_all(mykorean, "\\)","") # closing parenthesis
mykorean<-str_replace_all(mykorean, "[a-zA-Z]", "")
mykorean
# Remove " . " and " ' " sequences.
# NOTE(review): "." is a regex wildcard, so " . " matches ANY character
# between spaces -- escape as "\\." if only a literal dot was intended.
mykorean<-str_replace_all(mykorean, " . ", "")
mykorean<-str_replace_all(mykorean, " ' ", "")
mykorean<-str_replace_all(mykorean, " ' ", "")
mykorean
#-------------------------------------------------------------------------
# Survey every digit run appearing anywhere in the corpus...
mydigits<-lapply(mypaper, function(x)
  (str_extract_all(x, "[[:digit:]]{1,}")))
table(unlist(mydigits)) # frequency of each digit run found
# ...then drop all numbers from the documents.
mycorpus<-tm_map(mypaper, removeNumbers)
str(mycorpus)
inspect(mycorpus[[3]])
# Words glued together by punctuation (letters-punct-letters, e.g. "co-op").
mypuncts<-lapply(mypaper, function(x)
  (str_extract_all(x,"\\b[[:alpha:]]{1,}[[:punct:]]{1,}[[:alpha:]]{1,}\\b")))
table(unlist(mypuncts))
# Apply a gsub()-based substitution across every document of a tm corpus:
# replaces each match of `oldexp` with `newexp` and returns the corpus.
mytempfunct <- function(myobject, oldexp, newexp) {
  substitute_all <- content_transformer(
    function(x, pattern) gsub(pattern, newexp, x)
  )
  tm_map(myobject, substitute_all, oldexp)
}
# (lecture note) One could also judge from overall context whether a stem
# like "필요" ("need") takes the suffix "성" ("-ness") -- i.e. analyse it
# probabilistically / by frequency based on the preceding word == Bayesian theory.
# Extract numeric expressions again (data analysis) -- repeats the step above.
mydigits<-lapply(mypaper, function(x)
  (str_extract_all(x, "[[:digit:]]{1,}")))
table(unlist(mydigits)) # the regex extracts them all in one pass
mydigits
unlist(mydigits) # prints file names alongside the digit runs found in each file
table(unlist(mydigits)) #
# Remove the numbers,
# applied to every document in one batch.
mycorpus<-tm_map(mypaper, removeNumbers)
mycorpus$content
str(mycorpus)
# Extract the nouns of a Korean text with KoNLP::extractNoun() and return
# them joined into one space-separated string.
myNounFun <- function(mytext) {
  nouns <- extractNoun(mytext)
  paste(nouns, collapse = " ")
}
# Spot-check two cleaned documents.
mycorpus[[2]]
inspect(mycorpus[[3]])
# Re-run the punctuation-joined-words survey on the raw corpus.
mypuncts<-lapply(mypaper, function(x)
  (str_extract_all(x,"\\b[[:alpha:]]{1,}[[:punct:]]{1,}[[:alpha:]]{1,}\\b")))
table(unlist(mypuncts))
table(unlist(mypuncts))
# Corpus-wide substitution helper: replaces each match of `oldexp` with
# `newexp` in every document of a tm corpus and returns the corpus.
mytempfunct <- function(myobject, oldexp, newexp) {
  replace_pattern <- content_transformer(
    function(x, pattern) gsub(pattern, newexp, x)
  )
  tm_map(myobject, replace_pattern, oldexp)
}
# Strip punctuation variants one pattern at a time, then collapse runs of
# whitespace left behind by the deletions.
mycorpus<-mytempfunct(mycorpus, "-","")
mycorpus<-mytempfunct(mycorpus, "_","")
mycorpus<-mytempfunct(mycorpus, "\\?","")
mycorpus<-mytempfunct(mycorpus, "/","")
mycorpus<-mytempfunct(mycorpus, "\\.","")
mycorpus<-mytempfunct(mycorpus, " . ","")
mycorpus<-mytempfunct(mycorpus, "‘","")
mycorpus<-mytempfunct(mycorpus, "’","")
mycorpus<-tm_map(mycorpus, stripWhitespace)
myNounCorpus<-mycorpus
library(KoNLP)
# Noun extractor (re-definition): pull the nouns out of a Korean text via
# KoNLP::extractNoun() and join them into a single space-separated string.
myNounFun <- function(mytext) {
  paste(extractNoun(mytext), collapse = " ")
}
# Try the noun extractor on one document, then apply it to the whole corpus.
myNounListRes<-myNounFun(mycorpus[[3]]$content)
myNounListRes
myNounCorpus<-mycorpus
length(mycorpus)
# Replace each document's text with its extracted nouns.
for(i in 1:length(mycorpus)){
  myNounCorpus[[i]]$content<-
    myNounFun(mycorpus[[i]]$content)
}
myNounCorpus[[19]]$content
myNounCorpus[[19]]$meta
library(stringr)
# Word-frequency table across the noun-only corpus.
table(unlist(lapply(myNounCorpus,function(x)
  str_extract_all(x,boundary("word")))))
# Patterns consolidated below: "커뮤니[[:alpha:]]{1,}" -> "커뮤니케이션"
# Canonicalize inflected word forms so the DTM counts them together:
# tokens starting 커뮤니... become 커뮤니케이션 ("communication") and tokens
# starting 위키리크스... become 위키리크스 ("WikiLeaks"). `imsi` ("temporary")
# keeps an untouched copy of the corpus.
imsi<-myNounCorpus
#length(imsi)
for (i in seq_along(myNounCorpus)) {
  myNounCorpus[[i]]$content <-
    str_replace_all(imsi[[i]]$content,
                    "커뮤니[[:alpha:]]{1,}",
                    "커뮤니케이션")
  # BUG FIX: this second pass previously re-read the untouched copy
  # (imsi[[i]]$content), silently discarding the replacement made just
  # above; it must chain off the already-updated content instead.
  myNounCorpus[[i]]$content <-
    str_replace_all(myNounCorpus[[i]]$content,
                    "위키리크스[[:alpha:]]{1,}",
                    "위키리크스")
}
library(tm)
# Document-term matrix over the noun-only corpus.
dtm.k<-DocumentTermMatrix(myNounCorpus)
dtm.k
colnames(dtm.k)
# Descriptive statistics.
word.freq<-apply(dtm.k[,],2,sum) # column sums = total frequency of each term
head(word.freq)
length(word.freq)
# Sort terms by frequency.
sort.word.freq<-sort(word.freq, decreasing = TRUE)
sort.word.freq[1:20]
cumsum.word.freq<-cumsum(sort.word.freq) # cumulative sum
cumsum.word.freq[1:20]
# Cumulative proportion of all term occurrences (Zipf-style curve).
prop.word.freq<-cumsum.word.freq/cumsum.word.freq[length(cumsum.word.freq)]
prop.word.freq[1:20]
plot(1:length(word.freq))
plot(1:length(word.freq),prop.word.freq, type='l')
library("wordcloud")
#names(word.freq)
library(RColorBrewer)
mypal=brewer.pal(4,"Dark2")
# Word cloud of every term occurring at least 5 times.
wordcloud(names(word.freq), freq=word.freq,
          min.freq = 5,
          col=mypal,
          random.order = FALSE,
          scale=c(4,0.2))
# k-means segmentation of teen social-network profiles (sns.csv).
teens<-read.csv("c:\\rwork/sns.csv")
str(teens)
table(teens$gender) # table() drops NA values by default
table(teens$gender,useNA= "ifany") # include the NA count
summary(teens$age)
# Keep only plausible teen ages [13, 20); everything else becomes NA.
teens$age<-ifelse(teens$age>=13 & teens$age<20, teens$age, NA)
summary(teens$age)
teens$female<-ifelse(teens$gender=="F"& !is.na(teens$gender), 1, 0) # 1 if recorded female, 0 if male or NA
teens$nogender<-ifelse(is.na(teens$gender), 1, 0) # dummy for missing gender
table(teens$gender,useNA="ifany")
table(teens$female)
table(teens$nogender)
mean(teens$age)
mean(teens$age,na.rm=TRUE)
table(teens$gradyear)
# Mean age per graduation year.
aggregate(data=teens,age~gradyear,mean,na.rm=TRUE)
table(teens$gradyear)
class(aggregate(data=teens,age~gradyear,mean, na.rm=T))
# aggregate() returns a "data.frame"...
ave_age<-ave(teens$age, teens$gradyear,
             FUN=function(x) mean(x,na.rm=TRUE))
# ...whereas ave() returns a vector (one group mean per row).
class(ave_age) # it is a plain vector
# Impute missing ages with their cohort (gradyear) mean.
teens$age<-ifelse(is.na(teens$age), ave_age, teens$age)
summary(teens$age)
str(teens)
# The 36 interest-keyword frequency columns.
interests<-teens[5:40]
interests # per-profile keyword frequencies
# A single dominant feature can distort the distances => standardize first.
lapply(interests, scale) # standardization (result shown, not stored)
interests_z<-as.data.frame(lapply(interests, scale))
interests_z
#------------- end of distance preparation -------------------------
# (kmeans starts from random centers and updates them iteratively)
# normalization  = map values to 0..1
# standardization = z-scores (e.g. comparing TOEIC vs TOEFL scores)
# Clustering.
set.seed(2345)
teen_clusters<-kmeans(interests_z, 5)
str(teen_clusters)
teen_clusters$size
teen_clusters$centers
teen_clusters$clustercen<-teen_clusters$centers
teen_clusters$clustercen
#teen_clusters[1:5, c("clustercen","gender","age","friends")]
str(teen_clusters)
table(teen_clusters$cluster)
# Attach each profile's cluster id, then report per-cluster means.
teens$cluster<-teen_clusters$cluster
str(teens)
aggregate(data=teens, age~cluster, mean)
aggregate(data=teens, female~cluster, mean)
aggregate(data=teens, friends~cluster, mean)
|
c09d2d120353a347e96ab9818753e4361e1d72c9 | b9742e90e1ed5f00a95e537ba7694d8d067fe2fd | /Assignment_3_2.R | b30389bad39af71585e22a1537d97718e210cf24 | [] | no_license | Pankaj2019/Turbo-Train1 | d69cf507ba11458bf76c70c29573a87192819526 | 9a09291ef233d75a818cfc4037dece479f3783b9 | refs/heads/master | 2021-04-09T17:53:29.701584 | 2018-04-14T10:06:00 | 2018-04-14T10:06:00 | 125,724,767 | 0 | 1 | null | 2018-04-14T09:11:48 | 2018-03-18T12:54:06 | R | UTF-8 | R | false | false | 3,036 | r | Assignment_3_2.R | cerealData<- read.csv("cereal3.csv")
# k-means clustering of the cereal data.
# NOTE(review): clusplot() here (and daisy()/silhouette() below) come from
# the `cluster` package, but no library(cluster) call is visible --
# presumably it is loaded earlier in the session; confirm.
cerealData
cerealData <- data.matrix(cerealData)
cerealData
# NOTE : the 1st column ' CEREAL ' is Categorical data
# KMEANS needs only Numerical data. So, convert the Categorical data to Numeric first
# (data.matrix() above already coerced the factor to numeric codes, so this
# line is effectively a safeguard)
cerealData[,1] <- as.numeric(cerealData[,1])
class(cerealData[,1])
# ````````````````````````````Using Elbow method to find optimal no. of clusters``````````````````````````
# Total within-cluster sum of squares for k = 1..10.
set.seed(6)
wcss <- vector()
for(i in 1:10)wcss[i]<-sum(kmeans(cerealData,i)$withinss)
plot(1:10,wcss,type="b",main=paste('Clusters of Cereals'),xlab="no. of clusters",ylab="wcss")
# So, 5 is the optimal Cluster no. size
cerealData.5MC.Elbow <- kmeans(cerealData,5,iter.max= 300)
cerealData.5MC.Elbow
# Visualizing the clusters.
clusplot(cerealData,cerealData.5MC.Elbow$cluster,lines=0
         ,shade=TRUE,color=TRUE,labels=2,plotchar=FALSE,span=TRUE,main=paste("Cluster of Cereals"))
# `````````````````````````` Using Silhouette method to find optimal no. of clusters ````````````````````````
# For each k in 2..7: run kmeans, compute the dissimilarity matrix with
# daisy(), and read the average silhouette width off the plot.
#2MC
set.seed(123)
cerealData.2MC.Silhouette <-kmeans(cerealData,2)
cerealData.2MC.Silhouette
class(cerealData.2MC.Silhouette)
cerealData.2MC.Silhouette$cluster
D2 <- daisy(cerealData)
plot(silhouette(cerealData.2MC.Silhouette$cluster, D2)) # Average Silhouette value : 0.41
# 3MC
set.seed(123)
cerealData.3MC.Silhouette <-kmeans(cerealData,3)
cerealData.3MC.Silhouette
class(cerealData.3MC.Silhouette)
cerealData.3MC.Silhouette$cluster
D3 <- daisy(cerealData)
plot(silhouette(cerealData.3MC.Silhouette$cluster, D3)) # Average Silhouette value : 0.44
# 4MC
set.seed(12)
cerealData.4MC.Silhouette <-kmeans(cerealData,4)
cerealData.4MC.Silhouette
class(cerealData.4MC.Silhouette)
cerealData.4MC.Silhouette$cluster
D4 <- daisy(cerealData)
plot(silhouette(cerealData.4MC.Silhouette$cluster, D4)) # Average Silhouette value : 0.38
# 5MC
set.seed(12)
cerealData.5MC.Silhouette <-kmeans(cerealData,5)
cerealData.5MC.Silhouette
class(cerealData.5MC.Silhouette)
cerealData.5MC.Silhouette$cluster
D5 <- daisy(cerealData)
plot(silhouette(cerealData.5MC.Silhouette$cluster, D5)) # Average Silhouette value : 0.38
# 6MC
set.seed(12)
cerealData.6MC.Silhouette <-kmeans(cerealData,6)
cerealData.6MC.Silhouette
class(cerealData.6MC.Silhouette)
cerealData.6MC.Silhouette$cluster
D6 <- daisy(cerealData)
plot(silhouette(cerealData.6MC.Silhouette$cluster, D6)) # Average Silhouette value : 0.4
# 7MC
set.seed(12)
cerealData.7MC.Silhouette <-kmeans(cerealData,7)
cerealData.7MC.Silhouette
class(cerealData.7MC.Silhouette)
cerealData.7MC.Silhouette$cluster
D7 <- daisy(cerealData)
plot(silhouette(cerealData.7MC.Silhouette$cluster, D7)) # Average Silhouette value : 0.39
# Since 3MC gives the max silhouette value, choosing the 3-cluster solution.
# Visualizing the clusters.
clusplot(cerealData,cerealData.3MC.Silhouette$cluster,lines=0
         ,shade=TRUE,color=TRUE,labels=2,plotchar=FALSE,span=TRUE,main=paste("Cluster of Cereals"))
|
91cf36ca371b3666f6f1f2b62c3702bc3cffa750 | fc9ef092fa687376d9bd538525d8a01553b489b4 | /Statystyka/R/pierwsze_zajęcia.r | acd0af4fcea0ef50659b78fcb261780159c73224 | [] | no_license | BatuIks/GOG | 3e0fe9190f7405352e7640d3552c682cb559ccbf | a86ed1952937c8b6e5c5aa97b83d5a27eda24b5c | refs/heads/master | 2020-12-30T16:40:03.406316 | 2017-11-13T03:28:25 | 2017-11-13T03:28:25 | 91,002,558 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 640 | r | pierwsze_zajęcia.r | x = 5
# Intro exercises: vectors, element-wise arithmetic, matrices, data frames
# and indexing. (Original notes were in Polish; translated to English.)
y = c(2,6,7)
z = c(4,5,9)
# Element-wise vector arithmetic (equal lengths, so no recycling).
z+y
z-y
z*y
z/y
wektor = c("Daniel", "nie odmawia", "pacierza", "przed","snem")
wektor2 = c(1,43,63,26)
# Convert the numeric vector to a factor: the numbers become level labels.
# (The original note here read "nie rozumiem" = "I don't understand".)
wektor2 = as.factor(wektor2)
# as.numeric() on a factor returns the internal level codes (1..4 here),
# NOT the original numbers -- this is why the values change.
wektor2 = as.numeric(wektor2)
x = 1:9
tab = matrix(x, nrow = 3, ncol = 3, byrow = TRUE)
tab
# Only 3 values for a 3x3 matrix: they are recycled to fill all 9 cells
# (with a warning), and the mixed types are coerced to character.
x = c(2,3,"haafg")
tab = matrix(x, nrow = 3, ncol = 3, byrow = TRUE)
tab
# Data frame from three 3-element columns (the third holds the Polish
# labels for sex / age / education).
tab2 = data.frame(y,x,c("płeć","wiek","wykształcenie"))
tab2
colnames(tab2)
colnames(tab2) <- c("label1", "label2", "label3")
colnames(tab2)
tab2
x <- c(2,3,4)
x[3]
# Matrix and data-frame indexing examples.
tab[,c(2,3)]
tab[c(1,2),]
tab2[,c("label3")]
tab2$label1
tab2$label2
# NOTE(review): `tab` has no column names ("label2" was set on tab2), so
# this subscript errors with "subscript out of bounds" -- tab2 was likely meant.
tab[3,"label2"]
w <- which(tab2$label2 == 3)
|
d17f38020d336aa775e780b8d71e3c356c37449c | f06d2040542a5d1362e1bc339f7ead20e70862c6 | /man/TraceBrewer.Rd | 3f1201f7bcd85c4683f7732e56de4a7a81c880fa | [
"MIT"
] | permissive | leeleavitt/procPharm | 89d65f718d6328b605578b2d8308113af0b39e7f | b09ce82a76658cf46c7427b0c106822c8cadfdf7 | refs/heads/master | 2023-04-13T10:26:45.239945 | 2021-02-02T18:06:58 | 2021-02-02T18:06:58 | 213,467,533 | 0 | 1 | NOASSERTION | 2023-03-25T00:53:08 | 2019-10-07T19:22:32 | R | UTF-8 | R | false | true | 548 | rd | TraceBrewer.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tracePrep.R
\name{TraceBrewer}
\alias{TraceBrewer}
\title{This is our trace cleaning protocol}
\usage{
TraceBrewer(dat, bscore = F, blcPrep = T, verbose = F)
}
\arguments{
\item{dat}{The RD experiment object to process.}
\item{bscore}{Logical; if \code{TRUE}, all scores are replaced via the \code{bscore2} function.}
\item{blcPrep}{Logical; if \code{TRUE}, an expensive baseline-correction scheme is applied. If \code{FALSE}, only some SNR processing occurs.}
}
\description{
This is our trace cleaning protocol
}
|
e362fbd1f6f616d94a84b1ad8dcd4e779165c095 | 9df69dbe58ff3a85b9e62b3249b36b3bdc3f828e | /P556HW2/R_Files/Prob1.R | fe0c0d63153230f89aa149708afb1eb851b9e422 | [] | no_license | chuckjia/P556-AppliedML-Fall2017 | e14ab07d56b75afd70cebb017e62e762e8ce584e | 5f5fe35363dc86bccbd48b4edcf91a87b9d0a0ff | refs/heads/master | 2021-09-27T22:09:51.151350 | 2018-11-12T05:10:38 | 2018-11-12T05:10:38 | 112,528,741 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,151 | r | Prob1.R | # Problem 1
# Part (a): complete-linkage hierarchical clustering of 4 observations
# from a hand-written dissimilarity matrix.
# NOTE(review): rm(list = ls()) wipes the whole workspace at each part --
# convenient for homework, but avoid it in shared code.
rm(list = ls())
dissimMat = matrix(c(0, 0.3, 0.4, 0.7, 0.3, 0, 0.5, 0.8, 0.4, 0.5, 0, 0.45, 0.7, 0.8, 0.45, 0), nrow = 4, byrow = T)
dissimMat = as.dist(dissimMat)
hclustRes = hclust(dissimMat, method = "complete")
plot(hclustRes, col = "darkgreen", xlab = "Observations")
# Hand calculated result
# This is the result after putting 1 and 2 in one cluster
# The first row/column represent the cluster of (1, 2)
dissimMat2 = matrix(c(0, 0.5, 0.8, 0.5, 0, 0.45, 0.8, 0.45, 0), nrow = 3, byrow = T)
print(dissimMat2)
# Part (b): single linkage on the same dissimilarities.
rm(list = ls())
dissimMat = matrix(c(0, 0.3, 0.4, 0.7, 0.3, 0, 0.5, 0.8, 0.4, 0.5, 0, 0.45, 0.7, 0.8, 0.45, 0), nrow = 4, byrow = T)
dissimMat = as.dist(dissimMat)
hclustRes = hclust(dissimMat, method = "single")
plot(hclustRes, col = "darkgreen", xlab = "Observations")
# Part (d): complete linkage again, with the observation labels permuted.
rm(list = ls())
dissimMat = matrix(c(0, 0.3, 0.4, 0.7, 0.3, 0, 0.5, 0.8, 0.4, 0.5, 0, 0.45, 0.7, 0.8, 0.45, 0), nrow = 4, byrow = T)
dissimMat = as.dist(dissimMat)
hclustRes = hclust(dissimMat, method = "complete")
label = c(2, 1, 4, 3)
plot(hclustRes, labels = label, col = "darkgreen", xlab = "Observations")
|
c4996a4f54919dfaf840799827f3a7de32b92835 | a2fc8a30ac33d8a2a4f0bc254ce4042b2fe66a1d | /Rscripts/GenerateTestsOutputs.R | 911936a99fe7c8cd4d3330570ee77d33831ec09c | [] | no_license | UAQpqd/synthSignal | c9c017f4d7b1bcbcf313b4a22dfb4cdc3f63700f | 963f52a50a34d35e3858a45ae4792d0eaf5ed075 | refs/heads/master | 2021-09-10T14:57:01.641419 | 2018-03-28T05:01:14 | 2018-03-28T05:01:14 | 116,705,546 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,103 | r | GenerateTestsOutputs.R |
# Read a two-column CSV (columns `time` and `amplitude`) and render it as a
# line plot with the given title and axis labels, plus a background grid.
PlotCSVWithTitleAndLabels <- function(csvFilename, plotTitle, xLabel, yLabel)
{
  signal <- read.csv(csvFilename)
  plot(signal$time, signal$amplitude,
       type = 'l', main = plotTitle, xlab = xLabel, ylab = yLabel)
  grid()
}
SignalTest_SignalWithOneWaveform_CheckWaveformIsGenerated <- function() {
  # Plot the CheckWaveformIsGenerated test output (time vs amplitude).
  PlotCSVWithTitleAndLabels("CheckWaveformIsGenerated.csv",
                            "CheckWaveformIsGenerated",
                            "Time (s)", "Amplitude (V)")
}
SignalTest_SignalWithWhiteNoise_CheckWhiteNoiseMeanIsZero <- function() {
  # Plot the CheckWhiteNoiseMeanIsZero test output (time vs amplitude).
  PlotCSVWithTitleAndLabels("CheckWhiteNoiseMeanIsZero.csv",
                            "CheckWhiteNoiseMeanIsZero",
                            "Time (s)", "Amplitude (V)")
}
SignalTest_SignalWithWhiteNoise_CheckFrequencyChanging <- function() {
  # Plot the CheckFrequencyChanging test output (time vs amplitude).
  PlotCSVWithTitleAndLabels("CheckFrequencyChanging.csv",
                            "CheckFrequencyChanging",
                            "Time (s)", "Amplitude (V)")
}
SignalTest_SignalWithHarmonics_Check3rdHarmonicIsPresent <- function() {
  # Plot the Check3rdHarmonicIsPresent test output (time vs amplitude).
  PlotCSVWithTitleAndLabels("Check3rdHarmonicIsPresent.csv",
                            "Check3rdHarmonicIsPresent",
                            "Time (s)", "Amplitude (V)")
}
SignalTest_SignalWithHarmonics_CheckHarmonicsArePresent <- function() {
  # Plot the CheckHarmonicsArePresent test output (time vs amplitude).
  PlotCSVWithTitleAndLabels("CheckHarmonicsArePresent.csv",
                            "CheckHarmonicsArePresent",
                            "Time (s)", "Amplitude (V)")
}
SignalTest_SignalWithOneWaveformWithoutInterpolation_CheckSag <- function() {
  # Plot the CheckSag test output (time vs amplitude).
  PlotCSVWithTitleAndLabels("CheckSag.csv",
                            "CheckSag",
                            "Time (s)", "Amplitude (V)")
}
SignalTest_SignalWithOneWaveformWithoutInterpolation_CheckSwell <- function() {
  # Plot the CheckSwell test output (time vs amplitude).
  PlotCSVWithTitleAndLabels("CheckSwell.csv",
                            "CheckSwell",
                            "Time (s)", "Amplitude (V)")
}
# Render every test figure into the active graphics device (Rplots.pdf when
# run via Rscript), then open the resulting file.
SignalTest_SignalWithOneWaveform_CheckWaveformIsGenerated()
SignalTest_SignalWithWhiteNoise_CheckWhiteNoiseMeanIsZero()
SignalTest_SignalWithWhiteNoise_CheckFrequencyChanging()
SignalTest_SignalWithHarmonics_Check3rdHarmonicIsPresent()
SignalTest_SignalWithHarmonics_CheckHarmonicsArePresent()
SignalTest_SignalWithOneWaveformWithoutInterpolation_CheckSag()
SignalTest_SignalWithOneWaveformWithoutInterpolation_CheckSwell()
# NOTE(review): shell.exec() is Windows-only; browseURL() would be portable.
shell.exec('Rplots.pdf')
|
639942bf7e09847370edbf944df9e08c7deddeb3 | 66a7e8ca677bed32c72c4cc10cf32f6e6fcf6da9 | /jhickeyST790/R/convex_functions.R | 690380999b880503dd4f6bc9a9501aa5f6402d48 | [] | no_license | JimmyJHickey/ST790-Advanced-Computing | e6086ac43a36ff3d4d4c9f847b13525b9b53aa7c | 1d3c5e445e287c2a36b81a44c1e5acd2c123c91a | refs/heads/master | 2022-11-17T09:53:54.052499 | 2020-07-17T20:40:22 | 2020-07-17T20:40:22 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 460 | r | convex_functions.R | #' Take the log of the determinant of a matrix.
#' (NOTE: despite the title above, this returns the NEGATIVE log-determinant,
#' -log(det(in_matrix)) -- the convex, barrier-style quantity. The previous
#' @return text said "the log of the determinant", contradicting the code.)
#'
#' @param in_matrix A square, positive definite matrix.
#' @return -log(det(in_matrix)), a numeric scalar.
logdet <- function(in_matrix) {
  # A non-square matrix has no determinant.
  # BUG FIX: the error message referred to 'X', a parameter name this
  # function does not have; it now names the actual argument.
  if (nrow(in_matrix) != ncol(in_matrix))
    stop("'in_matrix' must be a square matrix")
  matrix_det <- det(in_matrix)
  # det <= 0 means the input is singular or not positive definite,
  # so the logarithm would be undefined.
  if (matrix_det <= 0)
    stop("'in_matrix' must be positive definite")
  return(-log(matrix_det))
}
|
33b755da313febf104fb4a0090eddfc97fb81280 | 9bba31a6551902ea5f032521d87c3b97eaf2bb5e | /Step2_SJmaker.R | 5b9116d73b12ae92ccdec1a86b2246fcf47b38dc | [] | no_license | Raul-Nicolas/SAVIRAV | a12bc83515f2d355615498b8093386ee2bc46251 | 710427ab8cfaa171c2f335981ab9313890ab8f1a | refs/heads/main | 2023-02-20T07:55:19.835352 | 2021-01-15T07:36:51 | 2021-01-15T07:36:51 | 319,856,949 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,891 | r | Step2_SJmaker.R | library(Rsamtools)
library(stringr)
# Split a CIGAR string (e.g. "10M55N40M") into a data frame with one row per
# operation: `value` holds the run length (as character, matching the
# original behaviour) and `class` holds the single-character operation code
# (M, N, D, S, =, X, ...).
#
# Rewritten from a char-by-char accumulator loop that grew the data frame
# one row at a time (O(n^2) and dependent on stringr::str_split) into a
# single vectorized base-R regex pass; malformed input now yields a 0-row
# data frame instead of an assignment error.
cigarcleaner <- function(cigarunsplitted){
  # Each CIGAR operation is a run of digits followed by one non-digit code.
  ops <- regmatches(cigarunsplitted,
                    gregexpr("[[:digit:]]+[^[:digit:]]", cigarunsplitted))[[1]]
  data.frame(value = sub(".$", "", ops),          # drop the trailing op code
             class = sub("^[[:digit:]]+", "", ops), # drop the leading digits
             stringsAsFactors = FALSE)
}
# Given a parsed CIGAR (data frame with `value`/`class` columns, as produced
# by cigarcleaner) and the read's alignment start position, return one
# "start-end" string per splice junction (N operation). The junction start
# is the alignment start plus the reference bases consumed before the N
# (M, D, =, X and earlier N all consume reference); the end adds the N length.
coordinateofcigarcalculator <- function(case_of_cigar, case_of_startread){
  ref_consuming <- c("M", "D", "=", "X", "N")
  n_rows <- grep("N", case_of_cigar$class)
  junctions <- character(length(n_rows))
  for (k in seq_along(n_rows)) {
    idx <- n_rows[k]
    prior <- case_of_cigar[seq_len(idx - 1), , drop = FALSE]
    offset <- sum(as.numeric(prior$value[prior$class %in% ref_consuming]))
    sj_start <- as.numeric(case_of_startread) + offset
    sj_end <- sj_start + as.numeric(case_of_cigar$value[idx])
    junctions[k] <- paste0(sj_start, "-", sj_end)
  }
  junctions
}
# Driver: for every *.bam file under args[1], extract splice-junction (N)
# coordinates from the read CIGARs and write a per-file junction count
# table whose path is args[2] + the BAM file name.
errors = c()
args <- commandArgs(trailingOnly=TRUE)
bamlist = list.files(args[1])
bamlist = bamlist[grep("bam", bamlist)]
bamlist = bamlist[grep("bai", bamlist, invert = T)]
# NOTE(review): if no BAM files remain, 1:length(bamlist) is c(1, 0) and the
# loop still executes -- guard with seq_along() if that can happen.
for(Bamsample in 1: length(bamlist)){
  # Check the BAM magic bytes before handing the file to scanBam.
  bgzf_stream = gzfile(paste0(args[1],bamlist[Bamsample]), 'r')
  magic = readChar(bgzf_stream, 4)
  if(identical(magic, 'BAM\1')){
    aln <- scanBam(paste0(args[1],bamlist[Bamsample]))
    N_ones = grep("N", aln[[1]]$cigar)
    #selection of the cases with N (reads whose CIGAR contains a splice)
    N_chrom = aln[[1]]$rname[N_ones]
    N_cigars = aln[[1]]$cigar[N_ones]
    N_startreads = aln[[1]]$pos[N_ones]
    N_strands = aln[[1]]$strand[N_ones]
    Global_SJ_coordinates = c()
    if(length(N_ones)> 0){
      # Collect "chrom:start-end" strings, one per junction per read.
      for(SJcont in 1: length(N_ones)){
        case_of_cigar = cigarcleaner(N_cigars[SJcont])
        case_of_startread = N_startreads[SJcont]
        case_of_strands = N_strands[SJcont]
        Global_SJ_coordinates = c(Global_SJ_coordinates,apply(cbind(N_chrom[SJcont],coordinateofcigarcalculator(case_of_cigar, case_of_startread)),1, paste0, collapse=":") )
      }
      #the warnings are caused by turning a character into a number. I am using it on purpose to separate numbers and characters from cigar
    }
    # One output row per unique junction with its supporting-read count.
    write.table( data.frame(table(Global_SJ_coordinates)), paste0(c(args[2],bamlist[Bamsample]), collapse = "" ), col.names = F, row.names = F, quote = F)
  } else {
    # Not a BAM file: remember its index for later inspection.
    errors = c(errors,Bamsample )
  }
  close(bgzf_stream)
}
|
6ce8b4f1bad5e28d3499d5a88cace185f389685b | 39e0322af1bc2ff6711a051bc984ca38a1e6152e | /gg/ggplot_04-03_geometries_line-plots.R | 8689f8ad890b31cdde306a4c1a266824eb497e5c | [
"MIT"
] | permissive | achungerford/Datacamp | eebf1a3d8f00b9d40f81ede1bf85b65769822f5a | 400f55a24c0588698f672c1e757ce6021e9988df | refs/heads/master | 2021-06-25T07:04:25.771035 | 2019-07-27T01:10:55 | 2019-07-27T01:10:55 | 150,895,641 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,547 | r | ggplot_04-03_geometries_line-plots.R | #
# author: Alexander C. Hungerford
#
# created: 28 November 2018
#
# title: Datacamp
#        Data visualization with ggplot2 (Part 1)
#        Ch. 4 - geometries
#        section 3: line plots
#
# Course exercise script. Draws time-series line plots from ggplot2's
# built-in `economics` data set plus course data loaded from .RData files.
# NOTE(review): the load() calls expect the .RData files in the working
# directory; `recess` and `fish.species` come from those files.
library(tidyverse)
data("mtcars")
load("gg_recess.RData")
load("gg_fish.RData")
load("gg_iris.RData")
###############################################################################
# Exercise: Line plots
# Print out head of economics
head(economics)
# Plot unemploy as a function of date using a line plot
ggplot(economics, aes(x = date, y = unemploy)) + geom_line()
# Adjust plot to represent the fraction of total population that is unemployed
ggplot(economics, aes(x = date, y = unemploy/pop)) + geom_line()
###############################################################################
# Exercise: Periods of recession.
# Basic line plot
ggplot(economics, aes(x = date, y = unemploy/pop)) +
  geom_line()
# Expand the following command with geom_rect() to draw the recess periods
# (geom_rect is listed first so the shaded bands sit behind the line).
ggplot(economics, aes(x = date, y = unemploy/pop)) +
  geom_rect(data = recess,
            aes(xmin = begin, xmax = end, ymin = -Inf, ymax = +Inf),
            inherit.aes = FALSE, fill = "red", alpha = 0.2) +
  geom_line()
# The geom_rect() command shouldn't inherit aesthetics from the base ggplot()
# command it belongs to. It would result in an error, since you're using
# a different dataset and it doesn't contain unemploy or pop. That's why you
# should specify inherit.aes = FALSE in geom_rect().
###############################################################################
# Exercise: Multiple time series, part 1
# Check the structure as a starting point
str(fish.species)
# Each variable (column) is a Salmon Species
# each observation (row) is one Year
# To get a multiple time series plot, however, both Year and Species
# should be in their own column. You need tidy data: one variable per column.
# Once you have that you can get the plot shown in the viewer by mapping Year
# to the x aesthetic and Species to the color aesthetic.
# gather() takes four arguments: the original data frame (fish.species),
# the name of the key column (Species), the name of the value column (Capture)
# and the name of the grouping variable, with a minus in front (-Year).
# They can all be specified as object names (i.e. no "").
# Use gather to go from fish.species to fish.tidy
fish.tidy <- gather(fish.species, Species, Capture, -Year)
str(fish.tidy)
ggplot(fish.tidy, aes(x = Year, y = Capture, color = Species)) + geom_line()
8694f56aec7000820b7f6d213e4138f760349c55 | f8731866d3dfc0d36cd7109ac72a270417372796 | /Boxplots/TaxasReprovacaoAbandono.R | 864c55e98415db2ab02d2ef7e633a63513088f49 | [] | no_license | supervedovatto/AnexoA | 68f58e0ebd4cabb87d4b61cda0c49af612d06099 | d330ce98c4d94ceeb6504ac2a3dca9e3cca50537 | refs/heads/master | 2023-04-30T07:38:28.522871 | 2021-05-11T16:37:22 | 2021-05-11T16:37:22 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 937 | r | TaxasReprovacaoAbandono.R | Dados <- Taxas %>%
filter(Ano >= "2013-01-01") %>%
mutate(Valor = Valor/100)
# Box plots of school failure/dropout rates per calendar year, one facet per
# rate type (`Taxa`), with the reference locality overlaid as a connected
# line of points on top of the distribution.
# NOTE(review): depends on `Dados`, `mypallete` and `LocRef` created earlier
# in this script; `Valor` is assumed to already be a decimal fraction.
TaxasReprovacaoAbandono <- Dados %>%
  # One box per calendar year on the x axis.
  ggplot(aes(x = factor(lubridate::year(Ano)), y = Valor)) +
  geom_boxplot(color = mypallete[1]) +
  # Highlight the reference locality as points...
  geom_point(data = filter(Dados,Localidade == LocRef$Localidade),
             aes(x = factor(lubridate::year(Ano)), y = Valor, group = 1), color = mypallete[2], size = 2) +
  # ...connected by a line across the years (group = 1 joins the factor levels).
  geom_line(data = filter(Dados,Localidade == LocRef$Localidade),
            aes(x = factor(lubridate::year(Ano)), y = Valor, group = 1), color = mypallete[2], size = 1) +
  scale_color_manual(values = mypallete) +
  # y axis rendered as percentages, anchored at zero.
  scale_y_continuous(labels = scales::percent_format(accuracy = 2),limits = c(0,NA)) +
  theme_bw() +
  labs(y = "Taxas",x="",
       caption = "Fonte: Elaborado pelo núcleo de base do OMT/GYN a partir de dados do BDE/IMB, com acesso em 19/03/2020.") +
  facet_wrap(~Taxa,ncol = 1,scales = "free_y")
|
b43400352e28ac270b38799b52d89abcc183e0b0 | 70a7ea1267bebcca99d74d8d3237afd7f0500053 | /R/get_cluster.R | 971bb8b757bce0b6997e2ef785f70b393c1b21f6 | [
"MIT"
] | permissive | s3alfisc/summclust | 91bed1c958bc6e0d0b6fbaab630f021f0cdb41b7 | 2d25c68eb046028b680fdc8c9827820f7e791827 | refs/heads/main | 2023-06-25T05:09:55.973582 | 2023-06-11T16:55:10 | 2023-06-11T16:55:10 | 504,094,498 | 5 | 2 | NOASSERTION | 2023-06-12T21:38:15 | 2022-06-16T09:33:24 | R | UTF-8 | R | false | false | 4,345 | r | get_cluster.R | get_cluster <-
function(object,
         cluster,
         N,
         call_env) {
  #' function creates a data.frame with cluster variables
  #'
  #' @param object An object of type lm, fixest, felm or ivreg
  #' @param cluster the name of the cluster variable(s) as
  #' a character vector
  #' @param N the number of observations used in the bootstrap
  #' @param call_env the environment in which the 'object' was evaluated
  #'
  #' @noRd
  #'
  #' @return a list, containing a data.frame of the
  #' cluster variables
  #'
  #' @importFrom stats update
  #'
  # ----------------------------------------------------------------------- #
  # Note: a large part of the following code was taken and adapted from the
  # sandwich R package, which is distributed under GPL-2 | GPL-3
  # Zeileis A, Köll S, Graham N (2020). "Various Versatile Variances:
  # An object-Oriented Implementation of Clustered Covariances in R."
  # _Journal of Statistical Software_, *95*(1), 1-36.
  # doi: 10.18637/jss.v095.i01 (URL: https://doi.org/10.18637/jss.v095.i01).
  # changes by Alexander Fischer:
  # no essential changes, but slight reorganization of pieces of code
  # `cluster` must arrive as a one-sided formula, e.g. ~ firm + year.
  dreamerr::check_arg(cluster, "formula")
  clustid_fml <- cluster
  clustid_char <- all.vars(cluster)
  # Step 1: create cluster df
  # drop all variables except an intercept
  # so that none of them are created in the expand.model.frame call
  # later
  manipulate_object <- function(object){
    if(inherits(object, "fixest")){
      # fixest models may carry fixed effects; keep the "| fe" part intact
      # when refitting with an intercept-only right-hand side.
      if(!is.null(object$fixef_vars)){
        update(object, . ~ + 1 | . + 1)
      } else {
        update(object, . ~ + 1 )
      }
    } else {
      update(object, . ~ +1)
    }
  }
  # Rebuild the model frame of the stripped-down model, extended with the
  # cluster variables, evaluated in the environment the model was fit in.
  cluster_tmp <-
    if ("Formula" %in% loadedNamespaces()) {
      ## FIXME to suppress potential warnings due to | in Formula
      suppressWarnings(
        expand.model.frame(
          model =
            manipulate_object(object),
          extras = clustid_fml,
          na.expand = FALSE,
          envir = call_env
        )
      )
    } else {
      expand.model.frame(
        model =
          manipulate_object(object),
        extras = clustid_fml,
        na.expand = FALSE,
        envir = call_env
      )
    }
  cluster_df <-
    model.frame(clustid_fml, cluster_tmp, na.action = na.pass)
  # data.frames with clusters, bootcluster
  cluster <- cluster_df[, clustid_char, drop = FALSE]
  if(inherits(object, "fixest")){
    # fixest silently drops some observations (e.g. separated FE levels);
    # align the cluster rows with the estimation sample.
    if(N != nrow(cluster)){
      cluster <- cluster[unlist(object$obs_selection), , drop = FALSE]
    }
  }
  if(inherits(object, "lm")){
    ## handle omitted or excluded observations (works for lfe, lm)
    if ((N != NROW(cluster)) &&
        !is.null(object$na.action) &&
        (class(object$na.action) %in% c("exclude", "omit"))) {
      cluster <- cluster[-object$na.action, , drop = FALSE]
    }
  }
  # After the alignment steps, the cluster data must match the sample size.
  if (NROW(cluster) != N) {
    rlang::abort(
      "The number of observations in 'cluster' and 'nobs()' do not match",
      use_cli_format = TRUE
    )
  }
  if (any(is.na(cluster))) {
    rlang::abort(
      "`vcov_CR3J()` cannot handle NAs in `cluster` variables that are not
        part of the estimated model object.",
      use_cli_format = TRUE
    )
  }
  clustid_dims <- length(clustid_char)
  # Convert non-numeric cluster variables (factors etc.) to character so
  # the interaction columns built below paste cleanly.
  i <- !vapply(cluster, is.numeric, logical(1))
  cluster[i] <- lapply(cluster[i], as.character)
  # taken from multiwayvcov::cluster.boot
  # Build all non-empty subsets of the clustering dimensions; each subset
  # defines one (possibly intersected) clustering level.
  acc <- list()
  for (i in 1:clustid_dims) {
    acc <-
      append(acc, utils::combn(1:clustid_dims, i, simplify = FALSE))
  }
  # Inclusion-exclusion signs: +1 for odd-sized subsets, -1 for even-sized.
  vcov_sign <- vapply(acc, function(i) {
    (-1)^(length(i) + 1)
  }, numeric(1))
  # Drop the singleton subsets -- those columns already exist in `cluster`.
  acc <- acc[-1:-clustid_dims]
  if (clustid_dims > 1) {
    # Append one interaction column per multi-way subset, named by pasting
    # the names of the constituent cluster variables.
    for (i in acc) {
      cluster <- cbind(cluster, Reduce(paste, cluster[, i]))
      names(cluster)[length(names(cluster))] <-
        Reduce(paste, names(cluster[, i]))
    }
  }
  # Number of distinct clusters per clustering level.
  N_G <- vapply(cluster, function(x) {
    length(unique(x))
  }, numeric(1))
  res <- list(
    vcov_sign = vcov_sign,
    clustid_dims = clustid_dims,
    cluster_df = cluster,
    N_G = N_G,
    cluster_names = names(cluster)
  )
  res
}
|
6c302efc86f505e8bdcf4a26956dd745ece703c4 | e907e2dbd415ad7e733a774e4b72322f70f6f28e | /R/getUrlListByCategory.R | 995d1149635ab4370bfb431e9b0ba5a8e75162a8 | [
"MIT"
] | permissive | ilyeong-ai/N2H4 | fc5826bc2f3fcd3f36230b2e5503132ce7e32575 | 46bb3608d32d77822bfd748decd3b9ed19349588 | refs/heads/master | 2023-06-11T10:46:29.289974 | 2016-11-17T18:35:16 | 2016-11-17T18:35:16 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,136 | r | getUrlListByCategory.R | #' Get Url List By Category
#'
#' Get naver news titles and links from target url.
#'
#' @param turl is target url naver news.
#' @return Get data.frame(news_title, news_links).
#' @export
#' @import xml2
#' @import rvest
#' @import stringr
getUrlListByCategory <- function(turl = url) {
  # Scrapes one Naver news category page and returns a data frame with
  # columns news_title and news_links.
  tem <- read_html(turl)
  # Headline anchors: the text is the title, the href the article link.
  news_title <- tem %>% rvest::html_nodes("dt a") %>% rvest::html_text()
  Encoding(news_title) <- "UTF-8"
  # Photo-teaser anchors duplicate a headline; collect them for removal.
  rm_target <- tem %>% rvest::html_nodes("dt.photo a") %>% rvest::html_text()
  Encoding(rm_target) <- "UTF-8"
  news_links <- tem %>% rvest::html_nodes("dt a") %>% rvest::html_attr("href")
  news_lists <- data.frame(news_title = news_title, news_links = news_links, stringsAsFactors = F)
  # Trim whitespace and drop entries whose title is empty.
  news_lists$news_title <- str_trim(news_lists$news_title, side="both")
  news_lists <- news_lists[nchar(news_lists$news_title) > 0,]
  rm_target <- str_trim(rm_target, side="both")
  rm_target <- rm_target[nchar(rm_target) > 0]
  if (!identical(paste0(rm_target, collapse = " "), "")) {
    # Match the duplicated photo headline literally (fixed = TRUE), since
    # titles can contain regex metacharacters.
    dup_rows <- grep(rm_target[1], news_lists$news_title, fixed = TRUE)
    # BUG FIX: the original always did news_lists[-grep(...), ]; when grep()
    # matched nothing, -integer(0) selected zero rows and every article was
    # silently dropped. Only subset when there is something to remove.
    if (length(dup_rows) > 0) {
      news_lists <- news_lists[-dup_rows, ]
    }
  }
  return(news_lists)
}
|
34908def7c14c963153c680894fd4f30bef191ed | 181509c405fe09389ad16a78ec972f9063bead65 | /company.R | e5a20e48bdee1baa5606ed72ac75c11feff7e716 | [] | no_license | justdevelopingstuff/cli-invoicing-app | fecfb3bd24778610acf2e8bd91e7d6381e93f3b5 | 5ee8a6ea99b9eb94d4ffe9566655473992a965f3 | refs/heads/master | 2021-01-12T06:47:02.153535 | 2017-06-15T06:44:38 | 2017-06-15T06:44:38 | 76,827,323 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 372 | r | company.R | database <- read.csv("company_database.csv")
# Resolve the user's company choice and export it for the next pipeline step.
# NOTE(review): `database` is created at the top of this script from
# company_database.csv; company.txt is expected to hold the 1-based row
# number the user picked from the unique-company listing.
selection <- read.table("company.txt")
# One row per distinct company name, in first-appearance order.
companies <- as.data.frame(unique(database$company))
# selection[1] is the index of the chosen company; coerce to plain character.
chosen <- as.character(companies[as.numeric(selection[1]),])
write.csv(chosen, "company.csv", row.names = FALSE)
# All database rows belonging to the chosen company.
company <- subset(database, subset = database$company == chosen)
write.csv(company, "selcompany.csv", row.names = FALSE)
|
b61528cdbc8490114b47138c12a27b5e984fe608 | e5102597e47c4e4077aaffab3b6c063a5b526af5 | /plot3.R | 552a73f0af08720de18adadb66475eb38351e84f | [] | no_license | knjenga/ExData_Proj1 | a4dec46e7008427528388fd495570324a19e5706 | 66e6eb2428834a80ebc74bbd8fe86a4998631ee3 | refs/heads/master | 2021-01-01T16:00:22.570945 | 2015-03-07T20:16:53 | 2015-03-07T20:16:53 | 31,825,597 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,783 | r | plot3.R | # This srcipt is an Exploratory Data Analysis script
# The script evaluates a single households electricity house usage for two days in a year
# The script creates an Exploratory line graph of the sub meter reading instaled at the house
#
#Load the required library functions
library(dplyr)
# The function assumes that the input are in the working directory
# Get the users working directory
file_dir <- getwd()
# Create the file name
file_location <- paste0(file_dir,"/household_power_consumption.txt")
# Read the whole data set
edata <- read.table(file_location,header = TRUE,sep=";",na.strings = "?",dec = ".")
# filter the data to just the required two days of intrest
gdata <- filter(edata, Date == "1/2/2007"|Date == "2/2/2007")
# create a new field that holds both the data and time
gdata$DateTime <- strptime(paste(gdata$Date,gdata$Time,sep=" "),format="%d/%m/%Y %H:%M:%S")
# format the Date and time fields in from text to date fields
gdata$Date <- as.Date(gdata$Date,format="%d/%m/%Y")
gdata$Time <- as.Date(gdata$Time,format="%H/%M/%S")
#plot the Global active power line graph for diffrent types of the days
par(3,4,1,1)
#get the axis range ranges for the graph
xrange <- range(gdata$DateTime)
yrange <- range(gdata$Sub_metering_1)
plot(xrange,yrange,xlab="",ylab="Energy sub metering",type="n")
lines(gdata$DateTime,gdata$Sub_metering_1, col="black",lty=1)
lines(gdata$DateTime,gdata$Sub_metering_2, col="red",lty=1)
lines(gdata$DateTime,gdata$Sub_metering_3, col="blue",lty=1)
# format the graphs legend
legend("topright",
col=c("black","red","blue"),
c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
cex=0.7,
lty=1
)
# copy the graph to a png file called plot3.png
dev.copy(png,file ="plot3.png",height=480,width=580)
dev.off() |
e22b83311af43af4cf468ef5754fc137867a3f25 | 5cad1fb4d5af966bd0014c9bdd67087befffd953 | /cachematrix.R | ea892c5da75eaa7c6dfdaa87771eae1915a0e55f | [] | no_license | ptricker/ProgrammingAssignment2 | 7d8086657922353bbc12f5111086e23afbfa349c | 81bb0d64ad2b2d6b0f1741dbb8e0329c220d3589 | refs/heads/master | 2021-01-18T11:01:42.511545 | 2014-06-16T10:05:51 | 2014-06-16T10:05:51 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,323 | r | cachematrix.R | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## This function assigns a list of four functions to x
## First it sets the value of the matrix then it gets it
## Then it sets the value of the inverse of the matrix then it gets it.
## Build a matrix wrapper that can memoise its inverse.
## Returns a list of four closures sharing the enclosed state:
##   set(y)       - replace the stored matrix and drop any cached inverse
##   get()        - return the stored matrix
##   setinv(inv)  - store a computed inverse
##   getinv()     - return the cached inverse, or NULL if none is stored
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    # A new matrix invalidates whatever inverse was cached before.
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setinv <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinv <- function() {
    cached_inverse
  }
  list(
    set = set,
    get = get,
    setinv = setinv,
    getinv = getinv
  )
}
## Write a short comment describing this function
##This function returns the inverse of a matrix created with makeCacheMatrix.
## If the inverse has already been calculated it returns the cached value.
## If it hasn't been calculated on the same matrix it calculates it, then caches it and then
## returns the inverse of the matrix.
cacheSolve <- function(x, ...) {
  ## Return the inverse of the matrix wrapped by 'x' (a cache object built
  ## by makeCacheMatrix). The first call computes the inverse with solve()
  ## and stores it via x$setinv(); later calls return the stored value.
  ##
  ## Args:
  ##   x:   cache object exposing get(), getinv() and setinv().
  ##   ...: further arguments forwarded to solve().
  ##
  ## Returns: a matrix that is the inverse of the matrix held by 'x'.
  m <- x$getinv()
  if(!is.null(m)) {
    message("getting cached data")
    return(m)
  }
  ## Cache miss: fetch the matrix once and invert it. (The original called
  ## x$get() a second time, never used `data`, and dropped `...` instead of
  ## forwarding it to solve().)
  data <- x$get()
  m <- solve(data, ...)
  x$setinv(m)
  m
}
|
2e094d68a25bc5bb5962c7eba6d813b084b8a20b | baeb6f631cce546c005cb1054184dc8c2f808043 | /R/getLossFoodGroup.R | 5ac54d7350fd59fab2f7282fcb5849c21d9ef12c | [] | no_license | AEENRA/faoswsLoss | ccd5401fcab754d786a3d6c24f41b2e6231e63e4 | 20a17e9919e37577472eaeb8dcc801ff85dafa8d | refs/heads/master | 2021-06-12T09:35:20.169083 | 2017-01-18T14:02:21 | 2017-01-18T14:02:21 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,216 | r | getLossFoodGroup.R | ##' Get Loss Food Group
##'
##' Function to load the loss food group classification
##'
##' @export
getLossFoodGroup = function(){
  ## Loads the loss food-group classification and keys it by commodity codes.
  ## The value of the final `:=` call (a data.table with measuredItemFCL,
  ## foodGroupName, foodGeneralGroup, foodPerishableGroup and
  ## measuredItemCPC) is returned invisibly.
  ## NOTE(review): relies on `lossFoodGroupData` being available in scope,
  ## plus addHeadingsFCL() and faoswsUtil::fcl2cpc().
  ## update in SWS to match contents of CSV file
  ## see functions decribed using ?faosws::SaveDatatable
  ## lossFoodGroup = ReadDatatable(table = "loss_food_group")
  ## lossFoodGroup = data.table(read.csv(file.path("data-raw", "foodPerishableGroup.csv"))) %>%
  ##   select(FCL..Item..code.,FCL..Title,Group.Name,P.D,FBS..GROUP.Number,PERISHABLE) %>%
  ##   filter(PERISHABLE != "")
  ## data("lossFoodGroupData", package = "faoswsLoss", envir = environment())
  lossFoodGroup <- lossFoodGroupData
  # Replace the raw CSV headers with standard SWS column names (by position).
  setnames(lossFoodGroup,
           old = colnames(lossFoodGroup),
           new = c("measuredItemFCL", "measuredItemNameFCL", "foodGroupName",
                   "foodGeneralGroup", "measuredItemFBS", "foodPerishableGroup"))
  # Keep only the columns the loss module needs.
  lossFoodGroup =
    lossFoodGroup[, list(measuredItemFCL, foodGroupName,
                         foodGeneralGroup, foodPerishableGroup)]
  ## Adding headings to FCL codes
  # `:=` modifies lossFoodGroup by reference (data.table semantics).
  lossFoodGroup[, measuredItemFCL := addHeadingsFCL(measuredItemFCL)]
  # Map FCL codes to CPC codes for joining with CPC-keyed data sets.
  lossFoodGroup[, measuredItemCPC := faoswsUtil::fcl2cpc(as.character(measuredItemFCL))]
}
|
0403746d780cce6767f255ff1bba20b7ced4e759 | b4fd4d908d8a1e6d5926f1b27ba5ebb8373ee5d0 | /script.r | 0c1ded46687950372f72965903f19f110bfada0a | [] | no_license | JonMinton/d_with_shp | ac18dcee2b0ee31e5cd6637fb8a10d8432500155 | 278495de885cf8ae2f613d66b0ddd39f8aee1ae8 | refs/heads/master | 2019-01-02T08:48:26.482365 | 2015-05-11T15:53:27 | 2015-05-11T15:53:27 | 27,070,229 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,950 | r | script.r | rm(list=ls())
#####################################################################################################
# Launcher: attaches the packages the Shiny app depends on, then starts it.
# library() (unlike require()) fails fast with an error when a package is
# missing, instead of returning FALSE and letting the app crash later.
# NOTE: plyr is deliberately attached before dplyr so that dplyr's verbs
# mask plyr's, not the other way around.
library(reshape2)
library(plyr)
library(stringr)
library(ggplot2)
library(maptools)
library(grid)
library(spdep)
library(Rcpp)
library(MASS)
library(CARBayes)
library(shiny)
library(dplyr)
runApp("select_num_denom")
#
# ###################################################################################################
# # scripts
# source("scripts/functions/binomial_iar_car.r")
# source("scripts/functions/D_compute.r")
# sourceCpp("scripts/functions/cppfunctions.cpp")
#
#
# #################################################################################################
# # Specify clean theme
# theme_clean <- function(base_size=12){
# theme_grey(base_size) %+replace%
# theme(
# axis.title=element_blank(),
# axis.text=element_blank(),
# panel.background=element_blank(),
# panel.grid=element_blank(),
# axis.ticks.length=unit(0, "cm"),
# axis.ticks.margin=unit(0, "cm"),
# panel.margin=unit(0, "lines"),
# plot.margin=unit(c(0,0,0,0), "lines"),
# complete=TRUE
# )
# }
#
#
#
# ########################################################################
# data_option <- "working_age_population"
#
#
# #################################################################################################
# # DATA MANAGEMENT
# # Load data for n/N
#
# if (data_option=="working_age_population"){
# attribute_data <- read.csv("data/working_age_people_1996.csv")
# attribute_data <- rename(attribute_data, replace=c("workingage_count" = "numerator_count"))
# } else {
# if (data_option=="council_houses"){
# attribute_data <- read.csv("data/council_houses_2011.csv")
# attribute_data <- rename(attribute_data, replace=c("councilhouse_count"="numerator_count"))
# }
# }
#
#
# # Load shapefiles
# datazones_shp <- readShapeSpatial(
# "shp/scotland_2001_datazones/scotland_dz_2001.shp"
# )
#
# # add example_pop as to data slot in datazone_shp here?!
# # If so, how?
#
# datazones_shp@data <- rename(datazones_shp@data, replace=c("zonecode"="datazone"))
#
# datazones_shp@data <- join(
# datazones_shp@data,
# attribute_data,
# type="inner"
# )
#
# datazones_shp <- datazones_shp[duplicated(datazones_shp@data$datazone)==F,]
#
# datazones_shp <- datazones_shp[datazones_shp@data$total_count > 0,]
#
#
# # uses code from spdep
#
# ## Create the neighbourhood matrix
#
# W_nb <- poly2nb(datazones_shp)
# W_mat <- nb2mat(W_nb, style="B", zero.policy=TRUE)
#
#
# #####################################################################################
# #####################################################################################
#
# ## Run the Bayesian model
#
# # in the latest version of CARBayes
# # the function
# # binomial.iarCAR
# # has been replaced with
# # iarCAR.re
# # with the argument
# # family="binomial"
#
# D_classical <- Dissimilarity.compute(
# minority=datazones_shp@data$numerator_count,
# total=datazones_shp@data$total_count
# )
#
#
#
# model <- iarCAR.re(
# formula = numerator_count ~ 1,
# trials = datazones_shp@data$total_count,
# W=W_mat,
# data=datazones_shp@data,
# family="binomial"
# )
#
# posterior.D <- array(NA, c(1000))
# for(k in 1:1000){
# p.current <- exp(
# model$samples$phi[k ,] + model$samples$beta[k,1]
# ) / (
# 1 + exp(
# model$samples$phi[k ,] + model$samples$beta[k,1]
# )
# )
#
# p.current.overall <- sum(
# p.current * datazones_shp@data$total_count
# ) / sum(
# datazones_shp@data$total_count
# )
#
# posterior.D[k] <- sum(
# datazones_shp@data$total_count * abs(p.current - p.current.overall)
# ) / (
# 2 * sum(datazones_shp@data$total_count) * p.current.overall * (1-p.current.overall))
#
# }
#
#
# Dbayes <- round(quantile(posterior.D, c(0.5, 0.025, 0.975)),4)
#
# ##########################
# #### Set up the simulation
# ##########################
#
#
#
# seed_value <- 20
# mean_value <- mean(example_pop$proportion)
# sd_value <- sd(example_pop$proportion)
# n_area <- nrow(example_pop)
# # to begin with assume no correlation
# sigma <- diag(1, nrow=n_area, ncol=n_area)
#
#
# mean.logit <- log(mean_value / (1 - mean_value))
#
# logit.probs <- mvrnorm(
# n=1,
# mu=rep(mean.logit, n_area),
# Sigma=(sd_value*sigma)
# )
#
# probs <- exp(logit.probs) / (1 + exp(logit.probs))
#
#
# y <- rbinom(
# n=n.area,
# size=rep(example_pop$total_count, n_area),
# prob=probs
# )
#
# N <- rep(input$n.population,n.area)
# x.true <- round(probs() * N, 0)
# probs.overall <- sum(x.true) / sum(N)
# Dtrue <- sum(N * abs(probs() - probs.overall)) / (2 * sum(N) * probs.overall * (1-probs.overall))
## Run the classical method
#
#
#
#
# iarCAR.re
# model <- binomial.iarCAR(formula=data()~1, trials=N, W=W, burnin=1000, n.sample=2000)
# posterior.D <- array(NA, c(1000))
# for(k in 1:1000){
# p.current <- exp(model$samples$phi[k ,] + model$samples$beta[k,1]) / (1 + exp(model$samples$phi[k ,] + model$samples$beta[k,1]))
# p.current.overall <- sum(p.current * rep(input$n.population,n.area)) / sum(rep(input$n.population,n.area))
# posterior.D[k] <- sum(rep(input$n.population,n.area) * abs(p.current - p.current.overall)) / (2 * sum(rep(input$n.population,n.area)) * p.current.overall * (1-p.current.overall))
# }
#
# Dbayes <- round(quantile(posterior.D, c(0.5, 0.025, 0.975)),4)
#
# ## Save the results
# results2 <- array(NA, c(2,3))
# rownames(results2) <- c("Classical results", "Bayesian results")
# colnames(results2) <- c("", "", "")
# results2[1 , ] <- Dclassical
# results2[2 , ] <- Dbayes
# results2 <- round(results2, 4)
#
# results1 <- Dtrue
# results <- list(results1, results2)
# names(results) <- c("True value of D", "Estimated values of D")
# results
|
93644fed827e0cbdc077963822d280d328d998c2 | 1b8e377dbd6ae1a43f41d061b687f08d418be8f9 | /scripts/S13_gal2_phylogenetics.R | 879b0067fa480ee3de193840a16454a72d6f7c62 | [] | no_license | theboocock/ancient_bal_scripts | 05b466cf93b1d565d61c2ab2e3b1e596a3480c3b | 1feb21de62bda07d328db08a1863c518f4954fb1 | refs/heads/main | 2023-04-07T23:57:55.883197 | 2022-03-04T00:53:40 | 2022-03-04T00:53:40 | 306,750,947 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,193 | r | S13_gal2_phylogenetics.R | library(ggtree)
library(phangorn)
library(treeio)
library(tidyverse)
# Figure S13a: sliding-window sequence similarity along GAL2 (panel 1) plus a
# gene-structure arrow (panel 2), written to figures/S13a.svg.
sliding_divergence = read.table("data/popgen_in/sliding_divergence_noah.txt", header=F)
#sliding_divergence %>% ggplot(aes(y=1-V3,x=V4 ,color=V1)) + geom_line()
# Protein-domain table; positions arrive in amino acids and are converted to
# nucleotide coordinates below (aa position p -> nt position 3p - 2).
gal2_prot = read.table("data/popgen_in/gal2_protein_structure.txt", header=F,stringsAsFactors = F)
gal2_prot$domain = gal2_prot$V3
gal2_prot$start = gal2_prot$V4 * 3 -2
gal2_prot$end = gal2_prot$V5 * 3 -2
# Human-readable domain labels for the figure legend.
gal2_prot$domain[1] = "N-terminal Cytosolic"
gal2_prot$domain[gal2_prot$domain == "TMhelix"] = "Transmembrane helix"
gal2_prot$gene = "GAL2"
gal2_prot$domain[gal2_prot$domain == "inside"] = "Cytosolic"
gal2_prot$domain[gal2_prot$domain == "outside"] = "Extracellular"
gal2_prot$domain[nrow(gal2_prot)] = "C-terminal Cytosolic"
# Shift every segment after the first by one codon so adjacent segments do
# not overlap (the first segment only gets its end extended).
for(i in 1:nrow((gal2_prot))){
  if( i==1){
    gal2_prot$end[i] = gal2_prot$end[i] +3
  }else{
    gal2_prot$start[i] = gal2_prot$start[i] + 3
    gal2_prot$end[i] = gal2_prot$end[i] + 3
  }
}
# One row spanning the whole gene, used for the outer arrow outline.
gal2_prot_all = gal2_prot[1,]
gal2_prot_all$end = tail(gal2_prot$end,n=1)
# Fix the legend/colour order of the four strains.
sliding_divergence$V1 = factor(sliding_divergence$V1,levels=c("gal2a_cbs","gal2a_chin","Suva_10.164","gal2a_baya"))
#sliding_divergence %>% filter(V2 == "MISSING") %>% ggplot(aes(y=1-V2,x=V3 ,color=V1)) + geom_line() + theme_bw()
#p1 = sliding_divergence %>% filter(V2 == "MISSING") %>% ggplot(aes(y=abs(V3 * 100),x=(V4 * 10)/3 ,color=V1)) + geom_line() + theme_bw() + ylab("Sequence similarity (%)") + geom_vline(xintercept = 68) +
#  xlab("Base position") + scale_color_manual(values = c("#e41a1c","#377eb8","#984ea3","#ff7f00")) + theme(legend.position = "none") + xlab("Position in gene") + coord_cartesian(xlim=c(0,566.66))
# Similarity panel; x is converted from window index to amino-acid position
# and the vertical line marks the aa 67/68 breakpoint.
p1 = sliding_divergence %>% filter(V2 == "MISSING") %>% ggplot(aes(y=abs(V3 * 100),x=(V4 * 10)/3 ,color=V1)) + geom_line() + theme_bw() + ylab("Sequence similarity (%)") + geom_vline(xintercept = 68) +
  xlab("Base position") + scale_color_manual(values = c("#e41a1c","#377eb8","#984ea3","#ff7f00")) + xlab("Position in gene") + coord_cartesian(xlim=c(0,566.66))
#ggplot(data=gal2_prot,aes(y=))
#ggplot(aes(y=))
# Collapse the structure to two regions: the N-terminal cytosolic tail vs
# the rest of the protein, drawn as sub-segments of one gene arrow.
gal2_prot$start_gene = gal2_prot_all$start
gal2_prot$end_gene = gal2_prot_all$end
gal2_prot$region = gal2_prot$domain
gal2_prot$region[1] = "N-terminal Cytosolic"
gal2_prot$region[2] = "Rest of the protein"
gal2_prot$end = tail(gal2_prot$end,n=1)
gal2_prot = head(gal2_prot,n=2)
# Gene-structure panel (gggenes-style arrow); coordinates divided by 3 to
# plot in amino acids, matching p1's x axis.
p3 = ggplot() + geom_subgene_arrow(arrowhead_height = unit(12,"mm"),arrowhead_width = unit(12,"mm"),arrow_body_height = unit(8,"mm"),data=gal2_prot,aes(xmin=start_gene/3,xmax=end_gene/3,xsubmin=start/3,xsubmax=end/3,y=gene,fill=domain)) +
  theme_bw() + theme(legend.position = "bottom") + xlab("Position in gene") + coord_cartesian(xlim=c(0,566.66)) + scale_fill_manual(values=c("#4daf4a","grey50"))
svg("figures/S13a.svg",width=5,height=3)
cowplot::plot_grid(p1,p3,nrow=2,rel_heights = c(1,1),axis="lr",align="v")
dev.off()
#strings = readAAStringSet("popgen_phylo_notebooks/data/ygob_w_cbs/R_codon_alignment.fasta")
#ggplot() + geom_subgene_arrow(data=gal2_prot,aes(xmin=start_gene/3,xmax=end_gene/3,xsubmin=start/3,xsubmax=end/3,y=gene,fill=domain)) +
#  theme_bw() + theme(legend.position = "bottom") + xlab("Position in gene") + coord_cartesian(xlim=c(0,566.66))
# Maximum-likelihood trees (RAxML, bootstrap support drawn at the nodes) for
# the GAL2 alignments: DNA vs protein, split at amino acid 67/68, plus the
# full-length alignments. Panels p1 and p4 are combined into figures/S13bc.svg.
# BUG FIX: the first read.raxml() result was assigned to "thatdna_align",
# so p1 was built from whatever "dna_align" happened to hold (an undefined
# or stale object) instead of the 1-67 DNA tree.
dna_align = read.raxml("data/trees//RAxML_bipartitionsBranchLabels.R_codon_alignment.fasta.raxml")
p1 = ggtree(dna_align) + geom_tiplab()+ geom_nodelab(aes(label=bootstrap,x=branch),vjust=-0.5) + theme_tree2() + ggtitle("DNA (amino acids 1-67)")
dna_align = read.raxml("data/trees/RAxML_bipartitionsBranchLabels.R_codon_alignment.aa.raxml")
p2 = ggtree(dna_align) + geom_tiplab()+ geom_nodelab(aes(label=bootstrap,x=branch),vjust=-0.5) + theme_tree2() + ggtitle("Protein (amino acids 1-67)")
dna_align = read.raxml("data/trees/RAxML_bipartitionsBranchLabels.third.fasta.raxml")
p3= ggtree(dna_align) + geom_tiplab()+ geom_nodelab(aes(label=bootstrap,x=branch),vjust=-0.5) + theme_tree2() + ggtitle("DNA (amino acids 1-67) 3-fold sites")
dna_align = read.raxml("data/trees/RAxML_bipartitionsBranchLabels.R_codon_alignment_end.fasta.raxml")
p4 = ggtree(dna_align) + geom_tiplab()+ geom_nodelab(aes(label=bootstrap,x=branch),vjust=-0.5) + theme_tree2() + ggtitle("DNA (amino acids 68-575)")
dna_align = read.raxml("data/trees/RAxML_bipartitionsBranchLabels.R_codon_alignment_end.aa.raxml")
p5 =ggtree(dna_align) + geom_tiplab()+ geom_nodelab(aes(label=bootstrap,x=branch),vjust=-0.5) + theme_tree2() + ggtitle("Protein (amino acids 68-575)")
# NOTE(review): p6 reads the ".aa" (protein) alignment but is titled
# "DNA full length", while p7 reads the ".fasta" (DNA) alignment and is
# titled "Protein full length" -- the titles or the input files appear to be
# swapped relative to the pattern above; confirm against the figure legend.
dna_align = read.raxml("data/trees/RAxML_bipartitionsBranchLabels.R_codon_alignment_full.aa.raxml")
p6= ggtree(dna_align) + geom_tiplab()+ geom_nodelab(aes(label=bootstrap,x=branch),vjust=-0.5) + theme_tree2() + ggtitle("DNA full length")
dna_align = read.raxml("data/trees/RAxML_bipartitionsBranchLabels.R_codon_alignment_full.fasta.raxml")
p7= ggtree(dna_align) + geom_tiplab()+ geom_nodelab(aes(label=bootstrap,x=branch),vjust=-0.5) + theme_tree2() + ggtitle("Protein full length")
plots_dna = list(p1=p1,p2=p2,p3=p3,p4=p4,p5=p5,p6=p6,p7=p7)
cowplot::save_plot("figures/S13bc.svg",cowplot::plot_grid(p1,p4,nrow=1,rel_heights = c(1,1), align="v"), base_height = 5)
|
101955ad159989b77c685637575ed73c34314597 | 59b353f88282b1e5e4ee5c681f5d4883e5710c4b | /R/phylo__makeFastANIDistMatrix.R | e77da5efa2b48f0fa29a76276ffb9b6e7447f2fe | [
"Apache-2.0"
] | permissive | wanyuac/handyR | 189cc702473bee1a84a936395b36634dc44b9857 | 9052418c6c25f681b609dc7932e4b7cec349346d | refs/heads/master | 2023-02-22T08:08:23.821924 | 2023-02-18T13:04:33 | 2023-02-18T13:04:33 | 143,516,913 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,198 | r | phylo__makeFastANIDistMatrix.R | #' @title Make a distance matrix from FastANI output (tab-delimited file)
#' @description This function converts a FastANI output into a distance matrix.
#' @param f Path to the tab-delimited output file of FastANI
#' @param keep_asym A logical flag specifying whether to keep the original asymmetric distance matrix.
#' @param frac A logical flag specifying whether to convert percentages to decimal fractions. This option
#' does not affect the tree topology as the change is proportional.
#' @param suffix Filename extension of input FASTA files for FastANI, such as fna and fasta.
#' @return One or two n-by-n distance matrices (depending on keep_asym), where n denotes the number of genomes.
#' @author Yu Wan (\email{wanyuac@@126.com})
#' @export
#
# Copyright 2020-2022 Yu Wan <wanyuac@126.com>
# Licensed under the Apache License, Version 3.0
# Publication: 6 April 2020; last update: 24 May 2022
makeFastANIDistMatrix <- function(f, keep_asym = FALSE, frac = FALSE, suffix = "fasta") {
  # Read the first three columns of FastANI's tab-delimited output:
  # query path, reference path, ANI (a percentage).
  ani <- read.delim(file = f, header = FALSE, sep = "\t", stringsAsFactors = FALSE)[, 1 : 3]
  names(ani) <- c("Query", "Reference", "ANI")
  # Reduce FASTA paths to bare sample names (drop directories and ".suffix").
  ani$Query <- sapply(ani$Query, .extractSampleName, suffix)
  ani$Reference <- sapply(ani$Reference, .extractSampleName, suffix)
  ani$D <- 100 - ani$ANI  # Calculate distances from ANIs
  ani <- ani[, -3]  # Remove the column "ANI"
  if (frac) {
    ani$D <- ani$D / 100  # Convert percentages to decimal fractions
    precision <- 6  # Number of decimals to keep
  } else {
    precision <- 4  # The same as FastANI
  }
  ids <- sort(union(ani$Query, ani$Reference), decreasing = FALSE)
  n <- length(ids)
  M <- matrix(data = NA, nrow = n, ncol = n, dimnames = list(ids, ids))
  diag(M) <- 0
  # Stage one: copy the pairwise distances into the (possibly asymmetric)
  # matrix; self-comparisons are skipped since the diagonal is already 0.
  for (i in seq_len(nrow(ani))) {
    rw <- ani[i, ]  # Extract one row at a time to increase the speed
    q <- rw$Query
    r <- rw$Reference
    if (r != q) {
      M[q, r] <- rw$D
    }
  }
  if (keep_asym) {
    M_asym <- M  # Preserve the raw asymmetric matrix before averaging
  }
  # Stage two: symmetrise by averaging each pair of reciprocal distances --
  # the same method FastANI uses for its PHYLIP lower-triangular output
  # (see https://github.com/ParBLiSS/FastANI/issues/36).
  # BUG FIX: guard against n == 1, where the original "1 : (n - 1)" loop
  # produced indices 1:0 and crashed on an out-of-bounds matrix access.
  if (n > 1) {
    for (i in 1 : (n - 1)) {
      for (j in (i + 1) : n) {
        val_up <- M[i, j]  # The value in the upper triangle
        val_lo <- M[j, i]  # The value in the lower triangle
        v <- round((val_up + val_lo) / 2, digits = precision)
        M[i, j] <- v
        M[j, i] <- v
      }
    }
  }
  # Return the result
  if (keep_asym) {
    out <- list("D" = M, "D_asym" = M_asym)
  } else {
    out <- M
  }
  return(out)
}

# Strip the directory part and the "." + suffix filename extension from a
# FASTA path, leaving just the sample name.
.extractSampleName <- function(fasta_path, suffix) {
  fields <- unlist(strsplit(x = fasta_path, split = "/", fixed = TRUE))
  f <- fields[length(fields)]
  f <- gsub(pattern = paste0(".", suffix), replacement = "", x = f, fixed = TRUE)
  return(f)
}
|
f776f14272a43e306250a9928421112fb21ecce7 | 26eb90e452144a64ac6b300e61533eb5d7923b35 | /Final Project/Maps/Previous Project - AMAR_Mapping.R | a89f5ab9984de302adc3edd50f9bcec7acd8dab8 | [] | no_license | ETD-988-MD/psyc789w-HM | 5e575385cce72e1eae83691cf8cc9db444915932 | 728734aa46052104df459eda247c0adabcdff494 | refs/heads/master | 2021-01-04T22:33:03.536064 | 2015-05-01T19:20:38 | 2015-05-01T19:20:38 | 29,163,326 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 41,213 | r | Previous Project - AMAR_Mapping.R | ####### MAPS ########
# Exploratory choropleth maps of group access to central government (AMAR
# data), using rworldmap: one world map, regional maps for Eurasia and
# Africa, and quartile/count variants.
# NOTE(review): install.packages() calls inside an analysis script re-install
# on every run and will prompt/fail on machines without repository access --
# consider running them once interactively instead.
install.packages("ggmap")
require(ggmap)
# Country-level table with the inclusion ratio and group counts to be mapped.
exeratio <- read.csv("exe_access_ratio.csv", sep=",", header=T)
View(exeratio)
install.packages("rworldmap")
require(rworldmap)
#To view inside the function: mapCountryData()
# Join the attribute table onto the world polygons by country name.
map1 <- joinCountryData2Map(exeratio,
                            joinCode = "NAME",
                            nameJoinColumn = "country",
                            verbose=T)
# Tight margins so the map fills the device.
par(mai=c(0,0,0.2,0),xaxs="i",yaxs="i")
mapParams <- mapCountryData(map1,
                            nameColumnToPlot="incl.ratio",
                            mapTitle='Worldwide Group Access to Central Government',
                            #aspect=1,
                            addLegend=FALSE )
# Legend added separately so its width/margins can be customised.
do.call( addMapLegend, c(mapParams, legendWidth=0.5, legendMar = 2))
#install.packages("RColorBrewer")
require(RColorBrewer)
Purple <- brewer.pal(6, "Purples")
#Another shot at a regional display
#Eurasia
par(mai=c(0,0,0.2,0),xaxs="i",yaxs="i")
eurasia <-mapCountryData( map1,
                          nameColumnToPlot='incl.ratio',
                          catMethod='pretty',
                          mapTitle='Group Access to Central Government in Europe',
                          colourPalette=Purple,
                          oceanCol='white',
                          missingCountryCol='white',
                          mapRegion='Eurasia',
                          borderCol='black',
                          lwd=.5,
                          addLegend=F)
do.call( addMapLegend, c(eurasia, legendWidth=0.5, legendMar = 2, horizontal=T))
######Colors for the maps#####
OrRd <- brewer.pal(4, "OrRd")
Oranges <- brewer.pal(3, "Oranges")
BuGn <- brewer.pal(2,"BuGn")
Greens <- brewer.pal(5,"Greens")
#Africa - grey
par(mai=c(0,0,0.2,0),xaxs="i",yaxs="i")
africa <- mapCountryData( map1,
                          nameColumnToPlot='incl.ratio',
                          catMethod='pretty',
                          mapTitle='Group Access to Central Government in Africa',
                          colourPalette= 'white2Black', #Oranges,
                          oceanCol='white',
                          missingCountryCol='white',
                          mapRegion='Africa',
                          borderCol='black',
                          lwd=.5,
                          addLegend=F,
                          add=F)
do.call( addMapLegend, c(africa, legendWidth=0.5, legendMar = 2, horizontal=T))
#More Maps
# Count of socially relevant groups per country.
socrelmap <- mapCountryData(map1,
                            nameColumnToPlot='total.no.groups',
                            mapTitle='Number of Socially Relevant Groups',
                            catMethod='pretty',
                            colourPalette=Oranges,
                            oceanCol='white',
                            missingCountryCol='white',
                            aspect=1,
                            addLegend=F )
#Inclusion by quartile
# Same inclusion measure, binned into categorical quartiles.
qworld <- mapCountryData(map1,
                         nameColumnToPlot="incl.quartile",
                         mapTitle='Worldwide Group Access to Central Government',
                         catMethod='categorical',
                         colourPalette=Oranges,
                         addLegend=F )
qworld$legendText <- c('0-25%','26-50%',
'51-75%',
'76-100%')
do.call( addMapLegendBoxes, c(qworld,x='bottomleft',title="Inclusion into the Executive",
cex=.75, pt.cex=1, horiz=F))
barplotCountryData( exeratio
, nameColumnToPlot = "total.no.groups"
, nameCountryColumn = "NAME"
, numPanels = 4
, scaleSameInPanels = FALSE
, main="nameColumnToPlot"
, numCats = 5
, catMethod="quantiles"
, colourPalette= "heat"
, addLegend=TRUE
, toPDF = FALSE
, outFile = ""
, decreasing = TRUE
, na.last = TRUE
, cex = 0.7)
mapBubbles(map1, nameZSize="grgdpch",nameZColour="grgdpch"
,colourPalette='topo',numCats=5,catMethod="quantiles")
##### MAPS with GGMAPS #####
map.world <- map_data(map = "world")
str(map.world)
length(unique(map.world$region))
length(unique(map.world$group))
# Plain ggplot2 world map built from the map.world polygon data.
# Bug fix: the original geom_path() call contained a syntax error
# ("aes(x=incl.ratio, y=group=country)") and both layers pulled aesthetics
# from `exeratio`, which has no long/lat columns; both layers now draw the
# country outlines from `map.world`.
p1 <- ggplot(map.world)
p1 <- p1 + geom_polygon(aes(x = long, y = lat, group = group),
                        colour = "grey30", alpha = .75, size = .2)
p1 <- p1 + geom_path(aes(x = long, y = lat, group = group), data = map.world, color = 'black')
p1 <- p1 + labs(title = "World, plain") + theme_bw()
print(p1)
# ggplot2 world map with one fill colour per region (legend suppressed).
ggworld <- ggplot(map.world, aes(x = long, y = lat, group = group,
                                 fill=region)) + geom_polygon() +
  theme(legend.position="none") +
  labs(title = "World, filled regions") + theme_bw()
ggworld
# NOTE(review): get_map(location="world") may not return the intended extent
# at zoom 4 -- confirm interactively before relying on `p`.
p <- get_map(location="world", zoom=4, maptype="toner", source='stamen')
########################
######### Plots ####
########################
# Bubble scatterplots g1-g6: x = inclusion proportion, y = a development
# outcome, bubble size = number of socially relevant groups.
# NOTE(review): `auth` and `democ` (used by g3-g6) are defined further down
# this script; run those subset lines first on a fresh session.
#Graph looking at GDP growth
g1 <- ggplot(exeratio, aes(x=incl.ratio, y=grgdpch, size=total.no.groups,label=""),guide=T)+
  geom_point(colour="white", fill="red", shape=21)+ scale_area(range=c(1,25))+
  scale_x_continuous(name="Inclusion Proportion", limits=c(0,1))+
  scale_y_continuous(name="2009 Growth GDP", limits=c(-25,25))+
  geom_text(size=4)+
  theme_bw() + labs(title = "GDP Growth")
g1
#Similar graph to the one above looking at HDI
g2 <- ggplot(exeratio, aes(x=incl.ratio, y=IHDI.2012, size=total.no.groups,label=""),guide=T)+
  geom_point(colour="white", fill="darkblue", shape=21)+ scale_area(range=c(1,25))+
  scale_x_continuous(name="Inclusion Proportion", limits=c(0,1))+
  scale_y_continuous(name="IHDI 2012", limits=c(0,1))+
  geom_text(size=4)+
  theme_bw() + labs(title = "Human Development Index (Adjusted)")
g2
#Graph only accounting for more authoritarian regimes
g3 <- ggplot(auth, aes(x=incl.ratio, y=grgdpch, size=total.no.groups,label=""),guide=T)+
  geom_point(colour="white", fill="red", shape=21)+ scale_area(range=c(1,25))+
  scale_x_continuous(name="Inclusion Proportion", limits=c(0,1))+
  scale_y_continuous(name="2009 Growth GDP", limits=c(-25,25))+
  geom_text(size=4)+
  theme_bw() + labs(title = "GDP Growth (Only Authoritarian Regimes)")
g3
#same but with IHDI
g4 <- ggplot(auth, aes(x=incl.ratio, y=IHDI.2012, size=total.no.groups,label=""),guide=T)+
  geom_point(colour="white", fill="darkblue", shape=21)+ scale_area(range=c(1,25))+
  scale_x_continuous(name="Inclusion Proportion", limits=c(0,1))+
  scale_y_continuous(name="IHDI 2012", limits=c(0,1))+
  geom_text(size=4)+
  theme_bw() + labs(title = "Human Development Index (Only Authoritarin Regimes)")
g4
#Graph only accounting for more Democratic regimes
g5 <- ggplot(democ, aes(x=incl.ratio, y=grgdpch, size=total.no.groups,label=""),guide=T)+
  geom_point(colour="white", fill="red", shape=21)+ scale_area(range=c(1,25))+
  scale_x_continuous(name="Inclusion Proportion", limits=c(0,1))+
  scale_y_continuous(name="2009 Growth GDP", limits=c(-25,25))+
  geom_text(size=4)+
  theme_bw() + labs(title = "GDP Growth (Only Democratic Regimes)")
g5
#same but with IHDI
g6 <- ggplot(democ, aes(x=incl.ratio, y=IHDI.2012, size=total.no.groups,label=""),guide=T)+
  geom_point(colour="white", fill="darkblue", shape=21)+ scale_area(range=c(1,25))+
  scale_x_continuous(name="Inclusion Proportion", limits=c(0,1))+
  scale_y_continuous(name="IHDI 2012", limits=c(0,1))+
  geom_text(size=4)+
  theme_bw() + labs(title = "Human Development Index (Only Democratic Regimes)")
g6
# Arrange all six panels in a 2-column grid.
require(gridExtra)
plots <- grid.arrange(g1, g2, g3, g4, g5, g6, ncol=2, main="Findings Thus Far Utilizing GRGDPCH (2009) and the IHDI (2012) Variables to Measure Development")
plots
###########################
####  ANALYSIS  #####
###########################
#Plot of inclusion ratio given the total no. of socially relevant groups there are.
qplot(exeratio$total.no.groups,exeratio$incl.ratio) + theme_bw() +
  labs(x="Total Number of Groups In Country", y="Executive Inclusion Proportion", title = "Access to the Executive Given Group Size")
#As the number of groups (as one might expect) the inclusion ratio decreases.
#Penn World Data
## ---- Penn World Table 7.0: pull 2009 GDP growth (grgdpch) ----
install.packages("pwt")
require(pwt)
data("pwt7.0")
penn <- subset(pwt7.0, year==2009,select = c(country,isocode,year,pop,grgdpch))
View(penn)
write.csv(penn,"pwt7.0.csv")
#I've grafted the relevant variable on my working dataset.
# Reload the CSV after the grgdpch column was merged in externally.
exeratio <- read.csv("exe_access_ratio.csv", sep=",", header=T)
View(exeratio)
#quick OLS reg to see if there is even a statistically significant relationship
summary(lm(exeratio$grgdpch~exeratio$incl.ratio+exeratio$total.no.groups+exeratio$polity.2009))
summary(lm(exeratio$IHDI.2012~exeratio$incl.ratio+exeratio$total.no.groups+exeratio$polity.2009))
#For only authoritarian, we lose statistical significance
# NOTE(review): these two lm() calls use `auth`/`democ`, which are only
# created a few lines below -- run the subsets first.
summary(lm(auth$grgdpch~auth$incl.ratio+auth$total.no.groups))
#For only democracies, inclusion ratio is only stat sig at .1 level
summary(lm(democ$grgdpch~democ$incl.ratio+democ$total.no.groups))
#xy visualization
qplot(exeratio$incl.ratio, exeratio$grgdpch)
qplot(exeratio$incl.ratio, exeratio$IHDI.2012)
#Authoritarian Regime subset (using polity iv data)
auth <- subset(exeratio, polity.2009 <= 0, select=c("country", "incl.ratio", "incl.quartile", "total.no.groups",
                                                    "grgdpch", "HDI.2012",
                                                    "IHDI.2012"))
View(auth)
#Democratic Regime subset (using polity iv data)
democ <- subset(exeratio, polity.2009 >= 1, select=c("country", "incl.ratio", "incl.quartile", "total.no.groups",
                                                     "grgdpch", "HDI.2012",
                                                     "IHDI.2012"))
View(democ)
#Simple xy graphs for auth using development indicators
qplot(auth$incl.ratio, auth$grgdpch) + geom_abline() + theme_bw()
qplot(auth$incl.ratio, auth$IHDI.2012) + geom_abline() + theme_bw()
#simple xy graphs for democ using development indicators
qplot(democ$incl.ratio, democ$grgdpch) + geom_abline() + theme_bw()
qplot(democ$incl.ratio, democ$HDI.2012) + geom_abline() + theme_bw()
#There is nothing really here.
##################################
##### DAY 2 - Making the maps work
##################################
require(plyr)
# NOTE(review): ddply() requires a .variables argument plus a function; the
# call below passes a whole data frame and errors. Kept as a record of the
# abandoned attempt.
wmap <- map1
wmap <- fortify(wmap)
wmap <- ddply(wmap, exeratio)
x <- ggplot(map1)
x <- x + geom_polygon(colour = "grey30", size = 3.0)
x <- x + stat_bin2d(
  aes(x = long, y = lat, colour = "grey30", fill = total.no.groups),
  size = .5, bins = 30, alpha = 1/2,
  data = exeratio
)
x
fuck <- ggplot(map1) + geom_polygon(aes(x=long,y=lat,group=group), fill='grey30') + theme_bw()
fuck
world <- get_map("world", zoom=2)
world2 <- ggmap(world, extent="device", legend="bottomleft")
world2
############################
####### After B.'s Email ##
############################
#First, Let's load up the pen data to create growth averages using the grgdpch variable.
require(pwt)
data("pwt7.0")
View(pwt7.0)
# NOTE(review): attach() puts columns on the search path and can mask other
# objects; with(pwt7.0, ...) or pwt7.0$... would be safer.
attach(pwt7.0)
penn2 <- data.frame(country,isocode,year,pop,grgdpch)
View(penn2)
detach(pwt7.0)
#Average Growth from 1950-2010
# Per-country mean and observation count of grgdpch across all years.
meangrowth <- aggregate(grgdpch~country, data=penn2, FUN=function(penn2) c(mean=mean(penn2), count=length(penn2)))
View(meangrowth)
#Merging this to the existing dataset
# NOTE(review): the first merge() call below is malformed (by= must name a
# shared column); both merges are abandoned and the join was done by hand.
exeratio2 <- merge(meangrowth[,2], exeratio, by =exeratio[,1])
View(exeratio2)
meangrowth[,2]
exeratio[,1]
exeratio2 <- merge(exeratio,meangrowth, by="country", sort=F)
#Alas, we can't just merge these data frames due to the inconsistency in how the countries are labeled.
#So---we have to do this the hard way.
rm(exeratio2)
write.csv(meangrowth,"grgdpch_mean.csv")
#The values were manually compared. Also, regions were added to the exeratio dataset.
#Loading revised dataset
exeratio <- read.csv("exe_access_ratio.csv", sep=",", header=T)
View(exeratio)
########### NEW PLOTS W/ MEAN GRGDPCH VARIABLE #########
# Bubble plots as before, but y is now the long-run mean of grgdpch
# (grgdpch.mean, merged into the CSV above).
# NOTE(review): `nonOECD` is not created anywhere in this file -- it is
# presumably built interactively / in another do-file; confirm before rerun.
#Graph looking at GDP growth
#Aggregate
ggplot(exeratio, aes(x=incl.ratio, y=grgdpch.mean, size=total.no.groups,label=""),guide=T)+
  geom_point(colour="white", fill="red", shape=21)+ scale_area(range=c(1,25))+
  scale_x_continuous(name="Inclusion Proportion", limits=c(0,1))+
  scale_y_continuous(name="Mean Growth GDP (grgdpch)", limits=c(-10,10))+
  geom_text(size=4)+
  theme_bw() + labs(title = "GDP Growth")
#Only Authoritarian
ggplot(auth, aes(x=incl.ratio, y=grgdpch.mean, size=total.no.groups,label=""),guide=T)+
  geom_point(colour="white", fill="red", shape=21)+ scale_area(range=c(1,25))+
  scale_x_continuous(name="Inclusion Proportion", limits=c(0,1))+
  scale_y_continuous(name="Mean Growth GDP (grgdpch)", limits=c(-10,10))+
  geom_text(size=4)+
  theme_bw() + labs(title = "GDP Growth (Only Authoritarian Regimes)")
#Only Democratic
ggplot(democ, aes(x=incl.ratio, y=grgdpch.mean, size=total.no.groups,label=""),guide=T)+
  geom_point(colour="white", fill="red", shape=21)+ scale_area(range=c(1,25))+
  scale_x_continuous(name="Inclusion Proportion", limits=c(0,1))+
  scale_y_continuous(name="Mean Growth GDP (grgdpch)", limits=c(-10,10))+
  geom_text(size=4)+
  theme_bw() + labs(title = "GDP Growth (Only Democratic Regimes)")
#Still, the effect isn't there. Let's focus on *just* non-OECD countries
#Manually entered the new values for OECD countries...see other Do_File
#Graph of Non-OECD countires GDP growth
ggplot(nonOECD, aes(x=incl.ratio, y=grgdpch.mean, size=total.no.groups,label=""),guide=T)+
  geom_point(colour="white", fill="red", shape=21)+ scale_area(range=c(1,25))+
  scale_x_continuous(name="Inclusion Proportion", limits=c(0,1))+
  scale_y_continuous(name="Mean Growth GDP (grgdpch)", limits=c(-5,10))+
  geom_text(size=4)+
  theme_bw() + labs(title = "GDP Growth (Non-OECD Countries)")
#Graph of Non-OECD countires HDI 2012
ggplot(nonOECD, aes(x=incl.ratio, y=HDI.2012, size=total.no.groups,label=""),guide=T)+
  geom_point(colour="white", fill="red", shape=21)+ scale_area(range=c(1,25))+
  scale_x_continuous(name="Inclusion Proportion", limits=c(0,1))+
  scale_y_continuous(name="IHDI 2012", limits=c(0,1))+
  geom_text(size=4)+
  theme_bw() + labs(title = "Human Development Index")
#Graph of Non-OECD countires IHDI 2012
ggplot(nonOECD, aes(x=incl.ratio, y=IHDI.2012, size=total.no.groups,label=""),guide=T)+
  geom_point(colour="white", fill="darkblue", shape=21)+ scale_area(range=c(1,25))+
  scale_x_continuous(name="Inclusion Proportion", limits=c(0,1))+
  scale_y_continuous(name="IHDI 2012", limits=c(0,1))+
  geom_text(size=4)+
  theme_bw() + labs(title = "Human Development Index (Adjusted)")
#Graph looking at growth given total # of groups
ggplot(nonOECD, aes(x=total.no.groups, y=grgdpch.mean, size=incl.ratio,label=""),guide=T)+
  geom_point(colour="white", fill="red", shape=21)+ scale_area(range=c(1,25))+
  scale_x_continuous(name="Total Number of Groups", limits=c(0,40))+
  scale_y_continuous(name="Mean Growth GDP (grgdpch)", limits=c(-5,10))+
  geom_text(size=4)+
  theme_bw() + labs(title = "GDP Growth (Non-OECD Countries)")
#Graph looking at IHDI 2012 given total # of groups
ggplot(nonOECD, aes(x=total.no.groups, y=IHDI.2012, size=incl.ratio,label=""),guide=T)+
  geom_point(colour="white", fill="darkblue", shape=21)+ scale_area(range=c(1,25))+
  scale_x_continuous(name="Total Number of Groups", limits=c(0,40))+
  scale_y_continuous(name="IHDI 2012", limits=c(0,1))+
  geom_text(size=4)+
  theme_bw() + labs(title = "Human Development Index (Adjusted)")
#None of these graphs are too informative... hmmm... one more thing:
#Graph y=ratio x=total.no.groups size=growth
ggplot(nonOECD, aes(y=incl.ratio, x=total.no.groups, size=grgdpch.mean,label=""),guide=T)+
  geom_point(colour="white", fill="darkgreen", shape=21)+ scale_area(range=c(1,25))+
  scale_x_continuous(name="Total Number of Groups", limits=c(0,40))+
  scale_y_continuous(name="Inclusion Proportion", limits=c(0,1))+
  geom_text(size=4)+
  theme_bw() + labs(title = "GDP Growth (Non-OECD Countries)")
#Graph y=ratio x=total.no.groups size=IHDI
ggplot(nonOECD, aes(y=incl.ratio, x=total.no.groups, size=IHDI.2012,label=""),guide=T)+
  geom_point(colour="white", fill="darkgreen", shape=21)+ scale_area(range=c(1,25))+
  scale_x_continuous(name="Total Number of Groups", limits=c(0,40))+
  scale_y_continuous(name="Inclusion Proportion", limits=c(0,1))+
  geom_text(size=4)+
  theme_bw() + labs(title = "GDP Growth (Non-OECD Countries)")
#Well, that's still uninformative.
#Graph that only looks at non-OECD countries with the largest population of socially relevant groups
#First, Create the subset
groups <- subset(nonOECD, total.no.groups >= 5, select=c("country", "incl.ratio", "incl.quartile", "total.no.groups",
                                                         "grgdpch", "HDI.2012", "polity.2009",
                                                         "IHDI.2012", "grgdpch.mean","grgdpch.count", "region"))
ggplot(groups, aes(x=incl.ratio, y=grgdpch.mean, label=""),guide=T)+
  geom_point(colour="white", fill="black", shape=21)+ scale_area(range=c(1,25))+
  scale_x_continuous(name="Inclusion Proportion", limits=c(0,1))+
  scale_y_continuous(name="Mean Growth GDP (grgdpch)", limits=c(-5,10))+
  geom_text(size=4)+
  theme_bw() + labs(title = "GDP Growth (Non-OECD Countries that have more than 5 groups)")
#Maybe *by region*?
#New subset
# Regional subsets keyed on the hand-coded `region` column.
africa <- subset(exeratio, region=="SSAfrica", select=c("country", "incl.ratio", "incl.quartile", "total.no.groups",
                                                        "grgdpch", "HDI.2012", "polity.2009",
                                                        "IHDI.2012", "grgdpch.mean","grgdpch.count", "region"))
asia <- subset(exeratio, region=="Asia", select=c("country", "incl.ratio", "incl.quartile", "total.no.groups",
                                                  "grgdpch", "HDI.2012", "polity.2009",
                                                  "IHDI.2012", "grgdpch.mean","grgdpch.count", "region"))
#Africa
ggplot(africa, aes(x=incl.ratio, y=grgdpch.mean, size=total.no.groups,label=""),guide=T)+
  geom_point(colour="white", fill="red", shape=21)+ scale_area(range=c(1,25))+
  scale_x_continuous(name="Inclusion Proportion", limits=c(0,1))+
  scale_y_continuous(name="Mean Growth GDP (grgdpch)", limits=c(-10,10))+
  geom_text(size=4)+
  theme_bw() + labs(title = "GDP Growth (Africa Only)")
ggplot(africa, aes(x=incl.ratio, y=IHDI.2012, size=total.no.groups,label=""),guide=T)+
  geom_point(colour="white", fill="darkblue", shape=21)+ scale_area(range=c(1,25))+
  scale_x_continuous(name="Inclusion Proportion", limits=c(0,1))+
  scale_y_continuous(name="IHDI 2012", limits=c(0,1))+
  geom_text(size=4)+
  theme_bw() + labs(title = "Inequality Human Development Index (Africa Only)")
#Asia
ggplot(asia, aes(x=incl.ratio, y=grgdpch.mean, size=total.no.groups,label=""),guide=T)+
  geom_point(colour="white", fill="red", shape=21)+ scale_area(range=c(1,25))+
  scale_x_continuous(name="Inclusion Proportion", limits=c(0,1))+
  scale_y_continuous(name="Mean Growth GDP (grgdpch)", limits=c(-10,10))+
  geom_text(size=4)+
  theme_bw() + labs(title = "GDP Growth (Asia Only)")
ggplot(asia, aes(x=incl.ratio, y=IHDI.2012, size=total.no.groups,label=""),guide=T)+
  geom_point(colour="white", fill="darkblue", shape=21)+ scale_area(range=c(1,25))+
  scale_x_continuous(name="Inclusion Proportion", limits=c(0,1))+
  scale_y_continuous(name="IHDI 2012", limits=c(0,1))+
  geom_text(size=4)+
  theme_bw() + labs(title = "Inequality Human Development Index (Asia Only)")
#############
###### Different Measures #####
#############
#Let's focus on "averages" of HDI over a decades span.
#Manual inclusion of these variables
# HDI.ave.8090 / HDI.ave.9000 / HDI.ave.0012 were added to the CSV by hand;
# reload to pick them up.
exeratio <- read.csv("exe_access_ratio.csv", sep=",", header=T)
View(exeratio)
#Aggregate glance --
#Aggregate - HDI Growth Ave 1980-1990
ggplot(exeratio, aes(x=incl.ratio, y=HDI.ave.8090, size=total.no.groups,label=""),guide=T)+
  geom_point(colour="white", fill="darkblue", shape=21)+ scale_area(range=c(1,25))+
  scale_x_continuous(name="Inclusion Proportion", limits=c(0,1))+
  scale_y_continuous(name="HDI Average '80-'90", limits=c(0,5))+
  geom_text(size=4)+
  theme_bw() + labs(title = "Average Human Development Index (1980-1990)")
#Aggregate - HDI Growth Ave 1980-1990 (non-OECD)
ggplot(nonOECD, aes(x=incl.ratio, y=HDI.ave.8090, size=total.no.groups,label=country),guide=T)+
  geom_point(colour="white", fill="red", shape=21)+ scale_area(range=c(1,25))+
  scale_x_continuous(name="Inclusion Proportion", limits=c(0,1))+
  scale_y_continuous(name="HDI Average '80-'90", limits=c(0,5))+
  geom_text(size=4)+
  theme_bw() + labs(title = "Average Human Development Index (1980-1990) - Non-OECD Only")
#Aggregate - HDI Growth Ave 1990-2000
ggplot(exeratio, aes(x=incl.ratio, y=HDI.ave.9000, size=total.no.groups,label=""),guide=T)+
  geom_point(colour="white", fill="darkblue", shape=21)+ scale_area(range=c(1,25))+
  scale_x_continuous(name="Inclusion Proportion", limits=c(0,1))+
  scale_y_continuous(name="HDI Average '80-'90", limits=c(0,5))+
  geom_text(size=4)+
  theme_bw() + labs(title = "Average Human Development Index (1990-2000)")
#Aggregate - HDI Growth Ave 1990-2000 (non-OECD)
ggplot(nonOECD, aes(x=incl.ratio, y=HDI.ave.9000, size=total.no.groups,label=country),guide=T)+
  geom_point(colour="white", fill="red", shape=21)+ scale_area(range=c(1,25))+
  scale_x_continuous(name="Inclusion Proportion", limits=c(0,1))+
  scale_y_continuous(name="HDI Average '80-'90", limits=c(0,5))+
  geom_text(size=4)+
  theme_bw() + labs(title = "Average Human Development Index (1990-2000) - Non-OECD Only")
#Aggregate - HDI Growth Ave 2000-2012 (non-OECD)
ggplot(nonOECD, aes(x=incl.ratio, y=HDI.ave.0012, size=total.no.groups,label=country),guide=T)+
  geom_point(colour="white", fill="red", shape=21)+ scale_area(range=c(1,25))+
  scale_x_continuous(name="Inclusion Proportion", limits=c(0,1))+
  scale_y_continuous(name="HDI Average '80-'90", limits=c(0,5))+
  geom_text(size=4)+
  theme_bw() + labs(title = "Average Human Development Index (1990-2000) - Non-OECD Only")
#Regional Glances
#Africa
#Africa - HDI Growth Ave 1980-1990
ggplot(africa, aes(x=incl.ratio, y=HDI.ave.8090, size=total.no.groups,label=country),guide=T)+
  geom_point(colour="white", fill="red", shape=21)+ scale_area(range=c(1,25))+
  scale_x_continuous(name="Inclusion Proportion", limits=c(0,1))+
  scale_y_continuous(name="HDI Average '80-'90", limits=c(0,4))+
  geom_text(size=4)+
  theme_bw() + labs(title = "Average Human Development Index (1980-1990) - Africa Only")
#Africa - HDI Growth Ave 1990-2000
ggplot(africa, aes(x=incl.ratio, y=HDI.ave.9000, size=total.no.groups,label=country),guide=T)+
  geom_point(colour="white", fill="red", shape=21)+ scale_area(range=c(1,25))+
  scale_x_continuous(name="Inclusion Proportion", limits=c(0,1))+
  scale_y_continuous(name="HDI Average '80-'90", limits=c(0,4))+
  geom_text(size=4)+
  theme_bw() + labs(title = "Average Human Development Index (1990-2000) - Africa Only")
#Africa - HDI Growth Ave 2000-2012
ggplot(africa, aes(x=incl.ratio, y=HDI.ave.0012, size=total.no.groups,label=country),guide=T)+
  geom_point(colour="white", fill="red", shape=21)+ scale_area(range=c(1,25))+
  scale_x_continuous(name="Inclusion Proportion", limits=c(0,1))+
  scale_y_continuous(name="HDI Average '80-'90", limits=c(0,5))+
  geom_text(size=4)+
  theme_bw() + labs(title = "Average Human Development Index (1990-2000) - Africa Only")
#Test - Africa (3 decades stacked)
# Three point layers on one panel, one per decade, in increasingly dark greys.
ggplot(africa, aes(x=incl.ratio, y=HDI.ave.8090, size=total.no.groups,label=""),guide=T)+
  geom_point(colour="white", fill="lightgrey", shape=21) + geom_point(data=africa, aes(x=incl.ratio, y=HDI.ave.9000, size=total.no.groups), colour="white", fill="grey", shape=21)+
  geom_point(data=africa, aes(x=incl.ratio, y=HDI.ave.0012, size=total.no.groups, label=country), colour="white", fill="darkgrey", shape=21)+scale_area(range=c(1,25))+
  scale_x_continuous(name="Inclusion Proportion", limits=c(0,1))+
  scale_y_continuous(name="HDI Average 1980-2012", limits=c(0,4))+
  geom_text(size=4)+
  theme_bw() + labs(title = "Average Human Development Index (1980-1990) - Africa Only")
#Asia
#Asia - HDI Growth Ave 1980-1990
ggplot(asia, aes(x=incl.ratio, y=HDI.ave.8090, size=total.no.groups,label=country),guide=T)+
  geom_point(colour="white", fill="red", shape=21)+ scale_area(range=c(1,25))+
  scale_x_continuous(name="Inclusion Proportion", limits=c(0,1))+
  scale_y_continuous(name="HDI Average '80-'90", limits=c(0,4))+
  geom_text(size=4)+
  theme_bw() + labs(title = "Average Human Development Index (1980-1990) - Asia Only")
#Asia - HDI Growth Ave 1990-2000
ggplot(asia, aes(x=incl.ratio, y=HDI.ave.9000, size=total.no.groups,label=country),guide=T)+
  geom_point(colour="white", fill="red", shape=21)+ scale_area(range=c(1,25))+
  scale_x_continuous(name="Inclusion Proportion", limits=c(0,1))+
  scale_y_continuous(name="HDI Average '80-'90", limits=c(0,4))+
  geom_text(size=4)+
  theme_bw() + labs(title = "Average Human Development Index (1990-2000) - Asia Only")
#Asia - HDI Growth Ave 2000-2012
ggplot(asia, aes(x=incl.ratio, y=HDI.ave.0012, size=total.no.groups,label=country),guide=T)+
  geom_point(colour="white", fill="red", shape=21)+ scale_area(range=c(1,25))+
  scale_x_continuous(name="Inclusion Proportion", limits=c(0,1))+
  scale_y_continuous(name="HDI Average '80-'90", limits=c(0,5))+
  geom_text(size=4)+
  theme_bw() + labs(title = "Average Human Development Index (1990-2000) - Asia Only")
#Middle East
# NOTE(review): `Meast` (and `LA` further below) are not defined in this
# file -- presumably subsets built like `africa`/`asia`; confirm before rerun.
#ME - HDI Growth Ave 1980-1990
ggplot(Meast, aes(x=incl.ratio, y=HDI.ave.8090, size=total.no.groups,label=country),guide=T)+
  geom_point(colour="white", fill="red", shape=21)+ scale_area(range=c(1,25))+
  scale_x_continuous(name="Inclusion Proportion", limits=c(0,1))+
  scale_y_continuous(name="HDI Average '80-'90", limits=c(0,4))+
  geom_text(size=4)+
  theme_bw() + labs(title = "Average Human Development Index (1980-1990) - Middle East Only")
#ME - HDI Growth Ave 1990-2000
ggplot(Meast, aes(x=incl.ratio, y=HDI.ave.9000, size=total.no.groups,label=country),guide=T)+
  geom_point(colour="white", fill="red", shape=21)+ scale_area(range=c(1,25))+
  scale_x_continuous(name="Inclusion Proportion", limits=c(0,1))+
  scale_y_continuous(name="HDI Average '80-'90", limits=c(0,4))+
  geom_text(size=4)+
  theme_bw() + labs(title = "Average Human Development Index (1990-2000) - Middle East Only")
#ME - HDI Growth Ave 2000-2012
ggplot(Meast, aes(x=incl.ratio, y=HDI.ave.0012, size=total.no.groups,label=country),guide=T)+
  geom_point(colour="white", fill="red", shape=21)+ scale_area(range=c(1,25))+
  scale_x_continuous(name="Inclusion Proportion", limits=c(0,1))+
  scale_y_continuous(name="HDI Average '80-'90", limits=c(0,5))+
  geom_text(size=4)+
  theme_bw() + labs(title = "Average Human Development Index (2000-2012) - Middle East Only")
#Okay focusing on Africa might be the way to go.
#Let's combine Northern Africa and Southern Africa
# Stack the two regional subsets, round-trip through CSV (an `Allafrica`
# indicator column appears to be added by hand there), then re-filter.
Allafrica <- rbind(africa, Meast)
View(Allafrica)
write.csv(Allafrica, "Allafrica.csv")
Allafrica <- read.csv("Allafrica.csv", sep=",", header=T)
Allafrica <- subset(Allafrica, Allafrica==1, select=(all=T))
View(Allafrica)
#All of Africa
#Africa - HDI Growth Ave 1980-1990
ggplot(Allafrica, aes(x=incl.ratio, y=HDI.ave.8090, size=total.no.groups,label=country),guide=T)+
  geom_point(colour="white", fill="red", shape=21)+ scale_area(range=c(1,25))+
  scale_x_continuous(name="Inclusion Proportion", limits=c(0,1))+
  scale_y_continuous(name="HDI Average '80-'90", limits=c(0,4))+
  geom_text(size=4)+
  theme_bw() + labs(title = "Average Human Development Index (1980-1990) - Africa Only")
#Africa - HDI Growth Ave 1990-2000
ggplot(Allafrica, aes(x=incl.ratio, y=HDI.ave.9000, size=total.no.groups,label=country),guide=T)+
  geom_point(colour="white", fill="red", shape=21)+ scale_area(range=c(1,25))+
  scale_x_continuous(name="Inclusion Proportion", limits=c(0,1))+
  scale_y_continuous(name="HDI Average '80-'90", limits=c(0,4))+
  geom_text(size=4)+
  theme_bw() + labs(title = "Average Human Development Index (1990-2000) - Africa Only")
#Africa - HDI Growth Ave 2000-2012
ggplot(Allafrica, aes(x=incl.ratio, y=HDI.ave.0012, size=total.no.groups,label=country),guide=T)+
  geom_point(colour="white", fill="red", shape=21)+ scale_area(range=c(1,25))+
  scale_x_continuous(name="Inclusion Proportion", limits=c(0,1))+
  scale_y_continuous(name="HDI Average '80-'90", limits=c(0,5))+
  geom_text(size=4)+
  theme_bw() + labs(title = "Average Human Development Index (1990-2000) - Africa Only")
#For GDP growth, let's do it by 10yr averages from 1980-2012
# Per-country mean + count of grgdpch for each (overlapping-endpoint) decade.
Grw <- subset(penn2, year >= 1980 & year <= 1990, select=(all=T))
Grw <- aggregate(grgdpch~country, data=Grw, FUN=function(Grw) c(mean=mean(Grw), count=length(Grw)))
View(Grw) #GDP growth from 1980 - 1990
Grw2 <- subset(penn2, year >= 1990 & year <= 2000, select=(all=T))
Grw2 <- aggregate(grgdpch~country, data=Grw2, FUN=function(Grw2) c(mean=mean(Grw2), count=length(Grw2)))
View(Grw2) #GDP growth from 1990 - 2000
Grw3 <- subset(penn2, year >= 2000 & year <= 2012, select=(all=T))
Grw3 <- aggregate(grgdpch~country, data=Grw3, FUN=function(Grw3) c(mean=mean(Grw3), count=length(Grw3)))
View(Grw3) #GDP growth from 2000- 2012
write.csv(Grw, "growth_by_decade_1980_1990.csv")
write.csv(Grw2, "growth_by_decade_1990_2000.csv")
write.csv(Grw3, "growth_by_decade_2000_2012.csv")
#Growth by decade average has been incorporated into the csv.
# Reload after the grgdpch.mean.8090/.9000/.0012 columns were merged by hand.
exeratio <- read.csv("exe_access_ratio.csv", sep=",", header=T)
View(exeratio)
####Graphs of growth with decage average GRGDPCH
# NOTE(review): `africa`/`asia` were built from the previous CSV load and do
# not contain the new decade columns -- rebuild the subsets before plotting.
#SSAfrica
ggplot(africa, aes(x=incl.ratio, y=grgdpch.mean.8090, size=total.no.groups,label=country),guide=T)+
  geom_point(colour="white", fill="red", shape=21)+ scale_area(range=c(1,25))+
  scale_x_continuous(name="Inclusion Proportion", limits=c(0,1))+
  scale_y_continuous(name="GDP Growth Average '80-'90", limits=c(-10,10))+
  geom_text(size=4)+
  theme_bw() + labs(title = "GDP Growth (1980-1990) - Africa Only")
ggplot(africa, aes(x=incl.ratio, y=grgdpch.mean.9000, size=total.no.groups,label=country),guide=T)+
  geom_point(colour="white", fill="red", shape=21)+ scale_area(range=c(1,25))+
  scale_x_continuous(name="Inclusion Proportion", limits=c(0,1))+
  scale_y_continuous(name="GDP Growth Average '90-'00", limits=c(-10,10))+
  geom_text(size=4)+
  theme_bw() + labs(title = "GDP Growth (1990-2000) - Africa Only")
ggplot(africa, aes(x=incl.ratio, y=grgdpch.mean.0012, size=total.no.groups,label=country),guide=T)+
  geom_point(colour="white", fill="red", shape=21)+ scale_area(range=c(1,25))+
  scale_x_continuous(name="Inclusion Proportion", limits=c(0,1))+
  scale_y_continuous(name="GDP Growth Average '00-'12", limits=c(-10,10))+
  geom_text(size=4)+
  theme_bw() + labs(title = "GDP Growth (2000-2012) - Africa Only")
#Asia
ggplot(asia, aes(x=incl.ratio, y=grgdpch.mean.8090, size=total.no.groups,label=country),guide=T)+
  geom_point(colour="white", fill="red", shape=21)+ scale_area(range=c(1,25))+
  scale_x_continuous(name="Inclusion Proportion", limits=c(0,1))+
  scale_y_continuous(name="GDP Growth Average '80-'90", limits=c(-10,10))+
  geom_text(size=4)+
  theme_bw() + labs(title = "GDP Growth (1980-1990) - Asia Only")
ggplot(asia, aes(x=incl.ratio, y=grgdpch.mean.9000, size=total.no.groups,label=country),guide=T)+
  geom_point(colour="white", fill="red", shape=21)+ scale_area(range=c(1,25))+
  scale_x_continuous(name="Inclusion Proportion", limits=c(0,1))+
  scale_y_continuous(name="GDP Growth Average '90-'00", limits=c(-10,10))+
  geom_text(size=4)+
  theme_bw() + labs(title = "GDP Growth (1990-2000) - Asia Only")
ggplot(asia, aes(x=incl.ratio, y=grgdpch.mean.0012, size=total.no.groups,label=country),guide=T)+
  geom_point(colour="white", fill="red", shape=21)+ scale_area(range=c(1,25))+
  scale_x_continuous(name="Inclusion Proportion", limits=c(0,1))+
  scale_y_continuous(name="GDP Growth Average '00-'12", limits=c(-10,10))+
  geom_text(size=4)+
  theme_bw() + labs(title = "GDP Growth (2000-2012) - Asia Only")
#Middle East
ggplot(Meast, aes(x=incl.ratio, y=grgdpch.mean.0012, size=total.no.groups,label=country),guide=T)+
  geom_point(colour="white", fill="red", shape=21)+ scale_area(range=c(1,25))+
  scale_x_continuous(name="Inclusion Proportion", limits=c(0,1))+
  scale_y_continuous(name="GDP Growth Average '00-'12", limits=c(-10,10))+
  geom_text(size=4)+
  theme_bw() + labs(title = "GDP Growth (2000-2012) - Middle East Only")
#Latin America
ggplot(LA, aes(x=incl.ratio, y=grgdpch.mean.0012, size=total.no.groups,label=country),guide=T)+
  geom_point(colour="white", fill="red", shape=21)+ scale_area(range=c(1,25))+
  scale_x_continuous(name="Inclusion Proportion", limits=c(0,1))+
  scale_y_continuous(name="GDP Growth Average '00-'12", limits=c(-10,10))+
  geom_text(size=4)+
  theme_bw() + labs(title = "GDP Growth (2000-2012) - Latin America Only")
#Growth just isn't that useful of an indicator. Let's stick with HDI and see what B. says from there.
#Let's average all the yearly averages of HDI to see if there is any effect.
AV <- aggregate(HDI.ave.8090+HDI.ave.9000+HDI.ave.0012~country, data=exeratio, FUN=mean)
View(AV)
names(AV)
colnames(AV)[2] <- "ave"
View(AV)
AV$ave <- AV$ave/3
Poop <- merge(exeratio,AV, by="country")
View(Poop)
#Is there anything here?
ggplot(Poop, aes(x=incl.ratio, y=ave, size=total.no.groups,label=country),guide=T)+
geom_point(colour="white", fill="red", shape=21)+ scale_area(range=c(1,25))+
scale_x_continuous(name="Inclusion Proportion", limits=c(0,1))+
scale_y_continuous(name="HDI Average '80-'90", limits=c(0,4))+
geom_text(size=4)+
theme_bw() + labs(title = "Average Human Development Index")
#Nope...Africa Only?
Poop <- subset(Poop, region=="SSAfrica", select=(all=T))
ggplot(Poop, aes(x=incl.ratio, y=ave, size=total.no.groups,label=country),guide=T)+
geom_point(colour="white", fill="red", shape=21)+ scale_area(range=c(1,25))+
scale_x_continuous(name="Inclusion Proportion", limits=c(0,1))+
scale_y_continuous(name="HDI Average '80-'90", limits=c(0,2.5))+
geom_text(size=4)+
theme_bw() + labs(title = "Average Human Development Index - Africa Only")
#Maybe let's simplify this and only look at the 1990 >=
AV <- aggregate(HDI.ave.9000+HDI.ave.0012~country, data=exeratio, FUN=mean)
View(AV)
names(AV)
colnames(AV)[2] <- "ave"
View(AV)
AV$ave <- AV$ave/2
Poop <- merge(exeratio,AV, by="country")
View(Poop)
#First...
ggplot(Poop, aes(x=incl.ratio, y=ave, size=total.no.groups,label=country),guide=T)+
geom_point(colour="white", fill="red", shape=21)+ scale_area(range=c(1,25))+
scale_x_continuous(name="Inclusion Proportion", limits=c(0,1))+
scale_y_continuous(name="HDI Average '80-'90", limits=c(0,4))+
geom_text(size=4)+
theme_bw() + labs(title = "Average Human Development Index")
#Africa only...
Poop <- subset(Poop, region=="SSAfrica", select=(all=T))
ggplot(Poop, aes(x=incl.ratio, y=ave, size=total.no.groups,label=country),guide=T)+
geom_point(colour="white", fill="red", shape=21)+ scale_area(range=c(1,25))+
scale_x_continuous(name="Inclusion Proportion", limits=c(0,1))+
scale_y_continuous(name="HDI Average '80-'90", limits=c(0,4))+
geom_text(size=4)+
theme_bw() + labs(title = "Average Human Development Index - Africa Only")
#ME
# NOTE(review): Poop was just restricted to SSAfrica, so this Middle_East
# subset is necessarily empty -- it should start again from the full data.
# Note also that the plot title below says "Asia Only" while the filter is
# Middle_East.
Poop <- subset(Poop, region=="Middle_East", select=(all=T))
ggplot(Poop, aes(x=incl.ratio, y=ave, size=total.no.groups,label=country),guide=T)+
geom_point(colour="white", fill="red", shape=21)+ scale_area(range=c(1,25))+
scale_x_continuous(name="Inclusion Proportion", limits=c(0,1))+
scale_y_continuous(name="HDI Average '80-'90", limits=c(0,4))+
geom_text(size=4)+
theme_bw() + labs(title = "Average Human Development Index - Asia Only")
#Nothing here....at least looking at it from this angle.
rm(AV)
rm(Poop)
#only looking at groups where inclusion mostly likely matters the most
poop1 <- subset(exeratio, total.no.groups>=10, select=(all=T))
poop2 <- subset(poop1, exclude==0, select=(all=T))
poop2 <- subset(poop2, OECD.dummy==0, select=(all=T))
View(poop2)
ggplot(poop2, aes(x=incl.ratio, y=grgdpch.mean, label=""),guide=T)+
geom_point(colour="white", fill="red", size=4, shape=21)+ scale_area(range=c(1,25))+
scale_x_continuous(name="", limits=c(0,1))+
scale_y_continuous(name="", limits=c(-10,10))+
geom_text(size=4) + #geom_smooth(method=lm) +
theme_bw() + labs(title = "")
#Still nothing...
ggplot(auth, aes(x=polity.2009, y=incl.ratio)) + geom_point()
# nothing
# Bin the Polity IV score into four ordered regime categories; the numeric
# prefixes force the intended factor ordering in the plot legend.
testdata <- exeratio
testdata$regime.type[testdata$polity.2009 <= -6] <- "1very.auth"
testdata$regime.type[testdata$polity.2009 <=0 & testdata$polity.2009 >=-5] <- "2somewhat.auth"
testdata$regime.type[testdata$polity.2009 >=1 & testdata$polity.2009 <= 5] <- "3somewhat.demo"
testdata$regime.type[testdata$polity.2009 >= 6] <- "4very.dem"
# NOTE(review): the comparison below tests against the *string* "NA";
# missing polity scores need is.na(testdata$polity.2009) instead.
testdata$regime.type[testdata$polity.2009 == "NA"] <- NA
ggplot(na.omit(testdata), aes(x=regime.type, y=incl.ratio, colour=regime.type, size=total.no.groups)) + geom_point(position = position_jitter(w = 0.1)) +
theme_bw() +scale_size_area()
#### New GDP Measure
# World Bank GDP growth, 1980-2012, merged onto the main data by country code.
peek <- read.csv("wb_GDPgrowth_80_12.csv")
newlook <- exeratio
newlook <- merge(newlook,peek, by="country.code")
View(newlook) #Shitty second Country column--remove later
# NOTE(review): peek is removed here but merged again three lines below and
# used repeatedly after that; as a script this fails with
# "object 'peek' not found". Move the rm() calls after the last use of peek.
rm(peek) #remove GDP upload to reduce object clutter
#Again with SSA data
SSAnewlook <- SSAdata #Some cases have been excluded.
SSAnewlook <- merge(SSAnewlook,peek, by="country.code")
View(SSAnewlook) #close enough 40 out of 41 ... check later for the difference.
rm(peek) #remove GDP upload to reduce object clutter
ggplot(SSAnewlook, aes(x=incl.ratio.y, y=X2012)) + geom_point()
# NOTE(review): peek was rm()'d above, so the next three statements cannot
# run as written.
t(peek)
peek <- data.frame(peek)
View(peek)
# NOTE(review): ts() expects numeric start/end times, not column names such
# as "X1980"; this call is almost certainly not doing what was intended.
peek <- ts(SSAnewlook, start=c("X1980"), end=c("X2013"), frequency=1)
# Transcript artefacts: q() ends the R session and the bare "y" answered the
# save-workspace prompt.
q()
y
|
7fb2c1bb2055a9f92e38a1c0e84dc0d0aeb5552a | a3514194e5bcb9eac1c7df6c1f00b66cf48c2546 | /man/grapher_line.Rd | 0da555f8b1c45285767df89c3605aec1e0137b31 | [
"MIT"
] | permissive | piersyork/owidGrapher | 066e1888adadf5b51e487bdfee5dd2617cba3963 | 463642a4532201cee01f454b88e6fc2ac20724ce | refs/heads/main | 2023-08-07T12:36:35.731238 | 2021-09-13T18:02:14 | 2021-09-13T18:02:14 | 404,801,520 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 558 | rd | grapher_line.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/owid_grapher.R
\name{grapher_line}
\alias{grapher_line}
\title{Add a line graph to the grapher}
\usage{
grapher_line(
grapher,
selected = c("United Kingdom", "France", "Spain", "Ireland"),
change_selected = TRUE
)
}
\arguments{
\item{grapher}{An object of class "grapher".}
\item{selected}{The entities displayed when the graph first loads.}
\item{change_selected}{Allow the entities to be changed from within the graph.}
}
\description{
Add a line graph to the grapher
}
|
d2825cb6cf8e5c304013ebfd0ed498d36d16fbd7 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/ConR/examples/EOO.computing.Rd.R | 96432184ed01171b77a47737186200e7b151655a | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 377 | r | EOO.computing.Rd.R | library(ConR)
# Auto-extracted example script (genthat) for ConR::EOO.computing (Extent of
# Occurrence); the example calls themselves sit inside a "Not run" block
# (##D lines) and never execute.
### Name: EOO.computing
### Title: Extent of Occurrences
### Aliases: EOO.computing
### ** Examples
# Example occurrence data and a land polygon shipped with ConR.
data(dataset.ex)
data(land)
## Not run: 
##D EOO <- EOO.computing(dataset.ex)
##D 
##D ## This exclude areas outside of land (i.e. ocean) for EOO computation
##D EOO <- EOO.computing(dataset.ex, 
##D       exclude.area=TRUE, country_map=land)
## End(Not run)
|
51468c15eae24c8736298aa621a6d64c05be137b | 6edafc6ceeb25871f99475ba6816b27842c516f7 | /man/basic_probes.Rd | fb73bbf50a2dd67a5f97924cf2c24e752ddc1b3b | [] | no_license | ashtonbaker/pomp | 194c5a9349ea3d42661353730ed85bce10c40aac | 01eecebcb5394ecb0ea755e5decf1e8100f7379a | refs/heads/master | 2021-01-22T01:42:46.973424 | 2016-08-26T16:44:58 | 2016-08-26T16:44:58 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,956 | rd | basic_probes.Rd | \name{Probe functions}
\title{Some useful probes for partially-observed Markov processes}
\alias{Probe functions}
\alias{probe functions}
\alias{basic.probes}
\alias{probe.mean}
\alias{probe.median}
\alias{probe.var}
\alias{probe.sd}
\alias{probe.period}
\alias{probe.quantile}
\alias{probe.acf}
\alias{probe.ccf}
\alias{probe.marginal}
\alias{probe.nlar}
\description{
Several simple and configurable probes are provided with in the package.
These can be used directly and as templates for custom probes.
}
\usage{
probe.mean(var, trim = 0, transform = identity, na.rm = TRUE)
probe.median(var, na.rm = TRUE)
probe.var(var, transform = identity, na.rm = TRUE)
probe.sd(var, transform = identity, na.rm = TRUE)
probe.marginal(var, ref, order = 3, diff = 1, transform = identity)
probe.nlar(var, lags, powers, transform = identity)
probe.acf(var, lags, type = c("covariance", "correlation"),
transform = identity)
probe.ccf(vars, lags, type = c("covariance", "correlation"),
transform = identity)
probe.period(var, kernel.width, transform = identity)
probe.quantile(var, prob, transform = identity)
}
\arguments{
\item{var, vars}{
character; the name(s) of the observed variable(s).
}
\item{trim}{
the fraction of observations to be trimmed (see \code{\link{mean}}).
}
\item{transform}{
transformation to be applied to the data before the probe is computed.
}
\item{na.rm}{
if \code{TRUE}, remove all NA observations prior to computing the probe.
}
\item{kernel.width}{
width of modified Daniell smoothing kernel to be used in power-spectrum computation:
see \code{\link{kernel}}.
}
\item{prob}{
a single probability; the quantile to compute: see \code{\link{quantile}}.
}
\item{lags}{
In \code{probe.ccf}, a vector of lags between time series.
Positive lags correspond to \code{x} advanced relative to \code{y};
negative lags, to the reverse.
In \code{probe.nlar}, a vector of lags present in the nonlinear autoregressive model that will be fit to the actual and simulated data.
See Details, below, for a precise description.
}
\item{powers}{
the powers of each term (corresponding to \code{lags}) in the the nonlinear autoregressive model that will be fit to the actual and simulated data.
See Details, below, for a precise description.
}
\item{type}{
Compute autocorrelation or autocovariance?
}
\item{ref}{
empirical reference distribution.
Simulated data will be regressed against the values of \code{ref}, sorted and, optionally, differenced.
The resulting regression coefficients capture information about the shape of the marginal distribution.
A good choice for \code{ref} is the data itself.
}
\item{order}{
order of polynomial regression.
}
\item{diff}{
order of differencing to perform.
}
\item{\dots}{
Additional arguments to be passed through to the probe computation.
}
}
\value{
A call to any one of these functions returns a probe function, suitable for use in \code{\link{probe}} or \code{\link{probe.match}}.
That is, the function returned by each of these takes a data array (such as comes from a call to \code{\link{obs}}) as input and returns a single numerical value.
}
\details{
Each of these functions is relatively simple.
See the source code for a complete understanding of what each does.
\describe{
\item{\code{probe.mean}, \code{probe.median}, \code{probe.var}, \code{probe.sd}}{
return functions that compute the mean, median, variance, and standard deviation of variable \code{var}, respectively.
}
\item{\code{probe.period}}{
returns a function that estimates the period of the Fourier component of the \code{var} series with largest power.
}
\item{\code{probe.marginal}}{
returns a function that
regresses the marginal distribution of variable \code{var} against the reference distribution \code{ref}.
If \code{diff>0}, the data and the reference distribution are first differenced \code{diff} times and centered.
Polynomial regression of order \code{order} is used.
This probe returns \code{order} regression coefficients (the intercept is zero).
}
\item{\code{probe.nlar}}{
      returns a function that
      fits a nonlinear (polynomial) autoregressive model to the univariate series (variable \code{var}).
Specifically, a model of the form \eqn{y_t = \sum \beta_k y_{t-\tau_k}^{p_k}+\epsilon_t}{y[t] = \sum beta[k] y[t-tau[k]]^p[k]+e[t]} will be fit, where \eqn{\tau_k}{tau[k]} are the \code{lags} and \eqn{p_k}{p[k]} are the \code{powers}.
The data are first centered.
This function returns the regression coefficients, \eqn{\beta_k}{beta[k]}.
}
\item{\code{probe.acf}}{
returns a function that,
if \code{type=="covariance"}, computes the autocovariance of variable \code{var} at lags \code{lags};
if \code{type=="correlation"}, computes the autocorrelation of variable \code{var} at lags \code{lags}.
}
\item{\code{probe.ccf}}{
returns a function that,
if \code{type=="covariance"}, computes the cross covariance of the two variables named in \code{vars} at lags \code{lags};
if \code{type=="correlation"}, computes the cross correlation.
}
\item{\code{probe.quantile}}{
returns a function that estimates the \code{prob}-th quantile of variable \code{var}.
}
}
}
\references{
B. E. Kendall, C. J. Briggs, W. M. Murdoch, P. Turchin, S. P. Ellner, E. McCauley, R. M. Nisbet, S. N. Wood
Why do populations cycle? A synthesis of statistical and mechanistic modeling approaches,
Ecology, 80:1789--1805, 1999.
S. N. Wood
Statistical inference for noisy nonlinear ecological dynamic systems,
Nature, 466: 1102--1104, 2010.
}
\author{
Daniel C. Reuman (d.reuman at imperial dot ac dot uk)
Aaron A. King (kingaa at umich dot edu)
}
\seealso{
\link{pomp}
}
\keyword{ts}
|
5e6dceafd2b444b8cd21db76886fca06d4edc508 | 593b129c6fdcb8335cf85680def6e1ffb031ca5c | /05-Model_parameters_table.R | 22bae3707b5be10f0120d621cbf60b1291d4de74 | [] | no_license | qureshlatif/Northern-Sierra-Woodpeckers | 4d11e56311f490140d7f6315e051f62de2bfc715 | 6449a0eadd339b6ada28776a9091949d7deaf3e5 | refs/heads/master | 2021-01-20T08:01:13.466686 | 2020-02-27T16:21:30 | 2020-02-27T16:21:30 | 90,082,572 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,619 | r | 05-Model_parameters_table.R | setwd("F:/research stuff/FS_PostDoc/consult_&_collaborate/PtBlue_Sierra/")
#################################################
# Tabulate descriptive statistics of predictors #
#################################################
# Builds a parameter table ("estimate (SE)" with significance stars) for the
# eight fitted woodpecker models and writes it to Model_param_table.csv.
library(dplyr)
library(R.utils)   # loadObject()
library(stringr)   # library() rather than require(): fail fast if missing

load("Data_compiled.RData")

# Fitted model objects: one per species (BBWO, HAWO, WHWO, NOFL) for each of
# the two model types (RS, and CMB tabulated under the "CB." prefix).
RS.BBWO <- loadObject("Model_RS_BBWO")
RS.HAWO <- loadObject("Model_RS_HAWO")
RS.WHWO <- loadObject("Model_RS_WHWO")
RS.NOFL <- loadObject("Model_RS_NOFL")
CB.BBWO <- loadObject("Model_CMB_BBWO")
CB.HAWO <- loadObject("Model_CMB_HAWO")
CB.WHWO <- loadObject("Model_CMB_WHWO")
CB.NOFL <- loadObject("Model_CMB_NOFL")

spp <- c("BBWO", "HAWO", "WHWO", "NOFL")
cols <- c(paste0("RS.", spp), paste0("CB.", spp))

# Union of coefficient names across all models, in first-seen order.
pars <- unique(unlist(lapply(cols, function(nm) names(coef(get(nm))))))

# One cell per parameter x model; parameters absent from a model stay "".
out <- matrix("", nrow = length(pars), ncol = length(cols))
dimnames(out) <- list(pars, cols)

for (j in seq_along(cols)) {        # seq_along(); the old loop index `c` shadowed base::c()
  m <- get(cols[j])                 # get() replaces eval(as.name(...))
  coefs <- summary(m)$coefficients  # rownames keep par and value vectors aligned
  par <- rownames(coefs)
  p <- coefs[, "Pr(>|z|)"]
  stars <- character(length(p))     # renamed: `signif` shadowed base::signif()
  stars[which(p <= 0.1)] <- "*"     # which() drops any NA p-values safely
  stars[which(p <= 0.05)] <- "**"
  out[par, cols[j]] <- paste0(round(coefs[, "Estimate"], digits = 2),
                              " (", round(coefs[, "Std. Error"], digits = 2), ")",
                              stars)
}

write.csv(out, "Model_param_table.csv")
|
0ad05fb47dddc261197b93b0c163bc6cef575e7b | 1545a9a83a534c13efb9788ed255a94861579d3f | /pca.r | 217b2080d0c48aec4109d1b35c806096f326b5a5 | [] | no_license | sgkamal/Data-Mining | 5d0d04f81e9fe6881273858f2a287fa331a90012 | 45522ebf3f597bee806de2c5a186742a4eae3dc3 | refs/heads/master | 2020-03-19T02:21:30.369510 | 2018-05-31T19:35:47 | 2018-05-31T19:35:47 | 135,621,548 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,073 | r | pca.r | #demonstration of principal components to remove redundant features in data matrix
## Demonstration of principal components: use the eigendecomposition (via the
## SVD) of the sample covariance matrix to find independent directions in a
## data matrix. The iris load is vestigial -- X is regenerated with random
## data below -- and only sets the apparent dimensions.
data('iris')
## NOTE: as.matrix() on the full iris data.frame gives a *character* matrix
## (the Species column is a factor); harmless here because X is overwritten,
## but drop column 5 before using the actual iris values.
X <- as.matrix(iris)   # the apparent dimension of the data ... you'll see
X
r <- ncol(X)           # the real dimension (number of features)
n <- nrow(X)           # number of data points
## Replace X by an n x r matrix of independent Uniform(0, 1) draws so every
## column is genuinely independent of the others.
X <- matrix(runif(n * r), nrow = n, ncol = r)
X
Y <- scale(X)                 # centre (and rescale) each column
S <- t(Y) %*% Y / (n - 1)     # the sample covariance matrix
dec <- svd(S)                 # S = U D U^t; named `dec`, not `svd`, to avoid shadowing base::svd()
d <- dec$d                    # d is diag(D): the variances along each principal direction
d
length(d)                     # r of them (d is a plain vector, so ncol(d) would be NULL)
U <- dec$u                    # U are the "loadings" or independent directions
Z <- Y %*% U                  # transform Y to a new matrix that removes the redundancy
pairs(Z)
# S is symmetric positive semi-definite, so its SVD coincides with its
# eigendecomposition and every entry of d is >= 0.
|
337e248dd5adf9dfdaa19417c844172a52dc34ed | 59a11a2b13b2cda5eec0d6c27710477b458cc2c9 | /210202_excel 불러오기_miju.R | c196b5f807d1e63d3e224068840bf82b8675283f | [] | no_license | mijoo159/scnu_R_class | 766e033ff0070647abe1e7efcfa960c7dfaddbe2 | 8f407838ba7062461433b4bfc297486b563d69d7 | refs/heads/master | 2023-03-02T20:01:30.427454 | 2021-02-05T08:21:09 | 2021-02-05T08:21:09 | 334,851,709 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 129 | r | 210202_excel 불러오기_miju.R | # 엑셀 불러오기
## Load an Excel worksheet with readxl.
# Install readxl only when it is missing: the previous unconditional
# install.packages() call re-downloaded the package on every run.
if (!requireNamespace("readxl", quietly = TRUE)) {
  install.packages("readxl")
}
library(readxl)
test <- read_excel("./teacher/Data/excel_exam.xlsx")
View(test)
|
1772a364d4b17739a3add3fd4a315abc308df5b8 | 2ba6f0a982c3092e70de12fff4ccac047feecab0 | /pkg/tests/calcSpreadDV01.test.R | e2927a3ca4b08343319cd9e1d57f3d941c0f0ed0 | [] | no_license | kanishkamalik/CDS2 | 7b77cce8a5ae9fa367ff6818311ea8f745affbab | 18644a02c9e8031de42d548618717b09ed327053 | refs/heads/master | 2020-04-05T03:15:34.360446 | 2014-05-03T16:29:47 | 2014-05-03T16:29:47 | 19,469,784 | 3 | 2 | null | null | null | null | UTF-8 | R | false | false | 1,310 | r | calcSpreadDV01.test.R | ## calcSpreadDV01.test.R
library(CDS)

# Regression test for calcSpreadDV01(): recompute the spread DV01 of a fixed
# 5Y USD contract and compare it against a stored reference result.
#
# The commented block below shows how the stored reference (truth1) was
# generated; re-run it, including the save(), to refresh
# calcSpreadDV01.test.RData after an intentional change in behaviour.
## truth1 <- calcSpreadDV01(TDate = "2014-01-14",
##                          currency = "USD",
##                          maturity = "5Y",
##                          dccCDS = "Act/360",
##                          freqCDS = "1Q",
##                          stubCDS = "F",
##                          badDayConvCDS = "F",
##                          calendar = "None",
##                          parSpread = 32,
##                          couponRate = 100,
##                          recoveryRate = 0.4,
##                          notional = 1e7)
## save(truth1, file = "calcSpreadDV01.test.RData")
# Loads truth1, the stored reference value.
load("calcSpreadDV01.test.RData")
# Same contract parameters as the reference run above.
result1 <- calcSpreadDV01(TDate = "2014-01-14",
                          currency = "USD",
                          maturity = "5Y",
                          dccCDS = "Act/360",
                          freqCDS = "1Q",
                          stubCDS = "F",
                          badDayConvCDS = "F",
                          calendar = "None",
                          parSpread = 32,
                          couponRate = 100,
                          recoveryRate = 0.4,
                          notional = 1e7)
# all.equal() compares with numeric tolerance; stopifnot() aborts on any
# discrepancy.
stopifnot(all.equal(result1, truth1))
|
27ce2289710b03bf5f6388f7fc6ed06bfa332f57 | 423acf0de71a2a6be96b52dec91f8484e7b4990e | /man/pool_estimated_probs.Rd | 26c9865885e17dbb3f1c3014fd5e2e5aa8114522 | [] | no_license | cran/NPBayesImputeCat | 0a7bb36a5eb0ca56ae516a558077611576e57d00 | 1f7f89a1076edc4dfc10a31543c9e42277a7554b | refs/heads/master | 2022-10-14T10:42:56.975697 | 2022-10-03T12:30:02 | 2022-10-03T12:30:02 | 158,117,009 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 659 | rd | pool_estimated_probs.Rd | \name{pool_estimated_probs}
\alias{pool_estimated_probs}
\title{
Pool probability estimates from imputed or synthetic datasets
}
\description{
Pool probability estimates from imputed or synthetic datasets
}
\usage{
pool_estimated_probs(ComputeProbsResults, method =
c("imputation", "synthesis_full", "synthesis_partial"))
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{ComputeProbsResults}{output from the compute_probs function}
\item{method}{choose between "imputation", "synthesis_full", "synthesis_partial"}
}
\value{Results: a list of marginal and joint probability results after combining rules}
|
727c1c6da67d75bb81ea77ac4a0c64f3f7744a89 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/mondate/examples/YearQuartersFormat.rd.R | a5748784f6c76758921f1bf28b40b75371ae89d7 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 250 | r | YearQuartersFormat.rd.R | library(mondate)
# Auto-extracted example (genthat) for mondate::YearQuartersFormat, a
# formatFUN that renders mondate values as year-quarter labels.
### Name: YearQuartersFormat
### Title: Formatting Functions for mondate Objects
### Aliases: YearQuartersFormat
### ** Examples
# mondate(1:12) gives the month-end dates of the first 12 months of 2000;
# printing b shows them formatted by YearQuartersFormat.
b <- mondate(1:12, formatFUN = YearQuartersFormat) # end of first 12 months of 2000
b
|
a045e61772798ddad6a0765a2954556bf6b81797 | e234e6e2eeb20cdc42e144c61d503cc266eb462b | /man/get_c_stat.Rd | f15a0ab59315c2b882c65e21548481e0b2a8ca4d | [] | no_license | tinyheero/survutils | 5f12272c97d99d0646104a8de5ce8eb5a5760a02 | 7676954de71aff49d99bc8695d7d83ad44bc2e44 | refs/heads/master | 2021-07-15T08:38:29.707595 | 2018-07-21T15:05:42 | 2018-07-21T15:05:42 | 39,530,728 | 11 | 3 | null | 2020-07-08T21:53:03 | 2015-07-22T21:15:11 | R | UTF-8 | R | false | true | 1,031 | rd | get_c_stat.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_c_stat.R
\name{get_c_stat}
\alias{get_c_stat}
\title{Calculate C-statistics}
\usage{
get_c_stat(in.df, endpoint, endpoint.code, prog.factor, tau.val)
}
\arguments{
\item{in.df}{data.frame containing all the input data.}
\item{endpoint}{Column name of endpoint.}
\item{endpoint.code}{Column name of endpoint code.}
\item{prog.factor}{Column name of the prognostic factor to test.}
\item{tau.val}{Vector of tau values to be used for C-statistics inference.}
}
\value{
data.frame containing the c-statistic, 95% CI, and standard error.
}
\description{
Wrapper around the Inf.Cval function from the survC1 R package to calculate
C-statistics.
}
\examples{
# Example taken from survC1
\dontrun{
library("survival")
in.df <- survC1::CompCase(pbc[1:200, c(2:4,10:14)])
in.df[, 2] <- as.numeric(in.df[,2]==2)
tau <- 365.25*8
prog.factor <- c("trt", "edema", "bili", "chol", "albumin", "copper")
get_c_stat(in.df, "time", "status", prog.factor, tau)
}
}
|
bc1407e714a0819f0b2f7ed0b17b1e52dcde0006 | 09c2196beab7422bda070c28afd41dec2a9d094d | /tests/testthat/test-weightFunctions.R | ce3e1ae4940f520537b67df71fab35ce954b34e3 | [
"Apache-2.0"
] | permissive | sverchkov/SelfControlledCaseSeries | c37964ba69f675da217fd93c8837487ed088b529 | 920493cac367e1f8812bafd0a72f777d53f79e41 | refs/heads/master | 2020-03-28T02:31:13.046277 | 2018-03-19T16:43:42 | 2018-03-19T16:43:42 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,677 | r | test-weightFunctions.R | library("testthat")
# The code below is taken verbatim (with permission) from the SCCS package by
# Yonas Ghebremichael-Weldeselassie, Heather Whitaker, and Paddy Farrington.
### Functions used to calculate the weights to be used as offset in the new model ###
#--------------------------------------------------------#
# Weight function for Exponential- Weibull (Age) #
# mixture Model #
#--------------------------------------------------------#
# p<-p_ewad2
# Reference implementation (verbatim from the SCCS package -- see file
# header; deliberately NOT restyled, so it remains an independent ground
# truth for the package's C implementation exercised via integrate() below).
#
# Exponential--Weibull mixture weight, "(Age)" variant: the thetaB and gamma0
# linear predictors use log(astart), the age at the start of observation.
#
#   t       - integration variable (integrate() sweeps it over [start, end])
#   p       - packed parameter vector; consecutive blocks of length(Dmatrix)
#             hold: thetaA; thetaB intercept and log(astart) slope; eta
#             intercept and t slope; gamma0 intercept and log(astart) slope
#   present - 0/1 switch selecting the log-density term (0) or the
#             log-survival term (1) in `val` below
#   astart  - age at start of observation
#   aend    - age at end of observation
#   Dmatrix - design vector; which.max(Dmatrix) picks the active category
wsmall_ewad2<-function(t,p, present,astart,aend, Dmatrix){
thetaA <- p[which.max(Dmatrix)]
thetaB <- p[(length(Dmatrix))+ (which.max(Dmatrix))] + p[2*(length(Dmatrix))+ (which.max(Dmatrix))]*(log(astart))
eta <- p[3*(length(Dmatrix))+ (which.max(Dmatrix))] + p[4*(length(Dmatrix))+ (which.max(Dmatrix))]*t
gamma0 <- p[5*(length(Dmatrix))+ (which.max(Dmatrix))] + p[6*(length(Dmatrix))+ (which.max(Dmatrix))]*(log(astart))
lamA <-(exp(-thetaA)) # 1/rho in the paper
lamB <-(exp(-thetaB)) # 1/mu
pi0 <-(exp(eta)/(1+exp(eta))) # pi
nu0 <-(exp(gamma0)) # nu
# Log of a two-component mixture: pi0 * Exponential(lamA) on (aend - t) plus
# (1 - pi0) * Weibull(nu0, lamB) conditioned on reaching t. Exactly one of
# the two log terms is active because present is 0 or 1.
val <- ((1-present)*log(pi0*lamA*exp(-lamA*(aend-t))+
(1-pi0)*nu0*lamB*((aend*lamB)^(nu0-1))*exp(-((aend*lamB)^nu0-(t*lamB)^nu0))) +
present *log(pi0*exp(-lamA*(aend-t))+
(1-pi0)*exp(-((aend*lamB)^nu0-(t*lamB)^nu0))))
exp(val)
}
#--------------------------------------------------------------#
# Weight function for Exponential- Weibull (Interval) #
# mixture Model #
#--------------------------------------------------------------#
# p<-p_ewid2
# Exponential--Weibull mixture weight, "(Interval)" variant (verbatim SCCS
# reference, as above): the thetaB and gamma0 linear predictors use log(t)
# rather than log(astart), and the mixture acts on the interval
# int = aend - t. Note there is no astart argument in this variant.
wsmall_ewid2<-function(t, p, present, aend, Dmatrix){
thetaA <- p[which.max(Dmatrix)]
thetaB <- p[(length(Dmatrix))+ (which.max(Dmatrix))] + p[2*(length(Dmatrix))+ (which.max(Dmatrix))]*(log(t))
eta <- p[3*(length(Dmatrix))+ (which.max(Dmatrix))] + p[4*(length(Dmatrix))+ (which.max(Dmatrix))]*t
gamma0 <- p[5*(length(Dmatrix))+ (which.max(Dmatrix))] + p[6*(length(Dmatrix))+ (which.max(Dmatrix))]*(log(t))
lamA<-exp(-thetaA) # 1/rho in the paper
lamB<-exp(-thetaB) # 1/mu
pi0 <-exp(eta)/(1+exp(eta)) # pi
nu0<-exp(gamma0) # nu
int<-aend-t
# present == 0: mixture log-density of the interval; present == 1: mixture
# log-survival beyond the interval.
val<- ((1-present)*log(pi0*lamA*exp(-lamA*int)+
(1-pi0)*nu0*lamB*((int*lamB)^(nu0-1))*exp(-((int*lamB)^nu0))) +
present *log(pi0*exp(-lamA*int)+
(1-pi0)*exp(-((int*lamB)^nu0))))
exp(val)
}
#--------------------------------------------------------#
# Weight function for Exponential- Gamma (Age) #
# mixture Model #
#--------------------------------------------------------#
# p<-p_egad2
# Exponential--Gamma mixture weight, "(Age)" variant (verbatim SCCS
# reference): same structure as wsmall_ewad2 but with a Gamma(nu0, rate0)
# component in place of the Weibull; the Gamma terms are conditioned on
# reaching t by dividing by pgamma(t, ..., lower.tail = F).
wsmall_egad2 <- function(t,p,present,astart,aend,Dmatrix){
thetaA <- p[which.max(Dmatrix)]
thetaB <- p[(length(Dmatrix))+ (which.max(Dmatrix))] + p[2*(length(Dmatrix))+ (which.max(Dmatrix))]*(log(astart))
eta <- p[3*(length(Dmatrix))+ (which.max(Dmatrix))] + p[4*(length(Dmatrix))+ (which.max(Dmatrix))]*t
gamma0 <- p[5*(length(Dmatrix))+ (which.max(Dmatrix))] + p[6*(length(Dmatrix))+ (which.max(Dmatrix))]*(log(astart))
lamA <-exp(-thetaA) # 1/rho in the paper
lamB <-exp(-thetaB) # 1/mu
pi0 <-exp(eta)/(1+exp(eta)) # pi
nu0 <-exp(gamma0) # nu
rate0 <-nu0*lamB
# Earlier version without the underflow guard, kept for reference:
# val<- ((1-present)*log(pi0*lamA*exp(-lamA*(aend-t))+
#                 (1-pi0)*dgamma(aend,shape=nu0,rate=rate0)/pgamma(t,shape=nu0,rate=rate0,lower.tail=F)) +
#        present*log(pi0*exp(-lamA*(aend-t))+
#                 (1-pi0)*pgamma(aend,shape=nu0,rate=rate0,lower.tail=F)/pgamma(t,shape=nu0,rate=rate0,lower.tail=F)))
# The ifelse() calls below substitute 1e-9 when the Gamma survival at t
# underflows to exactly 0, avoiding division by zero.
val<- ((1-present)*log(pi0*lamA*exp(-lamA*(aend-t))+
(1-pi0)*dgamma(aend,shape=nu0,rate=rate0)/ifelse(pgamma(t,shape=nu0,rate=rate0,lower.tail=F)==0,0.000000001, pgamma(t,shape=nu0,rate=rate0,lower.tail=F))) +
present *log(pi0*exp(-lamA*(aend-t))+
(1-pi0)*pgamma(aend,shape=nu0,rate=rate0,lower.tail=F)/ifelse(pgamma(t,shape=nu0,rate=rate0,lower.tail=F)==0, 0.000000001, pgamma(t,shape=nu0,rate=rate0,lower.tail=F))))
exp(val)
}
#--------------------------------------------------------#
# Weight function for Exponential- Gamma (Interval)#
# mixture Model #
#--------------------------------------------------------#
# p<-p_egid2
# Exponential--Gamma mixture weight, "(Interval)" variant (verbatim SCCS
# reference): Gamma component on the interval int = aend - t, with the
# thetaB and gamma0 linear predictors using log(t).
# NOTE: astart is declared but never used in this variant; the test below
# therefore omits it when calling integrate().
wsmall_egid2 <- function(t,p,present,astart,aend,Dmatrix) {
thetaA <- p[which.max(Dmatrix)]
thetaB <- p[(length(Dmatrix))+ (which.max(Dmatrix))] + p[2*(length(Dmatrix))+ (which.max(Dmatrix))]*(log(t))
eta <- p[3*(length(Dmatrix))+ (which.max(Dmatrix))] + p[4*(length(Dmatrix))+ (which.max(Dmatrix))]*t
gamma0 <- p[5*(length(Dmatrix))+ (which.max(Dmatrix))] + p[6*(length(Dmatrix))+ (which.max(Dmatrix))]*(log(t))
lamA<-exp(-thetaA) # 1/rho in the paper
lamB<-exp(-thetaB) # 1/mu
pi0 <-exp(eta)/(1+exp(eta)) # pi
nu0<-exp(gamma0) # nu
rate0 <-nu0*lamB
int <-aend-t
# No conditioning denominator here: the interval density/survival is used
# directly (compare wsmall_egad2).
val<- ((1-present)*log(pi0*lamA*exp(-lamA*int)+
(1-pi0)*dgamma(int,shape=nu0,rate=rate0)) +
present *log(pi0*exp(-lamA*int)+
(1-pi0)*pgamma(int,shape=nu0,rate=rate0,lower.tail=F)))
exp(val)
}
# Cross-validates the package's internal C implementations (testEwad,
# testEwid, testEgad, testEgid) against numerical integration of the
# verbatim SCCS reference functions defined above. Each pair must agree to
# the stated tolerance over the window [start, end].
test_that("Weight functions match those in SCCS package", {
  # Shared inputs: a 7-element packed parameter vector, a present/censored
  # case (present = 1), observation from age 1 to 10, integration window
  # [1, 2], and a single-category design vector.
  p <- c(0.1,0.2,0.1,0.2,0.1,0.2,0.1)
  present <- 1
  astart <- 1
  aend <- 10
  start <- 1
  end <- 2
  Dmatrix <- c(1)
  w1 <- SelfControlledCaseSeries:::testEwad(p, present, astart, aend, start, end)
  w2 <- integrate(wsmall_ewad2, lower = start, upper = end, p = p, present = present, astart = astart, aend = aend, Dmatrix = Dmatrix)$value
  expect_equal(w1,w2, tolerance = 1E-6)
  # Interval variant: wsmall_ewid2 has no astart argument.
  w1 <- SelfControlledCaseSeries:::testEwid(p, present, astart, aend, start, end)
  w2 <- integrate(wsmall_ewid2, lower = start, upper = end, p = p, present = present, aend = aend, Dmatrix = Dmatrix)$value
  expect_equal(w1,w2, tolerance = 1E-6)
  w1 <- SelfControlledCaseSeries:::testEgad(p, present, astart, aend, start, end)
  w2 <- integrate(wsmall_egad2, lower = start, upper = end, p = p, present = present, astart = astart, aend = aend, Dmatrix = Dmatrix)$value
  expect_equal(w1,w2, tolerance = 1E-6)
  # wsmall_egid2 declares astart but never uses it, so it is not passed here.
  w1 <- SelfControlledCaseSeries:::testEgid(p, present, astart, aend, start, end)
  w2 <- integrate(wsmall_egid2, lower = start, upper = end, p = p, present = present, aend = aend, Dmatrix = Dmatrix)$value
  expect_equal(w1,w2, tolerance = 1E-6)
})
|
3c66441008ffde9891e59c8e445c4dd59c948740 | 9f55904bee65b9d9f86fc3aa4643aa5d2b2f3f77 | /R/Region__medianTotalFamilyIncome.R | d6fdccfe0d74b27737d8c12f1c401d087dbc22e7 | [] | no_license | EmilJeyaratnam/Census2016.DataPack | 6580fef077146b3324cd0e5c5646b313071622c5 | 822cb07ccb2892c5e121c0a289527e4c9a300ce2 | refs/heads/master | 2020-04-08T10:43:06.955803 | 2018-11-26T12:11:19 | 2018-11-26T12:11:19 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 690 | r | Region__medianTotalFamilyIncome.R | #' @title Median total family income
#' @name Region__medianTotalFamilyIncome
#' @details
#' \describe{
#' \item{\code{medianTotalFamilyIncome}}{The \strong{annual} income.
#' Median total household income is applicable to occupied private dwellings. It excludes households where at least one member aged 15 years and over did not state an income and households
#' where at least one member aged 15 years and over was temporarily absent on Census Night. It excludes 'Visitors only' and 'Other non-classifiable' households.
#' }
#' }
# NOTE(review): the \item key previously read `medianTotalPersonalIncome`,
# which matches neither the file topic nor the documented datasets below;
# corrected to `medianTotalFamilyIncome`. The body text still says
# "household income" and appears copy-pasted from the household-income
# variable -- confirm the intended Census wording for *family* income.
NULL
#' @rdname Region__medianTotalFamilyIncome
"LGA__medianTotalFamilyIncome"
#' @rdname Region__medianTotalFamilyIncome
"CED__medianTotalFamilyIncome"
|
28b0666ab3e84d6ef6f575ddb0db06db21c6a755 | f78121fe0d58d63c1f537077fc434bb112e8a565 | /2-R Programming/Week 2/cachematrix.R | 4308cc83bc86bfd106de96b83cc20bff53f9b32b | [] | no_license | ManmohitRekhi/-datasciencecoursera | 4fb237d5caa63b7006dc806d2073c66255a1a187 | 13ad2c7c12505f3c6db5edd9c02bbbe8307f0f67 | refs/heads/master | 2021-01-18T21:32:01.358620 | 2014-06-22T19:09:11 | 2014-06-22T19:09:11 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,647 | r | cachematrix.R | ## Put comments here that give an overall description of what your
## cachematrix.R: cache the inverse of a matrix so that repeated requests
## for the same inverse are served from memory instead of being recomputed.

## makeCacheMatrix: build a special "matrix" object that can cache its
## inverse. The matrix lives in `x` and the cached inverse in `inv`; both
## are captured by the closures below. Returns a list of four accessors:
##   set(y)          replace the stored matrix and invalidate the cache
##   get()           return the stored matrix
##   setinverse(i)   store a computed inverse in the cache
##   getinverse()    return the cached inverse, or NULL if none is cached
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL  # cached inverse; NULL means "not computed yet"
  set <- function(y) {
    x <<- y
    inv <<- NULL  # a new matrix makes any previously cached inverse stale
  }
  get <- function() x
  # Parameter renamed from `solve` to `inverse`: the old name shadowed
  # base::solve() and wrongly suggested the inverse is computed here
  # (it is only stored; cacheSolve() does the computing).
  setinverse <- function(inverse) inv <<- inverse
  getinverse <- function() inv
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## cacheSolve: return the inverse of the special "matrix" produced by
## makeCacheMatrix(). The inverse is computed (via solve()) at most once:
## on the first call it is computed and stored through x$setinverse(); on
## later calls the cached copy is returned directly, after announcing the
## cache hit with a message. Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinverse()
  if (is.null(cached)) {
    ## Cache miss: compute the inverse, remember it, and return it.
    fresh <- solve(x$get(), ...)
    x$setinverse(fresh)
    return(fresh)
  }
  ## Cache hit: reuse the stored inverse.
  message("getting cached data")
  cached
}
|
644aadf3d415cc25501b68aafdfd6303cd0c395f | 0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb | /cran/paws.machine.learning/man/sagemaker_delete_association.Rd | d3200244d2c61bc026249ed5c6772b479c173dc9 | [
"Apache-2.0"
] | permissive | paws-r/paws | 196d42a2b9aca0e551a51ea5e6f34daca739591b | a689da2aee079391e100060524f6b973130f4e40 | refs/heads/main | 2023-08-18T00:33:48.538539 | 2023-08-09T09:31:24 | 2023-08-09T09:31:24 | 154,419,943 | 293 | 45 | NOASSERTION | 2023-09-14T15:31:32 | 2018-10-24T01:28:47 | R | UTF-8 | R | false | true | 579 | rd | sagemaker_delete_association.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sagemaker_operations.R
\name{sagemaker_delete_association}
\alias{sagemaker_delete_association}
\title{Deletes an association}
\usage{
sagemaker_delete_association(SourceArn, DestinationArn)
}
\arguments{
\item{SourceArn}{[required] The ARN of the source.}
\item{DestinationArn}{[required] The Amazon Resource Name (ARN) of the destination.}
}
\description{
Deletes an association.
See \url{https://www.paws-r-sdk.com/docs/sagemaker_delete_association/} for full documentation.
}
\keyword{internal}
|
b1f737c3a1dd90d7cfbfde62b04b50a5120b5a0d | a4cbdeb1ec2975786dec9bec6aef0c768a7afe6d | /Test_R_script.R | 063cb975958988dff09f82859abe8f4d9bd0db3e | [
"MIT"
] | permissive | saspeak/Test_repo | 8aa9ca9f8a6d3c474cf2f1d56d84a7f90cd806d1 | 438f6c9828cb89a7f410241109b1dfe5e02c0f22 | refs/heads/main | 2023-02-21T05:17:19.269871 | 2021-01-22T14:34:29 | 2021-01-22T14:34:29 | 331,970,652 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 73 | r | Test_R_script.R | #### I made a new one as I made the first Privat oops
library(ggplot2)
|
d2ca92be8e361edcee6b7eac392a47b0b8a2ed1e | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/RAMpath/examples/plot.RAMpath.Rd.R | 4fbc61e135b459671a9db6458b000476c5cdeb87 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 480 | r | plot.RAMpath.Rd.R | library(RAMpath)
### Name: plot.RAMpath
### Title: Plot the path diagram according to RAM path and bridges or Plot
### the vector field for the bivariate latent change score model
### Aliases: plot.RAMpath plot plot.blcs
### ** Examples
# Load the example dataset bundled with RAMpath.
data(ex3)
# Fit a bivariate latent change score (BLCS) model using columns 1:6 and 7:12.
test.blcs<-ramBLCS(ex3, 1:6, 7:12, ram.out=TRUE)
# Draw the model's vector field over (0,80) x (0,80); plot() does the same
# via the plot.blcs method.
ramVF(test.blcs, c(0,80),c(0,80), length=.05, xlab='X', ylab='Y',scale=.5, ninterval=9)
plot(test.blcs, c(0,80),c(0,80), length=.05, xlab='X', ylab='Y',scale=.5, ninterval=9)
|
c14cc13721b2eaca8775d13c3bd40243ee54cea5 | 22d1d14435a9f23b1ad657a0172f86ed9b7b5292 | /Statistiques/TP1/tp1.R | 437fed2805a9b044e2a296d88ca745d40477440b | [] | no_license | Lizeem/R | 244360dc89422a46bbdf769f97d782b52bef16b3 | f5392252c2a60f4b10426b3176ef27eec6e6880e | refs/heads/master | 2020-12-22T14:26:02.604846 | 2020-03-06T08:44:53 | 2020-03-06T08:44:53 | 236,822,702 | 2 | 0 | null | null | null | null | ISO-8859-1 | R | false | false | 2,405 | r | tp1.R | # commentaire
help(sort)
# Vectors
ages <- c(28,25,23,24,26,23,21,22,24,29,24,26,31,28,27,24,23,25,27,25,24,21,24,23,25,31,28,27,24,23)
ages
seq (2,6)
seq(1,10, by=0.5)
vecteur1 <- (2.5)
vecteur2 <- (2.6)
vecteur3 <- c(vecteur1, vecteur2)
# Indexing past the end yields NA
vecteur1[2]
x <-0
# Logical subsetting: keep elements greater than x
vecteur1[vecteur1 >x]
length(vecteur1)
length(vecteur1[vecteur1 >x])
# Matrices
matrice <- matrix(c(1.5,2.1,3.2,1.6,1.4,1.5),nr = 3, nc = 2)
matrice
matrice[1,1]
matrice[1,]
matrice[2:3,1:2]
# Lists
liste <- list("AH",55)
liste
# Data frames (tables)
tab <- read.table("trees.csv", sep=',', header = TRUE)
tab
tab <- read.table("trees.txt", sep='\t', header = TRUE)
tab
# header = TRUE: column names are on the first line, otherwise FALSE
names(tab)
ls(tab)
dim(tab)
summary(tab)
# NOTE(review): this creates a stray global variable, it is not passed to
# read.table -- presumably meant as a row.names argument; confirm intent
row.names = 1
tab
tab2 <- read.table("trees_modif.txt", sep='\t', header = TRUE)
tab2
tab[1,2]
tab[1,"HEIGHT"]
tab[1,]
tab[,"Girth"]
tab$Girth
# Row filtering by condition
tab[tab$Girth>12,]
tab[tab$Girth ==12,]
subset(tab,Girth ==12.9)
sub <- tab[tab$Girth ==12,]
sub
# attach() puts columns on the search path so they can be used by name
attach(tab)
detach(tab)
attach(tab2)
Girth
summary(tab)
ncol(tab)
nrow(tab)
# Descriptive statistics on the attached Girth column
mean(Girth)
median(Girth)
var(Girth)
sd(Girth)
mean(Girth, na.rm = TRUE)
# Graphics
plot(Species)
plot(Volume~Height)
plot(Girth~Species)
plot(x=Species, y=Volume, main= "Volume des arbres en fonction de leur hauteur", xlab = "Hauteur", ylab ="Volume")
hist(Girth)
hist(Girth,freq=F)
boxplot(Girth)
boxplot(Girth~Species)
# NOTE(review): c(1,) is malformed (trailing comma, missing second value);
# these par() calls error -- probably intended par(mfrow=c(1,3)) or similar
par(mfrow=c(1,))
par(mfrow=c(1,))
boxplot(Girth, Height, Volume)
box<-c("Girth","Height","Volume")
boxplot(Girth,Height,Volume,names=box)
## Exercise
dim(herbicide)
# 5 variables: herbicide = qualitative, plant = qualitative, number of
# plants = discrete quantitative, survivors = bounded continuous
sans <-subset(herbicide,herbicide=="aucun")
herb1<-subset(herbicide, herbicide=="herbicide1")
herb2<-subset(herbicide, herbicide=="herbicide2")
herb3<-subset(herbicide, herbicide=="herbicide3")
données <- c("sans","herb1","herb2","herb3")
# Boxplots of survivor rates per treatment, with group means overlaid as points
boxplot(sans$survivants, herb1$survivants, herb2$survivants, herb3$survivants,main = "survivants", xlab = "herbicides", ylab ="taux de survivants",names = données, col=c("grey","skyblue","tomato","yellow"))
points(1,mean(sans$survivants),pch=24,col="white")
points(2,mean(herb1$survivants),pch=16, col="white")
points(3,mean(herb2$survivants),pch=18, col="white")
points(4,mean(herb3$survivants),pch=22,col="white")
|
e2fa8b03aa12fae1463b38e350fe809adbd3fc32 | 98bd6231799180bfda360a8c926258813a96dd52 | /Lab07/RUTTEN_Lab07.R | e6e778e12f697c4e7783546866f6145432b9416b | [] | no_license | Jeru2124/CompBioLabsAndHomework | e5deb8fd1b79e58a926b6c411e5e676249c192c6 | 728008b638b4df4b80149453325e02ea5c0b3c54 | refs/heads/master | 2021-01-11T14:49:43.755391 | 2017-05-06T02:22:16 | 2017-05-06T02:22:16 | 80,226,683 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,384 | r | RUTTEN_Lab07.R | #PROBLEM 1: Fibonacci Sequence Loop
#function must return vector of the first n fibonacci numbers >=3
#include 2 arguments:function must work for any number including n<3 and
#check imput for - or non integer #'s, returning an appropriate message
# Return the first n Fibonacci numbers starting from n0 (default 0); the
# second term is always 1, matching the original rep(1, n) seeding.
#
# Args:
#   n0: value of the first term (default 0)
#   n:  how many terms to return; must be a single positive whole number
#
# Returns: a numeric vector of length n, or a character message when n is
# not a positive integer.
#
# Fixes vs. the original:
#   * the invalid-input message was built with paste() but discarded, and
#     execution continued with the bad n -- it is now returned;
#   * non-integer n was never checked despite the stated contract;
#   * n = 1 returned a length-2 vector c(n0, 0) instead of just n0.
FibFunc <- function(n0 = 0, n) {
  # Validate n: single, non-missing, positive whole number.
  if (length(n) != 1 || !is.numeric(n) || is.na(n) || n != round(n) || n <= 0) {
    return(paste(n, "is not a positive integer"))
  }
  # Seed: first term is n0; every later slot starts at 1, so the second
  # term stays 1 (same convention as the original implementation).
  dataFibonacci <- rep(1, n)
  dataFibonacci[1] <- n0
  if (n >= 3) {
    for (i in 3:n) {
      dataFibonacci[i] <- dataFibonacci[i - 1] + dataFibonacci[i - 2]
    }
  }
  # Returns the vector of the first n terms (correct for n = 1 and n = 2 too).
  dataFibonacci
}
#testing it out:
FibFunc(0,2)
#PROBLEM 2: Logistic Growth
#create a function that returns log growth data + give defaults
# Simulate discrete logistic population growth and plot the trajectory.
#
# Args:
#   n:       starting population size (default 2)
#   totGens: number of generations to simulate, including the first (default 10)
#   K:       carrying capacity (default 1000)
#   r:       intrinsic growth rate (default 0.5)
#
# Side effect: scatter-plots population size against generation index.
# Returns: numeric vector of length totGens with the population per generation.
LogGrowthFunc <- function(n=2,totGens=10,K=1000,r=0.5){
  # Slot 1 holds the starting size; the rest are overwritten by the recurrence.
  n <- rep(n, totGens)
  for (gen in 2:totGens) {
    prev <- n[gen - 1]
    # Logistic map: growth shrinks as prev approaches K (same grouping as
    # the recurrence n[t-1] + r*n[t-1]*((K - n[t-1])/K)).
    n[gen] <- prev + (r * prev * ((K - prev) / K))
  }
  plot(n, xlab="Number of Generations", ylab="Population Size" )
  return(n)
}
#testing it out:
LogGrowthFunc(2500,12,10000,0.8)
#PROBLEM 3:
#representing simple network data
# Convert an adjacency matrix into an edge list: one row per nonzero
# entry, giving its (row, column) indices in row-major order.
#
# Fixes vs. the original: the loops iterated over `1:nrow` / `1:ncol`,
# which are the base *functions* nrow/ncol rather than the matrix's
# dimensions, so the function errored; and the if-body was empty, so
# nothing was ever computed for the nonzero entries.
#
# Args:
#   myMatrix: numeric matrix; 0 means "no edge".
# Returns: an integer matrix with one (row, col) pair per nonzero entry,
#   or NULL when the matrix has no nonzero entries.
scaryFunc1 <- function(myMatrix){
  edges <- list()
  for (i in seq_len(nrow(myMatrix))) {
    for (j in seq_len(ncol(myMatrix))) {
      if (myMatrix[i, j] != 0) {
        edges[[length(edges) + 1]] <- c(i, j)
      }
    }
  }
  if (length(edges) == 0) {
    return(NULL)
  }
  do.call(rbind, edges)
}
# Small 3x3 adjacency matrix (filled column-wise) for exercising scaryFunc1.
mytestMatrix <- matrix(
c(0,1,1,1,0,0,1,0,0),
nrow=3,
ncol=3)
mytestMatrix
|
2d6f49ca43ac42e61f073720aef617e9f4f8501e | 3705643c14ad2dc479bc4151f428b3eb431c428b | /R/fig4.R | cf393ac386b6c406bf325940c9fd380f83e92a4e | [
"CC-BY-4.0",
"CC-BY-3.0"
] | permissive | zofibos837/Paper_MPPdiag | 7ff732ef9783d78111cf046d72402f881a4f2850 | 535ea23d8bcc1ac2e96389e57ecd37143e4c37fa | refs/heads/master | 2023-07-20T21:25:16.534643 | 2021-09-03T12:19:35 | 2021-09-03T12:19:35 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,421 | r | fig4.R | # genotype frequencies in individuals (with markers split by MAF)
library(broman)
library(qtl2)
# Per-individual genotype frequencies (AA/AB/BB), one matrix per MAF bin 1/8..4/8.
gf_ind <- readRDS("diag_cache/snp_freq_ind.rds")
# Render the same 2x2 panel of ternary plots twice: once to PDF, once to EPS.
for(type in c("pdf", "eps")) {
if(type=="pdf") pdf("../Figs/fig4.pdf", width=6.5, height=5.5, pointsize=12)
else postscript("../Figs/fig4.eps", width=6.5, height=5.5, pointsize=12, paper="special", horizontal=FALSE, onefile=FALSE)
par(mfrow=c(2,2), mar=c(0.6, 0.6, 2.1, 0.6))
# One ternary panel per minor-allele-frequency bin i/8.
for(i in 1:4) {
triplot(c("AA", "AB", "BB"))
title(main=paste0("MAF = ", i, "/8"), line=0.2)
mtext(side=3, adj=0, font=2, LETTERS[i], line=0.2)
# Observed per-individual frequencies (blue) and the Hardy-Weinberg
# expectation for this MAF (violet-red).
tripoints(gf_ind[[i]], pch=21, bg="lightblue", cex=0.6)
tripoints(c((1-i/8)^2, 2*i/8*(1-i/8), (i/8)^2), pch=21, bg="violetred", cex=0.7)
if(i>=3) { # label mouse with lowest het
wh <- which(gf_ind[[i]][,2] == min(gf_ind[[i]][,2]))
trilines(rbind(gf_ind[[i]][wh,,drop=FALSE],
gf_ind[[i]][wh,,drop=FALSE] + c(0.09, -0.06, -0.030)))
tripoints(gf_ind[[i]][wh,,drop=FALSE], pch=21, bg="lightblue", cex=0.6)
tritext(gf_ind[[i]][wh,,drop=FALSE] + c(0.095, -0.065, -0.030),
names(wh), adj=c(0, 1), cex=0.7)
}
# label other mice
# Heterozygosity threshold for labeling varies by panel.
if(i==1) {
lab <- rownames(gf_ind[[i]])[gf_ind[[i]][,2]>0.3]
}
else if(i==2) {
lab <- rownames(gf_ind[[i]])[gf_ind[[i]][,2]>0.48]
}
else if(i==3) {
lab <- rownames(gf_ind[[i]])[gf_ind[[i]][,2]>0.51]
}
else if(i==4) {
lab <- rownames(gf_ind[[i]])[gf_ind[[i]][,2]>0.6]
}
# Hand-tuned label offsets/justification per mouse so labels don't overlap.
for(ind in lab) {
if((i==1 && ind %in% c("F326","M392")) ||
(i==2 && ind %in% c("F326","M392","M388")) ||
(i==3 && ind == "M388") ||
(i==4 && ind == "F326")) {
tritext(gf_ind[[i]][ind,,drop=FALSE] + c(-0.015, 0, +0.015), ind, adj=c(1,0.5), cex=0.7)
} else if(i==3 && ind=="M392") {
tritext(gf_ind[[i]][ind,,drop=FALSE] + c(-0.025, 0.02, +0.005), ind, adj=c(1,0.5), cex=0.7)
} else if(i==3 && ind=="F326") {
tritext(gf_ind[[i]][ind,,drop=FALSE] + c(-0.01, -0.01, +0.02), ind, adj=c(1,0.5), cex=0.7)
} else if(i>1 && ind == "M405") {
tritext(gf_ind[[i]][ind,,drop=FALSE] + c(-0.008, 0.016, -0.008), ind, adj=c(0.5,0), cex=0.7)
} else {
tritext(gf_ind[[i]][ind,,drop=FALSE] + c(0.015, 0, -0.015), ind, adj=c(0,0.5), cex=0.7)
}
}
}
dev.off()
}
|
7f5522770c6dba8e80e0136ad864422a40c3dec2 | 33b409d36eeea285326b35035b3a4a1fdbb9b1fc | /man/property.Rd | 8cadc0296ad09be7d86258c1c1e3994f0c1d3bd2 | [
"MIT"
] | permissive | krlmlr/OOP-WG | a5fde3fec28f563c3177e401832477d726164c74 | 6bfa5bafc593c12e3ad95d35de2e131d03120487 | refs/heads/master | 2023-09-06T04:51:49.090980 | 2021-10-29T14:25:09 | 2021-10-29T14:25:09 | 422,904,541 | 0 | 0 | NOASSERTION | 2021-10-30T14:31:12 | 2021-10-30T14:31:11 | null | UTF-8 | R | false | true | 1,479 | rd | property.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/property.R
\name{property}
\alias{property}
\alias{property_safely}
\alias{property<-}
\alias{@}
\title{Get or set value of a property}
\usage{
property(object, name)
property_safely(object, name)
property(object, name, check = TRUE) <- value
object@name
}
\arguments{
\item{object}{An object from a R7 class}
\item{name}{The name of the parameter as a character. Partial matching
is not performed.}
\item{check}{If \code{TRUE}, check that \code{value} is of the correct type and run
\code{\link[=validate]{validate()}} on the object before returning.}
\item{value}{A replacement value for the parameter. The object is
automatically checked for validity after the replacement is done.}
}
\description{
\itemize{
\item \link{property} and \code{@} get the value of the given property, throwing an
error if the property doesn't exist for that object.
\item \link{property_safely} returns \code{NULL} if a property doesn't exist,
rather than throwing an error.
\item \link{property<-} and \verb{@<-} set a new value for the given property.
}
}
\examples{
horse <- new_class("horse", properties = list(
name = "character",
colour = "character",
height = "numeric"
))
lexington <- horse(colour = "bay", height = 15)
lexington@colour
property(lexington, "colour")
lexington@height <- 14
property(lexington, "height") <- 15
try(property(lexington, "age"))
property_safely(lexington, "age")
}
|
0186ae8d968a99bc44660e1cf4739425f89963d6 | 36d0c91ce570714903d27ed2b259575aec4ef2f7 | /man/SCE-internal.Rd | 957e7c6278595f3d8811d122926cd422cbfa0452 | [] | no_license | vd4mmind/SingleCellExperiment | c7e7d2bcec4d76f1a57faa836220b2eea100cd71 | 59a7e79cf8cbd3103f4d39536142ae318b61c3f5 | refs/heads/master | 2020-07-10T07:30:57.866369 | 2019-08-24T06:09:32 | 2019-08-24T06:09:32 | 204,205,190 | 1 | 0 | null | 2019-08-24T19:49:18 | 2019-08-24T19:49:17 | null | UTF-8 | R | false | false | 1,820 | rd | SCE-internal.Rd | \name{SCE internals}
\alias{int_colData}
\alias{int_elementMetadata}
\alias{int_metadata}
\alias{int_colData,SingleCellExperiment-method}
\alias{int_elementMetadata,SingleCellExperiment-method}
\alias{int_metadata,SingleCellExperiment-method}
\alias{int_colData<-}
\alias{int_elementMetadata<-}
\alias{int_metadata<-}
\alias{int_colData<-,SingleCellExperiment-method}
\alias{int_elementMetadata<-,SingleCellExperiment-method}
\alias{int_metadata<-,SingleCellExperiment-method}
\title{SCE internal methods}
\description{Methods to get or set internal fields from the SingleCellExperiment class.}
\usage{
\S4method{int_elementMetadata}{SingleCellExperiment}(x)
\S4method{int_elementMetadata}{SingleCellExperiment}(x) <- value
\S4method{int_colData}{SingleCellExperiment}(x)
\S4method{int_colData}{SingleCellExperiment}(x) <- value
\S4method{int_metadata}{SingleCellExperiment}(x)
\S4method{int_metadata}{SingleCellExperiment}(x) <- value
}
\arguments{
\item{x}{A SingleCellExperiment object.}
\item{value}{For \code{int_elementMetadata}, a \linkS4class{DataFrame} with number of rows equal to \code{nrow(x)}.
For \code{int_colData}, a DataFrame with number of rows equal to \code{ncol(x)}.
For \code{int_metadata}, a list.}
}
\details{
These functions are intended for package developers who want to add protected fields to a SingleCellExperiment.
They should \emph{not} be used by ordinary users of the \pkg{SingleCellExperiment} package.
Package developers intending to use these methods should read the development vignette for instructions.
}
\value{
A SingleCellExperiment object equivalent to \code{x} but with modified internal fields.
}
\seealso{
\code{\link{SingleCellExperiment}}
}
\examples{
example(SingleCellExperiment, echo=FALSE) # Using the class example
int_metadata(sce)$whee <- 1
}
|
722c0d0fdf1a0498ea5a7771790b315e84200264 | 0f70bfdefadc81a490f4a8e3f26bf22c5e387b09 | /tests/testthat/test-mutates.R | bd07c40c7b25eb0c9d37ee7baca58dfaa236a6ae | [
"MIT"
] | permissive | LCBC-UiO/MOAS | 1249458d53cf3044fcfe2c75a3f8ec2a02485fff | 043c2bc24dc0493a5de3b38c89bb7df36b86519c | refs/heads/master | 2023-08-31T04:50:56.920389 | 2020-08-14T12:06:56 | 2020-08-14T12:06:56 | 144,706,570 | 0 | 0 | null | 2018-11-05T12:31:48 | 2018-08-14T10:42:19 | HTML | UTF-8 | R | false | false | 1,665 | r | test-mutates.R |
# mutate_tp: Subject_Timepoint should count waves per subject across projects
test_that("mutate_tp works",{
# Four subjects; subject 1 has a repeated wave 3 across two projects.
dt <- data.frame(
CrossProject_ID = c(rep(1,4), rep(2,2),3,4),
Project_Name = c(rep("MemP", 5), rep("MemC", 3)),
Project_Wave = c(1:3, 3, 2, rep(1,3)),
Age = c(10:12, 12:13, 14, 16, 18)
)
dt2 <- mutate_tp(dt)
# Repeated wave 3 for subject 1 keeps the same timepoint (3, 3).
expect_equal( dt2$Subject_Timepoint,
c(1, 2, 3, 3, 1, 2, 1, 1))
# Original 4 columns plus the new Subject_Timepoint column.
expect_length(dt2, 5)
})
# mutate_mean_date ----
test_that("mutate_mean_date works", {
dt <- data.frame(
CrossProject_ID = c(rep(1,3), rep(2,2),3,4),
Project_Name = c(rep("MemP", 4), rep("MemC", 3)),
Project_Wave = c(1:3, 2, rep(1,3)),
Test_Date = c("12.02.2012", "22.05.2015", "03.10.2017",
NA, "12.02.2012", NA, "22.05.2015"),
MRI_Date = c("18.02.2012", "02.06.2015", "28.09.2017",
NA, NA, "22.05.2015", "30.05.2015"),
stringsAsFactors = FALSE
) %>%
mutate(MRI_Date = as.Date(MRI_Date, format="%d.%m.%Y"),
Test_Date = as.Date(Test_Date, format="%d.%m.%Y"))
dt2 <- mutate_mean_date(dt)
# Expected means expressed as days-since-epoch with class Date; rows with a
# single non-NA date fall back to that date.
expect_equal(dt2$Date, structure(
c(15385, 16582.5, 17439.5, 15382, 16582.5, 16577, 16581),
class = "Date"))
expect_equal(names(dt2), c("CrossProject_ID", "Project_Name",
"Project_Wave", "Test_Date",
"MRI_Date", "Date"))
# Dropping a required column should raise the "columns are missing" error.
expect_error(mutate_mean_date(select(dt, -Project_Name)),
"columns are missing")
})
test_that("mean_date works",{
# Non-Date (character) input must be rejected with a "numeric" error.
expect_error(mean_date("2012-08-10", "2012-08-05"),
"numeric")
# Midpoint of two Dates keeps Date class (fractional day allowed).
expect_equal(mean_date(as.Date("2012-08-10"), as.Date("2012-08-05")),
structure(15559.5, class = "Date"))
})
43c55e4b97ea3c7d305ffa3e2d0ac2f750db4b0a | d978a612b20798e1ba38630a314a21f2e622e583 | /Scripts/Figures_and_Tables/Figure_4_Beta_diversity_Bray.R | ac349cbc6c75b40fbcb7a8a3ea98b9a6842162fa | [] | no_license | wu-lab-uva/16S-rRNA-GCN-Predcition | df7056595881e75fec2f5334ff4de32ec304fc43 | 00e9e5f120cb552518a2781fd681f866ad51c00c | refs/heads/main | 2023-07-15T22:27:03.644669 | 2021-08-31T19:26:56 | 2021-08-31T19:26:56 | 399,828,036 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 13,090 | r | Figure_4_Beta_diversity_Bray.R | #
library(RasperGade16S)
library(phyloseq)
library(vegan)
library(ggplot2)
library(ggpubr)
# read in command arguments
arg = commandArgs(TRUE)
# setup parallel running
# arg[1] = requested core count; Windows lacks fork-based mclapply, so use 1.
if(Sys.info()["sysname"]=='Windows'){
numCores = 1
}else{
numCores = min(detectCores()-1,as.numeric(arg[1]))
if(is.na(numCores)) numCores = 1
}
# load data
cat("Loading data\n")
# NSTD (nearest sequenced taxon distance) bin edges; first "true" cutoff is 0.
NSTD.cutoff = 10^seq(-3,0,length.out = 10)[1:9]
true.NSTD.cutoff = c(0,NSTD.cutoff[-1])
# Reference 16S gene copy numbers per taxon.
trait = readRDS("Reference/homogeneous_data.RDS")$dat
# Per-cutoff NSTD values from cross-validation.
CV.adjusted.NSTD = readRDS("CV/CV.NSTD.RDS")
# Cross-validated gene copy number predictions (hsp results).
CV.res.summarized = readRDS("CV/GCN.PE.CV.RDS")
#
cat("Data loaded\n")
# simulate communities for RAD and beta diversity
sim.meta = readRDS("Sim/sim_2env_5otu_2f_Beta_meta.RDS")
CV.community.sim = readRDS("Sim/sim_2env_5otu_2f_Beta_data.RDS")
cat("Data simulated\n")
# Build per-cutoff relative-abundance tables (samples x OTUs) for both the
# gene counts and the true cell counts of each simulated batch; OTUs absent
# from a sample become 0 after the fill merge.
CV.sim.table = lapply(1:length(CV.community.sim),function(i){
mclapply(CV.community.sim[[i]],function(this.batch){
gene.table=as.data.frame(data.table::rbindlist(lapply(this.batch,function(x){
data.frame(t(x$gene)/sum(x$gene))
}),fill=TRUE))
cell.table=as.data.frame(data.table::rbindlist(lapply(this.batch,function(x){
data.frame(t(x$cell)/sum(x$cell))
}),fill=TRUE))
gene.table[is.na(gene.table)] = 0
cell.table[is.na(cell.table)] = 0
return(list(gene=gene.table,cell=cell.table))
},mc.cores = numCores)
})
# Precomputed downstream analyses of the simulations: random-forest feature
# tests, beta-diversity shifts, PERMANOVA results, differential abundance.
CV.sim.rf = readRDS("Sim/Sim_5_2rf_test.RDS")
CV.sim.beta = readRDS("Sim/Sim_5_2beta.RDS")
CV.sim.PERMANOVA = readRDS("Sim/Sim_5_2PERMANOVA.RDS")
CV.sim.diff = readRDS("Sim/Sim_5_2_diff.RDS")
i=1
j=1
adj.sim.meta = data.frame(sampleID = 1:dim(sim.meta)[1],environment = sim.meta$V1)
example.gene.physeq = phyloseq(sample_data(adj.sim.meta),
otu_table(CV.sim.table[[i]][[j]]$gene,taxa_are_rows = FALSE))
example.cell.physeq = phyloseq(sample_data(adj.sim.meta),
otu_table(CV.sim.table[[i]][[j]]$cell,taxa_are_rows = FALSE))
sample_names(example.gene.physeq) = paste0("Gene",sample_names(example.gene.physeq))
sample_names(example.cell.physeq) = paste0("Cell",sample_names(example.cell.physeq))
sample_data(example.gene.physeq)$type="Gene"
sample_data(example.cell.physeq)$type="Cell"
example.correct.physeq = example.gene.physeq
pred.GCN = round(CV.res.summarized[[i]][[j]]$hsp$x)
names(pred.GCN) = CV.res.summarized[[i]][[j]]$hsp$label
true.GCN = trait[CV.res.summarized[[i]][[j]]$hsp$label]
if(attr(example.gene.physeq@otu_table,which = "taxa_are_rows")){
correct.example.table = correct_abundance_table(abundance = t(example.gene.physeq@otu_table),
GCN = pred.GCN[rownames(example.gene.physeq@otu_table)])
}else{
correct.example.table = correct_abundance_table(abundance = example.gene.physeq@otu_table,
GCN = pred.GCN[colnames(example.gene.physeq@otu_table)])
}
example.correct.physeq@otu_table = otu_table(correct.example.table,taxa_are_rows = FALSE)
sample_names(example.correct.physeq) = paste0("Correct",sample_names(example.correct.physeq))
sample_data(example.correct.physeq)$type = "Correct"
example.combined.physeq = merge_phyloseq(example.cell.physeq,
example.gene.physeq,
example.correct.physeq)
example.combined.ord = ordinate(example.combined.physeq,"PCoA",distance = "bray")
correct.plot = plot_ordination(physeq = example.combined.physeq,
ordination = example.combined.ord,color = "environment")
#
example.plot.data = correct.plot$data
example.plot.data$environment = as.character(example.plot.data$environment)
example.arrow.data = data.frame(gene.PC1=example.plot.data$Axis.1[example.plot.data$type=="Gene"],
gene.PC2=example.plot.data$Axis.2[example.plot.data$type=="Gene"],
correct.PC1=example.plot.data$Axis.1[example.plot.data$type=="Correct"],
correct.PC2=example.plot.data$Axis.2[example.plot.data$type=="Correct"],
cell.PC1=example.plot.data$Axis.1[example.plot.data$type=="Cell"],
cell.PC2=example.plot.data$Axis.2[example.plot.data$type=="Cell"],
environment=example.plot.data$environment)
example.plot = ggplot()+
geom_point(mapping = aes(x=Axis.1,y=Axis.2,group=type,shape=type,color=environment),data = example.plot.data,
alpha=0.5)+
geom_segment(mapping = aes(x=cell.PC1,y=cell.PC2,xend=correct.PC1,yend=correct.PC2,color=environment),
linetype="dotted",alpha=0.5,data = example.arrow.data)+
geom_segment(mapping = aes(x=cell.PC1,y=cell.PC2,xend=gene.PC1,yend=gene.PC2,color=environment),
linetype="solid",alpha=0.5,data = example.arrow.data)+
coord_cartesian(xlim=c(-0.35,0.45),ylim = c(-0.4,0.4))+
scale_x_continuous(breaks = seq(-1,1,0.25))+
scale_shape_manual(values = c(Correct=16,Gene=17,Cell=15),
labels=c(Correct="Corrected abundance",
Gene="Gene abundance",
Cell="True cell abundance"))+
guides(shape=guide_legend(title="Method",order = 1),color=guide_legend(title = "Environment",order = 2))+
xlab("PC1")+ylab("PC2")+
theme(#legend.position = c(0.9,0.15),legend.box="vertical",
legend.position = c(0.7,0.08),legend.box="horizontal",
panel.background = element_blank(),
axis.line = element_line(),
legend.key.size = unit(0.1, 'in'), #change legend key size
legend.key.height = unit(0.1, 'in'), #change legend key height
legend.key.width = unit(0.1, 'in'), #change legend key width
legend.title = element_text(size=8), #change legend title font size
legend.text = element_text(size=6),
legend.background = element_blank())
#
# Drop the example-panel indices so the lapply loop variables below are clean.
rm(list=c("i","j"))
# Per-cutoff beta-diversity summary: how far the gene-based and corrected
# distance matrices are shifted from the true cell-based one, plus CI
# coverage and abundance-weighted mean NSTD per batch.
beta.data = lapply(1:length(CV.sim.beta),function(i){
good.sample = which(sapply(CV.sim.beta[[i]],function(x){!is.null(x$gene)}))
this.cutoff = CV.sim.beta[[i]][good.sample]
print(length(this.cutoff))
do.call(rbind,lapply(1:length(this.cutoff),function(j){
this.batch = this.cutoff[[j]]
total.shift = c(gene=total_shift(this.batch$gene,this.batch$cell,squared=FALSE),
correct=total_shift(this.batch$correct,this.batch$cell,squared=FALSE))
mean.shift = total.shift/dim(as.matrix(this.batch$cell))
# Between-environment distance: samples 1-10 vs 11-20 are the two groups.
mean.dist = mean(as.matrix(vegdist(as.matrix(this.batch$cell),
method = "euclidean"))[1:10,11:20])
mean.within.dist = mean(c(vegdist(as.matrix(this.batch$cell)[1:10,1:10],
method = "euclidean"),
vegdist(as.matrix(this.batch$cell)[11:20,11:20],
method = "euclidean")))
element.cp = c(gene=elementwise_coverage_dist(dist1 = this.batch$gene,CI = this.batch$CI,detail = FALSE),
cell=elementwise_coverage_dist(dist1 = this.batch$cell,CI = this.batch$CI,detail = FALSE))
# Abundance-weighted geometric-mean NSTD of each simulated community.
this.NSTD = sapply(CV.community.sim[[i]][[good.sample[j]]],function(x){
exp(sum(log(CV.adjusted.NSTD[[i]][2,names(x$cell)])*relative_abundance(x$cell)))
})
data.frame(shift=t(mean.shift),cp=t(element.cp),
NSTD = mean(this.NSTD),improvement=unname(1-total.shift[2]/total.shift[1]),
mean.pairwise.dist = mean.dist,mean.within.dist = mean.within.dist,
relative.shift = t(mean.shift)/mean.dist)
}))
})
#
# Per-cutoff PERMANOVA summary: R-squared from gene, cell, and corrected
# tables; absolute error of gene/corrected R2 vs the cell R2; whether each
# falls inside the corrected R2 confidence interval; and mean NSTD.
PERMANOVA.data = lapply(1:length(CV.sim.PERMANOVA),function(i){
good.sample = which(sapply(CV.sim.PERMANOVA[[i]],function(x){!is.null(x$gene)}))
this.cutoff = CV.sim.PERMANOVA[[i]][good.sample]
print(length(this.cutoff))
do.call(rbind,lapply(1:length(this.cutoff),function(j){
this.batch = this.cutoff[[j]]
R2 = c(gene= this.batch$gene.PERMANOVA[1],
cell = this.batch$cell.PERMANOVA[1],
correct=this.batch$R2[1])
dR2 = c(gene= abs(this.batch$gene.PERMANOVA[1]-this.batch$cell.PERMANOVA[1]),
correct=abs(this.batch$R2[1]-this.batch$cell.PERMANOVA[1]))
# 1/0 indicator: R2 lies strictly inside the (lower, upper) CI bounds.
cp = c(gene=as.numeric((this.batch$gene.PERMANOVA[1]<this.batch$R2.CI[2])&
(this.batch$gene.PERMANOVA[1]>this.batch$R2.CI[1])),
cell=as.numeric((this.batch$cell.PERMANOVA[1]<this.batch$R2.CI[2])&
(this.batch$cell.PERMANOVA[1]>this.batch$R2.CI[1])))
this.NSTD = sapply(CV.community.sim[[i]][[good.sample[j]]],function(x){
exp(sum(log(CV.adjusted.NSTD[[i]][2,names(x$cell)])*relative_abundance(x$cell)))
})
data.frame(dR2=t(dR2),cp=t(cp),R2=t(R2),
NSTD = mean(this.NSTD),improvement=unname(1-dR2[2]/dR2[1]))
}))
})
#
# For each batch: the 10 OTUs with the largest absolute log fold change under
# gene vs cell abundance, and how many of the gene-based top-10 are also in
# the cell-based top-10 ("hit").
top.foldchange = lapply(1:length(CV.sim.diff),function(i){
this.cutoff = CV.sim.diff[[i]]
lapply(1:length(this.cutoff),function(j){
this.batch = this.cutoff[[j]]
gene.top = names(sort(abs(log(this.batch$gene$diff)),decreasing = TRUE)[1:10])
cell.top = names(sort(abs(log(this.batch$cell$diff)),decreasing = TRUE)[1:10])
return(list(gene=gene.top,cell=cell.top,hit=sum(gene.top%in%cell.top)))
})
})
#
# Differential-abundance scatter data: per-OTU fold change and test p-value
# under cell vs gene abundance, plus the ratio between the two fold changes.
diff.data = lapply(1:length(CV.sim.diff),function(i){
this.cutoff = CV.sim.diff[[i]]
do.call(rbind,lapply(this.cutoff,function(this.batch){
data.frame(cell.diff = exp(log(this.batch$cell$diff)),
cell.p = this.batch$cell$test,
gene.diff = exp(log(this.batch$gene$diff)),
gene.p = this.batch$gene$test,
ddiff = exp(abs(log(this.batch$cell$diff/this.batch$gene$diff))))
}))
})
#
# Random-forest feature recall: take the cell-based top-10 features as truth
# and ask what fraction appear in the top-10 of the gene-based, cell-based,
# and corrected rankings.
rf.data = lapply(1:length(CV.sim.rf),function(i){
this.cutoff = CV.sim.rf[[i]]
do.call(rbind,lapply(1:length(this.cutoff),function(j){
this.batch = this.cutoff[[j]]
this.sim = CV.community.sim[[i]][[j]]
this.group = do.call(c,lapply(this.sim,function(x){x$group}))
this.group = this.group[unique(names(this.group))]
this.feature = names(this.batch$cell$rank)[1:10]
gene.recall = sum(which(names(this.batch$gene$rank)%in%this.feature)<=10)/length(this.feature)
cell.recall = sum(which(names(this.batch$cell$rank)%in%this.feature)<=10)/length(this.feature)
correct.recall = sum(which(names(this.batch$correct$correct$rank)%in%this.feature)<=10)/length(this.feature)
this.NSTD = sapply(CV.community.sim[[i]][[j]],function(x){
exp(sum(log(CV.adjusted.NSTD[[i]][2,names(x$cell)])*relative_abundance(x$cell)))
})
return(data.frame(gene=gene.recall,cell=cell.recall,correct=correct.recall,NSTD=mean(this.NSTD)))
}))
})
#
# Panel B: gene-based vs cell-based PERMANOVA R2 (first cutoff), with y=x.
PERMANOVA.plot = ggplot()+
geom_point(mapping=aes(x=R2.cell,y=R2.gene),data = PERMANOVA.data[[1]],alpha=0.5)+
geom_abline(slope = 1,intercept = 0,linetype="dashed",color="red")+
xlab(expression(R^2*" using true cell abundance"))+
ylab(expression(R^2*" using gene abundance"))+
coord_cartesian(xlim=c(0.05,0.2),ylim = c(0.05,0.2))+
theme(legend.position = "none",
panel.background = element_blank(),
axis.line = element_line())
# Panel C: per-OTU fold changes, gene vs cell, on log10-log10 axes.
diff.plot = ggplot()+
geom_point(mapping=aes(x=cell.diff,y=gene.diff),data = diff.data[[1]],alpha=0.25)+
geom_abline(slope = 1,intercept = 0,linetype="dashed",color="red")+
scale_x_continuous(trans="log10",breaks = 10^seq(-2,2,1),
labels = c(expression("10"[-2]),expression("10"[-1]),expression("10"[0]),
expression("10"[1]),expression("10"[2])))+
scale_y_continuous(trans="log10",breaks = 10^seq(-2,2,1),
labels = c(expression("10"[-2]),expression("10"[-1]),expression("10"[0]),
expression("10"[1]),expression("10"[2])))+
coord_cartesian(xlim=c(0.01,100),ylim=c(0.01,100))+
xlab("Fold change in true cell abundance")+
ylab("Fold change in gene abundance")+
theme(legend.position = "none",
panel.background = element_blank(),
axis.line = element_line())
# p-value scatter (not used in the combined figure).
# NOTE(review): the axes plot cell.p/gene.p but the labels say "Fold change"
# -- labels look copy-pasted from diff.plot; confirm intended wording.
pvalue.plot = ggplot()+
geom_point(mapping=aes(x=cell.p,y=gene.p),data = diff.data[[1]],alpha=0.25)+
geom_abline(slope = 1,intercept = 0,linetype="dashed",color="red")+
coord_cartesian(xlim=c(0,1),ylim=c(0,1))+
xlab("Fold change in true cell abundance")+
ylab("Fold change in gene abundance")+
theme(legend.position = "none",
panel.background = element_blank(),
axis.line = element_line())
# Assemble panels A-C and write PNG (cairo on Windows) plus PDF.
combined.plot = ggarrange(plotlist = list(example.plot,PERMANOVA.plot,diff.plot),
labels = c("AUTO"),ncol = 3,nrow = 1,
align = "hv",common.legend = FALSE)
if(Sys.info()["sysname"]=='Windows'){
ggsave(filename = "Fig_4.png",plot = combined.plot,device = "png",
width = 9,height = 3,units = "in",
dpi = "print",scale = 1.5,type = "cairo")
}else{
ggsave(filename = "Fig_4.png",plot = combined.plot,device = "png",
width = 9,height = 3,units = "in",
dpi = "print",scale = 1.5)
}
ggsave(filename = "Fig_4.pdf",plot = combined.plot,device = "pdf",
width = 9,height = 3,units = "in",scale = 1.5)
|
8de394432c7e7722ebe90bd31bddee89b12b9cb4 | 83d93f6ff2117031ba77d8ad3aaa78e099657ef6 | /man/gdfnotebook.Rd | e64685a8cebd833da28e4b26fae95b648b930750 | [] | no_license | cran/gWidgets2 | 64733a0c4aced80a9722c82fcf7b5e2115940a63 | 831a9e6ac72496da26bbfd7da701b0ead544dcc1 | refs/heads/master | 2022-02-15T20:12:02.313167 | 2022-01-10T20:12:41 | 2022-01-10T20:12:41 | 17,696,220 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 663 | rd | gdfnotebook.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gdfnotebook.R
\name{gdfnotebook}
\alias{gdfnotebook}
\alias{.gdfnotebook}
\title{A notebook container for many \code{gdf} instances}
\usage{
gdfnotebook(items = NULL, container = NULL, ..., toolkit = guiToolkit())
.gdfnotebook(toolkit, items, container, ...)
}
\arguments{
\item{items}{data frame for initial page, when given}
\item{container}{parent container}
\item{...}{passed to \code{add} method of parent container}
\item{toolkit}{toolkit}
}
\description{
A notebook container for many \code{gdf} instances
S3 generic whose methods are implemented in the toolkit packages
}
|
90dae494eea8ebf81fe0cad9f27de8a22933ff7f | d60895dd87c4c841513db24e43aa55d1bc10fce0 | /lib/mouse.food.r | fead86f68a9350247c8cbf2f2c94e894971b0454 | [] | no_license | knights-lab/IMP_analyses | a4fa294fbd1d443d7de0557348201f00e0dfa259 | 7f5c37092e7bd580e6e6dd8e88db482962090053 | refs/heads/master | 2021-07-08T04:42:44.269401 | 2021-05-08T14:57:49 | 2021-05-08T14:57:49 | 70,734,193 | 4 | 5 | null | 2019-05-31T22:31:01 | 2016-10-12T19:28:29 | JavaScript | UTF-8 | R | false | false | 6,340 | r | mouse.food.r | # calculates the average food eaten per mouse based on all of the different cages
# Compute average food eaten per mouse per week from cage-level food weights.
#
# Args:
#   ggdata: data frame with at least Mouse.ID, Cage.ID, Food.Weight.Out,
#           Food.Weight.In, Week, and Group columns.
# Returns: data frame with one row per cage/week including Num.Mice (adjusted
#   for deaths) and Food.Per.Mouse = FoodDiff / Num.Mice.
calc.food <- function(ggdata)
{
chow <- unique(ggdata[,c("Mouse.ID", "Cage.ID", "Food.Weight.Out", "Food.Weight.In", "Week")])
weeks <- unique(ggdata$Week)
gg_chow <- NULL
# do this by Mouse.ID because of some issues with adding food to cages after sac'd mice are removed for fasting
# FoodDiff = food put in last week minus food left this week (grams eaten).
for(i in 2:(length(weeks)))
{
last.week <- weeks[i-1]
this.week <- weeks[i]
cc <- merge(chow[chow$Week==this.week, c("Mouse.ID", "Cage.ID","Food.Weight.Out")], chow[chow$Week==last.week, c("Mouse.ID", "Food.Weight.In")], by=c("Mouse.ID"))
cc$FoodDiff <- cc$Food.Weight.In - cc$Food.Weight.Out
cc$Week <- this.week
gg_chow <- rbind(gg_chow, cc)
}
# calculate how many mice there were in each cage (alive at the time of food being weighed)
# Cage:FoodDiff key lets table() count mice sharing the same cage measurement.
gg_chow$CageFood <- paste(gg_chow$Cage.ID, gg_chow$FoodDiff, sep=":")
map.counts <- table(gg_chow[,c("Week","CageFood")])
# NOTE(review): id.vars=(CageFood) is an unquoted symbol that melt's table
# method never evaluates -- it works by lazy evaluation; presumably "CageFood"
# was intended. Confirm before touching.
map.counts.long <- melt(map.counts, id.vars=(CageFood))
map.counts.long <- map.counts.long[map.counts.long$value!=0,]
gg_chowc <- merge(map.counts.long, unique(gg_chow[,c("CageFood", "Cage.ID", "FoodDiff", "Week")]), by=c("CageFood", "Week"))
colnames(gg_chowc)[which(colnames(gg_chowc) == "value")] <- "Num.Mice"
# now make some manual modifications to account for the mice that died
# Fractional mouse-weeks: a mouse dying d days into a 14-day period counts
# as (14-d)/14 of a mouse for that period.
#M25 0 0 NA NA TFSCS026 Thai 6 TD.86489 LowFiber NA NA 11/6/17 --> 2 days before Week 2
#M26 0 1 NA NA IMP.264 US 7 TD.86489 LowFiber NA NA 11/2/17 --> 4 days before Week 2
#M28 0 2 NA NA IMP.264 US 7 TD.86489 LowFiber NA NA 11/22/17 --> on the end of Week 4
gg_chowc[gg_chowc$Cage.ID=="6" & gg_chowc$Week==2, "Num.Mice"] <- gg_chowc[gg_chowc$Cage.ID=="6" & gg_chowc$Week==2, "Num.Mice"] + ((14-2)/14) # died 2 days prior
# NOTE(review): the RHS below reads cage "6"'s week-2 count while assigning to
# cage "7" -- looks like a copy-paste slip; confirm cage "7" was intended.
gg_chowc[gg_chowc$Cage.ID=="7" & gg_chowc$Week==2, "Num.Mice"] <- gg_chowc[gg_chowc$Cage.ID=="6" & gg_chowc$Week==2, "Num.Mice"] + ((14-4)/14) # died 4 days prior
gg_chowc[gg_chowc$Cage.ID=="7" & gg_chowc$Week==4, "Num.Mice"] <- gg_chowc[gg_chowc$Cage.ID=="7" & gg_chowc$Week==4, "Num.Mice"] + (14/14) # died 0 days prior
gg_chowc$Food.Per.Mouse <- gg_chowc$FoodDiff/gg_chowc$Num.Mice
# Attach the experimental Group label for each cage.
avg_chow <- merge(gg_chowc, unique(ggdata[,c("Cage.ID","Group")]), by="Cage.ID")
# manually add starting data points for cohousing lines
# cohouse.wk8 <- rbind(avg_chow[avg_chow$Cage.ID %in% c("1", "2") & avg_chow$Week==8,],
#                     avg_chow[avg_chow$Cage.ID %in% c("3", "4") & avg_chow$Week==8,])
# cohouse.wk8$Cage.ID <- c(rep("C1-2",2), rep("C3-4",4))
# cohouse.wk8$Group <- c(rep("Cohoused.LowFiber",2), rep("Cohoused.HighFiber",4))
# avg_chow <- rbind(avg_chow, cohouse.wk8)
return(avg_chow)
}
# Line plot of the average food consumed per mouse over time, by group.
#
# `group`: name of the column in ggdata used as the grouping variable.
# Uses calc.food() above plus the GROUP.COLORS / GROUP.SHAPES palettes
# defined elsewhere; writes the figure to "food-consumption-L.pdf".
plot.food <- function(ggdata, group)
{
    ggdata$Group <- ggdata[,group]
    avg_chow <- calc.food(ggdata)
    # one line/point per group: group means of Food.Per.Mouse at each week
    p_chow <- ggplot(data=avg_chow, aes(x=Week, y=Food.Per.Mouse, group=Group, color=Group)) + scale_color_manual(name = "Group", values = GROUP.COLORS) +
        geom_line(data = aggregate(Food.Per.Mouse ~ Group + Week, avg_chow, mean), aes(x=Week, y=Food.Per.Mouse, group=Group, color=Group), alpha=1, size=2) +
        geom_point(data = aggregate(Food.Per.Mouse ~ Group + Week, avg_chow, mean), aes(shape=Group), color="black", size=3) + scale_shape_manual(name="Group",values=GROUP.SHAPES) +
        ggtitle("Average Food Consumed") + ylab("Per Mouse (g)") + xlab("Week") + scale_x_continuous(name="Week", breaks=seq(0,10,2)) + theme(legend.position='none')
    save_plot("food-consumption-L.pdf", p_chow, base_aspect_ratio = 1)
}
# Line plot of feed efficiency (weight gain / food consumed) over time, by group.
#
# `group`: name of the column in ggdata to use as the grouping variable.
# Relies on calc.food() (above), calc.weights() and save_plot()/get_legend()
# defined elsewhere; writes "feed-efficiency-L.pdf".
plot.feed.efficiency.L <- function(ggdata, group)
{
    ggdata$Group <- ggdata[,group]
    avg_chow <- calc.food(ggdata)
    weight.all <- calc.weights(ggdata)
    chow.df <- aggregate(Food.Per.Mouse ~ Group + Week, avg_chow, mean) # average food consumed per mouse within group
    # grab individual mouse weights so that we can include error bars in FE calculation
    weight.food.df <- merge(weight.all[,c("Mouse.ID","Cohoused","Group","Week","Grams.Weight.Gain", "Donor")], chow.df, by=c("Group","Week"))
    weight.food.df$FE <- weight.food.df$Grams.Weight.Gain/weight.food.df$Food.Per.Mouse
    # per-group mean and standard error of FE at each week
    weight.food.mean <- aggregate(FE ~ Group + Week, weight.food.df, mean)
    weight.food.se <- aggregate(FE ~ Group + Week, weight.food.df, FUN=function(xx) sd(xx)/sqrt(length(xx)))
    weight.food.summary.df <- cbind(weight.food.mean[,1:2], Mean.FE=weight.food.mean$FE, SE.FE=weight.food.se$FE)
    p.weight.food <- ggplot(weight.food.summary.df, aes(x=Week, y=Mean.FE, group=Group, color=Group)) +
        scale_color_manual(name = "Group", values = GROUP.COLORS) +
        ylab("Weight Gain / Food Consumed") + xlab("Week") + scale_x_continuous(name="Week", breaks=seq(0,10,2)) +
        ggtitle("Feed Efficiency\n") + geom_line(size=2) + geom_errorbar(aes(ymin=Mean.FE-SE.FE, ymax=Mean.FE+SE.FE), width=.2) +
        geom_point(aes(shape=Group), color="black", size=3) + scale_shape_manual(name="Group",values=GROUP.SHAPES)
    p.legend <- get_legend(p.weight.food)  # NOTE(review): extracted legend is never used in this function -- confirm
    p.weight.food <- p.weight.food + theme(legend.position='none')
    save_plot("feed-efficiency-L.pdf", p.weight.food, base_aspect_ratio = 1)
}
# Boxplot of end-of-experiment feed efficiency per mouse, grouped by `group`.
# use group = group.end instead of start
#
# `add.pval`: whether mouse.boxplot() should annotate p-values;
# `outputfn`: output PDF path.  Relies on calc.food(), calc.weights() and
# mouse.boxplot() defined elsewhere.
plot.feed.efficiency.wk8 <- function(ggdata, group, add.pval=TRUE, outputfn)
{
    ggdata$Group <- ggdata[,group]
    avg_chow <- calc.food(ggdata)
    weight.all <- calc.weights(ggdata)
    chow.df <- aggregate(Food.Per.Mouse ~ Group + Week, avg_chow, mean) # average food consumed per mouse within group
    # grab individual mouse weights so that we can include error bars in FE calculation
    weight.food.df <- merge(weight.all, chow.df, by=c("Group","Week"))
    weight.food.df$FE <- weight.food.df$Grams.Weight.Gain/weight.food.df$Food.Per.Mouse
    # Week 8 only -- NOTE(review): the filter actually keeps weeks 8 AND 10; confirm which is intended
    weight.food.wk8 <- weight.food.df[weight.food.df$Week %in% c(8,10),]
    p.weight.food.wk8 <- mouse.boxplot(y=weight.food.wk8$FE, Group=weight.food.wk8$Group,
        main="Feed Efficiency\n", add.pval=add.pval,
        ylab="Weight Gain / Food Consumed", group.vars.df=weight.food.wk8[,c("Donor.Type","Diet.Type")], hide.box=FALSE)
    save_plot(outputfn, p.weight.food.wk8, base_aspect_ratio = 1)
}
ee69cfaac6e890ecdaafe2ffa816c9c19aa64e79 | 820f8aca9a690688cd5a48caa9038fbeff6ba971 | /R/RcppExports.R | 1abed4b43e2dd6adc0c546b7983b543369eb079e | [] | no_license | jkennel/transducer | 07374d4967498762cb692e71068bf89b6f026bc3 | 881ae6eb2570a15c6dc6aa91a69308183c3023f2 | refs/heads/master | 2021-06-11T18:14:03.474803 | 2021-06-04T12:22:17 | 2021-06-04T12:22:17 | 195,269,441 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,954 | r | RcppExports.R | # Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
#' rbr_raw_to_pressure
#'
#' Auto-generated Rcpp wrapper: forwards its arguments unchanged to the
#' compiled routine '_transducer_rbr_raw_to_pressure'.  Do not edit by hand;
#' change the C++ source and re-run Rcpp::compileAttributes().
#'
#' @param x raw sensor readings to convert (semantics defined in the C++ source)
#' @param calib calibration coefficients used by the conversion
#'
#' @return result of the compiled conversion routine (see src/)
#' @export
#'
#' @examples
rbr_raw_to_pressure <- function(x, calib) {
    .Call('_transducer_rbr_raw_to_pressure', PACKAGE = 'transducer', x, calib)
}
#' rbr_raw_to_temperature
#'
#' Auto-generated Rcpp wrapper: forwards its arguments unchanged to the
#' compiled routine '_transducer_rbr_raw_to_temperature'.  Do not edit by
#' hand; change the C++ source and re-run Rcpp::compileAttributes().
#'
#' @param x raw sensor readings to convert (semantics defined in the C++ source)
#' @param calib calibration coefficients used by the conversion
#'
#' @return result of the compiled conversion routine (see src/)
#' @export
#'
#' @examples
rbr_raw_to_temperature <- function(x, calib) {
    .Call('_transducer_rbr_raw_to_temperature', PACKAGE = 'transducer', x, calib)
}
#' rbr_temperature_correction
#'
#' Auto-generated Rcpp wrapper: applies a temperature correction to pressure
#' readings via the compiled routine '_transducer_rbr_temperature_correction'.
#' Do not edit by hand; change the C++ source and re-run
#' Rcpp::compileAttributes().
#'
#' @param pressure pressure readings to correct
#' @param temperature temperature readings paired with `pressure`
#' @param x calibration constants
#'
#' @return result of the compiled correction routine (see src/)
#' @export
#'
#' @examples
rbr_temperature_correction <- function(pressure, temperature, x) {
    .Call('_transducer_rbr_temperature_correction', PACKAGE = 'transducer', pressure, temperature, x)
}
#' rbr_times
#'
#' Auto-generated Rcpp wrapper around the compiled routine
#' '_transducer_rbr_times'.  Do not edit by hand; change the C++ source and
#' re-run Rcpp::compileAttributes().
#'
#' @param ev_tstamp event timestamps (semantics defined in the C++ source)
#' @param ev_index event indices paired with `ev_tstamp`
#' @param ti time interval / increment -- see C++ source for exact meaning
#'
#' @return result of the compiled routine (see src/)
#' @export
#'
#' @examples
rbr_times <- function(ev_tstamp, ev_index, ti) {
    .Call('_transducer_rbr_times', PACKAGE = 'transducer', ev_tstamp, ev_index, ti)
}
#' rbr_calib_mult
#'
#' Auto-generated Rcpp wrapper around the compiled routine
#' '_transducer_rbr_calib_mult'.  Do not edit by hand; change the C++ source
#' and re-run Rcpp::compileAttributes().
#'
#' @param x raw values to convert (semantics defined in the C++ source)
#' @param calib calibration coefficients
#' @param is_temp flag selecting the temperature branch of the conversion --
#'   see C++ source for exact meaning
#'
#' @return result of the compiled routine (see src/)
#' @export
#'
#' @examples
rbr_calib_mult <- function(x, calib, is_temp) {
    .Call('_transducer_rbr_calib_mult', PACKAGE = 'transducer', x, calib, is_temp)
}
#' @title density_temperature
#'
#' @details This function estimates the density of water at a given temperature
#' using Kell's formulation (ITS-90 temperature scale).  Auto-generated Rcpp
#' wrapper around the compiled routine; edit the C++ source, not this file.
#'
#' @param x temperature (ITS-90)
#'
#' @return estimated water density from the compiled routine
#' @export
#' @seealso \url{https://nvlpubs.nist.gov/nistpubs/jres/097/jresv97n3p335_A1b.pdf}
density_temperature <- function(x) {
    .Call('_transducer_density_temperature', PACKAGE = 'transducer', x)
}
# Register entry points for exported C++ functions
# (runs automatically when the package namespace is loaded, so other packages
# can call the registered C routines; auto-generated -- do not edit by hand)
methods::setLoadAction(function(ns) {
    .Call('_transducer_RcppExport_registerCCallable', PACKAGE = 'transducer')
})
|
8e78324dbc205491a8ab983b541b9fac25cfbfb0 | 28f63a13fefb546619bfe1b5b70dfd4b3d6f106a | /paper/code/RunParmsFile.R | 267b28a0f6729aaa1e6364fe3936138d2993e8c6 | [
"MIT"
] | permissive | mishra-lab/covid-GTA-surge-planning | 1a5e303606f32d0afb70d29e687191c06af4d43e | 8425f91c7dc8b3314c958476ed2609d9ba91a77c | refs/heads/master | 2023-01-20T08:08:20.106743 | 2020-11-26T15:23:01 | 2020-11-26T15:23:01 | 247,745,255 | 3 | 4 | null | null | null | null | UTF-8 | R | false | false | 1,107 | r | RunParmsFile.R | #############################################################################################
#############################################################################################
# RunParmsFile: run the epidemic model epid() once for every parameter set
# (row) in "<Pfile>.csv" and write the combined results to "<Pfile><date>.csv".
#
# Args:
#   Pfile: base name (without extension) of the parameter CSV; its first
#          column is renamed to "pSet" before the runs.
# Side effects: reads "<Pfile>.csv" from the working directory and writes
# "<Pfile><Sys.Date()>.csv" there.
#############################################################################################
RunParmsFile <- function(Pfile) {
  nameOfParmFile <- paste(Pfile, ".csv", sep = "")
  params_A <- read.csv(nameOfParmFile, header = TRUE)
  names(params_A)[1] <- "pSet"
  nSims2 <- nrow(params_A)
  # seq_len() is safe when the parameter file has zero rows (1:0 would iterate
  # twice).  Collect per-row results in a preallocated list and bind once at
  # the end instead of growing a data.frame with rbind() inside the loop.
  out_list <- vector("list", nSims2)
  for (i in seq_len(nSims2)) {
    out_list[[i]] <- epid(parmlist = params_A[i, ])
  }
  out_all <- do.call(rbind, out_list)
  currentDate <- Sys.Date()
  csvFileName <- paste(Pfile, currentDate, ".csv", sep = "")
  write.csv(out_all, file = csvFileName)
}
|
8c92768c133128b20fa01ce6d42ad1e92a1b3702 | 1c01ed7a5e79c5e281c0ede3406f702f79766882 | /man/toss.tail.Rd | ed61cfebdc0d6bd6027c991d88d069d831c36445 | [] | no_license | christiantillich/AnaliTools | 19e738e4084be1678ff7aeda45aa9f146de5ac1d | cab56ef7729f1d9692af5241ac5eca60060c3045 | refs/heads/master | 2020-04-06T05:12:48.950283 | 2019-02-25T22:09:03 | 2019-02-25T22:09:03 | 47,645,937 | 0 | 1 | null | 2019-02-25T22:09:04 | 2015-12-08T19:53:20 | R | UTF-8 | R | false | true | 405 | rd | toss.tail.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.tools.R
\name{toss.tail}
\alias{toss.tail}
\title{toss.tail}
\usage{
toss.tail(x, n)
}
\arguments{
\item{x}{A vector whose trailing non-NA values will be replaced.}
\item{n}{The number of trailing non-NA observations to replace with \code{NA}.}
}
\value{
A vector in which the last \code{n} non-NA values are replaced with
\code{NA}.
}
\description{
toss.tail
}
|
e8c1d70348f626105b97caacb4df39190ba0c0e2 | 4d8d1583b5122eed34a47b287eddf13d7db9776b | /main.r | 9d8f18be0435c7cf257bf4d703f73a0e0bd23edd | [] | no_license | zhouxin0/Big_data_analysis | 8f612ecedf95f29b97a4c675a3626f61ae1e23fd | b6ec2c02160eedce013ff69dd2b11c1eb3eaf306 | refs/heads/master | 2021-03-09T21:50:50.083628 | 2020-03-11T14:54:40 | 2020-03-11T14:54:40 | 246,383,846 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,225 | r | main.r | ### Load R scripts with optimizers and cost functions
# Fit a simple linear regression by gradient descent and stochastic gradient
# descent, and compare with R's closed-form lm() fit.  Requires gd(), sgd(),
# lmf() and lmgrad() from the scripts sourced below.
source("optimizers.r")
source("cost.r")
### Load data, standardize covariate and create design matrix
## Load CSV file
simulated_data <- read.csv("simulated_data.csv", header=TRUE)
## Standardize covariate (scale() centers and rescales to unit variance)
simulated_data$covariate <- scale(simulated_data$covariate)
## Number of data points
ndata <- nrow(simulated_data)
## Number of linear regression parameters, including intercept
npars <- 2
## Create design matrix X, including a first column of ones for intercept
X<-cbind(rep(1, ndata),simulated_data$covariate)
### Fit a linear regression model to the data
lm_out <- lm(y~covariate, data=simulated_data)
### Run optimization algorithms
## Initial values of parameters, common across optimization algorithms
## (theta0[1] = intercept start, theta0[2] = slope start; the phase plot
## below labels this start point as (-8, 7) in (slope, intercept) axes)
theta0 <- c(7, -8)
## Gradient descent (GD)
# GD: learning rate
a<-0.2
# GD: number of iterations
niters <- 100
# Run GD
gd_out <- gd(lmf, lmgrad, simulated_data$y, X, theta0, npars, ndata, a, niters)
## Stochastic gradient descent (SGD)
# SGD: learning rate
a <-0.2
# SGD: number of iterations
niters <- 100
# SGD: number of subsamples per iteration
nsubsamples <- 100
# Run SGD
sgd_out <- sgd(lmf, lmgrad, simulated_data$y, X, theta0, npars, ndata, a, niters,nsubsamples)
### save design matrix X to file "answer1a.csv"
write.table(X, file="answer1a.csv", row.names=FALSE, col.names=FALSE, sep=",")
### save parameter estimates to file "answer1b.csv"
# one column per method: closed-form lm, final GD iterate, final SGD iterate
theta_estimates <- cbind(
  LM=as.vector(lm_out$coefficients),
  GD=as.vector(tail(gd_out$theta, n=1)),
  SGD=as.vector(tail(sgd_out$theta, n=1))
)
write.table(
  theta_estimates,
  file="answer1b.csv",
  row.names=FALSE,
  col.names=TRUE,
  sep=",",
  quote=FALSE
)
### save last values of cost function to file "answer1c.csv"
final_cost_values <- cbind(
  GD=tail(gd_out$cost, n=1),
  SGD=tail(sgd_out$cost, n=1)
)
write.table(
  final_cost_values,
  file="answer1c.csv",
  row.names=FALSE,
  col.names=TRUE,
  sep=",",
  quote=FALSE
)
### save plot of cost function to file "answer1d.pdf"
pdf(file="answer1d.pdf")
# cost-vs-iteration curves for both optimizers on the same axes
plot(gd_out$cost,type="l",col="red",xlab="iteration",ylab="cost",main="compare cost of gd and sgd",lty=1,lwd=2)
lines(sgd_out$cost,type="l",col="green",lty=1,lwd=2)
legend("topright",legend=c("gd","sgd"),col=c("red","green"),lty=c(1,1),lwd=c(2,2))
dev.off()
### save phase plot of parameter theta_0 vs parameter theta_1 to file "answer1e.pdf"
# (leftover bare debug expressions `gd_out$theta` / `sgd_out$theta`, which
# auto-printed the whole iterate matrices to the console, have been removed)
pdf(file="answer1e.pdf")
# optimizer trajectories: x = theta_1 (slope), y = theta_0 (intercept)
plot(y=gd_out$theta[,1],x=gd_out$theta[,2],ylim=c(-1,8),xlim=c(-8,2),type="l",col="red",ylab="theta0",xlab="theta1",main="compare two optimizers' parameters: theta0 and theta1 in gd and sgd",lty=1,lwd=2)
lines(y=sgd_out$theta[,1],x=sgd_out$theta[,2],type="l",col="green",lty=1,lwd=2)
legend("topright",legend=c("gd","sgd"),col=c("red","green"),lty=c(1,1),lwd=c(2,2))
# mark the shared starting point and the closed-form lm() solution
points(y=gd_out$theta[1,1],x=gd_out$theta[1,2],col="black",cex=1)
points(y=lm_out$coefficients[1],x=lm_out$coefficients[2],col="black",cex=1)
text(y=gd_out$theta[1,1],x=gd_out$theta[1,2],col="black",labels="start point:(-8,7)",pos=4,cex=0.5)
text(y=lm_out$coefficients[1],x=lm_out$coefficients[2],col="black",labels="lm_coefficients:(1.375,-0.933)",pos=3,cex=0.5)
dev.off()
|
b5e1380f3adbc28a10439d0880e1632c0e53d0ed | d4dcbdbeac43b97d64978e0191845a2ad3e132e5 | /scripts/quantitatives.R | 862fb4a9b58a50d55755a17de2b005d4c041cc6d | [] | no_license | jpissinis/covid_in_oncohematology | 52dbb0915c917493b183004ea051094dc9b40ea2 | cc08cee99905d5522b26d525c4b115ab70897c7b | refs/heads/master | 2023-04-03T12:51:08.170911 | 2021-04-19T01:43:22 | 2021-04-19T01:43:22 | 340,002,030 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,169 | r | quantitatives.R | #loading packages
library(readr)
library(dplyr)
library(tidyverse)
library(ggplot2)
library(lubridate)
library(writexl)
#loading the data as patients
path_name<-file.path("./raw","patients.csv")
patients<-read_csv(path_name)
#selecting the comorbidities variables
comorbidities_patients<-patients%>%select(
  PLASMA, `Infusion temprana 1 tardia 0`,EPOC, `Tumor Solido`,
  `ICC/CI`, Obesidad, HTA, DIABETES,`TABAQ/EXTABAQ`,
  `Enfermedad Autoinmune`, ERC)
#defining the events ("SI" = yes in the raw data)
events<-c("SI")
#wrangling the columns and translating the events
#(NA stays NA; other strings are mapped to membership in `events`)
is_event<-function(x){ifelse(is.na(x),x,x%in%events)}
comorbidities_patients<-comorbidities_patients%>%
  mutate_if(is.numeric,as.logical)%>%
  mutate_if(is.character,is_event)%>%
  mutate_if(is.character,as.logical)
#counting the comorbidities per patient (sum of TRUE flags)
comorbidities_patients<-comorbidities_patients%>%
  mutate(N_comorbilidades=
           EPOC+`Tumor Solido`+`ICC/CI`+Obesidad+HTA+DIABETES+
           `TABAQ/EXTABAQ`+`Enfermedad Autoinmune`)%>%
  # ERC is only added where it is observed, so a missing ERC does not
  # turn the whole count into NA
  mutate(N_comorbilidades=
           ifelse(is.na(ERC),N_comorbilidades,
                  N_comorbilidades+ERC))
#grouping by the comparison variable (plasma infusion yes/no)
comorbidities_patients<-comorbidities_patients%>%
  group_by(PLASMA)
#obtaining the median number of comorbidities
# Helper: median that ignores missing values.
# (Uses TRUE rather than the shorthand T, which is reassignable.)
median_na_rm <- function(x) {
  median(x, na.rm = TRUE)
}
# per-group medians of the comorbidity count
median_comorbidities_patients<-comorbidities_patients%>%
  select(PLASMA,N_comorbilidades)%>%summarise_all(median_na_rm)
median_comorbidities_patients
#plotting the number of comorbidities as histogram
comorbidities_patients%>%select(N_comorbilidades,PLASMA)%>%
  ggplot(aes(N_comorbilidades,fill=PLASMA),alpha=0.5)+
  geom_histogram(binwidth = 1)
#plotting the number of comorbidities as a boxplot
comorbidities_patients%>%select(N_comorbilidades,PLASMA)%>%
  ggplot(aes(N_comorbilidades,fill=PLASMA),alpha=0.5)+
  geom_boxplot()
#conducting the Mann Whitney test on comorbidity counts between plasma groups
#NOTE(review): T/F shorthand is used below; TRUE/FALSE would be safer since
#T and F can be reassigned.
N_comorb_PLASMA_YES<-comorbidities_patients%>%filter(PLASMA==T)
N_comorb_PLASMA_YES<-N_comorb_PLASMA_YES$N_comorbilidades
N_comorb_PLASMA_NO<-comorbidities_patients%>%filter(PLASMA==F)
N_comorb_PLASMA_NO<-N_comorb_PLASMA_NO$N_comorbilidades
N_comorb_PLASMA_NO
wilcox.test(N_comorb_PLASMA_YES,N_comorb_PLASMA_NO)
|
33a003598efabbc9b268eb4e5bf0ce1405f90ba5 | 8da47324149b1379db0a8a703a309b64b209c698 | /man/add_column_headings.Rd | 4d8621fb3e894321a67ef3a31c204b3dc25f2199 | [
"MIT"
] | permissive | calvinmfloyd/grobblR | 82794421cabb9ade685487ad5e127ad6d6c7efb6 | 8aa6b4fec42539ce212931ce4d3755357a826e85 | refs/heads/master | 2022-06-23T04:34:02.166580 | 2022-06-01T14:25:49 | 2022-06-01T14:25:49 | 130,590,418 | 40 | 4 | NOASSERTION | 2021-09-14T03:20:37 | 2018-04-22T17:24:59 | R | UTF-8 | R | false | true | 1,595 | rd | add_column_headings.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/headings.R
\name{add_column_headings}
\alias{add_column_headings}
\title{Add column headings to grob matrix}
\usage{
add_column_headings(mat, headings = list(), heading_cols = list())
}
\arguments{
\item{mat}{The grob matrix object the column headings will be added onto.}
\item{headings}{The headings to be added onto the initial matrix,
in a list with each heading a separate element. The list must have
the same amount of elements as the \code{heading_cols} parameter.}
\item{heading_cols}{Which column positions of the initial matrix the \code{headings}
will be placed above, in a list with each heading's column positions a separate element.
The list must have the same amount of elements as the \code{headings} parameter.
Can either be numeric indices, or column names of the initial data frame / matrix
passed through \code{\link{grob_matrix}}.
Default is an empty list. If unaltered, the function will assume the user
wants to apply \code{headings} to all columns of the \code{\link{grob_matrix}} -
in which case only one \code{headings} is allowed.}
}
\value{
The initial grob matrix object with column headings inserted into the
appropriate areas.
}
\description{
Add column headings onto an object initialized by \code{\link{grob_matrix}}.
}
\details{
The user must add column headings \strong{before} adding or altering
any aesthetics.
}
\examples{
data.frame(var1 = c(5, 14, 6, 10), var2 = c(3, 30, 17, 7)) \%>\%
grob_matrix() \%>\%
add_column_headings(c('HEADING')) \%>\%
view_grob()
}
|
10961b5484eea5b37f0a110a6745a20be593e23b | 028cee02cc54843314b7ab5d87e916855fa4de6e | /inst/deprecated/R/Mojave_data.R | d32a18423b7958740787feef87941ef544a3a018 | [] | no_license | pnnl/glmnetLRC | adc804676cce6056962c34a2a7f9326b616fab88 | 23418ce72f26c5c3acdf1f8f4f3c21547d499d0a | refs/heads/master | 2021-01-21T04:50:52.523118 | 2016-07-01T17:43:32 | 2016-07-01T17:43:32 | 51,790,257 | 3 | 5 | null | 2016-07-01T17:43:34 | 2016-02-15T22:26:56 | R | UTF-8 | R | false | false | 308 | r | Mojave_data.R | ##' Environmental data from the Mojave desert
##'
##' @docType data
##' @format A dataframe with 633 obs and 11 variables, \code{cheatGrass} is the response
##' variable, the rest are location or predictor variables
##' @name Mojave data
##' @examples
##' data(Mojave)
##' head(Mojave)
##' str(Mojave)
NULL
|
ae9a7f80a8686757ce179afcb314e518319baf40 | 32235d071051a6da8ff9c608a1eb3bc1e436f2b6 | /man/get_question.Rd | f3a6eab6d7f5fa626ef649c53a3e08ce9c511b7e | [
"MIT"
] | permissive | rbrellen/hksreviewr | 331ebef3d77e6255f4b9720bedb4f5f0b69a68b7 | c140b6524194de5d978980d2410b8a872756ee91 | refs/heads/master | 2020-03-27T21:30:29.608731 | 2018-09-03T15:27:27 | 2018-09-03T15:27:27 | 147,150,119 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 574 | rd | get_question.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utility_functions.R
\name{get_question}
\alias{get_question}
\title{Get Question}
\usage{
get_question(lines = lines, question_number)
}
\arguments{
\item{lines}{Input file represented as list object (output of \code{import_lines}).}
\item{question_number}{The question number (1-19) for which a mean response is desired.}
}
\value{
The mean response from all respondents for the specified question.
}
\description{
Get Question
}
\examples{
\dontrun{
question_01 <- get_question(lines, 01)
}
}
|
0c285e01cbde5d40218783e6d053740e434a54c9 | 7430d9b386f6b20e13ec5ee97bed5d434b9c6280 | /complete.R | fb8a1b7069994b0d1cbbf7df5664c98d0ed0a3ce | [] | no_license | ramanathan89/ProgrammingAssignment2 | a4e450dcf793f06a7c6bf677c2230711ee4beb7c | a3a4d1ef59ceb44cd15d73348f0c3b98270482f9 | refs/heads/master | 2020-07-11T16:44:41.061810 | 2015-04-26T19:38:37 | 2015-04-26T19:38:37 | 34,178,830 | 0 | 0 | null | 2015-04-18T19:05:16 | 2015-04-18T19:05:16 | null | UTF-8 | R | false | false | 630 | r | complete.R | complete <- function(directory = "specdata", id = 1:332) {
# formating the sequence into file names
userSelection <- sprintf("%03d.csv", id)
# prepare filenames w.r.t working directory
filesPrepared <- paste(directory, userSelection, sep = "/")
# prepare a list of dataframes w.r.t each csv file
listedDFs <- lapply(filesPrepared, read.csv)
# filter out NAs in the dataframe to get complete cases
completeDFs <- lapply(listedDFs, na.omit)
# count the number of observations using nrow
nobs <- sapply(completeDFs, nrow)
# join the 2 vectors to form a dataframe
data.frame(id,nobs)
}
|
ed21adedcf0cfef2e34cbc021d2aedefb0ae223a | bcb321dfd1181ec508d34639948d15aefcdfe8bf | /SequencEnG/SoftwareDecisionSupportSystem/server.R | 4fa6b238ab7c76f2b32aded6871a0d3965660ce4 | [] | no_license | washpots/Training_Education_code | 2bb10180b1bd3011da0a7ef2c819f1c480f58e24 | d4c08ee8dd3f6ad270045c9800a741a669b03f15 | refs/heads/master | 2021-01-17T04:25:40.611590 | 2017-02-23T16:27:21 | 2017-02-23T16:27:21 | 82,852,550 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,938 | r | server.R | library(shiny)
# variables
newnode <<- character()
ncount <- 0
sr <- 0 # id of selected recommendation table, triggers update of decision support table
x0 <- 'multiple'
pipelinestep <<- character()
mypipefilter <<- character()
userpipe <<- create_graph()
tdecisionTable <- data.frame()
n <- integer
b <<- NULL
s <<- NULL
# create standard pipeline
grVizOutput('userp', width = "200px", height = "400px" )# pipelines
StandPipelines<- function(standnodes, nodecolor="#2980b9 "){
pipe_nodes <<- na.omit(mypipe[,standnodes])
# prepare edges - detecting branchings
myfrom <- as.character(pipe_nodes[1:(length(pipe_nodes)-1)])
myto <- as.character(pipe_nodes[2:(length(pipe_nodes))])
myedge <- as.data.frame(cbind(myfrom, myto))
myedge <- myedge[(myedge$myfrom != 1) & (myedge$myto != 1), ]
# checking filter-out branching information
pipe_nodes <<- na.omit(mypipe[,standnodes])
pipe_nodes <<- pipe_nodes[pipe_nodes != 1]
nodes_pipeline <<- create_nodes(
nodes = pipe_nodes,
lable = FALSE, type = "lower", color = "white", fontcolor = "white", fontsize = "12", style = "filled",
penwidth ="1", fillcolor = "none", shape = "rectangle")
# coloring the selected pipeline step
if (!is.null(mypipefilter)) {
nodes_pipeline[nodes_pipeline$nodes == mypipefilter, "fillcolor"] <<- nodecolor
}
edge_pipeline <- create_edges(
from = myedge[,"myfrom"],
to = myedge[,"myto"],
relationship = "requires",
color = "white",
data = c("1", "2", "3", "4"))
StandardPipe <<- create_graph(
nodes_df = nodes_pipeline,
edges_df = edge_pipeline,
graph_attrs = c("bgcolor = none"),
node_attrs = c("fontname = Helvetica"),
edge_attrs = c("arrowsize = 1"))
reC <<- render_graph(StandardPipe)
}
#####################################################
# empty graph that accumulates the user-constructed pipeline (see softconstr)
userpipe <<- create_graph(
    graph_attrs = c("bgcolor = none"),
    node_attrs = c("fontcolor = white", "color = white", "fontsize = 11", "shape = ellipse", "width = 3"),
    edge_attrs = c("arrowsize = 1", "color = white"))
# Append a node (and a connecting edge, once there is more than one node) to
# the user-constructed pipeline graph `userpipe`, then re-render it into the
# global `b`.  All state lives in globals mutated via <<-.
softconstr <<- function(newnode){
    ## add node
    #print(paste("newnode: ", newnode))
    userpipe <<- add_node(
        graph = userpipe, node = newnode, label = paste(newnode, ncount))
    print(node_info(userpipe))
    ## add edge
    ncount <<- node_count(userpipe, type = FALSE)
    print(paste("edgeinfo: ", edge_info(userpipe), " ncount: ", ncount, " n: ", n))
    if (ncount > 1) {
        mynodes <<- node_info(userpipe)
        userpipe <<- add_edges(userpipe,
            from = as.character(mynodes[n-1, 1]),
            to = as.character(mynodes[n,1]))
        n <- ncount  # NOTE(review): local assignment; the global `n` is never updated (n <<- may have been intended) -- confirm
        #print(paste("n: ", n, "if: # nodes: ", ncount, " from: ", mynodes[ncount-1,1], "to ", mynodes[ncount,1]))
    }
    b<<- render_graph(userpipe)
}
#####################################################
# Shiny server: wires pipeline selection, the software recommendation table,
# benchmarking and the decision-support table together.  Relies heavily on
# <<- assignments into globals defined at the top of this file, so state may
# be shared across concurrent sessions.
shinyServer(function(input, output, session) {
    options(shiny.trace = TRUE)
    # standard pipelines: user selects one of determined pipelines
    callStandardPipe <<- reactive({
        switch(input$selectpipe,
            "peak calling" = "peak calling",
            "differential binding" = "differential binding",
            "motif analysis" = "motif analysis",
            "peak annotation" = "peak annotation",
            "gene ontology analysis" = "gene ontology analysis")})
    # redraw the standard pipeline and repopulate the pipeline-step selector
    # whenever a different pipeline is chosen
    observeEvent(input$selectpipe, {
        StandPipelines(callStandardPipe())
        output$pipelinestep <<- renderUI({
            selectInput("pipelinestep", "B) Pipeline Step",
                choices = as.character(pipe_nodes))
        })
    })
    # software per pipeline step -> subsetting software based on pipeline step & output on data table
    # NOTE(review): this reactive has many side effects (registers outputs,
    # mutates globals) in addition to returning `softperpipe` -- confirm intended.
    pipelinestepsoftInput <<- reactive({
        print(paste("Reactive Function pipelinestepsoftInput"))
        #if (is.na(input$soft_category)) softassesscat = "functionality"
        #if (is.na(input$pipelinestep)) mypipefilter = "read mapping"
        softassesscat<<- input$soft_category
        mypipefilter <<- input$pipelinestep
        # selecting mysoft data.frame, mysubcol vector reads enabled fields ("1") from myrecdata.xlsx
        mysubcol <<- as.logical(sapply(mysoft[mysoft$goal == softassesscat,], function(x) grepl("^1$", x)))
        softperpipe <- mysoft[mysoft$goal==mypipefilter ,mysubcol, drop = FALSE]
        print(paste("softperpipe: ", softperpipe))
        subsoft<-as.character(softperpipe[,"Software"])
        #print(paste("subsoft: ", subsoft, " timestamp = ", timestamp()))
        # software selection per pipeline step
        output$pconst <<- renderUI({selectizeInput(
            'pconst', '4) Software', choices = as.character(mysoft[mysoft$goal==mypipefilter, 3]),
            multiple = TRUE, options = list(maxItems = 1, placeholder = 'Please select an option below'))
        })
        ## colors the selected pipeline step node
        StandPipelines(callStandardPipe())
        output$standp <<- renderGrViz({reC})
        output$softrecommend = DT::renderDataTable(
            recommendationTable(),
            rownames = FALSE, class = "cell_border",
            server = TRUE,
            selection = 'single',
            style = 'bootstrap',
            options = list(
                #columnDefs = list(list(searchable = FALSE), list(className = 'dt-center')),
                dom = 't', # "ft" is with search field
                scrollY = '200px', paging = FALSE,
                #scrollX = '890px',
                initComplete = JS(
                    "function(settings, json) {",
                    "$(this.api().table().header()).css({'background-color': '#2980b9', 'color': '#fff'});","}"
                    #"$(this.api().table().Seletion().css({selection-color})
                ),
                extensions = 'ColVis'
            )
        )
        ## call citation history function
        output$papertrend <- renderPlot({
            cithist(subsoft)
        })
        ## input for outputDataTable
        softperpipe
    })
    # Software Decision Support Table
    observeEvent({input$softrecommend_rows_selected}, {
        sr <<- as.vector(input$softrecommend_rows_selected)
        # NOTE(review): when the selection is cleared, sr is NULL/length 0 and
        # `if (sr>0)` errors; `myd` is also undefined on that path -- confirm.
        if (sr>0) {
            myd<- finalDecision(sr)
        }
        print("print selected row:")
        print(sr)
        output$decTable = DT::renderDataTable(
            myd,
            rownames = TRUE, class = "cell_border",
            server = TRUE,
            style = 'bootstrap',
            selection = 'none',
            options = list(
                #columnDefs = list(list(searchable = FALSE), list(className = 'dt-center')),
                dom = 't', # "ft" is with search field
                scrollY = '200px', paging = FALSE,
                scrollX = '500px',
                columnDefs = list(list(searchable = FALSE,
                    bsort = FALSE,
                    className = 'dt-left',
                    autoWidth = TRUE,
                    width = '250px' , targets = c(0),
                    width = '150px' , targets = c(1),
                    width = '150px' , targets = c(2),
                    width = '150px' , targets = c(3)
                    #width = '15%' , targets = c(4)
                )),
                initComplete = JS(
                    "function(settings, json) {",
                    "$(this.api().table().header()).css({'background-color': '#2980b9', 'color': '#fff'});","}"),
                extensions = 'ColVis'
            )
        )
    })
    ################################ output to ui #######################################
    scoreInput <<- reactive({
        input$soft_category
        softperpipe <<- mysoft[ ,c(15:17), drop = FALSE]})
    output$input_type_text <- renderText({
        input$soft_category})
    output$dynamic_value <- renderPrint({
        str(input$dynamic)})
    ######################################################################################
    # software table
    output$tabletitle <- renderText({
        # NOTE(review): only the last paste() is returned; the first line is dead code.
        paste("Software Information System: Software \"",input$pipelinestep,"\" with focus on \"", input$soft_category, "\"", sep="")
        paste("Software Information System: Software", input$soft_category, sep="")
    })
    # Knowledge Table
    output$softdatatableout = DT::renderDataTable(rownames = FALSE, class = "cell_border",
        pipelinestepsoftInput(),
        server = TRUE,
        selection = x0,
        style = 'bootstrap',
        options = list(
            dom = 'ft', # "ft" is with search field
            scrollY = '200px', paging = FALSE,
            scrollX = '890px',
            columnDefs = list(list(searchable = FALSE,
                bsort = FALSE,
                className = 'dt-left',
                autoWidth = TRUE,
                width = '10%' , targets = c(0),
                width = '10%' , targets = c(1),
                width = '10%' , targets = c(2),
                width = '70%' , targets = c(3)
            )),
            initComplete = JS(
                "function(settings, json) {",
                "$(this.api().table().header()).css({'background-color': 'grey', 'color': '#fff'});","
                }"
            )
        )
    )
    # Software Lineup: selection made on the Knowledge DataTable
    observeEvent(input$softdatatableout_rows_selected, {
        s <<- input$softdatatableout_rows_selected
        if (length(s) > 2) {
            x0 <- 'none'  # NOTE(review): local assignment; does not change the global x0 used above -- confirm
            print(x0)
        }
        else {
            mypipelinesoftInput <<- as.data.frame(pipelinestepsoftInput())
            selectedSoft <<- as.character(mypipelinesoftInput[s,"Software"]) # s selected row from KnowledgeTable
            print("software name(s)for benchmarking and assessment category stored")
            benchmarking(selectedSoft, softassesscat)
            print("benchmarking function processed")
            output$benchdatatableout = DT::renderDataTable(mybench, rownames = FALSE,
                server = TRUE,
                selection = 'none',
                style = 'bootstrap',
                options = list(
                    dom = 't',
                    scrollY = '200px', paging = FALSE,
                    scrollX = '590px',
                    extensions = 'ColVis',
                    columnDefs = list(list(searchable = FALSE,
                        className = 'dt-left',
                        autoWidth = FALSE,
                        #width = '20%' , targets = c(0),
                        width = '40%' , targets = c(1),
                        width = '40%' , targets = c(2)
                    )),
                    initComplete = JS(
                        "function(settings, json) {",
                        "$(this.api().table().header()).css({'background-color': '#2980b9', 'color': '#fff'});"
                        ,"}")
                )
            )
        }
    })
7412069b572a4d075925d87c35c18424186f68fa | 203e755d91d27365117ca10f2215709a2c0fea1c | /R/bhpm.cluster.1a.hier3.lev2.R | 1b6e55eab801c9a06a308b6c07c3fabaae8d3f53 | [] | no_license | rcarragh/bhpm | ab447e4fbaf47953e50161f1f4bee8a6c15c7022 | f8847db6eb116ef9b783cbfcab48a801e2455f67 | refs/heads/master | 2021-07-05T00:15:10.173741 | 2020-10-28T12:27:48 | 2020-10-28T12:27:48 | 191,961,611 | 1 | 2 | null | null | null | null | UTF-8 | R | false | false | 13,741 | r | bhpm.cluster.1a.hier3.lev2.R | # bhpm.cluster
# bhpm: Cluster Analysis wrapper
# R. Carragher
# Date: 29/06/2018
# Md is a private environment holding this model's data structures,
# hyper-parameters, and per-chain state; it is populated and consumed by
# the functions defined below.
Md <- new.env()
# RCS revision identifier of this source file.
Md$Id <- "$Id: bhpm.cluster.1a.hier3.lev2.R,v 1.13 2020/03/31 12:42:23 clb13102 Exp clb13102 $"
# Fit the BB1a three-level hierarchical Poisson model (dependence level 2)
# over clustered outcome data by MCMC.
#
# The heavy lifting is done in compiled code via .Call("bhpm1a_poisson_mc_exec");
# this wrapper (1) validates/assembles inputs through the M_global helpers,
# (2) stores data and hyper-parameters in the Md environment, (3) runs the
# sampler, (4) pulls back whichever chains were requested in `monitor`, and
# (5) returns everything as a named list tagged with the model attribute.
#
# Arguments:
#   cluster.data      raw cluster data; parsed by M_global$CLUSTERdata.
#   sim_type          "SLICE" or "MH" -- which sampler variant to use.
#   burnin, iter      burn-in length and total iterations per chain.
#   nchains           number of MCMC chains.
#   global.sim.params one row per sampler type with its tuning value/control.
#   sim.params        optional per-parameter simulation settings.
#   monitor           which parameter groups to record (1 = record).
#   initial_values    optional user-supplied starting values for the chains.
#   hyper_params      prior hyper-parameters for the gamma/theta hierarchy.
#   memory_model      passed through to the C code ("HIGH"/"LOW" presumably
#                     trades memory for speed -- TODO confirm in C source).
#
# Returns NULL on invalid input, otherwise a list of samples and metadata.
bhpm.cluster.1a.dep.lev2 <- function(cluster.data, sim_type = "SLICE", burnin = 10000, iter = 40000, nchains = 3,
global.sim.params = data.frame(type = c("MH", "SLICE"), param = c("sigma_MH", "w"), value = c(0.2,1),
control = c(0,6)),
sim.params = NULL,
monitor = data.frame(variable = c("theta", "gamma", "mu.gamma", "mu.theta",
"sigma2.theta", "sigma2.gamma",
"mu.theta.0", "mu.gamma.0", "tau2.theta.0", "tau2.gamma.0"),
monitor = c(1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
stringsAsFactors = FALSE),
initial_values = NULL,
hyper_params = list(mu.gamma.0.0 = 0, tau2.gamma.0.0 = 10,
mu.theta.0.0 = 0, tau2.theta.0.0 = 10, alpha.gamma.0.0 = 3, beta.gamma.0.0 = 1, alpha.theta.0.0 = 3,
beta.theta.0.0 = 1, alpha.gamma = 3, beta.gamma = 1, alpha.theta = 3, beta.theta = 1), memory_model = "HIGH")
{
# Parse and validate the raw data, populating Md as a side effect.
cluster = M_global$CLUSTERdata(Md, cluster.data, iter, nchains, burnin, initial_values)
if (is.null(cluster)) {
return(NULL)
}
cluster.data = cluster$cluster.data
cntrl.data = cluster$cntrl.data
Md$sim_type <- sim_type
# Exactly one row of global.sim.params must match the chosen sampler type.
if (nrow(global.sim.params[global.sim.params$type == sim_type,]) != 1) {
message("Missing simulation parametetrs");
return(NULL)
}
Md$global.sim.param <- global.sim.params[global.sim.params$type == sim_type,]$value
Md$global.sim.param_ctrl <- global.sim.params[global.sim.params$type == sim_type,]$control
# Tuning value must be strictly positive (e.g. slice width / MH sigma).
if (Md$global.sim.param <= 0) {
message("Invalid simulation parametetr value");
return(NULL)
}
# Dependence level 2 for this model variant.
Md$level = 2
sim.params = M_global$CLUSTER_sim_params1a(Md, sim.params, sim_type, cluster.data, cntrl.data)
monitor = M_global$CLUSTER_monitor_1a_3(monitor)
# Initialise the hyper-parameters
Md$mu.gamma.0.0 <- hyper_params$mu.gamma.0.0
Md$tau2.gamma.0.0 <- hyper_params$tau2.gamma.0.0
Md$alpha.gamma <- hyper_params$alpha.gamma
Md$beta.gamma <- hyper_params$beta.gamma
Md$alpha.gamma.0.0 <- hyper_params$alpha.gamma.0.0
Md$beta.gamma.0.0 <- hyper_params$beta.gamma.0.0
Md$mu.theta.0.0 <- hyper_params$mu.theta.0.0
Md$tau2.theta.0.0 <- hyper_params$tau2.theta.0.0
Md$alpha.theta <- hyper_params$alpha.theta
Md$beta.theta <- hyper_params$beta.theta
Md$alpha.theta.0.0 <- hyper_params$alpha.theta.0.0
Md$beta.theta.0.0 <- hyper_params$beta.theta.0.0
# Run the MCMC sampler in compiled code. Arrays are transposed (aperm/t)
# to match the column-major layout the C routine expects.
Ret2 = .Call("bhpm1a_poisson_mc_exec", as.integer(nchains), as.integer(burnin),
as.integer(iter), Md$sim_type,
memory_model,
as.numeric(Md$global.sim.param),
as.numeric(Md$global.sim.param_ctrl),
sim.params,
monitor,
as.integer(Md$nTreatments),
as.integer(Md$numClusters), as.integer(Md$level),
Md$maxOutcome.Grps, as.integer(Md$numOutcome.Grp), as.integer(Md$maxOutcomes),
as.integer(t(Md$nOutcome)), as.integer(aperm(Md$x)), as.integer(aperm(Md$y)),
as.numeric(aperm(Md$C)),
as.numeric(aperm(Md$T)),
as.numeric(aperm(Md$theta)),
as.numeric(aperm(Md$gamma)),
as.numeric(Md$mu.gamma.0.0),
as.numeric(Md$tau2.gamma.0.0),
as.numeric(Md$mu.theta.0.0),
as.numeric(Md$tau2.theta.0.0),
as.numeric(Md$alpha.gamma.0.0),
as.numeric(Md$beta.gamma.0.0),
as.numeric(Md$alpha.theta.0.0),
as.numeric(Md$beta.theta.0.0),
as.numeric(Md$alpha.gamma),
as.numeric(Md$beta.gamma),
as.numeric(Md$alpha.theta),
as.numeric(Md$beta.theta),
as.numeric(Md$mu.gamma.0),
as.numeric(Md$tau2.gamma.0),
as.numeric(Md$mu.theta.0),
as.numeric(Md$tau2.theta.0),
as.numeric(aperm(Md$mu.gamma)),
as.numeric(aperm(Md$mu.theta)),
as.numeric(aperm(Md$sigma2.gamma)),
as.numeric(aperm(Md$sigma2.theta)))
# Retrieve each requested sample chain from the C side. Each getter returns
# the samples in C layout; aperm() restores the R-side dimension order.
mu.gamma.0_samples = NULL
if (monitor[monitor$variable == "mu.gamma.0", ]$monitor == 1) {
mu.gamma.0_samples <- .Call("getMuGamma0SamplesClusterAll")
mu.gamma.0_samples = aperm(mu.gamma.0_samples)
}
mu.theta.0_samples = NULL
if (monitor[monitor$variable == "mu.theta.0", ]$monitor == 1) {
mu.theta.0_samples <- .Call("getMuTheta0SamplesClusterAll")
mu.theta.0_samples = aperm(mu.theta.0_samples)
}
tau2.gamma.0_samples = NULL
if (monitor[monitor$variable == "tau2.gamma.0", ]$monitor == 1) {
tau2.gamma.0_samples <- .Call("getTau2Gamma0SamplesClusterAll")
tau2.gamma.0_samples = aperm(tau2.gamma.0_samples)
}
tau2.theta.0_samples = NULL
if (monitor[monitor$variable == "tau2.theta.0", ]$monitor == 1) {
tau2.theta.0_samples <- .Call("getTau2Theta0SamplesClusterAll")
tau2.theta.0_samples = aperm(tau2.theta.0_samples)
}
mu.theta_samples = NULL
if (monitor[monitor$variable == "mu.theta", ]$monitor == 1) {
mu.theta_samples <- .Call("getMuThetaSamplesClusterAll")
mu.theta_samples <- aperm(mu.theta_samples)
}
mu.gamma_samples = NULL
if (monitor[monitor$variable == "mu.gamma", ]$monitor == 1) {
mu.gamma_samples <- .Call("getMuGammaSamplesClusterAll")
mu.gamma_samples <- aperm(mu.gamma_samples)
}
sigma2.theta_samples = NULL
if (monitor[monitor$variable == "sigma2.theta", ]$monitor == 1) {
sigma2.theta_samples <- .Call("getSigma2ThetaSamplesClusterAll")
sigma2.theta_samples <- aperm(sigma2.theta_samples)
}
sigma2.gamma_samples = NULL
if (monitor[monitor$variable == "sigma2.gamma", ]$monitor == 1) {
sigma2.gamma_samples <- .Call("getSigma2GammaSamplesClusterAll")
sigma2.gamma_samples <- aperm(sigma2.gamma_samples)
}
# For gamma and theta the acceptance counters are retrieved as well.
gamma_samples = NULL
gamma_acc = NULL
if (monitor[monitor$variable == "gamma", ]$monitor == 1) {
gamma_samples = .Call("getGammaSamplesClusterAll")
gamma_samples = aperm(gamma_samples)
gamma_acc = .Call("getGammaAcceptClusterAll")
gamma_acc <- aperm(gamma_acc)
}
theta_samples = NULL
theta_acc = NULL
if (monitor[monitor$variable == "theta", ]$monitor == 1) {
theta_samples = .Call("getThetaSamplesClusterAll")
theta_samples = aperm(theta_samples)
theta_acc = .Call("getThetaAcceptClusterAll")
theta_acc <- aperm(theta_acc)
}
# Free the C-side storage now that all samples have been copied to R.
.C("Release_Cluster")
# Bundle the run metadata and all (possibly NULL) sample arrays.
model_fit = list(id = Md$Id, sim_type = Md$sim_type, chains = nchains, nClusters = Md$numClusters,
nTreatments = Md$nTreatments,
Clusters = Md$Clusters, Trt.Grps = Md$Trt.Grps, nOutcome.Grp = Md$numOutcome.Grp, maxOutcome.Grps = Md$maxOutcome.Grps,
maxOutcomes = Md$maxOutcomes, nOutcome = Md$nOutcome, Outcome=Md$Outcome, Outcome.Grp = Md$Outcome.Grp,
burnin = burnin, iter = iter,
monitor = monitor,
gamma = gamma_samples,
theta = theta_samples,
mu.gamma = mu.gamma_samples,
mu.theta = mu.theta_samples,
sigma2.gamma = sigma2.gamma_samples,
sigma2.theta = sigma2.theta_samples,
mu.gamma.0 = mu.gamma.0_samples,
mu.theta.0 = mu.theta.0_samples,
tau2.gamma.0 = tau2.gamma.0_samples,
tau2.theta.0 = tau2.theta.0_samples,
gamma_acc = gamma_acc,
theta_acc = theta_acc)
# Model is poisson with BB1a hierarchy and independent clusters
attr(model_fit, "model") = "1a_pois_dep_lev2"
return(model_fit)
}
# Reset every field of the Md environment to an empty/NA placeholder.
# Called before a run so no state leaks across model fits. Fields are grouped
# by role: observed data, hyper-parameters, and the three stages of sampled
# parameters.
Md$initVars = function() {
# Data Structure
Md$Outcome.Grp <- c()
Md$numOutcome.Grp <- NA
Md$numClusters <- NA
Md$nOutcome <- c()
Md$maxOutcomes <- NA
# Cluster Event Data
Md$x <- array()
Md$C <- array()
Md$y <- array()
Md$T <- array()
# Hyperparameters
Md$mu.gamma.0.0 <- NA
Md$tau2.gamma.0.0 <- NA
Md$mu.theta.0.0 <- NA
Md$tau2.theta.0.0 <- NA
Md$alpha.gamma.0.0 <- NA
Md$beta.gamma.0.0 <- NA
Md$alpha.theta.0.0 <- NA
Md$beta.theta.0.0 <- NA
Md$alpha.gamma <- NA
Md$beta.gamma <- NA
Md$alpha.theta <- NA
Md$beta.theta <- NA
# Parameters/Simulated values
# Stage 3
Md$mu.gamma.0 <- c()
Md$tau2.gamma.0 <- c()
Md$mu.theta.0 <- c()
Md$tau2.theta.0 <- c()
# Stage 2
Md$mu.gamma <- array()
Md$mu.theta <- array()
Md$sigma2.gamma <- array()
Md$sigma2.theta <- array()
# Stage 1
Md$theta <- array()
Md$gamma <- array()
}
# Randomly initialise all parameters for chain `c` (used for every chain
# except the first, which is initialised from the data). gamma/theta and the
# stage-2/3 means are drawn uniformly on (-10, 10); variance parameters on
# (5, 20). Non-finite draws are clamped to -10.
Md$initChains = function(c) {
# Choose random values for gamma and theta
for (i in 1:Md$numClusters) {
numOutcome.Grp = Md$numOutcome.Grp[i]
for (b in 1:numOutcome.Grp) {
Md$gamma[c, i, b, 1:Md$nOutcome[i, b]] <- runif(Md$nOutcome[i, b], -10, 10)
Md$gamma[c, i, b, ][is.infinite(Md$gamma[c, i, b, ])] = -10
Md$gamma[c, i, b, ][is.nan(Md$gamma[c, i, b, ])] = -10 # -1000
# theta is indexed per comparator treatment (nTreatments - 1 of them).
for (t in 1:(Md$nTreatments -1)) {
Md$theta[c, t, i, b, 1:Md$nOutcome[i, b]] <- runif(Md$nOutcome[i, b], -10, 10)
Md$theta[c, t, i, b, ][is.infinite(Md$theta[c, t, i, b, ])] = -10
Md$theta[c, t, i, b, ][is.nan(Md$theta[c, t, i, b, ])] = -10 # -1000
}
}
# Stage-2 means and variances for this cluster.
Md$mu.gamma[c, i, 1:numOutcome.Grp] = runif(numOutcome.Grp, -10, 10)
Md$mu.theta[c,, i, 1:numOutcome.Grp] = runif(numOutcome.Grp*(Md$nTreatments -1), -10, 10)
Md$sigma2.gamma[c, i, 1:numOutcome.Grp] = runif(numOutcome.Grp, 5, 20)
Md$sigma2.theta[c,, i, 1:numOutcome.Grp] = runif(numOutcome.Grp*(Md$nTreatments -1), 5, 20)
# Stage-3 (top-level) parameters for this chain.
Md$mu.gamma.0[c] = runif(1, -10, 10)
Md$tau2.gamma.0[c] = runif(1, 5, 20)
Md$mu.theta.0[c,] = runif(1*(Md$nTreatments -1), -10, 10)
Md$tau2.theta.0[c,] = runif(1*(Md$nTreatments -1), 5, 20)
}
}
# Allocate the chain arrays and fill in starting values.
#
# Two modes:
#   * initial_values == NULL: chain 1 is seeded from the observed data
#     (log incidence rates), remaining chains from Md$initChains' random
#     draws; stage-2/3 parameters get fixed defaults (means 0, variances 10).
#   * initial_values supplied: every value is looked up in the user-provided
#     data frames, matched by chain / cluster / outcome group / outcome
#     (and treatment index for the theta-side parameters).
Md$initialiseChains = function(initial_values, nchains) {
# theta has an extra leading treatment dimension; gamma does not.
Md$theta = array(0, dim=c(nchains, Md$nTreatments -1, Md$numClusters, Md$maxOutcome.Grps, Md$maxOutcomes))
Md$gamma = array(0, dim=c(nchains, Md$numClusters, Md$maxOutcome.Grps, Md$maxOutcomes))
if (is.null(initial_values)) {
# Initialise the first chain with the data
for (i in 1:Md$numClusters) {
numOutcome.Grp = Md$numOutcome.Grp[i]
for (b in 1:numOutcome.Grp) {
# log rate in the control arm; non-finite values (zero counts) -> -10.
Md$gamma[1, i, b, ] <- log(Md$x[i, b,]/Md$C[i, b, ])
for (t in 1:(Md$nTreatments -1)) {
# log rate ratio of treatment arm t vs control.
Md$theta[1, t, i, b, ] <- log(Md$y[t, i, b,]/Md$T[t, i, b,]) - Md$gamma[1, i, b, ]
Md$theta[1, t, i, b, ][is.infinite(Md$theta[1, t, i, b, ])] = -10 # -1000
Md$theta[1, t, i, b, ][is.nan(Md$theta[1, t, i, b, ])] = -10 # -1000
}
Md$gamma[1, i, b, ][is.infinite(Md$gamma[1, i, b, ])] = -10 # -1000
Md$gamma[1, i, b, ][is.nan(Md$gamma[1, i, b, ])] = -10 # -1000
}
}
# Default stage-2/3 starting values for all chains.
Md$mu.gamma <- array(0, dim = c(nchains, Md$numClusters, Md$maxOutcome.Grps))
Md$mu.theta <- array(0, dim = c(nchains, Md$nTreatments -1, Md$numClusters, Md$maxOutcome.Grps))
Md$sigma2.gamma <- array(10, dim = c(nchains, Md$numClusters, Md$maxOutcome.Grps))
Md$sigma2.theta <- array(10, dim = c(nchains, Md$nTreatments -1, Md$numClusters, Md$maxOutcome.Grps))
Md$mu.gamma.0 <- rep(0, nchains)
Md$tau2.gamma.0 <- rep(10, nchains)
Md$mu.theta.0 <- array(0, dim = c(nchains, Md$nTreatments -1))
Md$tau2.theta.0 <- array(10, dim = c(nchains, Md$nTreatments -1))
# Chains 2..nchains start from random values.
if (nchains > 1) {
for (c in 2:nchains) {
Md$initChains(c)
}
}
}
else {
# User-supplied initial values: first the stage-3 scalars per chain...
Md$mu.gamma.0 <- rep(0, nchains)
Md$tau2.gamma.0 <- rep(10, nchains)
Md$mu.theta.0 <- array(0, dim = c(nchains, Md$nTreatments -1))
Md$tau2.theta.0 <- array(10, dim = c(nchains, Md$nTreatments -1))
for (c in 1:nchains) {
Md$mu.gamma.0[c] = initial_values$mu.gamma.0[c]
Md$tau2.gamma.0[c] = initial_values$tau2.gamma.0[c]
for (t in 1:(Md$nTreatments -1)) {
Md$mu.theta.0[c,t] = initial_values$mu.theta.0[[t]][c]
Md$tau2.theta.0[c,t] = initial_values$tau2.theta.0[[t]][c]
}
}
# ...then the stage-2 parameters, matched by chain/cluster/outcome group...
Md$mu.gamma <- array(0, dim = c(nchains, Md$numClusters, Md$maxOutcome.Grps))
Md$mu.theta <- array(0, dim = c(nchains, Md$nTreatments -1, Md$numClusters, Md$maxOutcome.Grps))
Md$sigma2.gamma <- array(0, dim = c(nchains, Md$numClusters, Md$maxOutcome.Grps))
Md$sigma2.theta <- array(0, dim = c(nchains, Md$nTreatments -1, Md$numClusters, Md$maxOutcome.Grps))
for (c in 1:nchains) {
for (i in 1:Md$numClusters) {
cluster = Md$Clusters[i]
for (b in 1:Md$numOutcome.Grp[i]) {
data = initial_values$mu.gamma[initial_values$mu.gamma$chain == c &
initial_values$mu.gamma$Cluster == cluster
& initial_values$mu.gamma$Outcome.Grp == Md$Outcome.Grp[i, b],]
Md$mu.gamma[c, i, b] = data$value
data = initial_values$sigma2.gamma[initial_values$sigma2.gamma$chain == c &
initial_values$sigma2.gamma$Cluster == cluster
& initial_values$sigma2.gamma$Outcome.Grp == Md$Outcome.Grp[i, b],]
Md$sigma2.gamma[c, i, b] = data$value
for (t in 1:(Md$nTreatments -1)) {
data = initial_values$mu.theta[[t]][initial_values$mu.theta[[t]]$chain == c &
initial_values$mu.theta[[t]]$Cluster == cluster
& initial_values$mu.theta[[t]]$Outcome.Grp == Md$Outcome.Grp[i, b],]
Md$mu.theta[c, t, i, b] = data$value
data = initial_values$sigma2.theta[[t]][initial_values$sigma2.theta[[t]]$chain == c &
initial_values$sigma2.theta[[t]]$Cluster == cluster
& initial_values$sigma2.theta[[t]]$Outcome.Grp == Md$Outcome.Grp[i, b],]
Md$sigma2.theta[c, t, i, b] = data$value
}
}
}
}
# ...and finally the stage-1 gamma/theta values, matched per outcome.
for (c in 1:nchains) {
for (i in 1:Md$numClusters) {
cluster = Md$Clusters[i]
for (b in 1:Md$numOutcome.Grp[i]) {
for (j in 1:Md$nOutcome[i, b]) {
ae = Md$Outcome[i, b, j]
data = initial_values$gamma[initial_values$gamma$chain == c
& initial_values$gamma$Cluster == cluster
& initial_values$gamma$Outcome.Grp == Md$Outcome.Grp[i, b]
& initial_values$gamma$Outcome == ae,]
Md$gamma[c, i, b, j] = data$value
for (t in 1:(Md$nTreatments -1)) {
data = initial_values$theta[[t]][initial_values$theta[[t]]$chain == c
& initial_values$theta[[t]]$Cluster == cluster
& initial_values$theta[[t]]$Outcome.Grp == Md$Outcome.Grp[i, b]
& initial_values$theta[[t]]$Outcome == ae,]
Md$theta[c, t, i, b, j] = data$value
}
}
}
}
}
}
}
|
e4367a1ff0ce07b53684846599eba6c4deea7951 | 7075471c1b29d89c7d08cc3c72ef888609e76dd1 | /ui.R | ad26a45fa3e555a3205745f128d846c459542d0b | [] | no_license | manshrestha/Shiny-Application-and-Reproducible-Pitch | ddee2a7157a4666f70710a205aebad701007854e | f340a695ef3566abe56b68b6dd5dee4c4aca9d99 | refs/heads/master | 2020-12-30T22:32:18.419172 | 2016-05-12T11:11:18 | 2016-05-12T11:11:18 | 58,634,522 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,511 | r | ui.R | library(shiny)
# UI for the fuel-efficiency (MPG) prediction app.
#
# Fixes in this revision:
#   * numericInput labels were character *vectors* built with c(), mixing
#     strings and numbers; `label` expects a single string, so they are now
#     built with paste0().
#   * "Cyclinder" typo corrected to "Cylinder".
# All input bounds come from the ranges observed in the 'mtcars' dataset, so
# predictions stay inside the range of the data the server model was fit on.
shinyUI(navbarPage(
  title = "Calculates Car Fuel Efficiency",
  tabPanel(
    title = "PREDICTION",
    sidebarLayout(
      sidebarPanel(
        h4("Predicts the fuel efficiency of your car in MPG (miles per gallon)"),
        br(),
        # Number of cylinders; mtcars only contains even counts, hence step 2.
        numericInput(inputId = "cyl",
                     label = paste0("Number of Cylinder (even number): ",
                                    min(mtcars$cyl), " to ", max(mtcars$cyl)),
                     value = min(mtcars$cyl),
                     min = min(mtcars$cyl),
                     max = max(mtcars$cyl),
                     step = 2),
        # Gross horsepower.
        numericInput(inputId = "hp",
                     label = paste0("Horse Power: ",
                                    min(mtcars$hp), " to ", max(mtcars$hp)),
                     value = min(mtcars$hp),
                     min = min(mtcars$hp),
                     max = max(mtcars$hp),
                     step = 1),
        # Weight in units of 1000 lbs, hence the fine-grained 1/1000 step.
        numericInput(inputId = "wt",
                     label = paste0("Car Weight (1000 lbs): ",
                                    min(mtcars$wt), " to ", max(mtcars$wt)),
                     value = min(mtcars$wt),
                     min = min(mtcars$wt),
                     max = max(mtcars$wt),
                     step = 1 / 1000),
        # Transmission type.
        radioButtons("am",
                     "Transmission Type",
                     c("Automatic", "Manual"),
                     selected = NULL),
        # Inputs are only sent to the server when the user presses Submit.
        submitButton("Submit")
      ),
      mainPanel(
        h2("Your Car Specs Are:"),
        br(),
        h4("Number of Cylinder:"),
        verbatimTextOutput("cyl"),
        h4("Horse Power:"),
        verbatimTextOutput("hp"),
        h4("Weight (1000 lbs):"),
        verbatimTextOutput("wt"),
        h4("Transmission Type:"),
        verbatimTextOutput("am"),
        br(),
        h4("The average fuel efficiency (MPG) of your car is as:"),
        verbatimTextOutput("mpg_avg")
      )
    )
  ),
  tabPanel(title = "DOCUMENTATION",
           includeMarkdown("readme.md"))
))
|
35b56246321178e7a0267229ec21b2553e31d846 | f1de9ab672d72d706324f9f6d597f46f601622bf | /src/linear_model_and_plot.R | 3df97c717df8758da2403fa50bf06dd6e57d2a1e | [] | no_license | rq1995/DSCI_522_milestone | a3ecb8f259dbe3edd939e53a9d2cc51ebc5649ce | 483db6d8d9740858ae1d2ea5101cad7074ff7d26 | refs/heads/master | 2021-09-04T19:35:21.875333 | 2018-01-21T20:15:57 | 2018-01-21T20:15:57 | 112,911,630 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,521 | r | linear_model_and_plot.R | #! /usr/bin/env Rscript
# linear_model_and_plot.R
# Stephanie Xu, Dec 2016
#
# This script can fit linear model and plot it based on data from a .csv file.
#
# Usage: Rscript linear_model_and_plot.R titanic_clean.csv
# read in command line arguments
args <- commandArgs(trailingOnly = TRUE)
input_file <- args[1]
library(tidyverse)
library(ggplot2)
library(svglite)
# define main function
# Fit survived ~ age and survived ~ fare on the cleaned Titanic data, saving
# a tidied coefficient table and a boxplot for each predictor.
#
# The two analyses were previously duplicated line-for-line; they now share
# one helper. Bare `summary(lmN)` calls whose results were discarded have
# been removed. Output paths are unchanged:
#   ./results/<predictor>_survived.csv
#   ./results/figure/<predictor>_survived.svg
main <- function(){
  # read in data (input_file is set from the command line at the top of the
  # script)
  data1 <- read.csv(input_file, header = TRUE)
  analyze_predictor(data1, "age")
  analyze_predictor(data1, "fare")
}

# Fit a simple linear model survived ~ <predictor>, write the tidied
# coefficient table to CSV, and save a boxplot of <predictor> by survival.
#
# data1     : data frame with a `survived` column and the predictor column.
# predictor : name of the predictor column, as a string ("age" or "fare").
analyze_predictor <- function(data1, predictor) {
  # reformulate() builds `survived ~ <predictor>` from the string name.
  fit <- lm(reformulate(predictor, response = "survived"), data = data1)
  coef_table <- broom::tidy(summary(fit))
  write.csv(coef_table, file = paste0("./results/", predictor, "_survived.csv"))
  # Boxplot of the predictor split by survival status; `.data` pronoun looks
  # the column up by its string name inside aes().
  p <- ggplot(data1, aes(factor(survived), .data[[predictor]])) +
    geom_boxplot(aes(group = survived)) +
    labs(x = "survived", y = predictor,
         title = paste("Boxplot for survived and", predictor))
  ggsave(paste0("./results/figure/", predictor, "_survived.svg"), p, scale = 0.8)
}
main() |
f7ae5372b870753e673848c8a74960311d00983e | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/nlme/examples/corRatio.Rd.R | 821d733a29d0712702f1013d51a88b049b7d76ff | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 849 | r | corRatio.Rd.R | library(nlme)
### Name: corRatio
### Title: Rational Quadratic Correlation Structure
### Aliases: corRatio
### Keywords: models
### ** Examples
# A corRatio structure over three position covariates (no model fit yet).
sp1 <- corRatio(form = ~ x + y + z)
# example lme(..., corRatio ...)
# Pinheiro and Bates, pp. 222-249
# Base mixed model: random intercept and slope in Time per subject.
fm1BW.lme <- lme(weight ~ Time * Diet, BodyWeight,
random = ~ Time)
# p. 223
# Add a power-of-the-mean variance function.
fm2BW.lme <- update(fm1BW.lme, weights = varPower())
# p 246
# Add exponential serial correlation in Time...
fm3BW.lme <- update(fm2BW.lme,
correlation = corExp(form = ~ Time))
# p. 249
# ...and compare with a rational-quadratic correlation in Time.
fm5BW.lme <- update(fm3BW.lme, correlation =
corRatio(form = ~ Time))
# example gls(..., corRatio ...)
# Pinheiro and Bates, pp. 261, 263
fm1Wheat2 <- gls(yield ~ variety - 1, Wheat2)
# p. 263
# Spatial rational-quadratic correlation with a nugget effect; c(12.5, 0.2)
# supplies starting values for (range, nugget).
fm3Wheat2 <- update(fm1Wheat2, corr =
corRatio(c(12.5, 0.2),
form = ~ latitude + longitude,
nugget = TRUE))
|
356c51eb3a6da63906d9818ee8fc7a699d0a20cf | 4d7e214578bea5355d869f596e04b6800e0c654b | /R/bilsalget.R | 56fd40e89ae550595cea55004554a5f1def50fcd | [] | no_license | dmi3kno/zoe | b02821f02ac9590de0b9e816867475175553a8a6 | b5e58d28c0db95e4840bdd7a286f04638e8332fb | refs/heads/master | 2020-03-23T19:00:31.150632 | 2019-02-09T21:05:38 | 2019-02-09T21:05:38 | 141,947,963 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,003 | r | bilsalget.R | #' New car registrations in Norway data for 1999-2018
#'
#' Selected monthly data about new car registrations in Norway for 1999-2018.
#' Raw data is presented in "long" format for several data series and from different sources.
#'
#' @source Opplysningsrådet for Veitrafikken AS (OFV AS), website
#' <http://www.ofvas.no>
#' @format Data frame with columns
#' \describe{
#' \item{car_name}{Name of the make or model (depending on the data `series``).}
#' \item{year, month}{Year and month of observation.}
#' \item{series}{Factor variable taking one of the following values: makes, models, total models or total makes.}
#' \item{source}{Source of the data. Top-20/40 tables from OFVAS website (top), images(ocr) or various other sources (web)}
#' \item{metric}{Current month observation (CM) or year-to-date (YTD).}
#' \item{period}{Recorded as current year data (CY) or last year data (LY).}
#' \item{value}{Quantity of the cars registered}
#' }
"bilsalget_raw"
#' @importFrom tibble tibble
NULL
|
5b02e930b4253c1e2dedc9b92da5c9f4b4966e1f | 3f265708edbf54676b492987a916eab569c99b3b | /split_related_pairs.R | 301b19a4410ecc30bb3e0ed529ca9728849a883c | [] | no_license | jmoggridge/Labrador_ACL_rupture_GWAS | e95e1329e26f6c404e6d2ffa8d365c479866eb5d | 92fd1da7c03c98c8424827dd1d5f47c6c2f4b59a | refs/heads/main | 2023-04-05T13:57:34.750968 | 2021-04-08T17:02:11 | 2021-04-08T17:02:11 | 350,905,722 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 414 | r | split_related_pairs.R | # tutorial needs a script to select out related pairs. (not by hand)
# generally, want to drop the member with more missing data.
# could try to find the individuals most related to others?
# df <- read.table("plink.imiss", header = T)
# snps <- read.table("min0.2_something from last part of QC)
# pick <- function(row_input){
#
# }
#
# to_drop <- list()[1:nrow(df)]
# for i in seq_along(nrow(df)){
#
# } |
e171d8d28e5cf1dd557ccbd0e8a8cbbea9539c26 | b28381c59a503d10dd3cb2e42a5d25430c644abe | /shinyapp_craigslist_housing_browser/server.R | 6688cea31d862628ce9eafb9f8cff96d7c4c3248 | [] | no_license | sueyic/shinyapp-craigslist-housing | c150801e9212b3db732d8192bc674e1b6ac7c051 | 7be7cb599f4edd37501b6d446d06596096514151 | refs/heads/master | 2021-01-10T03:06:35.406464 | 2015-12-07T05:15:58 | 2015-12-07T05:15:58 | 46,397,033 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,715 | r | server.R | library(ggplot2)
library(httr)
library(plyr)
library(shiny)
library(stringr)
library(XML)
# Craigslist apartment-listing search pages, one per supported city.
# NOTE(review): newyork uses the "/aap" path while the others use "/apa" --
# confirm this is intentional and not a typo.
URLS = list(
'newyork' = 'http://newyork.craigslist.org/search/aap',
'sfbay' = 'http://sfbay.craigslist.org/search/apa',
'seattle' = 'http://seattle.craigslist.org/search/apa'
)
# Time of the last successful fetch; NA until the first fetch (used below to
# refresh the cache at most every 10 minutes).
fetchTime <- NA
# A combined data frame of all cities data
datadf <- NA
# Scrape one Craigslist search-results page and return a data frame with one
# row per listing: date, title, price (numeric), bedrooms (factor), sqft
# (numeric), and an absolute href back to the listing.
getDataFromUrl <- function(url) {
cat(sprintf("url: *%s*\n", url));
# Download the page and parse the HTML.
content <- content(GET(url), as="text")
parsedHtml <- htmlParse(content, asText=TRUE)
# Scheme+host, used to turn relative listing links into absolute URLs.
domain <- str_extract(url, 'http://[^.]+.craigslist.org')
# Each listing is a <p class="row">; parse each row independently.
posts <- xpathApply(parsedHtml, "//p[@class='row']", fun=function(node) {
rowHtml <- saveXML(node)
parsedRowHtml <- htmlParse(rowHtml, asText=TRUE)
date <- xpathSApply(parsedRowHtml, "//time", xmlValue)
title <- xpathSApply(parsedRowHtml, "//span[@class='pl']/a", xmlValue)
price <- xpathSApply(parsedRowHtml, "//span[@class='price']", xmlValue)
# The "housing" span holds text like "2br - 900ft2"; split on '/', ' ', '-'.
housing <- xpathSApply(parsedRowHtml, "//span[@class='housing']", xmlValue)
housingToks <- strsplit(as.character(housing), '[/ -]+', perl=TRUE)
bedrooms <- NA
sqft <- NA
href <- paste0(
domain,
xpathSApply(parsedRowHtml, "//span[@class='pl']/a",
function(x) { xmlAttrs(x)[['href']] }))
# Only parse tokens when exactly one housing span was found.
if (length(housingToks) == 1) {
for (tok in housingToks[[1]]) {
if (grepl('br', tok)) {
bedrooms <- tok
} else if (grepl('ft2', tok)) {
sqft <- tok
}
}
}
#cat(sprintf("date: *%s*, title: *%s*, price: *%s*, housing: *%s*, bedrooms: *%s*, sqft: *%s*\n", date, title, price, housing, bedrooms, sqft))
# Assemble the row, stripping the "$", "br" and "ft2" markers.
x = list(date=NA, title=NA, price=NA, bedrooms=NA, sqft=NA, href=NA)
x$date = date
x$title = title
x$price = as.numeric(gsub('\\$', '', price))
x$bedrooms = as.factor(gsub('br', '', bedrooms))
x$sqft = as.numeric(gsub('ft2', '', sqft))
x$href = href
return(x)
})
# Stack the per-listing lists into one data frame.
df = do.call(rbind.data.frame, posts)
return(df)
}
# get data for all cities.
getData <- function() {
ldply(URLS, function(url) {
cat(sprintf('this url: *%s*', url))
# try(getDataFromUrl(url))
tryCatch({
getDataFromUrl(url)
}, error = function(err) {
# Occasionally getting data fails (exactly, why?). In this case,
# return an empty data frame for this location.
print(paste("ERROR getting data from url: ", url))
return(data.frame())
})
}) %>% rename(replace=c('.id' = 'city'))
}
# Server logic: caches scraped data in the globals `datadf`/`fetchTime`
# (refreshing at most every 10 minutes), filters it by the selected location,
# and renders cross-city comparison plots, location-specific plots, and a
# browsable table with links back to the listings.
shinyServer(function(input, output) {
# The location currently selected in the UI ("all" or one city).
location <- reactive({
input$location
})
# Cached, location-filtered data. The scrape result is shared across
# sessions via <<- and only re-fetched when it is older than 10 minutes.
df <- reactive({
curTime <- Sys.time()
# Fetch data if we've never fetched it or its been at least 10 minutes since last fetched.
if (is.na(fetchTime) || as.double(difftime(curTime, fetchTime, units="mins")) > 10) {
cat(' fetching\n')
datadf <<- getData()
fetchTime <<- Sys.time()
} else {
cat(' not fetching\n')
}
loc <- location()
cat(sprintf('*** loc: %s', loc))
if (loc == "all") {
return (datadf)
} else {
# Single city: drop the now-constant city column.
data <- subset(datadf, city == loc)
data$city <- NULL
return (data)
}
})
output$viewTitle <- renderText({ifelse(location() == 'all', 'Data for all locations', paste0('Data for ', location()))})
### Plots comparing locations ###
# Each comparison plot renders only in the "all" view; otherwise NULL, so
# nothing is drawn.
output$plotPxLoc <- renderPlot({
if (location() == "all") {
g <- ggplot(df(), aes(y=price, x=city)) + geom_violin() +
labs(title="Price vs City")
} else {
g <- NULL
}
return (g)
})
output$plotPxSqft <- renderPlot({
if (location() == "all") {
g <- ggplot(df(), aes(x=sqft, y=price, color=city)) + geom_point() +
labs(title="Price vs Sqft by City")
} else {
g <- NULL
}
return (g)
})
output$plotBdrLoc <- renderPlot({
if (location() == "all") {
g <- ggplot(df(), aes(x=bedrooms, fill=bedrooms)) + geom_histogram() + coord_flip() + facet_wrap(~ city) + labs(title="Number of Bedrooms per City")
} else {
g <- NULL
}
return (g)
})
### Location-specific plots ###
# These are the mirror image: drawn only when one city is selected.
output$plotPxBdr <- renderPlot({
loc <- location()
if (loc == "all") {
g <- NULL
} else {
g <- ggplot(df(), aes(x=bedrooms, y=price)) + geom_point() +
labs(title="Price vs Bedroom")
}
return (g)
})
output$plotPxSqftBdr <- renderPlot({
loc <- location()
if (loc == "all") {
g <- NULL
} else {
g <- ggplot(df(), aes(x=sqft, y=price, color=bedrooms)) + geom_point() +
labs(title="Price vs Sqft by Bedroom")
}
return (g)
})
### Table ###
# Listings table; the raw href column is replaced with a clickable link
# (escaping disabled only for that column).
output$tbl <- DT::renderDataTable({
table <- df()
table$link = paste0('<a href="', table$href, '" target="_blank">link</a>')
table$href <- NULL
return (datatable(table, escape=-which(names(df()) %in% c('link'))))
})
})
|
a72283e4c270bfc4086301c355044013ea7ac7ae | 56a9d559179735c964590ecca0a7353fd437d8d8 | /R/vie-getNetwork.R | 5c55eab6abec2cc19302c348c42a808167899ecf | [] | no_license | leemh5544/peter | 02c041ea31d32a574be961d4c7df6cdb6754b5c5 | 621e1fc1a5fe3097bdbd92c7a0b64f54562e3d3e | refs/heads/master | 2021-01-18T08:58:10.199960 | 2013-09-16T09:45:52 | 2013-09-16T09:45:52 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,907 | r | vie-getNetwork.R | ### Functions to obtain, parse and store locally Vienna's Wiener Linien network
### data. Note: This data is provided by the Vienna city government under a
### CC-BY license.
##' Download Vienna's Wiener Linien network data
##'
##' This function provides access to Vienna's Wiener Linien network meta data.
##' Data on stations, lines, and platforms are retrieved from online sources.
##'
##' A networks meta data is required to produce a model of the graph
##' representing the network. Wiener Linien provides csv files for stations,
##' lines and platforms. The following sections describe the data content.
##' Usually, all three data sets are required to obtain a meaningful
##' representation of the network graph.
##' @section License: Wiener Linien and the Vienna city government provides
##' these data under a
##' \href{http://creativecommons.org/licenses/by/3.0/at/deed.de}{Creative
##' Commons Attribution license}. The attribution clause is fulfilled by
##' stating the data source: Stadt Wien - data.wien.gv.at. The authoritative
##' description of the data set can be found in the
##' \href{https://open.wien.at/site/datensatz/?id=add66f20-d033-4eee-b9a0-47019828e698}{original
##' data set description}.
##'
##' @section Stations: Stations group together platforms. Some real-time data is
##' available for / applies to stations instead of platforms. Essentially,
##' stations are the nodes in a coarse network model.
##' \tabular{ll}{ HALTESTELLEN_ID \tab Station ID (key)\cr TYP \tab Object type
##' \cr DIVA \tab Internal numeric code \cr NAME \tab Station name \cr GEMEINDE
##' \tab Name of municipality \cr GEMEINDE_ID \tab municipality ID (key) \cr
##' WGS84_LAT \tab Latitude of station location \cr WGS84_LON \tab Longitude of
##' station location \cr STAND \tab Date of data export}
##'
##' @section Lines: Lines are the edges connecting the network nodes: they link
##' up platforms and in that stations.
##'
##' \tabular{ll}{ LINIEN_ID \tab Line ID (key) \cr BEZEICHNUNG \tab Line name
##' \cr REIHENFOLGE \tab Internal line order \cr ECHTZEIT \tab Availability of
##' real-time data \cr VERKEHRSMITTEL \tab Kind of transport \cr STAND \tab
##' Date of data export}
##'
##' @section Platforms: Platform data describes nodes in a much finer resolution
##' than \code{stations}. Essentially, this table links \code{lines} and
##' \code{stations} together.
##'
##' \tabular{ll}{ Steig_ID \tab Platform ID (key) \cr FK_LINIEN_ID \tab Line
##' ID (key) \cr FK_HALTESTELLEN_ID \tab Station ID (key) \cr RICHTUNG \tab
##' Direction of ordering \cr REIHENFOLGE \tab Ordering withing direction\cr
##' RBL_NUMMER \tab Computer aided dispatch code \cr BEREICH \tab Platform
##' area \cr STEIG \tab Name of Platform (internal) \cr STEIG_WGS84_LAT \tab
##' Latitude of platform location \cr STEIG_WGS84_LON \tab Longitude of
##' platform location \cr STAND \tab Date of data export}
##' @param kind a vector specifying the data sets to be downloaded. Defaults to
##' all available data. See Details.
##' @return A named list with (at most) three elements named \code{stations},
##' \code{lines}, and \code{platforms}.
##' @export
##' @examples
##' \dontrun{
##' vie.ntw <- getNetwork.vie() # get all network data
##' }
# Download the requested Wiener Linien network tables (stations, lines,
# platforms) from Vienna's open-data portal.
#
# kind: character vector naming the tables to fetch; defaults to all three.
# Returns a named list of data frames, one per requested table.
getNetwork.vie <- function(kind=c("stations", "lines", "platforms")) {
  # CC-BY attribution notice, emitted before fetching anything.
  notice <- paste("Retrieving", paste(kind, collapse=", "),
                  "from Vienna city government. Data provided as CC-BY. Data source: Stadt Wien - data.wien.gv.at")
  message(notice)
  sources <- list(
    stations  = "http://data.wien.gv.at/csv/wienerlinien-ogd-haltestellen.csv",
    lines     = "http://data.wien.gv.at/csv/wienerlinien-ogd-linien.csv",
    platforms = "http://data.wien.gv.at/csv/wienerlinien-ogd-steige.csv"
  )
  # Keep only the requested tables, then read each semicolon-separated CSV.
  selected <- sources[kind]
  fetched <- vector("list", length(selected))
  names(fetched) <- names(selected)
  for (nm in names(fetched)) {
    fetched[[nm]] <- read.csv(file = selected[[nm]], sep = ";")
  }
  fetched
}
|
865c0a8b73805f366d051f8528dd54612450416d | b3ba1d1795e2374ca3b8c25043362ec8d2b4b479 | /man/RunModel.Rd | d052ee31879dce95ee468d5a0a54707340614a56 | [] | no_license | xuzhenwu/airGR | a335b982cf56e9d35b048822a0ef3c940ae4b1f7 | 7f407b01c06397f2023e62118c8408e567ca04e7 | refs/heads/master | 2023-02-17T19:04:24.846715 | 2020-02-28T15:20:06 | 2020-02-28T15:20:06 | 280,121,172 | 0 | 0 | null | 2020-07-16T10:11:24 | 2020-07-16T10:11:24 | null | UTF-8 | R | false | false | 2,388 | rd | RunModel.Rd | \encoding{UTF-8}
\name{RunModel}
\alias{RunModel}
\title{Run with the provided hydrological model function}
\description{
Function which performs a single model run with the provided function over the selected period.
}
\usage{
RunModel(InputsModel, RunOptions, Param, FUN_MOD)
}
\arguments{
\item{InputsModel}{[object of class \emph{InputsModel}] see \code{\link{CreateInputsModel}} for details}
\item{RunOptions}{[object of class \emph{RunOptions}] see \code{\link{CreateRunOptions}} for details}
\item{Param}{[numeric] vector of model parameters}
\item{FUN_MOD}{[function] hydrological model function (e.g. \code{\link{RunModel_GR4J}}, \code{\link{RunModel_CemaNeigeGR4J}})}
}
\value{
[list] see \code{\link{RunModel_GR4J}} or \code{\link{RunModel_CemaNeigeGR4J}} for details
}
\examples{
library(airGR)
## loading catchment data
data(L0123001)
## preparation of the InputsModel object
InputsModel <- CreateInputsModel(FUN_MOD = RunModel_GR4J, DatesR = BasinObs$DatesR,
Precip = BasinObs$P, PotEvap = BasinObs$E)
## run period selection
Ind_Run <- seq(which(format(BasinObs$DatesR, format = "\%Y-\%m-\%d")=="1990-01-01"),
which(format(BasinObs$DatesR, format = "\%Y-\%m-\%d")=="1999-12-31"))
## preparation of the RunOptions object
RunOptions <- CreateRunOptions(FUN_MOD = RunModel_GR4J,
InputsModel = InputsModel, IndPeriod_Run = Ind_Run)
## simulation
Param <- c(X1 = 734.568, X2 = -0.840, X3 = 109.809, X4 = 1.971)
OutputsModel <- RunModel(InputsModel = InputsModel,
RunOptions = RunOptions, Param = Param,
FUN_MOD = RunModel_GR4J)
## results preview
plot(OutputsModel, Qobs = BasinObs$Qmm[Ind_Run])
## efficiency criterion: Nash-Sutcliffe Efficiency
InputsCrit <- CreateInputsCrit(FUN_CRIT = ErrorCrit_NSE, InputsModel = InputsModel,
RunOptions = RunOptions, Obs = BasinObs$Qmm[Ind_Run])
OutputsCrit <- ErrorCrit_NSE(InputsCrit = InputsCrit, OutputsModel = OutputsModel)
}
\author{
Laurent Coron, Olivier Delaigue
}
\seealso{
\code{\link{RunModel_GR4J}}, \code{\link{RunModel_CemaNeigeGR4J}}, \code{\link{CreateInputsModel}},
\code{\link{CreateRunOptions}}, \code{\link{CreateIniStates}}.
}
|
a8ff5a964fb041941bb086dc34c4dabf490850e4 | 2df0cbb9669bc66e9ebea1f4cf1bbc29a13fa548 | /example-MIP.R | 4f86d992399a8f76c047787141d956e02610ca14 | [] | no_license | xgsu/MIP | d93ef394ab5b300d154b5b71587e095fb43d5648 | 68c81bd4efc2ada2d33f44a55bc0cf0ca728d383 | refs/heads/master | 2021-04-09T14:41:28.720849 | 2018-03-18T14:42:23 | 2018-03-18T14:42:23 | 125,734,069 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,963 | r | example-MIP.R | #######################################################
## L1-REGUARED MULTIPLE-INFLATION POISSON MODEL (MIP)
## with Local Quadratic Approximation (LQA)
#######################################################
# WRITTEN BY: XIAOGANG SU, PH.D.
# UNIVERSITY OF ALABAMA AT BIRMINGHAM
# EMAIL: XGSU@UAB.EDU
#######################################################
# FIRST DOWNLOAD ALL THREE FILES INTO THE SAME FOLDER.
# THEN RUN THIS FILE ONLY, WHICH CONTAINS A SIMPLE ILLUSTRATION
# USING A SIMULATED DATA SET.
source("MIP-MLE.R");
source("MIP-L1.R");
# ======================================================
# FUNCTION THAT GENERATES DATA FROM AN MIP MODEL
# ======================================================
rdat.mip <- function(n = 100, gammas = c(-3, -1.5, 3, 2), betas = c(-2, 3, 2),
                     poisson.only = FALSE, clogit.only = FALSE, confound = TRUE) {
  # Simulate n observations from a multiple-inflation Poisson (MIP) model.
  #
  # Args:
  #   n:            sample size.
  #   gammas:       cumulative-logit parameters for the mixing probabilities.
  #   betas:        log-linear parameters for the Poisson mean.
  #   poisson.only: if TRUE, no inflation (pure Poisson component).
  #   clogit.only:  if TRUE, the outcome is purely categorical in {0, 1, 2}
  #                 and the Poisson mean is fixed at 2.
  #   confound:     if TRUE, x3 enters both the logit and the Poisson parts.
  #
  # Returns:
  #   list(dat = data.frame(x1..x4, y), E.y = expected response vector).
  #
  # NOTE(review): relies on logistic() from MIP-MLE.R being in scope.
  x1 <- runif(n); x2 <- runif(n)
  x3 <- runif(n); x4 <- runif(n)
  jvec <- rep(1, n)
  gamma1 <- gammas[-2]; gamma2 <- gammas[-1]
  G <- cbind(jvec, x1, x3)
  # Mixing probabilities of the three components (0-inflation, 1-inflation,
  # Poisson) via a cumulative logit on (1, x1, x3).
  p1 <- logistic(G %*% gamma1)
  p2 <- logistic(G %*% gamma2) - p1
  p3 <- 1 - logistic(G %*% gamma2)
  if (poisson.only) { p1 <- p2 <- rep(0, n); p3 <- rep(1, n) }
  B <- if (confound) cbind(jvec, x2, x3) else cbind(jvec, x2, x4)
  lambda <- exp(B %*% betas)
  # Under clogit.only the Poisson mean is a constant 2. The original code
  # reassigned the whole lambda vector inside the loop at every iteration
  # (accidental O(n^2) work); hoisted here -- same result, RNG untouched.
  if (clogit.only) lambda <- rep(2, n)
  y <- rep(0, n)
  for (i in seq_len(n)) {  # seq_len() is safe when n == 0 (1:n is not)
    if (clogit.only) {
      y[i] <- sample(c(0, 1, 2), size = 1, prob = c(p1[i], p2[i], p3[i]))
    } else {
      # rpois() is drawn first (argument evaluation), then sample() picks
      # among {0, 1, Poisson draw} -- same RNG call order as the original.
      y[i] <- sample(c(0, 1, rpois(1, lambda = lambda[i])),
                     size = 1, prob = c(p1[i], p2[i], p3[i]))
    }
  }
  E.y <- p1 * 0 + p2 * 1 + p3 * lambda  # expected value of y under the mixture
  dat <- data.frame(x1 = x1, x2 = x2, x3 = x3, x4 = x4, y = y)
  list(dat = dat, E.y = E.y)
}
# ==========================
# AN ILLUSTRATION OF CODES
# ==========================
set.seed(123)
n <- 500
rdat <- rdat.mip(n=n, confound=T)
dat <- rdat$dat;
# ----------------------------------------
# FIRST FIT THE MODEL WITH ALL COVARIATES
# ----------------------------------------
source("MIP-MLE.R");
cols.LM <- 1:4; cols.PM <- 1:4; col.y <- 5; M <- 2
FIT <- MLE.MIP(dat, cols.LM=cols.LM, cols.PM=cols.PM, col.y=col.y,
max.it.EM=3, maxit=100, M=M, epsilon=1e-7, use.gradient=F)
out <- FIT$results
out
# -------------------------------------------
# L1-REGULARIZATION FOR VARIABLE SELECTION
# -------------------------------------------
n <- nrow(dat)
out <- FIT$results
theta.hat <- out$theta.hat
fit.full <- FIT$fit
Sigma0 <- fit.full$hessian
results.selection <- LAS.LAR.MI(Sigma0, b0=theta.hat, M=M, p=length(cols.LM),
n=n, eps = .Machine$double.eps, max.steps =30)
results.selection
cbind(theta.hat, beta.unpen=results.selection$beta.unpen, bic=results.selection$beta.bic, aic=results.selection$beta.aic)
Best.step.BIC <- which.min(results.selection$BIC)
Best.step.AIC <- which.min(results.selection$AIC)
betas <- results.selection$beta # THESE ARE SLOPES ONLY.
which(betas[Best.step.BIC,] !=0)
# TWO PLOTS FROM L1-REGULARIZATION
# ----------------------------------
# postscript(file="fig1.eps", horizontal=F)
par(mfrow=c(2,1), mar=rep(4,4))
aic <- results.selection$AIC
bic <- results.selection$BIC
plot(x=c(1, nrow(betas)), y=c(min(aic), max(bic)), type="n",
xlab="step", ylab="information criterion", main="(a) AIC & BIC")
lines(x=1:nrow(betas), y=aic, lty=1, col="green", lwd=1)
lines(x=1:nrow(betas), y=bic, lty=1, col="red", lwd=1)
abline(v=Best.step.BIC, lwd=3, col="blue")
abline(v=Best.step.AIC, lwd=3, col="green")
plot(x=c(1, nrow(betas)+1), y=c(min(betas), max(betas)), type="n",
xlab="step", ylab="coefficient estimate", main="(b) regularized path")
for (j in 1:ncol(betas)){
lines(x=1:nrow(betas), y=betas[,j], col="red", lty=1, lwd=1)
}
abline(h=0, col="black", lwd=1)
abline(v=Best.step.BIC, lwd=3, col="blue")
abline(v=Best.step.AIC, lwd=3, col="green")
# dev.off()
# ---------------------------------
# FITTING THE SELECTED MIP MODEL
# ---------------------------------
cols.LM <- c(1, 3); cols.PM <- c(2, 3); col.y <- 5; M <- 2
FIT1 <- MLE.MIP(dat, cols.LM=cols.LM, cols.PM=cols.PM, col.y=col.y,
max.it.EM=3, maxit=100, M=M, epsilon=1e-7, use.gradient=F)
out <- FIT1$results
out
# USE THE report() FUNCTION
vnames <- colnames(dat)
variables.LM <- vnames[cols.LM]
variables.PM <- vnames[cols.PM]
result <- report(fit.MIP=FIT1, variables.LM=variables.LM,
variables.PM=variables.PM, M=M)
result
# -------------------------------
# PREDICTION WITH A TEST SAMPLE
# -------------------------------
test <- rdat.mip(n=n, confound=T)$dat;
y.hat <- est.MIP.mean(theta=out$theta.hat, M=M, cols.LM=cols.LM,
cols.PM = cols.PM, dat=test)
plot(test$y, y.hat, type="p", pch=19, cex=0.5, xlab="observed",
ylab="predicted", col="blue")
abline(a=0, b=1, col="red", lwd=2)
#
|
2233445d6f9826fb06bbb05f8a168658743f0136 | ab73d60d1734a8d08eec9c410744b4441750ad95 | /R/checkVariableValidity.R | f11f05fb8cd2675e9956b20c67436587e53b0a9e | [] | no_license | gsimchoni/yrbss | d608ef13375c3a4a5901b4aef99709e534e0c476 | c181dc14e7429ef37c65da4ca8db5903e3fa4d37 | refs/heads/master | 2020-09-05T05:25:36.680035 | 2017-06-18T16:14:41 | 2017-06-18T16:14:41 | 94,413,209 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 572 | r | checkVariableValidity.R | #' Check the validity of a variable name
#'
#' Helper method to check the validity of a variable name.
#'
#' @param variable a string representing the name of the desired binary variable
#'
#' @return an ERROR if the variable is NULL or not in the yrbss_questions_binary
#' list of questions
#'
#' @examples
#' checkVariableValidity("qn8")
#'
#' @export
checkVariableValidity <- function(variable) {
if (is.null(variable)) {
stop("A variable name must be specified")
}
if (!variable %in% yrbss_questions_binary$variable) {
stop("Invalid variable name")
}
} |
8254d4420a65e4a8403bf0e909737cbab5df309f | 2432d53374caf319fa5ae13e4f304b22b3f7c425 | /app.R | 61af6054b4f058dbe0feae0769e4be79e929d92c | [] | no_license | databrew/steppingup | 3749a9b40e52675f0a5ce7b78c7e766ca7a14c45 | e43a1befcc701d56e81c085c2b9508af3b36ee90 | refs/heads/master | 2021-09-07T16:21:13.433360 | 2018-02-26T01:55:04 | 2018-02-26T01:55:04 | 108,323,819 | 0 | 0 | null | 2018-02-25T19:10:43 | 2017-10-25T20:41:46 | HTML | UTF-8 | R | false | false | 46,444 | r | app.R | library(shiny)
library(googleVis)
library(DT)
library(leaflet)
library(RColorBrewer)
library(networkD3)
options(gvis.plot.tag = 'chart')
library(shinyBS)
library(shinyLP)
library(ggplot2)
library(shinythemes)
source('global.R')
ui <- dashboardPage(skin = 'purple',
dashboardHeader(
title = "Ontario Youth Compass",
titleWidth = 300
),
dashboardSidebar(width = 300,
sidebarMenu(
menuItem('Welcome',
icon = icon('users'),
tabName = 'welcome'),
menuItem('Explore census data',
icon = icon('address-book-o'),
tabName = 'census'),
menuItem("Explore data by theme",
tabName = "theme",
icon = icon("dashboard")),
# menuItem("Download raw data",
# tabName = "download",
# icon = icon("suitcase")),
menuItem("About",
icon = icon('folder-open'),
tabName = "about"))),
dashboardBody(
tags$head(
tags$link(rel = "stylesheet", type = "text/css", href = "custom.css")
),
tabItems(
tabItem(tabName = 'welcome',
jumbotron("Ontario Youth Compass", "The Ontario Youth Compass tracks the wellbeing of youth across the province using data from a variety trusted sources.
This web app allows for easy exploration, visualization,
and access to data from the last four Canadian Censuses (2001, 2006, 2011, 2016), as well as various
other national and province-wide surveys. This web app is meant to accompany 'The Ontario Youth Compass:
A Databook on Youth Wellbeing' report published by YouthREX in 2018.",
button = FALSE
# buttonLabel = "Explore!"
),
fluidRow(
br(), br(), br(), br(),
div(img(src='youthrex_logo_clear.png', align = "left", width = '200'), style="text-align: left; margin-left:10px;")
)
),
tabItem(tabName = "census",
h2('Explore census data'),
helpText('I\'m looking for data about:'),
fluidRow(column(3,
selectInput('category',
'Category',
choices = category_choices)),
column(3,
uiOutput("sub_category")),
column(1,
htmlOutput('arrow')),
column(5,
uiOutput("variable"),
h4(strong(textOutput('variable_text'))))),
fluidRow(column(3,
radioButtons('percent',
'View as percentage or raw number',
choices = c('Percentage',
'Raw numbers',
'Both'))),
column(3,
checkboxGroupInput('years',
'Year',
choices = c('2001', '2006', '2011', '2016'),
selected = '2016'))),
fluidRow(column(12,
strong('Examine by sub-group(s):'))),
fluidRow(column(2,
checkboxInput('age',
'Age Group',
value = TRUE)),
column(2,
checkboxInput('sex',
'Sex',
value = TRUE)),
column(2,
checkboxInput('pob',
'Place of Birth')),
column(2,
checkboxInput('vm',
'Visible minority')),
column(2,
checkboxInput('ai',
'Aboriginal identity')),
column(2,
checkboxInput('geography',
'Geography')),
column(2)),
fluidRow(column(2,
uiOutput('age_filter')),
column(2,
uiOutput('sex_filter')),
column(2,
uiOutput('pob_filter')),
column(2,
uiOutput('vm_filter')),
column(2,
uiOutput('ai_filter')),
column(2,
uiOutput('geography_filter'))),
tabsetPanel(
tabPanel('Table',
fluidRow(column(12,
textOutput('xing_text'),
DT::dataTableOutput('xing_table')
))),
tabPanel('Map',
textOutput('map_text'),
h3(textOutput('map_title')),
leafletOutput('the_map')),
tabPanel('Plot',
checkboxInput('show_labels',
'Show values on charts?',
TRUE),
plotOutput('bar_plot')))),
tabItem(tabName = "theme",
h2('Explore data by theme'),
p('In 2013, the Government of Ontario adopted Stepping Up as the province’s evidence-based framework for improving youth outcomes. As an evidence-based framework, Stepping Up aims to consolidate and harmonize decision-making and program planning in Ontario’s youth-serving sectors to support youth wellbeing. This framework has guided both the development and implementation of youth initiatives by specifying seven themes for youth wellbeing.'),
p('You can explore various data sets under each of the Stepping Up themes below, or search for which theme any variable falls under below:'),
fluidRow(column(6,
selectInput('theme_word', 'Select or type to search for variable(s):',
choices = theme_variables,
selectize = TRUE,
multiple = TRUE)),
column(6,
dataTableOutput('theme_search'))),
tabsetPanel(id = "tabs",
tabPanel(title = 'Health and wellness'),
tabPanel(title = 'Supportive families'),
tabPanel(title = 'Education'),
tabPanel(title = 'Employment'),
tabPanel(title = 'Civic engagement'),
tabPanel(title = 'Diversity'),
tabPanel(title = 'Communities')
),
fluidRow(column(6,
uiOutput('theme_var')),
column(6,
uiOutput('theme_var_2'))),
fluidRow(column(6,
checkboxInput('want_another_var',
'Compare with a second variable?',
value = FALSE)),
column(6,
helpText(textOutput('compare_text')))),
fluidRow(
column(3,
uiOutput('theme_gender')),
column(3,
uiOutput('theme_race')),
column(6)
),
# fluidRow(textOutput('fake_text')),
tabsetPanel(
tabPanel('Table',
fluidRow(column(12,
DT::dataTableOutput('theme_table')
))),
tabPanel('Plot',
fluidRow(column(12,
plotOutput('theme_plot')))))
),
tabItem(tabName = "download",
h2("Data download"),
br(),
h3('Census data'),
p('Click below to download the entire census dataset (processed, formatted, and filtered by Databrew). Warning: This file is nearly 30MB large; depending on your internet connection speed, this download can be slow.'),
downloadButton('downloadData', 'Download'),
h3('Survey data'),
p('This application uses many different surveys. Select a survey below, and then click download to get entire survey dataset (processed, formatted, and filtered by Databrew) in raw form.'),
fluidRow(column(12,
selectInput('survey_download',
'Choose a survey dataset to download',
choices = survey_download_choices))),
downloadButton('downloadSurvey', 'Download'),
br()),
tabItem(
tabName = 'about',
fluidPage(
fluidRow(
div(img(src='logo_clear.png', align = "center"), style="text-align: center;"),
h4('Built in partnership with ',
a(href = 'http://databrew.cc',
target='_blank', 'Databrew'),
align = 'center'),
p('Empowering research and analysis through collaborative data science.', align = 'center'),
div(a(actionButton(inputId = "email", label = "info@databrew.cc",
icon = icon("envelope", lib = "font-awesome")),
href="mailto:info@databrew.cc",
align = 'center')),
style = 'text-align:center;'
)
)
)
)))
# Define server
server <- function(input, output) {
# reactive object for theme
theme_code <- reactive({
input$tabs # just run to refresh
x <- theme_dictionary %>% filter(long_name == input$tabs)
x <- x$short_name
return(x)
})
# reactive for choosing themes
theme_choices <- reactive({
x <- survey_dictionary
x <- x %>% filter(theme_name == theme_code())
x$new_variable
})
theme_choices_labels <- reactive({
x <- survey_dictionary
x <- x %>% filter(theme_name == theme_code())
x$display_name
})
output$theme_var <- renderUI({
input$tabs # just run to refresh
x <- theme_choices()
names(x) <- theme_choices_labels()
# x <- x[!grepl('weigh', x)]
selectInput('theme_var',
'Choose a variable to explore',
choices = x)
})
output$arrow <- renderText({
if(is.null(input$variable)){
HTML(as.character(icon("arrow-circle-right", "fa-4x")))
} else {
NULL
}
})
# reactive data set NAME based on the input$theme_var
theme_data_name <- reactive({
if(!is.null(input$theme_var)){
var1 <- input$theme_var
x <- var_summary
x$data_set <- unlist(lapply(strsplit(x$new_variable, '_'), function(x) x[2]))
x <- x %>% filter(new_variable == var1)
return(x$data_set[1])
} else {
return(NULL)
}
})
output$compare_text <-
renderText({
if(input$want_another_var){
'Note that variables can only be compared from within the same dataset. So, the list of comparison variables (to the right) depends on the exploratory variable chosen (on the left).'
} else {
NULL
}
})
# reactive dataset based on the theme_data_name
theme_data <- reactive({
the_name <- theme_data_name()
if(is.null(the_name)){
NULL
} else {
full_name <- dataset_dictionary %>%
filter(short_name == the_name) %>%
.$long_name
x <- survey[[which(names(survey) == full_name)]]
# remove weight and ids
x <- x[, !grepl('weigh', colnames(x))]
x <- x[, !grepl('_id', colnames(x))]
x <- x[, !grepl('999', colnames(x), fixed = T)]
x
}
})
# reactive object for second choice
theme_choices_2 <- reactive({
input$tabs # just for refreshing
x <- survey_dictionary
x <- x %>% filter(short_name == theme_data_name())
out <- x$new_variable
dd <- theme_data()
out <- out[out %in% names(dd)]
if(length(out) == 0){
return(NULL)
} else{
return(out)
}
})
theme_choices_labels_2 <- reactive({
x <- survey_dictionary
x <- x %>% filter(short_name == theme_data_name())
out <- x$display_name
original_var_name <- x$new_variable
dd <- theme_data()
out <- out[original_var_name %in% names(dd)]
if(length(out) == 0){
return(NULL)
} else{
return(out)
}
})
has_race_gender <- reactive({
# Get the theme data name
x <- theme_data_name()
y <- theme_code()
full_name <- NULL
if(!is.null(x)){
# Use the dataset dictionary to convert to a fuller name
full_name <- dataset_dictionary %>%
filter(short_name == x)
if(nrow(full_name) == 1){
full_name <- full_name$long_name
} else {
full_name <- NULL
}
}
if(!is.null(full_name)){
race_gender <- race_gender_dictionary %>%
filter(data_folder == full_name)
full_name <- race_gender
race <- full_name %>% dplyr::filter(category == 'race') %>% .$variable_name
gender <- full_name %>% dplyr::filter(category == 'gender') %>% .$variable_name
full_name <- data.frame('race' = ifelse(is.na(race), NA, race),
'gender' = ifelse(is.na(gender), NA, gender))
}
# full_name <- paste0(x, ' ', y)
return(full_name)
})
output$theme_gender <- renderUI({
x <- has_race_gender()
out <- NULL
if(!is.null(x)){
if(!is.na(x$gender)){
out <- checkboxInput('theme_gender',
'Group by gender')
}
}
return(out)
})
output$theme_race <- renderUI({
x <- has_race_gender()
out <- NULL
if(!is.null(x)){
if(!is.na(x$race)){
out <- checkboxInput('theme_race',
'Group by race')
}
}
return(out)
})
output$theme_var_2 <- renderUI({
input$tabs # just run to refresh
input$want_another_var # just run to refresh
if(is.null(input$theme_var) | !input$want_another_var) {
return(NULL)
} else {
x <- theme_choices_2()
if(is.null(x)){
return(NULL)
} else {
names(x) <- theme_choices_labels_2()
selectInput('theme_var_2',
'Choose a variable to compare',
choices = x)
}
}
})
# output$fake_text <- renderText({
# paste0('Theme data name is ', theme_data_name(), '\n',
# 'var 1 is ', input$theme_var, '\n',
# 'var 2 is ', input$theme_var_2)
# })
output$theme_plot <- renderPlot({
input$tabs # just run to refresh
df <- theme_data()
# df_full <- df
v1 <- input$theme_var
v2 <- input$theme_var_2
has_two <- input$want_another_var & !is.null(input$theme_var_2)
# Deal with grouping by gender and race
by_gender <- FALSE
by_race <- FALSE
if(!is.null(df)){
if(!is.null(input$theme_gender)){
if(input$theme_gender){
by_gender <- TRUE
}
}
if(!is.null(input$theme_race)){
if(input$theme_race){
by_race <- TRUE
}
}
} else {
return(NULL)
}
# Get the label names of our variables
if(!is.null(v1)){
v1_label <- survey_dictionary %>% filter(new_variable == v1) %>% .$display_name %>% strsplit(' (', fixed = TRUE) %>% lapply(function(x){x[1]}) %>% unlist
} else {
v1_label <- ''
}
if(!is.null(v2)){
v2_label <- survey_dictionary %>% filter(new_variable == v2) %>% .$display_name %>% strsplit(' (', fixed = TRUE) %>% lapply(function(x){x[1]}) %>% unlist
} else {
v2_label <- ''
}
# Subset to only include the variables we want
keep_vars <- v1
if(!is.null(df)){
if(has_two){
keep_vars <- c(keep_vars, v2)
}
# Deal with gender and race grouping
if(by_gender){
keep_vars <- c(keep_vars, 'gender')
}
if(by_race){
keep_vars <- c(keep_vars, 'race')
}
if(!is.data.frame(df)){
return(NULL)
} else {
# All operations go here
# Keep only the relevant variables
df <- df[,names(df) %in% keep_vars, drop = FALSE]
# print(head(df))
if(has_two & ncol(df) >= 2){
names(df)[1:2] <- c('v1', 'v2')
type_1 <- class(df$v1)
type_2 <- class(df$v2)
type_2_numeric <- type_2 %in% c('integer', 'numeric')
type_1_numeric <- type_1 %in% c('integer', 'numeric')
if(type_1_numeric & type_2_numeric){
g <- ggplot(data = df,
aes(x = v1,
y = v2)) +
geom_point() +
labs(x = v1_label,
y = v2_label)
}
if(type_1_numeric & !type_2_numeric){
g <- ggplot(data = df,
aes(x = v1,
group = v2,
fill = v2)) +
geom_density(alpha = 0.3) +
labs(x = v1_label)
}
if(!type_1_numeric & type_2_numeric){
g <- ggplot(data = df,
aes(x = v1,
y = v2,
group = v1)) +
geom_jitter(alpha = 0.3) +
geom_violin() +
labs(x = v1_label,
y = v2_label)
}
if(!type_1_numeric & !type_2_numeric){
# get percentages for plot
out <- df %>%
group_by(v1, v2) %>%
summarise(observations = n()) %>%
ungroup %>%
mutate(percentage = round(observations / sum(observations) * 100, digits = 2))
# plot data
cols <- colorRampPalette(brewer.pal(9, 'Spectral'))(length(unique(df$v2)))
g <- ggplot(data = out,
aes(x = v1,
y = percentage,
group = v2,
fill = v2)) +
geom_bar(position = 'dodge', stat = 'identity') +
labs(x = v2_label) +
scale_fill_manual(name = v1_label,
values = cols)
}
g <- g + theme_databrew() +
labs(title = v1_label,
subtitle = v2_label)
} else {
if(!is.data.frame(df)){ # this means there is no gender / race, it's just one vector
df <- data.frame(v1 = df)
} else {
names(df)[1] <- 'v1'
}
type_1 <- class(df$v1)
type_1_numeric <- type_1 %in% c('integer', 'numeric')
if(type_1_numeric){
g <- ggplot(data = df,
aes(x = v1)) +
geom_density(fill = 'darkorange',
alpha = 0.6) +
labs(x = v1_label)
} else {
g <- ggplot(data = df,
aes(x = v1)) +
geom_bar(fill = 'darkorange',
alpha = 0.6) +
labs(x = v1_label)
}
g <- g +
theme_databrew() +
labs(title = v1_label)
}
if(by_gender & !by_race){
if('gender' %in% names(df)){
g <- g + facet_wrap(~gender)
}
} else if(!by_gender & by_race){
if('race' %in% names(df)){
g <- g + facet_wrap(~race)
}
} else if(by_gender & by_race){
if('race' %in% names(df) & 'gender' %in% names(df)){
g <- g + facet_grid(~gender+race)
}
}
g <- g +
theme(axis.text.x = element_text(angle = 45, hjust = 1))
return(g)
}
} else{
NULL
}
})
output$theme_search <- renderDataTable({
out <- NULL
theme_word <- input$theme_word
if(!is.null(theme_word)){
x <- survey_dictionary
x <- x %>% filter(display_name %in% theme_word)
x <- x %>%
dplyr::select(display_name, theme_name)
x <- left_join(x, theme_dictionary,
by = c('theme_name' = 'short_name'))
x <- x %>%
filter(theme_name != 'demo')
x <- x %>% dplyr::select(-theme_name) %>%
dplyr::rename(Theme = long_name) %>%
dplyr::rename(Variable = display_name)
x <- x %>% filter(!is.na(Theme))
out <- DT::datatable(x,options = list(dom = 't'), rownames = FALSE)
}
return(out)
})
output$theme_table <- renderDataTable({
# Deal with grouping by gender and race
by_gender <- FALSE
by_race <- FALSE
demo_keepers <- c()
has_two <- FALSE
df <- NULL
v1 <- NULL
v2 <- NULL
input$tabs # just run to refresh
df <- theme_data()
v1 <- input$theme_var
v2 <- input$theme_var_2
has_two <- input$want_another_var & !is.null(input$theme_var_2)
if(!is.null(df)){
if(!is.null(input$theme_gender)){
if(input$theme_gender){
by_gender <- TRUE
}
}
if(!is.null(input$theme_race)){
if(input$theme_race){
by_race <- TRUE
}
}
} else {
return(NULL)
}
# Get the label names of our variables
if(!is.null(v1)){
v1_label <- survey_dictionary %>% filter(new_variable == v1) %>% .$display_name %>% strsplit(' (', fixed = TRUE) %>% lapply(function(x){x[1]}) %>% unlist
} else {
v1_label <- ''
}
if(!is.null(v2)){
v2_label <- survey_dictionary %>% filter(new_variable == v2) %>% .$display_name %>% strsplit(' (', fixed = TRUE) %>% lapply(function(x){x[1]}) %>% unlist
} else {
v2_label <- ''
}
# Subset to only include the variables we want
keep_vars <- v1
if(!is.null(df)){
if(has_two){
keep_vars <- c(keep_vars, v2)
}
if(!is.data.frame(df)){
return(NULL)
} else {
# All operations go here
# Deal with gender and race grouping
if(by_gender){
keep_vars <- c(keep_vars, 'gender')
demo_keepers <- c(demo_keepers, 'gender')
}
if(by_race){
keep_vars <- c(keep_vars, 'race')
demo_keepers <- c(demo_keepers, 'race')
}
# Keep only the relevant variables
df <- df[,names(df) %in% unique(c(demo_keepers, keep_vars)), drop = FALSE]
if(is.null(df)){
return(NULL)
}
if(has_two & ncol(df) >= 2){
names(df)[1:2] <- c('v1', 'v2')
type_1 <- class(df$v1)
type_2 <- class(df$v2)
type_2_numeric <- type_2 %in% c('integer', 'numeric')
type_1_numeric <- type_1 %in% c('integer', 'numeric')
if(type_1_numeric & type_2_numeric){
if(length(demo_keepers) > 0){
a <- df %>%
group_by_at(demo_keepers) %>%
summarise(average = mean(v1, na.rm = TRUE),
maximum = max(v1, na.rm = TRUE),
minimum = min(v1, na.rm = TRUE),
IQR = paste0(quantile(v1, c(0.25, 0.75), na.rm = TRUE), collapse = ' to '),
observations = length(v1),
NAs = length(which(is.na(v1))))
b <- df %>%
group_by_at(demo_keepers) %>%
summarise(average = mean(v2, na.rm = TRUE),
maximum = max(v2, na.rm = TRUE),
minimum = min(v2, na.rm = TRUE),
IQR = paste0(quantile(v2, c(0.25, 0.75), na.rm = TRUE), collapse = ' to '),
observations = length(v2),
NAs = length(which(is.na(v2))))
} else {
a <- df %>%
summarise(average = mean(v1, na.rm = TRUE),
maximum = max(v1, na.rm = TRUE),
minimum = min(v1, na.rm = TRUE),
IQR = paste0(quantile(v1, c(0.25, 0.75), na.rm = TRUE), collapse = ' to '),
observations = length(v1),
NAs = length(which(is.na(v1))))
b <- df %>%
summarise(average = mean(v2, na.rm = TRUE),
maximum = max(v2, na.rm = TRUE),
minimum = min(v2, na.rm = TRUE),
IQR = paste0(quantile(v2, c(0.25, 0.75), na.rm = TRUE), collapse = ' to '),
observations = length(v2),
NAs = length(which(is.na(v2))))
}
out <- bind_rows(
cbind(data.frame(variable = v1_label), a),
cbind(data.frame(variable = v2_label), b)
)
}
if(type_1_numeric & !type_2_numeric){
if(length(demo_keepers) > 0){
out <- df %>%
group_by_at(c('v2', demo_keepers)) %>%
summarise(average = mean(v1, na.rm = TRUE),
maximum = max(v1, na.rm = TRUE),
minimum = min(v1, na.rm = TRUE),
IQR = paste0(quantile(v1, c(0.25, 0.75), na.rm = TRUE), collapse = ' to '),
observations = length(v1),
NAs = length(which(is.na(v1))))
} else {
out <- df %>%
group_by(v2) %>%
summarise(average = mean(v1, na.rm = TRUE),
maximum = max(v1, na.rm = TRUE),
minimum = min(v1, na.rm = TRUE),
IQR = paste0(quantile(v1, c(0.25, 0.75), na.rm = TRUE), collapse = ' to '),
observations = length(v1),
NAs = length(which(is.na(v1))))
}
names(out)[1] <- v2_label
}
if(!type_1_numeric & type_2_numeric){
if(length(demo_keepers) > 0){
out <- df %>%
group_by_at(c('v1', demo_keepers)) %>%
summarise(average = mean(v2, na.rm = TRUE),
maximum = max(v2, na.rm = TRUE),
minimum = min(v2, na.rm = TRUE),
IQR = paste0(quantile(v2, c(0.25, 0.75), na.rm = TRUE), collapse = ' to '),
observations = length(v2),
NAs = length(which(is.na(v2))))
} else {
out <- df %>%
group_by(v1) %>%
summarise(average = mean(v2, na.rm = TRUE),
maximum = max(v2, na.rm = TRUE),
minimum = min(v2, na.rm = TRUE),
IQR = paste0(quantile(v2, c(0.25, 0.75), na.rm = TRUE), collapse = ' to '),
observations = length(v2),
NAs = length(which(is.na(v2))))
}
names(out)[1] <- v1_label
}
if(!type_1_numeric & !type_2_numeric){
# Both are categorical
if(length(demo_keepers) > 0){
out <- df %>%
group_by_at(c('v1', 'v2', demo_keepers)) %>% tally
} else {
out <- df %>%
group_by(v1, v2) %>% tally
}
names(out)[1:2] <- c(v1_label, v2_label)
}
} else {
if(!is.data.frame(df)){ # this means there is no gender / race, it's just one vector
df <- data.frame(v1 = df)
} else {
names(df)[1] <- 'v1'
}
type_1 <- class(df$v1)
type_1_numeric <- type_1 %in% c('integer', 'numeric')
if(type_1_numeric){
if(length(demo_keepers) > 0){
out <- df %>%
group_by_at(demo_keepers) %>%
summarise(average = mean(v1, na.rm = TRUE),
maximum = max(v1, na.rm = TRUE),
minimum = min(v1, na.rm = TRUE),
IQR = paste0(quantile(v1, c(0.25, 0.75), na.rm = TRUE), collapse = ' to '),
observations = length(v1),
NAs = length(which(is.na(v1))))
} else {
out <- df %>%
summarise(average = mean(v1, na.rm = TRUE),
maximum = max(v1, na.rm = TRUE),
minimum = min(v1, na.rm = TRUE),
IQR = paste0(quantile(v1, c(0.25, 0.75), na.rm = TRUE), collapse = ' to '),
observations = length(v1),
NAs = length(which(is.na(v1))))
}
} else {
if(length(demo_keepers) > 0){
out <- df %>%
group_by_at(c('v1', demo_keepers)) %>%
summarise(observations = n()) %>%
ungroup %>%
mutate(percentage = round(observations / sum(observations) * 100, digits = 2))
} else {
out <- df %>%
group_by(v1) %>%
summarise(observations = n()) %>%
ungroup %>%
mutate(percentage = round(observations / sum(observations) * 100, digits = 2))
}
names(out)[1] <- v1_label
}
}
return(prettify(out, download_options = TRUE))
}
} else{
NULL
}
})
# Reactive census object
censified <- reactive({
choices <- unique(census_dict$sub_category[census_dict$category == input$category])
if(length(choices) == 1) {
sc <- input$category
} else {
sc <- input$sub_category
}
x <- censify(df = census, dict = census_dict,
age = input$age,
sex = input$sex,
pob = input$pob,
vm = input$vm,
ai = input$ai,
geo_code = input$geography,
years = input$years,
sc = sc,
percent = input$percent)
if(input$age & !is.null(input$age_filter)) {
if(!'All' %in% input$age_filter) {
x <- x %>% filter(`Age group` %in% input$age_filter)
}
}
if(input$sex & !is.null(input$sex_filter)) {
if(!'All' %in% input$sex_filter) {
x <- x %>% filter(Sex %in% input$sex_filter)
}
}
if(input$pob & !is.null(input$pob_filter)) {
if(!'All' %in% input$pob_filter) {
x <- x %>% filter(`Place of Birth` %in% input$pob_filter)
}
}
if(input$vm & !is.null(input$vm_filter)) {
if(!'All' %in% input$vm_filter) {
x <- x %>% filter(`Visible minority` %in% input$vm_filter)
}
}
if(input$ai & !is.null(input$ai_filter)) {
if(!'All' %in% input$ai_filter) {
x <- x %>% filter(`Aboriginal identity` %in% input$ai_filter)
}
}
if(input$geography & !is.null(input$geography_filter)) {
if(!'All' %in% input$geography_filter) {
x <- x %>% filter(Geography %in% input$geography_filter)
}
}
return(x)
})
# Separate censified table (just for plotting) - does not use the "Both" option for %
censified_plot <- reactive({
choices <- unique(census_dict$sub_category[census_dict$category == input$category])
if(length(choices) == 1) {
sc <- input$category
} else {
sc <- input$sub_category
}
# This is the only different with censified - can't handle the pasted values with % in one column
pp <- input$percent
if(pp == 'Both'){
pp <- 'Percentage'
}
x <- censify(df = census, dict = census_dict,
age = input$age,
sex = input$sex,
pob = input$pob,
vm = input$vm,
ai = input$ai,
geo_code = input$geography,
years = input$years,
sc = sc,
percent = pp)
if(input$age & !is.null(input$age_filter)) {
if(!'All' %in% input$age_filter) {
x <- x %>% filter(`Age group` %in% input$age_filter)
}
}
if(input$sex & !is.null(input$sex_filter)) {
if(!'All' %in% input$sex_filter) {
x <- x %>% filter(Sex %in% input$sex_filter)
}
}
if(input$pob & !is.null(input$pob_filter)) {
if(!'All' %in% input$pob_filter) {
x <- x %>% filter(`Place of Birth` %in% input$pob_filter)
}
}
if(input$vm & !is.null(input$vm_filter)) {
if(!'All' %in% input$vm_filter) {
x <- x %>% filter(`Visible minority` %in% input$vm_filter)
}
}
if(input$ai & !is.null(input$ai_filter)) {
if(!'All' %in% input$ai_filter) {
x <- x %>% filter(`Aboriginal identity` %in% input$ai_filter)
}
}
if(input$geography & !is.null(input$geography_filter)) {
if(!'All' %in% input$geography_filter) {
x <- x %>% filter(Geography %in% input$geography_filter)
}
}
return(x)
})
# Misc approval box
output$approvalBox <- renderInfoBox({
infoBox(
"Approval", "80%", icon = icon("thumbs-up", lib = "glyphicon"),
color = "yellow"
)
})
# Misc approval box
output$approvalBox2 <- renderInfoBox({
infoBox(
"Approval", "80%", icon = icon("thumbs-up", lib = "glyphicon"),
color = "yellow", fill = TRUE
)
})
# Age filter
output$age_filter <- renderUI({
if(input$age){
choices <- sort(unique(census$`Age group`))
choices <- c('All', choices)
choices <- choices[!grepl('Total', choices)]
selectInput('age_filter',
'Filter',
choices = choices,
multiple = TRUE)
}
})
# barplot
output$bar_plot <- renderPlot({
if(input$category == 'income'){
ggplot() +
theme_databrew() +
labs(title = 'Income variables are not visualizable yet.')
} else {
if(is.null(input$variable) | length(input$variable) == 0){
ggplot() +
theme_databrew() +
labs(title = 'You must select a variable to plot')
} else {
plotter(censified_plot(), variable = input$variable, show_labels = input$show_labels)
}
}
})
# Download table
output$downloadData <- downloadHandler(
filename = function() { paste('databrew', '.csv', sep='') },
content = function(file) {
write.csv(census, file)})
  # Geography filter: dropdown rendered only while the "geography" checkbox
  # is ticked. Offers each distinct geography plus 'All'; the province-wide
  # 'Ontario' row and any 'Total' labels are removed.
  output$geography_filter <- renderUI({
    if(input$geography){
      choices <- sort(unique(census$Geography))
      #remove ontario
      choices <- choices[!grepl('Ontario', choices)]
      choices <- c('All', choices)
      choices <- choices[!grepl('Total', choices)]
      selectInput('geography_filter',
                  'Filter',
                  choices = choices,
                  multiple = TRUE)
    }
  })
  # Download handler: exports the survey table whose name matches the
  # user's selection in input$survey_download as CSV.
  output$downloadSurvey <- downloadHandler(
    filename = function() { paste(paste0('databrew_survey_',
                                         input$survey_download,
                                         collapse = ''),
                                  '.csv', sep='') },
    content = function(file) {
      the_data <- survey[[which(names(survey) == input$survey_download)]]
      write.csv(the_data, file)})
  # Leaflet helper text: shown only when the current selection cannot
  # produce a map (i.e. geography is not the sole crossing, or more than
  # one year is selected).
  # NOTE(review): this condition duplicates the first half of the
  # make_the_map() reactive below -- keep the two in sync.
  output$map_text <- renderText({
    make_map <- FALSE
    if(input$geography &
       !input$age &
       !input$sex &
       !input$pob &
       !input$vm &
       length(input$years) == 1){
      make_map <- TRUE
    }
    if(!make_map){
      paste0('To generate a map with data, check the "geography" box above, select only one year, and uncheck all the others.')
    }
  })
  # Place of birth filter: dropdown rendered only while the "pob" checkbox
  # is ticked; distinct values plus 'All', 'Total' labels removed.
  output$pob_filter <- renderUI({
    if(input$pob){
      choices <- sort(unique(census$`Place of Birth`))
      choices <- c('All', choices)
      choices <- choices[!grepl('Total', choices)]
      selectInput('pob_filter',
                  'Filter',
                  choices = choices,
                  multiple = TRUE)
    }
  })
  # Progress box: demo infoBox whose percentage moves with input$count.
  output$progressBox <- renderInfoBox({
    infoBox(
      "Progress", paste0(25 + input$count, "%"), icon = icon("list"),
      color = "purple"
    )
  })
  # Progress box: same as above but with a filled background.
  output$progressBox2 <- renderInfoBox({
    infoBox(
      "Progress", paste0(25 + input$count, "%"), icon = icon("list"),
      color = "purple", fill = TRUE
    )
  })
  # Sex filter: dropdown rendered only while the "sex" checkbox is ticked;
  # distinct values plus 'All', 'Total' labels removed.
  output$sex_filter <- renderUI({
    if(input$sex){
      choices <- sort(unique(census$Sex))
      choices <- c('All', choices)
      choices <- choices[!grepl('Total', choices)]
      selectInput('sex_filter',
                  'Filter',
                  choices = choices,
                  multiple = TRUE)
    }
  })
  # Sub category radio buttons for the chosen category. Hidden (NULL) when
  # the category has only one sub-category; labels are prettified by
  # replacing underscores and capitalising.
  output$sub_category <- renderUI({
    choices <- unique(census_dict$sub_category[census_dict$category == input$category])
    if(length(choices) == 1) {
      return(NULL)
    } else {
      names(choices) <- Hmisc::capitalize(gsub('_', ' ', choices))
      radioButtons('sub_category',
                   'Sub Category',
                   choices = choices,
                   selected = choices[1])}})
make_the_map <- reactive({
make_map <- FALSE
if(input$geography &
!input$age &
!input$sex &
!input$pob &
!input$vm &
length(input$years) == 1){
make_map <- TRUE
}
if(make_map){
df <- censified()
if(length(input$variable) == 1){
which_var <- which(names(df) == input$variable)
val <- as.numeric(unlist(df %>% dplyr::select_(which_var)))
if(all(is.na(val))){
make_map <- FALSE
} else {
make_map <- TRUE
}
} else {
make_map <- FALSE
}
} else {
make_map <- FALSE
}
return(make_map)
})
output$map_title <- renderText({
make_it <- make_the_map()
if(make_it){
paste0('Map of ', tolower(input$variable))
} else {
NULL
}
})
output$the_map <- renderLeaflet({
make_it <- make_the_map()
if(make_it){
n <- 3
the_title <- input$variable
withProgress(message = 'Making map', value = 0, {
incProgress(1/n, detail = paste("Doing part", i))
df <- censified()
incProgress(1/n, detail = paste("Doing part", i))
which_var <- which(names(df) == input$variable)
val <- as.numeric(unlist(df %>% dplyr::select_(which_var)))
df <- df %>%
dplyr::select(geo_code)
incProgress(1/n, detail = paste("Doing part", i))
df$value <- val
leaf(x = df)#,
# tile = input$tile,
# palette = input$palette,
# show_legend = input$show_legend)
})
} else {
# NULL
leaf_basic()
}
})
  # Variable selection ----
  # Keeps the user's current variable selection when it is still valid for
  # the (re)filtered data; otherwise falls back to the first available
  # non-header, non-'Total' column.
  variable_choices <- reactive({
    x <- censified()
    out <- input$variable
    x <- names(x)
    x <- x[!x %in% head_vector]
    x <- x[!grepl('Total', x)]
    if(all(out %in% x)){
      out <- out
    } else {
      out <- x[1]
    }
    return(out)
  })
  # Variable picker: all plottable columns of the filtered data
  # (header columns and 'Total' columns excluded).
  output$variable <- renderUI({
    x <- censified()
    x <- names(x)
    x <- x[!x %in% head_vector]
    x <- x[!grepl('Total', x)]
    selectInput('variable',
                'Variable',
                choices = x,
                selected = variable_choices(),
                # selected = x[1],
                multiple = TRUE)
  })
  # Prompt shown until at least one variable has been picked.
  output$variable_text <- renderText({
    if(is.null(input$variable) | length(input$variable) == 0){
      '(Pick at least one variable)'
    } else {
      NULL
    }
  })
# Visible minority filter
output$vm_filter <- renderUI({
if(input$vm){
choices <- sort(unique(census$`Visible minority`))
choices <- c('All', choices)
choices <- choices[!grepl('Total', choices)]
selectInput('vm_filter',
'Filter',
choices = choices,
multiple = TRUE)
}
})
# Aboriginal identity filter
output$ai_filter <- renderUI({
if(input$ai){
choices <- sort(unique(census$`Aboriginal identity`))
choices <- c('All', choices)
choices <- choices[!grepl('Total', choices)]
selectInput('ai_filter',
'Filter',
choices = choices,
multiple = TRUE)
}
})
# Main table
no_go <- reactive({
no_go <- FALSE
x <- censified()
x <- x[, names(x) %in% c(head_vector, input$variable)]
if(is.null(input$variable)){
no_go <- TRUE
} else {
non_head <- x[,!names(x) %in% head_vector]
if(all(is.na(non_head))){
no_go <- TRUE
}
}
return(no_go)
})
output$xing_text <- renderText({
ng <- no_go()
if(ng){
'No data available for the parameters chosen.'
} else {
NULL
}
})
output$xing_table <- renderDataTable({
out <- DT::datatable(data_frame())
x <- censified()
ng <- no_go()
x <- x[, names(x) %in% c(head_vector, input$variable)]
if(length(input$variable) == 0 | ng) {
out
} else {
x$geo_code <- NULL
prettify(x, download_options = TRUE)
}
})
}
# Launch the Shiny app with the UI and server defined above.
# (The commented htmlTemplate() line is an alternative custom-HTML UI.)
shinyApp(ui = ui,
         # htmlTemplate("www/index.html"),
         server)
|
e51d8554fac259a7e7396affbbfa0d00312b9aa2 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/BSDA/examples/Sat.Rd.R | 50e61ffa13bc5ec523e685baa646856ffa52d17d | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 435 | r | Sat.Rd.R | library(BSDA)
### Name: Sat
### Title: SAT scores, percent taking exam and state funding per student by
### state for 1994, 1995 and 1999
### Aliases: Sat
### Keywords: datasets
### ** Examples
# Subset the SAT data to the 1994 observations via bracket indexing.
Sat94 <- Sat[Sat$year == 1994, ]
Sat94
# Equivalent subset for 1999, using subset() instead.
Sat99 <- subset(Sat, year == 1999)
Sat99
# Stem-and-leaf display of the 1999 total SAT scores.
stem(Sat99$total)
# Scatterplot of total score vs percent of students taking the exam,
# with a fitted simple linear regression line overlaid.
plot(total ~ percent, data = Sat99)
model <- lm(total ~ percent, data = Sat99)
abline(model, col = "blue")
summary(model)
# Clean up the example workspace.
rm(model)
|
ed3be4b4252ce42e1ea135410e181b1f48906fa1 | a108751f3b11f32f345cc07204e0f27819d83b7a | /scripts/data_prep/raw_to_prepared_Workplaces.R | 67c12f30456d3317d37ef73a1ce8c48b657a5116 | [
"MIT"
] | permissive | alan-turing-institute/uatk-spc | d2b276b434b437eae90500e881d0c2a519f58a77 | 753e9888137c19050dde1b17646be52a74c9cfff | refs/heads/main | 2023-08-31T07:46:36.071934 | 2023-08-01T18:37:33 | 2023-08-01T18:37:33 | 479,038,905 | 14 | 9 | MIT | 2023-08-02T09:44:36 | 2022-04-07T15:15:53 | Jupyter Notebook | UTF-8 | R | false | false | 11,790 | r | raw_to_prepared_Workplaces.R | library(dplyr)
library(tidyr)
library(rgdal)
library(sp)
library(foreign)
library(reshape2)
library(readr)
library(stringr)
#library(ggplot2)
folderIn <- "Data/dl/"
folderOut <- "Data/prepData/"
APIKey <- Sys.getenv("API_KEY")
set.seed(14101066)
# Build a Nomis API bulk-download URL for one dataset/geography/date query.
# `other` carries any extra, already URL-encoded query-string fragment
# (it must start with "&" or be empty).
createURL <- function(dataset, geogr, APIKey, date, other) {
  paste0(
    "https://www.nomisweb.co.uk/api/v01/dataset/", dataset,
    ".data.csv?uid=", APIKey,
    "&geography=", geogr,
    "&date=", date,
    other
  )
}
print("Creating Business Registry...")
#############################################################
####### Employees per business unit at national level #######
#############################################################
# Get UK Business Counts - local units by industry and employment size band per industry sic2017 "section" (21 categories), summing all (https://www.nomisweb.co.uk/datasets/idbrlu)
datasetBR <- "NM_141_1"
# Download
geogr <- "2092957699"
date<- "latestMINUS2"
other <- "&industry=150994945...150994965&employment_sizeband=1...9&legal_status=0&measures=20100&select=industry_name,employment_sizeband_name,obs_value&rows=employment_sizeband_name&cols=industry_name"
url <- createURL(datasetBR,geogr,APIKey,date,other)
download.file(url,destfile = paste(folderIn,"data.csv",sep=""))
# Load data and clean
data <- read.csv(paste(folderIn,"data.csv",sep=""))
data <- data[c(1,7,2,5,8,3,6,9,4),]
row.names(data) <- 1:nrow(data)
# 1/x fit
nat <- data.frame(real = rowSums(data[,2:22]))
nat$mid <- c((4-0)/2,5+(9-5)/2,10+(19-10)/2,20+(49-20)/2,50+(99-50)/2,100+(249-100)/2,250+(499-250)/2,500+(999-500)/2,1000+(2000-1000)/2) # 2000 as upper limit is arbitrary
fit <- lm(log(nat$real) ~ log(nat$mid))
nat$fit <-exp(fitted(fit))
# CHECK: real values vs 1/x fit
#ggplot(nat, aes(x=mid, y=real)) + geom_line(color="black",size=2,alpha=0.6) +
# geom_line(aes(x=mid, y=fit),color = 5) +
# ylab("Number of business units") + xlab("Number of employees") +
# ggtitle("National distribution of business unit sizes")
########################################################################################################################
####### Business units per employee size band at MSOA level and per business sic2017 2d division (89 categories) #######
########################################################################################################################
# Get UK Business Counts - local units by industry and employment size band (https://www.nomisweb.co.uk/datasets/idbrlu)
print("Downloading and preparing MSOA data...")
# Download
geogrMSOA <- read.csv("raw_to_prepared_MSOA-IZ_list_for_nomis.txt")
geogrMSOA <- geogrMSOA$MSOA11CD
geogrMSOA <- paste(geogrMSOA,collapse=",")
date<- "latestMINUS2"
# Download (or reuse a cached copy of) the UK Business Counts table for one
# employment size band, at MSOA level by 2-digit SIC industry.
# `size` is the Nomis employment_sizeband code (10/20/30/40).
# Relies on the globals datasetBR, geogrMSOA, APIKey, date and folderIn
# defined above. Returns a data frame sorted by MSOA code, with the first
# column renamed to MSOA11CD and one column per industry code.
downloadBR <- function(size){
  other <- paste("&industry=146800641...146800643,146800645...146800673,146800675...146800679,146800681...146800683,146800685...146800687,146800689...146800693,146800695,146800696,146800698...146800706,146800708...146800715,146800717...146800722,146800724...146800728,146800730...146800739&employment_sizeband=",size,"&legal_status=0&measures=20100&select=geography_code,industry_code,obs_value&rows=geography_code&cols=industry_code",sep="")
  url <- createURL(datasetBR,geogrMSOA,APIKey,date,other)
  # Cache on disk: skip the download when the per-band CSV already exists.
  if(!file.exists(paste(folderIn,"data_BR_",size,".csv",sep=""))){
    download.file(url,destfile = paste(folderIn,"data_BR_",size,".csv",sep=""))
  } else{
    print(paste(paste(folderIn,"data_BR_",size,".csv",sep="")," already exists, not downloading again",sep = ""))
  }
  data <- read.csv(paste(folderIn,"data_BR_",size,".csv",sep=""))
  colnames(data)[1] <- "MSOA11CD"
  data <- data[order(data$MSOA11CD),]
  rownames(data) <- 1:nrow(data)
  return(data)
}
E0to9 <- downloadBR(10)
E10to49 <- downloadBR(20)
E50to249 <- downloadBR(30)
E250p <- downloadBR(40)
# Melt into full list of existing workplaces
# Expand one employment size band's MSOA x industry counts into one row per
# individual business unit. `size` is the Nomis size-band code (10/20/30/40)
# selecting the matching wide table (E0to9 / E10to49 / E50to249 / E250p,
# built above); the table is melted to long format, zero cells dropped, and
# each cell holding k units is replicated k times. `refTemp` numbers the
# units within a cell 1..k so a unique id can be built later.
meltedData <- function(size){
  # BUG-PRONE IDIOM FIX: the original used nested ifelse() calls whose
  # branches were assignments evaluated for their side effects; plain
  # if/else expresses the same scalar dispatch safely.
  if (size == 10) {
    melted <- melt(E0to9)
  } else if (size == 20) {
    melted <- melt(E10to49)
  } else if (size == 30) {
    melted <- melt(E50to249)
  } else {
    melted <- melt(E250p)
  }
  melted <- melted %>% filter(value > 0)
  # Within-cell counter, computed before uncount() consumes `value`.
  refTemp <- unlist(sapply(melted$value,function(x){1:x}))
  melted <- melted %>% uncount(value)
  melted$band <- size
  melted$refTemp <- refTemp
  return(melted)
}
msoaData <- rbind(meltedData(10),meltedData(20),meltedData(30),meltedData(40))
msoaData$variable <- as.numeric(substr(msoaData$variable,2,3))
########################################################################################
####### Employees at LSOA level per business sic2017 2d division (89 categories) #######
########################################################################################
# Get Business Register and Employment Survey (https://www.nomisweb.co.uk/datasets/newbrespub)
datasetES <- "NM_172_1"
print("Downloading and preparing LSOA data...")
# Download
if(!file.exists(paste(folderOut,"lsoaData.csv",sep = ""))){
geogrLSOA <- read.csv("raw_to_prepared_LSOA-DZ_list_for_nomis.txt")
geogrLSOA <- geogrLSOA$LSOA11CD
l <- length(geogrLSOA)
date<- "latest"
other <- "&industry=146800641...146800643,146800645...146800673,146800675...146800679,146800681...146800683,146800685...146800687,146800689...146800693,146800695,146800696,146800698...146800706,146800708...146800715,146800717...146800722,146800724...146800728,146800730...146800739&employment_status=1&measure=1&measures=20100&select=geography_code,industry_code,obs_value&rows=geography_code&cols=industry_code"
geogrLSOA2 <- paste(geogrLSOA[1:1000],collapse=",")
url <- createURL(datasetES,geogrLSOA2,APIKey,date,other)
download.file(url,destfile = paste(folderIn,"data.csv",sep=""))
data <- read.csv(paste(folderIn,"data.csv",sep=""))
for(i in 1:22){
geogrLSOA2 <- paste(geogrLSOA[(i*1000 + 1):min(i*1000 + 1000,l)],collapse=",")
url <- createURL(datasetES,geogrLSOA2,APIKey,date,other)
download.file(url,destfile = paste(folderIn,"data1.csv",sep=""))
data1 <- read.csv(paste(folderIn,"data1.csv",sep=""))
data <- rbind(data,data1)
}
colnames(data)[1] <- "LSOA11CD"
lsoaData <- data[order(data$LSOA11CD),]
rownames(lsoaData) <- 1:nrow(lsoaData)
write.table(lsoaData,paste(folderOut,"lsoaData.csv",sep = ""),row.names = F,sep = ",")
} else{
print(paste(paste(folderOut,"lsoaData.csv",sep = "")," already exists, loading directly",sep = ""))
lsoaData <- read.csv(paste(folderOut,"lsoaData.csv",sep = ""))
}
#########################################################################
####### look up tables: MSOA/LSOA and industry sic2017 categories #######
#########################################################################
oatoOther <- read.csv(paste(folderOut,"lookUp-GB.csv",sep = ""))
# If raw_to_prepared.R hasn't been run:
#download.file("https://opendata.arcgis.com/api/v3/datasets/e8fef92ac4114c249ffc1ff3ccf22e12_0/downloads/data?format=csv&spatialRefId=4326&where=1%3D1",destfile = paste(folderIn,"Output_Area_to_Lower_Layer_Super_Output_Area_to_Middle_Layer_Super_Output_Area_to_Local_Authority_District_(December_2020)_Lookup_in_England_and_Wales.csv",sep = ""))
#oatoOther <- read.csv(paste(folderIn,"Output_Area_to_Lower_Layer_Super_Output_Area_to_Middle_Layer_Super_Output_Area_to_Local_Authority_District_(December_2020)_Lookup_in_England_and_Wales.csv",sep = ""))
oatoOther <- oatoOther[,c("LSOA11CD","MSOA11CD")]
oatoOther <- oatoOther %>% distinct()
temp <- c(rep("A",3),rep("B",5),rep("C",24),rep("D",1),rep("E",4),rep("F",3),rep("G",3),rep("H",5),rep("I",2),rep("J",6),
rep("K",3),rep("L",1),rep("M",7),rep("N",6),rep("O",1),rep("P",1),rep("Q",3),rep("R",4),rep("S",3),rep("T",2),
rep("U",1))
refIC <- data.frame(sic1d07 = temp, sic2d07 = c(1:3,5:9,10:33,35,36:39,41:43,45:47,49:53,55:56,58:63,64:66,68,69:75,77:82,84,85,86:88,90:93,94:96,97:98,99))
#################################################################
####### Assembling the puzzle: register of business units #######
#################################################################
busPop <- merge(msoaData,refIC,by.x="variable",by.y="sic2d07")
colnames(busPop)[1] <- "sic2d07"
# 'id' field
temp1 <- str_pad(busPop$sic2d07, 2, pad = "0")
temp2 <- str_pad(busPop$refTemp, max(nchar(busPop$refTemp)), pad = "0")
busPop$id <- paste(busPop$MSOA11CD,busPop$band,temp1,temp2,sep="")
busPop <- busPop[,c(6,2,3,5,1)]
busPop <- busPop[order(busPop$id),]
row.names(busPop) <- 1:nrow(busPop)
# 'size' field
print("Recalculating business sizes...")
# Draw `n` business-unit sizes for one employment size band, sampling
# candidate sizes with probability proportional to the fitted power law
# coefficients of the global `fit` lm object (sample() normalises the
# `prob` weights, so only their ratios matter).
BUsize <- function(n, band){
  # BUG-PRONE IDIOM FIX: the original nested ifelse() calls relied on
  # assignment side effects; plain if/else is the correct scalar form.
  if (band == 10) {
    x <- 1:9
  } else if (band == 20) {
    x <- 10:49
  } else if (band == 30) {
    x <- 50:249
  } else {
    x <- 250:2000  # 2000 is an arbitrary upper limit (see fit above)
  }
  sample(x, n, replace = TRUE, prob = fit$coefficients[1]*(x^fit$coefficients[2]))
}
busPop$size <- mapply(BUsize,1,busPop$band)
busPop <- busPop[,c(1,6,2,4:5)]
#hist(busPop$size[busPop$size < 60])
# 'lsoa' field
print("Assigning LSOAs...")
lsoaData <- merge(lsoaData,oatoOther,by.x="LSOA11CD",by.y="LSOA11CD")
# Allocate each business unit of one MSOA to an LSOA within that MSOA.
# `name` is the MSOA code; `MSOA11CD`/`sic2d07` are the parallel vectors of
# the business register; `lsoaData` holds LSOA-level employee counts with
# one column per 2-digit SIC (named X01, X02, ...). For each industry, LSOAs
# are sampled weighted by their employee counts; if an industry has no
# employees anywhere in the MSOA, LSOAs are sampled uniformly.
# Returns the sampled LSOA codes aligned with the rows of this MSOA.
msoaFilling <- function(name,lsoaData,MSOA11CD,sic2d07){
  lsoa <- lsoaData %>% filter(MSOA11CD == name)
  ref <- which(MSOA11CD == name)
  sic <- sic2d07[ref]
  res <- rep(NA,length(ref))
  # NOTE(review): 1:length(...) is unsafe when the vector is empty
  # (seq_along() would be); here `ref` is non-empty by construction.
  for(i in 1:length(unique(sic))){
    ref2 <- which(sic == unique(sic)[i])
    # Column name of this industry's employee counts, e.g. "X07".
    weights <- lsoa[,paste("X",str_pad(unique(sic)[i], 2, pad = "0"),sep = "")]
    potlsoa <- lsoa$LSOA11CD
    # NOTE(review): ifelse() used for scalar branching with assignment
    # side effects -- works, but plain if/else would be clearer.
    ifelse(sum(weights) > 0, res[ref2] <- sample(potlsoa, length(ref2), prob = weights, replace = T),
           res[ref2] <- sample(potlsoa, length(ref2), replace = T))
  }
  return(res)
}
LSOA11CD <- sapply(unique(busPop$MSOA11CD),function(x){msoaFilling(x,lsoaData,busPop$MSOA11CD,busPop$sic2d07)})
LSOA11CD <- unname(unlist(LSOA11CD))
#LSOA11CD <- msoaFilling(unique(busPop$MSOA11CD)[1],lsoaData,busPop$MSOA11CD,busPop$sic2d07)
#for(i in 2:length(unique(busPop$MSOA11CD))){
# if(i%%80 == 0){print(paste(round(i/length(unique(busPop$MSOA11CD)),2)*100,"%",sep = ""))}
# res <- msoaFilling(unique(busPop$MSOA11CD)[i],lsoaData,busPop$MSOA11CD,busPop$sic2d07)
# LSOA11CD <- c(LSOA11CD,res)
#}
busPop$LSOA11CD <- LSOA11CD
# 'lng' and 'lat' fields
print("Adding coordinates...")
# England and Wales
#download.file("https://stg-arcgisazurecdataprod1.az.arcgis.com/exportfiles-1559-15693/Lower_layer_Super_Output_Areas_Dec_2011_Boundaries_Full_Clipped_BFC_EW_V3_2022_3601855424856006397.csv?sv=2018-03-28&sr=b&sig=tmZTl6Eh6ryGtEsEaHWPbp0GKF2SUcejnO1DeF7csk4%3D&se=2023-04-26T15%3A58%3A01Z&sp=r",destfile = paste(folderIn,"Lower_Layer_Super_Output_Areas__December_2011__Boundaries_Full_Clipped__BFC__EW_V3.csv",sep = ""))
shp <- read.csv(paste(folderIn,"LSOA_Dec_2011_PWC_in_England_and_Wales_2022_1923591000694358693.csv",sep = ""))
coords <- data.frame(LSOA11CD = shp$LSOA11CD, lng = shp$x, lat = shp$y)
# Scotland
download.file("https://maps.gov.scot/ATOM/shapefiles/SG_DataZoneCent_2011.zip",destfile = paste(folderIn,"SG_DataZoneCent_2011.zip",sep = ""))
unzip(paste(folderIn,"SG_DataZoneCent_2011.zip",sep = ""),exdir=folderIn)
coords2 <- read.dbf(paste(folderIn,"SG_DataZone_Cent_2011.dbf",sep = ""))
ukgrid = "+init=epsg:27700"
latlong = "+init=epsg:4326"
coords3 <- cbind(Easting = as.numeric(as.character(coords2$Easting)), Northing = as.numeric(as.character(coords2$Northing)))
coords3 <- SpatialPointsDataFrame(coords3, data = data.frame(coords2$DataZone), proj4string = CRS("+init=epsg:27700"))
coords3 <- spTransform(coords3, CRS(latlong))
#plot(coords3)
coords3 <- coords3@coords
coords3 <- data.frame(LSOA11CD = coords2$DataZone, lng = coords3[,1], lat = coords3[,2])
refLSOA <- rbind(coords,coords3)
busPop <- merge(busPop,refLSOA,by.x = "LSOA11CD",by.y = "LSOA11CD")
busPop <- busPop[,c(2:4,1,7,8,5,6)]
busPop <- busPop[order(busPop$id),]
row.names(busPop) <- 1:nrow(busPop)
print("Writing outputs...")
write.table(busPop,"Outputs/businessRegistry.csv",sep=",",row.names = F) |
349452ecb36323024cb8fb516772454bfe7d5eb7 | e389b5d62bf21948b61ba6944a4f72960a804a7b | /delta_hedging_for_CEVmodel(notradingcost).R | a7ea835482dadbc76ff1577f2e9f46afc77b6598 | [] | no_license | yedidv/fin_sim_comp_hedging_asset_pricing_model | f09a4e352186082a796a44e529ba2f818bbf6c90 | 386800f64651aed50a8f8298da3e3d3db0119ddb | refs/heads/main | 2023-04-23T09:52:09.346077 | 2021-05-07T23:07:55 | 2021-05-07T23:07:55 | 358,719,633 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,764 | r | delta_hedging_for_CEVmodel(notradingcost).R | rm(list = ls())
library(tidyverse)
library(Matrix)
library(plotly)
library(moments)
#setwd("F:/Spring/Financial computation and simulation")
source('asset_price_model.r')
source('misc_funs.r')
set.seed(2654)
## Read the data ####
n_stocks <- 1
prices <- Read_Data(n_stocks)
prices %>% head()
## Calculate Returns
rets <- Returns(prices)
## Look at the moments for the prices and the returns
price_moms <- Moments(prices)
price_moms
rets_moms <- Moments(rets)
rets_moms
mean_rets <- Single_Moment(rets_moms, 'mean') %>%
t()
mu<- as.numeric(matrix(Annualize(mean_rets), ncol = 1))
vol <- as.numeric(Annualize(var(rets %>% select(-Date), use = 'complete.obs')))
S0 <- as.numeric(prices %>% select(-Date, -RF) %>% tail(1)%>% t())
K <- as.numeric(prices %>% select(-Date, -RF) %>% tail(1) %>% t()) +5
r <- 0.05
alpha <- 1
sigma <- vol*S0^(1-alpha)
t <- 1
n <- 1
# 1. simulate the paths
M <- 100
N <- 40
# Simulate M paths over N Euler steps on [0, t] for two discretisations of
# the same SDE family: a GBM scheme and a CEV scheme (dS = r*S*dt +
# sigma*S^alpha*dW). The same normal draws drive both schemes at every
# step so the two can be compared path by path. Returns a list with
# elements "GBM" and "CEV", each an M x (N+1) matrix of prices.
myCEV <- function(M, N, r, sigma, t, S0, alpha) {
  gbm <- matrix(NA_real_, nrow = M, ncol = N + 1)
  cev <- matrix(NA_real_, nrow = M, ncol = N + 1)
  gbm[, 1] <- S0
  cev[, 1] <- S0
  step <- t / N
  sq_step <- sqrt(step)
  for (k in seq_len(N)) {
    # Common shocks shared by both schemes at this step.
    shocks <- rnorm(M)
    gbm[, k + 1] <- gbm[, k] + r * gbm[, k] * step +
      sigma * gbm[, k] * sq_step * shocks
    cev[, k + 1] <- cev[, k] + r * cev[, k] * step +
      sigma * cev[, k]^alpha * sq_step * shocks
  }
  list(GBM = gbm, CEV = cev)
}
# Delta hedging simulation for a European call (call = 1) or put
# (otherwise) on a CEV underlying. Rebalances the stock hedge at N
# equally spaced dates, accumulates the rebalancing cash flows, and
# returns: H.perf (sd of discounted hedge cash flows divided by the
# Black-Scholes premium), PV (per-path discounted cash-flow totals) and
# BLS (the Black-Scholes premium itself).
#
# NOTE(review): the function reads `vol`, `alpha` and `n` from the global
# environment rather than taking them as arguments -- confirm they are
# set before calling. In the call branch the inner delta() helper uses
# the outer `N` (not its `n` argument); the put branch uses `n+1`
# instead -- verify which indexing is intended.
delta.hedge <- function(M,N,S0,K,r,sigma,t,mu,call=1){
  print(N)
  if (call == 1){
    # Black-Scholes call premium (performance denominator).
    d1 <- (log(S0/K) + (r + vol*vol/2)*t)/(vol*sqrt(t))
    d2 <- d1 - vol*sqrt(t)
    BLS <- S0*pnorm(d1) - K*exp(-r*t)*pnorm(d2)
    # Plain vanilla call payoff function
    f <- function(S,K){
      f <- pmax(S-K,0)
    }
    # Bump size for the finite-difference delta estimate.
    h <- 0.1
    # Monte Carlo forward-difference delta, using common random numbers
    # (same seed for the bumped and unbumped simulations).
    delta <- function(M,n,t,r,S0,K,sigma,ss=1){
      set.seed(ss)
      #ST <- S0*exp((r - 0.5*sigma^2)*t+sigma*sqrt(t)*rnorm(M))
      ST <- myCEV(M,N,r,sigma,t,S0,alpha)$CEV[,N+1]
      set.seed(ss)
      #STh <- (S0+h)*exp((r - 0.5*sigma^2)*t+sigma*sqrt(t)*rnorm(M))
      STh <-myCEV(M,N,r,sigma,t,S0+h,alpha)$CEV[,N+1]
      f0 <- f(ST,K)
      f0h <- f(STh,K)
      fd <- exp(-r*t)*mean((f0h - f0) / h)
    }
    # Simulate the paths and deltas:
    X <- deltas <- matrix(NA,ncol=N+1,nrow=M)
    X[,1] <- S0
    dt <- t/N
    for (i in 1:N){
      # CEV Euler step under the real-world drift mu.
      X[,i+1] <- X[,i]+ mu*X[,i]*dt +
        sigma*X[,i]^alpha*sqrt(dt)*rnorm(M)
      ttm <- t - dt*(i-1)
      for (j in 1:M) {
        deltas[j,i] <- delta(M,n,ttm,r,X[j,i],K,sigma)
      }
    }
    # Fill in terminal deltas (1/0):
    for (j in 1:M) {
      deltas[j,N+1] <- delta(M,n,0,r,X[j,N+1],K,sigma)
    }
    # Generate a matrix of positions:
    # Cash flows from setting up and rebalancing the stock hedge.
    CF <- matrix(NA,ncol=N+1,nrow=M)
    CF[,1] <- -deltas[,1]*S0
    for (j in 1:M) {
      for (i in 1:(N-1)){
        # transaction cost equals to 1% of the trading value
        #if(deltas[j,i+1] < deltas[j,i]){
        CF[j,i+1] <- -1*(deltas[j,i+1] - deltas[j,i])*X[j,i]
        #CF[j,i+1] <- (-1*(deltas[j,i+1] - deltas[j,i])*X[j,i])*(1+0.01)
        #} else {
        #CF[j,i+1] <- (-1*(deltas[j,i+1] - deltas[j,i])*X[j,i+1])*(1-0.01)
        #}
      }
    }
    # Terminal settlement: in-the-money paths deliver the stock for K;
    # out-of-the-money paths simply unwind the remaining hedge.
    IN <- which(X[,N+1] > K)
    CF[IN,N+1] <- K - ((1-deltas[IN,N])*X[IN,N+1])
    CF[-IN,N+1] <- deltas[-IN,N]*X[-IN,N+1]
    # 3. sum the costs:
    disc <- matrix(exp(-r*seq(0,t,length=N+1)),ncol=1)
    PV <- CF%*%disc
    # compute performace
    H.perf <- sqrt(var(PV))/BLS
    outlist <- list("H.perf"=H.perf,"PV"=PV,"BLS"=BLS)
    return(outlist)
  }
  else{
    # Put branch: mirrors the call branch with the put premium, the put
    # payoff, and reversed moneyness at settlement.
    d1 <- (log(S0/K) + (r + vol*vol/2)*t)/(vol*sqrt(t))
    d2 <- d1 - vol*sqrt(t)
    BLS <- K*exp(-r*t)*pnorm(-d2) - S0*pnorm(-d1)
    # Plain vanilla put payoff function
    f <- function(S,K){
      f <- pmax(K-S,0)
    }
    h <- 0.1
    delta <- function(M,n,t,r,S0,K,sigma,ss=1){
      set.seed(ss)
      #ST <- S0*exp((r - 0.5*sigma^2)*t+sigma*sqrt(t)*rnorm(M))
      ST <- myCEV(M,N,r,sigma,t,S0,alpha)$CEV[,n+1]
      set.seed(ss)
      #STh <- (S0+h)*exp((r - 0.5*sigma^2)*t+sigma*sqrt(t)*rnorm(M))
      STh <-myCEV(M,N,r,sigma,t,S0+h,alpha)$CEV[,n+1]
      f0 <- f(ST,K)
      f0h <- f(STh,K)
      fd <- exp(-r*t)*mean((f0h - f0) / h)
    }
    # Simulate the paths and deltas:
    # NOTE(review): this branch simulates under the risk-free drift `r`
    # whereas the call branch uses `mu` -- confirm the asymmetry is
    # intentional.
    X <- deltas <- matrix(NA,ncol=N+1,nrow=M)
    X[,1] <- S0
    dt <- t/N
    for (i in 1:N){
      X[,i+1] <- X[,i]+ r*X[,i]*dt +
        sigma*X[,i]^alpha*sqrt(dt)*rnorm(M)
      ttm <- t - dt*(i-1)
      for (j in 1:M) {
        deltas[j,i] <- delta(M,n,ttm,r,X[j,i],K,sigma)
      }
    }
    # Fill in terminal deltas (1/0):
    for (j in 1:M) {
      deltas[j,N+1] <- delta(M,n,0,r,X[j,N+1],K,sigma)
    }
    # Generate a matrix of positions:
    CF <- matrix(NA,ncol=N+1,nrow=M)
    CF[,1] <- -deltas[,1]*S0
    for (j in 1:M) {
      for (i in 1:(N-1)){
        # transaction cost equals to 1% of the trading value
        #if(deltas[j,i+1] < deltas[j,i]){
        CF[j,i+1] <- -1*(deltas[j,i+1] - deltas[j,i])*X[j,i]
        #CF[j,i+1] <- (-1*(deltas[j,i+1] - deltas[j,i])*X[j,i])*(1+0.01)
        #} else {
        #CF[j,i+1] <- (-1*(deltas[j,i+1] - deltas[j,i])*X[j,i+1])*(1-0.01)
        #}
      }
    }
    # Terminal settlement for the put: paths finishing below K exercise.
    IN <- which(X[,N+1] < K)
    CF[IN,N+1] <- -(((-1)-deltas[IN,N])*X[IN,N+1]) - K
    CF[-IN,N+1] <- deltas[-IN,N]*X[-IN,N+1]
    # 3. sum the costs:
    disc <- matrix(exp(-r*seq(0,t,length=N+1)),ncol=1)
    PV <- CF%*%disc
    # compute performace
    H.perf <- sqrt(var(PV))/BLS
    outlist <- list("H.perf"=H.perf,"PV"=PV,"BLS"=BLS)
    return(outlist)
  }
}
# Run the hedging experiment for each rebalancing frequency in N and
# collect the performance ratio and mean discounted hedge value.
#N <- c(4,5,10,20,40,80)
N <- c(4,5)
H <- c(NA)
PV <- c(NA)
# NOTE(review): prefer seq_along(N) over 1:length(N) (safe for empty N).
for (j in 1:length(N)){
  tmp <- delta.hedge(M,N[j],S0,K,r,sigma,t,mu,call = 0)
  H[j] <- tmp$H.perf
  PV[j] <- mean(tmp$PV)
}
print(H)
print(PV)
# Black-Scholes premium from the last run (independent of N).
print(tmp$BLS)
|
1d69b84a25faa066e38dbb4c98d22c4a4b922ce6 | 5d5d7785f5ce2ff377ebec29d74382652502c1d8 | /man/pairwise_count.Rd | 773aa50c0b9da164d1aa0a2044cd7c07bd377fdd | [
"MIT"
] | permissive | standardgalactic/wpa | d7256e719732c7c3f067e88d253e600cd1d66a06 | b64b562cee59ea737df58a9cd2b3afaec5d9db64 | refs/heads/main | 2023-08-10T19:11:03.211088 | 2021-09-08T13:40:35 | 2021-09-08T13:40:35 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,821 | rd | pairwise_count.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pairwise_count.R
\name{pairwise_count}
\alias{pairwise_count}
\title{Perform a pairwise count of words by id}
\usage{
pairwise_count(data, id = "line", word = "word")
}
\arguments{
\item{data}{Data frame output from \code{tm_clean()}.}
\item{id}{String to represent the id variable. Defaults to \code{"line"}.}
\item{word}{String to represent the word variable. Defaults to \code{"word"}.}
}
\value{
data frame with the following columns representing a pairwise count:
\itemize{
\item \code{"item1"}
\item \code{"item2"}
\item \code{"n"}
}
}
\description{
This is a 'data.table' implementation that mimics the output of
\code{pairwise_count()} from 'widyr' to reduce package dependency. This is used
internally within \code{tm_cooc()}.
}
\examples{
td <- data.frame(line = c(1, 1, 2, 2),
word = c("work", "meeting", "catch", "up"))
pairwise_count(td, id = "line", word = "word")
}
\seealso{
Other Support:
\code{\link{camel_clean}()},
\code{\link{check_inputs}()},
\code{\link{combine_signals}()},
\code{\link{cut_hour}()},
\code{\link{extract_date_range}()},
\code{\link{extract_hr}()},
\code{\link{heat_colours}()},
\code{\link{is_date_format}()},
\code{\link{maxmin}()},
\code{\link{p_test}()},
\code{\link{plot_WOE}()},
\code{\link{read_preamble}()},
\code{\link{rgb2hex}()},
\code{\link{totals_bind}()},
\code{\link{totals_col}()},
\code{\link{totals_reorder}()},
\code{\link{tstamp}()},
\code{\link{us_to_space}()},
\code{\link{wrap}()}
Other Text-mining:
\code{\link{meeting_tm_report}()},
\code{\link{subject_validate_report}()},
\code{\link{subject_validate}()},
\code{\link{tm_clean}()},
\code{\link{tm_cooc}()},
\code{\link{tm_freq}()},
\code{\link{tm_wordcloud}()}
}
\concept{Support}
\concept{Text-mining}
|
4e1c34d300ccb7ddfe62096482f51bcc5a2f2a36 | 5292f1bedc1ed2ce5a48008fdb38c654514a697c | /man/bagging.Rd | c55da9e1eac0a28d30b991fab38190d43b50caba | [] | no_license | vishalbelsare/HDeconometricsBeta | b074a2e485523238b790d2c4f3f19299bc8135ed | 6a0266a4eefae665664d4ede8f51a7ea1c6d5063 | refs/heads/master | 2020-04-03T21:42:32.820685 | 2017-03-21T00:47:44 | 2017-03-21T00:47:44 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,988 | rd | bagging.Rd | \name{bagging}
\alias{bagging}
\title{Estimates bagging coefficients for a given pre-testing procedure.}
\usage{
# Method 1:
bagging(y,X,R=100,l=5,sim="fixed",pre.testing=c("joint","individual"),fixed.controls=NULL,...)
# Method 2:
bagging(y,X,fn,R=100,l=5,sim="fixed",pre.testing="personal",...)
}
\description{
This function returns the pre-testing coefficients for all bootstrap samples. This coefficients may then be used to calculate forecasts.
}
\arguments{
\item{y}{T-dimensional vector with the dependent variable.}
\item{X}{T x N matrix with all candidate variables.}
\item{R}{Number of bootstrap samples to be used.}
\item{l}{length of the blocks for blockbootstrap.}
\item{sim}{How the blockboostrap is computed. See ?tsboot for details.}
\item{pre.testing}{For method 1 this argument defines which pre-testing of the function baggit to be used. For method 2 this argument must be set to "personal", indicating that some other user defined pre-testing is to be used. }
\item{fixed.controls}{Only for method 1. Vector of indexes to determine which variables are fixed if pre.testing="individual". See ?baggit for details.}
\item{fn}{User define pre-testing for method 2.}
\item{...}{Aditional arguments for the tsboot function and the user defined pre-testing in method 2.}
}
\value{
This function returns a list with several items.
\item{boot.coef}{R x N matrix. Each line has the coefficients for one bootstrap sample.}
\item{orig.coef}{Coefficients for the pre-testing applied on the original sample. }
\item{fitted.values}{In-sample fitted values.}
}
\details{
Nothing to add.
}
\examples{
## Generates data
set.seed(12345)
N=50
p=20
T=101
betas=runif(p-1,-1,1)
data=dgp(N=N,p=p,T=T,betas=betas)
y=data$y
X=data$X
X=cbind(embed(y,2)[,-1],X[-1,])
y=y[-1]
set.seed(123)
#Method 1
test1=bagging(y,X,R=50,l=5,pre.testing = "individual")
# Method 2
test2=bagging(y,X,fn=baggit,R=50,l=5,pre.testing = "personal")
}
|
73b64a5013c06660df70d40af6f8e04494ae8e20 | f2d3d9d517a121ec7c48cc6c1ad1440fcc8a4188 | /man/interp_param.Rd | 18b512c81377a03002533e58b4b7039538a95d9d | [] | no_license | asgr/ProSpect | 77cf8dc72bd9c1101883cb05581f88ab40d68d82 | 9757ae9a46f63cb14c2445cde9b9c2b79ce2d8d1 | refs/heads/master | 2023-05-26T14:42:16.862494 | 2023-05-22T08:43:24 | 2023-05-22T08:43:24 | 111,638,670 | 14 | 3 | null | 2022-11-02T06:06:45 | 2017-11-22T05:05:46 | R | UTF-8 | R | false | false | 3,531 | rd | interp_param.Rd | \name{interp_param}
\alias{interp_param}
\alias{interp_quick}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Interpolate weights for a vector of values
}
\description{
Utility functions to interpolate linear or log spaced weights for a vector of values. This can be used to create interpolations for x versus y type data, but it is more useful when the property to be weighted is non-trivial, e.g. you have spectra on a grid of metallicities and you want to interpolate between values. \code{interp_quick} is faster, but can only be used for single value lookups, has less flexibility, and returns less information.
}
\usage{
interp_param(x, params, log = FALSE, method = "linear")
interp_quick(x, params, log = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
Numeric vector; the value/s to use for the interpolation. For \code{interp_quick} this can only be a scalar.
}
\item{params}{
Numeric vector; the values to be interpolated.
}
\item{log}{
Logical scalar; should the interpolation be done in linear space (\option{log}=FALSE, the default), or log space (\option{log}=TRUE).
}
\item{method}{
One of “constant", “linear", “nearest", “spline", or “cubic"; default is “linear". This is passed to \code{\link{interp1}}.
}
}
\details{
This routine is used to calculate appropriate weights for a number of interpolations within \code{ProSpect}, where outputs are often generated for fixed grids of parameters (e.g. metallicity, AGN fraction and radiation field).
}
\value{
A data.frame with the same number of rows as the length of \option{x}, with columns:
\item{x}{The value/s uses for the interpolation (might be different to the input \option{x} if this went beyond the limits of \option{params})}
\item{param_lo}{The nearest value of \option{params} lower than \option{x}}
\item{param_hi}{The nearest value of \option{params} higher than \option{x}}
\item{ID_lo}{The location ID of the nearest value of \option{params} lower than \option{x}}
\item{ID_hi}{The location ID of the nearest value of \option{params} higher than \option{x}}
\item{ID_mode}{The ID with the most weight between \option{ID_lo} and \option{ID_hi}}
\item{wt_lo}{The interpolation weight to be applied to the nearest value of \option{params} lower than \option{x}}
\item{wt_hi}{The interpolation weight to be applied to the nearest value of \option{params} higher than \option{x}}
\item{flag}{Interpolation flag: 0 means the input \option{x} is at an exact value of \option{params}, in which case \option{ID_lo}=\option{ID_hi}, \option{wt_lo}=1 and \option{wt_hi}=0; 1 means \option{x} is less than the minimum of \option{params}, so forced to this value; 2 means \option{x} is between two values in \option{params}, so interpolation behaves in a standard sense; 3 means \option{x} is more than the maximum of \option{params}, so forced to this value}
%% ...
}
\author{
Aaron Robotham
}
\note{
In the output, \option{flag}=2 is the "normal" flag in the sense the interpolation has not gone beyond the limits of \option{params} and is not trivial (as exact value of \option{params}).
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{Dale_interp}}
}
\examples{
interp_param(c(0.1,3.3,5.8,8,11.2),1:10)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ interpolation }% use one of RShowDoc("KEYWORDS")
|
f1244003a8023a94021f0764ca9783c37c31c2e8 | c44582cb89d12fae3667c602bd924cc286d175c4 | /tests/testthat/test-get_references.R | de839de26c8db23bdc870d265ce56c2d5cd3ce5b | [] | no_license | kbroman/rebi | a4837c36909c91cbe02b1b33a277c461eccba176 | 262bd147ca8609ca100658e7a81c8addf5d773f3 | refs/heads/master | 2020-12-03T08:15:26.197343 | 2014-07-01T15:28:08 | 2014-07-01T15:28:08 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 492 | r | test-get_references.R | # test for get_references
# test for get_references
context("get_references")
# NOTE(review): this hits the live Europe PMC web service -- the fixture
# is fetched once and shared (read-only) by the tests below.
my.data <- get_references(ext_id="PMC3219685", src = "PMC")
# The API returns 15 reference fields per record.
test_that("dimensions", {
  expect_that(ncol(my.data), equals(15))
})
test_that("class", {
  expect_that(my.data, is_a("data.frame"))
})
# Every returned reference should point back to the queried article.
test_that("value", {
  expect_that(unique(unlist(my.data$src_ext_id)) == "PMC:PMC3219685", is_true())
})
# An id with no references warns instead of erroring.
test_that("errors", {
  expect_that(get_references("14600211"),
              gives_warning("No references found for: 14600211"))
})
e078453241d82c4fccbc2e0c7ff184276853d70c | 29585dff702209dd446c0ab52ceea046c58e384e | /eggCounts/R/fecr_models_stan.R | 494eb2c3655bb98e4353cbab7fb8e23036e9d6f7 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,559 | r | fecr_models_stan.R | # set default values for the priors
# Fill in default prior specifications for the FECR Stan models.
# Each prior is a list with elements `priorDist` (distribution name) and
# `hyperpars` (length-2 numeric vector); any piece the caller omits is
# replaced by the model default. Returns list(mu, kappa, delta, phi).
fecr_setPrior <- function(muPrior, kappaPrior, deltaPrior, phiPrior){
  # Complete one prior list with its default distribution/hyperparameters.
  complete_prior <- function(prior, dist, pars) {
    if (is.null(prior[["priorDist", exact = TRUE]])) prior$priorDist <- dist
    if (is.null(prior[["hyperpars", exact = TRUE]])) prior$hyperpars <- pars
    prior
  }
  if (missing(muPrior))    muPrior    <- list()
  if (missing(kappaPrior)) kappaPrior <- list()
  if (missing(deltaPrior)) deltaPrior <- list()
  if (missing(phiPrior))   phiPrior   <- list()
  list(
    mu    = complete_prior(muPrior,    "gamma", c(1, 0.001)),
    kappa = complete_prior(kappaPrior, "gamma", c(1, 0.7)),
    delta = complete_prior(deltaPrior, "beta",  c(1, 1)),
    phi   = complete_prior(phiPrior,   "beta",  c(1, 1))
  )
}
# Stan model code for paired model without zero inflation
#
# Returns (as one character string) the Stan program for the paired faecal
# egg count reduction model: each animal i has a latent pre-treatment mean
# mub[i] ~ gamma(kappa, kappa/mu); the post-treatment mean is delta*mub[i];
# raw McMaster counts are Poisson with the latent mean divided by the
# correction factors fpre/fpost.
#
# priors: list as returned by fecr_setPrior(); the priorDist names and
# hyperpars values for mu, kappa and delta are pasted verbatim into the
# generated Stan code.
#
# NOTE(review): the generated program uses `<-` assignment, removed in
# recent Stan releases — presumably this targets an old rstan; confirm
# before upgrading.
paired_stan <- function(priors){
#hyperparameters for pre-treatment mean mu
a.mu <- priors$mu$hyperpars[1]
b.mu <- priors$mu$hyperpars[2]
dist.mu <- priors$mu$priorDist
#hyperparameters for overdispersion parameter kappa
a.kappa <- priors$kappa$hyperpars[1]
b.kappa <- priors$kappa$hyperpars[2]
dist.kappa <- priors$kappa$priorDist
#hyperparameters for change in mean delta
a.delta <- priors$delta$hyperpars[1]
b.delta <- priors$delta$hyperpars[2]
dist.delta <- priors$delta$priorDist
# assemble the Stan program; quoted segments below are Stan code, not R
paste0('data {
int J; // number of animals
int ystararaw[J]; // after treatment McMaster count
int ystarbraw[J]; // before treatment McMaster count
int fpre[J];
int fpost[J];
}
parameters {
real<lower=0> kappa;
real<lower=0> mu;
real<lower=0,upper=1> delta;
real<lower=0> mub[J];
}
transformed parameters{
real lambdaa[J];
real lambdab[J];
real kappamu;
for (i in 1:J){
lambdab[i] <- mub[i]/fpre[i];
lambdaa[i] <- delta*mub[i]/fpost[i];
}
kappamu <- kappa/mu;
}
model {
mu ~ ',dist.mu,'(',a.mu,',',b.mu,'); // prior
kappa ~ ',dist.kappa,'(',a.kappa,',',b.kappa,');
delta ~ ',dist.delta,'(',a.delta,',',b.delta,');
mub ~ gamma(kappa,kappamu); // likelihoods
ystarbraw ~ poisson(lambdab);
ystararaw ~ poisson(lambdaa);
}')
}
# Stan model code for unpaired model without zero inflation --------------
#
# Returns the Stan program for the unpaired design: independent groups of
# Jb pre-treatment and Ja post-treatment animals, each with its own latent
# epg mean (mub / mua) drawn from the same gamma(kappa, kappa/mu).
#
# priors: list as returned by fecr_setPrior(); mu/kappa/delta entries are
# pasted verbatim into the generated Stan code.
#
# NOTE(review): the data block declares fpre[Ja] and fpost[Jb], yet
# lambdab indexes fpre over 1:Jb and lambdaa indexes fpost over 1:Ja —
# compare ZI_unpaired_stan, which declares fpost[Ja]/fpre[Jb]. The two
# declarations look swapped; this only matters when Ja != Jb — confirm.
# The generated code also uses pre-Stan-2.10 syntax (`<-`,
# increment_log_prob).
unpaired_stan <- function(priors){
#hyperparameters for pre-treatment mean mu
a.mu <- priors$mu$hyperpars[1]
b.mu <- priors$mu$hyperpars[2]
dist.mu <- priors$mu$priorDist
#hyperparameters for overdispersion parameter kappa
a.kappa <- priors$kappa$hyperpars[1]
b.kappa <- priors$kappa$hyperpars[2]
dist.kappa <- priors$kappa$priorDist
#hyperparameters for change in mean delta
a.delta <- priors$delta$hyperpars[1]
b.delta <- priors$delta$hyperpars[2]
dist.delta <- priors$delta$priorDist
# assemble the Stan program; quoted segments below are Stan code, not R
paste0('data {
int Ja; // number of animals
int Jb;
int ystararaw[Ja]; // after treatment McMaster count
int ystarbraw[Jb]; // before treatment McMaster count
int fpre[Ja];
int fpost[Jb];
}
parameters {
real<lower=0> kappa;
real<lower=0> mu;
real<lower=0,upper=1> delta;
real<lower=0> mub[Jb]; # true epg before treatment
real<lower=0> mua[Ja]; # true epg after treatment
}
transformed parameters{
real lambdaa[Ja];
real lambdab[Jb];
real kappamu;
for (i in 1:Jb){
lambdab[i] <- mub[i]/fpre[i];
}
for (i in 1:Ja){
lambdaa[i] <- delta*mua[i]/fpost[i];
}
kappamu <- kappa/mu;
}
model {
// prior
mu ~ ',dist.mu,'(',a.mu,',',b.mu,');
kappa ~ ',dist.kappa,'(',a.kappa,',',b.kappa,');
delta ~ ',dist.delta,'(',a.delta,',',b.delta,');
increment_log_prob(gamma_log(mub,kappa,kappamu)+gamma_log(mua,kappa,kappamu)); // likelihoods
ystarbraw ~ poisson(lambdab);
ystararaw ~ poisson(lambdaa);
}')
}
# Stan model code for unpaired model with zero inflation ---------------
#
# Returns the Stan program for the unpaired design with zero inflation:
# with probability phi an animal's latent epg mean is zero (uninfected),
# otherwise it follows gamma(kappa, kappa/mu); counts are Poisson as in
# the non-inflated model.
#
# priors: list as returned by fecr_setPrior(); mu/kappa/delta/phi entries
# are pasted verbatim into the generated Stan code.
#
# NOTE(review): generated code uses pre-Stan-2.10 syntax (`<-`,
# increment_log_prob, *_log functions) — confirm the targeted rstan version.
ZI_unpaired_stan <- function(priors){
#hyperparameters for pre-treatment mean mu
a.mu <- priors$mu$hyperpars[1]
b.mu <- priors$mu$hyperpars[2]
dist.mu <- priors$mu$priorDist
#hyperparameters for overdispersion parameter kappa
a.kappa <- priors$kappa$hyperpars[1]
b.kappa <- priors$kappa$hyperpars[2]
dist.kappa <- priors$kappa$priorDist
#hyperparameters for change in mean delta
a.delta <- priors$delta$hyperpars[1]
b.delta <- priors$delta$hyperpars[2]
dist.delta <- priors$delta$priorDist
#hyperparameters for prevalence
a.phi <- priors$phi$hyperpars[1]
b.phi <- priors$phi$hyperpars[2]
dist.phi <- priors$phi$priorDist
# assemble the Stan program; quoted segments below are Stan code, not R
paste0('data {
int Ja; // number of animals
int Jb;
int ystararaw[Ja]; // after treatment McMaster count
int ystarbraw[Jb]; // before treatment McMaster count
int fpost[Ja];
int fpre[Jb];
}
parameters {
real<lower=0> kappa;
real<lower=0> mu;
real<lower=0,upper=1> delta;
real<lower=0> mua[Ja];
real<lower=0> mub[Jb];
real<lower=0,upper=1> phi;
}
transformed parameters{
real lambdaa[Ja];
real lambdab[Jb];
real kappamu;
for (i in 1:Jb){
lambdab[i] <- mub[i]/fpre[i];
}
for (i in 1:Ja){
lambdaa[i] <- delta*mua[i]/fpost[i];
}
kappamu <- kappa/mu;
}
model {
// prior
mu ~ ',dist.mu,'(',a.mu,',',b.mu,');
kappa ~ ',dist.kappa,'(',a.kappa,',',b.kappa,');
delta ~ ',dist.delta,'(',a.delta,',',b.delta,');
phi ~ ',dist.phi,'(',a.phi,',',b.phi,');
// likelihoods
ystarbraw ~ poisson(lambdab);
ystararaw ~ poisson(lambdaa);
for (n in 1:Jb) {
if (mub[n] == 0)
increment_log_prob(bernoulli_log(1,phi));
else
increment_log_prob(bernoulli_log(0,phi) + gamma_log(mub[n],kappa,kappamu));
}
for (n in 1:Ja) {
if (mua[n] == 0)
increment_log_prob(bernoulli_log(1,phi));
else
increment_log_prob(bernoulli_log(0,phi) + gamma_log(mua[n],kappa,kappamu));
}
}')
}
# Stan model code for paired model with zero inflation -------------------
#
# Returns the Stan program for the paired design with zero inflation:
# the latent gamma model for each animal's pre-treatment mean is as in
# paired_stan(), while the observed counts follow a zero-inflated Poisson
# with mixing probability phi (marginalised via log_sum_exp for the
# zero-count observations).
#
# priors: list as returned by fecr_setPrior(); mu/kappa/delta/phi entries
# are pasted verbatim into the generated Stan code.
#
# NOTE(review): generated code uses pre-Stan-2.10 syntax (`<-`,
# increment_log_prob, *_log functions) — confirm the targeted rstan version.
ZI_paired_stan <- function(priors){
#hyperparameters for pre-treatment mean mu
a.mu <- priors$mu$hyperpars[1]
b.mu <- priors$mu$hyperpars[2]
dist.mu <- priors$mu$priorDist
#hyperparameters for overdispersion parameter kappa
a.kappa <- priors$kappa$hyperpars[1]
b.kappa <- priors$kappa$hyperpars[2]
dist.kappa <- priors$kappa$priorDist
#hyperparameters for change in mean delta
a.delta <- priors$delta$hyperpars[1]
b.delta <- priors$delta$hyperpars[2]
dist.delta <- priors$delta$priorDist
#hyperparameters for zero-inflation
a.phi <- priors$phi$hyperpars[1]
b.phi <- priors$phi$hyperpars[2]
dist.phi <- priors$phi$priorDist
# assemble the Stan program; quoted segments below are Stan code, not R
paste0('data {
int J; // number of animals
int ystararaw[J]; // after treatment McMaster count
int ystarbraw[J]; // before treatment McMaster count
int fpre[J];
int fpost[J];
}
parameters {
real<lower=0> kappa;
real<lower=0> mu;
real<lower=0,upper=1> delta;
real<lower=0> mub[J];
real<lower=0,upper=1> phi;
}
transformed parameters{
real lambdaa[J];
real lambdab[J];
real kappamu;
for (i in 1:J){
lambdab[i] <- mub[i]/fpre[i];
lambdaa[i] <- delta*mub[i]/fpost[i];
}
kappamu <- kappa/mu;
}
model {
mu ~ ',dist.mu,'(',a.mu,',',b.mu,'); // prior
kappa ~ ',dist.kappa,'(',a.kappa,',',b.kappa,');
delta ~ ',dist.delta,'(',a.delta,',',b.delta,');
phi ~ ',dist.phi,'(',a.phi,',',b.phi,');
mub ~ gamma(kappa,kappamu); // likelihoods
for (n in 1:J) {
if (ystarbraw[n] == 0)
increment_log_prob(log_sum_exp(bernoulli_log(1,phi), bernoulli_log(0,phi)+poisson_log(ystarbraw[n],lambdab[n])));
else
increment_log_prob(bernoulli_log(0,phi) + poisson_log(ystarbraw[n],lambdab[n]));
}
for (n in 1:J) {
if (ystararaw[n] == 0)
increment_log_prob(log_sum_exp(bernoulli_log(1,phi), bernoulli_log(0,phi)+poisson_log(ystararaw[n],lambdaa[n])));
else
increment_log_prob(bernoulli_log(0,phi) + poisson_log(ystararaw[n],lambdaa[n]));
}
}
')
}
|
20dd31d6b0af3c9af1ec2fddd9cd1df3537a7c18 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/FitAR/examples/JacobianK.Rd.R | 6fbf72ca9196dd8745834d99bbd2f17c5f7e6e3e | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 159 | r | JacobianK.Rd.R | library(FitAR)
### Name: JacobianK
### Title: Internal Utility Function
### Aliases: JacobianK
### Keywords: ts
### ** Examples
JacobianK(rep(0.8,4),3)
|
f5e51d9110f8e182f7d9a65a524e72d8b5ba0074 | 1abd977b5e5506e33e33596dc519047ba06b531e | /R/retrieve.probesets.R | 3f3e682c33dcd8ecf448cec8b1513314dc816bf0 | [
"BSD-2-Clause-Views",
"BSD-2-Clause"
] | permissive | openresearchlabs/HITChipDB | a46c2b69a31d2d0eff290a318b5a2171c40c2d06 | 138b714ae4f14652479bd2cac9e42c64cc944ca0 | refs/heads/master | 2022-09-22T15:03:33.149542 | 2020-06-03T08:02:28 | 2020-06-03T08:02:28 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,070 | r | retrieve.probesets.R | #' retrieve.probesets
#' List the probes belonging to each probeset
#'
#' @param tax.table data.frame with oligo - phylotype
#' mapping information
#' @param level phylotype level at which probesets are formed
#' @param name optional character vector of phylotypes to restrict to;
#' by default every phylotype present at \code{level} is returned
#'
#' @return A named list: for each phylotype, the character vector of its
#' unique oligo identifiers.
#'
#' @export
#' @references See citation('microbiome')
#' @author Contact: Leo Lahti \email{microbiome-admin@@googlegroups.com}
#' @keywords utilities
retrieve.probesets <- function(tax.table, level = "species", name = NULL) {
  tax.table <- as.data.frame(tax.table)
  # Default to all phylotypes observed at the requested level
  if (is.null(name)) {
    name <- unique(as.character(tax.table[[level]]))
  }
  selected <- tax.table[tax.table[[level]] %in% name, ]
  grouping <- selected[[level]]
  # Drop unused factor levels so no empty probesets are produced
  if (is.factor(grouping)) {
    grouping <- droplevels(grouping)
  }
  # Split the oligo identifiers by phylotype and deduplicate within groups
  lapply(split(as.character(selected$oligoID), grouping), unique)
}
|
26b890767f3fd723c9d21c5b62e32b5066864d5f | e5283914d18d8e09a8d279bbcd184506ff7d44b1 | /scripts/ucs_modernize_0923.R | e61afe88dc60969beb86f8bcf872380e8315e1f2 | [] | no_license | criticalgeo/satinnovation | 4aa408d5737463f773dbe767dd6c7609c69f72b0 | f929798487675f866a5caabbedcbc8b15ad99c93 | refs/heads/master | 2022-12-25T12:34:45.576283 | 2020-10-08T19:45:51 | 2020-10-08T19:45:51 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 438 | r | ucs_modernize_0923.R | # REPLACING NAMES 09/23 FROM MANUAL SEARCH (MODERNIZE) #
# Replace operator/owner and contractor names in the UCS satellite table with
# the manually curated replacements from name_replace_0923.csv, then write
# the modernised table back out.
#
# FIX: the original used apply() over data-frame rows with `<<-` assignments
# into the global `ucs_sats`; apply() coerces each row to a character vector
# and `<<-` mutates global state from inside a function, both fragile.
# A plain row loop performs the same positional-column replacement
# (column 2 = opown, column 3 = contractor) explicitly.
ucs_sats <- read.csv("data/processed/ucs_sats_corr_0914.csv")
replace <- read.csv("data/processed/name_replace_0923.csv")
for (i in seq_len(nrow(replace))) {
  old_name <- replace$database_name[i]
  new_name <- replace$replacement[i]
  ucs_sats[ucs_sats$opown == old_name, 2] <- new_name
  ucs_sats[ucs_sats$contractor == old_name, 3] <- new_name
}
write.csv(ucs_sats, "data/processed/modern_ucssats_0923.csv")
|
8c56ae3d962f6b84c156ea078165ada8febbee2b | d00503fecf1d364f53670cf938a46e8837e02016 | /run_analysis.R | 738b7a2e3f314a44fb6e5b5aedb5dd53e530ca91 | [] | no_license | EGovender/Getting-and-Cleaning-Data | 0d54b4608d28d1598c0a6008df965695cccdf0a3 | 8bc17ff3025e4fa5e05ee67fa8d8dfb1b9155d0a | refs/heads/master | 2021-01-01T05:28:16.064636 | 2016-04-24T21:00:11 | 2016-04-24T21:00:11 | 56,992,552 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,003 | r | run_analysis.R | ## 1.Merges the training and the test sets to create one data set.
##Read training dataset
X_train<-read.table("./getdata_projectfiles_UCI HAR Dataset/UCI HAR Dataset/train/X_train.txt",header=FALSE)
y_train<-read.table("./getdata_projectfiles_UCI HAR Dataset/UCI HAR Dataset/train/y_train.txt",header=FALSE)
subject_train<-read.table("./getdata_projectfiles_UCI HAR Dataset/UCI HAR Dataset/train/subject_train.txt",header=FALSE)
##Read test dataset
X_test<-read.table("./getdata_projectfiles_UCI HAR Dataset/UCI HAR Dataset/test/X_test.txt",header=FALSE)
y_test<-read.table("./getdata_projectfiles_UCI HAR Dataset/UCI HAR Dataset/test/y_test.txt",header=FALSE)
subject_test<-read.table("./getdata_projectfiles_UCI HAR Dataset/UCI HAR Dataset/test/subject_test.txt",header=FALSE)
##Read feature vector
features<-read.table("./getdata_projectfiles_UCI HAR Dataset/UCI HAR Dataset/features.txt",header=FALSE)
##Set the column names of datasets
colnames(X_train)<-features$V2
colnames(y_train)<-"label"
colnames(subject_train)<-"subject"
colnames(X_test)<-features$V2
colnames(y_test)<-"label"
colnames(subject_test)<-"subject"
##Combine train and test datasets with subject datasets
train<-cbind(subject_train,y_train,X_train)
test<-cbind(subject_test,y_test,X_test)
##row combine training and test dataset
complete<-rbind(train, test)
complete <- rbind(cbind(subject_train,y_train,X_train),cbind(subject_test,y_test,X_test))
## 2.Extracts only the measurements on the mean and standard deviation for each measurement.
meanstd<-features[grep("mean\\(\\)|std\\(\\)", features$V2),]
completeMeanStd<-complete[,c(1, 2, meanstd$V1+2)]
## 3.Uses descriptive activity names to name the activities in the data set
activitylabels<-read.table("./getdata_projectfiles_UCI HAR Dataset/UCI HAR Dataset/activity_labels.txt", header=FALSE)
newlabels<-c("WALKING","WALKING_UPSTAIRS","WALKING_DOWNSTAIRS","SITTING","STANDING","LAYING")
complete$label<-factor(complete$label,levels=c(1,2,3,4,5,6),labels=newlabels)
## 4.Appropriately labels the data set with descriptive variable names.
NewColNames<-colnames(completeMeanStd)
NewColNames<-gsub("[Aa]cc","Accelerometer",NewColNames)
NewColNames<-gsub("[Gg]yro","Gyroscope",NewColNames)
NewColNames<-gsub("[Ff]req","Frequency",NewColNames)
NewColNames<-gsub("[Mm]ag","Magnitude",NewColNames)
NewColNames<-gsub("tBody","timeBody",NewColNames)
NewColNames<-gsub("tGravity","timeGravity",NewColNames)
NewColNames<-gsub("fBody","fastFourierTransformBody",NewColNames)
NewColNames<-tolower(NewColNames)
colnames(completeMeanStd)<-NewColNames
## 5.From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
final <- aggregate(completeMeanStd[, 3:ncol(completeMeanStd)],by=list(subject = completeMeanStd$subject,label = completeMeanStd$label),mean)
write.table(final, file="tidy_data.txt", row.names=FALSE, col.names=TRUE) |
ba33fed9fd081a7cc75710a7e211a18123f4247d | b46e30a53da84b294a155d29002fd9680b992769 | /ui.R | 23c80cc80b54f6b7b87628db593a856ae91c6378 | [] | no_license | newonewon/DevelopingDataProducts | 4862ffa493fd4a7b4cb7c0001701140974ed17af | 5d625f51643b1b1b3e603f8c1d6833f5997b4b6d | refs/heads/master | 2021-01-20T18:28:25.671232 | 2016-08-16T07:51:01 | 2016-08-16T07:51:01 | 65,708,472 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,557 | r | ui.R | # Coursera - Developing Data Products- Course Project
# ui.R file for the shiny app
# This app was developed to help people choose the best car for their trip, using mtcars dataset, from [R]
library(markdown)
shinyUI(navbarPage("Select the best car for your trip",
tabPanel("Table",
# Sidebar
sidebarLayout(
sidebarPanel(
helpText("Please enter information about your trip and the parameters of your car"),
numericInput('dis', 'Distance (in miles):', 25, min = 1, max = 500),
numericInput('cost', 'Gasoline Price (per gallon):',2.0 , min = 1.5, max = 3.5, step=0.01),
numericInput('gas', 'Maximum expenditure on gasoline:', 100, min=1, max=2500),
checkboxGroupInput('cyl', 'Number of cylinders:', c("Four"=4, "Six"=6), selected = c(4,6)),
sliderInput('hp', 'Gross horsepower', min=10, max=400, value=c(10,400), step=10),
checkboxGroupInput('am', 'Transmission:', c("Automatic"=0, "Manual"=1), selected = c(0,1))
),
mainPanel(
dataTableOutput('table')
)
)
)
)
) |
66b3acd0115cac65424c2b9ea6dc6cf79c26405c | d00a38a82667b4ad6b3f13874b117d5809f487ee | /abc.R | fe3428776f714d0a6d550c9d60089bef9e9904a9 | [
"MIT"
] | permissive | RonRichman/ABC_pricing | e8a63cd9b5edc0694997bb9cdf580a93ff130998 | 19ed99cc3d5463794b2d398d5cfb53a9f7ffe413 | refs/heads/master | 2021-04-28T20:34:26.873655 | 2018-02-19T05:08:21 | 2018-02-19T05:08:21 | 121,928,422 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,995 | r | abc.R | require(foreign)
require(data.table)
require(plyr)
require(maggritr)
require(reshape2)
require(broom)
require(data.table)
require(reshape2)
require(rgl)
require(stringr)
require(gnm)
require(dplyr)
require(ggplot2)
require(RColorBrewer)
require(gridExtra)
require(scales)
require(foreign)
require(lubridate)
require(mgcv)
require(reshape)
library(rvest)
require(lme4)
require(splines)
require(gridExtra)
dat = fread("c:/r/prices_motor.csv")
# Naive calibration of a flat frequency/severity pair against quoted premiums.
#
# pars: c(claim frequency, claim severity); lr: assumed loss ratio;
# dat: table with columns Premium (monthly premium) and Excess (per-claim
# excess). Prices each row as freq*(sev - Excess)/lr/12 and returns the
# summed absolute relative deviation from the quoted Premium.
#
# NOTE(review): the defaults `lr = lr` and `dat = dat` are self-referential;
# evaluating them (calling my_optim(pars) alone) would error with
# "promise already under evaluation". Both arguments are always supplied
# below, so the defaults are dead weight.
my_optim = function(pars,lr=lr ,dat=dat ){
freq = pars[1]
sev = pars[2]
# copy into a fresh data.table so := below does not modify the caller's data
dat = dat %>% data.table()
dat[,Priced := freq*(sev-Excess)/lr/12]
return(dat[,sum(abs(Premium/Priced-1))])
}
# objective at the starting point, then minimise over (freq, sev)
my_optim(c(0.25, 25000), 0.75, dat)
naive = optim(c(0.25, 25000), my_optim, lr = 0.7, dat=dat)
naive$par
### Frequency
# Elicit a Beta prior for the annual claim frequency by moment matching:
# find (a, b) whose Beta mean and sd are closest (in summed relative error)
# to the elicited mean 0.25 and sd 0.075.
mean_freq = 0.25
sd_freq = 0.075
# closed-form mean and sd of a Beta(a, b) distribution
mean_beta = function(a,b) a/(a+b)
sd_beta = function(a,b) ((a*b/((a+b)^2 *(a+b+1)))^0.5)
# objective: summed relative deviation of target moments from Beta moments
beta_optim = function(pars){
a = pars[1]
b = pars[2]
mean_dev = abs(mean_freq/mean_beta(a,b)-1)
sd_dev= abs(sd_freq/sd_beta(a,b)-1)
return(mean_dev + sd_dev)
}
# default Nelder-Mead; (a, b) are unconstrained here, so negative trial
# values can yield NaN objective values during the search (harmless)
beta_par = optim(c(0.1,0.1), beta_optim)
a=beta_par$par[1]
b=beta_par$par[2]
# visual sanity check of the fitted prior
rbeta(10000, a, b) %>% hist
### Severity - Mean
# Elicit a Gamma prior for the mean claim severity by moment matching the
# elicited mean (20000) and sd (2500). Shape/rate are optimised on the log
# scale (exp transform) to keep them positive.
mean_sev = 20000
sd_sev = 2500
# closed-form mean and sd of a Gamma(shape = a, rate = b) distribution
mean_gamma = function(a,b) a/b
sd_gamma = function(a,b) ((a/(b)^2)^0.5)
# objective: summed relative deviation of target moments from Gamma moments
gamma_optim = function(pars){
a = exp(pars[1])
b = exp(pars[2])
mean_dev = abs(mean_sev/mean_gamma(a,b)-1)
sd_dev= abs(sd_sev/sd_gamma(a,b)-1)
return(mean_dev+sd_dev)
}
# FIX: optim()'s third positional argument is the gradient `gr`, so the
# original optim(c(1,1), gamma_optim, "BFGS", ...) silently kept the default
# Nelder-Mead method; the method must be passed by name.
gamma_par = optim(c(1,1), gamma_optim, method = "BFGS", control = list(maxit=10000))
alpha = exp(gamma_par$par[1])
beta = exp(gamma_par$par[2])
# visual sanity check of the fitted prior
hist(rgamma(10000, alpha, beta))
### Severity - sd
# Elicit a Gamma prior for the *standard deviation* of claim severity,
# moment-matched to mean 10000 and sd 2500.
# NOTE(review): this reuses (and overwrites) the names sd_sev, mean_gamma,
# sd_gamma and gamma_optim from the severity-mean section above — fragile
# if the sections are ever run out of order.
sd_sev = 10000
sd_sd_sev = 2500
# closed-form mean and sd of a Gamma(shape = a, rate = b) distribution
mean_gamma = function(a,b) a/b
sd_gamma = function(a,b) ((a/(b)^2)^0.5)
# objective: summed relative deviation of target moments from Gamma moments
gamma_optim = function(pars){
a = exp(pars[1])
b = exp(pars[2])
mean_dev = abs(sd_sev/mean_gamma(a,b)-1)
sd_dev= abs(sd_sd_sev/sd_gamma(a,b)-1)
return(mean_dev+sd_dev)
}
# FIX: as in the severity-mean fit, "BFGS" was previously passed positionally
# as the gradient argument `gr`, silently leaving the method at Nelder-Mead;
# it must be passed as method = "BFGS".
gamma_par = optim(c(1,1), gamma_optim, method = "BFGS", control = list(maxit=10000))
alpha_sd = exp(gamma_par$par[1])
beta_sd = exp(gamma_par$par[2])
# visual sanity check of the fitted prior
hist(rgamma(10000, alpha_sd, beta_sd))
### log-norm
# Convert a (mean, sd) pair into lognormal parameters: the meanlog `mu` and
# the log-scale variance `sigma2` (note: a variance, not an sdlog), returned
# as an unnamed two-element list. Vectorize() makes the scalar function
# applicable element-wise to whole columns.
lnorm_par = function(mean, sd) {
  sigma2 = log((sd / mean)^2 + 1)
  mu = log(mean) - sigma2 / 2
  list(mu, sigma2)
}
lnorm_par = Vectorize(lnorm_par)
### Loss ratio
# Elicit a Beta prior for the loss ratio by moment matching: find (a, b)
# whose Beta mean and sd are closest to the elicited mean 0.7 and sd 0.025.
mean_freq_lr = 0.7
sd_freq_lr = 0.025
# closed-form mean and sd of a Beta(a, b) distribution
# (redefines the helpers from the frequency section with identical bodies)
mean_beta = function(a,b) a/(a+b)
sd_beta = function(a,b) ((a*b/((a+b)^2 *(a+b+1)))^0.5)
# objective: summed relative deviation of target moments from Beta moments
beta_optim = function(pars){
a = pars[1]
b = pars[2]
mean_dev = abs(mean_freq_lr/mean_beta(a,b)-1)
sd_dev= abs(sd_freq_lr/sd_beta(a,b)-1)
return(mean_dev + sd_dev)
}
beta_lr_par = optim(c(0.1,0.1), beta_optim)
a_lr=beta_lr_par$par[1]
b_lr=beta_lr_par$par[2]
# visual sanity check of the fitted prior
rbeta(10000, a_lr, b_lr) %>% hist
#### compound
# Draw N_sims parameter scenarios from the fitted priors; per scenario,
# simulate a claim count and derive lognormal severity parameters.
N_sims = 100000
claims = data.table(freq = rbeta(N_sims, a, b),
acpc = rgamma(N_sims, alpha, beta),
acpc_sd = rgamma(N_sims, alpha_sd, beta_sd),
LR = rbeta(N_sims, a_lr, b_lr))
# scenario index (data.table row number)
claims[,run:=.I]
# one Poisson claim-count draw per scenario frequency; Vectorize() lets the
# scalar helper be applied to the whole freq column
my_pois = function(lambda) rpois(1, lambda)
my_pois = my_pois %>% Vectorize()
claims[, n:= my_pois(freq)]
# meanlog / log-variance of the lognormal severity, computed row-wise
# (lnorm_par returns a two-element list; by = run applies it per scenario)
claims[,c("mu", "sigma2"):=lnorm_par(acpc, acpc_sd),by=run]
# Simulate the aggregate (compound) net claim amount for one scenario row.
#
# dat: list / one-row data frame with
#   n      - number of claims (non-negative integer)
#   mu     - meanlog of the lognormal severity (from lnorm_par)
#   sigma2 - *variance* of the log severity (lnorm_par's sigma2)
#   excess - per-claim excess; claim amounts below it contribute zero
# Returns the sum of simulated per-claim amounts net of the excess
# (0 when n == 0).
#
# FIX: the original passed sigma2 straight to rlnorm(), whose third argument
# is sdlog (a standard deviation). lnorm_par() computes sigma2 = log(cv^2+1),
# i.e. the variance of the log, so the correct sdlog is sqrt(sigma2);
# without the square root the simulated severities were over-dispersed.
compound <- function(dat) {
  n <- dat$n
  mu <- dat$mu
  sigma2 <- dat$sigma2
  excess <- dat$excess
  if (n == 0) {
    0
  } else {
    claims <- rlnorm(n, mu, sqrt(sigma2))
    # clip each claim at the excess: amounts below it are fully retained
    net_claims <- pmax(claims - excess, 0)
    sum(net_claims)
  }
}
# Bin each prior draw into ninths per parameter and build a composite id;
# draws sharing an id are treated as "similar" parameter combinations later.
claims[,freq_bin :=ntile(freq,9)]
claims[,sev_bin :=ntile(acpc,9)]
claims[,sev_sd_bin :=ntile(acpc_sd,9)]
claims[,lr_bin :=ntile(LR,9)]
claims[,id:=paste0(freq_bin, sev_bin, sev_sd_bin, lr_bin)]
# For each quoted excess level: simulate the aggregate net claim per scenario
# (compound() reads the shared `excess` column) and convert it to a monthly
# premium by dividing by the loss ratio and 12.
# NOTE(review): the six stanzas below differ only in the excess value; the
# ddply over every run is repeated six times, which dominates run time.
claims[,excess:=9845]
claims$prem_9845 = ddply(claims, .(run), compound)$V1
claims[,prem_9845 :=prem_9845/LR/12]
claims[,excess:=7620]
claims$prem_7620 = ddply(claims, .(run), compound)$V1
claims[,prem_7620 :=prem_7620/LR/12]
claims[,excess:=4840]
claims$prem_4840 = ddply(claims, .(run), compound)$V1
claims[,prem_4840 :=prem_4840/LR/12]
claims[,excess:=4580]
claims$prem_4580 = ddply(claims, .(run), compound)$V1
claims[,prem_4580 :=prem_4580/LR/12]
claims[,excess:=3920]
claims$prem_3920 = ddply(claims, .(run), compound)$V1
claims[,prem_3920 :=prem_3920/LR/12]
claims[,excess:=4515]
claims$prem_4515 = ddply(claims, .(run), compound)$V1
claims[,prem_4515 :=prem_4515/LR/12]
# long format: one row per (scenario, excess level)
claims_melt = claims %>% melt(measure.vars = c("prem_9845", "prem_7620", "prem_4840",
"prem_4580", "prem_3920", "prem_4515")) %>% data.table()
# ABC-style acceptance: keep parameter-bin ids whose simulated premiums sit
# close to the quoted premiums, then compare prior vs accepted draws.
reasonable = claims_melt[,.(N=.N, mean = mean(value)), keyby = .(id, variable)]
# attach the matching quoted premium per excess level
dat[,variable:=paste0("prem_", Excess)]
dat %>% setkey(variable)
reasonable %>% setkey(variable)
reasonable = reasonable %>% merge(dat[,c(1,3), with=F], allow.cartesian = T)
# relative distance between simulated mean premium and quoted premium
reasonable[,distance:=abs(Premium/mean-1)]
reasonable = reasonable[,.(dist_mean = mean(distance), dist_median = median(distance)), keyby = id]
reasonable = reasonable[order(dist_median)]
reasonable %>% ggplot(aes(x = dist_mean, y = dist_median)) + geom_point()
# acceptance threshold: median relative distance below 8%
reasonable = reasonable[dist_median<0.08]
# accepted draws (columns 1:4 = freq, acpc, acpc_sd, LR) vs fresh prior draws
reasonable_pars = claims[id %in% reasonable$id][,c(1:4),with=F]
reasonable_pars[,type:="posterior"]
prior_draws = reasonable_pars[,.N]
prior = data.table(freq = rbeta(prior_draws, a, b),
acpc = rgamma(prior_draws, alpha, beta),
acpc_sd = rgamma(prior_draws, alpha_sd, beta_sd),
LR = rbeta(prior_draws, a_lr, b_lr))
prior[,type:="prior"]
reasonable_pars = rbind(reasonable_pars,prior)
#require(GGally)
#ggpairs(reasonable_pars[type == "posterior"], aes( alpha = 0.2, fill=type))
reasonable_pars[,run:=.I]
reasonable_pars = reasonable_pars %>% melt(id.vars = c("run", "type")) %>% data.table
# marginal prior vs posterior densities per parameter
reasonable_pars %>% ggplot(aes(x = value, fill = type, alpha=.5))+geom_density()+facet_wrap(~variable, scales='free')+
scale_alpha_continuous(guide=F)
ggsave("c:/r/posterior.jpg", device = "jpg")
# posterior-mean premium per excess level, set against the quoted premium
results = claims_melt[id %in% reasonable$id,.(N=.N, mean = mean(value)), keyby = .(variable)]
results = merge(dat,results)
ggplot()+geom_point(data=results, aes(x = variable, y = Premium))+
geom_point(data=results, aes(x = variable, y = mean, colour = "red"))+
scale_color_discrete(guide=F)
ggsave("c:/r/prices.jpg", device = "jpg")
### individual policies
# per-scenario loss ratios implied by charging the posterior-mean premium
ind_pol_results = claims_melt[id %in% reasonable$id] %>% setkey(variable)
ind_pol_prems = results[,.(premiums = first(mean)),keyby=variable]
ind_pol_results = merge(ind_pol_results,ind_pol_prems)
ind_pol_results[,LRs:=value/premiums*LR]
ind_pol_results[,mean(LRs), keyby= variable]
ind_pol_results %>% ggplot(aes(x=LRs))+geom_density(aes(fill = variable))+facet_wrap(~variable, scales = "free")
|
ac87f377c61569def704c12118224c0bf1247a94 | 4b92cdefa377126dfbf2e79a831d82ec10f083b1 | /inst/unitTests/test_functions.R | d8b3defaa1003ac5dc4fcbda7c37d88fb961a6bf | [
"LicenseRef-scancode-us-govt-public-domain",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | DataSciBurgoon/aop | afe589a69a9f5431f59e860a1e0e49fdca1f04ed | 323b08977970cc7f76eeccb1e299b9844a9e0be7 | refs/heads/master | 2020-04-08T15:04:56.472133 | 2015-09-29T15:54:41 | 2015-09-29T15:54:41 | 41,174,739 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 442 | r | test_functions.R | test_aop_backdoor <- function(){
library(graph)
library(rjson)
steatosis_json_file <- system.file("extdata", "steatosis_aop_json.cyjs", package = "aop")
steatosis_aop <- convert_cytoscape_to_aop(steatosis_json_file)
steatosis_aop_graph <- convert_aop_to_graph(steatosis_aop)
checkEquals("389", aop_backdoor(steatosis_aop_graph, "391", "388")[1])
checkEquals("392", aop_backdoor(steatosis_aop_graph, "391", "388")[2])
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.