blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1e6ec1ac7a2738c4b9a1e60e72f8b4804dc91b58 | 2e40146ce11e802417170247ba5ff59a2d15fe91 | /test.R | 80c6fbea4a32e23e9e76e14ea0424c81c48196e5 | [] | no_license | zfredyu/GitAndR | 85a10ac3eaf3430ce0e8d27cd0c4156bdf572f97 | f6712ef9f94b8276fef4b2e5c5efb4d734024344 | refs/heads/master | 2023-02-13T03:18:06.113218 | 2021-01-14T17:07:51 | 2021-01-14T17:07:51 | 329,680,246 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 103 | r | test.R | get_yesterday <- function(){
  # Return yesterday's date as a Date object.
  # BUG FIX: Sys.Data() does not exist in base R; the correct function is Sys.Date().
  x <- (Sys.Date()-1)
  return(x)
  ## adding a comment from github web
}
|
f57027aaafe267ccb988a0997d9db722be18d2d8 | d87171bd1f93476112f101c7f6789dd973d57d34 | /R/eyer_package_check.R | 55841f31c9cc5cf23d852184b0466eec8f6b8498 | [] | no_license | DejanDraschkow/TestR | 9cfb90fc0ce0b99b5fe8b96aca82d8de9e761988 | b18c12026fd184a50833d768d806b01f44639684 | refs/heads/master | 2021-01-01T15:53:25.193260 | 2018-02-27T14:06:06 | 2018-02-27T14:06:06 | 97,723,703 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 964 | r | eyer_package_check.R |
# Ensure all packages required by the eyer analysis are installed and attached.
# Each branch uses the check-then-install idiom: require() returns FALSE (instead
# of erroring) when a package cannot be loaded, in which case the package is
# installed on the fly and then attached with library().
# NOTE(review): biocLite() has been deprecated in favour of BiocManager::install()
# on current Bioconductor releases -- TODO confirm the target R/Bioconductor version
# before modernizing that branch.
EyerPackageCheck <- function(){
  if (!require("EBImage")) {
    # EBImage is distributed via Bioconductor, not CRAN.
    source("https://bioconductor.org/biocLite.R")
    biocLite("EBImage")
    library(EBImage)
  }
  if (!require("png")) {
    install.packages("png", dependencies = TRUE)
    library(png)
  }
  if (!require("ggplot2")) {
    install.packages("ggplot2", dependencies = TRUE)
    library(ggplot2)
  }
  if (!require("grid")) {
    # grid ships with base R; this branch should normally never trigger.
    install.packages("grid", dependencies = TRUE)
    library(grid)
  }
  if (!require("emov")) {
    install.packages("emov", dependencies = TRUE)
    library(emov)
  }
  if (!require("reshape2")) {
    install.packages("reshape2", dependencies = TRUE)
    library(reshape2)
  }
  if (!require("plyr")) {
    install.packages("plyr", dependencies = TRUE)
    # BUG FIX: this branch previously called library(dplyr), so a freshly
    # installed plyr was never actually attached.
    library(plyr)
  }
  if (!require("dplyr")) {
    install.packages("dplyr", dependencies = TRUE)
    library(dplyr)
  }
  if (!require("dtplyr")) {
    install.packages("dtplyr", dependencies = TRUE)
    library(dtplyr)
  }
}
|
d433d6346085650f9946bd34c5d2c1ff6a7d68c4 | 94135de9e1c97efc3e23a295a1b842a309600489 | /R/filteR.R | db2ecf955f87730ac49e15e31cc377bd3dbb62ec | [
"MIT"
] | permissive | inambioinfo/RNAseqAnalysis | 8b4338e8cc02c9b9fc2187f0d76c23761ae264bb | 07684e01792866ffb9318d52ffdf182fec64b0de | refs/heads/master | 2020-05-27T02:08:36.059712 | 2018-09-21T12:55:46 | 2018-09-21T12:55:46 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,514 | r | filteR.R | #'@title Trimming and filtering
#'
#'@description trim and filter raw reads containing N, trim as soon as 2 of 5 nucleotides has quality encoding less than "4" (phred score 20) and drop reads that are less than 36nt
#'@import ShortRead
#'@param a a character(1) with nchar(a) == 1L giving the letter at or below which a nucleotide is marked as failing. The default is "4" (phred score 20).
#'@param b drop reads that are less than the number of nucleotides given in b. Default is 36nt
#'@details Filters and trims each user-selected raw data file (in .fastq.gz format) based on three criteria: raw reads containing N are removed; reads are trimmed as soon as their quality drops below the phred score defined by the user (phred score 20 by default); and sequence reads with fewer bases than the user-supplied value (36nt by default) are removed.
#'@export
#'@seealso \code{\link{filterFastq}}
#'@examples
#'
#'\dontrun{
#'filteR()
#'filteR(a = "4", b = 38)
#'}
#'
# Interactively trim and quality-filter up to 20 FASTQ files, writing the
# filtered reads to results/filtered_data/.  See the roxygen block above for
# the parameter contract (a = quality letter cutoff, b = minimum read length).
filteR <- function (a = "4", b = 36) {
  #To ignore the warnings during usage
  # NOTE(review): this changes warn globally for the whole session and is never
  # restored -- consider old <- options(warn = -1); on.exit(options(old)).
  options(warn=-1)
  options("getSymbols.warning4.0"=FALSE)
  # Output goes to results/filtered_data relative to the current directory;
  # dir.create() warns (suppressed above) if the folder already exists.
  setwd("results")
  dir.create("filtered_data")
  setwd("filtered_data")
  # Hard cap of 20 input files per invocation.
  x <- 1:20
  for (val in x) {
    # In an interactive session the user picks each input file and names the
    # output; the non-interactive branch uses fixed paths (used by examples/tests).
    if (interactive()) {
      fl <- file.choose()
    } else {
      fl <- "/home/disc/new_test/inst/dataset/SRR5253683_1.fastq.gz"
    }
    if (interactive()) {
      destination <- readline(prompt="Enter name for output file ")
    } else {
      destination <- "SRR5253683_1.fastq.gz"
    }
    # Stream the FASTQ in chunks so large files never sit fully in memory.
    stream <- open(FastqStreamer(fl))
    # NOTE(review): on.exit() without add = TRUE replaces the previous handler on
    # every iteration, so only the last stream is closed at function exit; streams
    # from earlier iterations leak -- consider close(stream) at the end of each
    # iteration instead.
    on.exit(close(stream))
    # Read the whole file once just to print a summary of the raw input.
    ft <- readFastq(fl)
    print(ft)
    repeat {
      ## input chunk
      fq <- yield(stream)
      if (length(fq) == 0)
        break
      ## An Introduction to ShortRead 5
      ## trim and filter, e.g., reads cannot contain 'N'...
      fq <- fq[nFilter()(fq)] # see ?srFilter for pre-defined filters
      ## trim as soon as 2 of 5 nucleotides has quality encoding less
      ## than "4" (phred score 20)
      # NOTE(review): arguments map positionally to trimTailw(object, k, a, halfwidth);
      # the final 2 is the halfwidth (window of 2*2+1 = 5 nucleotides).
      fq <- trimTailw(fq, 2, a = a, 2)
      ## drop reads that are less than 36nt
      fq <- fq[width(fq) >= b]
      ## append to destination
      writeFastq(fq, destination, "a")
    }
    #fs <- readFastq(fq)
    # print(fs)
    if (interactive()) {
      n <- readline(prompt="Enter 0 TO STOP EXECUTION/ 1 TO CONTINUE: ")
    } else {
      n <- 0
    }
    # readline() returns a character; "0" == 0 is TRUE via implicit coercion,
    # so entering 0 (or any non-interactive run) stops after one file.
    if (n == 0)
    {
      break
    }
  }
  # Return to the directory the function started from (two levels up).
  setwd("..")
}
|
2894a7667f2b4cd72515875a4988ea935ac2f6e1 | 999cdce72d01e4f8af25f6499be03ea89e9b3738 | /src/dove_config.R | 02141d5d0a74304abc602612fde83d182df4d092 | [
"MIT"
] | permissive | dove-project/dove-frontend-r | cd3dd0414b0edcb18ad69f9f3af73189aaef0fc0 | 290406d9993a261812f0190e5d0602c4cbc4eb71 | refs/heads/main | 2023-04-05T01:59:03.690713 | 2021-04-22T19:45:29 | 2021-04-22T19:45:29 | 311,546,417 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,092 | r | dove_config.R | INSTR_FILE = "instr.asm"
# DATA_FILE = "data.csv"
# Prepare file for writing.
# Opening INSTR_FILE in mode "w" and immediately closing the connection
# truncates the file if it exists (or creates it empty) before the rest of
# the frontend appends instructions to it.
close(file(INSTR_FILE, open="w"))
# Load Pseudonyms from file.
# data_format <- read.csv(DATA_FILE, header=FALSE)
# dove_pseudonyms <- list()
# for (d in 1:nrow(data_format)) {
# # data_list <- list()
# # count <- 1
# # for (j in 1:data_format[d,3]) {
# # for (i in 1:data_format[d,2]) {
# # data_list[[count]] <- new_dove_pointer(dove_matrix_counter, i, j)
# # count <- count + 1
# # }
# # }
# dove_pseudonyms[[length(dove_pseudonyms) + 1]] <-
# dove.matrix(as.character(data_format[d,1]), data_format[d,2], data_format[d,3])
# # new_dove_matrix(list(), c(data_format[d,2], data_format[d,3]),
# # data_format[d,1])
# # if (data_format[i,2] == 1 && data_format[i,3] == 1) {
# # # Scalar value.
# # # Want to avoid dealing with factors, so we convert to char vector.
# # new_ptr <- new_dove_pointer(data_format[i,1], 1, 1)
# # dove_pseudonyms[[length(dove_pseudonyms) + 1]] <- new_ptr
# # }
# }
|
95a129c438a4f117d1bf4783d6f5d7ead348ca63 | b4c63e915976dcbea9295e51d22c16f58e581efb | /man/ReplicationBF-class.Rd | 0bc6695968d87503706ccfacc3cb5cb7731e974f | [
"MIT"
] | permissive | MarielleZZ/ReplicationBF | 1fa17a0d4e53359d5e34a702f5fc999aac2b8e49 | 9b507b053b396506db2dce6ff5fba97c10a9f437 | refs/heads/master | 2020-04-04T23:10:32.804692 | 2018-09-10T09:25:21 | 2018-09-10T09:25:21 | 156,349,458 | 0 | 0 | null | 2018-11-06T08:17:15 | 2018-11-06T08:17:15 | null | UTF-8 | R | false | true | 1,100 | rd | ReplicationBF-class.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aaaClasses.R
\docType{class}
\name{ReplicationBF-class}
\alias{ReplicationBF-class}
\title{General S4 class for a single Replication Bayes Factor}
\description{
General S4 class for a single Replication Bayes Factor
}
\section{Slots}{
\describe{
\item{\code{bayesFactor}}{The numerical value of the Replication Bayes Factor.}
\item{\code{approxMethod}}{Method used to approximate the marginal likelihoods.}
\item{\code{posteriorSamplesOriginal}}{A \code{data.frame} containing the samples
from the original study's posterior distribution.}
\item{\code{posteriorSamplesReplication}}{A \code{data.frame} containing samples
from the replication study's posterior distribution.}
\item{\code{test}}{A string that contains which test was used.}
\item{\code{functionCall}}{String containing the function call.}
\item{\code{originalStudy}}{Data from the original study that went into the analysis.}
\item{\code{replicationStudy}}{Data from the replication study that was used for the
calculation of the Bayes factor.}
}}
|
bb065ef6fa69adec91d842071fc6f95391f4cb53 | 2e6a078ac749e7da662197c0b5aedde6a6c4808b | /man/grouseticks.Rd | 00470158c29725c43d8df1555f99f7236590cc9f | [] | no_license | Stat990-033/Timings | 019fd14ed9f643f0e5d0d40163319dea80a17dff | 6a26e98ac79cc60f985ff8881d1f232321574065 | refs/heads/master | 2021-01-21T04:40:24.596854 | 2016-07-22T22:01:11 | 2016-07-22T22:01:11 | 36,084,717 | 3 | 1 | null | 2015-12-11T18:32:25 | 2015-05-22T17:04:54 | TeX | UTF-8 | R | false | false | 2,075 | rd | grouseticks.Rd | \name{grouseticks}
\alias{grouseticks}
\alias{grouseticks_agg}
\docType{data}
\title{
Data on red grouse ticks from Elston et al. 2001
}
\description{
Number of ticks on the heads of red grouse chicks
sampled in the field (\code{grouseticks})
and an aggregated version (\code{grouseticks_agg}); see original source for more details
}
\usage{data(grouseticks)}
\format{
\describe{
\item{\code{i}}{(factor) chick number (observation level)}
\item{\code{t}}{number of ticks sampled}
\item{\code{b}}{(factor) brood number}
\item{\code{h}}{height above sea level (meters)}
\item{\code{y}}{year (-1900)}
\item{\code{l}}{(factor) geographic location code}
\item{\code{ch}}{centered height, derived from \code{HEIGHT}}
\item{\code{meanTICKS}}{mean number of ticks by brood}
\item{\code{varTICKS}}{variance of number of ticks by brood}
}
}
\source{
Robert Moss, via David Elston
}
\details{\code{grouseticks_agg} is just a brood-level
aggregation of the data}
\references{
Elston, D. A., R. Moss, T. Boulinier, C. Arrowsmith, and
X. Lambin. 2001. "Analysis of Aggregation, a Worked Example: Numbers of
Ticks on Red Grouse Chicks." Parasitology 122 (05):
563-569. doi:10.1017/S0031182001007740.
\url{http://journals.cambridge.org/action/displayAbstract?fromPage=online&aid=82701}.
}
\examples{
data(grouseticks)
## Figure 1a from Elston et al
par(las=1,bty="l")
tvec <- c(0,1,2,5,20,40,80)
pvec <- c(4,1,3)
with(grouseticks_agg,plot(1+meanTICKS~HEIGHT,
pch=pvec[factor(YEAR)],
log="y",axes=FALSE,
xlab="Altitude (m)",
ylab="Brood mean ticks"))
axis(side=1)
axis(side=2,at=tvec+1,label=tvec)
box()
abline(v=405,lty=2)
## Figure 1b
with(grouseticks_agg,plot(varTICKS~meanTICKS,
pch=4,
xlab="Brood mean ticks",
ylab="Within-brood variance"))
curve(1*x,from=0,to=70,add=TRUE)
## Model fitting
form <- t~y+h+(1|b)+(1|i)+(1|l)
(full_mod1 <- glmer(form, family="poisson",data=grouseticks))
}
\keyword{datasets}
|
0b0ef1169fda05f7b1fe2ebed9616da08a44bf9e | 6bc756162a2c9dbc41e5c497bbdebbc95f9f2074 | /man/mulDiff.Rd | e8ca665e6c4a9144f298b5cc9691acd734702ad9 | [] | no_license | cisella/Mulcom | 0be6d52fb89c8417bcdf773a0de2950152454323 | 6e9b6a3a4d3087952c994c9d3e7d42a53c596706 | refs/heads/master | 2020-05-30T06:34:05.135802 | 2019-04-24T10:36:16 | 2019-04-24T10:36:16 | 21,429,531 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 980 | rd | mulDiff.Rd | \name{mulDiff}
\alias{mulDiff}
\title{MulCom Test Differential analysis}
\description{
Identify the differentially expressed features for a specific comparison with given m and t value}
\usage{
mulDiff(eset, Mulcom_P, m, t, ind)
}
\arguments{
\item{eset}{An ExpressionSet object from package Biobase}
\item{Mulcom_P}{An object of class Mulcom_P}
\item{m}{the m values for the analysis}
\item{t}{the t values for the analysis}
\item{ind}{an index referring to the comparison; should be numeric}
}
\value{
\item{eset}{An ExpressionSet object from package Biobase}
\item{Mulcom_P}{An object of class Mulcom_P}
\item{m}{the m values for the analysis}
\item{t}{the t values for the analysis}
\item{ind}{an index referring to the comparison; should be numeric}
}
\examples{
data(benchVign)
mulcom_perm <- mulPerm(Affy, Affy$Groups, 10, 7)
mulcom_diff <- mulDiff(Affy, mulcom_perm, 0.2, 2)
}
\author{Claudio Isella, \email{claudio.isella@ircc.it}}
\keyword{MulCom}
|
7ab9e0815c85b3fa3a8777b960b0b693434ae746 | b7e5362f3b323b2b65aba4056fa99d2dd5eb035e | /R/myclt2.R | a5527d8b30d26c98ae79ba725564812c3ff53f7c | [
"MIT"
] | permissive | ahanoch/StatsPackages | 327b3436f6ca34914aee5c0d5fb683d4da68fa51 | bee888a58267eba16cf09c7d6495c07ead9ae3d5 | refs/heads/master | 2023-04-16T18:02:16.727551 | 2021-04-23T13:47:00 | 2021-04-23T13:47:00 | 341,334,215 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 302 | r | myclt2.R | #' myclt
#'
#' @param n sample size: the number of uniform draws averaged in each iteration
#'   (NOTE(review): this was previously documented as "mean", which did not match the code)
#' @param iter number of iterations, i.e. how many sample means to simulate
#'   (NOTE(review): this was previously documented as "standard deviation")
#'
#' @return numeric vector of \code{iter} sample means; a histogram of their
#'   distribution is drawn as a side effect
#' @export
#'
#' @examples
#' m=myclt2(n=10,iter=10000)
myclt2=function(n,iter){
  y=runif(n*iter,0,5) #A  draw n*iter values from Uniform(0, 5)
  data=matrix(y,nr=n,nc=iter,byrow=TRUE) #B  one column per iteration (n draws each); nr/nc rely on partial matching of nrow/ncol
  sm2=apply(data,2,mean)  # column means: one sample mean per iteration
  hist(sm2)               # CLT demonstration: the means approach a normal shape
  sm2                     # returned value (auto-printed unless assigned)
}
|
5138ba7b4c3018a8b9c6d080a3130305dc125f51 | c24d1fd18c1cd08287ec718158c47f12a9b801b3 | /Scripts/4.MedianBetaatTSS/1.Median5hmc_atTSS.013020.R | 11637212db5551d8f38f6211abd648f5534372a3 | [] | no_license | Christensen-Lab-Dartmouth/PCNS_5hmC_06112020 | 68b516905bd12827fe476fd7f1b4c134f3bf0b48 | f89e8fbbae365369aa90e6a16827d1da96515abc | refs/heads/master | 2022-11-07T19:01:36.180358 | 2020-06-11T13:35:14 | 2020-06-11T13:35:14 | 271,554,640 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 24,017 | r | 1.Median5hmc_atTSS.013020.R | ##Find Mean 5(h)mc% at distance from TSS
##Nasim Azizgolshani
##12/11/2019
#Examine porportion level of cytosine modifications (5mC and 5hmC) of tumor vs non-tumor
#in relation to distance from TSS
#this is in our subset of top 5% 5hmC
# ==========================================================================================
# Step 1: Initialization
# ==========================================================================================
rm(list = ls())
# Load Required Packages
library(tidyverse)
library(GenomicRanges)
library(genomation)
library(RColorBrewer)
library(wesanderson)
library(ggsci)
# Set WD for folder
# NOTE(review): absolute, machine-specific path -- the script only runs as-is on
# this machine; a relative path or here::here() would make it portable.
setwd('/Users/nasimazizgolshani/Dropbox/christensen/PCNS_Analysis/PCNS')
# ==========================================================================================
# Step 2: Load Necessary Files
# ==========================================================================================
#Load all Median values for both 5mC and 5hmc for all tumors for each CpG site
#these median values are from unique tumors only
load("./Files/01162020_unique_hydroxy_methyl_ss.RData")
# Load EPIC annotation file created in section 3
load("./Files/brainSpecificAnnotation.RData")
#Take out probes in epic annotation with sex chromosomes and cross hybridizing probes
#by matching CpGs in my medians file
epicAnnotation <- epicAnnotation[row.names(epicAnnotation) %in% hydroxy_unique_ss$ID,] #743461
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Step 3: make Grange object for good probes from EPIC array
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Generate new variables for MAPINFO. A probe lists one location, to get genomic ranges we need two locations
epicAnnotation$pos_start = epicAnnotation$pos
epicAnnotation$pos_end <- epicAnnotation$pos + 1
#subset annotation to have only chr, pos, pos start and pos end
# NOTE(review): columns picked by position; 58/59 assume pos_start/pos_end were
# appended as the last two columns -- fragile if the annotation layout changes.
annotsub <- epicAnnotation[,c(1,2, 58, 59)]
#Create a 'GRanges' object from the Illumina annotation file
annot_gr <- makeGRangesFromDataFrame(annotsub, keep.extra.columns=TRUE, ignore.strand=TRUE, seqnames.field = "chr", start.field="pos_start", end.field="pos_end")
#Print the GRange object to see what it looks like
annot_gr
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Step 4: calculate distance and orientation of 5hmC relative to nearest canonical TSS
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Load UCSC reference genome annotation for hg19
#up.flank is how far up-stream from TSS want to detect promoter boundaries and downstream for down.flank
transcFeat <- readTranscriptFeatures('./Script/4.MedianBetasatTSS/Files/UCSC_hg19_refGene.bed',up.flank = 2000, down.flank = 2000)
# get indices of CpGs in annotation file that precede transcription start sites (TSSs)
tss_precede <- precede(annot_gr, transcFeat$TSSes)
# do the same for those that follow TSSs
tss_follow <- follow(annot_gr, transcFeat$TSSes)
# identify the nearest TSS for each of the CpGs
nearest_tss <- nearest(annot_gr, transcFeat$TSSes)
# index for these TSSs only
transcFeat_tss_sub <- transcFeat$TSSes[nearest_tss]
# calculate distance to nearest TSS for each 5hmC CpG
dist_to_TSS <- distance(annot_gr, transcFeat_tss_sub)
# divide by 1000 to convert to kilobases
#dist_to_TSS <- dist_to_TSS/1000
# round(x, -1) rounds each distance to the nearest 10 bp
dist_to_TSS <- round(dist_to_TSS, -1)
dist_to_TSS2 <-as.data.frame(dist_to_TSS)
#Add indices and distance to TSS to annotation file
annot_df <- as.data.frame(annot_gr)
annot_df$dist_to_TSS <- dist_to_TSS
annot_df$TSS_indices <- row.names(dist_to_TSS2)
#Add column describing direction of TSS and conditionally make upstream distances negative
annot_df$direction <- ifelse(nearest_tss==tss_precede,"upstream",
                             ifelse(nearest_tss!=tss_precede, "downstream", NA))
# NOTE(review): paste() makes dist_w_dir a character column; it is coerced back
# to integer with as.integer() at the end of this section.
annot_df$dist_w_dir <- ifelse(annot_df$direction=="upstream", paste(-annot_df$dist_to_TSS),annot_df$dist_to_TSS)
rm(dist_to_TSS2)
#save(annot_df,file="./Script/3.Top5_Enrichment/Downloaded_Annot_Files/dist_to_TSS.RData")
#combine summary stats for 5mc and 5hmc (2 separate data frames into one)
ss_unique_both <- merge(hydroxy_unique_ss, methyl_unique_ss, by ="ID")
rm(hydroxy_unique_ss)
rm(methyl_unique_ss)
#subset for only medians for simplicity
median_5mc_5hmc <- ss_unique_both[,c(1,2,5)]
#Must do the same now for control samples
load("./Files/01162020_ctl_ss.RData")
median_ctl <- ctl_ss[,c(1,2,5)]
#merge ctl and tumors
all_medians <- merge(median_ctl,median_5mc_5hmc, by="ID")
colnames(all_medians) <- c("ID", "Ctl_5hmc", "Ctl_5mc", "Tumor_5hmc","Tumor_5mc")
#subset annot_df to only distance and CpG ID
annot_df_sub <- annot_df[,c(9,10)]
annot_df_sub <- rownames_to_column(annot_df_sub,"ID")
#combine annotation dataframe with TSS distance with data frame with all sample
matched_dist_medians <- merge(annot_df_sub, all_medians, "ID")
matched_dist_medians$dist_w_dir <- as.integer(matched_dist_medians$dist_w_dir)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Step 5: Calculate Means of Tumor vs Control and Plot
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#First filter to less than 2000bp away from TSS and get medians of median beta value
#for each distance from TSS
TSS_2kb <- matched_dist_medians %>% filter(dist_w_dir <=2000) %>%
  filter(dist_w_dir >= (-2000)) %>%
  group_by(dist_w_dir) %>%
  summarize(M2_5hmc_Ctl= median(Ctl_5hmc),
            M2_5mc_Ctl = median(Ctl_5mc),
            M2_5hmc_Tumor = median(Tumor_5hmc),
            M2_5mc_Tumor = median(Tumor_5mc))
#Collapse data frame
G_DF = TSS_2kb %>%
  gather(key = "Measure", value = "Beta_value", -dist_w_dir) %>%
  #Rename column names so that legend will be cleaner
  mutate(Measure = ifelse(Measure=="M2_5hmc_Ctl", "5hmC Control",
                          ifelse(Measure=="M2_5mc_Ctl", "5mC Control",
                                 ifelse(Measure=="M2_5hmc_Tumor", "All Tumors 5hmC",
                                        ifelse(Measure=="M2_5mc_Tumor", "All Tumors 5mC", NA ))) ))
# NOTE(review): p1 is assigned but never printed or saved -- confirm whether
# this base-pair-resolution plot is still wanted.
p1 <- ggplot(G_DF, aes(x = dist_w_dir, y = Beta_value, colour = Measure)) +
  geom_line() +
  xlab("Distance from TSS (bp)") +
  ylab("Median of Median 5(h)mc") +
  theme_classic()+
  #scale_color_manual(values = wes_palette("Zissou1", n=4))
  scale_color_npg()
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Repeat just with resolution at 1kb
# Distances are rescaled to kilobases and rounded to 0.1 kb (100 bp bins).
matched_dist_medians <- matched_dist_medians %>%
  mutate(dist_w_dir_kb = dist_w_dir / 1000) %>%
  mutate(dist_w_dir_kb = round(dist_w_dir_kb,1))
#First filter to less than 2000bp away from TSS
TSS_2kb <- matched_dist_medians %>% filter(dist_w_dir_kb <=2.0) %>%
  filter(dist_w_dir_kb >= (-2.0)) %>%
  group_by(dist_w_dir_kb) %>%
  summarize(M2_5hmc_Ctl= median(Ctl_5hmc),
            M2_5mc_Ctl = median(Ctl_5mc),
            M2_5hmc_Tumor = median(Tumor_5hmc),
            M2_5mc_Tumor = median(Tumor_5mc))
G_DF = TSS_2kb %>%
  gather(key = "Measure", value = "Beta_value", -dist_w_dir_kb) %>%
  #Rename column names so that legend will be cleaner
  mutate(Measure = ifelse(Measure=="M2_5hmc_Ctl", "5hmC Control",
                          ifelse(Measure=="M2_5mc_Ctl", "5mC Control",
                                 ifelse(Measure=="M2_5hmc_Tumor", "5hmC Tumor",
                                        ifelse(Measure=="M2_5mc_Tumor", "5mC Tumor", NA ))) )) %>%
  mutate(Measure = factor(Measure, levels = c("5hmC Tumor", "5hmC Control", "5mC Tumor", "5mC Control")))
#Set colors for lines
library("scales")
show_col(pal_npg("nrc")(10))
# NOTE(review): ann_colors is defined but never used -- the hex codes are
# repeated inline inside scale_color_manual() below.
ann_colors <- list(c("#E64B35FF","#4DBBD5FF", "#F39B7FFF", "#3C5488FF"))
ggplot(G_DF, aes(x = dist_w_dir_kb, y = Beta_value, colour = Measure)) +
  geom_line() +
  xlab("Distance from TSS (kilo-bp)") +
  ylab("Median of Median 5(h)mc") +
  theme_classic()+
  theme(axis.text=element_text(size=14),
        axis.title=element_text(size=14),
        legend.title=element_text(size=12),
        legend.text=element_text(size=11))+
  scale_x_continuous(limits=c(-2, 2),breaks=c(-2.0, -1.5, -1.0, -0.5, 0 , 0.5, 1.0, 1.5, 2.0))+
  scale_color_manual(values = c("#E64B35FF","#4DBBD5FF", "#F39B7FFF", "#3C5488FF"))
# geom_vline(xintercept= 0.8)+
#scale_color_npg()
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Intermediate Range
#First filter to less than 10,000bp away from TSS
TSS_2kb <- matched_dist_medians %>% filter(dist_w_dir_kb <=10.0) %>%
  filter(dist_w_dir_kb >= (-10.0)) %>%
  group_by(dist_w_dir_kb) %>%
  summarize(M2_5hmc_Ctl= median(Ctl_5hmc),
            M2_5mc_Ctl = median(Ctl_5mc),
            M2_5hmc_Tumor = median(Tumor_5hmc),
            M2_5mc_Tumor = median(Tumor_5mc))
G_DF = TSS_2kb %>%
  gather(key = "Measure", value = "Beta_value", -dist_w_dir_kb) %>%
  #Rename column names so that legend will be cleaner
  mutate(Measure = ifelse(Measure=="M2_5hmc_Ctl", "5hmC Control",
                          ifelse(Measure=="M2_5mc_Ctl", "5mC Control",
                                 ifelse(Measure=="M2_5hmc_Tumor", "5hmC Tumor",
                                        ifelse(Measure=="M2_5mc_Tumor", "5mC Tumor", NA ))) )) %>%
  mutate(Measure = factor(Measure, levels = c("5mC Tumor", "5mC Control", "5hmC Control", "5hmC Tumor")))
ggplot(G_DF, aes(x = dist_w_dir_kb, y = Beta_value, colour = Measure)) +
  geom_line(size =1.2) +
  xlab("Distance from TSS (kilo-bp)") +
  ylab("Median 5(h)mc") +
  theme_classic()+
  theme(axis.text=element_text(size=16),
        axis.title=element_text(size=18),
        legend.title=element_text(size=20),
        legend.text=element_text(size=18))+
  scale_x_continuous(limits=c(-10, 10), breaks= seq(-10.0, 10.0, by =1.0))+
  scale_color_manual(values = c("#E64B35FF","#4DBBD5FF", "#F39B7FFF", "#3C5488FF"))
#scale_color_manual(values = wes_palette("Zissou1", n=4))
# geom_vline(xintercept= 0.8)+
#scale_color_npg()
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Longer Range
# round(x, -1) here bins kilobase distances to the nearest 10 kb.
matched_dist_medians$round_dist <- round(matched_dist_medians$dist_w_dir_kb, -1)
TSS_long_range <- matched_dist_medians %>% group_by(round_dist) %>%
  filter(round_dist < 500)%>%
  filter(round_dist > (-500)) %>%
  summarize(M2_5hmc_Ctl= median(Ctl_5hmc),
            M2_5mc_Ctl = median(Ctl_5mc),
            M2_5hmc_Tumor = median(Tumor_5hmc),
            M2_5mc_Tumor = median(Tumor_5mc))
G_DF = TSS_long_range %>%
  gather(key = "Measure", value = "Beta_value", -round_dist) %>%
  #Rename column names so that legend will be cleaner
  mutate(Measure = ifelse(Measure=="M2_5hmc_Ctl", "5hmC Control",
                          ifelse(Measure=="M2_5mc_Ctl", "5mC Control",
                                 ifelse(Measure=="M2_5hmc_Tumor", "5hmC Tumor",
                                        ifelse(Measure=="M2_5mc_Tumor", "5mC Tumor", NA ))) )) %>%
  mutate(Measure = factor(Measure, levels = c("5hmC Tumor", "5hmC Control", "5mC Tumor", "5mC Control")))
#plot
ggplot(G_DF, aes(x = round_dist, y = Beta_value, colour = Measure)) +
  geom_line() +
  xlab("Distance from TSS (kilo-bp)") +
  ylab("Median of Median 5(h)mc") +
  theme_classic()+
  theme(axis.text=element_text(size=14),
        axis.title=element_text(size=14),
        legend.title=element_text(size=12),
        legend.text=element_text(size=11))+
  #scale_color_manual(values = wes_palette("Zissou1", n=4))
  scale_color_npg()
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Step 6: Calculate Means of Tumor Subtypes vs Control and Plot
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Look to see if tumor subtype behaves similarly
#load unique tumor beta values
load("./Files/01162020_unique_tumor_betas.RData")
#load metadata
targets <- read.csv("./Files/Metadata_complete.csv", stringsAsFactors = FALSE)
#subset for tumors only (can use a shortcut since data.topvar is already exclusive to tumors)
#Need to Subset for Tumors Only
GliomaSamples <- targets %>% filter(Tumor_Type=="Glioma")
EpendymomaSamples <- targets %>% filter(Tumor_Type=="Ependymoma")
EmbryonalSamples <- targets %>% filter(Tumor_Type=="Embryonal")
Glioma5hmc <- tumor_5hmc_unique[,colnames(tumor_5hmc_unique)%in%GliomaSamples$Label.ID.]
Ependymoma5hmc <- tumor_5hmc_unique[,colnames(tumor_5hmc_unique)%in%EpendymomaSamples$Label.ID.]
Embryonal5hmc <- tumor_5hmc_unique[,colnames(tumor_5hmc_unique)%in%EmbryonalSamples$Label.ID.]
Glioma5mc <- tumor_5mc_unique[,colnames(tumor_5mc_unique)%in%GliomaSamples$Label.ID.]
Ependymoma5mc <- tumor_5mc_unique[,colnames(tumor_5mc_unique)%in%EpendymomaSamples$Label.ID.]
Embryonal5mc <- tumor_5mc_unique[,colnames(tumor_5mc_unique)%in%EmbryonalSamples$Label.ID.]
median_5hmc_glioma <- apply(Glioma5hmc, 1, median, na.rm=TRUE)
median_5hmc_epend <- apply(Ependymoma5hmc, 1, median, na.rm=TRUE)
median_5hmc_emb <- apply(Embryonal5hmc, 1, median, na.rm=TRUE)
median_5mc_glioma <- apply(Glioma5mc, 1, median, na.rm=TRUE)
median_5mc_epend <- apply(Ependymoma5mc, 1, median, na.rm=TRUE)
median_5mc_emb <- apply(Embryonal5mc, 1, median, na.rm=TRUE)
subtype_medians <- as.data.frame(cbind(median_5hmc_glioma,median_5hmc_epend,
median_5hmc_emb,median_5mc_glioma, median_5mc_epend,median_5mc_emb))
subtype_medians <- rownames_to_column(subtype_medians, "ID")
subtype_medians <- merge(subtype_medians,median_ctl, by ="ID" )
matched_subtype_medians <- merge(annot_df_sub, subtype_medians, "ID")
matched_subtype_medians$dist_w_dir <- as.integer(matched_subtype_medians$dist_w_dir)
#First filter to less than 2000bp away from TSS
TSS_2kb <- matched_subtype_medians %>% filter(dist_w_dir <= 2000) %>%
filter(dist_w_dir >= (-2000)) %>%
group_by(dist_w_dir) %>%
summarize(M2_5hmc_Gl= median(median_5hmc_glioma),
M2_5hmc_Ep = median(median_5hmc_epend),
M2_5hmc_Emb = median(median_5hmc_emb),
M2_5mc_Gl= median(median_5mc_glioma),
M2_5mc_Ep = median(median_5mc_epend),
M2_5mc_Emb = median(median_5mc_emb),
M2_5hmc_Ctl = median(Ctl_Median_5hmC),
M2_5mc_Ctl = median(Ctl_Median_5mC),)
#Rename column names so that legend will be cleaner
G_DF = TSS_2kb %>%
gather(key = "Measure", value = "Beta_value", -dist_w_dir) %>%
mutate(Measure = ifelse(Measure=="M2_5hmc_Gl", "5hmC Glioma",
ifelse(Measure=="M2_5hmc_Ep", "5hmC Ependymoma",
ifelse(Measure=="M2_5hmc_Emb", "5hmC Embryonal",
ifelse(Measure== "M2_5hmc_Ctl", "5hmC Control",
ifelse(Measure=="M2_5mc_Gl", "5mC Glioma",
ifelse(Measure== "M2_5mc_Ep", "5mC Ependymoma",
ifelse(Measure== "M2_5mc_Emb", "5mC Embryonal",
ifelse(Measure== "M2_5hmc_Ctl", "5hmC Control",
ifelse(Measure== "M2_5mc_Ctl", "5mC Control",NA)
)
)
)
)
)
)
)))
#plot
ggplot(G_DF, aes(x = dist_w_dir, y = Beta_value, colour = Measure)) +
geom_line() +
xlab("Distance from TSS (kilo-bp)") +
ylab("Median of Median 5(h)mc") +
theme_classic()+
#scale_color_brewer(palette ="Spectral")
scale_color_npg()
#Repeat just with resolution at 5kb
matched_subtype_medians <- matched_subtype_medians %>%
mutate(dist_w_dir_kb = dist_w_dir / 1000) %>%
mutate(dist_w_dir_kb = round(dist_w_dir_kb,1))
#First filter to less than 2000bp away from TSS
TSS_2kb <- matched_subtype_medians %>% filter(dist_w_dir_kb <=2.0) %>%
filter(dist_w_dir_kb >= (-2.0)) %>%
group_by(dist_w_dir_kb) %>%
summarize(M2_5hmc_Gl= median(median_5hmc_glioma),
M2_5hmc_Ep = median(median_5hmc_epend),
M2_5hmc_Emb = median(median_5hmc_emb),
M2_5mc_Gl= median(median_5mc_glioma),
M2_5mc_Ep = median(median_5mc_epend),
M2_5mc_Emb = median(median_5mc_emb),
M2_5hmc_Ctl = median(Ctl_Median_5hmC),
M2_5mc_Ctl = median(Ctl_Median_5mC),)
G_DF = TSS_2kb %>%
#select(dist_w_dir, contains("median"), contains("Median")) %>%
gather(key = "Measure", value = "Beta_value", -dist_w_dir_kb)%>%
mutate(Measure = ifelse(Measure=="M2_5hmc_Gl", "5hmC Glioma",
ifelse(Measure=="M2_5hmc_Ep", "5hmC Ependymoma",
ifelse(Measure=="M2_5hmc_Emb", "5hmC Embryonal",
ifelse(Measure== "M2_5hmc_Ctl", "5hmC Control",
ifelse(Measure=="M2_5mc_Gl", "5mC Glioma",
ifelse(Measure== "M2_5mc_Ep", "5mC Ependymoma",
ifelse(Measure== "M2_5mc_Emb", "5mC Embryonal",
ifelse(Measure== "M2_5hmc_Ctl", "5hmC Control",
ifelse(Measure== "M2_5mc_Ctl", "5mC Control",NA)
)
)
)
)
)
)
)))
ggplot(G_DF, aes(x = dist_w_dir_kb, y = Beta_value, colour = Measure)) +
geom_line() +
xlab("Distance from TSS (kilo-bp)") +
ylab("Median of Median 5(h)mc") +
theme_classic()+
theme(axis.text=element_text(size=14),
axis.title=element_text(size=14),
legend.title=element_text(size=12),
legend.text=element_text(size=11))+
#scale_color_brewer(palette ="Paired")
scale_color_npg()
#Longer Range
matched_subtype_medians$round_dist <- round(matched_subtype_medians$dist_w_dir_kb, -1)
TSS_long_range <- matched_subtype_medians %>% group_by(round_dist) %>%
filter(round_dist < 500)%>%
filter(round_dist > (-500)) %>%
summarize(M2_5hmc_Gl= median(median_5hmc_glioma),
M2_5hmc_Ep = median(median_5hmc_epend),
M2_5hmc_Emb = median(median_5hmc_emb),
M2_5mc_Gl= median(median_5mc_glioma),
M2_5mc_Ep = median(median_5mc_epend),
M2_5mc_Emb = median(median_5mc_emb),
M2_5hmc_Ctl = median(Ctl_Median_5hmC),
M2_5mc_Ctl = median(Ctl_Median_5mC),)
G_DF = TSS_long_range %>%
#select(dist_w_dir, contains("median"), contains("Median")) %>%
gather(key = "Measure", value = "Beta_value", -round_dist)%>%
mutate(Measure = ifelse(Measure=="M2_5hmc_Gl", "5hmC Glioma",
ifelse(Measure=="M2_5hmc_Ep", "5hmC Ependymoma",
ifelse(Measure=="M2_5hmc_Emb", "5hmC Embryonal",
ifelse(Measure== "M2_5hmc_Ctl", "5hmC Control",
ifelse(Measure=="M2_5mc_Gl", "5mC Glioma",
ifelse(Measure== "M2_5mc_Ep", "5mC Ependymoma",
ifelse(Measure== "M2_5mc_Emb", "5mC Embryonal",
ifelse(Measure== "M2_5hmc_Ctl", "5hmC Control",
ifelse(Measure== "M2_5mc_Ctl", "5mC Control",NA)
)
)
)
)
)
)
)))
#plot
ggplot(G_DF, aes(x = round_dist, y = Beta_value, colour = Measure)) +
geom_line() +
xlab("Distance from TSS (kilo-bp)") +
ylab("Median of Median 5(h)mc") +
theme_classic()+
theme(axis.text=element_text(size=14),
axis.title=element_text(size=14),
legend.title=element_text(size=12),
legend.text=element_text(size=11))+
scale_color_npg()
# scale_color_brewer(palette ="Paired")
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Intermediate range: restrict to probes within +/- 10 (kilo-bp, per the axis
# labels below) of the TSS and summarise at the raw distance resolution.
TSS_10kb <- matched_subtype_medians %>%
  filter(dist_w_dir_kb <= 10.0, dist_w_dir_kb >= -10.0) %>%
  group_by(dist_w_dir_kb) %>%
  # stray trailing comma of the original summarize() call removed
  summarize(M2_5hmc_Gl = median(median_5hmc_glioma),
            M2_5hmc_Ep = median(median_5hmc_epend),
            M2_5hmc_Emb = median(median_5hmc_emb),
            M2_5mc_Gl = median(median_5mc_glioma),
            M2_5mc_Ep = median(median_5mc_epend),
            M2_5mc_Emb = median(median_5mc_emb),
            M2_5hmc_Ctl = median(Ctl_Median_5hmC),
            M2_5mc_Ctl = median(Ctl_Median_5mC))
# Reshape to long format and relabel the summary columns for the legend.
# FIX: the original nested ifelse() tested "M2_5hmc_Ctl" twice (dead second
# branch); a named-vector lookup gives the same mapping, NA for unknown keys.
G_DF <- TSS_10kb %>%
  gather(key = "Measure", value = "Beta_value", -dist_w_dir_kb) %>%
  mutate(Measure = unname(c(M2_5hmc_Gl  = "5hmC Glioma",
                            M2_5hmc_Ep  = "5hmC Ependymoma",
                            M2_5hmc_Emb = "5hmC Embryonal",
                            M2_5hmc_Ctl = "5hmC Control",
                            M2_5mc_Gl   = "5mC Glioma",
                            M2_5mc_Ep   = "5mC Ependymoma",
                            M2_5mc_Emb  = "5mC Embryonal",
                            M2_5mc_Ctl  = "5mC Control")[Measure]))
# Intermediate-range plot with manual colours per series.
# NOTE(review): "5hmC Embryonal" and "5mC Glioma" share the same hex colour
# "#F39B7FFF" -- two distinct series become indistinguishable; looks
# unintended, confirm before publication.
ggplot(G_DF, aes(x = dist_w_dir_kb, y = Beta_value, colour = Measure)) +
  geom_line(size =1.0) +
  xlab("Distance from TSS (kilo-bp)") +
  ylab("Median 5(h)mc") +
  theme_classic()+
  theme(axis.text=element_text(size=16),
        axis.title=element_text(size=18),
        legend.title=element_text(size=20),
        legend.text=element_text(size=18))+
  scale_x_continuous(limits=c(-10, 10), breaks= seq(-10.0, 10.0, by =1.0))+
  scale_color_manual(values = c("5hmC Glioma" = "#E64B35FF","5hmC Ependymoma" = "#4DBBD5FF",
                                "5hmC Embryonal"= "#F39B7FFF", "5hmC Control" = "#3C5488FF",
                                "5mC Glioma" = "#F39B7FFF","5mC Ependymoma" = "#8491B4FF",
                                "5mC Embryonal"= "#91D1C2FF", "5mC Control" = "#7E6148FF"))
|
c7161adcd749209b9027334e46c413897d0113db | 9693f65177fd72398e6218989815729f155eddb5 | /document.R | a33ce4258aed7a5dd216c53ab4e572fbeac83cd2 | [
"MIT"
] | permissive | poissonconsulting/mcmcrdata | 2d275a2222cff0d476980a876c1aa1684a847ed3 | 3834fbd0625d5109e59fcdca44a6b561352bfcfc | refs/heads/master | 2023-03-05T05:01:44.695976 | 2021-02-12T22:17:55 | 2021-02-12T22:17:55 | 120,050,158 | 0 | 1 | NOASSERTION | 2020-04-23T00:31:08 | 2018-02-03T01:47:26 | R | UTF-8 | R | false | false | 179 | r | document.R | devtools::document()
# Package maintenance pipeline: regenerate docs, metadata, README and site,
# then run R CMD check. Pure side effects -- run from the package root.
if(file.exists("DESCRIPTION")) unlink("docs", recursive = TRUE)  # stale pkgdown output
codemetar::write_codemeta()
knitr::knit("README.Rmd")
pkgdown::build_site()
devtools::check()
|
8f6a8e680dbf665a8c12b79d97320dcae21954d7 | eaadadf94d1efa41d0fc86462f14be82da31d23b | /cachematrix.R | baa751222aeb10eefb2c27f9a21e690b2024ef16 | [] | no_license | abk11/ProgrammingAssignment2 | 20785e15dbb283c38fa7ef49f28c98743f5c1ff3 | cde980234886af6f6c814ad5ec283baee9c1b931 | refs/heads/master | 2021-01-15T14:11:38.277968 | 2015-06-16T18:43:16 | 2015-06-16T18:43:16 | 37,538,071 | 0 | 0 | null | 2015-06-16T15:16:09 | 2015-06-16T15:16:06 | null | UTF-8 | R | false | false | 2,113 | r | cachematrix.R | ## Because calculating the inverse of a matrix tends to be very costly,
## in case that we need to compute the inverse of a matrix we have already encountered,
## if we have already recorded its inverse,
## there is no need to recompute it since we can just look it up in the cache.
## I assume that the given matrix is always invertible.
## The first function, makeCacheMatrix, returns a list containing 4 functions.
## Each of them will respectively:
## 1) set the matrix to be inverted
## 2) get the matrix to be inverted
## 3) set the inverse of the original matrix
## 4) get the inverse of the original matrix
## Build a caching wrapper around a matrix: the four returned closures share
## the matrix `x` and a cached inverse, so the inverse can be stored once and
## looked up instead of recomputed. Setting a new matrix invalidates the cache.
makeCacheMatrix <- function(x = matrix()) {
  inv_cache <- NULL  # cached inverse; NULL until setInverse() stores a value
  list(
    setMatrixToBeInverted = function(new_matrix) {
      x <<- new_matrix
      inv_cache <<- NULL  # old inverse no longer valid
    },
    getMatrixToBeInverted = function() x,
    setInverse = function(inverse) inv_cache <<- inverse,
    getInverse = function() inv_cache
  )
}
## The second function, cacheSolve, finds the inverse of the matrix originated with the previous function.
## It asks: has the inverse for the given matrix already been computed?
## If TRUE, it looks up in the cache and returns the matrix.
## If FALSE, it computes the inverse, caches the result, and returns it.
## Return the inverse of the matrix wrapped by `x` (a makeCacheMatrix object).
## On a cache hit the stored inverse is returned (after a message); on a miss
## the inverse is computed with solve(), stored in the cache, and returned.
## Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getInverse()
  if (is.null(cached)) {
    # cache miss: compute, store, return
    result <- solve(x$getMatrixToBeInverted(), ...)
    x$setInverse(result)
    return(result)
  }
  message("getting cached data")
  cached
}
## Example:
## > A <- cbind(c(1, 0), c(1, 1))
## > functionsForMatrixA <- makeCacheMatrix(A)
## > functionsForMatrixA$getMatrixToBeInverted()
## [,1] [,2]
## [1,] 1 1
## [2,] 0 1
## > cacheSolve(functionsForMatrixA)
## [,1] [,2]
## [1,] 1 -1
## [2,] 0 1
## > cacheSolve(functionsForMatrixA)
## getting cached data
## [,1] [,2]
## [1,] 1 -1
## [2,] 0 1 |
d62000906c0d02c1d443111bc429d7b88e6f78bd | b251989491d6f1481894ee206ce94bb55b679845 | /Market Simulation_Beer_BC_BLP.R | ba288f004c52eac9774914115ca8a17d4c1eeae9 | [] | no_license | lukas-jue/cscc-simulation | c0fe78fdc32bd93cb8d8a81433752a9a50a10c3e | 4a3be73bf4729bc2c5fecc835f2c3e6773f82600 | refs/heads/master | 2020-06-15T07:50:23.747341 | 2019-07-18T10:53:47 | 2019-07-18T10:53:47 | 195,241,474 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 29,881 | r | Market Simulation_Beer_BC_BLP.R | ##################################################################################################
#####Simulation & Estimation - Nash with Budget###################################################
#####BLP-type of indirect utility#################################################################
##################################################################################################
rm(list=ls())
# LOAD LIBRARIES
library(xtable)
library(devtools)
library(MASS)
library(Rcpp)
library(RcppArmadillo)
library(bayesm)
library(ggplot2)
library(ggpmisc)
library(tikzDevice)
library(plyr)
library(latex2exp)
library(FixedPoint)
library(dplyr)
library(reshape2)
library(tidyr)
library(ggcorrplot)
load("100k_MCMC_corrected.RData")
###Increase memory capacities
memory.limit(size=100000000)
load("Estimation_Data_Beer_20170423.Rdata")
products = c("Amstel Extra Lata 37,5 cl","Amstel Extra Lata 33 cl","Amstel Lata 37,5 cl","Amstel Lata 33 cl","Amstel Clasica Lata 33 cl",
"Cruzcampo Lata 33 cl","Estrella Damm Lata 33 cl","Estrella Galicia Lata 33 cl","Heineken Lata 33 cl","Mahou 5 Estrellas Lata 33 cl",
"Mahou Clasica Lata 33 cl","San Miguel Lata 33 cl","Voll Damm Lata 33 cl","Steinburg (Marca Blanca Mercadona) Lata 33 cl",
"Marca Blanca Carrefour Lata 33 cl")
# Reorder columns so price (originally column 16) is first in each respondent's
# design matrix; the estimation below assumes the price coefficient sits at pr = 1.
for (i in seq_along(E_Data$lgtdata)) {  # seq_along() is safe for empty lists, unlike 1:length()
  E_Data$lgtdata[[i]]$X <- E_Data$lgtdata[[i]]$X[, c(16, 1:15)]
}
#Number of players
nplayers = 15
##########################################
#######Run BLP Budget model###############
##########################################
###Load complete sampler now...
Rcpp::sourceCpp("rhierMnlRwMixture_rcpp_loop_Illus_BLP_type.cpp",showOutput = FALSE)
source('rhierMnlRwMixture_main_untuned_BC.R')
#number of constrained coefficients (budget & price)
nvar_c = 2
#position of price coefficient in design matrix
pr=1
###Prior setting
Amu = diag(1/10, nrow = nvar_c, ncol = nvar_c)
mustarbarc = matrix(rep(0, nvar_c), nrow = nvar_c)
nu = 15 + nvar_c
V = nu * diag(nvar_c)*0.5
Prior = list(ncomp=1, Amu = Amu, mustarbarc = mustarbarc, nu = nu, V = V)
# 30,000 MCMC iterations, keeping every 3rd draw (10,000 stored draws).
Mcmc = list(R=30000, keep=3)#, s=1.6)
#,s=c(0.1,0.5,0.5,0.5)
# Hierarchical MNL sampler with a budget constraint; rhierMnlRwMixture_SR is
# defined in the sourced R/C++ files above.
out_BC = rhierMnlRwMixture_SR(Data=E_Data,Prior=Prior,Mcmc=Mcmc,nvar_c=nvar_c,pr=pr,starting_budget = log(0.74))
betastar_HB_BC = out_BC$betadraw
compdraw_HB = out_BC$nmix$compdraw
probdraw_HB = out_BC$nmix$probdraw
rejection = out_BC$rejection
loglike_BC = out_BC$loglike
###Compute rejection rate of sampler
# Per-respondent Metropolis-Hastings rejection rates, then the overall mean.
rej_rate_indi = apply(rejection,2,mean)
summary(rej_rate_indi)
rej_rate_agg = mean(rej_rate_indi)
rej_rate_agg
########################
###Get rid of burnin####
########################
# check visually how much burn-in is required
plot(out_BC$loglike, type="l")
# Trace of the log-likelihood with a fitted linear trend (slope ~ 0 once the
# chain has converged); equation/R^2 annotation via ggpmisc::stat_poly_eq.
data.frame(loglikelihood = out_BC$loglike) %>%
  mutate(index = row_number()) %>%
  ggplot(aes(x = index, y = loglikelihood)) +
  geom_line(alpha = 0.7) +
  geom_smooth(method = "lm", se = FALSE) +
  stat_poly_eq(formula = y ~ x, aes(label = paste(..eq.label.., ..rr.label.., sep = "~~~~~~")),
               parse=TRUE,label.x.npc = "right", label.y.npc = 0.1,
               output.type = "expression") +
  theme_bw()
# Burn-in chosen from visual inspection of the trace above.
burnin = 3100
R = dim(betastar_HB_BC)[3]
# Drop the first `burnin` stored draws from every saved quantity.
betastar_HB_BC = betastar_HB_BC[,,(burnin+1):R]
compdraw_HB = compdraw_HB[(burnin+1):R]
probdraw_HB = probdraw_HB[(burnin+1):R]
rejection = rejection[(burnin+1):R,]
loglike_BC = loglike_BC[(burnin+1):R]
plot(loglike_BC, type="l")
data.frame(loglikelihood = loglike_BC) %>%
  mutate(index = row_number()) %>%
  ggplot(aes(x = index, y = loglikelihood)) +
  geom_line(alpha = 0.7) +
  geom_smooth(method = "lm", se = FALSE) +
  stat_poly_eq(formula = y ~ x, aes(label = paste(..eq.label.., ..rr.label.., sep = "~~~~~~")),
               parse=TRUE,label.x.npc = "right", label.y.npc = 0.01,
               output.type = "expression") +
  theme_bw()
# Formal check: regress post-burn-in log-likelihood on iteration index.
lmdata <- data.frame(loglikelihood = loglike_BC) %>%
  mutate(index = row_number())
lm(loglike_BC ~ index, data = lmdata) %>%
  summary()
# R = retained draws, N = number of respondents (array is N x nvar x R).
R = dim(betastar_HB_BC)[3]
N = dim(betastar_HB_BC)[1]
###Heterogeneity distribution lower level model non-smoothed
# Pool R*l draws across respondents and iterations by pairing random
# permutations of respondent indices with random permutations of draw indices.
l=10
betastar_BC_LLMns <- array(0,dim=c(R*l,dim(betastar_HB_BC)[2]))
index_r = rep(rank(runif(R)),l)
index_n = rep(rank(runif(N)),round((R*l)/N)+1)
#data generation
for(i in 1:(R*l)){
  betastar_BC_LLMns[i,] = betastar_HB_BC[index_n[i],,index_r[i]]
}
#transform betastardraws to betadraws
# Columns 1-2 (budget, price) were sampled on the log scale
# (cf. starting_budget = log(0.74) above); exp() maps them back.
beta_BC_LLMns = betastar_BC_LLMns
beta_BC_LLMns[,1] = exp(betastar_BC_LLMns[,1])
beta_BC_LLMns[,2] = exp(betastar_BC_LLMns[,2])
summary(beta_BC_LLMns)
#######################
###Nash optimization
#######################
### Load functions for Fixed-point approach
source('Morrow_Skerlos_Implementations_BC_BLP_MarkupEquations_Final_Efficient.R')
Rcpp::sourceCpp("Speed++_MS_BC_BLP_Efficient.cpp",showOutput = FALSE)
###########################################################################
# Nash Equilibrium Prices and Shares for all 15 brands
###########################################################################
# ingredients
nplayers = 15 + 1 ### 15 inside + outside
min_p = 0.22
# Initial prices: min_p for every inside good, 0 for the outside good.
prices_init = c(rep(min_p,nplayers - 1),0)
# Brand dummies: identity for inside goods, an all-zero row for the outside good.
designBase = rbind(diag(nplayers-1),rep(0,nplayers-1))
Xdata = cbind(prices_init,designBase); colnames(Xdata)[1] = "PRICE"
# Ownership matrix: diagonal = every brand owned by a different firm.
Ownership = array(0,dim=c(nplayers,nplayers))
Ownership[1:(nplayers-1),1:(nplayers-1)] = diag((nplayers-1))
# define which brands compete on price, i.e. are owned by different companies
# without any specification, all brands compete (owned by different companies)
#Ownership[1,2] <- 1
#Ownership[2,1] <- 1
inside_row=which(rowSums(Ownership) != 0, arr.ind = TRUE)
p0=Xdata[inside_row,"PRICE"]
# Marginal costs assumed at 90% of the initial prices.
costBase = as.vector(prices_init*0.9)
MC = costBase[inside_row]
### Run Fixed-Point algorithm with Xi-markup equation (reliable and fast: See Table 3 in paper for comparison)
p_Markup_Xi_FixedPoint_BC_BLP = FixedPoint(Function = function(price_vec) FixedPoint_BLP_Xi(price_vec,MC=MC,
                                           ownership=Ownership,Xdata=Xdata,beta_draws=beta_BC_LLMns,pr=1),
                                           Inputs = p0, MaxIter = 10000, ConvergenceMetricThreshold = 1e-10, Method = "Anderson")
p_Markup_Xi_FixedPoint_BC_BLP$FixedPoint
# Overview optimal prices & shares
Optimal_prices <- array(0,dim=c(nplayers,2))
rownames(Optimal_prices) = c(products[1:(nplayers-1)],"Outside")
colnames(Optimal_prices) = c("Equilibrium Price","Equilibrium Shares")
# Save equi-prices
Optimal_prices[,"Equilibrium Price"] <-c(p_Markup_Xi_FixedPoint_BC_BLP$FixedPoint,0)
# Check for all of the 15 brands -> change one, keep all the others fixed
# Choice shares at given prices: prices form the FIRST column of the full
# design (price position pr = 1); shares come from the compiled C++ routine.
computeShares_BC_BLP <- function(prices, beta, design, pr = 1) {
  fullDesign <- cbind(prices,design) # prices bound as the first column (pr = 1)
  probabilities_BC_BLP_log_cpp(beta,fullDesign,pr)
}
Optimal_prices[,"Equilibrium Shares"] <- as.vector(computeShares_BC_BLP(Optimal_prices[,"Equilibrium Price"],
                                                                        beta_BC_LLMns,designBase,pr=1))
round(Optimal_prices,2)
# store results in data frame
res_matrix <- data.frame(
  product = c(products[1:(nplayers-1)],"Outside"),
  equi_price = as.numeric(c(p_Markup_Xi_FixedPoint_BC_BLP$FixedPoint,0)),
  equi_share = as.vector(computeShares_BC_BLP(Optimal_prices[,"Equilibrium Price"],
                                              beta_BC_LLMns,designBase,pr=1)))
# create list to store different competitive scenarios
scenarios <- list("full_comp" = res_matrix)
###########################################################################
# Ownership according to brand names (ie all Amstel go to the same company)
###########################################################################
# ingredients (same construction as the full-competition scenario above)
nplayers = 15 + 1 ### 15 inside + outside
min_p = 0.22
prices_init = c(rep(min_p,nplayers - 1),0)
designBase = rbind(diag(nplayers-1),rep(0,nplayers-1))
Xdata = cbind(prices_init,designBase); colnames(Xdata)[1] = "PRICE"
Ownership = array(0,dim=c(nplayers,nplayers))
Ownership[1:(nplayers-1),1:(nplayers-1)] = diag((nplayers-1))
# define which brands compete on price, i.e. are owned by different companies
# (vectorized block assignment replaces the original nested for-loops; same result)
# Amstel (five brands, indices 1-5) share one owner
Ownership[1:5, 1:5] <- 1
# Estrella (two brands, indices 7-8) share one owner
Ownership[7:8, 7:8] <- 1
# Mahou (two brands, indices 10-11) share one owner
Ownership[10:11, 10:11] <- 1
inside_row=which(rowSums(Ownership) != 0, arr.ind = TRUE)
p0=Xdata[inside_row,"PRICE"]
costBase = as.vector(prices_init*0.9)
MC = costBase[inside_row]
### Run Fixed-Point algorithm with Xi-markup equation (reliable and fast: See Table 3 in paper for comparison)
p_Markup_Xi_FixedPoint_BC_BLP = FixedPoint(Function = function(price_vec) FixedPoint_BLP_Xi(price_vec,MC=MC,
                                           ownership=Ownership,Xdata=Xdata,beta_draws=beta_BC_LLMns,pr=1),
                                           Inputs = p0, MaxIter = 10000, ConvergenceMetricThreshold = 1e-10, Method = "Anderson")
p_Markup_Xi_FixedPoint_BC_BLP$FixedPoint
Optimal_prices <- array(0,dim=c(nplayers,2))
rownames(Optimal_prices) = c(products[1:(nplayers-1)],"Outside")
colnames(Optimal_prices) = c("Equilibrium Price","Equilibrium Shares")
# Save equi-prices
Optimal_prices[,"Equilibrium Price"] <-c(p_Markup_Xi_FixedPoint_BC_BLP$FixedPoint,0)
# (identical redefinition of the share helper; prices are bound as the FIRST column)
computeShares_BC_BLP <- function(prices, beta, design, pr = 1) {
  fullDesign <- cbind(prices,design) # prices bound as the first column (pr = 1)
  probabilities_BC_BLP_log_cpp(beta,fullDesign,pr)
}
Optimal_prices[,"Equilibrium Shares"] <- as.vector(computeShares_BC_BLP(Optimal_prices[,"Equilibrium Price"],
                                                                        beta_BC_LLMns,designBase,pr=1))
round(Optimal_prices,2)
# store results in data frame
res_matrix <- data.frame(
  product = c(products[1:(nplayers-1)],"Outside"),
  equi_price = as.numeric(c(p_Markup_Xi_FixedPoint_BC_BLP$FixedPoint,0)),
  equi_share = as.vector(computeShares_BC_BLP(Optimal_prices[,"Equilibrium Price"],
                                              beta_BC_LLMns,designBase,pr=1)))
# add to results list
scenarios[["brand_comp"]] <- res_matrix
###########################################################################
# Only two Amstel, two Estrella and one Heineken (Ownership according to brands)
###########################################################################
# ingredients: reduced market of 5 inside goods + outside option
nplayers = 5 + 1
min_p = 0.22
prices_init = c(rep(min_p,nplayers - 1),0)
designBase = rbind(diag(nplayers-1),rep(0,nplayers-1))
Xdata = cbind(prices_init,designBase); colnames(Xdata)[1] = "PRICE"
Ownership = array(0,dim=c(nplayers,nplayers))
Ownership[1:(nplayers-1),1:(nplayers-1)] = diag((nplayers-1))
# define which brands compete on price, i.e. are owned by different companies
# (vectorized block assignment replaces the original nested for-loops; same result)
# Amstel (two brands, indices 1-2) share one owner
Ownership[1:2, 1:2] <- 1
# Estrella (two brands, indices 3-4) share one owner
Ownership[3:4, 3:4] <- 1
inside_row=which(rowSums(Ownership) != 0, arr.ind = TRUE)
p0=Xdata[inside_row,"PRICE"]
costBase = as.vector(prices_init*0.9)
MC = costBase[inside_row]
### Run Fixed-Point algorithm with Xi-markup equation (reliable and fast: See Table 3 in paper for comparison)
# beta columns c(1,2,4,7,9,10,11) = budget, price, and the dummies of products
# 2, 5, 7, 8, 9 (brand j's dummy sits in column 2 + j of beta_BC_LLMns).
p_Markup_Xi_FixedPoint_BC_BLP = FixedPoint(Function = function(price_vec) FixedPoint_BLP_Xi(price_vec,MC=MC,
                                           ownership=Ownership,Xdata=Xdata,beta_draws=beta_BC_LLMns[,(c(1,2,4,7,9,10,11))],pr=1),
                                           Inputs = p0, MaxIter = 10000, ConvergenceMetricThreshold = 1e-10, Method = "Anderson")
p_Markup_Xi_FixedPoint_BC_BLP$FixedPoint
Optimal_prices <- array(0,dim=c(nplayers,2))
rownames(Optimal_prices) = c(products[c(2,5,7,8,9)],"Outside")
colnames(Optimal_prices) = c("Equilibrium Price","Equilibrium Shares")
# Save equi-prices
Optimal_prices[,"Equilibrium Price"] <-c(p_Markup_Xi_FixedPoint_BC_BLP$FixedPoint,0)
# (identical redefinition of the share helper; prices are bound as the FIRST column)
computeShares_BC_BLP <- function(prices, beta, design, pr = 1) {
  fullDesign <- cbind(prices,design) # prices bound as the first column (pr = 1)
  probabilities_BC_BLP_log_cpp(beta,fullDesign,pr)
}
Optimal_prices[,"Equilibrium Shares"] <- as.vector(computeShares_BC_BLP(Optimal_prices[,"Equilibrium Price"],
                                                                        beta_BC_LLMns[,c(1,2,4,7,9,10,11)],designBase,pr=1))
round(Optimal_prices,2)
# store results in data frame
res_matrix <- data.frame(
  product = rownames(Optimal_prices),
  equi_price = as.numeric(c(p_Markup_Xi_FixedPoint_BC_BLP$FixedPoint,0)),
  equi_share = as.vector(computeShares_BC_BLP(Optimal_prices[,"Equilibrium Price"],
                                              beta_BC_LLMns[,c(1,2,4,7,9,10,11)],designBase,pr=1)))
# add to results list
# BUG FIX: this brand-ownership scenario was stored under "five_full_comp",
# which the true full-competition run later overwrites, while the key
# "five_brand_comp" referenced downstream (w_a_p_five_brand_comp, CMxMS_before,
# the scenario plots) was never created. Store it under the intended name.
scenarios[["five_brand_comp"]] <- res_matrix
###########################################################################
# Only two Amstel, two Estrella and one Heineken (Merger Amstel and Heineken, bc highly correlated betas)
###########################################################################
# ingredients (same 5-brand market as above)
nplayers = 5 + 1
min_p = 0.22
prices_init = c(rep(min_p,nplayers - 1),0)
designBase = rbind(diag(nplayers-1),rep(0,nplayers-1))
Xdata = cbind(prices_init,designBase); colnames(Xdata)[1] = "PRICE"
Ownership = array(0,dim=c(nplayers,nplayers))
Ownership[1:(nplayers-1),1:(nplayers-1)] = diag((nplayers-1))
# define which brands compete on price, i.e. are owned by different companies
# (vectorized block assignment replaces the original nested for-loops; same result)
# merged firm: both Amstel brands plus Heineken (indices 1, 2 and 5) share one owner
# (the original label "Amstel (two brands)" omitted Heineken, index 5)
Ownership[c(1:2, 5), c(1:2, 5)] <- 1
# Estrella (two brands, indices 3-4) share one owner
Ownership[3:4, 3:4] <- 1
inside_row=which(rowSums(Ownership) != 0, arr.ind = TRUE)
p0=Xdata[inside_row,"PRICE"]
costBase = as.vector(prices_init*0.9)
MC = costBase[inside_row]
### Run Fixed-Point algorithm with Xi-markup equation (reliable and fast: See Table 3 in paper for comparison)
p_Markup_Xi_FixedPoint_BC_BLP = FixedPoint(Function = function(price_vec) FixedPoint_BLP_Xi(price_vec,MC=MC,
                                           ownership=Ownership,Xdata=Xdata,beta_draws=beta_BC_LLMns[,(c(1,2,4,7,9,10,11))],pr=1),
                                           Inputs = p0, MaxIter = 10000, ConvergenceMetricThreshold = 1e-10, Method = "Anderson")
p_Markup_Xi_FixedPoint_BC_BLP$FixedPoint
Optimal_prices <- array(0,dim=c(nplayers,2))
rownames(Optimal_prices) = c(products[c(2,5,7,8,9)],"Outside")
colnames(Optimal_prices) = c("Equilibrium Price","Equilibrium Shares")
# Save equi-prices
Optimal_prices[,"Equilibrium Price"] <-c(p_Markup_Xi_FixedPoint_BC_BLP$FixedPoint,0)
# (identical redefinition of the share helper; prices are bound as the FIRST column)
computeShares_BC_BLP <- function(prices, beta, design, pr = 1) {
  fullDesign <- cbind(prices,design) # prices bound as the first column (pr = 1)
  probabilities_BC_BLP_log_cpp(beta,fullDesign,pr)
}
Optimal_prices[,"Equilibrium Shares"] <- as.vector(computeShares_BC_BLP(Optimal_prices[,"Equilibrium Price"],
                                                                        beta_BC_LLMns[,c(1,2,4,7,9,10,11)],designBase,pr=1))
round(Optimal_prices,2)
# store results in data frame
res_matrix <- data.frame(
  product = rownames(Optimal_prices),
  equi_price = as.numeric(c(p_Markup_Xi_FixedPoint_BC_BLP$FixedPoint,0)),
  equi_share = as.vector(computeShares_BC_BLP(Optimal_prices[,"Equilibrium Price"],
                                              beta_BC_LLMns[,c(1,2,4,7,9,10,11)],designBase,pr=1)))
# add to results list
scenarios[["five_merge_comp"]] <- res_matrix
###########################################################################
# Heineken and Estrella Merger
###########################################################################
# ingredients (same 5-brand market as above)
nplayers = 5 + 1
min_p = 0.22
prices_init = c(rep(min_p,nplayers - 1),0)
designBase = rbind(diag(nplayers-1),rep(0,nplayers-1))
Xdata = cbind(prices_init,designBase); colnames(Xdata)[1] = "PRICE"
Ownership = array(0,dim=c(nplayers,nplayers))
Ownership[1:(nplayers-1),1:(nplayers-1)] = diag((nplayers-1))
# define which brands compete on price, i.e. are owned by different companies
# (vectorized block assignment replaces the original nested for-loops; same result)
# Amstel (two brands, indices 1-2) share one owner
Ownership[1:2, 1:2] <- 1
# merged firm: both Estrella brands plus Heineken (indices 3, 4 and 5) share one owner
# (the original label "Estrella (two brands)" omitted Heineken, index 5)
Ownership[c(3:4, 5), c(3:4, 5)] <- 1
inside_row=which(rowSums(Ownership) != 0, arr.ind = TRUE)
p0=Xdata[inside_row,"PRICE"]
costBase = as.vector(prices_init*0.9)
MC = costBase[inside_row]
### Run Fixed-Point algorithm with Xi-markup equation (reliable and fast: See Table 3 in paper for comparison)
p_Markup_Xi_FixedPoint_BC_BLP = FixedPoint(Function = function(price_vec) FixedPoint_BLP_Xi(price_vec,MC=MC,
                                           ownership=Ownership,Xdata=Xdata,beta_draws=beta_BC_LLMns[,(c(1,2,4,7,9,10,11))],pr=1),
                                           Inputs = p0, MaxIter = 10000, ConvergenceMetricThreshold = 1e-10, Method = "Anderson")
p_Markup_Xi_FixedPoint_BC_BLP$FixedPoint
Optimal_prices <- array(0,dim=c(nplayers,2))
rownames(Optimal_prices) = c(products[c(2,5,7,8,9)],"Outside")
colnames(Optimal_prices) = c("Equilibrium Price","Equilibrium Shares")
# Save equi-prices
Optimal_prices[,"Equilibrium Price"] <-c(p_Markup_Xi_FixedPoint_BC_BLP$FixedPoint,0)
# (identical redefinition of the share helper; prices are bound as the FIRST column)
computeShares_BC_BLP <- function(prices, beta, design, pr = 1) {
  fullDesign <- cbind(prices,design) # prices bound as the first column (pr = 1)
  probabilities_BC_BLP_log_cpp(beta,fullDesign,pr)
}
Optimal_prices[,"Equilibrium Shares"] <- as.vector(computeShares_BC_BLP(Optimal_prices[,"Equilibrium Price"],
                                                                        beta_BC_LLMns[,c(1,2,4,7,9,10,11)],designBase,pr=1))
round(Optimal_prices,2)
# store results in data frame
res_matrix <- data.frame(
  product = rownames(Optimal_prices),
  equi_price = as.numeric(c(p_Markup_Xi_FixedPoint_BC_BLP$FixedPoint,0)),
  equi_share = as.vector(computeShares_BC_BLP(Optimal_prices[,"Equilibrium Price"],
                                              beta_BC_LLMns[,c(1,2,4,7,9,10,11)],designBase,pr=1)))
# add to results list
scenarios[["five_merge_comp_estr"]] <- res_matrix
###########################################################################
# Only two Amstel, two Estrella and one Heineken (Full Competition)
###########################################################################
# ingredients: 5-brand market, diagonal ownership only (no mergers)
nplayers = 5 + 1
min_p = 0.22
prices_init = c(rep(min_p,nplayers - 1),0)
designBase = rbind(diag(nplayers-1),rep(0,nplayers-1))
Xdata = cbind(prices_init,designBase); colnames(Xdata)[1] = "PRICE"
Ownership = array(0,dim=c(nplayers,nplayers))
Ownership[1:(nplayers-1),1:(nplayers-1)] = diag((nplayers-1))
inside_row=which(rowSums(Ownership) != 0, arr.ind = TRUE)
p0=Xdata[inside_row,"PRICE"]
costBase = as.vector(prices_init*0.9)
MC = costBase[inside_row]
### Run Fixed-Point algorithm with Xi-markup equation (reliable and fast: See Table 3 in paper for comparison)
p_Markup_Xi_FixedPoint_BC_BLP = FixedPoint(Function = function(price_vec) FixedPoint_BLP_Xi(price_vec,MC=MC,
                                           ownership=Ownership,Xdata=Xdata,beta_draws=beta_BC_LLMns[,(c(1,2,4,7,9,10,11))],pr=1),
                                           Inputs = p0, MaxIter = 10000, ConvergenceMetricThreshold = 1e-10, Method = "Anderson")
p_Markup_Xi_FixedPoint_BC_BLP$FixedPoint
Optimal_prices <- array(0,dim=c(nplayers,2))
rownames(Optimal_prices) = c(products[c(2,5,7,8,9)],"Outside")
colnames(Optimal_prices) = c("Equilibrium Price","Equilibrium Shares")
# Save equi-prices
Optimal_prices[,"Equilibrium Price"] <-c(p_Markup_Xi_FixedPoint_BC_BLP$FixedPoint,0)
# (identical redefinition of the share helper; prices are bound as the FIRST column)
computeShares_BC_BLP <- function(prices, beta, design, pr = 1) {
  fullDesign <- cbind(prices,design) # prices bound as the first column (pr = 1)
  probabilities_BC_BLP_log_cpp(beta,fullDesign,pr)
}
Optimal_prices[,"Equilibrium Shares"] <- as.vector(computeShares_BC_BLP(Optimal_prices[,"Equilibrium Price"],
                                                                        beta_BC_LLMns[,c(1,2,4,7,9,10,11)],designBase,pr=1))
round(Optimal_prices,2)
# store results in data frame
res_matrix <- data.frame(
  product = rownames(Optimal_prices),
  equi_price = as.numeric(c(p_Markup_Xi_FixedPoint_BC_BLP$FixedPoint,0)),
  equi_share = as.vector(computeShares_BC_BLP(Optimal_prices[,"Equilibrium Price"],
                                              beta_BC_LLMns[,c(1,2,4,7,9,10,11)],designBase,pr=1)))
# add to results list
scenarios[["five_full_comp"]] <- res_matrix
###########################################################################
# Only two Amstel, two Estrella and one Heineken (Monopoly)
###########################################################################
# ingredients (same 5-brand market as above)
nplayers = 5 + 1
min_p = 0.22
prices_init = c(rep(min_p,nplayers - 1),0)
designBase = rbind(diag(nplayers-1),rep(0,nplayers-1))
Xdata = cbind(prices_init,designBase); colnames(Xdata)[1] = "PRICE"
Ownership = array(0,dim=c(nplayers,nplayers))
Ownership[1:(nplayers-1),1:(nplayers-1)] = diag((nplayers-1))
# define which brands compete on price, i.e. are owned by different companies
# monopoly: all five inside brands (indices 1-5) belong to the same owner
# (vectorized block assignment replaces the original nested for-loop; same result)
Ownership[1:5, 1:5] <- 1
inside_row=which(rowSums(Ownership) != 0, arr.ind = TRUE)
p0=Xdata[inside_row,"PRICE"]
costBase = as.vector(prices_init*0.9)
MC = costBase[inside_row]
### Run Fixed-Point algorithm with Xi-markup equation (reliable and fast: See Table 3 in paper for comparison)
p_Markup_Xi_FixedPoint_BC_BLP = FixedPoint(Function = function(price_vec) FixedPoint_BLP_Xi(price_vec,MC=MC,
                                           ownership=Ownership,Xdata=Xdata,beta_draws=beta_BC_LLMns[,(c(1,2,4,7,9,10,11))],pr=1),
                                           Inputs = p0, MaxIter = 10000, ConvergenceMetricThreshold = 1e-10, Method = "Anderson")
p_Markup_Xi_FixedPoint_BC_BLP$FixedPoint
Optimal_prices <- array(0,dim=c(nplayers,2))
rownames(Optimal_prices) = c(products[c(2,5,7,8,9)],"Outside")
colnames(Optimal_prices) = c("Equilibrium Price","Equilibrium Shares")
# Save equi-prices
Optimal_prices[,"Equilibrium Price"] <-c(p_Markup_Xi_FixedPoint_BC_BLP$FixedPoint,0)
# (identical redefinition of the share helper; prices are bound as the FIRST column)
computeShares_BC_BLP <- function(prices, beta, design, pr = 1) {
  fullDesign <- cbind(prices,design) # prices bound as the first column (pr = 1)
  probabilities_BC_BLP_log_cpp(beta,fullDesign,pr)
}
Optimal_prices[,"Equilibrium Shares"] <- as.vector(computeShares_BC_BLP(Optimal_prices[,"Equilibrium Price"],
                                                                        beta_BC_LLMns[,c(1,2,4,7,9,10,11)],designBase,pr=1))
round(Optimal_prices,2)
# store results in data frame
res_matrix <- data.frame(
  product = rownames(Optimal_prices),
  equi_price = as.numeric(c(p_Markup_Xi_FixedPoint_BC_BLP$FixedPoint,0)),
  equi_share = as.vector(computeShares_BC_BLP(Optimal_prices[,"Equilibrium Price"],
                                              beta_BC_LLMns[,c(1,2,4,7,9,10,11)],designBase,pr=1)))
# add to results list
scenarios[["five_monopoly"]] <- res_matrix
########################################################
# Compute Average Prices in the Market, Weighted by Market Share
########################################################
# Share-weighted average price per scenario (inner product price' * share).
# Requires scenarios[["five_brand_comp"]] to exist (see where the brand-ownership
# 5-brand scenario is stored above).
w_a_p_full_comp <- t(scenarios[["full_comp"]]$equi_price) %*% scenarios[["full_comp"]]$equi_share
w_a_p_brand_comp <- t(scenarios[["brand_comp"]]$equi_price) %*% scenarios[["brand_comp"]]$equi_share
w_a_p_five_full_comp <- t(scenarios[["five_full_comp"]]$equi_price) %*% scenarios[["five_full_comp"]]$equi_share
w_a_p_five_brand_comp <- t(scenarios[["five_brand_comp"]]$equi_price) %*% scenarios[["five_brand_comp"]]$equi_share
w_a_p_five_monopoly <- t(scenarios[["five_monopoly"]]$equi_price) %*% scenarios[["five_monopoly"]]$equi_share
w_a_p_five_merge <- t(scenarios[["five_merge_comp"]]$equi_price) %*% scenarios[["five_merge_comp"]]$equi_share
w_a_p_five_merge_estr <- t(scenarios[["five_merge_comp_estr"]]$equi_price) %*% scenarios[["five_merge_comp_estr"]]$equi_share
# Price ratios: how much more expensive the market gets under brand ownership.
w_a_p_brand_comp / w_a_p_full_comp
w_a_p_five_brand_comp / w_a_p_five_full_comp
# compute average annual welfare loss for typical German consumer with the merger
# NOTE(review): 102/(1/3) presumably converts 102 litres annual consumption into
# 1/3-litre cans, and 82000000 is Germany's population -- confirm both constants.
welf_loss_avg <- 102/(1/3)*(w_a_p_five_merge - w_a_p_five_brand_comp)
welf_loss_total <- welf_loss_avg * 82000000
########################################################
# Compute contribution margin * market share for all scenarios
########################################################
# Assumed constant marginal cost per inside brand.
MC <- rep(0.198, 15)
for (i in seq_along(scenarios)) {  # seq_along() instead of 1:length() (safe for empty lists)
  # position of outside good (last row of each scenario data frame)
  out_pos <- length(scenarios[[i]]$equi_price)
  # Contribution margin times market share for every inside brand; NA for the
  # outside good. The original indexed MC with `1:out_pos-1`, i.e.
  # (1:out_pos) - 1 = 0:(out_pos-1) -- it only worked because R drops index 0.
  # seq_len(out_pos - 1) makes the intended range explicit (same elements).
  scenarios[[i]]$CMxMS <- c((scenarios[[i]][-out_pos,"equi_price"] - MC[seq_len(out_pos - 1)]) * scenarios[[i]][-out_pos,"equi_share"], NA)
}
# compute CMxMS for both Amstel and Heineken before and after merger
# Rows c(1,2,5) = the two Amstel brands + Heineken; rows 3:5 = Estrella + Heineken.
# Requires scenarios$five_brand_comp (see the fix where that scenario is stored).
CMxMS_full_comp <- sum(scenarios$five_full_comp[c(1,2,5),"CMxMS"])
CMxMS_before <- sum(scenarios$five_brand_comp[c(1,2,5),"CMxMS"])
CMxMS_after <- sum(scenarios$five_merge_comp[c(1,2,5),"CMxMS"])
CMxMS_monop <- sum(scenarios$five_monopoly[c(1,2,5),"CMxMS"])
CMxMS_before_estr <- sum(scenarios$five_brand_comp[c(3:5),"CMxMS"])
CMxMS_after_estr <- sum(scenarios$five_merge_comp_estr[c(3:5),"CMxMS"])
# Profit-proxy ratios across competitive regimes.
CMxMS_after / CMxMS_before
CMxMS_monop / CMxMS_full_comp
CMxMS_monop / CMxMS_after
CMxMS_after_estr / CMxMS_before_estr
########################################################
# Plotting
########################################################
# corrplot of betas for all 15 brands
colnames(beta_BC_LLMns) <- c("Budget", "Price", products)
# NOTE(review): windows() opens a Windows-only graphics device; use dev.new()
# for a platform-independent alternative.
windows()
cor(beta_BC_LLMns) %>%
  ggcorrplot(type = "lower", lab = TRUE)
# Correlation among the 5-brand subset used in the reduced-market scenarios.
cor(beta_BC_LLMns[,c(1,2,4,7,9,10,11)]) %>%
  ggcorrplot(type = "lower", lab = TRUE)
# histogram of all betas
beta_BC_LLMns %>%
  data.frame() %>%
  gather(key = "beer_brand") %>%
  ggplot(aes(value)) +
  geom_histogram() +
  facet_wrap(~beer_brand, scales = "free") +
  theme_bw()
# density plot of five brands (all in one)
beta_BC_LLMns[,c(4,7,9,10,11)] %>%
  data.frame() %>%
  gather(key = "beer_brand") %>%
  ggplot(aes(value)) +
  geom_density(aes(fill = beer_brand), position="identity", alpha = 0.3) +
  xlim(c(-10, 20)) +
  ylim(c(0, 0.095)) +
  scale_fill_manual(values = c("#FD0505", "#FF9A9A", "#000CD3", "#00C1EA", "green")) +
  theme_bw() +
  theme(legend.position = c(0.2, 0.8),
        legend.direction = "vertical")
# density plot of five brands (separate)
beta_BC_LLMns[,c(2,4,7,9,10,11)] %>%
  data.frame() %>%
  gather(key = "beer_brand") %>%
  ggplot(aes(value)) +
  geom_density(aes(fill = beer_brand), position="identity") +
  xlim(c(-10, 25)) +
  ylim(c(0, 0.13)) +
  scale_fill_manual(values = c("#FD0505", "#FF9A9A", "#000CD3", "#00C1EA", "green", "grey"), guide = FALSE) +
  facet_wrap(~beer_brand) +
  theme_bw()
# basic plot of one scenario
scenarios[["brand_comp"]] %>%
  melt() %>%
  ggplot(aes(x = reorder(product, value) , y = value)) +
  geom_bar(stat = "identity", position = "dodge") +
  coord_flip() +
  theme(axis.text.x = element_text(angle = 90)) +
  facet_wrap(~variable, scales = "free_x") +
  theme_bw()
# uses all competitive situations in the scenarios list and plots them side by side
# (reshape2::melt() on a named list labels the list dimension "L1", renamed below)
windows()
scenarios[c("brand_comp", "full_comp")] %>%
  melt() %>%
  rename(comp_scenario = L1) %>%
  ggplot(aes(x = reorder(product, value) , y = value, fill = comp_scenario)) +
  geom_bar(stat = "identity", colour="black", position = "dodge") +
  coord_flip() +
  labs(x = "Beer Brand",
       y = NULL) +
  facet_wrap(~variable, scales = "free_x") +
  scale_fill_brewer(palette = "Dark2") +
  theme_bw()
# only five brands #c("five_full_comp", "five_brand_comp", "five_monopoly", "five_merge_comp")
# NOTE: requires the "five_brand_comp" entry (see fix where it is stored).
scenarios[c("five_full_comp","five_brand_comp", "five_merge_comp", "five_monopoly")] %>%
  melt() %>%
  rename(comp_scenario = L1) %>%
  ggplot(aes(x = reorder(product, value) , y = value, fill = comp_scenario)) +
  geom_bar(stat = "identity", colour="black", position = "dodge") +
  coord_flip() +
  labs(x = "Beer Brand",
       y = NULL) +
  facet_wrap(~variable, scales = "free_x") +
  scale_fill_brewer(palette = "Dark2") +
  theme_bw()
# price-share scatterplot, w/o outside option (either for single scenario or all)
#scenarios[["brand_comp"]] %>%
scenarios[c("five_full_comp","five_brand_comp", "five_merge_comp", "five_monopoly")] %>%
  bind_rows() %>%
  filter(product != "Outside") %>%
  ggplot(aes(x = equi_price, y = equi_share)) +
  geom_point() +
  geom_smooth(method = "lm", se = FALSE) +
  stat_poly_eq(formula = y ~ x, aes(label = paste(..eq.label.., ..rr.label.., sep = "~~~~~~")),
               parse=TRUE,label.x.npc = "right",
               output.type = "expression") +
  theme_bw()
cor(scenarios[["brand_comp"]]$equi_price, scenarios[["brand_comp"]]$equi_share)
# bivariate density plot (two Amstel brand coefficients, beta columns 4 and 7)
hist(beta_BC_LLMns[,c(4,7)])
beta_BC_LLMns[,c(4,7)] %>%
  data.frame() %>%
  gather(key = "beer_brand") %>%
  ggplot(aes(value)) +
  geom_density(aes(fill = beer_brand), position="identity")
# 2-D density with the 45-degree line; column names are the make.names()
# versions of the product labels set on beta_BC_LLMns above.
data.frame(beta_BC_LLMns[,c(4,7)])%>%
  ggplot(aes_string(x = "Amstel.Extra.Lata.33.cl", y = "Amstel.Clasica.Lata.33.cl")) +
  stat_density_2d(aes(fill = ..level..), geom = "polygon")+
  scale_fill_continuous(low="lavenderblush", high="blue")+
  geom_abline(slope = 1) +
  labs(fill = "density") +
  #theme(legend.title = element_text("density")) +
  theme_bw()
|
4cfde652202fece848123b5201ef96d092b9100d | ba61cb6394979cc3430b3d5b8b5dd1136c6c7bf7 | /Clase17Abr2021/ej8.R | 035a9a025cd59f3bb55cc177fe3c1ae5702d4016 | [] | no_license | angpa/RugCGFIUBA | 95e1dece470fcf396ad656deb00d063e909c3b45 | 4a2d0ce0d6349afa1bcbbfee101cb7c8885b933e | refs/heads/main | 2023-04-11T03:41:23.003513 | 2021-04-19T20:51:34 | 2021-04-19T20:51:34 | 356,505,441 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 171 | r | ej8.R | x
# Demo of lowess() smoothing: draw 20 standard-normal points against x
# (x is assigned just above this chunk) and overlay the smoothed curve.
y <- rnorm(20)
y
# lowess() returns a list with components $x (sorted) and $y (fitted values).
lrf <- lowess(x,y)
lrf
plot(x,y)
lines(x,y)
# Plotting the lowess object directly uses its $x/$y pair.
plot(lrf)
lines(lrf)
plot(lrf$x,lrf$y)
lines(lrf)
# Same idea on the built-in 'cars' data set (speed vs stopping distance).
plot(cars,main="lowes(cars)")
lines(lowess(cars),col=2)
|
c6e3d7770216b3e73e1e3c70c4828a4781b0ee0d | 58078cdf345d7340534d5dbf05315f73486e67b8 | /year-2-projects/team-1/04 Sensitivity Analysis/01 precip.R | 78f1c2a69ddc6c9802112a0e17198c70de6102ef | [] | no_license | big-data-lab-umbc/cybertraining | d91de41ec6882b21f8f3c3cb236d13d06f651dba | 2179fb771dcb69d4f302f693b30e057c366f6be1 | refs/heads/master | 2022-07-25T19:59:57.666162 | 2022-07-13T12:03:02 | 2022-07-13T12:03:02 | 118,185,525 | 14 | 37 | null | 2020-09-04T22:01:31 | 2018-01-19T22:24:53 | Jupyter Notebook | UTF-8 | R | false | false | 4,965 | r | 01 precip.R | ## This file does some basic spatio temporal analysis of the precipitation data
# install.packages("lubridate")
# install.packages("TSA")
# install.packages("forecast")
# install.packages("RColorBrewer")
# install.packages("ggplot2")
# install.packages("gstat")
# install.packages("sp")
# install.packages("spatial")
library(lubridate)
library(TSA)
library(forecast)
library(RColorBrewer)
library(ggplot2)
library(gstat)
library(sp)
library(spatial)
library(ncf)
###################################
##################################
#setwd("D:/Google Drive/UMBC/2019 02 Spring/Cybertraining/Project/meteo_forcing")
#setwd("C:/Users/reetam/Google Drive/2019 02 Spring/Cybertraining/Project/meteo_forcing")
# Read all 387 meteorological forcing files; only column 1 (precipitation,
# 639 daily observations per grid point) is kept. Lat/long are parsed from
# fixed character positions in each file name.
file_names=list.files(pattern="forcing_*") #gets file names from working directory; can specify path
files=lapply( file_names , read.table ,header = F,sep = ' ',numerals = 'no.loss',stringsAsFactors = F) # gets files
for(i in 1:387)
{
  #files[[i]] = files[[i]][1:456,]
  files[[i]] = data.frame(files[[i]][,1]) # Just keep precip
  names(files[[i]])='precip' # name the variable
}
#names(files[[1]])
#dim(files[[1]])
lat = substr(file_names,9,15) # extract lat and long from the filenames vector
long = substr(file_names,17,24)
for(i in 1:387) # add lat and long to each element in list. might not be necessary
{
  files[[i]]$lat = rep(as.numeric(as.character(lat[i])),639) #makes lat and long numeric.
  files[[i]]$long = rep(as.numeric(as.character(long[i])),639)
}
#head(files[[2]])
#### Temporal Analysis #######
##############################
#create temporal data frame; each row is a grid point
# Per-grid-point summaries: mean0/sd0 over all days, mean/sd over wet days only,
# and 'zero' = fraction of dry days. NOTE: 'mean' and 'sd' shadow the base
# functions as variables; calls like mean(...) still resolve to base::mean
# because R looks up functions separately in call position.
mean=numeric(387);mean0=numeric(387);zero=numeric(387)
sd = numeric(387); sd0=numeric(387)
for(i in 1:387)
{
  mean0[i]=mean(files[[i]]$precip)
  mean[i]=mean(files[[i]]$precip[files[[i]]$precip>0])
  sd0[i]=sd(files[[i]]$precip)
  sd[i]=sd(files[[i]]$precip[files[[i]]$precip>0])
  zero[i]=sum(files[[i]]$precip==0)/639
}
params = data.frame(lat,long,mean,sd,mean0,sd0,zero)
params$lat = as.numeric(as.character(params$lat))
params$long = as.numeric(as.character(params$long))
head(params)
#write.csv(params,"heatmap.csv")
# Heat maps of the three summary statistics over the lat/long grid.
ggplot(params, aes(x=long,y=lat,fill=mean0))+geom_raster() + scale_fill_gradientn(colours=rainbow(7)) +
  labs(fill = 'Mean', x='Longitude', y='Latitude',title = 'Heat map of annual mean precipitation')
ggplot(params, aes(x=long,y=lat,fill=sd0))+geom_raster() + scale_fill_gradientn(colours=rainbow(7)) +
  labs(fill = 'Mean', x='Longitude', y='Latitude',title = 'Heat map of annual SD of precipitation')
ggplot(params, aes(x=long,y=lat,fill=zero))+geom_raster() + scale_fill_gradientn(colours=rainbow(7)) +
  labs(fill = 'Mean', x='Longitude', y='Latitude',title = 'Heat map of proportion of days with no rainfall')
# Fit an ARIMA model per grid point and map the (p,d,q) orders.
for(i in 1:387)
{
  params$ts.model[i]=as.character(auto.arima(ts(files[[i]]$precip))) #extract auto.arima model for each location
}
xtabs(~ts.model,data=params)
params$ts.model = as.factor(substr(params$ts.model,6,12)) # keep only the values of p,d,q
ggplot(params,aes(x=long,y=lat,label=ts.model,col=ts.model))+ geom_raster() +
  geom_text() + scale_color_brewer(palette = "Spectral") + scale_fill_gradientn(colors=terrain.colors(10)) +
  labs(col='(p,d,q)',x='Longitude',y='Latitude',title='ARIMA(p,d,q) models for annual precipitation' )
##### SPatial Analysis ##########
#################################
## Correlogram
# Wide frame: one row per grid point, one column per day.
# NOTE(review): dates spans 456 days, so sp.temp gets 2 + 456 = 458 columns,
# but the fill below writes into columns 3:641 (639 daily values) and the
# 'rain' loop reads 639 day columns — the date range and series length do
# not agree; verify which period (456 vs 639 days) is intended.
dates =seq(as.Date("2016-01-01"),as.Date("2017-03-31"),by='days')
sp.temp = data.frame(matrix(NA,nrow = 387,ncol = 458))
names(sp.temp) = c("lat","long",as.character(dates))
sp.temp$lat = params$lat
sp.temp$long = params$long
for(i in 1:387)
{
  sp.temp[i,3:641]=files[[i]][,1]
}
# Fraction of grid points with rain on each day.
rain=numeric(639)
for(i in 1:639)
  rain[i] = mean(sp.temp[,i+2]>0)
summary(rain)
sum(rain==0)
sum(rain<.3)
plot.ecdf(rain)
# using spatial - individual correlograms
cor1 = data.frame(x=params$long,y=params$lat,z=sp.temp[,31])
head(cor1)
topo.cor1 = surf.ls(2,cor1)   # degree-2 trend surface before correlogram
cor1 = correlogram(topo.cor1,25)
head(cor1)
#Get the first lag of correlogram at each data point
max.cor = numeric(456)
for(i in 1:456)
{
  temp = data.frame(x=params$long,y=params$lat,z=sp.temp[,i+2])
  topo.temp = surf.ls(2,temp)
  correl = correlogram(topo.temp,25)
  max.cor[i] = correl$y[2]   # correlation at the first non-zero lag
}
ggplot(as.data.frame(max.cor),aes(x=max.cor))+geom_histogram(bins=15) #hist of correlations from correlograms
boxplot(max.cor)
hist(max.cor)
# Distribution of wet-day precipitation at grid point 8.
loc8 = as.numeric(sp.temp[8,3:458])
hist(loc8[loc8>0],probability = T)
summary(loc8[0<loc8 & loc8<63])
# Using package ncf
# Multivariate spatial correlogram across all days (100 bootstrap resamples).
cor.mult=correlog(sp.temp$long,sp.temp$lat,as.matrix(sp.temp[,3:458]),increment = 1,latlon = T,resamp = 100)
plot(cor.mult)
#Variogram analysis (partial)
# Empirical variogram of mean0 (column 5 of params) using gstat.
params2=params[,c(1,2,5)]
coordinates(params2) = ~ long+lat
g = gstat(id='mean0',formula = mean0~1,data = params2)
expvar = variogram(g)
head(expvar)
plot(expvar)
ggplot(expvar,aes(x=dist,y=gamma,size=np)) + geom_point()
expvar2 <- variogram(g,width=3,cutoff=5,map=TRUE)
plot(expvar2)
plot(expvar2) |
013fbed8c3cdcdcf4179d7f4546c308c7aaddd47 | 832b90b6a7522f80371d141dbaf74bf8002aa2c7 | /Shiny/app.R | d2da354fb20fbb16fb31f24e373483f45518fa31 | [] | no_license | Lavin9/Happiness_level_predictor | fcb967d2cfc8b193e13775a49f5ac369c8c2bccf | 85eb590424ddc14637d9b12c532229d9d521b073 | refs/heads/master | 2021-08-12T08:22:35.000293 | 2017-11-14T15:47:42 | 2017-11-14T15:47:42 | 110,676,727 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,158 | r | app.R | rm(list=ls())
library(shiny)
library(readr)
library(dplyr)
# Load the 2015 World Happiness Report data, keep the predictors of interest,
# and rename the awkward column labels to syntactic names.
raw_data <- read_csv("../2015.csv")
raw_data <- arrange(raw_data, desc(Country))
dataset1 <- raw_data %>% select (Region, 'Happiness Score', 'Economy (GDP per Capita)', Family, 'Health (Life Expectancy)', Freedom, 'Trust (Government Corruption)', Generosity)
dataset1 <- dataset1 %>% rename(Happiness_score = 'Happiness Score')
dataset1 <- dataset1 %>% rename(GDP_per_Capita = 'Economy (GDP per Capita)')
dataset1 <- dataset1 %>% rename(Life_expectancy = 'Health (Life Expectancy)')
dataset1 <- dataset1 %>% rename(Trust_in_Government = 'Trust (Government Corruption)')
dataset1$Region <- factor(dataset1$Region)
# First 126 rows are used for fitting; the remainder is implicitly held out.
trainingData <- dataset1[1:126,]
# Linear model fitted once at app start-up; the server reuses it for every
# prediction request.
LM1 <- lm(Happiness_score ~ GDP_per_Capita + Family + Life_expectancy + Freedom + Trust_in_Government + Generosity + Region, trainingData)
# UI: a nested sidebarLayout splits the seven predictor inputs into two
# columns; the outer mainPanel shows the predicted score as text.
ui <- fluidPage(
  titlePanel("Predict happiness score"),
  sidebarLayout(
    sidebarPanel(
      sidebarLayout(
        sidebarPanel(
          textInput("title1", label = "put GDP score here", placeholder = "0"),
          textInput("title2", label = "put Family score here", placeholder = "0"),
          textInput("title3", label = "put Life expectancy score here", placeholder = "0"),
          textInput("title4", label = "put Freedom score here", placeholder = "0"),width = 5
        ),
        mainPanel(
          textInput("title5", label = "put Trust score here", placeholder = "0"),
          textInput("title6", label = "put Generosity score here", placeholder = "0"),
          textInput("title7", label = "put Region name here", placeholder = "Central and Eastern Europe"),
          actionButton("Start", label = "Go"), width = 5)), width = 9),
    mainPanel(textOutput('result')))
)
# Server: prediction only recomputes when the "Go" button is pressed.
# Text inputs are coerced to numeric; Region must exactly match a factor
# level of the training data or predict() will error.
server <- function(input, output) {
  string1 <- eventReactive(input$Start, predict(LM1, list(GDP_per_Capita = as.numeric(input$title1),Family = as.numeric(input$title2), Life_expectancy = as.numeric(input$title3), Freedom = as.numeric(input$title4), Trust_in_Government = as.numeric(input$title5), Generosity = as.numeric(input$title6), Region =input$title7)))
  output$result <- renderPrint({
    cat('The happiness level is: ',string1())})
}
shinyApp(ui = ui, server = server)
|
5b6e018b790a806d4e86350bdd44bd4a7aa1d37f | d1482841703dc5f4738382090dab1ff6135ecf65 | /Ajuste_Reserva_por_Municipios_e_Poligonais.R | 092bf3621d76be40ec3606d53d0a48844ed549d8 | [] | no_license | hal-delaserna/Rastreamento_de_Inconsistencias | aaba98b02ad63d8ee0c77a356aff152bc2caa0cc | f2b301c4b3282bafc4cdf16bda5bc8b99c5f617f | refs/heads/master | 2023-04-11T11:41:38.339538 | 2023-03-24T14:05:23 | 2023-03-24T14:05:23 | 252,294,622 | 0 | 0 | null | null | null | null | ISO-8859-1 | R | false | false | 5,903 | r | Ajuste_Reserva_por_Municipios_e_Poligonais.R | # rm(list = ls())
# carregamento ----
source(file = "./Funcoes_R/FUNA_Eventos_RRR_RFP.R")
source(file = "./Funcoes_R/Funcoes_Consumidores.R")
source(file = "./Funcoes_R/Funcoes_de_Formatacao_Estilo.R")
source(file = "./Funcoes_R/Funcoes_Producao.R")
source(file = "./Funcoes_R/Funcoes_Reserva.R")
source(file = "./Funcoes_R/Abatimento_Reserva_Producao.R")
source(file = "./Funcoes_R/Funcoes_VPM.R")
source(file = "./Funcoes_R/Municipios_inconsistencia.R")
source(file = "./carregamento_Bases_AMB_outras.r")
#_______________________________________________________________________________________________
#---------------------INCONSISTÊNCIA MUNICIPIOS--------------------------------------
# PROCESSOS COM SIGMINE = 1 & AMB >= 2 MUNICÍPIOS
inner_join(
filter(data.frame(
table(interseccao_poligonais_ibge$processo)
), Freq == 1),
geocod_AMB.inconsistencia_SIGMINE,
by = c("Var1" = "processo_sigmine")
)[, c(-1, -2, -4, -15, -16, -18)] %>% View()
#_____________ Preparando lista de inconsistência AMB X Sigmine
# AMB: processo <-> município (2011 a 2018)
processo.AMB_municipio <-
unique(left_join(unique(
reserva_AMB[!reserva_AMB$substancia.amb %in%
c("areia", "saibro", "brita e cascalho"),
c("processo", "municipio")]),
geocod, by = c("municipio")))
colnames(processo.AMB_municipio) <- c("processo.AMB","municipio.AMB", "geocod.AMB")
# Formato SigMine de Processo: sem 'ponto' ou zeros a esquerda
processo.AMB_municipio$processo_sigmine.AMB <- processo.AMB_municipio$processo %>%
gsub(pattern = "\\.", replacement = "") %>% gsub(pattern = "^0", replacement = "") %>%
gsub(pattern = "^0", replacement = "") %>% gsub(pattern = "^0", replacement = "")
# Inconsistência AMB X Sigmine
inconsistencia_AMB_X_Sigmine <-
left_join(
processo.AMB_municipio,
interseccao_poligonais_ibge,
by = c("processo_sigmine.AMB" = "processo"))
colnames(inconsistencia_AMB_X_Sigmine) <-
c("processo.AMB", "municipio.AMB", "geocod.AMB", "processo_sigmine.AMB",
"municipio.sigmine", "geocod.sigmine", "area_ha", "fase", "titular", "substancia.ral",
"uso", "uf", "areaDeg2", "perimetro", "poligonalDeg2")
inconsistencia_AMB_X_Sigmine <-
inconsistencia_AMB_X_Sigmine[,c("processo.AMB", "municipio.AMB",
"geocod.AMB", "processo_sigmine.AMB",
"municipio.sigmine", "geocod.sigmine",
"fase", "titular", "substancia.ral",
"uso", "area_ha", "areaDeg2", 'poligonalDeg2')]
# processos com NA são areia e brita
lista <- list()
for (i in 1:nrow(inconsistencia_AMB_X_Sigmine)) {
if (is.na(inconsistencia_AMB_X_Sigmine$geocod.sigmine[i]) == FALSE) {
if ((
inconsistencia_AMB_X_Sigmine$geocod.AMB[i] != inconsistencia_AMB_X_Sigmine$geocod.sigmine[i]
)) {
lista[[i]] <-
inconsistencia_AMB_X_Sigmine[i, c(
"processo.AMB", "municipio.AMB","geocod.AMB",
"municipio.sigmine","geocod.sigmine","fase","titular",
"substancia.ral", "area_ha", "areaDeg2", 'poligonalDeg2')]}}}
inconsistencia_AMB_X_Sigmine <-
arrange(do.call("rbind", lista), desc(titular))
inconsistencia_AMB_X_Sigmine$razaoArea <- round(
(inconsistencia_AMB_X_Sigmine$areaDeg2 / inconsistencia_AMB_X_Sigmine$poligonalDeg2), digits = 2)
inconsistencia_AMB_X_Sigmine$segmento_poligonal <- round(
(inconsistencia_AMB_X_Sigmine$area_ha * inconsistencia_AMB_X_Sigmine$razaoArea), digits = 2)
# delimitando segmento de poligonal > 25%
inconsistencia_AMB_X_Sigmine <-
inconsistencia_AMB_X_Sigmine[inconsistencia_AMB_X_Sigmine$razaoArea > 0.75,]
inconsistencia_AMB_X_Sigmine <-
left_join(inconsistencia_AMB_X_Sigmine,
reserva_AMB[reserva_AMB$ano == 2018, c("processo", "pareto")],
by = c("processo.AMB" = "processo"))
View(inconsistencia_AMB_X_Sigmine[,-c(3,5,10,11)])
# Anfibólito OK
# Bauxita OK
# Min de Aluminío OK
# Caulim OK
# Reserva visão ----
mina <- 'SÃO JOÃO DA BOA VISTA'
cpfcnpj = '.'
subsAMB <- '.'
processo <- '.'
FUNA_visao_RESERVA(subsAMB = subsAMB, processo = processo, cpfcnpj = cpfcnpj, mina = mina)
# medida + indicada + Inferida + lavrável ----
reserva_groupBY_SUBSTANCIA.AMB(processo = processo, mina = mina, reserva = 'medida')
reserva_groupBY_SUBSTANCIA.AMB(processo = processo, mina = mina, reserva = 'indicada')
reserva_groupBY_SUBSTANCIA.AMB(processo = processo, mina = mina, reserva = 'inferida')
reserva_groupBY_SUBSTANCIA.AMB(processo = processo, mina = mina, reserva = 'lavravel')
# Produção ------------------------------------------------------------------
producaoBRUTA_groupBY_PROCESSO(cpfcnpj = cpfcnpj, subsAMB = "")
# Consumidores ------------------------------------------------------------------
consumidoresMINA_busca(cpfcnpj = cpfcnpj)
consumidoresUSINA[consumidoresUSINA$cpfcnpj == cpfcnpj,] %>% View()
#--------------------- Abatimento - Produção Rerserva --------------------------------------
processo <- '.'
mina <- '.'
cpfcnpj <- '.'
subsAMB <- '.'
FUNA_Abatimento_Reserva_Producao(processo = processo,
subsAMB = subsAMB,
mina = mina,
cpfcnpj = cpfcnpj,
ano1 = '2011')
#--------------------- tela livre --------------------------------------
# ARDÓSIA: OK!
reserva_groupBY_SUBSTANCIA.AMB()[1,] %>% FUNA_BARPLOT()
FUNA_visao_RESERVA(subsAMB = 'ardosia')
FUNA_visao_RESERVA(processo = "820.276/1979", subsAMB = ".")
reserva_groupBY_SUBSTANCIA.AMB(processo = "821.014/1995")
|
6beea7e1244c3d0e8bab6846a346d7061e1b3786 | 890ad15b4c57b0ce350facd6c013d7dc423514f0 | /Lecture 4/Chapter_IV_a-b_syntax.R | e32bd16ee2f86a152ba390974aff69669d595c48 | [] | no_license | vegmer/R-Class | c81bb1870debf2227be99935f38729f9847dfea8 | 610d2ca140ae921ac6033fe11dfa6c7659b170dc | refs/heads/master | 2023-01-01T12:52:54.973049 | 2020-10-29T15:25:00 | 2020-10-29T15:25:00 | 308,371,127 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,909 | r | Chapter_IV_a-b_syntax.R | # ------------------------------------------------------- #
# Statistical computing and programming using R #
# with Michael Chajewski #
# ------------------------------------------------------- #
# mchajewski@collegeboard.org // http://www.chajewski.com #
# ------------------------------------------------------- #
# Section IV: Using R #
# ------------------------------------------------------- #
# ----------------- #
# Demonstration 4.1 #
# ----------------- #
# Interactive teaching script: each "Demonstration" section is meant to be
# run line by line at the console; many lines exist only to print results.
# Assignment forms: assign(), '=', and the recommended '<-'.
assign("obj1", 2+3)
obj1
obj2 = 5
obj3 <- 5 # Recommended
obj2; obj3
?eval
eval(2+sqrt(4))
get("obj3")
is.numeric(obj2)
is.character(obj2)
as.character(obj2)
paste(obj1, obj2, obj3)
# ----------------- #
# Demonstration 4.2 #
# ----------------- #
# Creating vectors, matrices, and data frames.
1:6
seq(from=2, to=5, by=.2)
sample(1:10, 100, replace=TRUE)
# NOTE(review): 'gender' is referenced here before its definition on the next
# line; run top-to-bottom this errors — presumably a deliberate lookup demo.
gender
gender <- c("M", "F", "M", "F", "F")
runif(10)
rnorm(10)
matrix(1:25, 5, 5)
t(matrix(1:25, nrow=5))
diag(10)
mat1 <- matrix(runif(15),,3)   # ncol=3, nrow inferred as 5
mat1
rbind(mat1, c(20, 20, 20))
cbind(gender,mat1)             # coerces everything to character
data.frame(cbind(gender,mat1))
dat1 <- data.frame(gender,mat1)
colnames(dat1) <- c("Gender", "Uniform 1", "Uniform 2", "Uniform 3")
rownames(dat1) <- c("Cas1", "Cas2", "Cas3", "Cas4", "Cas5")
dat1
proc.time()
now <- Sys.time()
difftime(Sys.time(),now)
func1 <- function(x) {x+2}
func1(5)
func1(1:6)    # functions are vectorized here
# ----------------- #
# Demonstration 4.3 #
# ----------------- #
# Inspecting object structure and attributes.
mode(gender)
typeof(gender)
class(gender)
length(gender)
mode(dat1)
typeof(dat1)
class(dat1)
length(dat1)   # number of columns for a data frame
dim(dat1)
object.size(dat1)
format.info(gender)
format.info(runif(10))
summary(gender)
levels(factor(gender))
#Multiple univariate regression with two predictors (x1 and x2)
# NOTE(review): after the renames above dat1's columns are "Gender",
# "Uniform 1", etc. — there are no X1/x2/gender columns, so this lm() call
# likely fails as written; confirm intended column names.
model1<-lm(dat1$X1~dat1$x2+dat1$gender)
?lm
summary(dat1)
names(dat1)
labels(dat1)
attributes(dat1)
attr(dat1, "created") <- date()   # attach a custom attribute
names(attributes(dat1))
dat1
dat1/2
t(dat1[,2:4])
# Matrix products require explicit coercion from data frame to matrix.
as.matrix(dat1[,2:4])%*%as.matrix(t(dat1[,2:4]))
invar <- as.matrix(t(dat1[,2:4]))%*%as.matrix(dat1[,2:4])
diag(invar)*diag(3)
# ----------------- #
# Demonstration 4.4 #
# ----------------- #
# Workspace inspection and management.
objects()
ls()
ls.str()
find("dat1")
exists("dat1")
apropos("dat")
apropos("dat1")
getAnywhere("dat1")
browseEnv()
exists("func1")
rm(func1); exists("func1")
options()
names(options())
options()[9]
# ----------------- #
# Demonstration 4.5 #
# ----------------- #
# Sorting, subsetting, and basic control flow.
dat1
dat1 <- dat1[order(dat1[,1]),]
sort(dat1$"Uniform 2")
sort(dat1$"Uniform 2", decreasing=TRUE)
dat1[which(dat1$Gender=="M"),]
table(dat1$Gender)
colSums(dat1[,2:4])
rowSums(dat1[,2:4])
ifelse(dat1$"Uniform 2" < .5, 1, 0)
ifelse(dat1$"Uniform 2" > .9, NA, dat1$"Uniform 2")
# Inside a loop, results are not auto-printed — hence collect them below.
for(i in 1:5) {sqrt(dat1[i,3])}
allsqrt <- c()
for(i in 1:5) {
  allsqrt <- c(allsqrt,sqrt(dat1[i,3]))
}
allsqrt
for(i in 1:5) {
  if(dat1[i,2] > .5) {
    dat1[i,1] <- NA
  }
}
dat1
apply(dat1[,2:4], c(1,2), function(x) {ifelse(x > .60, NA, x)})
colSums(dat1[,2:4])
# repeat{} runs until the break condition is met (random walk on column 3).
repeat{
  thiscell <- sample(1:5,1)
  dat1[thiscell,3] <- dat1[thiscell,3]+rnorm(1)
  if (sum(dat1[,3]) > 10) {break}
}
colSums(dat1[,2:4])
# ----------------- #
# Demonstration 4.6 #
# ----------------- #
# Installing, loading, and exploring add-on packages.
library()
getCRANmirrors()
names(getCRANmirrors())
install.packages("rela")
library(rela)
?rela
citation("rela")
ls("package:rela")
?paf
example(paf)
apropos("paf")
names(paf.belt)
summary(paf.belt)
# NOTE(review): 'paf.belt2' is not created anywhere visible here
# (example(paf) appears to produce 'paf.belt'); confirm intended object.
plot(paf.belt2)
detach("package:rela", unload = TRUE)
?rela
# Sourcing a local script brings its functions into the workspace.
source("D:/Programs/bivarfreqfunction.R")
objects()
findfreq
findfreq(rbeta(1000,2,4),rbeta(1000,3,1))
# ----------------- #
# Demonstration 4.7 #
# ----------------- #
# Reading external data: delimited text, CSV, fixed-width, and SPSS files.
?read.table
dat3 <- read.table("D:/Data/scores.txt")
dim(dat3)
names(dat3)
mode(dat3); class(dat3)
dat3[1:10,]
typeof(dat3[,1])
typeof(dat3[,2])
typeof(dat3[,3])
summary(dat3)
dat3[,1] <- factor(dat3[,1], levels=c(0,1), labels=c("yes","no"))
summary(dat3)
dat3[1:10,]
as.numeric(dat3[,1])
# Reading a CSV with read.table (wrong separator) vs read.csv (correct).
dat4 <- read.table("D:/Data/time_data.csv")
dim(dat4)
names(dat4)
mode(dat4); class(dat4)
dat4[1:10,]
dat4 <- read.csv("D:/Data/time_data.csv")
dim(dat4)
names(dat4)
mode(dat4); class(dat4)
dat4[1:10,]
typeof(dat4[,1])
typeof(dat4[,2])
typeof(dat4[,3])
class(dat4[,1])
summary(dat4)
?read.fwf
# Fixed-width read: negative widths skip that many characters.
dat5 <- read.fwf("D:/Data/IRT_report.txt",
                 skip=49, widths=rep(c(4,-3),11))
dat5
dat5 <- t(dat5)
dat5[,1]%*%t(dat5[,2])
dat6 <- read.fwf("D:/Data/IRT_report.txt",
                 skip=34, widths=list(c(-15,7,-3,7,-3,7,
                                        -3,7,-3,7,-3,7,-3,7,-3,7,-3,7,-3,7),
                                      c(-1)),n=3)
dim(dat6)
names(dat6)
mode(dat6); class(dat6)
dat6
typeof(dat6[,1])
rownames(dat6) <- c("Discrimination","Difficulty","Guessing")
summary(t(dat6))
install.packages("foreign")
library(foreign)
?read.spss
dat7 <- read.spss("D:/Data/band.sav")
dim(dat7)
length(dat7)
names(dat7)
mode(dat7); class(dat7)
dat7
dat7 <- read.spss("D:/Data/band.sav", to.data.frame=TRUE)
dim(dat7)
names(dat7)
mode(dat7); class(dat7)
dat7[1:10,]
# NOTE(review): typo — the function is install.packages();
# 'install.packaged' does not exist and this line errors.
install.packaged("RODBC")
library(RODBC)
?sqlFetch
# ----------------- #
# Demonstration 4.8 #
# ----------------- #
# Writing/serializing objects and reloading them.
mat3 <- matrix(runif(10000),100,100)
matlabs <- c()
for(j in 1:100){
  matlabs <- c(matlabs, paste("Item",j,sep=" "))
}
colnames(mat3) <- matlabs
mat3[1:3,1:3]
attr(mat3,"author") <- "Michael Chajewski"
object.size(mat3)
func3 <- function(x,y) {
  (runif(1)*x)/runif(1)*y
}
object.size(func3)
write.table(mat3, "C:/mat3.txt")
write.csv(mat3, "C:/mat3.csv")
dput(mat3, "C:/mat3_put.R")                       # one object as R code
dump(list=c("mat3","func3"), "C:/matfunc_dump.R") # several objects as R code
save(list=c("mat3","func3"), file="C:/mat3_save.R") # binary RData
exists("mat3");exists("func3")
rm(mat3, func3)
exists("mat3");exists("func3")
load(file="C:/mat3_save.R")
exists("mat3");exists("func3")
mode(mat3);dim(mat3)
rm(mat3, func3)
exists("mat3");exists("func3")
source("C:/matfunc_dump.R")
exists("mat3");exists("func3")
rm(mat3, func3)
mat4 <- dget("C:/mat3_put.R")   # dget restores under a new name
exists("mat3");exists("func3")
mode(mat4);dim(mat4)
# NOTE: attach() requires a list/data frame, so the first attach(mat4) on a
# matrix errors; mat4 is converted below and attached again.
attach(mat4)
mat4 <- data.frame(mat4)
attach(mat4)
summary(Item.100)
detach(mat4)
summary(Item.100)
attach(dat7)
names(dat7)
summary(sales)
|
bfbe4c72511b77a9c64dc83780e48e704a39b886 | 01c25eb17adde05509a5b925de164b7980cbb326 | /shiny_app/app_functions_oddpub.R | e970d12a0f01237e2c6a4235110f2ddd79cc8905 | [] | no_license | abannachbrown/dashboard | 4669389107ab03bc291c532777492dc54c06b9cd | c93d6ab41099b135a5cf2d782e5fddf7fe2ee076 | refs/heads/master | 2023-02-24T11:20:56.692527 | 2021-01-13T11:52:42 | 2021-01-13T11:52:42 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,788 | r | app_functions_oddpub.R | #----------------------------------------------------------------------
# oddpub data loading & preprocessing functions
#----------------------------------------------------------------------
# Aggregate manual open-data / open-code check results into per-year counts
# and percentages for the dashboard plots.
#
# data_table is expected to contain (at least) the columns: year,
# is_open_data, open_data_manual_check, open_code_manual_check,
# open_data_category_manual, open_code_category_manual.
# Returns a tibble with one row per year.
make_oddpub_plot_data <- function(data_table)
{
  # A positive manual check overrides the automated is_open_data flag.
  manually_confirmed <- !is.na(data_table$open_data_manual_check) & data_table$open_data_manual_check
  data_table$is_open_data[manually_confirmed] <- TRUE

  # Only the first listed category is kept, as there is no space in the
  # plots to show every category combination.
  first_listed <- function(categories) {
    categories %>% str_split(",") %>% map_chr(head, 1)
  }

  with_priorities <- data_table %>%
    mutate(open_data_category_priority = first_listed(open_data_category_manual),
           open_code_category_priority = first_listed(open_code_category_manual))

  # Per-year raw counts; 'total' counts papers with any (manual or automated)
  # open-data determination and is the denominator for all percentages.
  per_year <- with_priorities %>%
    group_by(year) %>%
    summarize(open_data_manual_count = sum(open_data_manual_check, na.rm = TRUE),
              open_data_neg_count = sum(!open_data_manual_check, na.rm = TRUE),
              open_data_NA_count = sum(is.na(open_data_manual_check), na.rm = TRUE),
              open_code_manual_count = sum(open_code_manual_check, na.rm = TRUE),
              open_code_neg_count = sum(!open_code_manual_check, na.rm = TRUE),
              open_code_NA_count = sum(is.na(open_code_manual_check), na.rm = TRUE),
              OD_field_specific_count = sum(open_data_category_priority == "field-specific repository", na.rm = TRUE),
              OD_general_purpose_count = sum(open_data_category_priority == "general-purpose repository", na.rm = TRUE),
              OD_supplement_count = sum(open_data_category_priority == "supplement", na.rm = TRUE),
              OC_github_count = sum(open_code_category_priority == "github", na.rm = TRUE),
              OC_other_count = sum(open_code_category_priority == "other repository/website", na.rm = TRUE),
              OC_supplement_count = sum(open_code_category_priority == "supplement", na.rm = TRUE),
              total = sum(!is.na(is_open_data) | (open_data_manual_check == TRUE), na.rm = TRUE))

  # Percentages (one decimal place) derived from the counts above; each
  # expression only references summarize() output, so a single mutate call
  # is equivalent to the original chain of per-column mutates.
  per_year %>%
    mutate(open_data_manual_perc = round(open_data_manual_count/total * 100, 1),
           open_code_manual_perc = round(open_code_manual_count/total * 100, 1),
           OD_field_specific_perc = round(OD_field_specific_count/total * 100, 1),
           OD_general_purpose_perc = round(OD_general_purpose_count/total * 100, 1),
           OD_supplement_perc = round(OD_supplement_count/total * 100, 1),
           OC_github_perc = round(OC_github_count/total * 100, 1),
           OC_other_perc = round(OC_other_count/total * 100, 1),
           OC_supplement_perc = round(OC_supplement_count/total * 100, 1))
}
|
c322d26c67a21b3409387cbf7dfd703925e766b2 | 39dd176988177f1b4d0f5c8659042fd2b09232c9 | /Modeling and Simulation/Queueing Models/R Code/Queueing Models.R | 0580b9afa42dd36b5a285d785d43df778717fb2e | [] | no_license | rsalaza4/R-for-Industrial-Engineering | 922719bc17a03a6c146b9393c6a9766e65dc3be4 | b22524e9e6811041fa8da4d3bb331d07e3bd447c | refs/heads/master | 2023-01-14T09:57:33.450167 | 2022-12-22T01:15:17 | 2022-12-22T01:15:17 | 235,593,780 | 49 | 24 | null | null | null | null | UTF-8 | R | false | false | 4,088 | r | Queueing Models.R | ### QUEUEING MODELS ###
# Import queueing package
library(queueing)
# lambda = arrival rate
# mu = service rate
# c = number of servers
# k = system capacity
# n = number of customers in the system from which you want to obtain its probabilities. Put n = 0 for an idle probability.
# method = method of computation of the probabilities of k (system capacity) customers down:
# With method = 0, the exact results are calculated using the formal definition
# With method = 1, aproximate results are calculated using Stirling aproximation of factorials and logarithms
# Set queue model input parameters (select one)
# Template: fill in the blank arguments for the model you need — the empty
# 'lambda = ,' slots parse as missing arguments and must be supplied before
# running. See the worked M/M/1 example at the bottom of the file.
input_mm1 <- NewInput.MM1(lambda = , mu = , n = )
input_mm1k <- NewInput.MM1K(lambda = , mu = , k = )
input_mm1kk <- NewInput.MM1KK(lambda = , mu = , k = , method = )
input_mmc <- NewInput.MMC(lambda = , mu = , c = , method = )
input_mmcc <- NewInput.MMCC(lambda = , mu = , c = , method = )
input_mmck <- NewInput.MMCK(lambda = , mu = , c = , k = )
input_mmckk <- NewInput.MMCKK(lambda = , mu = , c = , k = )
input_mmckm <- NewInput.MMCKM(lambda = , mu = , c = , k = , method = )
input_mminf <- NewInput.MMInf(lambda = , mu = , n = )
input_mm1infkk <- NewInput.MMInfKK(lambda = , mu = , k = )
# Create queue class object (select one)
output_mm1 <- QueueingModel(input_mm1)
output_mm1k <- QueueingModel(input_mm1k)
output_mm1kk <- QueueingModel(input_mm1kk)
output_mmc <- QueueingModel(input_mmc)
output_mmcc <- QueueingModel(input_mmcc)
output_mmck <- QueueingModel(input_mmck)
output_mmckk <- QueueingModel(input_mmckk)
output_mmckm <- QueueingModel(input_mmckm)
output_mminf <- QueueingModel(input_mminf)
output_mminfkk <- QueueingModel(input_mminfkk)
# Get queue model report (select one)
Report(output_mm1)
Report(output_mm1k)
Report(output_mm1kk)
Report(output_mmc)
Report(output_mmcc)
Report(output_mmck)
Report(output_mmckk)
Report(output_mmckm)
Report(output_mminf)
Report(output_mminfkk)
# Get queue model summary (select one)
summary(output_mm1)
summary(output_mm1k)
summary(output_mm1kk)
summary(output_mmc)
summary(output_mmcc)
summary(output_mmck)
summary(output_mmckk)
summary(output_mmckm)
summary(output_mminf)
summary(output_mminfkk)
# Poisson Distribution Plot for Arrival Process
# NOTE(review): the three plot templates below read from 'input_mm', which is
# never created — substitute the chosen input object (e.g. input_mm1) before
# running, as the worked example at the bottom does.
curve(dpois(x, input_mm$lambda),
      from = 0,
      to = 20,
      type = "b",
      lwd = 2,
      xlab = "Number of customers",
      ylab = "Probability",
      main = "Poisson Distribution for Arrival Process",
      ylim = c(0, 0.25),
      n = 21)
# Exponential Distribution Plot for Interarrival Time
curve(dexp(x, rate = 1/input_mm$lambda),
      from = 0,
      to = 10,
      type = "l",
      lwd = 2,
      xlab = "Interarrival Time",
      ylab = "Probaility",
      main = "Exponential Distribution for Interarrival Time",
      ylim = c(0, 1))
abline(h = 0)
# Exponential Distribution Plot for Service Process
curve(dexp(x, rate = input_mm$mu),
      from = 0,
      to = 5,
      type = "l",
      lwd = 2,
      xlab = "Service Waiting Time",
      ylab = "Probaility",
      main = "Exponential Distribution for Service Process",
      ylim = c(0, 1))
abline(h = 0)
# M/M/1 EXAMPLE
# Self-contained worked example: arrival rate 3, service rate 4.
library(queueing)
input_mm1 <- NewInput.MM1(lambda = 3, mu = 4, n = 0)
output_mm1 <- QueueingModel(input_mm1)
Report(output_mm1)
summary(output_mm1)
curve(dpois(x, input_mm1$lambda),
      from = 0,
      to = 20,
      type = "b",
      lwd = 2,
      xlab = "Number of customers",
      ylab = "Probability",
      main = "Poisson Distribution for Arrival Process",
      ylim = c(0, 0.25),
      n = 21)
curve(dexp(x, rate = 1/input_mm1$lambda),
      from = 0,
      to = 10,
      type = "l",
      lwd = 2,
      xlab = "Interarrival Time",
      ylab = "Probaility",
      main = "Exponential Distribution for Interarrival Time",
      ylim = c(0, 1))
abline(h = 0)
curve(dexp(x, rate = input_mm1$mu),
      from = 0,
      to = 5,
      type = "l",
      lwd = 2,
      xlab = "Service Waiting Time",
      ylab = "Probaility",
      main = "Exponential Distribution for Service Process",
      ylim = c(0, 1))
abline(h = 0)
|
7e2b9cf405c2541c26255ae78538b70e42405c5b | 5e1ff69f4317d74e74638daf3e74d6615e0f4441 | /plot3.R | 1821a54159fdc63c51c4cc573a50a8fa148b85bf | [] | no_license | alexgleon/ExData_Plotting1 | ef68663ab14eadf66d88bae6c4609ea928c5117e | 6f27481b0b25bbae5a0ca0b8055d01152de8daff | refs/heads/master | 2021-01-20T00:34:18.500672 | 2017-04-25T04:25:19 | 2017-04-25T04:25:19 | 89,152,602 | 0 | 0 | null | 2017-04-23T15:37:31 | 2017-04-23T15:37:31 | null | UTF-8 | R | false | false | 1,226 | r | plot3.R | #23/04/2017 Oliver González
#Peer-graded Assignment: Week1 Peer-graded Assignment: Course Project 1
#output: plot3.png
#Setting working Directory and File
setwd("~/RWkngdrctry/plotting")
File <- "household_power_consumption.txt"
# Reading, naming and subsetting data
# NOTE: 'source' and 'subset' shadow the base functions of the same name for
# the rest of this session.
source <- read.table(File, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
subset <- source[source$Date %in% c("1/2/2007","2/2/2007") ,]
# Transforming the Date and Time vars from characters into objects of type Date and POSIXlt respectively
datetime <- strptime(paste(subset$Date, subset$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
globalActivePower <- as.numeric(subset$Global_active_power)
subMetering1 <- as.numeric(subset$Sub_metering_1)
subMetering2 <- as.numeric(subset$Sub_metering_2)
subMetering3 <- as.numeric(subset$Sub_metering_3)
# Overlay the three sub-metering series on one time axis with a legend.
plot(datetime, subMetering1, type="l", ylab="Energy Submetering", xlab="")
lines(datetime, subMetering2, type="l", col="red")
lines(datetime, subMetering3, type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue"))
## Saving to file
# Copy the on-screen plot to a 480x480 PNG (dev.off() follows below).
dev.copy(png, file="plot3.png", height=480, width=480)
dev.off() |
dea01788b7d2a15640aac8e774eb7c3c8ea8b820 | 0d48ee066bbcb8fdb2985d78d8dda8436b85cd64 | /Conferences/fcsm20/code/frequentist_method.R | 423df717c046fd8827001a76d4a7385b99192d2d | [] | no_license | sctyner/talks | 4b9cad85d3fd6186f15065c14135c85de2010faa | 39a3ddf86476cb4220e66339e61329869403fd5b | refs/heads/master | 2022-02-11T08:57:09.561002 | 2022-01-24T00:20:31 | 2022-01-24T00:20:31 | 272,045,415 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 732 | r | frequentist_method.R | # frequentist method: glm
# Frequentist estimate of the probability of a male birth from yearly counts.
library(tidyverse)
male_births <- read_csv("dat/arbuthnot_data.csv")
# Binomial response as a two-column matrix: (successes, failures) per row/year.
successes <- male_births$n_births
failures <- male_births$total_births - male_births$n_births
data_mat <- cbind(successes, failures)
library(lme4)
# Random-intercept logistic model:
# logit(p_i) = \mu + alpha_i
# alph_i ~ Normal(0, sigma^2)
mod <- glmer(data_mat ~ 1+ (1|male_births$year), family = "binomial")
# summary(mod)
# predict(mod, type = "response")
# Back-transform the fixed intercept from the logit to the probability scale.
mu <- fixef(mod)
p_mean <-( exp(mu) / (1 + exp(mu)) )
# p_mean
# Year-level fitted probabilities (these include the random effects).
preds <- predict(mod, type = "response")
preds2 <- predict(mod, type = "response")
# NOTE(review): preds and preds2 come from identical calls, so the scatter
# below necessarily lies on the identity line -- confirm what was intended.
qplot(preds, preds2) + geom_abline()
# plain ole glm
# Pooled binomial GLM (no per-year random effect) for comparison.
mod2 <- glm(data_mat ~ 1, family = "binomial")
mu2 <- coef(mod2)
p_mean2 <-( exp(mu2) / (1 + exp(mu2)) )
|
50bd19fe05f7984217669d086eb4a94cac6e09ea | bd24f7e679bb7244c7051d9b8e36a45dfc6a4c1a | /man/plotDP.Rd | 79e6d7aaa4a9f7fb9dbb9564ed60c60d2605dcb1 | [] | no_license | sxz155/PFDA | 91f0cca917358688a6ed26bb71199645c8d56f76 | bd6e7a6630b33f21f5c6967e997bb5f5d4c0ff63 | refs/heads/master | 2020-07-25T11:04:14.542604 | 2016-12-14T18:09:21 | 2016-12-14T18:09:21 | 75,235,351 | 1 | 1 | null | null | null | null | UTF-8 | R | false | true | 701 | rd | plotDP.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotDP.R
\name{plotDP}
\alias{plotDP}
\title{Plotting RKHS smoothing mean and its DP version simultaneously}
\usage{
plotDP(DP, text.main = "", legend.cex = 1.4, legend.loc = "topleft",
seg.lin = 0.75, text.font = 2, text.width = 0.15, xlab = "",
ylab = "", extra_range = c(0, 0), lty.p = 3, lwd.p = 3,
legend.text = c("sample functions", "RKHS mean", "DP RKHS mean"),
col = c("grey", "green", "red"))
}
\arguments{
\item{DP}{output of \link{DP.RKHS} or \link{DP.Sim} list}
}
\description{
This function plots the original data, the RKHS smoothing mean, and its
differentially private version together in the same plot.
}
|
8fe8de94754fa67668da89d710acd6ab817575ad | acd16314c8165ae150d7c6d4e57fb2e72847417f | /Test EventHandler/runApplication.R | 3553af641bab1827e594f01c13dcb0fb165f2962 | [] | no_license | gideonllee/R-Experiments | 7086666d2f58096a2bb482c7d218be14990a8344 | 37f0edb8ec064d0a57abdebbadb814d2098e0c79 | refs/heads/master | 2021-01-23T19:46:48.004899 | 2015-09-09T05:15:24 | 2015-09-09T05:15:24 | 40,285,362 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 102 | r | runApplication.R | library(shiny)
# Load the UI and server definitions into the current session.
source("ui.R")
source("server.R")
# Assemble and launch the Shiny app; runApp() blocks until the app exits.
app <- shinyApp(ui,server)
runApp(app)
# NOTE(review): the bare `showReactLog` on the next line only auto-prints
# the function object after the app closes; presumably showReactLog() with
# parentheses was intended -- confirm.
showReactLog |
1ede12654c22875d088ce645e654d2c5960191b7 | 4a2d7acafbb25e9ea3712d611a52e69d889cab53 | /man/tidyfield.Rd | 33b2b00cf2731f065fb2f8a347843c4dffc91e9f | [
"MIT"
] | permissive | tjebo/perimetry | 72c7386964d03fd15ddc50794c134bfccf2b9f00 | 1a0bccdb401c3430b41d17b1385bb615ead2bca2 | refs/heads/master | 2023-07-13T15:56:01.652583 | 2021-07-30T15:41:37 | 2021-07-30T15:41:37 | 390,404,349 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 361 | rd | tidyfield.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tidyfield.R
\name{tidyfield}
\alias{tidyfield}
\title{The tidyfield object}
\description{
The tidyfield object is the central object of the perimetry
package. It is designed to allow for efficient and fast usage of
perimetry data in tidy models. It is essentially a nested tibble.
}
|
0c71447be3d8c4f1c6a9bfddf7dd2b91daaacbb8 | 8fd5b7c83913adb9151b70ea9129bec0db876da3 | /R-intro.R | 47c3f3c904c615e8d37bab0253fb8a87c6e88f15 | [] | no_license | rappuccino/Carpenter-R | 159f11674e74ef3135ad3587ecb9bd0522dc9d6d | ecb57b7677ab5df897d561d475e033e02d02737e | refs/heads/master | 2020-03-07T07:12:23.736792 | 2018-03-29T21:58:10 | 2018-03-29T22:01:25 | 127,342,914 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 242 | r | R-intro.R | # SWC R Intro #
# Print the current working directory.
getwd()
# Mixing numbers with a character value coerces everything to character.
quiz_vector_1 <- c(2,6,'3')
# Character outranks logical in the coercion hierarchy -> character vector.
quiz_vector_2 <- c('a', TRUE)
# Logical is promoted to numeric (TRUE becomes 1) -> double vector.
quiz_vector_3 <- c(0, TRUE)
# typeof() reports the underlying storage type:
typeof(quiz_vector_1) # "character"
typeof(quiz_vector_2) # "character"
typeof(quiz_vector_3) # "double"
# class() reports the higher-level class:
class(quiz_vector_1) # "character"
class(quiz_vector_2) # "character"
class(quiz_vector_3) # "numeric"
|
ccef833221c5a8028c521cf41033715bf2ac0179 | 17ca53a3827be35bbe7b0b1e88decbeed2f9eded | /R/crestObj.init.R | 161fe0179e8bb1bfadcb1b42a5a8ee656b1e45c7 | [
"MIT"
] | permissive | mchevalier2/crestr | 190afcd9d563f92afe51394b0dad752496ce3e5b | e1978059c243f61475055c1f2ff08d5d8b601079 | refs/heads/master | 2023-08-30T03:59:50.319316 | 2023-08-25T16:00:36 | 2023-08-25T16:00:36 | 269,097,345 | 9 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,559 | r | crestObj.init.R | #' Create a \code{crestObj} object.
#'
#' Creates a \code{crestObj} object with all default parameters.
#'
#' @param taxa.name A vector that contains the names of the taxa to study.
#' @param pse A pollen-Species equivalency table. See \code{\link{createPSE}} for
#' details.
#' @param taxaType A numerical index (between 1 and 6) to define the type of
#' palaeoproxy used: 1 for plants, 2 for beetles, 3 for chironomids,
#' 4 for foraminifers, 5 for diatoms and 6 for rodents. The example
#' dataset uses taxaType=0 (pseudo-data). Default is 1.
#' @param climate A vector of the climate variables to extract. See
#' \code{\link{accClimateVariables}} for the list of accepted values.
#' @param df A data frame containing the data to reconstruct (counts,
#' percentages or presence/absence data).
#' @param x.name A string describing the x axis (e.g. 'Sample Name', 'Age',
#' 'Depth').
#' @param x The name, age or depth of the rows of df (the samples).
#' @param xmn,xmx,ymn,ymx The coordinates defining the study area.
#' @param elev_min,elev_max Parameters to only selected grid cells with an
#' elevation higher than elev_min or lower than elev_max (default is
#' '\code{NA} ).
#' @param elev_range Parameters discard the grid cell with a high elevation
#' range (default is \code{NA}).
#' @param year_min,year_max The oldest and youngest occurrences accepted
#' (default is 1900-2021).
#' @param nodate A boolean to accept occurrences without a date (can overlap
#' with occurrences with a date; default \code{TRUE}).
#' @param type_of_obs The type of observation to use in the study. 1: human
#' observation, 2: observation, 3: preserved specimen, 4: living specimen,
#' 5: fossil specimen, 6: material sample, 7: machine observation, 8:
#' literature, 9: unknown (Default \code{c(1, 2, 3, 8, 9)})
#' @param dbname The name of the data source database.
#' @param continents A vector of the continent names defining the study area.
#' @param countries A vector of the country names defining the study area.
#' @param basins A vector of the ocean names defining the study area.
#' @param sectors A vector of the marine sector names defining the study area.
#' @param realms A vector of the studied botanical realms defining the study area.
#' @param biomes A vector of the studied botanical biomes defining the study area.
#' @param ecoregions A vector of the studied botanical ecoregions defining the
#' study area.
#' @param distributions A dataframe containing the presence records of the
#' studied proxies and their associated climate values.
#' @param minGridCells The minimum number of unique presence data necessary to
#' estimate a species' climate response. Default is 20.
#' @param weightedPresences A boolean to indicate whether the presence records
#' should be weighted. Default is \code{FALSE}.
#' @param bin_width The width of the bins used to correct for unbalanced climate
#' state. Use values that split the studied climate gradient in
#' 15-25 classes (e.g. 2°C for temperature variables). Default is 1.
#' @param shape The imposed shape of the species \code{pdfs}. We recommend using
#' 'normal' for temperature variables and 'lognormal' for the
#' variables that can only take positive values, such as
#' precipitation or aridity. Default is 'normal' for all.
#' @param selectedTaxa A data frame assigns which taxa should be used for each
#' variable (1 if the taxon should be used, 0 otherwise). The colnames
#' should be the climate variables' names and the rownames the taxa
#' names. Default is 1 for all taxa and all variables.
#' @param npoints The number of points to be used to fit the \code{pdfs}. Default 200.
#' @param geoWeighting A boolean to indicate if the species should be weighting
#' by the square root of their extension when estimating a genus/family
#' level taxon-climate relationships.
#' @param climateSpaceWeighting A boolean to indicate if the species \code{pdfs}
#' should be corrected for the modern distribution of the climate space
#' (default \code{TRUE}).
#' @param climateSpaceWeighting.type A correction factor for the clame space
#' weighting correction to limit the edge effects. Either 'linear'
#' (default), 'sqrt' or 'log'.
#' @param presenceThreshold All values above that threshold will be used in the
#' reconstruction (e.g. if set at 1, all percentages below 1 will be set
#' to 0 and the associated presences discarded). Default is 0.
#' @param taxWeight One value among the following: 'originalData',
#' 'presence/absence', 'percentages' or 'normalisation' (default).
#' @param uncertainties A (vector of) threshold value(s) indicating the error
#' bars that should be calculated (default both 50 and 95% ranges).
#' @return A \code{crestObj} object that is used to store data and information
#' for reconstructing climate
#' @export
#' @seealso See \code{vignette('technicalities')} for details about the structure
#' of the object. See also \url{https://gbif.github.io/parsers/apidocs/org/gbif/api/vocabulary/BasisOfRecord.html}
#' for a detailed explanation of the types of observation.
crestObj <- function(taxa.name, taxaType, climate,
                     pse = NA, dbname = NA,
                     continents = NA, countries = NA,
                     basins = NA, sectors = NA,
                     realms = NA, biomes = NA, ecoregions = NA,
                     xmn = NA, xmx = NA, ymn = NA, ymx = NA,
                     elev_min = NA, elev_max = NA, elev_range = NA,
                     year_min = 1900, year_max = 2021, nodate = TRUE,
                     type_of_obs = c(1, 2, 3, 8, 9),
                     df = NA, x = NA, x.name = "",
                     minGridCells = 20, weightedPresences = FALSE,
                     bin_width = NA,
                     shape = NA,
                     npoints = 200,
                     geoWeighting = TRUE,
                     climateSpaceWeighting = TRUE,
                     climateSpaceWeighting.type = 'linear',
                     selectedTaxa = NA,
                     distributions = NA,
                     presenceThreshold = 0,
                     taxWeight = "normalisation",
                     uncertainties = c(0.5, 0.95)) {
    # Touch the required arguments so a missing one fails immediately with
    # R's standard "argument ... is missing, with no default" error.
    if (base::missing(taxa.name)) taxa.name
    if (base::missing(taxaType)) taxaType
    if (base::missing(climate)) climate
    # Default bin widths: one unit-wide bin per climate variable. The guard
    # is length-safe: the previous `if (is.na(bin_width))` broke with
    # "condition has length > 1" when the caller supplied the documented
    # data-frame input, because is.na() on a data frame returns a matrix.
    if (!is.data.frame(bin_width) && length(bin_width) == 1 && is.na(bin_width)) {
        bin_width <- as.data.frame(matrix(rep(1, length(climate)), ncol = 1))
        rownames(bin_width) <- climate
    }
    # Default pdf shape: 'normal' for every climate variable (same guard).
    if (!is.data.frame(shape) && length(shape) == 1 && is.na(shape)) {
        shape <- as.data.frame(matrix(rep("normal", length(climate)), ncol = 1))
        rownames(shape) <- climate
    }
    # Raw user inputs: the data to reconstruct and the taxa selection.
    inputs <- list(
        df = df,
        taxa.name = taxa.name,
        x = x,
        pse = pse,
        selectedTaxa = selectedTaxa,
        x.name = x.name
    )
    # All run parameters: study area, data filters and fitting options.
    parameters <- list(
        climate = climate,
        taxaType = taxaType,
        xmn = xmn,
        xmx = xmx,
        ymn = ymn,
        ymx = ymx,
        elev_min = elev_min,
        elev_max = elev_max,
        elev_range = elev_range,
        year_min = year_min,
        year_max = year_max,
        nodate = nodate,
        type_of_obs = type_of_obs,
        continents = continents,
        countries = countries,
        basins = basins,
        sectors = sectors,
        realms = realms,
        biomes = biomes,
        ecoregions = ecoregions,
        taxWeight = taxWeight,
        minGridCells = minGridCells,
        weightedPresences = weightedPresences,
        bin_width = bin_width,
        shape = shape,
        npoints = npoints,
        geoWeighting = geoWeighting,
        climateSpaceWeighting = climateSpaceWeighting,
        climateSpaceWeighting.type = climateSpaceWeighting.type,
        presenceThreshold = presenceThreshold,
        uncertainties = uncertainties
    )
    # Modelling slots are placeholders that later workflow steps fill in.
    modelling <- list(taxonID2proxy = NA, climate_space = NA, pdfs = NA, weights = NA, xrange = NA, distributions = distributions)
    reconstructions <- list()
    misc <- list(dbname = dbname, stage = 'init')
    value <- list(
        inputs = inputs,
        parameters = parameters,
        modelling = modelling,
        reconstructions = reconstructions,
        misc = misc
    )
    # class can be set using class() or attr() function
    attr(value, "class") <- "crestObj"
    value
}
|
9a5dd0f13bfd15b53e58b75a0bd88eec066940c2 | 36e7a3ab63a73a82216f948bcfae6f2e5016ac77 | /R/languageserver.R | d1ffb9915105832aed68c2cfeb8265949e1301b4 | [] | no_license | MJimitater/languageserver | 7f6da363d6b6eb701641d68de918de93274b0112 | 1fa3008b78bdee123d5035da1e401da95a321663 | refs/heads/master | 2023-08-18T10:55:13.722049 | 2021-10-21T14:09:48 | 2021-10-21T14:09:48 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,694 | r | languageserver.R | #' @useDynLib languageserver
#' @importFrom R6 R6Class
#' @import xml2
#' @details
#' An implementation of the Language Server Protocol for R
"_PACKAGE"
#' The language server
#'
#' Describe the language server and how it interacts with clients.
#' @noRd
LanguageServer <- R6::R6Class("LanguageServer",
    inherit = LanguageBase,
    public = list(
        # TRUE when the server talks over a tcp socket (set in initialize).
        tcp = FALSE,
        # Client-facing input/output connections (stdio, or one tcp socket).
        inputcon = NULL,
        outputcon = NULL,
        # Set to TRUE to make run() terminate on its next iteration.
        exit_flag = NULL,
        # NOTE(review): the fields below are not assigned by the methods in
        # this file -- presumably populated by handlers defined elsewhere
        # (e.g. the `initialize` request handler); confirm before relying on
        # them.
        documents = NULL,
        workspace = NULL,
        processId = NULL,
        rootUri = NULL,
        rootPath = NULL,
        initializationOptions = NULL,
        ClientCapabilities = NULL,
        ServerCapabilities = NULL,
        # Background task managers created in initialize().
        diagnostics_task_manager = NULL,
        parse_task_manager = NULL,
        resolve_task_manager = NULL,
        # Per-uri reply queues, created lazily in text_sync().
        pending_replies = NULL,
        # Open the client connection (stdio by default, tcp when `port` is
        # non-NULL), then set up the worker pools and task managers.
        initialize = function(host, port) {
            if (is.null(port)) {
                logger$info("connection type: stdio")
                outputcon <- stdout()
                # note: windows doesn't support `blocking = FALSE`
                # we use `PeekNamedPipe` in c to mimic non-blocking reading
                inputcon <- file("stdin", open = "rb", blocking = FALSE)
            } else {
                self$tcp <- TRUE
                logger$info("connection type: tcp at ", port)
                inputcon <- socketConnection(host = host, port = port, open = "r+b")
                logger$info("connected")
                outputcon <- inputcon
            }
            self$inputcon <- inputcon
            self$outputcon <- outputcon
            # Pool size from R_LANGSVR_POOL_SIZE; default is half the cores,
            # clamped to [1, 3]. A size of 0 disables the session pools.
            cpus <- parallel::detectCores()
            pool_size <- as.integer(
                Sys.getenv("R_LANGSVR_POOL_SIZE", min(max(floor(cpus / 2), 1), 3)))
            # parse pool
            parse_pool <- if (pool_size > 0) SessionPool$new(pool_size, "parse") else NULL
            # diagnostics is slower, so use a separate pool
            diagnostics_pool <- if (pool_size > 0) SessionPool$new(pool_size, "diagnostics") else NULL
            self$parse_task_manager <- TaskManager$new("parse", parse_pool)
            self$diagnostics_task_manager <- TaskManager$new("diagnostics", diagnostics_pool)
            # no pool for resolve task
            # resolve task require a new session for every task
            self$resolve_task_manager <- TaskManager$new("resolve", NULL)
            self$pending_replies <- collections::dict()
            super$initialize()
        },
        # Release the client connection when the object is collected.
        finalize = function() {
            close(self$inputcon)
            super$finalize()
        },
        # One pass of the event loop: run pending background tasks, reap
        # finished ones, and poll the workspace's namespace file when a
        # workspace root is known.
        process_events = function() {
            self$diagnostics_task_manager$run_tasks()
            self$diagnostics_task_manager$check_tasks()
            self$parse_task_manager$run_tasks()
            self$parse_task_manager$check_tasks()
            self$resolve_task_manager$run_tasks()
            self$resolve_task_manager$check_tasks()
            if (length(self$rootPath) && !is.null(self$workspace)) {
                self$workspace$poll_namespace_file()
            }
        },
        # Schedule background work for a document: lint it (diagnostics)
        # and/or parse it, optionally after `delay` seconds.
        text_sync = function(
            # TODO: move it to Workspace!?
            uri, document, run_lintr = FALSE, parse = FALSE, delay = 0) {
            # First sync for this uri: create its empty reply queues.
            if (!self$pending_replies$has(uri)) {
                self$pending_replies$set(uri, list(
                    `textDocument/documentSymbol` = collections::queue(),
                    `textDocument/foldingRange` = collections::queue(),
                    `textDocument/documentLink` = collections::queue(),
                    `textDocument/documentColor` = collections::queue()
                ))
            }
            if (run_lintr && lsp_settings$get("diagnostics")) {
                # Skip linting files under the temp dir unless the workspace
                # itself lives there.
                temp_root <- dirname(tempdir())
                if (path_has_parent(self$rootPath, temp_root) ||
                    !path_has_parent(path_from_uri(uri), temp_root)) {
                    self$diagnostics_task_manager$add_task(
                        uri,
                        diagnostics_task(self, uri, document, delay = delay)
                    )
                }
            }
            if (parse) {
                self$parse_task_manager$add_task(
                    uri,
                    parse_task(self, uri, document, delay = delay)
                )
            }
        },
        # Request shutdown when the client connection is gone or (on unix)
        # when this process has been orphaned by its parent.
        check_connection = function() {
            if (!isOpen(self$inputcon)) {
                self$exit_flag <- TRUE
            }
            if (.Platform$OS.type == "unix" && process_is_detached()) {
                # exit if the current process becomes orphan
                self$exit_flag <- TRUE
            }
        },
        # Write raw text to the client without any re-encoding.
        write_text = function(text) {
            # we have made effort to ensure that text is utf-8
            # so text is printed as is
            writeLines(text, self$outputcon, sep = "", useBytes = TRUE)
        },
        # Non-blocking read of one line: poll the socket in tcp mode,
        # otherwise use the C-level stdin reader.
        read_line = function() {
            if (self$tcp) {
                if (socketSelect(list(self$inputcon), timeout = 0)) {
                    readLines(self$inputcon, n = 1, encoding = "UTF-8")
                } else {
                    character(0)
                }
            } else {
                stdin_read_line()
            }
        },
        # Read exactly `n` bytes and mark the result as UTF-8.
        read_char = function(n) {
            if (self$tcp) {
                out <- readChar(self$inputcon, n, useBytes = TRUE)
                Encoding(out) <- "UTF-8"
                out
            } else {
                stdin_read_char(n)
            }
        },
        # Main loop: process events, fetch one message if available and
        # dispatch it. Any error is logged and terminates the loop.
        run = function() {
            while (TRUE) {
                ret <- tryCatchStack({
                    if (isTRUE(self$exit_flag)) {
                        logger$info("exiting")
                        break
                    }
                    self$process_events()
                    data <- self$fetch(blocking = FALSE)
                    if (is.null(data)) {
                        Sys.sleep(0.1)
                        next
                    }
                    self$handle_raw(data)
                }, error = function(e) e)
                if (inherits(ret, "error")) {
                    logger$error(ret)
                    logger$error("exiting")
                    break
                }
            }
        }
    )
)
# Wire every LSP method name to its handler function. Requests expect a
# reply; notifications do not.
LanguageServer$set("public", "register_handlers", function() {
    # Handlers for client requests (a response must be sent back).
    self$request_handlers <- list(
        initialize = on_initialize,
        shutdown = on_shutdown,
        `textDocument/completion` = text_document_completion,
        `completionItem/resolve` = completion_item_resolve,
        `textDocument/definition` = text_document_definition,
        `textDocument/hover` = text_document_hover,
        `textDocument/signatureHelp` = text_document_signature_help,
        `textDocument/formatting` = text_document_formatting,
        `textDocument/rangeFormatting` = text_document_range_formatting,
        `textDocument/onTypeFormatting` = text_document_on_type_formatting,
        `textDocument/documentSymbol` = text_document_document_symbol,
        `textDocument/documentHighlight` = text_document_document_highlight,
        `textDocument/documentLink` = text_document_document_link,
        `documentLink/resolve` = document_link_resolve,
        `textDocument/documentColor` = text_document_document_color,
        `textDocument/codeAction` = text_document_code_action,
        `textDocument/colorPresentation` = text_document_color_presentation,
        `textDocument/foldingRange` = text_document_folding_range,
        `textDocument/references` = text_document_references,
        `textDocument/rename` = text_document_rename,
        `textDocument/prepareRename` = text_document_prepare_rename,
        `textDocument/selectionRange` = text_document_selection_range,
        `textDocument/prepareCallHierarchy` = text_document_prepare_call_hierarchy,
        `callHierarchy/incomingCalls` = call_hierarchy_incoming_calls,
        `callHierarchy/outgoingCalls` = call_hierarchy_outgoing_calls,
        `textDocument/linkedEditingRange` = text_document_linked_editing_range,
        `workspace/symbol` = workspace_symbol
    )
    # Handlers for client notifications (fire-and-forget, no response).
    self$notification_handlers <- list(
        initialized = on_initialized,
        exit = on_exit,
        `textDocument/didOpen` = text_document_did_open,
        `textDocument/didChange` = text_document_did_change,
        `textDocument/didSave` = text_document_did_save,
        `textDocument/didClose` = text_document_did_close,
        `workspace/didChangeConfiguration` = workspace_did_change_configuration,
        `workspace/didChangeWatchedFiles` = workspace_did_change_watched_files,
        `$/setTrace` = protocol_set_trace
    )
})
#' Run the R language server
#'
#' Starts the language server, speaking to the client over stdio by
#' default or over a tcp socket when a port is supplied.
#'
#' @param debug set `TRUE` to show debug information in stderr;
#'     or it could be a character string specifying the log file
#' @param host the hostname used to create the tcp server, not used when `port` is `NULL`
#' @param port the port used to create the tcp server. If `NULL`, use stdio instead.
#' @examples
#' \dontrun{
#' # to use stdio
#' languageserver::run()
#'
#' # to use tcp server
#' languageserver::run(port = 8888)
#' }
#' @export
run <- function(debug = FALSE, host = "localhost", port = NULL) {
    # Render Rd documentation as plain text: no underlined titles, simple
    # "* " bullets.
    tools::Rd2txt_options(underline_titles = FALSE, itemBullet = "* ")
    lsp_settings$update_from_options()
    # `debug` is either a flag (log to stderr) or a file path (log there).
    if (is.character(debug)) {
        lsp_settings$set("debug", TRUE)
        lsp_settings$set("log_file", debug)
    } else if (isTRUE(debug)) {
        lsp_settings$set("debug", TRUE)
        lsp_settings$set("log_file", NULL)
    }
    server <- LanguageServer$new(host, port)
    server$run()
}
|
341c30ae683a488d5df075f974e83f323bb37517 | 6a5d187f124d5b84f64dc2127dff687e19847750 | /vignettes/precompile.R | f5f59a9ca257365ba9dcc46b338d56c94c414d2a | [] | no_license | unina-sfere/funcharts | 6f3e5ce475d9d1a7b2bd4342ea4ec8870da7a4d8 | df57f6c6846de38aaed8fdeee9ff8b196304d0e1 | refs/heads/main | 2023-04-09T17:07:54.079007 | 2023-03-30T14:32:37 | 2023-03-30T14:32:37 | 345,451,129 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 970 | r | precompile.R | ## Create .Rmd from .Rmd.orig
# Pre-compute each vignette from its .Rmd.orig source.
knitr::knit("vignettes/colosimo2010.Rmd.orig", "vignettes/colosimo2010.Rmd")
knitr::knit("vignettes/capezza2020.Rmd.orig", "vignettes/capezza2020.Rmd")
knitr::knit("vignettes/centofanti2021.Rmd.orig", "vignettes/centofanti2021.Rmd")
knitr::knit("vignettes/mfd.Rmd.orig", "vignettes/mfd.Rmd")
## Move png files generated from .Rmd to vignettes/ folder
file_png <- list.files()
# Anchor the extension: the previous pattern ".png" left the dot
# unescaped, so it also matched names such as "Xpngfile".
file_png <- file_png[grepl("\\.png$", file_png)]
new_file_png <- paste0("vignettes/", file_png)
file.rename(file_png, new_file_png)
## Build vignettes
devtools::build_vignettes()
## Build website
pkgdown::build_site()
## After building the website with pkgdown,
## rename files to get the right image paths
files <- list.files("docs/articles", full.names = TRUE)
files_html <- files[grepl("\\.html$", files)]
for (ff in files_html) {
  text <- readLines(ff)
  # The replaced path is a literal prefix, not a regex: fixed = TRUE stops
  # its dots (and other metacharacters) from being interpreted.
  new_text <- gsub("../../../R%20packages/funcharts/articles/", "", text, fixed = TRUE)
  writeLines(new_text, ff)
}
|
6a5d153650e474be1d659dc6ca7f883bf4e11c4b | 9016b6a0e2631b93da766c9f9da032ad832ba547 | /basic_workflow.R | 7f6c976fcc8da54591f2bf3bee80ba09acfd5864 | [] | no_license | turnbullr-fly-connect/synapse_prediction | 336edffd6f8b2ccf519237410535dcbd0480e454 | 97f2ca6041c31c04df6cae8667f682734bcba1da | refs/heads/master | 2020-04-08T11:52:56.754824 | 2018-11-29T16:58:38 | 2018-11-29T16:58:38 | 159,324,191 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,146 | r | basic_workflow.R | #get xyz coordinates of connectors
# Get the xyz coordinates of the connectors (one row per unique connector).
catmaid_get_connector_table(38885) -> connector_table
unique_connectors <- connector_table[match(unique(connector_table$connector_id), connector_table$connector_id), ]
connectors_xyz <- data.frame(connector_id = unique_connectors$connector_id,
                             x = unique_connectors$x, y = unique_connectors$y, z = unique_connectors$z)
points3d(xyzmatrix(connectors_xyz))
plot3d(FAFB) # shows connectors as floating points
# Count, for each threshold, the (connector, skeleton-node) pairs whose
# Euclidean distance d(A,B) = sqrt((x2-x1)^2 + (y2-y1)^2 + (z2-z1)^2) is
# within the threshold. The node dimension is vectorised, replacing the
# original per-pair scalar double loop (which was duplicated three times).
count_predicted_synapses <- function(neuron_xyz, connectors_xyz, thresholds) {
  counts <- numeric(length(thresholds))
  for (i in seq_len(nrow(connectors_xyz))) {
    d <- sqrt((connectors_xyz$x[i] - neuron_xyz$x)^2 +
              (connectors_xyz$y[i] - neuron_xyz$y)^2 +
              (connectors_xyz$z[i] - neuron_xyz$z)^2)
    for (k in seq_along(thresholds)) {
      counts[k] <- counts[k] + sum(d <= thresholds[k])
    }
  }
  data.frame(threshold_nm = as.numeric(thresholds), predicted_synapses = counts)
}
# Fetch a skeleton by its CATMAID id, resample it to 1 um node spacing and
# tabulate the predicted synapse counts against the shared connector set.
predict_synapses <- function(skid, connectors_xyz, thresholds) {
  neuron <- read.neuron.catmaid(skid)
  neuron_resampled <- resample(neuron, stepsize = 1000)
  neuron_xyz <- data.frame(x = neuron_resampled$d$X,
                           y = neuron_resampled$d$Y,
                           z = neuron_resampled$d$Z)
  count_predicted_synapses(neuron_xyz, connectors_xyz, thresholds)
}
thresholds <- c(500, 600, 700, 800, 900, 1000)
# Duck
Duck_predicted_synapses <- predict_synapses(2659704, connectors_xyz, thresholds)
# Phil Harris
PH_predicted_synapses <- predict_synapses(2096700, connectors_xyz, thresholds)
# Joffrey
Joffrey_predicted_synapses <- predict_synapses(1376325, connectors_xyz, thresholds)
|
3717118206b073449135f8eba6c3e01e45585bc6 | 6da6523cba9bd00bfd04a39153684ae129a9f959 | /R/relabel.R | 75a4db50e671c4b37a243cc3eb91767431e0702c | [] | no_license | alistaire47/forcats | 28e24d73a72b46b013cbbf36e7ff1dee7b754f4a | 9126935351ef2c3d6cb4cd6f9d45d76f965e53f0 | refs/heads/master | 2021-05-01T08:53:21.635871 | 2018-02-14T23:27:19 | 2018-02-14T23:27:19 | 121,178,114 | 0 | 0 | null | 2018-02-11T23:30:22 | 2018-02-11T23:30:22 | null | UTF-8 | R | false | false | 1,137 | r | relabel.R | #' Automatically relabel factor levels, collapse as necessary
#'
#' @param .f A factor.
#' @param .fun A bare or character function name or an actual function in
#'   formula, quosure, or ordinary notation to be applied to each level. Must
#'   accept one character argument and return a character vector of the same
#'   length as its input.
#' @param ... Additional arguments to `.fun`.
#' @export
#' @examples
#'
#' gss_cat$partyid %>% fct_count()
#' gss_cat$partyid %>% fct_relabel(~gsub(",", ", ", .x)) %>% fct_count()
#'
#' convert_income <- function(x) {
#'   regex <- "^(?:Lt |)[$]([0-9]+).*$"
#'   is_range <- grepl(regex, x)
#'   num_income <- as.numeric(gsub(regex, "\\1", x[is_range]))
#'   num_income <- trunc(num_income / 5000) * 5000
#'   x[is_range] <- paste0("Gt $", num_income)
#'   x
#' }
#' fct_count(gss_cat$rincome)
#' convert_income(levels(gss_cat$rincome))
#' rincome2 <- fct_relabel(gss_cat$rincome, convert_income)
#' fct_count(rincome2)
fct_relabel <- function(.f, .fun, ...) {
  # as_function() accepts functions, formulas and character names alike.
  .fun <- rlang::as_function(.fun)
  old_levels <- levels(.f)
  new_levels <- .fun(old_levels, ...)
  # lvls_revalue() remaps the factor onto the relabelled levels.
  lvls_revalue(.f, new_levels)
}
|
7cd5a972e3f5da09ae112efc37efb94b0ebe5401 | 751b93ecac667aa73e06956a831811acf00b7faa | /plot4.R | ca15e5eecff52b064b23cfeed8266e7c25e6c194 | [] | no_license | kammerel/exploratory-data-analysis | f3d57ece1d6ec5d5b4f1ce480a6f90f1a67f788f | e14e56845d0e522d1176345c2aafe46045c060a8 | refs/heads/master | 2020-05-29T22:41:41.725862 | 2019-06-04T17:00:22 | 2019-06-04T17:00:22 | 189,416,319 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,411 | r | plot4.R | # set the working directory
# Plot 4: 2x2 panel of household power measurements for 2007-02-01/02,
# written to Plot4.png.
Sys.setlocale("LC_ALL", "English") # English weekday labels on the time axis
setwd("E:/Dokumente/Fortbildung on the job/Coursera/Data Science Specialization/Course 4 Exploratory Data Analysis/Week 1/Assignment")
# Read the raw measurements; "?" encodes missing values in this data set.
hpc <- read.table(file = "household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?")
# Build a date-time stamp from the Date and Time columns.
hpc$datetime <- strptime(paste(hpc$Date, hpc$Time), "%d/%m/%Y %H:%M:%S")
# Keep only the observations for the two days of interest.
hpc <- hpc[(hpc$datetime >= "2007-02-01 00:00:00" & hpc$datetime < "2007-02-02 23:59:00"), ]
png("Plot4.png", width = 480, height = 480)
par(mfrow = c(2, 2))
with(hpc, {
  plot(datetime, Global_active_power, type = "l", ylab = "Global Active Power", xlab = "")
  plot(datetime, Voltage, type = "l", xlab = "datetime", ylab = "Voltage")
  plot(datetime, Sub_metering_1, type = "l", col = "black", xlab = "", ylab = "Energy Sub metering")
  lines(datetime, Sub_metering_2, type = "l", col = "red")
  lines(datetime, Sub_metering_3, type = "l", col = "blue")
  legend("topright", lty = 1, col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
  plot(datetime, Global_reactive_power, type = "l", xlab = "datetime", ylab = "Global_reactive_power")
})
dev.off()
|
a72c00b970b45695691145a591287dfeb8a760cd | decd805a323a5bdb863f9d5501c2963a4fb51ba0 | /MarketSimulator/R/Market.R | a012dd88b9a7e6e9b5970f8feb893c899ba8aa6a | [] | no_license | markhocky/MarketSimulator | eda0d2d11f01c480cc7a40506ae695284b77fee5 | d7000d90bc822521cc084b4c245321c565b716b5 | refs/heads/master | 2016-08-09T20:51:56.167101 | 2016-04-09T01:56:27 | 2016-04-09T01:56:27 | 55,815,590 | 0 | 3 | null | null | null | null | UTF-8 | R | false | false | 1,747 | r | Market.R | #'
#'
# S4 container holding a named list of tradeable instrument price series.
setClass("Market",
	slots = c(
		instruments = "list"
	))

# Construct a Market. `instruments` defaults to an empty list, replacing
# the previous missing()-based check (backward compatible for all callers).
Market <- function(instruments = list()) {
	market <- new("Market")
	market@instruments <- instruments
	return(market)
}
# Return the single price bar of `instrument` observed at `timestamp`.
setMethod("getBar",
		signature("Market"),
		function(object, instrument, timestamp) {
			# Look the series up by name, then subset it by time.
			series <- object@instruments[[instrument]]
			series[timestamp]
		})
# A bar represents an active market when its volume, open and close are
# all present (not NA) and non-zero.
active_market <- function(price.bar) {
	checks <- c(
			not_NA_or_Zero(Vo(price.bar)),
			not_NA_or_Zero(Op(price.bar)),
			not_NA_or_Zero(Cl(price.bar))
	)
	all(checks)
}
# TRUE only when `value` is a scalar that is neither NA nor zero; every
# other case (NA, zero, non-scalar result) collapses to FALSE via isTRUE().
not_NA_or_Zero <- function(value) {
	invalid <- is.na(value) || value == 0
	isTRUE(!invalid)
}
# Return the instrument names in alphabetical order; an empty market
# yields a zero-length character vector.
setMethod("tradeableInstruments",
		signature("Market"),
		function(object) {
			# sort was throwing warning of little use about applying is.na to NULL object.
			# (suppressWarnings is deliberate -- do not remove.)
			return(suppressWarnings(sort(names(object@instruments))))
		})
# Print a summary table: one row per instrument with its first ("start")
# and last ("end") available dates, formatted as character strings.
setMethod("show",
		signature("Market"),
		function(object) {
			boundary_date <- function(instrument, when) as.character(when(instrument))
			summary.frame <- data.frame(
				start = sapply(object@instruments, boundary_date, when = start),
				end = sapply(object@instruments, boundary_date, when = end))
			show(summary.frame)
		})
# Compute daily returns for every instrument in `market`, returned as one
# xts object with a column per instrument (columns in the alphabetical
# order produced by tradeableInstruments()).
#
# market: a Market object.
# type:   "arithmetic" (default) or "log"; anything else errors.
dailyReturns <- function(market, type = "arithmetic") {
	if (!type %in% c("arithmetic", "log")) {
		stop("Type must be 'arithmetic' or 'log'")
	}
	# Start from an empty xts and merge each instrument's return series in;
	# merge() aligns the series on their time index.
	returns <- xts()
	for (instrument in tradeableInstruments(market)) {
		mktdata <- market@instruments[[instrument]]
		returns <- merge(returns, dailyReturn(mktdata, type = type))
	}
	# Re-label the merged columns with the instrument names, in the same
	# order they were merged.
	names(returns) <- tradeableInstruments(market)
	return(returns)
}
|
b2ccbd3366b6fcd39b23a75879685bc93bc1df7c | 830876874bf3e1fca80dcbf0c727229e30bdbb12 | /Data Structure/cast&melt.R | 00d3b10fad0398d0e942371ed990b306e6b073b2 | [] | no_license | ShrutiSi/RIA | 761311fdae32ef99f4c7ca7ed09e8e8cf5bf38ae | f55c5655329c28de8b5aedbbae728fd439af5b43 | refs/heads/master | 2021-01-22T21:17:27.307109 | 2018-01-23T05:15:24 | 2018-01-23T05:15:24 | 100,679,297 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 692 | r | cast&melt.R | df2
# Reshape demo: build a wide student-marks table (4 students x 4 exam
# units), melt it to long format, then cast it back into wide layouts.
# Assignments normalised to `<-` (the original mixed `=` and `<-`).
rollno <- rep(c(10, 11, 12, 13), 4)
rollno
# Wrapping an assignment in parentheses also prints the assigned value.
(sname <- rep(c("Achal", "Apporva", "Goldie", "Hitesh"), 4))
(examunit <- rep(c("u1", "u2", "u3", "u4"), each = 4))
# NOTE(review): the seed is reset before each subject, so rpgm and sql are
# perfectly correlated draws -- presumably intentional for this demo.
set.seed(1234)
(rpgm <- ceiling(rnorm(4 * 4, 60, 10)))
set.seed(1234)
(sql <- ceiling(rnorm(4 * 4, 65, 10)))
df1 <- data.frame(rollno, sname, examunit, rpgm, sql)
df1
str(df1)
# melt the data: wide to long (one row per student x unit x subject)
library(reshape)
md <- reshape::melt(df1, id = c("rollno", "sname", "examunit"))
md[md$rollno == 12, ]
# cast ----
# without aggregation, important
reshape::cast(md, rollno + sname + examunit ~ variable)
reshape::cast(md, rollno + sname + variable ~ examunit)
reshape::cast(md, rollno + sname ~ variable ~ examunit) # marks per unit, array structure
|
c1d981bf31dcdc6147be5749df01f3beb8c99b93 | 8749cd76d533720e28877bdfaf9b7fe45e1ccf5f | /man/loadEIAbulk.Rd | b0e2b4229c47d16e19ecfef867a6103ce8a46bf7 | [] | no_license | bigdatalib/RTL | 89ecba94b24ea1fa65bb87408b0c08eaa5809b23 | 9fe64900c618cae4d5286d6440774bf92819d387 | refs/heads/master | 2020-12-25T00:02:56.479952 | 2014-05-21T18:19:23 | 2014-05-21T18:19:23 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 309 | rd | loadEIAbulk.Rd | % Generated by roxygen2 (4.0.1): do not edit by hand
\name{loadEIAbulk}
\alias{loadEIAbulk}
\title{\code{loadEIAbulk}}
\usage{
loadEIAbulk()
}
\value{
Lots of series...
}
\description{
Loads all EIA NG and PET data as a bulk download.
}
\author{
Philippe Cote <coteph@mac.com,philippe.cote@scotiabank.com>
}
|
21e94d1512c540ecf638307511795ef40797d470 | 3fc443b349b05084fdb33a67644e6d2ae6b2b9f8 | /getTickers.R | b17c5d8cd9cdc9c142c22d723822b2613496d28e | [] | no_license | douglasadamson/RStocks | 0f2f9daddadc3064ba675d7b4d1452e3a5301ad3 | fb76c00f928d85eb4e5ea3e581ef3a0d70740622 | refs/heads/master | 2021-07-02T01:34:20.546001 | 2017-09-20T19:28:51 | 2017-09-20T19:28:51 | 100,506,815 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 503 | r | getTickers.R | library(readr)
#
# Ultimately, get this data from a service
#
# Look up the ticker CSV for the requested index, read it with readr, and
# return the rows with favorites first, then alphabetical by symbol.
getTickers <- function(index) {
  # Which index? Fall-through maps both "nasdaq" spellings to the same
  # file; anything unrecognized defaults to the favorites file.
  filename <- switch(index,
                     "nasdaq" = ,
                     "nasdaq100" = "nasdaq100list.csv",
                     "favorites" = "ticker.csv",
                     "ticker.csv")
  tickers <- read_csv(filename, col_names = TRUE, col_types = "cci")
  # Favorites (larger values) first, then alphabetical order by Symbol.
  ordering <- order(-tickers$Favorites, tickers$Symbol)
  return(tickers[ordering, ])
}
|
ef81d44b445c886828c12be3d30e0d7003ad1e2c | 094e952da4fa8698b04fb88b69fbf67668218d24 | /code/ch.8/fig8-1.R | 1d5a4a07d5e4542b6d67e78e25b106441840c54f | [
"MIT"
] | permissive | rhyeu/study_rstan | 42a773beef840f56f64fcd20c5b1b24f88d45e1b | a5b998772358ba64996bc7ca775566f0706fa8f3 | refs/heads/master | 2021-07-08T15:51:37.488890 | 2020-10-04T07:10:03 | 2020-10-04T07:10:03 | 195,388,081 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 928 | r | fig8-1.R | library(ggplot2)
# Fit a pooled linear model of salary (Y) on X and render two figures:
# a single scatter plot with the pooled fit line (left panel) and a
# per-group (KID) faceted version with per-group lm fits (right panel).
d <- read.csv(file='ch.8/input/data-salary-2.txt')
d$KID <- as.factor(d$KID)
res_lm <- lm(Y ~ X, data=d)
# Pooled intercept and slope, reused as the reference line in both plots.
coef <- as.numeric(res_lm$coefficients)
p <- ggplot(d, aes(X, Y, shape=KID))
p <- p + theme_bw(base_size=18)
p <- p + geom_abline(intercept=coef[1], slope=coef[2], size=2, alpha=0.3)
p <- p + geom_point(size=2)
p <- p + scale_shape_manual(values=c(16, 2, 4, 9))
p <- p + labs(x='X', y='Y')
ggsave(p, file='ch.8/output/fig8-1-left.png', dpi=300, w=4, h=3)
# Right panel: same data faceted by KID, adding a dashed per-facet lm fit
# on top of the (shared) pooled reference line.
p <- ggplot(d, aes(X, Y, shape=KID))
p <- p + theme_bw(base_size=20)
p <- p + geom_abline(intercept=coef[1], slope=coef[2], size=2, alpha=0.3)
p <- p + facet_wrap(~KID)
p <- p + geom_line(stat='smooth', method='lm', se=FALSE, size=1, color='black', linetype='31', alpha=0.8)
p <- p + geom_point(size=3)
p <- p + scale_shape_manual(values=c(16, 2, 4, 9))
p <- p + labs(x='X', y='Y')
ggsave(p, file='ch.8/output/fig8-1-right.png', dpi=300, w=6, h=5)
|
d64485bbaab719fd1dcd94d6f9ed2f28b0d2686b | 1e3c6d8a48d75f9b59cde049d444d381018c3710 | /utility.R | 60be31a019ec7a2ccce19e0b62e80fb21e764f4c | [] | no_license | legolas347/interview | 6080bc018b3619f78f364d2e47d70ac8f5a6990d | a8c5ab1ce33b34b5d16c41d6b987d0e5db8302b9 | refs/heads/master | 2022-11-22T08:55:15.304499 | 2020-07-29T22:52:13 | 2020-07-29T22:52:13 | 282,916,140 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 606 | r | utility.R | library(dplyr)
library(ggplot2)
# Root-mean-square error between two equal-length numeric vectors.
f_rmse <- function(x, y) {
  squared_errors <- (x - y)^2
  sqrt(mean(squared_errors))
}
# Quick exploratory-data-analysis summary of a data frame. Returns a list
# with: per-column missingness fractions, per-character-column value tables,
# a summary() of each numeric column, and the duplicated rows.
f_eda <- function(df){
  # Fraction of NA values per column, as a one-row data frame.
  df_missing <- data.frame(t(apply(df, 2, function(x){mean(is.na(x))})))
  df_summary_num <- data.frame(apply(dplyr::select_if(df, is.numeric), 2, summary))
  # NOTE(review): apply() coerces the character sub-frame to a matrix before
  # tabulating; result shape depends on the data — verify for ragged tables.
  df_summary_char <- apply(dplyr::select_if(df, is.character), 2, table)
  ind_dups <- duplicated(df)
  output <- list()
  output[['missing_counts']] <- df_missing
  # Key name says "fact_cols" but the selection above is character columns.
  output[['summaries_fact_cols']] <- df_summary_char
  output[['summaries_num_cols']] <- df_summary_num
  output[['duplicate_rows']] <- df[ind_dups,]
  return(output)
}
65738a904e7c8874f4b4d3d1da96469ce5ff04b0 | 8739f0e762414dc911e12af0913af489073b72f6 | /Figures_02_20_20/master_dataframe.R | efa0615602a738766b0f0b41683e852adb9bb743 | [] | no_license | HeatherWelch/melanoma | 5277c6194b3cc556a800df8e4de3488274743a1d | 5093668a248482f3ae82f3909dda1da76318fa3b | refs/heads/master | 2022-09-13T13:52:57.601464 | 2022-08-08T19:28:47 | 2022-08-08T19:28:47 | 231,420,485 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,534 | r | master_dataframe.R | ### putting it all together
# Assemble the master analysis data frame: join medical (AHRF/SEER) and
# environmental county-level layers, rescale temperature units, and write
# the combined CSV. Paths are absolute to the author's machine.
source('/Users/heatherwelch/Dropbox/melenoma/melanoma_GitHub/utilities/load_libraries.R')
spatial_dir="/Users/heatherwelch/Dropbox/melenoma/spatial_files"
# medical
b=st_read(glue("{spatial_dir}/AHRF_new_01_30_20.shp")) %>% as.data.frame
ahrf=b %>% dplyr::select(c(STATEFP,NAME,COUNTY_, incm_pc,incm_mh,derm_pk,pcp_pk,docs_pk,wpovr50,wpvr100,HI_65)) %>% rename(COUNTY_FIPS=COUNTY_)
seer=read.csv("/Users/heatherwelch/Dropbox/melenoma/spatial_files/SEER_rate.csv")
# environmental
# Each CSV in the folder becomes a data frame in the global environment,
# named after its file (via assign), with its "X" index column dropped.
csv_list=list.files("/Users/heatherwelch/Dropbox/melenoma/spatial_files",pattern=".csv",full.names = T)
for(csv in csv_list){
  name=gsub("/Users/heatherwelch/Dropbox/melenoma/spatial_files/","",csv)
  name2= gsub(".csv","",name)
  print(name2)
  a=read.csv(csv) %>% dplyr::select(-X)
  print(nrow(a))
  assign(name2,a)
}
# Join every layer onto the SEER rates (left joins on shared columns).
master=left_join(SEER_rate,anRange_temperature)
master=left_join(master,cancer_gov_UV_exposure)
master=left_join(master,mean_cloud)
master=left_join(master,elevation)
master=left_join(master,mean_temperature)
master=left_join(master,seasonality_cloud)
master=left_join(master,seasonality_temperature)
master=left_join(master,sun_exposure)
master=left_join(master,UV_daily_dose)
master=left_join(master,UV_irradiance)
master=left_join(master,ahrf)
# Temperature layers are stored as tenths of a degree; divide by 10.
mast=master %>% mutate(anRange_temperature=anRange_temperature/10,mean_temperature=mean_temperature/10,seasonality_temperature=seasonality_temperature/10)
write.csv(mast, "/Users/heatherwelch/Dropbox/melenoma/Figures_04_20_20/master_dataframe.csv")
e9ed3986c40f3281a5006c4bdbd062319fcf3911 | f036b4e0b2d850e07feb5975879bb4896ceafdb0 | /R/plot_spcma.R | 16f9cf2f795195bfc9be61ef412659b51d32c3ab | [] | no_license | guhjy/spcma | 736d8b31911d3a9488735ff52aab556374bd9c2a | 670c92bf35ecdea723244ca05ca673932af2c1dc | refs/heads/master | 2020-04-09T10:57:05.950203 | 2018-06-19T20:17:38 | 2018-06-19T20:17:38 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 995 | r | plot_spcma.R | plot_spcma <-
# Forest-style plot of one coefficient table ("alpha", "beta" or "IE") from
# an spcma result list. Points are colored/shaped by significance and sign;
# vertical dashed whiskers show the confidence bounds per component.
# (The `plot_spcma <-` assignment precedes this expression.)
function(object,plot.coef=c("alpha","beta","IE"),cex.lab=1,cex.axis=1,pt.cex=1,...)
{
  # Pick the requested coefficient table; columns 1,3,4 = estimate, LB, UB.
  plot.idx<-which(names(object)==plot.coef)
  out<-as.matrix(object[[plot.idx]][,c(1,3,4)])
  colnames(out)<-c("Estimate","LB","UB")
  K<-nrow(out)
  # Significant when LB and UB share a sign (product > 0).
  sig<-as.numeric(out[,2]*out[,3]>0)
  # Encode point type: 1 = non-sig, 2 = sig positive, 3 = sig negative.
  neg<-as.numeric(out[,1]<0)*sig*3
  pos<-as.numeric(out[,1]>0)*sig*2
  pt.type<-(1-sig)+pos+neg
  col.tmp<-c(1,2,4)[pt.type]
  pt.tmp<-c(19,17,15)[pt.type]
  par(mar=c(5,5,3,3))
  # Empty canvas spanning all K components; custom x axis added below.
  plot(range(1-0.5/K,K+0.5/K),range(out[,c(2,3)],na.rm=TRUE),type="n",xaxt="n",xlab="",ylab=plot.coef,cex.lab=cex.lab,cex.axis=cex.axis)
  axis(side=1,at=1:K,labels=rownames(out),cex.axis=cex.axis)
  abline(h=0,lty=2,lwd=2,col=8)
  points(1:K,out[,1],pch=pt.tmp,col=col.tmp,cex=pt.cex)
  # Draw an interval whisker with end caps for each component.
  for(j in 1:K)
  {
    lines(rep(j,2),out[j,c(2,3)],lty=2,lwd=1,col=col.tmp[j])
    lines(c(j-0.3/K,j+0.3/K),rep(out[j,2],2),lty=1,lwd=1,col=col.tmp[j])
    lines(c(j-0.3/K,j+0.3/K),rep(out[j,3],2),lty=1,lwd=1,col=col.tmp[j])
  }
}
|
fbac8b9e46f3cfc586c1765e1656c3e7dbf44fa4 | b7a21432aeec6869ee358edc43d7fd98a11cf4d5 | /R/configure-hurricane-wind.R | c93b776888fc74a86cc18fb7bd77ac450c4c90dc | [
"MIT"
] | permissive | mikoontz/twensday | 052ac297badbdc2ca16f14c31dbfc2b4cbe26529 | 4389dc3e3be5cd2e9b1015a47675d965d9a0baac | refs/heads/master | 2020-09-28T13:41:28.305155 | 2020-03-06T21:53:44 | 2020-03-06T21:53:44 | 226,789,519 | 0 | 0 | MIT | 2020-02-24T22:09:22 | 2019-12-09T05:10:58 | R | UTF-8 | R | false | false | 4,016 | r | configure-hurricane-wind.R | # Resample the Fire risk map to be at the same resolution/extent as the Zillow data
# Setup for resampling the hurricane-wind hazard raster onto the Zillow
# grid: load packages, fetch the template grid, and define input/output
# paths and the Google Drive id of the raw hazard data.
library(tidyverse)
library(raster)
library(googledrive)
# devtools::install_github(repo = "JoshOBrien/rasterDT")
library(rasterDT)
library(gdalUtils)
# make the functions available to download the Zillow grid
source("R/download_grid.R")
# Ensure the directory structure for data output is present
if(!dir.exists(file.path("output", "hazards"))) {
  dir.create(file.path("output", "hazards"), recursive = TRUE)
}
# Get the empty template grid of the Zillow dataset
empty_grid <-
  download_grid() %>%
  raster()
# These set up the variables to be used to get the hazard data and name
# new output files appropriately
hazard_name <- "hurricane-wind"
# hazard_file <- "CycloneFrequency_1980_2000_projected/gdcyc_NAD.tif"
# zip_path <- file.path("data", "hazards", "CycloneFrequency_1980_2000_projected.zip")
#
# # The hurricane wind data is on the Google Drive
# hazard_id <- "1REzIWNeq4zwwZdiTT2YBa7UYXTYA-r2s"
hazard_file <- "gdcyc/gdcyc.asc"
zip_path <- file.path("data", "hazards", "gdcyc_cyclone.zip")
hazard_id <- "1whh-JSmF7v6vJm35lgQAAt5bs01Phb_t"
# Names of the files (to read and manipulate, and then what to call it upon
# export)
hazard_path_src <- file.path("data", "hazards", hazard_name, hazard_file)
hazard_path_out <- file.path("output", "hazards", paste0(hazard_name, "_zillow-grid.tif"))
# Set TRUE to force re-download and re-processing even if output exists.
overwrite <- FALSE
# Download, reproject, align, mask and convert the hazard raster to an
# annual event probability, then write it to hazard_path_out. Skipped when
# the output already exists (unless `overwrite`).
# Fix: scalar `if` condition now uses the short-circuit `||` instead of the
# elementwise `|`.
if (!file.exists(hazard_path_out) || overwrite) {
  # download the raw data from Google Drive
  hazard_metadata <- googledrive::drive_get(id = hazard_id)
  googledrive::drive_download(hazard_metadata, path = zip_path)
  # unzip the data file
  unzip(zip_path, overwrite = TRUE, exdir = file.path("data", "hazards", hazard_name))
  unlink(zip_path)
  hazard_path_tmp <- file.path("data", "hazards", hazard_name, paste0(hazard_name, "_temp.tif"))
  hazard_orig <- raster::raster(hazard_path_src)
  # Reproject to the Zillow grid CRS at 250 m resolution.
  gdalwarp(srcfile = hazard_path_src,
           dstfile = hazard_path_tmp,
           t_srs = crs(empty_grid),
           tr = c(250, 250),
           overwrite = TRUE,
           s_srs = crs(hazard_orig),
           r = "bilinear")
  # Snap the warped raster to the exact extent/origin of the template grid.
  hazard <- gdalUtils::align_rasters(unaligned = hazard_path_tmp,
                                     reference = empty_grid@file@name,
                                     dstfile = hazard_path_out,
                                     overwrite = TRUE,
                                     output_Raster = TRUE)
  unlink(hazard_path_tmp)
  # Mask out the pixels outside of CONUS using the water mask derived from the
  # USAboundaries package high resolution CONUS shapefile (rasterized to the Zillow
  # grid) and the flood hazard layer, with all values of 999 masked out (representing
  # persistent water bodies)
  if (!file.exists(file.path("output", "water-mask_zillow-grid.tif"))) {
    source("R/configure-flood.R")
  }
  mask <- raster::raster("output/water-mask_zillow-grid.tif")
  hazard <- raster::mask(x = hazard, mask = mask)
  # This source represents records for 21 years (Jan 1, 1980 to Dec 31, 2000)
  # https://sedac.ciesin.columbia.edu/data/set/ndh-cyclone-hazard-frequency-distribution
  # One caveat is that there is a mask applied to 1 km grid cells to exclude
  # cells with <5 people per square km or without significant agriculture
  hazard_rate <- hazard / 21
  # assume an exponential distribution of waiting times, and then we can use
  # the cumulative distribution function for the exponential to ask what
  # is the probability that an event occurred before a specified waiting time
  # (one year, in our case) given a rate of the hazard (which we figured
  # empirically)
  hazard_prob <- 1 - exp(-1 * hazard_rate)
  # write to disk
  raster::writeRaster(x = hazard_prob, filename = hazard_path_out, overwrite = TRUE)
}
# Alternative source?
# hazard_file <- "gdcyc/gdcyc.asc"
# hazard_id <- '1whh-JSmF7v6vJm35lgQAAt5bs01Phb_t'
# zip_path <- file.path("data", "hazards", "gdcyc_cyclone.zip")
|
df935be74190d9b17ce87018332cc8c1f380bd05 | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/RBesT/R/crohn.R | 8adf30f73f221cd3bc364d6772b900642f1a6ce0 | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | false | 950 | r | crohn.R | #' Crohn's disease.
#'
#' Data set containing historical information for placebo arm of
#' relevant studies for the treatment of Crohn's disease. The primary
#' outcome is change from baseline in Crohn's Disease Activity Index
#' (CDAI) over a duration of 6 weeks. Standard deviation of change
#' from baseline endpoint is approximately 88.
#'
#' @format A data frame with 4 rows and 3 variables:
#' \describe{
#' \item{study}{study}
#' \item{n}{study size}
#' \item{y}{mean CDAI change}
#' }
#'
#' @references Hueber W. et. al, \emph{Gut}, 2012, 61(12):1693-1700
#'
#' @template example-start
#' @examples
#' set.seed(546346)
#' map_crohn <- gMAP(cbind(y, y.se) ~ 1 | study,
#' family=gaussian,
#' data=transform(crohn, y.se=88/sqrt(n)),
#' weights=n,
#' tau.dist="HalfNormal", tau.prior=44,
#' beta.prior=cbind(0,88))
#' @template example-stop
"crohn"
|
672cb67ff657f817b0f9b5a7251a3347066f4cca | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/summarytools/vignettes/Introduction.R | aecaed4ea05f5af714772e8beafcd22b93ccc923 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,341 | r | Introduction.R | ## ----setup, include=FALSE------------------------------------------------
library(knitr)
opts_chunk$set(comment=NA, prompt=FALSE, cache=FALSE, results='asis')
library(summarytools)
st_options('footnote', NA)
## ------------------------------------------------------------------------
library(summarytools)
freq(iris$Species, style = "rmarkdown")
## ------------------------------------------------------------------------
freq(iris$Species, report.nas = FALSE, style = "rmarkdown", omit.headings = TRUE)
## ------------------------------------------------------------------------
with(tobacco, print(ctable(smoker, diseased), method = 'render'))
## ------------------------------------------------------------------------
with(tobacco,
print(ctable(smoker, diseased, prop = 'n', totals = FALSE),
omit.headings = TRUE, method = "render"))
## ------------------------------------------------------------------------
descr(iris, style = "rmarkdown")
## ------------------------------------------------------------------------
descr(iris, stats = c("mean", "sd", "min", "med", "max"), transpose = TRUE,
omit.headings = TRUE, style = "rmarkdown")
## ---- eval=FALSE---------------------------------------------------------
# view(dfSummary(iris))
## ------------------------------------------------------------------------
dfSummary(tobacco, plain.ascii = FALSE, style = "grid")
## ------------------------------------------------------------------------
# First save the results
iris_stats_by_species <- by(data = iris,
INDICES = iris$Species,
FUN = descr, stats = c("mean", "sd", "min", "med", "max"),
transpose = TRUE)
# Then use view(), like so:
view(iris_stats_by_species, method = "pander", style = "rmarkdown")
## ---- eval=FALSE---------------------------------------------------------
# view(iris_stats_by_species)
## ------------------------------------------------------------------------
data(tobacco) # tobacco is an example dataframe included in the package
BMI_by_age <- with(tobacco,
by(BMI, age.gr, descr,
stats = c("mean", "sd", "min", "med", "max")))
view(BMI_by_age, "pander", style = "rmarkdown")
## ------------------------------------------------------------------------
BMI_by_age <- with(tobacco,
by(BMI, age.gr, descr, transpose = TRUE,
stats = c("mean", "sd", "min", "med", "max")))
view(BMI_by_age, "pander", style = "rmarkdown", omit.headings = TRUE)
## ---- eval=FALSE---------------------------------------------------------
# tobacco_subset <- tobacco[ ,c("gender", "age.gr", "smoker")]
# freq_tables <- lapply(tobacco_subset, freq)
# view(freq_tables, footnote = NA, file = 'freq-tables.html')
## ---- eval=FALSE, tidy=FALSE---------------------------------------------
# knitr::opts_chunk$set(echo = TRUE, results = 'asis')
## ---- eval=FALSE---------------------------------------------------------
# view(iris_stats_by_species, file = "~/iris_stats_by_species.html")
## ---- eval=FALSE---------------------------------------------------------
# st_options() # display all global options' values
# st_options('round.digits') # display only one option
# st_options('omit.headings', TRUE) # change an option's value
# st_options('footnote', NA) # Turn off the footnote on all outputs.
# # This option was used prior to generating
# # the present document.
## ------------------------------------------------------------------------
age_stats <- freq(tobacco$age.gr) # age_stats contains a regular output for freq
# including headings, NA counts, and Totals
print(age_stats, style = "rmarkdown", report.nas = FALSE,
totals = FALSE, omit.headings = TRUE)
## ---- eval=FALSE---------------------------------------------------------
# view(dfSummary(tobacco), custom.css = 'path/to/custom.css',
# table.classes = 'table-condensed')
## ----what_is, warning=FALSE, results='markup'----------------------------
what.is(iris)
|
97fe4d1961014d70359e68b410e27af826d779bd | d1625e2223c81a6c510ccf8bb847c67ed85f8e2f | /man/make_cll.Rd | 23fa3f021e366cd2acf8c226de90da5cc41584d8 | [] | no_license | bmihaljevic/bnclassify | ea548c832272c54d9e98705bfb2c4b054f047cf3 | 0cb091f49ffa840983fb5cba8946e0ffb194297a | refs/heads/master | 2022-12-08T21:37:53.690791 | 2022-11-20T10:00:18 | 2022-11-20T10:00:18 | 37,710,867 | 20 | 12 | null | 2020-08-13T19:39:24 | 2015-06-19T08:30:56 | R | UTF-8 | R | false | true | 391 | rd | make_cll.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/learn-params-wanbia.R
\name{make_cll}
\alias{make_cll}
\title{Returns a function to compute negative conditional log-likelihood given feature weights}
\usage{
make_cll(class_var, dataset)
}
\description{
Returns a function to compute negative conditional log-likelihood given feature weights
}
\keyword{internal}
|
6f2ee186a5383977af69539f0038443117d3f756 | fef97b3e002359a91e111b6b0d3e670b89aeb7e9 | /vis/lib_modelEval.R | 312ad2614702127f136146f866be4d927a1ec7c3 | [
"Apache-2.0"
] | permissive | IBPA/GNN | 78e9fcaae8fd566fc542bbf51dfc045275236c0c | c98cdaa29bc8c3112800f35e2ae589e23c29f3b1 | refs/heads/master | 2020-04-03T00:13:53.867293 | 2019-08-24T20:14:59 | 2019-08-24T20:14:59 | 154,893,175 | 11 | 5 | Apache-2.0 | 2019-10-05T19:02:52 | 2018-10-26T20:52:24 | Shell | UTF-8 | R | false | false | 2,388 | r | lib_modelEval.R |
# Collect cross-validation fold CSVs matching "<prefix>_n<nSize>_f*.csv"
# in strDir, pair predictions with the matching "actual_" files, and return
# a one-row data frame of error metrics (MSE, MAE, PCC). Returns an empty
# data.frame when fewer than 5 non-empty fold files exist.
# `train_prefix`, when given, is prepended to both patterns.
getDfDirPrefixSize <- function(strDir, prefix, nSize, train_prefix){
  if (missing(train_prefix)){
    train_prefix=""
  }else{
    prefix = sprintf("%s%s", train_prefix, prefix)
  }
  print(sprintf("%s_n%d_f[0-9]*.csv", prefix, nSize))
  filenames <- list.files(strDir, pattern = sprintf("^%s_n%d_f[0-9]*.csv", prefix, nSize),
                          full.names=TRUE)
  fileInfo <- file.info(filenames)
  # Require at least 5 non-empty fold files before computing metrics.
  if( nrow(fileInfo[fileInfo$size>0,]) < 5){
    return (data.frame())
  }
  # Stack all prediction files, then flatten to long form.
  datalist <- lapply(filenames, function(x){read.csv(file=x,header=TRUE, sep = "\t")})
  df_pred <- Reduce(function(x,y) {rbind(x,y)}, datalist)
  df_pred_flat <- melt(df_pred)
  if (nrow(df_pred_flat) ==0){
    # NOTE(review): debug leftover — consider a warning() or removal.
    print("LOOK!!!")
  }
  # Same stacking for the ground-truth ("actual") files.
  filenames <- list.files(strDir, pattern = sprintf("^%sactual_n%d_f[0-9]*.csv", train_prefix, nSize),
                          full.names=TRUE)
  datalist <- lapply(filenames, function(x){read.csv(file=x,header=TRUE, sep = "\t")})
  df_actual <- Reduce(function(x,y) {rbind(x,y)}, datalist)
  df_actual_flat <- melt(df_actual)
  print(nrow(df_pred_flat))
  print(nrow(df_actual_flat))
  # Assumes prediction and actual rows align one-to-one — TODO confirm.
  df <- cbind.data.frame(df_pred_flat$variable, df_pred_flat$value, df_actual_flat$value)
  colnames(df) <- c("name", "pred", "actual")
  summary <- data.frame(prefix=prefix,
                        nSize=nSize,
                        MSE=mean((df$actual-df$pred)^2),
                        MAE=mean(abs(df$actual-df$pred)),
                        PCC=cor(df$actual, df$pred),
                        stringsAsFactors=FALSE )
  return (summary)
}
# Gather metrics for one prefix across training-set sizes 10..100 (step 10),
# one row per size. NOTE(review): the row-at-a-time growth is O(n^2) and the
# assignment may misbehave when getDfDirPrefixSize() returns an empty
# data.frame — verify before refactoring.
getDfDirPrefix <- function(strDir, strPrefix){
  dfOne <- data.frame(prefix=character(), nSize=integer(), MSE=numeric(), MAE=numeric(), PCC=numeric(), stringsAsFactors=FALSE )
  for (nSize in seq(10, 100, 10)){
    dfOne[nrow(dfOne) + 1,] <- getDfDirPrefixSize(strDir, strPrefix, nSize)
  }
  return (dfOne)
}
# Aggregate error metrics (MSE, MAE, PCC) by one grouping column, returning
# one row per group with mean_* and sd_* columns plus the run prefix.
getDfSummary <- function(df, groupBy) {
  metric_cols <- c("MSE", "MAE", "PCC")
  groups <- list(df[, c(groupBy)])
  means <- aggregate(df[, metric_cols], groups, mean)
  sds <- aggregate(df[, metric_cols], groups, sd)
  colnames(means) <- c(groupBy, "mean_MSE", "mean_MAE", "mean_PCC")
  colnames(sds) <- c(groupBy, "sd_MSE", "sd_MAE", "sd_PCC")
  combined <- cbind(means[, c(groupBy)], means[, c(2:4)], sds[, c(2:4)])
  colnames(combined)[1] <- groupBy
  # The prefix is constant per input frame; carry the first value through.
  combined$prefix <- df$prefix[1]
  return(combined)
}
|
c6c17dcb426aacb72b3b6e275b8a0ab7bc21cb3d | 46a23d8ffb23dd4cd845864e8183cba216bc8d68 | /modules/tab_monte_carlo_simulation/mcs_mileage/mcs_mileage_fun.R | 364b24871b37a882911355427e92ab849279e4a6 | [] | no_license | DavidBarke/weibulltools-app | 84099f30c1027ed808f9905ec90d0529aeb46565 | 2fe0a3a793231da1539767d81d9e22773598f386 | refs/heads/main | 2023-04-12T06:34:38.306466 | 2021-04-20T14:01:54 | 2021-04-20T14:01:54 | 329,618,724 | 6 | 0 | null | 2021-02-09T09:22:14 | 2021-01-14T13:08:39 | R | UTF-8 | R | false | false | 1,647 | r | mcs_mileage_fun.R | mcs_mileage_fun_ui <- function(id) {
  # Module UI body (the `mcs_mileage_fun_ui <- function(id) {` header
  # precedes this block): renders a mock `mcs_mileage()` call with a live
  # placeholder, an "x" argument link, and a distribution selector.
  ns <- shiny::NS(id)
  r_function(
    name = "mcs_mileage",
    varname = "mcs_mileage_result",
    # Filled in by the server's renderUI with the current call text.
    placeholder = shiny::uiOutput(
      outputId = ns("placeholder"),
      container = htmltools::pre
    ),
    r_function_arg(
      "x",
      shiny::uiOutput(
        outputId = ns("x"),
        container = htmltools::pre
      )
    ),
    r_function_arg(
      name = "distribution",
      preSelectInput(
        inputId = ns("distribution"),
        label = NULL,
        choices = c("lognormal", "exponential")
      )
    )
  )
}
# Shiny module server for the mcs_mileage demo: mirrors the user's choices
# into a rendered call preview and exposes a metaReactive that evaluates
# mcs_mileage() on the upstream data reactive.
#
# @param id                 module id.
# @param .values            shared app values (unused here but part of the
#                           module signature convention in this app).
# @param mcs_mileage_data_r shinymeta reactive supplying the input data.
# @return list with `mcs_mileage_r`, the metaReactive result.
mcs_mileage_fun_server <- function(id, .values, mcs_mileage_data_r) {
  shiny::moduleServer(
    id,
    function(input, output, session) {
      ns <- session$ns
      # Variable name recorded by shinymeta for code display.
      rd_varname <- attr(mcs_mileage_data_r, "shinymetaVarname", exact = TRUE)
      # Live preview of the call arguments shown in the UI placeholder.
      output$placeholder <- shiny::renderUI({
        glue::glue(
          '
          x = {x},
          distribution = "{distribution}"
          ',
          x = rd_varname,
          distribution = input$distribution
        )
      })
      # Keep the placeholder rendering even while its tab is hidden.
      shiny::outputOptions(
        output,
        "placeholder",
        suspendWhenHidden = FALSE
      )
      output$x <- shiny::renderUI({
        varname_link(
          tabName = "mcs_mileage_data",
          varname = rd_varname
        )
      })
      # The actual computation, captured for code export by shinymeta.
      mcs_mileage_r <- shinymeta::metaReactive({
        mcs_mileage(
          x = ..(mcs_mileage_data_r()),
          distribution = ..(input$distribution)
        )
      }, varname = "mcs_mileage_result")
      return_list <- list(
        mcs_mileage_r = mcs_mileage_r
      )
      return(return_list)
    }
  )
}
|
ea738b0d35b9891347706f4872e5e8001a95d9a3 | c63cd4eb29aea09ef6e65e2ffe8692555431850e | /app/ui/uiClaster1.R | ee42f1d90c6cd9d11bdbc2556e544fef62f96e42 | [] | no_license | szymonhetmanczyk/ClusterAnalysis | 345704171ff11014f10045991c713d3b0ceb4038 | 262a11f91476a8e39a44dc29d19a6fdd31f1e014 | refs/heads/master | 2020-03-19T00:03:11.004778 | 2018-05-13T23:07:23 | 2018-05-13T23:07:23 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,180 | r | uiClaster1.R | tagList(fluidRow(
box(
title = "Cluster dendrogram",
width = 12,
selectInput(
inputId = "selectdecostand",
label = "Select method for decostand",
choices = c(
'total',
'max',
'freq',
'normalize',
'range',
'standardize',
'pa',
'chi',
'hellinger',
'log'
)
),
#Select input for vegdist
selectInput(
inputId = "selectvegdist",
label = "Select method for vegdist",
choices = c(
"manhattan",
"euclidean",
"canberra",
"bray"
,
"kulczynski",
"jaccard",
"gower",
"altGower"
,
"morisita",
"horn",
"mountford",
"raup"
,
"binomial",
"chao",
"cao",
"mahalanobis"
)
),
selectInput(
inputId = "selectdendrogram",
label = "Select method",
choices = c(
'single',
'ward',
'complete',
'average'
,
'mcquitty',
'median',
'centroid'
)
),
mainPanel(plotOutput(outputId = "dendrogram"))
)
))
|
c67cc79202c6d100966b91ffccd984d9cd2552ee | 2138198e091d8856257e7b4c8b8044152faa67cd | /man/makehistory.two.Rd | 07b51c7d4b266cf470a528fa9c08a3ac4ae8d3ef | [
"MIT"
] | permissive | muschellij2/confoundr | 6dcdeed7d876153a9b1fa5044ca8bc7ee2cc645a | 7010b65c549996268e80a325737afcf29877179a | refs/heads/master | 2021-09-08T23:09:31.091346 | 2018-03-12T19:50:31 | 2018-03-12T19:50:31 | 108,161,813 | 0 | 0 | null | 2017-10-24T17:37:22 | 2017-10-24T17:37:22 | null | UTF-8 | R | false | true | 1,441 | rd | makehistory.two.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/all_functions.R
\name{makehistory.two}
\alias{makehistory.two}
\title{Function to create joint exposure history for two distinct time-varying exposures}
\usage{
makehistory.two(input, id, group = NULL, exposure.a, exposure.b,
name.history.a = "ha", name.history.b = "hb", times)
}
\arguments{
\item{input}{dataset in wide format}
\item{id}{unique observation identifier e.g. "id"}
\item{group}{an optional baseline variable upon which to aggregate the exposure history. This argument provides a way to adjust the metrics for a baseline covariate. For example, in the context of a trial, the grouping variable coul be treatment assignment. In the context of a cohort study, this could be site e.g. "v".}
\item{exposure.a}{the root name for the first exposure e.g. "a"}
\item{exposure.b}{the root name for the second exposure e.g. "z"}
\item{name.history.a}{desired root name for the first time-indexed history variables e.g. "ha"}
\item{name.history.b}{desired root name for the second time-indexed history variables e.g. "hb"}
\item{times}{a vector of measurement times e.g. c(0,1,2)}
}
\description{
Function to create joint exposure history for two distinct time-varying exposures
}
\examples{
mydata.history <- makehistory.two(input=mydata, id=id, times=c(0,1,2), exposure.a="a", exposure.b="z", name.history.a="ha", name.history.b="hb", group="v")
}
|
3eec19c340113cb7d3b547562b0aa09d46e0150a | a990023834ca14ca470afbd5db76d14e4faf8951 | /make-plot.R | 09132a544124d87297cc3df9710473beba411372 | [] | no_license | austensen/covid-cases-mexico | b5bf53ece737ac0773a8e375f75ce592a67e151f | 9ee4f4d601c8179c8544c3488c022236f1aa2164 | refs/heads/master | 2022-11-10T06:39:00.438734 | 2020-06-28T23:51:50 | 2020-06-28T23:51:50 | 275,688,761 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,050 | r | make-plot.R | library(tidyverse)
library(lubridate)
library(gganimate)
library(gifski)
# Data from: https://datos.covid-19.conacyt.mx/#DownZCSV
cases <- "Casos_Diarios_Municipio_Confirmados_20200627.csv" %>%
read_csv() %>%
rename(
id = cve_ent,
pop = poblacion,
name = nombre
) %>%
pivot_longer(
cols = matches("\\d{2}-\\d{2}-\\d{4}"),
names_to = "date",
names_transform = list(date = dmy),
values_to = "cases_new",
values_transform = list(date = as.integer)
) %>%
arrange(id, date) %>%
group_by(id) %>%
mutate(cases_acc = cumsum(cases_new)) %>%
ungroup()
p <- cases %>%
# 10 random cities
filter(id %in% sample(unique(id), 10)) %>%
ggplot() +
aes(x = name, y = cases_acc) +
coord_flip() +
geom_col() +
transition_time(date) +
ease_aes('cubic-in-out') +
exit_fade() +
labs(
title = 'Cumulative confirmed cases',
subtitle = '{frame_time}'
)
animate(p, duration = 5, fps = 20, width = 600, height = 600, renderer = gifski_renderer())
anim_save("covid-cases-plot.gif")
|
a422c6f787d1f4d34b6efa81f0c976d804cf126a | 7a8385a795ca4ab677e135e77a1932e1004b4060 | /TM.R | c6179658b8e918d3c66ddb9ac483b0df00f931f9 | [] | no_license | LilisSyarifah/Association-Rule-Mining | 7c7570ffce234cd5bef39e5b6ce7699eb3cffe8e | 19a039458cb3fa283c445218c0951557bc4f1cfc | refs/heads/master | 2020-04-09T04:51:47.717978 | 2018-12-02T11:29:21 | 2018-12-02T11:29:21 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,913 | r | TM.R |
# Text-mining pipeline: preprocess a corpus of abstracts with tm, build a
# document-term matrix, mine frequent itemsets with arules (restricted to
# agriculture-related LHS terms), and produce frequency tables/word cloud.
library(arules)
library(tm)
library(SnowballC)
library(wordcloud)
library(arulesViz)
cname<- file.path("D:","Lilis Syarifah's File Master","S2","Materi Kuliah","semester 3","Tesis","Program","Data")
#cname<- file.path("C:","Users","Apriliantono","Desktop","SIDANG","Program","Data")
docs <- Corpus(DirSource(cname))
#make texts document as one
docs <- tm_map(docs,PlainTextDocument)
docs <- Corpus(VectorSource(docs$content))
#change the word to lower style
docs <- tm_map(docs, tolower)
#remove punctuation in the text
docs <- tm_map(docs, removePunctuation)
#remove number
docs <- tm_map(docs, removeNumbers)
docs <- tm_map(docs, removeWords, stopwords("english"))
#remove blank space
docs <- tm_map(docs, stemDocument)
#remove specific (domain-generic) words hand-picked for this thesis corpus
myStopWord <- c('alternative','although','along','ability','aim','aimed','areas','aims','activ','activity',"activities",'addition','among','and','analys','analysis','are','analyses','analysi','analyze','analyzed','analyz','also','accuracy','algorithm','available',"applied",'application','alternatives','base','based','bogor','browser',"best",'become','can','calculate',"characteristics",'complete',"consisted",'consist','consists','combination',"compared",'considered', "condition",'caus',"cause",'causes',"caused",'containing','code','current','compile','computer','conduct','conducted',"character",'criteria',"characters",'content','determine','determin','design','dapat','dari',"days", "done",'due','found','data','dan','different','differ','environment',"evaluate",'evalu',"existing",'experiment','effect','execute', 'except','experiment','experi','especially','effect','effects','four','for','from','file','factor','find','format','formula','function','first','good','get','grow','growth','generate',"highest","however",'howev',"higher",'high','height','ipb','identifi','identification',"information",'implement','identify','important','import','input',"increase",'increas', "increased",'installation','included','includ',"index",'increasing',"indicated",'known','know',"level",'levels','like',"management","medium",'meet','mainly', 'may','measure','method','model','mainwhile','make',"methods",'must',"many","namely", 
'need','non','needed','necessary','number','new','obtain','open','observ','object','objectives',"obtained",'one','operate','operation','order','optimal','output','plants','perform','power','previous',"per",'problem','program','produced','produce','produc','provide','purpose','potentially','product','production','productivity',"period",'purposes','preferences',"quality",'rate','regarding','require','research','result','resulted',"results","randomized",'reduced','reduc','recommendation','requires',"respectively",'set','second','shown','shows','status',"several",'signific',"significantly",'studied','significant',"selected",'study','spesific','still','show','sustanable','system','showed','studi','source','summary','system','standard','types','technical','type','the','that','technique','technology',"time",'times','tool','total','two','three','transfer',"therefore","therefor", "test", 'unit','utility',"value",'valu',"whereas", "without","well", 'which','will','was','were','with','yang')
docs <- tm_map(docs, removeWords, myStopWord)
docs <- tm_map(docs, stripWhitespace)
dtm <- DocumentTermMatrix(docs)
dtm
# Drop terms absent from 95%+ of documents.
dt <- removeSparseTerms(dtm, 0.95)
dt
mdt<-as.matrix(dt)
write.csv(mdt,file = "dt.csv")
# Mine frequent itemsets; LHS restricted to agriculture-related stems.
trans <- as(mdt, "transactions")
frequentitemset <- apriori(trans, parameter = list(sup=0.022,minlen=4),
                           appearance = list(default="rhs",lhs=c("land","seed","fertil","soil","rice","genotyp","forest","fruit",'water','leaf','oil',"palm",'farm',"soybean",'natur','farmer','flower','nutrient','plantat','watersh','seedl','germin','pest','veget','paddi','root','morpholog','gene','physiolog')))
top.support <- sort(frequentitemset, decreasing = TRUE, na.last = NA, by = "support")
items(top.support)
support<-quality(top.support)
support
write.csv(support,file = "support.csv")
frequent_l<- as(items(top.support), "list")
frequent_l
plot(top.support, method="graph")
##
tdm <- TermDocumentMatrix(docs)
td <- removeSparseTerms(tdm, 0.95)
mtd<-as.matrix(td)
View(mtd)
write.csv(mtd,file = "td.csv")
# count word totals within each single document
freq <- sort(colSums(as.matrix(mtd)), decreasing=TRUE)
freq
write.csv(freq,file = "freq.csv")
head(freq, 50)
# count word frequency across the documents, sorted from highest occurrence
dm <- sort(rowSums(mtd),decreasing=TRUE)
# convert the matrix to a data frame with header information
dm <- data.frame(word = names(dm),freq=dm)
head(dm,200)
#wordcloud(docs, max.words = 100, random.order = FALSE)
set.seed(142)
# NOTE(review): `max.words=` is an empty argument (falls back to the
# wordcloud default) — probably a leftover; confirm intended value.
wordcloud(words = dm$word, freq = dm$freq, min.freq = 100,
          max.words=, random.order=FALSE, rot.per=0.35,
          colors=brewer.pal(8, "Dark2"))
d7f5a08fd5d36436cfec7dcda758f324bde68cd7 | 97df20d91b382f16222fc61f8b2b2e0f06fae91c | /AirPollution.R | 1726563340921b1deeef01b6e08b77ab32fbf882 | [] | no_license | winnie92/ExploratoryDataAnalysis | 1be767b9ef0d554cb33ff2e91f3b213347ccbf05 | a242610f34464ae9d601602080359ddf3a84cc10 | refs/heads/master | 2021-01-10T02:18:43.337901 | 2015-10-24T06:41:41 | 2015-10-24T06:41:41 | 44,855,357 | 0 | 0 | null | null | null | null | IBM852 | R | false | false | 5,357 | r | AirPollution.R | NEI <- readRDS("summarySCC_PM25.rds")
# Source classification codes; NEI (the emissions table) is loaded just above.
SCC <- readRDS("Source_Classification_Code.rds")
#1. make a plot showing the total PM2.5 emission from all sources for each of the years 1999,
#2002, 2005, and 2008.
# NOTE(review): nei1 is computed but never used afterwards.
nei1<-subset(NEI,select=c(Emissions,year))
nei2<-transform(NEI,Emissions,year=factor(year))
# Total emissions per year, then a bar chart saved to plot1.png.
nei3 <- tapply(nei2$Emissions,nei2$year,sum)
barplot(nei3,main="Total PM2.5 Emission From All Sources Across the United States",ylab="Emissions",xlab="Year")
dev.copy(png,file="plot1.png")
dev.off()
#2. Have total emissions from PM2.5 decreased in the Baltimore City, Maryland (fips == "24510") from 1999 to 2008?
#Use the base plotting system to make a plot answering this question.
nei4 <- subset(NEI,select=c(Emissions,year),subset=(fips == "24510"))
nei5 <- transform(nei4,Emissions,year=factor(year))
# Yearly totals for Baltimore City only, saved to plot2.png.
nei6 <- tapply(nei5$Emissions,nei5$year,sum)
barplot(nei6,main="Total PM2.5 Emission From Baltimore City",ylab="Emissions",xlab="Year")
dev.copy(png,file="plot2.png")
dev.off()
#3. Of the four types of sources indicated by the type (point, nonpoint, onroad, nonroad) variable, which of these
#four sources have seen decreases in emissions from 1999-2008 for Baltimore City? Which have seen increases in
#emissions from 1999-2008?
# Baltimore City records only; year and type coerced to factors.
nei6 <- subset(NEI,select=c(Emissions,year,type),subset=(fips == "24510"))
nei7 <- transform(nei6,year=factor(year),type=factor(type))
nei8 <- tapply(nei7$Emissions,nei7$year,sum)
# First attempt: a conditioning plot of emissions by year within each type.
# The data= argument replaces the original attach()/detach() pair, which
# polluted the search path.
coplot(Emissions~year|type,type="h",lty=1,lwd=5,data=nei7)
dev.copy(png,file="plot3-1.png")
dev.off()
# the graph looks uncleared, so use another method
n <- split(nei7,nei7$type)
names(n)<- c("NONROAD","NONPOINT","ONROAD","POINT")
nonroad <- n$NONROAD
nonpoint <- n$NONPOINT
onroad <- n$ONROAD
# BUG FIX: the original read 'point <- n$NONPOINT', so the POINT panel
# silently re-plotted the NON_POINT data.
point <- n$POINT
# Yearly totals per source type.
nr <- tapply(nonroad$Emissions,nonroad$year,sum)
np <- tapply(nonpoint$Emissions,nonpoint$year,sum)
or <- tapply(onroad$Emissions,onroad$year,sum)
p <- tapply(point$Emissions,point$year,sum)
# 2x2 panel: one bar chart per source type, saved to plot3-2.png.
par(mfrow=c(2,2),mar=c(5,4,4,1))
barplot(nr,ylab="Emission",main="Emission from Different Years of NON_ROAD Type")
barplot(np,ylab="Emission",main="Emission from Different Years of NON_POINT Type")
barplot(or,ylab="Emission",main="Emission from Different Years of ON_ROAD Type")
barplot(p,ylab="Emission",main="Emission from Different Years of POINT Type")
dev.copy(png,file="plot3-2.png")
dev.off()
#4. Across the United States, how have emissions from coal combustion-related
# sources changed from 1999-2008?
# Select the EI.Sector levels that mention "Coal" and collect their SCC codes.
index<-grepl("Coal",levels(SCC$EI.Sector))
coal<-levels(SCC$EI.Sector)[index]
# %in% generalizes the original hard-coded (coal[1]|coal[2]|coal[3]) test,
# which silently assumed exactly three matching sectors.
coal2<-subset(SCC,select=SCC,subset=(EI.Sector %in% coal))
# choose the data of burning coal from NEI using the coal SCC codes.
# A single vectorized subset replaces the original per-code subset+rbind
# loop, which grew a data frame in O(n^2) time.
coal3<-as.character(coal2$SCC)
coal5<-subset(NEI,select=c(Emissions,year),subset=(SCC %in% coal3))
# calculate the total emissions of each year and plot it
coal5 <- transform(coal5,Emissions=as.numeric(Emissions),year=factor(year))
coal6<-tapply(coal5$Emissions,coal5$year,sum)
barplot(coal6,main="Coal Emissions Across the United States",ylab="Emissions",xlab="Year")
# from the graph we can see that the coal emissions arised from 1992 to 2002 and decreased from
# 2005 to 2008
dev.copy(png,file="plot4.png")
dev.off()
#5. How have emissions from motor vehicle sources changed from 1999-2008 in Baltimore City?
# Collect the SCC codes of every "Mobile" (motor vehicle) sector.
motor0<-grepl("Mobile",levels(SCC$EI.Sector))
motor1<-levels(SCC$EI.Sector)[motor0]
motor3 <- subset(SCC,select=SCC,subset=(EI.Sector %in% motor1))
# motor4 is reused below by the Los Angeles comparison (plot 6).
motor4<- as.character(motor3$SCC)
# Single vectorized subset of NEI replaces the original per-code
# subset+rbind loop (O(n^2) data-frame growth).
motor6 <- subset(NEI,select=c(Emissions,year),subset=(SCC %in% motor4 & fips == "24510"))
motor7 <- tapply(motor6$Emissions,motor6$year,sum)
# make a barplot
barplot(motor7,ylab="Emission",main="Emission of motor vehicles from Baltimore city")
dev.copy(png,file="plot5.png")
dev.off()
#6. Compare emissions from motor vehicle sources in Baltimore City with emissions from motor vehicle
#sources in Los Angeles County, California (fips == "06037"). Which city has seen greater changes
#over time in motor vehicle emissions?
# Per-SCC-code subset for Los Angeles; motor4 comes from section 5 above.
cali0 <- function(j){
  return(subset(NEI,select=c(Emissions,year),subset=(SCC==motor4[j] & fips == "06037")))
}
# NOTE(review): growing cali1 with rbind inside the loop is O(n^2); also
# 1:length(motor4) would misbehave if motor4 were empty (prefer seq_along).
cali1 <- data.frame()
for(i in 1:length(motor4)){
  cali1 <- rbind(cali1,cali0(i))
}
# calculate the range and standard deviation of Baltimore city
range_Baltimore <- diff(range(motor7))
sd_Baltimore <- sd(motor7)
# calculate the range and standard deviation of Los Angeles city
cali2 <- tapply(cali1$Emissions,cali1$year,sum)
range_LA <- diff(range(cali2))
sd_LA <- sd(cali2)
# compare the values of them and get the conclusion that Los Angeles have greater changes
result_list <- list(range_Baltimore=range_Baltimore,range_LA=range_LA,sd_Baltimore=sd_Baltimore,sd_LA=sd_LA)
# make a barplot
barplot(c(result_list$range_Baltimore,result_list$range_LA,result_list$sd_Baltimore,result_list$sd_LA),
        names.arg=names(result_list),main="Los Angeles have greater changes in Emissions of motor vehicles")
dev.copy(png,file="plot6.png")
dev.off()
|
41fbfaac6117de8bdfde1c9833969c744c1600a4 | b0749f2171b5edae03897fb43732363879333000 | /textMining.R | c6d7a3214de4a8d6c178082635adc30ef251ed66 | [] | no_license | koskot77/DevDataProducts | be10a8c77209b25a166ee0a5799d47c16972e819 | ecb5b94e73c3933be7545e5244e31d9fa682a3b7 | refs/heads/master | 2016-09-05T11:13:35.558135 | 2015-11-27T10:00:28 | 2015-11-27T10:00:28 | 29,824,373 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,134 | r | textMining.R | require(tm)
tw <- read.csv(file="uk.csv",header=T,sep=',')
tw$Text <- sub("@\\w+","",tw$Text,perl = TRUE)
tw$Text <- sub("(:\\w+:)+","",tw$Text,perl = TRUE)
tw$Text <- sub("http[^\\s]*","",tw$Text,perl = TRUE)
corp <- Corpus(VectorSource(tw$Text),readerControl = list(language = "rus"))
corp <- tm_map(corp, stripWhitespace)
corp <- tm_map(corp, removePunctuation)
corp <- tm_map(corp, content_transformer(tolower))
corp <- tm_map(corp, removeWords, stopwords("russian"))
corp <- tm_map(corp, removeWords, stopwords("english"))
dtm <- DocumentTermMatrix(corp, control = list( weighting = function(x) weightTfIdf(x,normalize = FALSE), stopwords = TRUE) )
dtmLight <- removeSparseTerms(dtm, 0.999)
require(proxy)
d <- dist(as.matrix(dtmLight[1:1000,]), method = "cosine")
hc <- hclust(d)
dend = as.dendrogram(hc)
plot(dend)
#inspect(dtmLight[3, which(as.vector(dtmLight[3,])*as.vector(dtmLight[60,])!=0)] )
#as.matrix(d)[3,60]
#writeLines(as.character(corp[[3]]))
#writeLines(as.character(corp[[60]]))
#writeLines(as.character(corp[[48]]))
#table( as.factor( cutree(hc, 100) ) )
#findAssocs(dtm, "make", 0.1)
#findFreqTerms(dtm, 5000)
|
f2a1ce7fd647ebecf4071964f0b5fcad4069653c | d74c4b4169038f19063b2fc703720a5054230713 | /R/robScale.R | fb1b9dfba63bdcd88269cd4dc1f2e1e20a73ed8e | [
"LicenseRef-scancode-dco-1.1"
] | no_license | aadler/revss | a95d644e4e73d384dc28cf39fa0e65c35454ce4b | 5a9002a47c8609dae3fe91a35bae25f1d5f7608b | refs/heads/master | 2023-08-17T14:49:46.382971 | 2023-02-01T16:52:30 | 2023-02-01T16:52:30 | 267,474,994 | 4 | 1 | null | null | null | null | UTF-8 | R | false | false | 977 | r | robScale.R | # Copyright (c) 2020, Avraham Adler All rights reserved
# SPDX-License-Identifier: BSD-2-Clause
# Robust Scale Estimator found in Rousseeuw & Verboven (2002)
# Robust scale estimator of Rousseeuw & Verboven (2002).
#
# x         numeric data vector.
# loc       optional known location; when supplied the data are centred on it
#           and the MAD consistency factor 1.4826 is applied by hand.
# implbound threshold below which tiny-sample estimates fall back to adm().
# na.rm     drop NAs (FALSE -> error on any NA).
# maxit     cap on correction iterations.
# tol       convergence tolerance on the multiplicative correction factor.
robScale <- function(x, loc = NULL, implbound = 1e-4, na.rm = FALSE,
                     maxit = 80L, tol = sqrt(.Machine$double.eps)) {
  if (na.rm) {
    x <- x[!is.na(x)]
  } else if (anyNA(x)) {
    stop("There are NAs in the data yet na.rm is FALSE")
  }
  # Initial centre/scale; the minimum usable sample size depends on whether
  # the location had to be estimated.
  if (is.null(loc)) {
    center <- median(x)
    scale_est <- mad(x)
    minobs <- 4L
  } else {
    x <- x - loc
    center <- 0
    scale_est <- 1.4826 * median(abs(x))
    minobs <- 3L
  }
  # Tiny samples: fall back to adm() when the MAD is (near) degenerate,
  # otherwise just return the MAD.
  if (length(x) < minobs) {
    if (mad(x) <= implbound) {
      return(adm(x))
    }
    return(mad(x))
  }
  # Iteratively rescale until the multiplicative correction is ~1.
  for (iter in seq_len(maxit)) {
    corr <- sqrt(2 * mean((2 * plogis(((x - center) / scale_est) / 0.3739) - 1) ^ 2))
    scale_est <- scale_est * corr
    if (abs(corr - 1) <= tol) {
      break
    }
  }
  scale_est
}
|
b367911d5f732220bfcfb0bd45687448a8978452 | ae55ccca23e741e6ec249b8c2d53d9f06d2ea431 | /javier/plots.R | a3a6de4557e8d524f2d5cec7b3ab4687841a5a69 | [] | no_license | jmatias/linearRegressionAnalysis | f0fee144355ca4b952651a9efaa0ca22fb849788 | 2029ccaa847f2d29749305749ba77b6ae72b8ba4 | refs/heads/master | 2020-03-25T04:03:07.821711 | 2018-08-03T03:18:29 | 2018-08-03T03:18:29 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 313 | r | plots.R |
# Normal Q-Q plot of a fitted model's residuals.
# NOTE: this deliberately keeps the historical name, which shadows
# stats::qqplot for model objects in this project.
#
# model  a fitted model with $fitted.values and resid() support (e.g. lm).
# pcol   point colour; lcol: reference-line colour.
# cex    point size (halved automatically for > 1000 fitted values).
# title  plot title.
qqplot <- function(model, pcol = "grey", lcol = "dodgerblue", cex = 1,
                   title = "Normal Q-Q Plot") {
  if (length(model$fitted.values) > 1000) {
    cex <- cex * 0.5  # many points: draw them smaller
  }
  qqnorm(resid(model), col = pcol, cex = cex, main = title)
  qqline(resid(model), col = lcol, lwd = 2)
}
|
9b7719a9f3fef1313b4cbeda710831262d15f52b | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/copBasic/examples/RFcop.Rd.R | 98bad60f058bcd76efbe3cbd5d60476f4027fd3f | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 720 | r | RFcop.Rd.R | library(copBasic)
### Name: RFcop
### Title: The Raftery Copula
### Aliases: RFcop
### Keywords: Raftery copula copula (formulas) copula Nelsen (2006)
### Examples and Exercises
### ** Examples
# Lower tail dependency of Theta = 0.5 --> 2*(0.5)/(1+0.5) = 2/3 (Nelsen, 2006, p. 214)
taildepCOP(cop=RFcop, para=0.5)$lambdaL # 0.66667
## Not run:
##D # Simulate for a Spearman Rho of 0.7, then extract estimated Theta that
##D # internally is based on Kendall Tau of U and V, then convert estimate
##D # to equivalent Rho.
##D UV <- simCOP(1000, cop=RFcop, RFcop(rho=0.7)$para)
##D Theta <- RFcop(UV$U, UV$V)$para # 0.605093
##D Rho <- Theta*(4-3*Theta)/(2-Theta)^2 # 0.679403 (nearly 0.7)#
## End(Not run)
|
a8b26351cdffb1dbc86fc0f1f6e45f7c1934ef98 | 7a95abd73d1ab9826e7f2bd7762f31c98bd0274f | /multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613103312-test.R | fd862f4d1685b17ee0500b2d235e25fefc0854db | [] | no_license | akhikolla/updatedatatype-list3 | 536d4e126d14ffb84bb655b8551ed5bc9b16d2c5 | d1505cabc5bea8badb599bf1ed44efad5306636c | refs/heads/master | 2023-03-25T09:44:15.112369 | 2021-03-20T15:57:10 | 2021-03-20T15:57:10 | 349,770,001 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 304 | r | 1613103312-test.R | testlist <- list(A = structure(c(-1.44121736636402e+219, 7.03636119869644e-251, 992913157178624384, 1.25891760799084e-88, 6.92682164882786e-310, 2.02410200510026e-79, 0, 0, 0, 0), .Dim = c(2L, 5L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result) |
3457d658eebe95cde86dd3f5f456bed98e076e6a | 29585dff702209dd446c0ab52ceea046c58e384e | /PKreport/R/PKdata.R | d22068627cf379b5382df60847a24516db4065a8 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,419 | r | PKdata.R | #############################################################################################
## File: PKdata.R
## Author: Xiaoyong Sun
## Date: 10/20/2009
## Goal: read in PK data; file config
## Notes:
## -
#############################################################################################
#' Load a PK data set into the package, after validating both the data and
#' the user-supplied config list.
#'
#' data:       data.frame of PK observations; must have column names.
#' match.term: named list mapping the required terms (ID, DV, TIME, RES,
#'             WRES, PRED, IPRE, ...) to column names present in 'data'.
#'
#' Side effects: stores the config and the data in the package-internal
#' '.pkplot' store and prints a confirmation message.
PKdata <- function(data, match.term=NULL)
{
    if (missing(data)) stop("Data is required!")
    if (length(data)==0 || nrow(data)==0) stop("Data is Not available!")

    ## read in data
    if (length(colnames(data))==0) stop("Data column does NOT have names!")

    # Check missing values column by column: an all-NA column is fatal,
    # partial NAs only warn.  A for-loop replaces the original
    # sapply(1:ncol(data), ...), which was used purely for side effects and
    # whose 1:ncol(data) index would misbehave on a zero-column frame.
    for (i in seq_len(ncol(data))) {
        if (all(is.na(data[,i]))) stop(paste("\nData column ", i, " are all NA values!", sep=""))
        if (any(is.na(data[,i]))) warning(paste("\nData column ", i, " has missing values!", sep=""))
    }

    ## match term: every configured name must be a column of 'data'
    if (is.null(match.term)) stop("Please input config list!")
    mt <- unlist(match.term)
    PK.match <- match(mt, colnames(data))
    if (anyNA(PK.match)) stop(paste(dQuote(mt[is.na(PK.match)]) , "in config list do NOT match data!\n", sep=" "))

    # ID, DV, TIME, RES, WRES, PRED and IPRE must each map to exactly one column.
    if ( (length(match.term$ID)!=1) || (length(match.term$DV)!=1) || (length(match.term$TIME)!=1) )
        stop("Please make sure ID, DV and TIME are input with only ONE variable!")
    if ( (length(match.term$RES)!=1) || (length(match.term$WRES)!=1) )
        stop("Please make sure RES and WRES are input with only ONE variable!")
    if ( (length(match.term$PRED)!=1) || (length(match.term$IPRE)!=1) )
        stop("Please make sure PRED and IPRE are input with only ONE variable!")

    .pkplot$setTerm(match.term)
    .pkplot$setPKData(data)
    cat("Data is read successfully.\n")
}
# NOTE: "general.list" should be "global.list"
# Store plotting configuration in the package-internal '.pkplot' store.
#
# general.list: global options (must include a valid save.format; "png" is
#               forced to the front of the format vector).
# hist.list:    histogram options, split below into lattice / ggplot / other.
# scatter.list: scatter-plot options, split the same way.
#
# Returns NULL invisibly; called purely for its side effects on .pkplot.
PKconfig <- function(general.list, hist.list, scatter.list)
{
    if (any(!(general.list$save.format %in% c("jpeg", "bmp", "png", "tiff", "win.metafile"))))
    {
        stop("The save format is NOT supported! jpeg, bmp, png, tiff, win.metafile are supported!")
    }
    # check general.list
    # Guarantee "png" is present and first; keep any other formats after it.
    if (is.null(general.list$save.format) || !("png" %in% general.list$save.format))
    {
        general.list$save.format <- c("png", unique(general.list$save.format))
    }
    else
    {
        png.ind <- which("png" == general.list$save.format)
        general.list$save.format <- c("png", general.list$save.format[-png.ind])
    }
    ## general term setup
    sapply(names(general.list), function(i) .pkplot$setGlobalConfig(i,general.list[[i]]))
    ## graph global term
    ## - for lattice
    # Option names recognised by each plotting backend.
    lattice.global <- c("col", "span", "type", "layout")
    ggplot.global <- c("col", "span")
    ## hist - lattice setup config term
    lattice.list <- hist.list[names(hist.list) %in% lattice.global]
    .pkplot$setHistGraph(lattice.list, "lattice")
    ## hist - ggplot setup config term
    ggplot.list <- hist.list[names(hist.list) %in% ggplot.global]
    ggplot.list$geom <- c("histogram")
    .pkplot$setHistGraph(ggplot.list, "ggplot")
    ## scatter - lattice setup config term
    lattice.list <- scatter.list[names(scatter.list) %in% lattice.global]
    .pkplot$setScatterGraph(lattice.list, "lattice")
    ## scatter - ggplot setup config term
    ggplot.list <- scatter.list[names(scatter.list) %in% ggplot.global]
    ## ggplot type setup || layout setup too
    # Translate a lattice-style 'type' spec into ggplot geoms; unmatched
    # specs leave geom unset.
    if (!is.null(scatter.list$type))
    {
        ggplot.list$geom <- switch(paste(scatter.list$type, collapse=""),
                              p = c("point"),
                              l = c("line"),
                              psmooth = c("point", "smooth"),
                              lsmooth = c("line", "smooth"))
    }
    else
    {
        warning("ggplot package does not have matching type at this time!")
    }
    .pkplot$setScatterGraph(ggplot.list, "ggplot")
    # Anything not recognised by lattice is stored under "others".
    others.list <- hist.list[!(names(hist.list) %in% lattice.global)]
    .pkplot$setHistGraph(others.list, "others")
    others.list <- scatter.list[!(names(scatter.list) %in% lattice.global)]
    .pkplot$setScatterGraph(others.list, "others")
    invisible(NULL)
}
1cb7c3912935909afca67f7df573490c9832a0bb | 7475afe3f33869bc1bc60c9332aa2c3b8febc993 | /R/GetAccessionNo.R | f689e69160c9be50d1095411e3be3908045dd21b | [
"MIT"
] | permissive | jasmyace/finreportr | 2409d1a4495a0ed03c495be323fff45fbaf94f21 | 2b8c5f971a75f243fbf629614429214c13a23224 | refs/heads/master | 2022-08-03T02:08:16.753120 | 2020-05-30T21:14:25 | 2020-05-30T21:14:25 | 266,425,723 | 0 | 0 | MIT | 2020-05-23T21:53:54 | 2020-05-23T21:53:53 | null | UTF-8 | R | false | false | 885 | r | GetAccessionNo.R | GetAccessionNo <- function(symbol, year, annual=TRUE, quarter=TRUE) {
# symbol <- "AAPL"
# year <- 2016
# annual <- TRUE
# quarter <- TRUE
year.char <- as.character(year)
reports.df <- Reports(symbol, annual, quarter)
reports.df <- reports.df %>%
dplyr::mutate(filing.year = substr(.data$filing.date, 1, 4)) %>%
dplyr::filter(.data$filing.year == year.char) %>%
dplyr::filter(.data$filing.name == "10-K" | .data$filing.name == "20-F" | .data$filing.name == "10-Q")
accession.no.raw <- reports.df %>%
dplyr::select(.data$accession.no) %>%
dplyr::pull()
# I need to be able to track which is the annual.
names(accession.no.raw) <- reports.df$filing.name
# Error message for function
if(length(accession.no.raw) == 0) {
stop("no filings available for given year")
}
return(accession.no.raw)
} |
21f3b92d3e7035b6dec5b2034a4b21c5ebfd54cc | 6e01e88b65509800c2f9ed73a585db64f9bf06db | /apastats/man/lengthu.Rd | b19b6420a6bfc9a140c6cde04a28ee4936ae63a1 | [] | no_license | achetverikov/APAstats | b2449d7375c9f011688a0aad31fc7685a83bfa5f | 448bb214274ee3837c58fd1238b336076552d918 | refs/heads/master | 2022-02-02T22:44:29.299553 | 2022-01-04T19:22:37 | 2022-01-04T19:22:37 | 33,860,134 | 16 | 4 | null | 2022-01-04T19:22:38 | 2015-04-13T09:38:57 | HTML | UTF-8 | R | false | true | 330 | rd | lengthu.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/misc.R
\name{lengthu}
\alias{lengthu}
\title{Length of unique values}
\usage{
lengthu(x)
}
\arguments{
\item{x}{a vector}
}
\value{
number of unique values in x
}
\description{
Counts unique values
}
\examples{
x<-c(5,7,8,9,5,7)
length(x)
lengthu(x)
}
|
a86a64e4de594b9e7e9f9e0389b9caaae3c7e544 | a1354a82ca5c9de694baad99421ced7b84a2a3dc | /R/403_graf_comp_primas_rtr.R | b83f220af596985999fd4e7e59bedfa0d630e9bd | [] | no_license | pabloalban/Impacto_501_a_2020 | 98fac5eb3fb996d2d5222e027848f83e5f4a4715 | fbd7d2471e2d2fe9582c687c6655c255480ea62d | refs/heads/master | 2023-08-22T03:33:02.397140 | 2021-09-27T14:09:25 | 2021-09-27T14:09:25 | 381,487,533 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,567 | r | 403_graf_comp_primas_rtr.R | # Datos para gráfico de Tabla 2 del documento "causas de posible desfinanciamiento".
# Tabla 2: Comparación de las primas de aportes: Resolución C.D. 501 – Resolución C.D. 261.
# Carga de datos -----------------------------------------------------------------------------------
message( '\tGráfica de cooparación entre primas de la CD501 y CD261' )
file_prima <- paste0( parametros$RData_seg, 'IESS_RTR_causas_desfinanciamiento.RData' )
load( file = file_prima )
Comparacion_Primas <- (comparacion_primas)
setnames(Comparacion_Primas, c("Año", "C.D.501", "C.D.261", "Etiqueta"))
com_pri_apo <- Comparacion_Primas
com_pri_apo$Año <- as.character(com_pri_apo$Año)
com_pri_apo$Año <- ymd(paste0(com_pri_apo$Año, '/01/01'))
com_pri_apo$Año <- as.Date(com_pri_apo$Año,"%Y-%m-%d")
com_pri_apo$C.D.501 <- com_pri_apo$C.D.501/100
com_pri_apo$C.D.261 <- com_pri_apo$C.D.261/100
com_pri_apo['C.D.501_privados'] <- com_pri_apo$C.D.501
com_pri_apo['C.D.501_publicos'] <- com_pri_apo$C.D.501
com_pri_apo$C.D.501_publicos[c(10:11)] <- c(0.0038,0.0038)
message( paste( rep('-', 100 ), collapse = '' ) )
message( '\tGrafico: Comparación de las primas de aportes' )
source( 'R/401_graf_plantilla.R', encoding = 'UTF-8', echo = FALSE )
#Gráfico de comparación de tasas de aportación------------------------------------------------------
x_lim <- c( 2012, 2022 )
x_brk <- seq( x_lim[1], x_lim[2], 1 )
y_brk<- seq(0.001,0.006,0.0005)
y_lbl <- paste0(formatC(100 * y_brk, digits = 2, format = 'f', big.mark = '.', decimal.mark = ',' ),"%")
plt_com_pri_apo <- ggplot(com_pri_apo, aes(Año)) +
geom_line(aes(y = C.D.261,colour ="C.D.261")) +
geom_line(aes(y = C.D.501_privados,colour ="C.D.501 privados"), size=1.5) +
geom_line(aes(y = C.D.501_publicos,colour ="C.D.501 públicos"),
linetype = "dashed",
alpha=0.8,
size=1.1) +
geom_point(aes(y = C.D.501_privados,colour ="C.D.501 privados"),
shape = 15,
# size = graf_line_size,
size = 3,
color = parametros$iess_green)+
geom_point(aes(y = C.D.501_publicos,colour ="C.D.501 públicos"),
shape = 20,
# size = graf_line_size,
size = 3,
color = 'red' ) +
geom_point( aes(y = C.D.261,colour ="C.D.261"),
shape = 15,
# size = graf_line_size,
size = 2,
color = parametros$iess_blue ) +
scale_x_date(date_breaks = "1 year", date_labels = "%Y")+
scale_y_continuous(breaks = y_brk,
labels = y_lbl,
limits = c(y_brk[1], max(y_brk))) +
scale_colour_manual("",
breaks = c("C.D.501 privados", "C.D.501 públicos" ,"C.D.261"),
values = c("C.D.501 privados" = parametros$iess_green ,
"C.D.501 públicos" = 'red',
"C.D.261" = parametros$iess_blue))+
# geom_text_repel(aes(Año, C.D.501, label =Etiqueta ),
# point.padding = unit(0.19, 'lines'),
# arrow = arrow(length = unit(0.01, 'npc')),
# segment.size = 0.1,
# segment.color = '#cccccc'
# ) +
theme_bw() +
plt_theme +
theme(legend.position="bottom") +
labs( x = '', y = '' )+
theme( axis.text.x = element_text(angle = 90, hjust = 1 ) )
#Guaradando gráfica en formato png------------------------------------------------------------------
ggsave( plot = plt_com_pri_apo,
filename = paste0( parametros$resultado_graficos, 'iess_grafico_tasas_aporte', parametros$graf_ext ),
width = graf_width, height = graf_height, units = graf_units, dpi = graf_dpi )
message( paste( rep('-', 100 ), collapse = '' ) )
rm( list = ls()[ !( ls() %in% c( 'parametros' ) ) ] )
gc() |
7f4b55f5b49eca38c990b3db3ac91a2c6c71a0f3 | ffd09e41f3309a0ac3eaf2adca34674e0920d1b8 | /man/PlotDarkMatter.Rd | 4e10e26e7695d1dbb3f0bccae9934abe63272fb0 | [] | no_license | leipzig/Vennerable | 04b3329c21623176730232f08ffdd79f4515cdc2 | 43b1d2b4f84f85f9578337b92311bc7c35da7952 | refs/heads/master | 2021-01-17T23:15:48.982954 | 2015-12-15T17:22:06 | 2015-12-15T17:22:06 | 51,934,923 | 1 | 0 | null | 2016-02-17T16:00:07 | 2016-02-17T16:00:06 | null | UTF-8 | R | false | false | 721 | rd | PlotDarkMatter.Rd | \name{PlotDarkMatter}
\Rdversion{1.1}
\alias{PlotDarkMatter}
\title{
Plots dark matter
}
\description{
Fills the area of a \code{VennDrawing} universe that is not occupied by any set.
}
\usage{
PlotDarkMatter(VD)
}
\arguments{
\item{VD}{
An object of class \code{VennDrawing}
}
}
\details{
This works by filling the entire universe with grey (not currently changeable) and then
filling the inside of the dark matter boundary with white.
}
\value{
Executed for its side effects
}
\author{
Jonathan Swinton (jonathan@swintons.net)
}
\examples{
VD <- compute.Venn(Venn(n=2))
PlotDarkMatter(VD)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ graphs }
|
0624db02ced429106f3e89e9b6698b497ed12fa3 | eaee4b1b3422ab716bfe565600db6c4914a53e75 | /homework 6.R | b7ff0f2d0cd4b09db581b0c37d5c83ef9c2158f2 | [] | no_license | KejiaHuang/Intro-for-R | 06508f66fcca7026ac63d5f73a8ec4f4571e2dd9 | 2c655f16178a7b167e44a34ea57fb3f5ccf1ceae | refs/heads/master | 2021-05-02T06:47:10.368310 | 2018-02-09T06:40:01 | 2018-02-09T06:40:01 | 120,864,561 | 0 | 0 | null | null | null | null | GB18030 | R | false | false | 3,477 | r | homework 6.R | library(quantmod)
library(bizdays)
# Use package quantmod download the Option Chain of SPY,
# the maturity date is Dec-042015.
spyop <- getOptionChain("SPY", Exp = "2015/2016")
spyop$`12月.04.2015` # don't know why there is Chinese character in opion chain
callprice <- spyop$`12月.04.2015`$calls$Last
putprice <- spyop$`12月.04.2015`$puts$Last
callstrike <- spyop$`12月.04.2015`$calls$Strike
putstrike <- spyop$`12月.04.2015`$puts$Strike
# Download the market quote data as spot
spyq <- getQuote( "SPY", src = "yahoo")
S <- spyq$Last[1]
# set r as 0.002
r <- 0.02
# Maturity
m <- bizdays( "2015-11-13" , "2015-12-4" )/ 252
# Black-scholes
# Black-Scholes price of a European call.
#   S:        spot price
#   K:        strike
#   r:        continuously compounded risk-free rate
#   Maturity: time to expiry in years
#   sigma:    annualized volatility
call.BS <- function(S, K, r, Maturity, sigma) {
  vol_t <- sigma * sqrt(Maturity)
  d1 <- (log(S / K) + (r + sigma^2 / 2) * Maturity) / vol_t
  d2 <- d1 - vol_t
  S * pnorm(d1) - K * exp(-r * Maturity) * pnorm(d2)
}
# Black-Scholes price of a European put (same parameters as call.BS).
put.BS <- function(S, K, r, Maturity, sigma) {
  vol_t <- sigma * sqrt(Maturity)
  d1 <- (log(S / K) + (r + sigma^2 / 2) * Maturity) / vol_t
  d2 <- d1 - vol_t
  K * exp(-r * Maturity) * pnorm(-d2) - S * pnorm(-d1)
}
# Newton-Raphson implied volatility of a call option.
#
# Inverts the Black-Scholes call price for sigma.  Spot S, rate r and
# maturity m are read from the calling environment (script-level globals),
# as in the original design.
#
# calls:    strike price
# callp:    observed call price
# tol:      convergence tolerance on successive iterates (original epsilon)
# max.iter: iteration cap -- the original 'while(1)' loop could hang forever
#           on non-convergent inputs; default is backward-compatible.
fnewtoncall <- function(calls, callp, tol = 0.0001, max.iter = 200L) {
  # Pricing error at volatility x (Black-Scholes call price minus target);
  # inlined like dfx already was, so the solver is self-contained.
  fx <- function(x) {
    d1 <- (log(S / calls) + (r + x^2 / 2) * m) / (x * sqrt(m))
    d2 <- d1 - x * sqrt(m)
    S * pnorm(d1) - calls * exp(-r * m) * pnorm(d2) - callp
  }
  # Vega: derivative of the call price with respect to volatility.
  dfx <- function(x) {
    d1 <- (log(S / calls) + (r + x^2 / 2) * m) / (x * sqrt(m))
    sqrt(m) * S * dnorm(d1)
  }
  x0 <- 1  # initial volatility guess (as in the original)
  for (step in seq_len(max.iter)) {
    x.new <- x0 - fx(x0) / dfx(x0)
    if (abs(x.new - x0) < tol) {
      return(x.new)
    }
    x0 <- x.new
  }
  warning("fnewtoncall: did not converge within max.iter iterations")
  x0
}
# Newton-Raphson implied volatility of a put option.
#
# Same contract as fnewtoncall, but inverting the Black-Scholes PUT price.
# Spot S, rate r and maturity m are read from the calling environment.
#
# calls:    strike price;  callp: observed put price.
# tol:      convergence tolerance on successive iterates.
# max.iter: iteration cap -- the original 'while(1)' loop could hang forever
#           on non-convergent inputs; default is backward-compatible.
fnewtonput <- function(calls, callp, tol = 0.0001, max.iter = 200L) {
  # Pricing error at volatility x (Black-Scholes put price minus target).
  fx <- function(x) {
    d1 <- (log(S / calls) + (r + x^2 / 2) * m) / (x * sqrt(m))
    d2 <- d1 - x * sqrt(m)
    calls * exp(-r * m) * pnorm(-d2) - S * pnorm(-d1) - callp
  }
  # Vega (identical for calls and puts).
  dfx <- function(x) {
    d1 <- (log(S / calls) + (r + x^2 / 2) * m) / (x * sqrt(m))
    sqrt(m) * S * dnorm(d1)
  }
  x0 <- 5  # initial volatility guess (as in the original)
  for (step in seq_len(max.iter)) {
    x.new <- x0 - fx(x0) / dfx(x0)
    if (abs(x.new - x0) < tol) {
      return(x.new)
    }
    x0 <- x.new
  }
  warning("fnewtonput: did not converge within max.iter iterations")
  x0
}
# get volatility for put
length(putprice)#57
# Drop a problematic quote (row 38), then invert strikes 4..33 only --
# ad-hoc range chosen so Newton converges for this data set.
putstrike <- putstrike[-38]
putprice <- putprice[-38]
putvol <- fnewtonput (putstrike[4], putprice[4])
for ( i in 5: 33){
  a <- fnewtonput( putstrike[i], putprice[i])
  putvol <- c(putvol,a)
}
# plot the put volatility smile
plot(putvol, main = (" volatility smile for put"), ylab = (" volatility "), xlab = (" "))
# get volatility for call
callvol <- fnewtoncall (callstrike[13], callprice[13])
for ( i in 14: 65){
  a <- fnewtoncall( callstrike[i], callprice[i])
  callvol <- c(callvol,a)
}
# delete abnormal number
# NOTE(review): each removal below shifts subsequent indices, so these
# positions are relative to the already-shortened vector.
callvol <- callvol[-22]
callvol <- callvol[-30]
callvol <- callvol[-42]
callvol <- callvol[-40]
callvol <- callvol[-48]
# plot the call volatility smile
plot(callvol, main = (" volatility smile for call"), ylab = (" volatility "), xlab = (" "))
1016ed45178842828bb0df0c64103d82d995175c | b39f4a7acf1766383efbeafb4c10ef00cfdaafbe | /R/create_env.R | 37a65e6f619aebca91968ef66cc01824a96c12ea | [] | no_license | hmorzaria/atlantisom | 6e7e465a20f3673d3dbc843f164fe211faddc6fb | b4b8bd8694c2537e70c748e62ce5c457113b796f | refs/heads/master | 2020-04-02T02:24:05.672801 | 2015-12-12T00:06:03 | 2015-12-12T00:06:23 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 641 | r | create_env.R | #' A function to sample from the environmental truth provided
#' by the Atlantis scenario.
#'
#' @description Uses data provided by an Atlantis scenario and adds
#' observation error and/or bias, along with subsetting, to provide
#' an environmental index for use in subsequent model fitting.
#'
#' @family create functions
#'
#' @param true_env A \code{data.frame} of true environmental data
#' sampled from an Atlantis scenario.
#' @param subset A \code{data.frame} specifying which samples to keep
#'
#' @author Kelli Faye Johnson
#'
#' @export
#'
create_env <- function(true_env, subset) {
#
data <- list()
invisible(data)
}
|
3c4c6802289b0f8536bb6af60860d2b48445b5d5 | 4397afeebe967fcc95355b0df714a041fe9aeab1 | /scripts/wrangle_x_m_origenes_productos.R | de5cbf0180f464399f58dd396fc28d61f18123ea | [] | no_license | ricardomayerb/cap_estudio_2017 | 8bf107c80c037d65ebafda57f05bc336ba88b2f9 | 1caec208aade0070d80803088452cfc90a82e1ac | refs/heads/master | 2021-01-12T06:55:46.830403 | 2017-04-13T12:42:39 | 2017-04-13T12:42:39 | 76,866,240 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,693 | r | wrangle_x_m_origenes_productos.R | library(tidyverse)
library(countrycode)
library(stringr)
load("./produced_data/x_m_por_origen_destino")
load("./produced_data/x_m_por_producto")
load("./produced_data/cepal_20_countries")
load("./produced_data/cepal_33_countries")
imp_by_ori <- bind_rows(dfs_m)
names(imp_by_ori)[1] <- "origin_region"
imp_by_ori$nombre_pais <- str_to_title(imp_by_ori$nombre_pais)
imp_by_ori$nombre_pais <- recode(imp_by_ori$nombre_pais,
"Bolivia" = "Bolivia (Estado Plurinacional de)",
"Venezuela" = "Venezuela (República Bolivariana de)"
)
imp_by_ori$iso2c <- countrycode(imp_by_ori$nombre_pais, "country.name.es",
"iso2c", custom_dict = cepal_33_countries)
imp_by_ori$iso3c <- countrycode(imp_by_ori$nombre_pais, "country.name.es",
"iso3c", custom_dict = cepal_33_countries)
exp_by_dest <- bind_rows(dfs_x)
names(exp_by_dest)[1] <- "dest_region"
exp_by_dest$nombre_pais <- str_to_title(exp_by_dest$nombre_pais)
exp_by_dest$nombre_pais <- recode(exp_by_dest$nombre_pais,
"Bolivia" = "Bolivia (Estado Plurinacional de)",
"Venezuela" = "Venezuela (República Bolivariana de)"
)
exp_by_dest$iso2c <- countrycode(exp_by_dest$nombre_pais, "country.name.es",
"iso2c", custom_dict = cepal_33_countries)
exp_by_dest$iso3c <- countrycode(exp_by_dest$nombre_pais, "country.name.es",
"iso3c", custom_dict = cepal_33_countries)
imp_by_prod <- bind_rows(dfs_m_p)
# names(imp_by_prod)[1] <- "origin_region"
imp_by_prod$nombre_pais <- str_to_title(imp_by_prod$nombre_pais)
imp_by_prod$nombre_pais <- recode(imp_by_prod$nombre_pais,
"Bolivia" = "Bolivia (Estado Plurinacional de)",
"Venezuela" = "Venezuela (República Bolivariana de)",
"Mexico" = "México",
"Rep. Dominicana" = "República Dominicana"
)
imp_by_prod$iso2c <- countrycode(imp_by_prod$nombre_pais, "country.name.es",
"iso2c", custom_dict = cepal_33_countries)
imp_by_prod$iso3c <- countrycode(imp_by_prod$nombre_pais, "country.name.es",
"iso3c", custom_dict = cepal_33_countries)
exp_by_prod <- bind_rows(dfs_x_p)
# names(exp_by_prod)[1] <- "origin_region"
exp_by_prod$nombre_pais <- str_to_title(exp_by_prod$nombre_pais)
exp_by_prod$nombre_pais <- recode(exp_by_prod$nombre_pais,
"Bolivia" = "Bolivia (Estado Plurinacional de)",
"Venezuela" = "Venezuela (República Bolivariana de)",
"Mexico" = "México",
"Rep. Dominicana" = "República Dominicana"
)
exp_by_prod$iso2c <- countrycode(exp_by_prod$nombre_pais, "country.name.es",
"iso2c", custom_dict = cepal_33_countries)
exp_by_prod$iso3c <- countrycode(exp_by_prod$nombre_pais, "country.name.es",
"iso3c", custom_dict = cepal_33_countries)
exp_by_dest_tidy <- exp_by_dest
exp_by_prod_tidy <- exp_by_prod
imp_by_ori_tidy <- imp_by_ori
imp_by_prod_tidy <- imp_by_prod
# save(exp_by_dest_tidy, exp_by_prod_tidy, imp_by_ori_tidy, imp_by_prod_tidy,
# file = "./produced_data/x_m_locations_products_tidy")
save(exp_by_dest_tidy, exp_by_prod_tidy, imp_by_ori_tidy, imp_by_prod_tidy,
file = "./produced_data/data_with_basic_wrangling/x_m_locations_products_tidy")
|
521c0d22c2d518c629a88b7e7f362b4550d20642 | 341c04301ad9ceaa28012a602f96e56d6859c800 | /rtest.r | 8612f2aeded3c975b2b3d6781e876d6406518939 | [] | no_license | aurorakp/treemodel-compare | e2d3210dbda5c28ef91766036e6b316440fe74b9 | d324b53aec43e87e2841c7ff17817728ab01f4ca | refs/heads/master | 2021-01-17T19:09:59.152923 | 2016-06-30T13:24:37 | 2016-06-30T13:24:37 | 39,741,298 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 626 | r | rtest.r | filename1 = <- paste ( self.dist_to_mean_1,sep="")
filename2 = <- paste ( self.dist_to_mean_2,sep="")
data1 <- read.table(filename1, col.names=c("index1","index2","dist"))
data2 <- read.table(filename2, col.names=c("index1","index2","dist"))
plot_title <- paste("QQ Plot for" treefile1 " and" treefile2 ":",sep="")
outfile_pdf <- paste(self.qqfileout ".pdf",sep="")
outfile_png <- paste(self.qqfileout ".png",sep="")
pdf(outfile_pdf)
qqplot(data1$dist, data2$dist, plot.it = TRUE, main = plot_title)
dev.off()
png(outfile_png,width=1000,height=1000)
qqplot(data1$dist, data2$dist, plot.it = TRUE, main = plot_title)
dev.off()
|
1fd5a1088a143da2a653b2a8f9bf0ae6027a23dd | 339975439e4a28d0926efb68a02dbde15310a965 | /source/13. fit_model_3.R | 2afdef69919df5b48a010c8f06ae1fb2dd475e31 | [] | no_license | milicar/master_rad | 1b7dcf619be9d2529a1c9c027562617bcfd72b5d | 0273574840261c93604dbb67079691a90b7ed797 | refs/heads/master | 2022-12-21T13:33:42.608474 | 2020-09-26T11:28:02 | 2020-09-26T11:28:02 | 295,247,332 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,128 | r | 13. fit_model_3.R | # Fitovanje REM modela 3
library(dplyr)
library(relevent)
library(abind)
# Edge lists for the 200 participants, split by time frame.
wie_edgelist_200 <- readRDS("results/edgelist_200_by_timeframe.RDS")
# This model includes all effects of the first model, plus two binary
# covariates: whether the event/message talks about the meeting, and
# whether the event is positive and talks about the meeting.
# The functions that build these covariates are given below.
is_organizer_200 <- readRDS("results/is_organizer_cov.RData")
has_topics <- readRDS("results/has_topics.RData")
wie_sentiment_positive <- readRDS("results/wie_sentiment_positive_updated.RDS")
########## Binary covariate: does the event/message talk about the meeting's topics?
# Builds a 4-D array [event, covariate = 1, sender, receiver] with 1 where the
# event mentions a topic bigram (has_bigram == TRUE), for use as a CovEvent
# covariate. `edgelist` must carry event_no/sender/receiver columns.
make_single_topics_covar <- function(edgelist, n_participants, has_topics){
  ar <- array(0, dim = c(nrow(edgelist), 1, n_participants, n_participants))
  # Event numbers are global; shift them so the first event maps to index 1.
  event_offset <- edgelist[[1,1]]-1
  indices <- edgelist %>% left_join(has_topics) %>% filter(has_bigram == TRUE)
  # seq_len() instead of 1:nrow(): with zero matching rows the original
  # 1:0 would iterate over c(1, 0) and index out of bounds.
  for(i in seq_len(nrow(indices))){
    ar[indices[[i, "event_no"]]-event_offset, 1, indices[[i, "sender"]], indices[[i, "receiver"]]]<-1
  }
  ar
}
########## Binary covariate: does the event speak *positively* about the meeting's topics?
# Same layout as make_single_topics_covar(), but a cell is set to 1 only when
# the event both mentions a topic bigram and is labelled positive in
# `conf_sentiment` (joined on the shared "id" column).
make_single_topics_positive_covar <- function(edgelist, n_participants, has_topics, conf_sentiment){
  ar <- array(0, dim = c(nrow(edgelist), 1, n_participants, n_participants))
  # Shift global event numbers to 1-based indices into the first dimension.
  event_offset <- edgelist[[1,1]]-1
  indices <- edgelist %>% left_join(has_topics) %>%
    left_join(conf_sentiment[,c("id", "positive")]) %>%
    filter(has_bigram == TRUE & positive == 1)
  # seq_len() guards against the zero-row case (1:0 footgun).
  for(i in seq_len(nrow(indices))){
    ar[indices[[i, "event_no"]]-event_offset, 1, indices[[i, "sender"]], indices[[i, "receiver"]]]<-1
  }
  ar
}
# The fitted model (precomputed and stored on disk).
fit_struct_topics <- readRDS("results/fit_struct_topics.RDS")
# The code that fits the model. Execution took about 6.24 hours, with up to
# 10 GB of RAM, so it is left commented out and the result is loaded above.
# fit_struct_topics <- list()
# fit_struct_topics <- lapply(wie_edgelist_200,
#                             function(x)(rem.dyad(x[,1:3], 200,
#                                                  effects = c("NIDRec",
#                                                              "RRecSnd", "RSndSnd",
#                                                              "OTPSnd", "ITPSnd", "OSPSnd", "ISPSnd",
#                                                              "CovInt", "CovEvent"),
#                                                  covar = list(CovInt = is_organizer_200$covar,
#                                                               CovEvent = abind(make_single_topics_covar(x, 200, has_topics),
#                                                                                make_single_topics_positive_covar(x, 200, has_topics, wie_sentiment_positive),
#                                                                                along = 2)),
#                                                  hessian = TRUE)))
#
# saveRDS(fit_struct_topics, "fit_struct_topics.RDS")
# Model summaries for each of the five time frames.
summary(fit_struct_topics$time_1)
summary(fit_struct_topics$time_2)
summary(fit_struct_topics$time_3)
summary(fit_struct_topics$time_4)
summary(fit_struct_topics$time_5)
|
19c243c73f2ee838826668b1d03976d55962e6fb | 23462aba753a872f4c7e89770625da688f0d756f | /mymain.R | 864ffb4266bc44b221845be7eaf7c66955c590cd | [] | no_license | lingyix2/MovieReviewAnalysis | 6b3c3dfa92d25fdfb67e96f64aad2f2b10e48136 | 628ea0b1380f45f8826631ff5bcb5fab19495ae5 | refs/heads/master | 2020-09-15T14:35:25.703612 | 2019-11-22T20:36:48 | 2019-11-22T20:36:48 | 223,477,373 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,922 | r | mymain.R | if (!require("pacman"))
install.packages("pacman")
pacman::p_load(
"text2vec",
"glmnet",
"pROC",
"MASS",
"xgboost",
"slam"
)
# Functions: text2vec preprocessing hooks (lower-case + word tokenisation).
prep_fun = tolower
tok_fun = word_tokenizer
# Stopwords removed before building the vocabulary.
stop_words = c("i", "me", "my", "myself",
"we", "our", "ours", "ourselves",
"you", "your", "yours",
"their", "they", "his", "her",
"she", "he", "a", "an", "and",
"is", "was", "are", "were",
"him", "himself", "has", "have",
"it", "its", "of", "one", "for",
"the", "us", "this")
# Clean HTML tags from the raw review text.
all = read.table("Project2_data.tsv",stringsAsFactors = F,header = T)
all$review = gsub('<.*?>', ' ', all$review)
splits = read.table("Project2_splits.csv", header = T)
# Load data: partition into train/test using split column `s`.
# NOTE(review): `s` is not defined in this file -- it is assumed to be set by
# the caller (e.g. a loop over split columns) before this runs; confirm.
train = all[-which(all$new_id%in%splits[,s]),]
test = all[which(all$new_id%in%splits[,s]),]
ytrain = train$sentiment
ytest = test$sentiment
# Build vocabulary and construct the document-term matrices (up to 4-grams,
# pruned by frequency and document proportion).
it_train = itoken(train$review,
preprocessor = prep_fun,
tokenizer = tok_fun)
it_test = itoken(test$review,
preprocessor = prep_fun,
tokenizer = tok_fun)
vocab = create_vocabulary(it_train,ngram = c(1L,4L), stopwords = stop_words)
pruned_vocab = prune_vocabulary(vocab,
term_count_min = 5,
doc_proportion_max = 0.5,
doc_proportion_min = 0.001)
bigram_vectorizer = vocab_vectorizer(pruned_vocab)
dtm_train = create_dtm(it_train, bigram_vectorizer)
dtm_test = create_dtm(it_test, bigram_vectorizer)
## Lasso: L1-penalised logistic regression on the full DTM; lambda chosen
## by cross-validation, predictions on the probability scale.
cv.out <- cv.glmnet(dtm_train, train$sentiment, alpha = 1,family="binomial") #glmnt
tmp_lasso <-predict(cv.out, s = cv.out$lambda.min, newx = dtm_test, type="response")
auc_lasso=auc(ytest,tmp_lasso)
## Two-sample t-statistic per vocabulary term (positive vs negative class).
v.size = dim(dtm_train)[2]
summ = matrix(0, nrow=v.size, ncol=4)
# Per-term mean and variance within each sentiment class.
summ[,1] = colapply_simple_triplet_matrix(
as.simple_triplet_matrix(dtm_train[ytrain==1, ]), mean)
summ[,2] = colapply_simple_triplet_matrix(
as.simple_triplet_matrix(dtm_train[ytrain==1, ]), var)
summ[,3] = colapply_simple_triplet_matrix(
as.simple_triplet_matrix(dtm_train[ytrain==0, ]), mean)
summ[,4] = colapply_simple_triplet_matrix(
as.simple_triplet_matrix(dtm_train[ytrain==0, ]), var)
n1=sum(ytrain);
n=length(ytrain)
n0 = n-n1
# Welch-style t statistic for each term.
myp = (summ[,1] - summ[,3])/sqrt(summ[,2]/n1 + summ[,4]/n0)
# Order the words by the magnitude of their t-statistics; keep the top 800.
words = colnames(dtm_train)
id = order(abs(myp), decreasing=TRUE)[1:800]
pos.list = words[id[myp[id]>0]]
neg.list = words[id[myp[id]<0]]
write(words[id], file="myvocab.txt")
myvocab = scan(file = "myvocab.txt", what = character())
# Restrict the vocabulary to the selected terms and rebuild the DTMs.
pruned_vocab2 = vocab[vocab$term %in% myvocab, ]
bigram_vectorizer2 = vocab_vectorizer(pruned_vocab2)
dtm_train2 = create_dtm(it_train, bigram_vectorizer2)
dtm_test2 = create_dtm(it_test, bigram_vectorizer2)
## LDA (MASS::lda) on the reduced DTM; posterior of the positive class.
model_lda = lda(dtm_train2,ytrain)
pre_lda = predict(model_lda,dtm_test2)$posterior
tmp_lda = pre_lda[,2]
auc_lda=auc(ytest,tmp_lda)
## XGBoost with a linear booster on the full DTM.
xgb_model <- xgboost(booster="gblinear",data = dtm_train, label = ytrain, max.depth = 18,
nthread = 10, nrounds = 10, eta =0.03, seed = 3,
colsample_bytree = 0.2, subsample = 0.9 )
tmp_boost <- predict(xgb_model, dtm_test)
auc_xgboost=auc(ytest,tmp_boost)
tmp_lasso <- as.vector(tmp_lasso)
# Submission files: one (new_id, prob) row per test review, one file per model.
prob_lasso <- cbind(new_id=test$new_id, prob=tmp_lasso)
prob_lda <- cbind(new_id=test$new_id, prob=tmp_lda)
prob_xgboost <- cbind(new_id=test$new_id, prob=tmp_boost)
# write.csv() always writes a header and ignores any 'col.names' argument
# (with a warning), so the original `col.names = TRUE` is dropped here.
write.csv(prob_lasso, file = "mysubmission1.txt", row.names = FALSE)
write.csv(prob_lda, file = "mysubmission2.txt", row.names = FALSE)
write.csv(prob_xgboost, file = "mysubmission3.txt", row.names = FALSE)
|
9bb0be56d2c69fee246ca94e0e57379bd1efab58 | fe254ef6be0bd316d41b6796ef28f1c9e1d5551e | /man/teachingStuff.Rd | 23e1e455aa9acad7248cf9750b32f23e0030ff34 | [] | no_license | matthias-da/robCompositions | 89b26d1242b5370d78ceb5b99f3792f0b406289f | a8da6576a50b5bac4446310d7b0e7c109307ddd8 | refs/heads/master | 2023-09-02T15:49:40.315508 | 2023-08-23T12:54:36 | 2023-08-23T12:54:36 | 14,552,562 | 8 | 6 | null | 2019-12-12T15:20:57 | 2013-11-20T09:44:25 | C++ | UTF-8 | R | false | true | 1,837 | rd | teachingStuff.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataSets.R
\docType{data}
\name{teachingStuff}
\alias{teachingStuff}
\title{Teaching staff}
\format{
A (tidy) data frame with 1216 observations on the following 4 variables.
\itemize{
\item{\code{country }}{Country of origin}
\item{\code{subject }}{school type: primary, lower secondary, higher secondary and tertiary }
\item{\code{year }}{Year}
  \item{\code{value }}{Number of staff}
}
}
\source{
OECD:
\url{https://data.oecd.org/}
}
\description{
Teaching staff in selected countries
}
\details{
Teaching staff include professional personnel directly
involved in teaching students, including classroom
teachers, special education teachers and other
teachers who work with students as a whole class,
in small groups, or in one-to-one teaching.
Teaching staff also include department chairs
whose duties include some teaching, but
it does not include non-professional personnel
who support teachers in providing instruction
to students, such as teachers' aides and other
paraprofessional personnel. Academic staff include
personnel whose primary assignment is instruction,
research or public service, holding an academic
rank with such titles as professor, associate
professor, assistant professor, instructor,
lecturer, or the equivalent of any of these
academic ranks. The category includes personnel
with other titles (e.g. dean, director, associate
dean, assistant dean, chair or head of department),
if their principal activity is instruction or research.
}
\examples{
data(teachingStuff)
str(teachingStuff)
}
\references{
OECD (2017), Teaching staff (indicator). doi: 10.1787/6a32426b-en (Accessed on 27 March 2017)
}
\author{
translated from \url{https://data.oecd.org/} and restructured by Matthias Templ
}
\keyword{datasets}
|
0f07a4d3effbad97d83887a23aa7bc7fc2a2244a | ea77df156d9fb138cb2fc1843df20a18b7132294 | /R/selenium_storm_etc.R | 86377b14ce665b95ccbec0da33d37a92afe6d9f0 | [] | no_license | bedantaguru/RSeleniumTools | be0c85a7464c340061ee58f90ed436af0bc83377 | df24ff23f0f6025967744a39f6b24bcb4e86526a | refs/heads/master | 2023-08-14T05:27:28.976800 | 2021-10-06T07:11:43 | 2021-10-06T07:11:43 | 414,105,158 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,914 | r | selenium_storm_etc.R |
# Initialise the selenium_storm shared state (an on-disk storr) for this R
# process and claim ownership via the current PID.
#
# Stored configuration:
#   port               port the Selenium server listens on
#   check              stored flag (consumed elsewhere)
#   clean_start        if TRUE, wipe the storr directory and start fresh
#   singular_pid_sid   if TRUE, at most one session id is kept per R PID
#   num_sessions       maximum concurrent sessions
#                      (defaults to parallel::detectCores())
#   Browser, headless  stored under "client_config" only when
#                      client_profile_set is TRUE
# Stops when another *live* process already owns the store.
selenium_storm_init <- function(port, check, clean_start = F, singular_pid_sid = T, num_sessions,
                                Browser, headless, client_profile_set){
  if(clean_start){
    unlink(get_selenium_storm_storr_base_path(), recursive = T, force = T)
  }
  if(missing(num_sessions)){
    num_sessions <- parallel::detectCores()
  }
  st <- get_selenium_storm_storr()
  if(clean_start){
    # Destroy and recreate the handle so it points at a fresh store after
    # the unlink above.
    st$destroy()
    st <- get_selenium_storm_storr()
  }
  # Helper closing over `st`: returns the stored value for `key`, writing
  # `default` first when the key is absent (or unconditionally when force).
  create_if_not_exists <- function(key, default, ns = "config", force = F){
    if(!st$exists(key, namespace = ns)){
      st$set(key, default, namespace = ns)
    }
    if(force){
      st$set(key, default, namespace = ns)
    }
    st$get(key, ns)
  }
  # Ownership check: refuse to start when a live process holds the store;
  # take over (and force-refresh all config keys) when the owner is dead.
  last_pid <- create_if_not_exists("last_pid", Sys.getpid())
  pid_changed <- F
  if(last_pid!=Sys.getpid()){
    if(is_pid_active(last_pid)){
      stop(paste0("selenium_storm is running already with pid:", last_pid))
    }else{
      st$set("last_pid", Sys.getpid(), "config")
      pid_changed <- T
    }
  }
  # clean locks folder
  unlink(get_selenium_storm_lock_path(""), recursive = T)
  create_if_not_exists("port", port, force = pid_changed)
  create_if_not_exists("check", check, force = pid_changed)
  create_if_not_exists("singular_pid_sid", singular_pid_sid, force = pid_changed)
  create_if_not_exists("client_profile_set", client_profile_set, force = pid_changed)
  if(client_profile_set){
    create_if_not_exists("Browser", Browser, ns = "client_config", force = pid_changed)
    create_if_not_exists("headless", headless, ns = "client_config", force = pid_changed)
  }
  create_if_not_exists("num_cores", num_sessions, force = pid_changed)
  version <- create_if_not_exists("version", get_version())
  if(version!=get_version()){
    warning("Version mismatch. (if not running consider clean_start = T)")
  }
}
# Tear down selenium_storm completely: kill the recorded Selenium server
# process tree (if still alive), destroy both storr stores and remove the
# on-disk state directory. A foreign live owner PID only produces a warning.
selenium_storm_kill_all <- function(){
  st <- get_selenium_storm_storr()
  if(st$exists("last_pid", "config")){
    last_pid <- st$get("last_pid", "config")
    if(last_pid!=Sys.getpid()){
      if(is_pid_active(last_pid)){
        warning(paste0("selenium_storm is running already with pid:", last_pid, "\nThis will not be killed but ownership will be taken away from this process (attempt will be made)."))
      }
    }
  }
  if(st$exists("last_selenium_pid", "config")){
    last_selenium_pid <- st$get("last_selenium_pid", "config")
    if(is_pid_active(last_selenium_pid)){
      warning(paste0("Selenium is running already with pid:", last_selenium_pid, " This will be killed (will be tried to kill)."))
      # Kill children first, then the server itself; retry until the PID is
      # gone. NOTE(review): if the process can never be killed (e.g. missing
      # permissions) this loop spins forever -- consider a bounded retry.
      while(is_pid_active(last_selenium_pid)){
        try({
          pp <- ps::ps_handle(pid = as.integer(last_selenium_pid))
          pc <- ps::ps_children(pp, recursive = T)
          purrr::map(pc, ps::ps_kill)
          ps::ps_kill(pp)
        }, silent = T)
      }
    }
  }
  # Drop the session-scoped store, the global store, then the base directory.
  sst <- get_selenium_storm_storr(T)
  sst$destroy()
  st$destroy()
  unlink(get_selenium_storm_storr_base_path(), recursive = T, force = T)
}
# Read the client profile persisted by selenium_storm_init().
# Returns a list with client_profile_set (logical) and, when a profile was
# stored, the Browser and headless values.
stored_client_config <- function(){
  result <- list(client_profile_set = FALSE)
  store <- get_selenium_storm_storr(session = T)
  # Lazily populate the per-session config if it has not been synced yet.
  if (!store$exists("port", "config")) {
    sync_session_config()
  }
  profile_stored <- store$exists("client_profile_set", "config") &&
    store$get("client_profile_set", "config")
  if (profile_stored) {
    result$Browser <- store$get("Browser", "client_config")
    result$headless <- store$get("headless", "client_config")
    result$client_profile_set <- TRUE
  }
  result
}
# Resolve the effective (Browser, headless) pair for a new client.
# Precedence for each value: explicit argument, then the stored client
# profile, then the package default (valid_Browser() / non-headless).
get_unified_client_config <- function(Browser, headless){
  if (missing(Browser)) {
    saved <- stored_client_config()
    Browser <- if (saved$client_profile_set) saved$Browser else valid_Browser()
  } else {
    Browser <- valid_Browser(Browser)
  }
  if (missing(headless)) {
    saved <- stored_client_config()
    headless <- if (saved$client_profile_set) saved$headless else FALSE
  } else {
    # Coerce to a single logical (only the first element is used).
    headless <- as.logical(headless[1])
  }
  list(Browser = Browser, headless = headless)
}
# Create a new RSelenium remoteDriver connected to the selenium_storm server.
#
# Args:
#   Browser, headless  resolved through get_unified_client_config(); both may
#                      be omitted to use the stored profile / defaults.
#   start_browser      if TRUE, open a browser window (via open_browser()).
#   ...                forwarded to RSelenium::remoteDriver()
#                      (e.g. extraCapabilities).
#
# Returns (invisibly): the remoteDriver handle.
client_new <- function(Browser, headless, start_browser = T, ...){
  vl <- get_unified_client_config(Browser, headless)
  Browser <- vl$Browser
  headless <- vl$headless
  if (identical(Browser, "internet explorer") &&
      !identical(.Platform[["OS.type"]], "windows")) {
    stop("Internet Explorer is only available on Windows.")
  }
  # Headless mode is only wired up for chrome (and phantomjs) below.
  if(!(Browser %in% c("chrome","phantomjs") ) & headless){
    stop("Yet not implemented headless browsing with this Browser")
  }
  if(!check_selenium_strom()){
    stop("Can't create remoteDriver instance without selenium_strom properly running")
  }
  st <- get_selenium_storm_storr(session = T)
  if(!st$exists("port","config")){
    sync_session_config()
  }
  # User-supplied extraCapabilities would clash with the chromeOptions built
  # for headless mode, so headless is dropped in that case.
  exnames <- names(list(...))
  if(("extraCapabilities" %in% exnames) & Browser=="chrome" & headless ){
    warning("Browser = 'chrome', headless = TRUE and additionally extraCapabilities supplied. \nHence headless will be disabled (possibly it has to be taken care by extraCapabilities)")
    headless <- F
  }
  if(Browser=="chrome" & headless){
    eCaps <- list(chromeOptions = list(
      args = c('--headless', '--disable-gpu', '--window-size=1280,800')
    ))
    remDr <- RSelenium::remoteDriver(browserName = Browser, port = as.integer(st$get("port", "config")), extraCapabilities = eCaps, ...)
  }else{
    remDr <- RSelenium::remoteDriver(browserName = Browser, port = as.integer(st$get("port", "config")), ...)
  }
  # Poll the server status for up to ~10 seconds before giving up.
  count <- 0
  while (
    inherits(res <- tryCatch(remDr$getStatus(), error = function(e) e), "error")
  ) {
    Sys.sleep(1)
    count <- count + 1
    if (count > 10) {
      warning("Could not determine server status. (after 10 secs)")
      break
    }
  }
  if(start_browser){
    open_browser(remDr)
  }
  invisible(remDr)
}
# Return the per-session "control client" -- a cached RSelenium handle used
# for server-side queries. It is created once (without opening a browser)
# and reused afterwards.
get_control_client <- function(){
  session_store <- get_selenium_storm_storr(session = T)
  if (!session_store$exists("control_client", "handles")) {
    handle <- client_new(start_browser = F)
    session_store$set("control_client", handle, "handles")
  } else {
    handle <- session_store$get("control_client", "handles")
  }
  invisible(handle)
}
# Drop storr "sessions" entries whose Selenium session no longer exists on
# the server (i.e. stored ids with no matching live session id).
sessions_cleanup.sessions <- function(){
  store <- get_selenium_storm_storr()
  known <- store$list("sessions")
  live <- get_active_sessions(get_control_client()) %>% purrr::map_chr("id")
  stale <- setdiff(known, live)
  for (sid in stale) {
    store$del(sid, namespace = "sessions")
  }
  invisible(0)
}
# Build a data frame mapping each stored session id (si) to the R process id
# (pid) that claimed it, flagging rows whose process is no longer alive.
# The result is also cached in the session store ("sid_pid_map"/"cache_data")
# for sid_pid_map_fast().
sid_pid_map <- function(){
  st <- get_selenium_storm_storr()
  sst <- get_selenium_storm_storr(session = T)
  dsi <- data.frame(si = st$list("sessions"), stringsAsFactors = F)
  # Reads can race with deletions in other processes, so a failed get
  # yields NA instead of an error.
  safeget <- function(x){
    gt <- try({st$get(x,namespace = "sessions")}, silent = T)
    if(inherits(gt,"try-error")) return(NA)
    gt
  }
  dsi$pid <- dsi$si %>% purrr::map_chr(safeget) %>% as.integer()
  # For zero rows, set a typed empty column; is_pid_active() is only called
  # on a non-empty pid vector.
  if(length(dsi$pid)==0){
    dsi$is_dead_pid <- logical(0)
  }else{
    dsi$is_dead_pid <- !is_pid_active(dsi$pid)
  }
  sst$set("sid_pid_map", dsi, "cache_data")
  dsi
}
# Cached variant of sid_pid_map(): reuse the per-session "sid_pid_map" cache
# entry when present and non-empty, recompute otherwise.
sid_pid_map_fast <- function(){
  sst <- get_selenium_storm_storr(session = T)
  if(!sst$exists("sid_pid_map", "cache_data")){
    # BUG FIX: the original called sid_pid_map() only for its side effect and
    # fell through to return NULL; capture the return value so callers get
    # the freshly computed map on the first call.
    d <- sid_pid_map()
  }else{
    d <- sst$get("sid_pid_map", "cache_data")
    if(nrow(d)==0){
      # An empty cached map may be stale (sessions created since): recompute
      # and re-read the refreshed cache entry.
      sid_pid_map()
      d <- sst$get("sid_pid_map", "cache_data")
    }
  }
  d
}
# Remove session entries claimed by R processes that are no longer alive.
# Returns (invisibly) the session ids that were removed.
sessions_cleanup.pids <- function(){
  store <- get_selenium_storm_storr()
  mapping <- sid_pid_map()
  if (any(mapping$is_dead_pid)) {
    for (sid in mapping$si[mapping$is_dead_pid]) {
      store$del(sid, namespace = "sessions")
    }
  }
  invisible(mapping$si[mapping$is_dead_pid])
}
# Full cleanup pass: first reconcile the store against the live Selenium
# server, then drop entries owned by dead R processes (order matters -- the
# second step reuses the map rebuilt after the first).
sessions_cleanup <- function(){
  sessions_cleanup.sessions()
  sessions_cleanup.pids()
}
# Session ids that are alive on the Selenium server but not claimed in the
# storr -- i.e. sessions an R process may attach to. Runs a cleanup pass
# first so the comparison is against fresh state.
free_sessions <- function(){
  sessions_cleanup()
  storr_db <- get_selenium_storm_storr()
  claimed <- storr_db$list("sessions")
  live <- get_active_sessions(get_control_client()) %>% purrr::map_chr("id")
  setdiff(live, claimed)
}
# Fast path of client_instant(): only try to re-attach to a session that this
# R process already owns (requires singular_pid_sid mode). Returns the
# attached client, or NULL when nothing could be attached.
client_instant_fast <- function(Browser, headless, ...){
  vl <- get_unified_client_config(Browser, headless)
  Browser <- vl$Browser
  headless <- vl$headless
  # only tries to attach
  rt <- NULL
  st <- get_selenium_storm_storr(session = T)
  if(!st$exists("port","config")){
    sync_session_config()
  }
  if(st$get("singular_pid_sid","config")){
    # The cached map is good enough here; validity is rechecked on attach.
    dsi <- sid_pid_map_fast()
    if(any(dsi$pid == Sys.getpid())){
      this_sid <- dsi$si[dsi$pid == Sys.getpid()][1]
      dummy_client <- client_new(Browser = Browser, headless = headless, start_browser = F, ...)
      # fast attach_to_active_session is also required
      attach_ok <- attach_to_active_session(dummy_client, this_sid, fast = T)
      if(attach_ok){
        # disabled as no extra information will be added
        # open_browser(dummy_client)
        rt <- dummy_client
      }
    }
  }
  rt
}
# Obtain a usable client: attach to a free (or own) Selenium session when one
# exists, otherwise open a new browser as long as the number of active
# sessions stays below the configured maximum ("num_cores").
# Returns the client, or NULL when neither path succeeds.
client_instant <- function(Browser, headless, ...){
  vl <- get_unified_client_config(Browser, headless)
  Browser <- vl$Browser
  headless <- vl$headless
  active_sessions <- get_active_sessions(get_control_client())
  st <- get_selenium_storm_storr(session = T)
  if(!st$exists("port","config")){
    sync_session_config()
  }
  num_cores <- st$get("num_cores","config")
  singular_pid_sid <- st$get("singular_pid_sid","config")
  fss <- free_sessions()
  dummy_client <- client_new(Browser = Browser, headless = headless, start_browser = F, ...)
  # In singular mode a PID keeps at most one session: when no free session
  # exists, fall back to the one already owned by this process.
  if(singular_pid_sid){
    dsi <- sid_pid_map()
    any_sid_for_this_pid <- dsi$si[dsi$pid==Sys.getpid()]
    if(length(fss)==0){
      fss <- any_sid_for_this_pid
    }
  }
  if(length(fss)){
    attach_ok <- attach_to_active_session(dummy_client, fss[1])
    if(attach_ok){
      open_browser(dummy_client)
      return(dummy_client)
    }
  }else{
    # No attachable session: only start a brand-new one below the cap.
    if(length(active_sessions) < num_cores){
      open_browser(dummy_client)
      return(dummy_client)
    }
  }
  return(NULL)
}
|
f39c547043ef917edd1a82ebbe034adecb9deb97 | e37254e0684b7e2bd4d5f82917b0754497d89fe0 | /R Programming/Assignment 1/rprog-data-specdata/corr.R | 4c5e6df6b1705f66a3c8050494d6d1e0380c6a3f | [] | no_license | fformenti/Johns_Hopkins_University | baa9be67b8c1b61eb89dab7a49e1d013c4171356 | 7af1bc74c193e5cb16ec4ae7c79ad7fde0aef23d | refs/heads/master | 2020-06-09T05:09:36.704327 | 2015-03-03T02:11:14 | 2015-03-03T02:11:14 | 28,513,992 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 521 | r | corr.R | ## Assignment 1
corr <- function(directory, threshold = 0) {
  # Correlation between sulfate and nitrate for every monitor CSV in
  # `directory` whose number of completely observed rows is >= `threshold`.
  #
  # Args:
  #   directory: path containing the per-monitor CSV files.
  #   threshold: minimum number of complete cases required for a monitor's
  #              correlation to be included.
  #
  # Returns: numeric vector of correlations (length 0 when none qualify).
  filenames <- list.files(directory, pattern="*.csv", full.names=TRUE)
  data <- lapply(filenames, read.csv)
  # numeric(0) (not c()) so an empty result is still a numeric vector, and
  # seq_along() (not 1:length) so an empty directory does not iterate over
  # c(1, 0) and fail on data[[1]].
  v.corr <- numeric(0)
  for (i in seq_along(data)) {
    tmp <- data[[i]]
    # Keep only rows where Date, nitrate and sulfate are all observed.
    tmp <- tmp[!(is.na(tmp$Date)) & !(is.na(tmp$nitrate)) & !(is.na(tmp$sulfate)), ]
    nobs <- nrow(tmp)
    if (nobs >= threshold) {
      v.corr <- c(v.corr, cor(tmp$nitrate, tmp$sulfate))
    }
  }
  return(v.corr)
}
|
85e3eac556df0ced5586bce5240950e628c2e1a9 | 5b0d2c2a017d2d765873ca15dddabd93fccc5539 | /helper_functions.R | 06980dd33aa784e01552a35c24140aa11ed718bf | [] | no_license | certara/covid19epi | 92d3772f7790079f8f0b38d7fd06b59130ba6855 | 444b3c46db516107987ef1fb04f5143143ebc71c | refs/heads/master | 2021-05-22T15:07:48.020855 | 2020-05-12T20:37:09 | 2020-05-12T20:37:09 | 252,975,571 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 224 | r | helper_functions.R | # https://stackoverflow.com/questions/21011672/automatically-add-variable-names-to-elements-of-a-list
listN <- function(...){
  # Build a list whose names are the unevaluated argument expressions,
  # e.g. listN(a, b) is equivalent to list(a = a, b = b).
  values <- list(...)
  # substitute(list(...)) captures the call; [-1] drops the `list` symbol,
  # leaving one label per argument.
  arg_labels <- as.character(substitute(list(...)))[-1]
  names(values) <- arg_labels
  values
}
|
dcbaeca9d564be6a64f22d1f4d82c8ab82ef6a26 | 29585dff702209dd446c0ab52ceea046c58e384e | /pla/R/jitterSteps.R | e3cd597705218392c9b950e0da99b0e3aef0d3d1 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,021 | r | jitterSteps.R | jitterSteps <-
function (dataFrame)
{
    ## Horizontally jitter tied points (same sample, dose and response) so
    ## overlapping observations can be told apart in a plot. Returns the
    ## input rows (restored to the order of column "I") with the columns
    ## indexOfTreatment, dX (duplicate counter) and Zjitter appended.
    iSample <- as.numeric(unlist(dataFrame["Sample"]))
    Z <- unlist(dataFrame["Z"])
    delta <- max(Z) - min(Z)
    nrows <- length(Z)
    Response <- unlist(dataFrame["Response"])
    o <- order(iSample, Z, Response)
    do <- dataFrame[o, ]
    do1 <- do[-1, ]
    do2 <- do[-nrows, ]
    ## TRUE where a row repeats the previous row's response / dilution:
    idResp <- c(FALSE, do1["Response"] == do2["Response"])
    idSamp <- c(FALSE, do1["Dilution"] == do2["Dilution"])
    ## Running count of consecutive duplicates. Pre-allocated instead of the
    ## original `x <- c(x, counter)` growth (quadratic), and seq_len()
    ## instead of 1:nrows (which mis-iterates when nrows == 0). The
    ## duplicate mask is hoisted out of the loop.
    dup <- idSamp & idResp
    x <- numeric(nrows)
    counter <- 0
    for (i in seq_len(nrows)) {
        counter <- ifelse(dup[i], counter + 1, 0)
        x[i] <- counter
    }
    indexOfTreatment <- iSample[o]
    meanIndex <- mean(indexOfTreatment)
    Zjitter <- unlist(dataFrame["Z"])[o] +
        delta * ((indexOfTreatment - meanIndex) * 0.05 + (x - 1) * 0.025)
    dataOrder <- cbind(do, indexOfTreatment, dX = x, Zjitter)
    q <- order(dataOrder["I"])
    dataQ <- dataOrder[q, ]
    return(dataQ)
}
|
b57706911c62c20c632bbd7a530514090afa9b4e | 7b73a971dfdb07a7683f93967d2e12b73167d6fd | /man/Rbc1.Rd | 00b390396d6200e9efc49da960cc81b73141d0c0 | [] | no_license | yiransheng/bcprocs | b782d6ab59a8a755eb423fd72454d1928b4e87d1 | a0a16bf1a4babef021b4625ce3aa4f2bafbb9ead | refs/heads/master | 2020-12-25T19:03:49.958994 | 2015-04-22T15:59:48 | 2015-04-22T15:59:48 | 34,399,336 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,518 | rd | Rbc1.Rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/2.1_Rbc1.R
\name{Rbc1}
\alias{Rbc1}
\title{S3 Class constructor for Rbc1 procedure}
\arguments{
\item{k}{number of treatments}
\item{n}{number of maximum observations to take from each treatment}
\item{c}{design parameter c, cutoff point for choosing a treatment over control}
}
\value{
S3 Class object of the same procedure type
}
\description{
S3 Class constructor for Rbc1 procedure
}
\examples{
# run on a full dataset
# data is a data.frame of dimension (128, 4), with 0/1 values
proc <- Rbc1(3, 13, 128, data)
# alternative
proc <- Rbc1(3, 13, 128)
summary(run.experiment(proc, data))
# Simulation Example
# Choose: k=3, P1=0.95, P2=0.95, theta_0 = 0.2, delta_1 = 0.05, delta_2 = 0.2
# From Table 2: we have, n = 128, c=13
n <- 128
k <- 3
proc <- Rbc1(k=k, c=13, n=n)
repeat {
p <- c(0.2, runif(k)) # population Bernoulli parameters
jmax <- which.max(p[-1]) + 1 # best treatment
if (p[jmax] >= p[1] + 0.2) { # ensure assumptions in Section (2) are satisfied AND there's a best treatment to select
invalid <- sapply(p[-1], function(x) {
x > p[1] + 0.05 & x < p[1] + 0.2
})
if(all(!invalid)) break
}
}
step <- 1
repeat {
sampleObs <- sapply(p, function(prob) { # generate B(1, p) binary r.v. as observations, from population probs.
rbinom(1, 1, prob)
})
proc <- vector.at.a.time(proc, sampleObs)
step <- step + 1
if (step > n | ( is.done(proc) )) break
}
summary(proc)
}
|
d8465a43f3ecd1d748e563494d4ac550d4bb6fa8 | a600aadd04129c95e24d5d0a2a90cb130f01efe5 | /Exercises/05_business_model.R | 36fd251ae964a5511f53bf47251739a50d54824a | [] | no_license | fpgmaas/shiny-tutorial | a3daf27354b3a47b120ae13c4d49df3e00d4cf81 | b0403c3721cd1ee21c2f62dff93aaa156d02134a | refs/heads/master | 2021-09-18T15:18:31.637491 | 2018-07-16T07:15:42 | 2018-07-16T07:15:42 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,146 | r | 05_business_model.R | # Xiaowen has a new business model! She will just buy 10 new houses and rent them all out.
# However, she is unable to choose which houses to buy. Therefore, she develops an application
# that helps her narrow down her search.
# She thinks the city, the number of bedrooms, and the number of baths are very important.
# Therefore, she wants to select a column, and subsequently select values from that column to filter on.
# NOTE(review): this is an exercise skeleton -- the literal `...` and '...'
# tokens below are blanks for the student to fill in, so the file does not
# run (or even parse) as-is. Only comments have been added here.
library(shiny)
df = read.table('http://www.rossmanchance.com/iscam2/data/housing.txt',sep='\t',header=T)
ui <- fluidPage(
# Add an input to select a column here
selectizeInput('value','Values: ', choices = NULL, multiple =T), # Initialize with choices = NULL
tableOutput('table1')
)
# The server function
server <- function(input,output, session)
{
# Observer to update the choices of the input 'value' whenever the input$column changes
# (fill in the reactive trigger and the inputId/choices arguments below)
observeEvent(...,
{
updateSelectizeInput(session,inputId='...',choices=...))
})
output$table1 <- renderTable({
# If input$value is not null, filter the table
return(df)
})
}
shinyApp(ui, server)
|
25908989162ce8eb746fb69c9d112cbbb4f1a78a | d008d74a9c473ca61b96923409811ccb47b96406 | /man/chl.pal.Rd | 86a470fd49f7845b304ed0a84109f3a81c40f108 | [] | no_license | AustralianAntarcticDivision/raadtools | 0246d25a1480888780aa23ea27c1f9985bdaafa0 | d8cc9c553f52e4b5a14c7f2fd738d5bc1318c304 | refs/heads/main | 2023-07-24T23:03:01.099186 | 2023-07-07T01:28:52 | 2023-07-07T01:28:52 | 34,773,324 | 19 | 5 | null | 2023-03-14T10:06:42 | 2015-04-29T05:00:19 | HTML | UTF-8 | R | false | true | 1,661 | rd | chl.pal.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datapal.R
\name{chl.pal}
\alias{chl.pal}
\title{Ocean colour colours for chlorophyll-a.}
\usage{
chl.pal(x, palette = FALSE, alpha = 1)
}
\arguments{
\item{x}{a vector of data values or a single number}
\item{palette}{logical, if \code{TRUE} return a list with matching colours and values}
\item{alpha}{value in 0,1 to specify opacity}
}
\value{
colours, palette, or function, see Details
}
\description{
Ocean colour palette for chlorophyll-a.
}
\details{
Flexible control of the chlorophyll-a palette. If \code{x} is a
single number, the function returns that many colours evenly
spaced from the palette. If \code{x} is a vector of multiple
values the palette is queried for colours matching those values,
and these are returned. If \code{x} is missing and \code{palette}
is \code{FALSE} then a function is returned that will generate n
evenly spaced colours from the palette, as per
\code{\link{colorRampPalette}}.
}
\examples{
\dontrun{
chl <- readchla(xylim = c(100, 110, -50, -40))
## just get a small number of evenly spaced colours
plot(chl, col = chl.pal(10))
## store the full palette and work with values and colours
pal <- chl.pal()
## the standard full palette
plot(chl, breaks = pal$breaks, col = pal$cols)
## a custom set of values with matching colours
plot(chl, col = chl.pal(pal$breaks[seq(1, length(pal$breaks), length = 10)]))
## any number of colours stored as a function
myfun <- chl.pal()
plot(chl, col = myfun(18))
## just n colours
plot(chl, col = chl.pal(18))
}
}
\references{
Derived from \url{http://oceancolor.gsfc.nasa.gov/DOCS/palette_chl_etc.txt}.
}
|
5365610f7f10b7e26ac6e7e9652c7f60543a7a5e | 2e32c18fefc3daadb2bbd25ab40c888b1e21ad68 | /carga fitxero.R | d2d253776fe77290b91b7f7694930fa69180b151 | [] | no_license | ceciliabilbao/Archivos_examen_ds | f992b1ff856a8034fb9b5cc9db7b9d42428be0eb | efeea8e4e0556fa0b44b515e681a2e29e2bdfee3 | refs/heads/master | 2022-08-25T09:48:35.434641 | 2020-05-22T09:05:34 | 2020-05-22T09:05:34 | 266,036,969 | 0 | 1 | null | 2020-05-22T06:36:25 | 2020-05-22T06:36:24 | null | ISO-8859-2 | R | false | false | 1,032 | r | carga fitxero.R | library(stringi)
library(stringr)
library(rebus)
library(dplyr)
# NOTE(review): hard-coded, machine-specific working directory -- this script
# only runs on the author's machine as written.
setwd("C:/Users/cecil/OneDrive/BUSINESS DATA ANALYTICS/PRIMERO/DATA SCIENCE/RETO 4/EXAMEN DS/Archivos_examen_ds")
load("muestra10.RData", envir = parent.frame(), verbose = FALSE)
dim(muestra10) # dimensions: 335686 x 3
colnames(muestra10)
# Rename the variables.
names (muestra10)[1] = "nif"
names (muestra10)[2] = "nombre"
names (muestra10)[3] = "genero"
# Drop from the original dataset all records whose NIF does not have the
# expected format (8 digits followed by one letter).
nif <-muestra10[1]
regexp <- grepl('[[:digit:]]{8}[[:alpha:]]{1}', muestra10$nif)
# NOTE(review): the next line passes a *logical vector* as `pattern`, which
# is not meaningful; it looks like leftover exploration and its result is
# discarded anyway -- consider removing it.
grepl(pattern = regexp, x =muestra10$nif) # did not match: fewer than the required digits
cumple <- which(regexp == T)
final <- muestra10[cumple, ]
# Remove the rows that failed the format check.
# Impute gender.
dim(muestra10)
muestra10<-mutate(muestra10,frecuencia=1)
mujeres<-muestra10%>%
filter(genero=="M")%>%
group_by(nombre)%>%
summarise(frec=sum(frecuencia))
# NOTE(review): `hombres` is never defined in this file (only `mujeres` is
# computed above), so this merge fails as written -- confirm the missing
# male-frequency pipeline.
todos<-merge(mujeres,hombres)
|
d18c3bb98cfba2b528dc524592d862f95f0fe41f | dc7549847fa8fe32a2d3e0d181ed2fd2c4030144 | /man/meStack.Rd | 70c58bd449fd3ddd50215227238ecc6617c9b115 | [] | no_license | RRemelgado/CAWaR | 5e312fa7ad6f72ae21c1f9e55cc0f53e1426b2ad | 7536d06ce5073b9b686263871135add46adca8b1 | refs/heads/master | 2021-07-14T19:58:47.962666 | 2020-06-04T11:18:34 | 2020-06-04T11:18:34 | 141,430,724 | 2 | 2 | null | null | null | null | UTF-8 | R | false | true | 2,190 | rd | meStack.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/meStack.R
\name{meStack}
\alias{meStack}
\title{meStack}
\usage{
meStack(x, y, z, agg.fun = mean, derive.stats = FALSE)
}
\arguments{
\item{x}{A \emph{list} of \emph{RasterLayer} objects or a \emph{character} vector with the paths to \emph{raster} objects.}
\item{y}{A spatial object from which an extent can be derived.}
\item{z}{Object of class \emph{Date} with the acquisition date for each element in \emph{x}.}
\item{agg.fun}{Function used to aggregate images collected in the same date. Default is the mean.}
\item{derive.stats}{Logical argument. Default is FALSE.}
}
\value{
A list containing a \emph{RasterStack} and related statistics.
}
\description{
Stacking of raster layers with different extents
}
\details{
{The function stacks the raster objects specified in \emph{x}. For each element
in \emph{x}, the function crops it by the extent of \emph{y} and, if their extents differ,
fits the extent of \emph{x} to the one of \emph{y}. All new pixels are set to NA. If \emph{z}
is provided, the function will then aggregate all bands acquired in the same date using the
function provided with \emph{agg.fun}. If \emph{derive.stats} is set to TRUE, the function will
return basic statistics for each band (i.e. min, max, mean and sd) together with a plot of the
mean values. The final output of the function is a list containing:
\itemize{
\item{\emph{stack} - \emph{RasterStack} object.}
\item{\emph{dates} - Acquisition dates for each layer in \emph{stack}.}
\item{\emph{image.stats} - Statistics for each band in the output \emph{RasterStack}.}
\item{\emph{stats.plot} - Plot showing the mean, minimum and maximum values per band.}
 \item{\emph{control} - Logical vector showing which elements in \emph{x} were used to build the \emph{RasterStack}.}}}
}
\examples{
{
require(raster)
r1 <- raster(xmn=1, xmx=90, ymn=1, ymx=90, res=1, vals=1) # image 1
r2 <- raster(xmn=50, xmx=150, ymn=50, ymx=150, res=1, vals=1) # image 2
r0 <- raster(xmn=20, xmx=90, ymn=50, ymx=90, res=1, vals=1) # target extent
crs(r0) <- crs(r2) <- crs(r1)
mes <- meStack(list(r1, r2), r0)
plot(mes$stack)
}
}
|
6f3232ef84ed024665074293f7fa7caf280d0d1c | 3e020c520418157c0c408c7496cd6ed2d7b2ebb3 | /src/Mapping/Teen_Birth_mapping_new.R | 95c21365ac89922f42ef4f19cebdfaf709ffaa8d | [] | no_license | DSPG-Young-Scholars-Program/dspg20halifax | c5e390345fe69696f58686496540f806679ce866 | 2206402db3fded94830cfe40611c067e68d0ab1c | refs/heads/master | 2022-12-20T16:34:58.563462 | 2020-09-23T12:00:01 | 2020-09-23T12:00:01 | 271,644,345 | 1 | 1 | null | 2020-08-03T13:50:08 | 2020-06-11T20:50:51 | HTML | UTF-8 | R | false | false | 2,976 | r | Teen_Birth_mapping_new.R | library(sf)
library(sp)
library(leaflet)
library(dplyr)
library(BAMMtools)
library(here)
library(leaflet.extras)
#Counties_VA_geometry <- st_as_sf(VA_counties) %>%
# mutate(County = NAME) %>%
# select(GEOID, NAME, NAMELSAD, geometry)
#Teen_Births_VA <- Teen_Births_VA %>%
#select(State, County, "BirthRate2018", "BirthRate2015", "BirthRate2012", "BirthRate2009", "BirthRate2006") %>%
#mutate(Teen_Births_VA, GEOID = as.character(GEOID))
#Teen_Births_VA <- inner_join(Teen_Births_VA, Counties_VA_geometry ) %>%
# mutate(is_halifax = case_when(County == "Halifax" ~ "Yes",
# County != "Halifax" ~ "No"))
#Teen_Births_VA <- st_as_sf(Teen_Births_VA)
st_read(here::here("src", "Data_Ingestion", "Teen_Births_VA.geojson"))
Teen_B_colors <- colorBin("YlGnBu", domain = c(0, 100),
bins = c(0, 15, 30, 45, 60, 75, 90, 100))
Teen_Births_VA %>%
leaflet(options = leafletOptions(minZoom = 7, maxZoom = 12)) %>%
setView(-78.6569, 38, 7) %>%
addTiles()%>%
addResetMapButton() %>%
addPolygons(fillColor = ~Teen_B_colors(BirthRate2018), color = "black",
fillOpacity = 1, group = "2018", opacity = 1, weight = 2,
highlight = highlightOptions(color = "white",
bringToFront = TRUE),
label = ~paste0("Birth Rate: ", (BirthRate2018))) %>%
addPolygons(fillColor = ~Teen_B_colors(BirthRate2015), color = "black",
fillOpacity = 1, group = "2015", weight = 2, opacity = 1,
highlight = highlightOptions(color = "white",
bringToFront = TRUE),
label = ~paste0("Birth Rate: ", (BirthRate2015))) %>%
addPolygons(fillColor = ~Teen_B_colors(BirthRate2012), color = "black",
fillOpacity = 1, group = "2012", weight = 2, opacity = 1,
highlight = highlightOptions(color = "white",
bringToFront = TRUE),
label = ~paste0("Birth Rate: ", (BirthRate2012))) %>%
addPolygons(fillColor = ~Teen_B_colors(BirthRate2009), color = "black",
fillOpacity = 1, group = "2009", weight = 2, opacity = 1,
highlight = highlightOptions(color = "white",
bringToFront = TRUE),
label = ~paste0("Birth Rate: ", (BirthRate2009))) %>%
addPolygons(fillColor = ~Teen_B_colors(BirthRate2006), color = "black",
fillOpacity = 1, group = "2006", weight = 2, opacity = 1,
highlight = highlightOptions(color = "white",
bringToFront = TRUE),
label = ~paste0("Birth Rate: ", (BirthRate2006))) %>%
addLayersControl(baseGroups = c("2018", "2015", "2012",
"2009", "2006")) %>%
addLegend(position = "bottomright", pal = Teen_B_colors, values = c(0,100),
title = "Birth Rate")
|
ef5a91f08f22bd5f3ebb22e61e7e5501d9f2ceab | eac1c073635a0f79349c27fb431d816fc3cb2662 | /man/make_regional_plot.Rd | 0510f9974a9ff96257cae1526776de1d2c72394a | [] | no_license | manning-lab/WGSregionalPlot | 8e39cc4d44514975ab1de7b65acac99e57f6913c | e02a77daeed78db95da5d7647bea9aa01d2d1c1f | refs/heads/master | 2020-07-04T17:44:52.333954 | 2019-08-29T15:55:26 | 2019-08-29T15:55:26 | 202,359,297 | 0 | 0 | null | 2019-08-29T15:54:35 | 2019-08-14T13:45:03 | R | UTF-8 | R | false | true | 2,735 | rd | make_regional_plot.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/make_regional_plot.R
\name{make_regional_plot}
\alias{make_regional_plot}
\title{Generate a regional plot from summary statistics for a desired interval}
\usage{
make_regional_plot(chr, start, end, variant_data, variant_chr_column,
variant_pos_column, variant_y_column, variant_marker_column = NULL,
variant_ld_data = NULL, variant_ld_ref = NULL,
genome_build = "hg19", variant_should_log = T,
variant_horizontal_value = 5e-08, variant_horizontal_color = "red",
variant_horizontal_style = "dashed", variant_background_color = NULL,
variant_background_frame = T, variant_point_color = "#000000",
variant_title = "P-value", gene_highlight = NULL,
gene_title = "Ensembl", gene_background_color = NULL,
gene_frame = T, bed_data = NULL, bed_titles = NULL,
bed_background_colors = NULL, bed_frame = T)
}
\arguments{
\item{chr}{interval chromosome}
\item{start}{interval start}
\item{end}{interval end}
\item{variant_data}{variant-level summary statistics}
\item{variant_chr_column}{chromosome column in \code{variant_data}}
\item{variant_pos_column}{variant position column in \code{variant_data}}
\item{variant_y_column}{p-value column in \code{variant_data}}
\item{variant_marker_column}{unique variant identifier column in \code{variant_data}}
\item{variant_ld_data}{variant LD values, the output of LDGds workflow}
\item{variant_ld_ref}{reference variant unique identifier for LD}
\item{genome_build}{genome build of variants}
\item{variant_should_log}{should the pvalues in \code{variant_data} be -log10'ed?}
\item{variant_horizontal_value}{Y-intercept for horizontal line in Manhattan plot}
\item{variant_horizontal_color}{Color for horizontal line in Manhattan plot}
\item{variant_horizontal_style}{Style for horizontal line in Manhattan plot}
\item{variant_background_color}{Manhattan plot title box color}
\item{variant_background_frame}{Should a frame be placed around the Manhattan plot?}
\item{variant_point_color}{Color of points if no LD information is provided}
\item{variant_title}{Manhattan plot title}
\item{gene_highlight}{Should a gene be highlighted? hgnc symbol (DEPRICATED)}
\item{gene_title}{Title for gene plot}
\item{gene_background_color}{Gene plot title box color}
\item{gene_frame}{Should a frame be placed around the Gene plot?}
\item{bed_data}{List of dataframes in bed format: chr start end label color}
\item{bed_titles}{List of titles for bed plots}
\item{bed_background_colors}{List of background colors for bed plot title bars}
\item{bed_frame}{Should frames be added around each bed plot?}
}
\description{
Generate a regional plot from summary statistics for a desired interval
}
|
c94e372d971870287cbe92a5a398002c23663b72 | 3f1d3eff34ed84b974cf3c8c7c5538e46864d046 | /EA_VDT_EHR_Variance.R | 3fc0dc4061e0426851130355b397a50d6edc71b7 | [] | no_license | Lwylie/DocGraph_MU_Analysis | 1f036a804b572d4bdfae7e96a4eb5699545cde61 | 50652e975ddcbade6268a8d7bfb1343ae3240d3e | refs/heads/master | 2016-09-06T15:52:03.577692 | 2015-08-03T19:31:02 | 2015-08-03T19:31:02 | 40,132,997 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,330 | r | EA_VDT_EHR_Variance.R | #Load relevant libraries
library(ggplot2)
#Load datasets into dataframes
S1EP <- as.data.frame(read.csv("G:/Learning/FredTrotter/MU_EHR_result_replication/view_for_stats_S1_EP.csv"))
S1EH <- as.data.frame(read.csv("G:/Learning/FredTrotter/MU_EHR_result_replication/view_for_stats_S1_EH.csv"))
S2EP <- as.data.frame(read.csv("G:/Learning/FredTrotter/MU_EHR_result_replication/view_for_stats_S2_EP.csv"))
S2EH <- as.data.frame(read.csv("G:/Learning/FredTrotter/MU_EHR_result_replication/view_for_stats_S2_EH.csv"))
#Annoyingly, there is no good way to convert a factor(as r imports the eaccess_c and and VDT_c variables) into a double, so we coerce them
#Note: this will coerce NULL values into NA
S1EH$eaccess_c <- as.numeric(as.character(S1EH$eaccess_c))
S1EP$eaccess_c <- as.numeric(as.character(S1EP$eaccess_c))
S2EP$VDT_c <- as.numeric(as.character(S2EP$VDT_c))
S2EH$VDT_c <- as.numeric(as.character(S2EH$VDT_c))
#Since we want to test if there is significant variance in attestation by vendor, an ANOVA test seems proper
S1EH_ANOVA <- aov(eaccess_c ~ vendor, data = S1EH)
S1EP_ANOVA <- aov(eaccess_c ~ vendor, data = S1EP)
S2EH_ANOVA <- aov(VDT_c ~ vendor, data = S2EH)
S2EP_ANOVA <- aov(VDT_c ~ vendor, data = S2EP)
#It looks like all of our P-values are significant, so we can reject or H0 of equal means among vendors
|
bb6773d7c6d22592389ea938883bd9afa27773db | 0340f7072487228935286537b4590468cb852588 | /man/hotspots.Rd | c229eaf3d455130952d3e83d16072f0380abceb3 | [] | no_license | AMBarbosa/DeadCanMove | b4007db56ea38c3259e63a485b467811e07d1f85 | a5182facd759e5624dd5a228d5c8cf37e638d126 | refs/heads/master | 2021-11-11T15:06:07.326845 | 2021-11-02T17:51:39 | 2021-11-02T17:51:39 | 237,043,280 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,409 | rd | hotspots.Rd | \name{hotspots}
\alias{hotspots}
\title{
Calculate roadkill hotspots
}
\description{
This function identifies the hotspot regions in a dataset, or in a submatrix compared to the total dataset, using an adaptation of the method of Malo et al. (2004).
}
\usage{
hotspots(dataset, submat = NULL, region.column,
subsampl.columns = NULL, n.events.column = NULL, hotspots = TRUE,
confidence = 0.95, min.total.events = 0, min.hotspot.threshold = 2)
}
\arguments{
\item{dataset}{
name of the matrix or dataframe containing the complete data
}
\item{submat}{
name of the matrix or dataframe containing the data of the group and sampling window/gap for which to calculate hotspots
}
\item{region.column}{
name or index number of the column containing the regions (road sectors, sites) to classify as hotspots or non-hotspots
}
\item{subsampl.columns}{
index numbers of the consecutive columns of submat (or, if there is no submat, of the dataset) containing the (daily) sampling data, e.g. 4:180
}
\item{n.events.column}{
alternatively to \code{subsampl.columns}, the name or index number of the column containing the number of events (e.g. individual deaths) in each row
}
\item{hotspots}{
logical, whether to calculate the hotspots
}
\item{confidence}{
confidence threshold to consider hotspots
}
\item{min.total.events}{
minimum total number of events to calculate hotspots. Not totally implemented yet!
}
\item{min.hotspot.threshold}{
minimum number of events for a region to be considered a hotspot. If the Malo method says that regions with less than this value are hotspots, the value returned is NA. The default threshold is 2.
}
}
\value{
A list with elements \code{threshold} (an integer value indicating the number of deaths obtained as a threshold for considering a site a roadkill hotspot) and \code{hotspots} (a data frame showing the total number of deaths per region and whether or not it was considered a hospot.)
}
\references{
Malo, J.E., Suarez, F., Diez, A. (2004) Can we mitigate animal-vehicle accidents using predictive models? J. Appl. Ecol. 41, 701-710 (doi: 10.1111/j.0021-8901.2004.00929.x)
}
\author{
A. Marcia Barbosa, J. Tiago Marques, Sara M. Santos
}
\seealso{
\code{\link{sequential.hotspots}}
}
\examples{
data(roadkills)
hs <- hotspots(dataset = roadkills, submat = NULL, region.column = "segment",
subsampl.columns = 4:ncol(roadkills), confidence = 0.95)
hs
}
|
4a50f62090d24971bac549f0d374ff458c2abe2d | bb54736aa1405caeddff2b0e0766026b15a51fe8 | /tests/testthat.R | 054a6d695359b93338100fdcc3577dcdaf0a72e5 | [] | no_license | bprs/RSvgDevice | df74e12a7db7f10cf82e27bf1af798458721843b | 2aacb62948494ce86e709e2db5b8b59824e97361 | refs/heads/master | 2021-01-21T02:40:36.353955 | 2015-10-10T01:40:27 | 2015-10-10T01:40:27 | 43,988,381 | 0 | 0 | null | 2015-10-10T01:38:25 | 2015-10-10T01:38:25 | null | UTF-8 | R | false | false | 64 | r | testthat.R | library(testthat)
library(RSvgDevice)
test_check("RSvgDevice")
|
5bfee67a6db23b182541a23b21a76513b2e55ad3 | 8d3a51881b0f757a4bc8eb2f7f87685c77acfab6 | /tests/testthat/test-religion.R | e3ff7a86bd1ee5a2e4105deacd5cb4994734dc70 | [] | no_license | cran/wakefield | 679968f83c3c889616941569afa303156585d67b | 734a76be93f1df21651312ff2bbf7ba7288bc4f4 | refs/heads/master | 2021-01-15T15:25:42.747684 | 2020-09-13T16:30:02 | 2020-09-13T16:30:02 | 48,091,046 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 70 | r | test-religion.R | context("Checking religion")
test_that("religion ...",{
})
|
184614713f7802b3d9625a4b3f7bb5d2219d6c72 | 9d218bfb19e09ccb58317f18888e58c151f7d93e | /Assignment 2.R | 463a0cf9cfda3b358c6fdd61a4b9eeb871256984 | [] | no_license | Lukebalaj/BIS244-Lukebalaj | fde9a7e5f4a1b7cd0e1eb27eef8bbb87f67e74fd | a7e5e6ba127db4b8c073cdfcd18d4c70a28d7073 | refs/heads/master | 2022-12-17T17:54:58.952609 | 2020-09-27T18:08:59 | 2020-09-27T18:08:59 | 297,713,840 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 566 | r | Assignment 2.R | library("tidyverse")
#set working directory
setwd("~/Documents/BIS244/BIS-244-balajluke/COVID-19_Project/covid-19-data")
US <- read_csv("us-states.csv")
#filter for just PA
pa_data <- filter(US, state == "Pennsylvania")
pa_data <- mutate(pa_data, adj_deaths = deaths)
n <- length(pa_data$date)
for (i in 1:n){
if (pa_data$date[i]=="2020-04-21"){
pa_data$adj_deaths[i] <- pa_data$deaths[i]-282}
}
for (i in 1:n){
if (pa_data$date[i]=="2020-04-22"){
pa_data$adj_deaths[i] <- pa_data$deaths[i]-297}
}
#sum of all adj_deaths in PA
sum(pa_data$adj_deaths) |
b041fe442ede260fe2096b5f6a44788423406f70 | 4b29eff6d4b18ebffa3a4a85e641c3bef53ee898 | /Verification/maps_urban_areas.R | 10a7b05f83bb10a793b99e7d9d53b45f934656d8 | [] | no_license | pejovic/Spatialization | 206e0ea080bf6fe025ea4ce128ba0c3e14fec914 | bd5ed22e896cb68b318a529dc13c44a6231b6436 | refs/heads/master | 2023-05-02T14:42:09.069365 | 2021-05-28T11:46:29 | 2021-05-28T11:46:29 | 218,220,959 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,285 | r | maps_urban_areas.R | library(tidyverse)
library(sf)
library(readxl)
library(ggpubr)
library(ggfortify)
library(here)
library(knitr)
library(kableExtra)
library(DT)
library(mapview)
library(rgdal)
library(SerbianCyrLat)
library(stringr)
library(classInt)
library(viridis)
library(gridExtra)
library(ggspatial)
my_theme <- function(base_size = 10, base_family = "sans"){
theme_minimal(base_size = base_size, base_family = base_family) +
theme(
axis.text = element_text(size = 10),
axis.text.x = element_text(angle = 0, vjust = 0.5, hjust = 0.5),
axis.title = element_text(size = 12),
panel.grid.major = element_line(color = "grey"),
panel.grid.minor = element_blank(),
panel.background = element_rect(fill = "#fffcfc"),
strip.background = element_rect(fill = "#820000", color = "#820000", size =0.5),
strip.text = element_text(face = "bold", size = 10, color = "white"),
legend.position = "bottom",
legend.justification = "center",
legend.background = element_blank(),
panel.border = element_rect(color = "grey30", fill = NA, size = 0.5)
)
}
theme_set(my_theme())
mycolors=c("#f32440","#2185ef","#d421ef")
opstine <- readOGR("Data/opstine/gadm36_SRB_2.shp",
use_iconv=TRUE,
encoding = "UTF-8")
sf_opstine <- st_as_sf(opstine)
unique(sf_opstine$NAME_2)
sf_opstine_pa <- sf_opstine %>% dplyr::filter(NAME_2 == "Pančevo")
sf_opstine_uz <- sf_opstine %>% dplyr::filter(NAME_2 == "Užice")
sf_opstine_va <- sf_opstine %>% dplyr::filter(NAME_2 == "Valjevo")
sf_opstine_kg <- sf_opstine %>% dplyr::filter(NAME_2 == "Kragujevac")
sf_opstine_ni <- sf_opstine %>% dplyr::filter(NAME_2 == "Niš")
clc_18 <- readOGR("Data/clc/CLC18_RS.shp")
sf_clc18 <- st_as_sf(clc_18)
sf_clc18_urb <- subset(sf_clc18, CODE_18 == "111" | CODE_18 == "112") %>% # CLC urban zones
st_transform(crs = "+init=epsg:32634") %>%
dplyr::select(geometry)
sf_municipalities <- sf_opstine %>% dplyr::select(NAME_2, SDG, Br_domacinstva, Br_domacinstva_SDG) %>%
dplyr::rename(NAME = NAME_2, No_houeses_DHS = SDG, No_houses = Br_domacinstva, No_houses_OHS = Br_domacinstva_SDG)
sf_municipalities %<>% st_transform(4326)
sf_municipalities %<>% mutate(Area_mun = st_area(.))
sf_clc18_urb %<>% dplyr::mutate(Area_polygon = sf::st_area(.)) %>% units::drop_units(.)
sf_clc18_urb %<>% st_transform(4326)
sf_clc18_urb_pa <- sf_clc18_urb[sf_opstine_pa,]
sf_clc18_urb_uz <- sf_clc18_urb[sf_opstine_uz,]
sf_clc18_urb_va <- sf_clc18_urb[sf_opstine_va,]
sf_clc18_urb_kg <- sf_clc18_urb[sf_opstine_kg,]
sf_clc18_urb_ni <- sf_clc18_urb[sf_opstine_ni,]
mapview(sf_clc18_urb_ni)
map_pa <- ggplot() +
geom_sf(data = sf_clc18_urb_pa,
aes(fill = "orange")) +
scale_fill_manual(values = "orange",
name = "SO2 [t]") +
labs(x = NULL, y = NULL,
subtitle = paste("Spatial resolution 0.05°x0.05°, Territory of the City of Pančevo \n", "Area (Urban areas): ",
sum(round(sf_clc18_urb_pa$Area_polygon/ 1000000), 2), " km^2",
"\n Buildings DHS: 11986 (CENSUS)" , "\n Buildings OHS: 31158 (CENSUS)", sep = ""),
caption = "UBFCE (2021)") +
theme(line = element_blank(),
axis.title = element_blank(),
legend.position = "None",
panel.background = element_blank()) +
geom_sf(data = sf_opstine_pa, fill = NA, colour = "black", lwd = 0.6)+
coord_sf(datum = sf::st_crs(4326))+
annotation_scale(location = "bl", width_hint = 0.5) +
annotation_north_arrow(location = "bl", which_north = "true",
pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
style = north_arrow_fancy_orienteering)
map_pa
ggsave(plot = map_pa,
filename = "Verification/Map_Pancevo_urban_areas.jpg",
width = 30,
height = 30,
units = "cm",
device = "jpeg",
dpi=600)
map_uz <- ggplot() +
geom_sf(data = sf_clc18_urb_uz,
aes(fill = "orange")) +
scale_fill_manual(values = "orange",
name = "SO2 [t]") +
labs(x = NULL, y = NULL,
subtitle = paste("Spatial resolution 0.05°x0.05°, Territory of the City of Užice \n", "Area (Urban areas): ",
sum(round(sf_clc18_urb_uz$Area_polygon/ 1000000), 2), " km^2",
"\n Buildings DHS: 5786 (CENSUS)" , "\n Buildings OHS: 21191 (CENSUS)", sep = ""),
caption = "UBFCE (2021)") +
theme(line = element_blank(),
axis.title = element_blank(),
legend.position = "None",
panel.background = element_blank()) +
geom_sf(data = sf_opstine_uz, fill = NA, colour = "black", lwd = 0.6)+
coord_sf(datum = sf::st_crs(4326))+
annotation_scale(location = "bl", width_hint = 0.5) +
annotation_north_arrow(location = "bl", which_north = "true",
pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
style = north_arrow_fancy_orienteering)
map_uz
ggsave(plot = map_uz,
filename = "Verification/Map_Uzice_urban_areas.jpg",
width = 30,
height = 30,
units = "cm",
device = "jpeg",
dpi=600)
map_va <- ggplot() +
geom_sf(data = sf_clc18_urb_va,
aes(fill = "orange")) +
scale_fill_manual(values = "orange",
name = "SO2 [t]") +
labs(x = NULL, y = NULL,
subtitle = paste("Spatial resolution 0.05°x0.05°, Territory of the City of Valjevo \n", "Area (Urban areas): ",
sum(round(sf_clc18_urb_va$Area_polygon/ 1000000), 2), " km^2",
"\n Buildings DHS: 4232 (CENSUS)" , "\n Buildings OHS: 27169 (CENSUS)", sep = ""),
caption = "UBFCE (2021)") +
theme(line = element_blank(),
axis.title = element_blank(),
legend.position = "None",
panel.background = element_blank()) +
geom_sf(data = sf_opstine_va, fill = NA, colour = "black", lwd = 0.6)+
coord_sf(datum = sf::st_crs(4326))+
annotation_scale(location = "bl", width_hint = 0.5) +
annotation_north_arrow(location = "bl", which_north = "true",
pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
style = north_arrow_fancy_orienteering)
map_va
ggsave(plot = map_va,
filename = "Verification/Map_Valjevo_urban_areas.jpg",
width = 30,
height = 30,
units = "cm",
device = "jpeg",
dpi=600)
map_kg <- ggplot() +
geom_sf(data = sf_clc18_urb_kg,
aes(fill = "orange")) +
scale_fill_manual(values = "orange",
name = "SO2 [t]") +
labs(x = NULL, y = NULL,
subtitle = paste("Spatial resolution 0.05°x0.05°, Territory of the City of Kragujevac \n", "Area (Urban areas): ",
sum(round(sf_clc18_urb_kg$Area_polygon/ 1000000), 2), " km^2",
"\n Buildings DHS: 19693 (CENSUS)" , "\n Buildings OHS: 40298 (CENSUS)", sep = ""),
caption = "UBFCE (2021)") +
theme(line = element_blank(),
axis.title = element_blank(),
legend.position = "None",
panel.background = element_blank()) +
geom_sf(data = sf_opstine_kg, fill = NA, colour = "black", lwd = 0.6)+
coord_sf(datum = sf::st_crs(4326))+
annotation_scale(location = "bl", width_hint = 0.5) +
annotation_north_arrow(location = "bl", which_north = "true",
pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
style = north_arrow_fancy_orienteering)
map_kg
ggsave(plot = map_kg,
filename = "Verification/Map_Kragujevac_urban_areas.jpg",
width = 30,
height = 30,
units = "cm",
device = "jpeg",
dpi=600)
map_ni <- ggplot() +
geom_sf(data = sf_clc18_urb_ni,
aes(fill = "orange")) +
scale_fill_manual(values = "orange",
name = "SO2 [t]") +
labs(x = NULL, y = NULL,
subtitle = paste("Spatial resolution 0.05°x0.05°, Territory of the City of Niš \n", "Area (Urban areas): ",
sum(round(sf_clc18_urb_ni$Area_polygon/ 1000000), 2), " km^2",
"\n Buildings DHS: 0 (CENSUS)" , "\n Buildings OHS: 89903 (CENSUS)", sep = ""),
caption = "UBFCE (2021)") +
theme(line = element_blank(),
axis.title = element_blank(),
legend.position = "None",
panel.background = element_blank()) +
geom_sf(data = sf_opstine_ni, fill = NA, colour = "black", lwd = 0.6)+
coord_sf(datum = sf::st_crs(4326))+
annotation_scale(location = "bl", width_hint = 0.5) +
annotation_north_arrow(location = "bl", which_north = "true",
pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
style = north_arrow_fancy_orienteering)
map_ni
ggsave(plot = map_ni,
filename = "Verification/Map_Nis_urban_areas.jpg",
width = 30,
height = 30,
units = "cm",
device = "jpeg",
dpi=600)
maps <- grid.arrange(map_pa, map_uz, map_va, map_kg, map_ni, ncol = 2)
ggsave(plot = maps,
filename = "Verification/Map_municipalities_urban_areas.jpg",
width = 30,
height = 30,
units = "cm",
device = "jpeg",
dpi=600)
|
7602e6ed7b9cda73f4866a93809b8fe1a3e783a2 | 7f72ac13d08fa64bfd8ac00f44784fef6060fec3 | /RGtk2/man/gtkTreeViewCollapseAll.Rd | 974884299fef1fb42fe549b456e19f8502e6aeef | [] | no_license | lawremi/RGtk2 | d2412ccedf2d2bc12888618b42486f7e9cceee43 | eb315232f75c3bed73bae9584510018293ba6b83 | refs/heads/master | 2023-03-05T01:13:14.484107 | 2023-02-25T15:19:06 | 2023-02-25T15:20:41 | 2,554,865 | 14 | 9 | null | 2023-02-06T21:28:56 | 2011-10-11T11:50:22 | R | UTF-8 | R | false | false | 350 | rd | gtkTreeViewCollapseAll.Rd | \alias{gtkTreeViewCollapseAll}
\name{gtkTreeViewCollapseAll}
\title{gtkTreeViewCollapseAll}
\description{Recursively collapses all visible, expanded nodes in \code{tree.view}.}
\usage{gtkTreeViewCollapseAll(object)}
\arguments{\item{\verb{object}}{A \code{\link{GtkTreeView}}.}}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
c94126bf7412df83750a4852d58ec75066db75b1 | 97c2cfd517cdf2a348a3fcb73e9687003f472201 | /R/src/QFFX/R/FXConvertOptionTriToUSD.R | 080049a36657d99c9eed980f96ee5a28489cb1ff | [] | no_license | rsheftel/ratel | b1179fcc1ca55255d7b511a870a2b0b05b04b1a0 | e1876f976c3e26012a5f39707275d52d77f329b8 | refs/heads/master | 2016-09-05T21:34:45.510667 | 2015-05-12T03:51:05 | 2015-05-12T03:51:05 | 32,461,975 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 728 | r | FXConvertOptionTriToUSD.R | setConstructorS3("FXConvertOptionTriToUSD", function(...)
{
this <- extend(RObject(), "FXConvertOptionTriToUSD")
this
})
setMethodS3("convertLocalCcyTri","FXConvertOptionTriToUSD",function(this,fxCurr,tenor,putCall,rebalPeriod,rebalDelta,...)
{
CurrInst <- FXConvertOptionTriToUSD()
ccy <- paste(fxCurr$over(),fxCurr$under(),sep="")
tri_type <- paste(rebalPeriod,(rebalDelta*100),"d",sep="")
tri_tag <- "tri_local_ccy"
local_ccy_series_name <- paste(ccy,tenor,putCall,tri_tag,tri_type,sep="_")
tri_tag <- "tri"
target_series_name <- paste(ccy,tenor,putCall,tri_tag,tri_type,sep="_")
result <- FXConvertTriToUSD$convertTriToUSD(localTRI=local_ccy_series_name,targetTRI=target_series_name)
}) |
0dd935e2192cd456be5280bd417fd763ed879438 | 3f46036e845eb9c69a137beaa04305bc56dc3e3a | /Aula2/Ex1Sida.R | 356916bdccc7b184bda2458e1d80ae982690f58e | [] | no_license | natawild/Analise-de-Clusters | a51261cedcb184b983f298b24461563ba891ac58 | a98f17fe52353a64d1d143f207f7fdac1ac18e59 | refs/heads/master | 2022-11-04T17:58:17.199833 | 2022-10-26T10:40:38 | 2022-10-26T10:40:38 | 211,271,289 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 832 | r | Ex1Sida.R | #criar um vetor com os valores dos nrs de casos de SIDA
x=c(38.3, 6.2, 3.7, 2.6, 2.1, 14.6, 5.6, 3.7, 2.3, 2.0, 11.9, 5.5, 3.4, 2.2, 2.0, 6.6, 4.6, 3.1, 2.2, 1.9, 6.3, 4.5, 2.7, 2.1, 1.8)
#calculo da media
media=mean(x)
media
#calculo da mediana
mediana=median(x)
mediana
#a mediana é menor que a média, isto significa que
summary(x)
range(x)
R=max(x)-min(x)
#Distancia inter quartilica
DIQ=IQR(x)
#desenhar um histograma
hist(x)
#mudar a cor
#FREQ=TRUE-> =F-> densidade frq relativas, distribuição de valores
hist(x, breaks = "Sturges",
freq = FALSE,ylim = c(0,.15), ylab = "fri" )
#DESENHAR UMA BOX PLOT
boxplot(
x
)
#inverter a boxplot
boxplot(
x,horizontal = T,col = "TOMATO",
notch = T
# outline = F -> retira os outliers
)
boxplot.stats(x,coef = 1.5, do.conf = T, do.out = T)
|
31d8f07ccdeb17d88863fece62a8799eb29cdadb | 895510ce5c4585b8efeb6b1df22170b9a3909dd2 | /src/main.R | eb93208c90c1fbb8bc3b34ad2c51f656ba8f328d | [] | no_license | Jieatgit/decode_brain_signal_MS_Azure_ML_Competition | a5fcb2a767f83a7c1d66ea2be77a9075cebb727b | 7199b416635e8ac79ad31b6332f5cb670dbc52c9 | refs/heads/master | 2021-01-20T20:10:55.033736 | 2016-07-06T22:37:45 | 2016-07-06T22:37:45 | 62,414,104 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,737 | r | main.R | ### set environment
srcDir <- "/Users/Jie/decode_brain/src3/"
dataDir <- "/Users/Jie/decode_brain/datasets/"
setwd(srcDir)
source(paste(srcDir, "models.R", sep=""))
source(paste(srcDir, "data_processing.R", sep=""))
### load data, data is downloaded from
# http://az754797.vo.msecnd.net/competition/ecog/datasets/ecog_train_with_labels.csv
datafile <- paste(inDir, "ecog_train_with_labels.csv", sep="")
dataset1 <- read.csv(datafile, sep=",", header=T, stringsAsFactor=F)
summary(dataset1)
### train/test split
r <- train_test_slipt_indecies(dataset1,0.75)
dataset_train <- dataset1[r[,1],]
dataset_valid <- dataset1[r[,2],]
rm(r)
### build features
# feature type 1: signal similarity
templates <- fh_templates(dataset_train)
f1_train <- fh_project_2_templates(dataset_train, templates)
f1_valid <- fh_project_2_templates(dataset_valid, templates)
# feature type 2: broadband frequency projection of each event window
psd_train <- compute_psd(dataset_train)
psd_valid <- compute_psd(dataset_valid)
psd_rbind <- psd_train
psd_rbind <- rbind(psd_train, psd_valid)
psd_psc <- compute_psc(psd_rbind)
f2_train <- compute_psd_proj(psd_train, psd_psc)
f2_valid <- compute_psd_proj(psd_valid, psd_psc)
# feature type 3: broadband psd similarity in continues time window
bb <- compute_bb(dataset1)
write.csv(bb,"/Users/Jie/decode_brain/output/model_8.3_bb_mean_psc.csv")
#bb <- read.csv("/Users/Jie/decode_brain/output/model_8.3_bb_mean_psc.csv",
# sep=",", header=T, stringsAsFactor=F); bb <- bb[,2:68]
psd_mean <- bb[(nrow(bb)-240*4+1):(nrow(bb)-240*2),] # need to compute test bb
psc <- bb[(nrow(bb)-240*2+1):nrow(bb),] # need to compute test bb
bb <- bb[1:(nrow(bb)-240*4),]
nrow(bb)-nrow(dataset1) # test sizes, should be of the same size
# splite train/test
r <- train_test_slipt_indecies(bb,0.75)
bb_train <- bb[r[,1],]
bb_valid <- bb[r[,2],]
rm(r)
# projection onto templates
bb_templates <- fh_templates(bb)
f3_train <- fh_project_2_templates(bb_train, bb_templates)
f3_valid <- fh_project_2_templates(bb_valid, bb_templates)
# modify colnames
ncols1 <- ncol(dataset1)
col_names <- rep('',(ncols1-3)*2+3)
col_names[1] <- 'PatientID'
for (i in 1:(ncols1-3)){
col_names[i+1] <- paste('bb_Chanel_',i,'_F', sep='')
col_names[i+1+ncols1-3] <- paste('bb_Chanel_',i,'_H',sep='')
}
col_names[2*(ncols1-3)+2] <- 'Stimulus_Type'
col_names[2*(ncols1-3)+3] <- 'Stimulus_ID'
names(f3_train) <- col_names
names(f3_valid) <- col_names
|
80b5294e61e8d396758cc7e015a83bf61941ac60 | 6d0143324fff5a20522701cb53a147f771e8c50b | /man/ga_custom_datasource.Rd | 16c61933b6414c10f90a213993a0c3dae975b6cb | [] | no_license | gofullthrottle/googleAnalyticsR | b582ab7a267c57b11f197ae5a35bbb9a47de2918 | 1f2853d2fab830a4a869d93448ba8d8919797fa8 | refs/heads/master | 2020-06-01T00:41:56.219006 | 2019-05-21T09:32:33 | 2019-05-21T09:32:33 | 190,563,576 | 1 | 0 | null | 2019-06-06T10:34:24 | 2019-06-06T10:34:24 | null | UTF-8 | R | false | true | 771 | rd | ga_custom_datasource.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/custom_data_source.R
\name{ga_custom_datasource}
\alias{ga_custom_datasource}
\title{List Custom Data Sources}
\usage{
ga_custom_datasource(accountId, webPropertyId)
}
\arguments{
\item{accountId}{Account Id}
\item{webPropertyId}{Web Property Id}
}
\value{
Custom Data Source
}
\description{
Get a list of custom data sources you have configured in Google Analytics web UI.
}
\details{
You primarily need this to get the \code{customDataSourceId} for the uploads via \link{ga_custom_upload_file}
}
\seealso{
Other custom datasource functions: \code{\link{ga_custom_upload_file}},
\code{\link{ga_custom_upload_list}},
\code{\link{ga_custom_upload}}
}
\concept{custom datasource functions}
|
f3ca64a8e95b7aa39047b2f9a50f9db38f63817a | ca854c674cadd8aec00f8690bcb1bf3ee3fc7cac | /SDM/dev/99.model.list_delete.lessthan10.r | 7025df9e5df3ba1caf3104891cb4f789c777bd20 | [] | no_license | LaurenHodgson/CliMAS-R | 4fc697d30707b91c8f1eb12b0e2a4bece0502862 | f59d8af3527cfebbb59d0618ad47e922d740c6c9 | refs/heads/master | 2021-01-23T14:46:40.074709 | 2013-01-08T05:53:39 | 2013-01-08T05:53:39 | 7,125,069 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,336 | r | 99.model.list_delete.lessthan10.r |
library(SDMTools)
taxa=c('amphibians','reptiles','mammals')
actionlist=NULL
for (tax in taxa) { cat(tax, '\n') #BUT RUN EACH TAXA SEPERATELY, POSSIBLY BREAK IT UP MORE!
work.dir=paste('/scratch/jc148322/AP02/',tax, '/models/',sep="")
species = list.files(work.dir) #get a list of species
taxa.action=matrix(NA,nr=length(species),nc=3)
i=1
#cycle through each of the species
for (spp in species) { cat(spp, '\n')
spp.dir = paste(work.dir,spp,'/',sep=''); setwd(spp.dir) #set the working directory to the species directory
toccur=read.csv('occur.csv')
if (nrow(toccur)>=10) {
action='modelled'
taxa.action[i,]=c(spp, tax, action)
} else {
#delete ascii folder
wd=paste(spp.dir,'/output/',sep='');setwd(wd)
asc.dir=list.files(pattern='ascii')
unlink(asc.dir, recursive=T)
action='not_modelled'
taxa.action[i,]=c(spp, tax, action)
}
i=i+1
}
actionlist=rbind(actionlist,taxa.action)
}
colnames(actionlist)=c('species','class','action')
write.csv(actionlist, '/scratch/jc148322/AP02/actionlist.csv',row.names=FALSE)
library(SDMTools)
taxa=c('amphibians','reptiles','mammals')
tax=taxa[3]
work.dir=paste('/scratch/jc148322/AP02/',tax, '/models/',sep="")
species = list.files(work.dir) #get a list of species
not.modelled=NULL
#cycle through each of the species
for (spp in species) {
spp.dir = paste(work.dir,spp,'/',sep=''); setwd(spp.dir) #set the working directory to the species directory
toccur=read.csv('occur.csv')
if (nrow(toccur)>=10) {
} else {
not.modelled=c(not.modelled,spp)
}
}
#####################################
# Delete the ascii and summaries output folders for species that were not
# modelled (fewer than 10 occurrence records), for a single taxon.
library(SDMTools)
taxa <- c('amphibians', 'reptiles', 'mammals')
tax <- taxa[3]  # mammals; change the index to process another taxon
work.dir <- paste('/scratch/jc148322/AP02/', tax, '/models/', sep = "")
species <- list.files(work.dir)  # one sub-directory per species
# cycle through each of the species
for (spp in species) { cat(spp, '\n')
  spp.dir <- paste(work.dir, spp, '/', sep = ''); setwd(spp.dir)  # species directory
  toccur <- read.csv('occur.csv')
  # original tested nrow >= 10 with an empty "if" branch; inverted here
  if (nrow(toccur) < 10) { cat(spp, '\n')
    # remove the per-species ascii grids and summaries
    wd <- paste(spp.dir, '/output/', sep = ''); setwd(wd)
    asc.dir <- list.files(pattern = 'ascii')
    unlink(asc.dir, recursive = TRUE)
    sum.dir <- list.files(pattern = 'summaries')
    unlink(sum.dir, recursive = TRUE)
  }
}
|
821d84f28a67dd5213c5e76b879e977d00d8c253 | f1971a5cbf1829ce6fab9f5144db008d8d9a23e1 | /packrat/lib/x86_64-pc-linux-gnu/3.2.5/rbokeh/tests/testthat/test-figures.R | 8f1dd09577398bc0bf0a19cf6f23c5e2b887bf4d | [] | no_license | harryprince/seamonster | cc334c87fda44d1c87a0436139d34dab310acec6 | ddfd738999cd302c71a11aad20b3af2f4538624f | refs/heads/master | 2021-01-12T03:44:33.452985 | 2016-12-22T19:17:01 | 2016-12-22T19:17:01 | 78,260,652 | 1 | 0 | null | 2017-01-07T05:30:42 | 2017-01-07T05:30:42 | null | UTF-8 | R | false | false | 24,242 | r | test-figures.R |
## difficult to test what the plots actually look like
## currently just run a wide variety of things and make
## sure there are no errors in creating or preparing
## then we can use widget2png and inspect the phantomjs output
## if there is anything output, it means there has been an error
# sink target for print_model_json: the platform's null device
fname <- ifelse(Sys.info()["sysname"] == "Windows", "nul", "/dev/null") # nolint
# container for all figures built below (indices are sparse on purpose)
p <- vector(length = 105, mode = "list")
test_that("examples", {
# basic glyph layers (annular wedge, wedge, arc, annulus, log-axis points)
p[[1]] <- figure() %>%
ly_annular_wedge(Sepal.Length, Sepal.Width, data = iris,
color = Species, inner_radius = 0.1, outer_radius = 0.15,
alpha = 0.5, hover = Species)
print_model_json(p[[1]], file = fname)
p[[2]] <- figure() %>%
ly_wedge(Sepal.Length, Sepal.Width, data = iris,
color = Species, radius = 0.15, alpha = 0.5, hover = Species)
print_model_json(p[[2]], file = fname)
p[[3]] <- figure() %>%
ly_arc(Sepal.Length, Sepal.Width, data = iris,
color = Species, alpha = 0.5)
print_model_json(p[[3]], file = fname)
p[[4]] <- figure() %>%
ly_annulus(Sepal.Length, Sepal.Width, data = iris,
color = Species, hover = Species)
print_model_json(p[[4]], file = fname)
p[[5]] <- figure() %>%
ly_points(rexp(1000), rexp(1000)) %>%
x_axis(label = "x", log = TRUE) %>%
y_axis(label = "y", log = TRUE)
print_model_json(p[[5]], file = fname)
# periodic-table example: rectangles plus several text layers
# prepare data
data(elements, package = "rbokeh")
# create figure
p[[7]] <- figure(title = "Periodic Table", tools = c("hover"),
ylim = as.character(c(7:1)), xlim = as.character(1:18),
xgrid = FALSE, ygrid = FALSE, xlab = "", ylab = "",
height = 600, width = 1200) %>%
# plot rectangles
ly_crect(group, period, data = elements, 0.9, 0.9,
fill_color = color, line_color = color, fill_alpha = 0.6,
hover = list(name, atomic.number, type, atomic.mass,
electronic.configuration)) %>%
# add symbol text
ly_text(symx, period, text = symbol, data = elements,
font_style = "bold", font_size = "15pt",
align = "left", baseline = "middle") %>%
# add atomic number text
ly_text(symx, numbery, text = atomic.number, data = elements,
font_size = "9pt", align = "left", baseline = "middle") %>%
# add name text
ly_text(symx, namey, text = name, data = elements,
font_size = "6pt", align = "left", baseline = "middle") %>%
# add atomic mass text
ly_text(symx, massy, text = atomic.mass, data = elements,
font_size = "6pt", align = "left", baseline = "middle")
print_model_json(p[[7]], file = fname)
# time-series points with hover and a date reference line
data(flightfreq, package = "rbokeh")
p[[8]] <- figure(width = 1000) %>%
ly_points(date, Freq, data = flightfreq,
hover = list(date, Freq, dow), size = 5) %>%
ly_abline(v = as.Date("2001-09-11"))
print_model_json(p[[8]], file = fname)
# image + contour overlay
p[[16]] <- figure(xlim = c(0, 1), ylim = c(0, 1), title = "Volcano") %>%
ly_image(volcano) %>%
ly_contour(volcano)
print_model_json(p[[16]], file = fname)
# ly_image palette handling, image URLs, regression lines, hover templates,
# categorical axes, and google maps
# check palette with ly_image
# should reject a single color
expect_error(
pp <- figure(width = 700, height = 400) %>%
ly_image(volcano, palette = "#FF00FF")
)
# should accept no palette and use default
p[[18]] <- figure(width = 700, height = 400) %>%
ly_image(volcano)
print_model_json(p[[18]], file = fname)
# should accept a Bokeh palette name
p[[19]] <- figure(width = 700, height = 400) %>%
ly_image(volcano, palette = "Greys9")
print_model_json(p[[19]], file = fname)
# should accept a vector of colors
p[[20]] <- figure(width = 700, height = 400) %>%
ly_image(volcano, palette = blues9)
print_model_json(p[[20]], file = fname)
# ring of logo images placed by URL
url <- c(" http://bokeh.pydata.org/en/latest/_static/images/logo.png",
"http://developer.r-project.org/Logo/Rlogo-4.png")
ss <- seq(0, 2 * pi, length = 13)[-1]
ws <- runif(12, 2.5, 5) * rep(c(1, 0.8), 6)
imgdat <- data.frame(
x = sin(ss) * 10, y = cos(ss) * 10,
w = ws, h = ws * rep(c(1, 0.76), 6),
url = rep(url, 6)
)
p[[21]] <- figure(xlab = "x", ylab = "y") %>%
ly_image_url(x, y, w = w, h = h, image_url = url, data = imgdat,
anchor = "center") %>%
ly_lines(sin(c(ss, ss[1])) * 10, cos(c(ss, ss[1])) * 10,
width = 15, alpha = 0.1)
print_model_json(p[[21]], file = fname)
# lowess and lm reference lines with a legend
z <- lm(dist ~ speed, data = cars)
p[[22]] <- figure() %>%
ly_points(cars, hover = cars) %>%
ly_lines(lowess(cars), legend = "lowess") %>%
ly_abline(z, type = 2, legend = "lm", width = 2)
print_model_json(p[[22]], file = fname)
# custom hover template referencing data columns with @name
mtcars$model <- row.names(mtcars)
p[[23]] <- figure() %>%
ly_points(disp, mpg, data = mtcars, color = cyl,
hover = "This <strong>@model</strong><br>has @hp horsepower!")
print_model_json(p[[23]], file = fname)
p[[24]] <- figure() %>%
ly_points(Sepal.Length, Sepal.Width, data = iris,
color = Species, glyph = Species,
hover = list(Sepal.Length, Sepal.Width))
print_model_json(p[[24]], file = fname)
# get data from Duluth site in 'barley' data
du <- subset(lattice::barley, site == "Duluth")
# plot with default ranges
p[[25]] <- figure(width = 600) %>%
ly_points(yield, variety, color = year, data = du)
print_model_json(p[[25]], file = fname)
# y axis is alphabetical
# manually set x and y axis (y in order of 1932 yield)
p[[26]] <- p[[25]] %>%
x_range(c(20, 40)) %>%
y_range(du$variety[order(subset(du, year == 1932)$yield)])
print_model_json(p[[26]], file = fname)
# google map
print_model_json(gmap(), file = fname)
p[[27]] <- gmap(lat = 40.74, lng = -73.95, zoom = 11,
width = 600, height = 600,
map_style = gmap_style("blue_water"))
print_model_json(p[[27]], file = fname)
## axis
##---------------------------------------------------------
# axis configuration: log axes, scientific notation, datetime and numeric
# tick label formats
p[[28]] <- figure() %>%
ly_points(rexp(1000), rexp(1000)) %>%
x_axis(label = "x", log = TRUE) %>%
y_axis(label = "y", log = TRUE)
print_model_json(p[[28]], file = fname)
p[[29]] <- figure() %>%
ly_points(2 ^ (1:10)) %>%
y_axis(log = 2)
print_model_json(p[[29]], file = fname)
# disable scientific tick labels
p[[30]] <- figure() %>%
ly_points(rnorm(10), rnorm(10) / 1000) %>%
y_axis(use_scientific = FALSE)
print_model_json(p[[30]], file = fname)
# specify datetime tick labels
# the appropriate datetime units are automatically chosen
big_range <- seq(as.Date("2012-01-01"), as.Date("2012-12-31"), by = "days")
small_range <- seq(as.Date("2012-01-01"), as.Date("2012-02-01"), by = "days")
p[[31]] <- figure() %>%
ly_lines(big_range, rnorm(366)) %>%
x_axis(label = "Date", format = list(months = "%b-%Y", days = "%d"))
print_model_json(p[[31]], file = fname)
p[[32]] <- figure() %>%
ly_lines(small_range, rnorm(32)) %>%
x_axis(label = "Date", format = list(months = "%b-%Y", days = "%d"))
print_model_json(p[[32]], file = fname)
# specify numeric tick labels
p[[33]] <- figure() %>%
ly_points(rnorm(10), rnorm(10) * 10000) %>%
y_axis(number_formatter = "numeral", format = "0,000")
print_model_json(p[[33]], file = fname)
p[[34]] <- figure() %>%
ly_points(rnorm(10), rnorm(10) * 100) %>%
y_axis(number_formatter = "printf", format = "%0.1f%%")
print_model_json(p[[34]], file = fname)
## bar
##---------------------------------------------------------
# bar charts: counts, totals, stacking, fill/dodge positions, palettes
# count of variety
p[[35]] <- figure() %>%
ly_bar(variety, data = lattice::barley) %>%
theme_axis("x", major_label_orientation = 90)
print_model_json(p[[35]], file = fname)
# total yield per variety
p[[36]] <- figure() %>%
ly_bar(variety, yield, data = lattice::barley, hover = TRUE) %>%
theme_axis("x", major_label_orientation = 90)
print_model_json(p[[36]], file = fname)
# swap axes and add hover
p[[37]] <- figure() %>%
ly_bar(yield, variety, data = lattice::barley, hover = TRUE)
print_model_json(p[[37]], file = fname)
# stack by year
p[[38]] <- figure() %>%
ly_bar(variety, yield, color = year, data = lattice::barley, hover = TRUE) %>%
theme_axis("x", major_label_orientation = 90)
print_model_json(p[[38]], file = fname)
# proportional bars
p[[39]] <- figure() %>%
ly_bar(variety, yield, color = year,
data = lattice::barley, position = "fill", width = 1) %>%
theme_axis("x", major_label_orientation = 90) %>%
set_palette(discrete_color = pal_color(c("red", "blue")))
print_model_json(p[[39]], file = fname)
# swap axes and use different palette
p[[40]] <- figure() %>%
ly_bar(yield, variety, color = year,
data = lattice::barley, position = "fill") %>%
set_palette(discrete_color = pal_color(c("red", "blue")))
print_model_json(p[[40]], file = fname)
# side by side bars
p[[41]] <- figure() %>%
ly_bar(variety, yield, color = year,
data = lattice::barley, position = "dodge") %>%
theme_axis("x", major_label_orientation = 90)
print_model_json(p[[41]], file = fname)
# use a different theme
p[[42]] <- figure() %>%
ly_bar(variety, yield, color = year,
data = lattice::barley, position = "dodge") %>%
theme_axis("x", major_label_orientation = 90)
print_model_json(p[[42]], file = fname)
## boxplot
##---------------------------------------------------------
p[[43]] <- figure(ylab = "Height (inches)", width = 600) %>%
ly_boxplot(voice.part, height, data = lattice::singer)
print_model_json(p[[43]], file = fname)
# change orientation of x axis labels
p[[44]] <- figure(ylab = "Height (inches)", width = 600) %>%
ly_boxplot(voice.part, height, data = lattice::singer) %>%
theme_axis("x", major_label_orientation = 90)
print_model_json(p[[44]], file = fname)
# horizontal boxplot with outlier glyphs suppressed
d <- data.frame(x = rnorm(1000), y = sample(letters[1:5], 1000, replace = TRUE))
p[[103]] <- figure() %>%
ly_boxplot(y, x, outlier_glyph = NA, data = d)
print_model_json(p[[103]], file = fname)
## callbacks
##---------------------------------------------------------
# JavaScript callbacks: range, hover, tap, selection, and shiny callbacks
p[[45]] <- figure() %>%
ly_points(1:10) %>%
x_range(callback = console_callback()) %>%
y_range(callback = console_callback())
print_model_json(p[[45]], file = fname)
# debug callback
p[[46]] <- figure() %>%
ly_points(1:10) %>%
x_range(callback = debug_callback())
print_model_json(p[[46]], file = fname)
# character callback
p[[47]] <- figure() %>%
ly_points(1:10) %>%
x_range(callback = "console.log('hi')")
print_model_json(p[[47]], file = fname)
# console callback (prints cb_data and cb_obj when hovered)
p[[48]] <- figure() %>%
ly_points(1:10, lname = "points") %>%
tool_hover(console_callback(), "points") %>%
tool_selection(console_callback(), "points")
print_model_json(p[[48]], file = fname)
# debug callback (launches debugger)
p[[49]] <- figure() %>%
ly_points(1:10, lname = "points") %>%
tool_hover(debug_callback("points"), "points")
print_model_json(p[[49]], file = fname)
# just hover
p[[50]] <- figure() %>%
ly_points(1:10, hover = data.frame(a = 1:10))
print_model_json(p[[50]], file = fname)
# both hover and hover callback
p[[51]] <- figure() %>%
ly_points(1:10, hover = data.frame(a = 1:10), lname = "points") %>%
tool_hover(console_callback(), "points")
print_model_json(p[[51]], file = fname)
# two different glyphs with different hover callbacks
p[[52]] <- figure() %>%
ly_points(1:10, lname = "points1") %>%
ly_points(2:12, lname = "points2") %>%
tool_hover("if(cb_data.index['1d'].indices.length > 0) console.log('1')", "points1") %>%
tool_hover("if(cb_data.index['1d'].indices.length > 0) console.log('2')", "points2")
print_model_json(p[[52]], file = fname)
# tool_hover with references to lnames made available callback
# is only triggered on l1 hover
p[[53]] <- figure() %>%
ly_points(1:10, lname = "l1") %>%
ly_points(2:11, lname = "l2") %>%
tool_hover(debug_callback(c("l1", "l2")), "l1")
print_model_json(p[[53]], file = fname)
dd <- data.frame(x = 1:10, link = paste0("http://google.com#q=", 1:10))
# just url
p[[54]] <- figure() %>%
ly_points(x, url = "@link", data = dd, lname = "points")
print_model_json(p[[54]], file = fname)
# console callback (prints cb_obj and cb_data (empty) when point is clicked)
p[[55]] <- figure() %>%
ly_points(x, data = dd, lname = "points") %>%
tool_tap(console_callback(), "points")
print_model_json(p[[55]], file = fname)
# debug callback
p[[56]] <- figure() %>%
ly_points(x, data = dd, lname = "points") %>%
tool_tap(debug_callback("points"), "points")
print_model_json(p[[56]], file = fname)
# both console and url (note that you can toggle which one in toolbar)
# but would be good to be able to do both
p[[57]] <- figure() %>%
ly_points(x, url = "@link", data = dd, lname = "points") %>%
tool_tap(console_callback(), "points")
print_model_json(p[[57]], file = fname)
# two layers both with different tap callback
# only first is honored (no matter what point is clicked)
# would be good if could do both
# https://github.com/bokeh/bokeh/issues/3804
p[[58]] <- figure() %>%
ly_points(1:10, lname = "l1") %>%
ly_points(2:11, lname = "l2") %>%
tool_tap("console.log('l1')", "l1") %>%
tool_tap("console.log('l2')", "l2")
print_model_json(p[[58]], file = fname)
p[[59]] <- figure(tools = "lasso_select") %>%
ly_points(1:10, lname = "points") %>%
tool_selection(debug_callback(), "points")
print_model_json(p[[59]], file = fname)
p[[60]] <- figure(tools = "box_select") %>%
ly_points(1:10, lname = "points") %>%
tool_selection(debug_callback(), "points")
print_model_json(p[[60]], file = fname)
# all shiny callback hooks on a single figure
dat <- data.frame(x = rnorm(10), y = rnorm(10))
p[[61]] <- figure() %>% ly_points(x = x, y = y, data = dat,
hover = list(x, y), lname = "points") %>%
tool_hover(shiny_callback("hover_info"), "points") %>%
tool_tap(shiny_callback("tap_info"), "points") %>%
tool_box_select(shiny_callback("selection_info"), "points") %>%
x_range(callback = shiny_callback("x_range")) %>%
y_range(callback = shiny_callback("y_range"))
print_model_json(p[[61]], file = fname)
## grid
##---------------------------------------------------------
# grid_plot layouts: labels, shared/linked axes, list layouts, linked data
idx <- split(1:150, iris$Species)
figs <- lapply(idx, function(x) {
figure(width = 300, height = 300) %>%
ly_points(Sepal.Length, Sepal.Width, data = iris[x, ],
hover = list(Sepal.Length, Sepal.Width))
})
# 1 row, 3 columns
p[[63]] <- grid_plot(figs)
print_model_json(p[[63]], file = fname)
# specify xlim and ylim to be applied to all panels
p[[64]] <- grid_plot(figs, xlim = c(4, 8), ylim = c(1.5, 4.5))
print_model_json(p[[64]], file = fname)
# unnamed list will remove labels
p[[65]] <- grid_plot(unname(figs))
print_model_json(p[[65]], file = fname)
# 2 rows, 2 columns
p[[66]] <- grid_plot(figs, nrow = 2)
print_model_json(p[[66]], file = fname)
# x and y axis with same (and linked) limits
p[[67]] <- grid_plot(figs, same_axes = TRUE)
print_model_json(p[[67]], file = fname)
# x axis with same (and linked) limits
p[[68]] <- grid_plot(figs, same_axes = c(TRUE, FALSE), nrow = 2)
print_model_json(p[[68]], file = fname)
# x axis with same (and linked) limits and custom xlim
p[[69]] <- grid_plot(figs, same_axes = c(TRUE, FALSE), xlim = c(5, 7), nrow = 2)
print_model_json(p[[69]], file = fname)
# send lists instead of specifying nrow and ncol
p[[70]] <- grid_plot(list(
c(list(figs[[1]]), list(figs[[3]])),
c(list(NULL), list(figs[[2]]))
))
print_model_json(p[[70]], file = fname)
# a null entry will be skipped in the grid
figs2 <- figs
figs2[1] <- list(NULL)
p[[71]] <- grid_plot(figs2, nrow = 2)
print_model_json(p[[71]], file = fname)
# link data across plots in the grid (try box_select tool)
# (data sources must be the same)
tools <- c("pan", "wheel_zoom", "box_zoom", "box_select", "reset")
p1 <- figure(tools = tools, width = 500, height = 500) %>%
ly_points(Sepal.Length, Sepal.Width, data = iris, color = Species)
p2 <- figure(tools = tools, width = 500, height = 500) %>%
ly_points(Petal.Length, Petal.Width, data = iris, color = Species)
p[[72]] <- grid_plot(list(p1, p2), same_axes = TRUE, link_data = TRUE)
print_model_json(p[[72]], file = fname)
## themes
##---------------------------------------------------------
# manual theming plus the built-in theme objects
p[[73]] <- figure() %>%
ly_points(1:10) %>%
theme_plot(background_fill_color = "#E6E6E6",
outline_line_color = "white") %>%
theme_grid(c("x", "y"), grid_line_color = "white",
minor_grid_line_color = "white",
minor_grid_line_alpha = 0.4) %>%
theme_axis(c("x", "y"), axis_line_color = "white",
major_label_text_color = "#7F7F7F",
major_tick_line_color = "#7F7F7F",
minor_tick_line_alpha = 0, num_minor_ticks = 2)
print_model_json(p[[73]], file = fname)
# or use the built in ggplot theme (under development)
p[[74]] <- figure(data = iris, legend = "top_left", tools = NULL) %>%
ly_points(Sepal.Length, Petal.Length, color = Species) %>%
set_theme(bk_ggplot_theme())
print_model_json(p[[74]], file = fname)
p[[75]] <- figure(data = iris, legend = "top_left", tools = NULL) %>%
ly_points(Sepal.Length, Petal.Length, color = Species) %>%
set_theme(bk_default_theme())
print_model_json(p[[75]], file = fname)
## other
##---------------------------------------------------------
# assorted layers: hexbin, maps, jittered boxplots, glyph table, palettes,
# histograms/densities, quantile plots, segments, curves, rays
p[[76]] <- figure() %>% ly_hexbin(rnorm(10000), rnorm(10000))
print_model_json(p[[76]], file = fname)
library(maps)
data(world.cities)
caps <- subset(world.cities, capital == 1)
caps$population <- prettyNum(caps$pop, big.mark = ",")
p[[77]] <- figure(width = 800, height = 450, padding_factor = 0) %>%
ly_map("world", col = "gray") %>%
ly_points(long, lat, data = caps, size = 5,
hover = c(name, country.etc, population))
print_model_json(p[[77]], file = fname)
p[[78]] <- figure(data = lattice::singer) %>%
ly_points(catjitter(voice.part), jitter(height), color = "black") %>%
ly_boxplot(voice.part, height, with_outliers = FALSE)
print_model_json(p[[78]], file = fname)
p[[81]] <- point_types()
print_model_json(p[[81]], file = fname)
p[[82]] <- figure(legend_location = "top_left") %>%
ly_points(1:10, legend = "a") %>%
theme_legend(border_line_width = 2)
print_model_json(p[[82]], file = fname)
p[[83]] <- figure() %>%
ly_points(Sepal.Length, Sepal.Width, data = iris,
color = Species, glyph = Species) %>%
set_palette(discrete_color = pal_color(c("red", "blue", "green")))
print_model_json(p[[83]], file = fname)
pp <- figure() %>% ly_points(1:10)
get_object_refs(pp)
p[[85]] <- figure(width = 600, height = 400) %>%
ly_hist(eruptions, data = faithful, breaks = 40, freq = FALSE) %>%
ly_density(eruptions, data = faithful)
print_model_json(p[[85]], file = fname)
p[[86]] <- figure(legend_location = "top_left") %>%
ly_quantile(Sepal.Length, group = Species, data = iris)
print_model_json(p[[86]], file = fname)
# wa_cancer <- droplevels(subset(latticeExtra::USCancerRates, state == "Washington"))
# rownames(wa_cancer) <- NULL
# wa_cancer <- wa_cancer[, c("LCL95.male", "UCL95.male", "rate.male", "county")]
# wa_cancer <- wa_cancer[order(wa_cancer$rate.male, decreasing = TRUE),]
# wa_cancer <- wa_cancer[1:10, ]
# hard-coded snapshot of the latticeExtra data produced by the commented code above
wa_cancer <- data.frame(
LCL95.male = c(237, 233.1, 266, 219.8, 227.5, 239.7, 245.4, 237.5, 208, 216.2),
UCL95.male = c(466, 471.6, 316.8, 347.2, 303, 283.4, 263.3, 268.1, 300.3, 290.5),
rate.male = c(332, 329.8, 290.5, 276.4, 263, 260.8, 254.2, 252.4, 250.6, 250.3),
county = c("Columbia", "Wahkiakum", "Grays Harbor", "Pend Oreille", "Franklin",
"Cowlitz", "Pierce", "Thurston", "Klickitat", "Pacific"),
stringsAsFactors = FALSE)
## y axis sorted by male rate
ylim <- levels(with(wa_cancer, reorder(county, rate.male)))
p[[88]] <- figure(ylim = ylim, tools = NULL, data = wa_cancer) %>%
ly_segments(LCL95.male, county, UCL95.male,
county, color = NULL, width = 2) %>%
ly_points(rate.male, county, glyph = 16)
print_model_json(p[[88]], file = fname)
chippy <- function(x) sin(cos(x) * exp(-x / 2))
p[[89]] <- figure(width = 800) %>%
ly_curve(chippy, -8, 7, n = 2001)
print_model_json(p[[89]], file = fname)
p[[90]] <- figure() %>%
ly_ray(Sepal.Length, Sepal.Width,
data = iris, length = runif(150),
angle = runif(150, max = 2 * pi),
color = Species)
print_model_json(p[[90]], file = fname)
# broken!!
# NOTE(review): the trailing comma after "data = iris," leaves an empty
# argument in the ly_bezier()/ly_quadratic() calls; kept as-is since the
# author flagged this example as broken above
p[[91]] <- figure() %>%
ly_bezier(
x0 = Sepal.Length,
x1 = Sepal.Length + runif(150),
cx0 = Sepal.Length + runif(150),
cx1 = Sepal.Length + runif(150),
y0 = Sepal.Width,
y1 = Sepal.Width + runif(150),
cy0 = Sepal.Width + runif(150),
cy1 = Sepal.Width + runif(150),
color = Species,
data = iris,
)
print_model_json(p[[91]], file = fname)
p[[92]] <- figure() %>%
ly_quadratic(
x0 = Sepal.Length,
x1 = Sepal.Length + runif(150),
cx = Sepal.Length + runif(150),
y0 = Sepal.Width,
y1 = Sepal.Width + runif(150),
cy = Sepal.Width + runif(150),
color = Species,
data = iris,
)
print_model_json(p[[92]], file = fname)
# random walk paths for ly_multi_line
xs <- list()
ys <- list()
for (i in 1:500) {
count <- sample(1:10, 1)
angles <- runif(count + 1, 0, 2 * pi)
x_dists <- (1 / 2) ^ (0:count) * cos(angles)
y_dists <- (1 / 2) ^ (0:count) * sin(angles)
xs[[length(xs) + 1]] <- c(cumsum(x_dists))
ys[[length(ys) + 1]] <- c(cumsum(y_dists))
}
p[[93]] <- figure() %>%
ly_multi_line(xs = xs, ys = ys)
print_model_json(p[[93]], file = fname)
# reference lines on categorical and numeric axes
p[[94]] <- figure() %>%
ly_points(1:26, letters) %>%
ly_abline(h = "j") %>%
ly_abline(v = 10)
print_model_json(p[[94]], file = fname)
p[[95]] <- figure() %>%
ly_points(1:10) %>%
ly_abline(v = 1:10) %>%
ly_abline(h = 1:10)
print_model_json(p[[95]], file = fname)
p[[96]] <- figure() %>%
ly_points(0:10) %>%
ly_abline(0, seq(0, 1, by = 0.1))
print_model_json(p[[96]], file = fname)
p[[97]] <- figure() %>%
ly_oval(Sepal.Length, Sepal.Width, data = iris, color = Species, alpha = 0.5)
print_model_json(p[[97]], file = fname)
# single patch doesn't allow line and fill color to come from data source
# p[[98]] <- figure() %>%
# ly_patch(Sepal.Length, Sepal.Width, data = iris, color = Species, alpha = 0.5)
# print_model_json(p[[98]], file = fname)
# p[[99]] <- figure() %>%
# ly_patch(Sepal.Length, Sepal.Width, data = iris, color = "blue", alpha = 0.5)
# print_model_json(p[[99]], file = fname)
p[[100]] <- figure() %>%
ly_points(disp, mpg, data = mtcars, color = cyl,
hover = "cyl")
print_model_json(p[[100]], file = fname)
p[[101]] <- figure() %>%
ly_boxplot(rnorm(500))
print_model_json(p[[101]], file = fname)
p[[102]] <- figure() %>%
ly_boxplot(sample(1:20, 500, replace = TRUE), rnorm(500))
print_model_json(p[[102]], file = fname)
# theming applied to a grid of plots
p[[103]] <- grid_plot(figs) %>%
theme_title(text_color = "red") %>%
theme_plot(background_fill_color = "#E6E6E6",
outline_line_color = "white") %>%
theme_grid(c("x", "y"), grid_line_color = "white",
minor_grid_line_color = "white",
minor_grid_line_alpha = 0.4) %>%
theme_axis(c("x", "y"), axis_line_color = "white",
major_label_text_color = "#7F7F7F",
major_tick_line_color = "#7F7F7F",
minor_tick_line_alpha = 0, num_minor_ticks = 2)
print_model_json(p[[103]], file = fname)
p[[104]] <- grid_plot(figs) %>%
set_theme(bk_ggplot_theme)
print_model_json(p[[104]], file = fname)
# html rendering smoke test
pp <- figure() %>% ly_points(1:10)
rbokeh2html(pp)
# tf <- tempfile(fileext = ".png")
# figure(tools = NULL) %>%
# ly_points(1:10) %>%
# widget2png(tf)
# system2("open", tf)
})
# # sapply(p, length)
# for (i in seq_along(p)) {
# if (!is.null(p[[i]])) {
# res <- widget2png(p[[i]], file = tempfile(fileext = ".png"))
# if (length(res) > 0)
# message("JS errors for plot ", i)
# # expect(length(res) == 0, message = paste("JS errors for plot", i))
# }
# }
|
1fa761a9f27a18f4853c75a9b4a6b1f83451343c | 587b6ad96d000e7e70b2a64f8e1beaf9a9758722 | /server.R | 1bdf2bb5a96e6f851c56b5e8ea2810a1e4b1f5db | [] | no_license | Deleetdk/discretization_relative_risk | 392a96f7b1b76aea95e0d297b02c01d5cdcd7e23 | c43db8175d6a02bcd9c6dab06db44f08452c1c4a | refs/heads/master | 2021-08-08T09:52:24.748056 | 2020-06-12T22:49:11 | 2020-06-12T22:49:11 | 41,199,395 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,431 | r | server.R |
# This is the server logic for a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
#functions
# Standardized coefficient of the error term: given a predictor with
# standardized coefficient `beta`, returns the loading the residual must
# carry so the total variance of the outcome equals 1 (i.e. sqrt(1 - beta^2)).
get_error <- function(beta) {
  sqrt(1 - beta^2)
}
# Pair a probability with its complement: c(p, 1 - p),
# suitable as the `prob` argument of sample() over two outcomes.
get_both_probs <- function(x) c(x, 1 - x)
library(shiny)
library(plyr)
library(ggplot2)
library(reshape)
library(stringr)
library(grid)
shinyServer(function(input, output) {
  # Case-level simulated data.
  # Draws n latent predictor values, cuts them into input$n_groups equal-size
  # quantile groups, builds a latent outcome correlated at input$cor, and
  # draws one Bernoulli trial per case for the binary outcome.
  r_d <- reactive({
    # reproducible
    set.seed(1)
    n <- 1e4
    # latent (continuous) predictor
    d <- data.frame(pred_con = rnorm(n))
    # discretized predictor: quantile groups, then converted to a factor
    d$pred_discrete <- cut(d$pred_con,
                           quantile(d$pred_con,
                                    seq(0, 1, length.out = input$n_groups + 1)),
                           labels = FALSE, include.lowest = TRUE)
    d$pred_discrete <- as.factor(d$pred_discrete)
    # latent (continuous) outcome, correlated at input$cor with the predictor
    d$out_con <- d$pred_con * input$cor + get_error(input$cor) * rnorm(n)
    # binary outcome: shift the latent outcome so pnorm() yields the requested
    # baseline probability, then roll the dice once per case
    x_adjust <- qnorm(input$base_rate_adj)  # z-score shift for the target rate
    x_pnorm <- pnorm(d$out_con + x_adjust)
    # preallocated (the original grew the vector by indexed assignment);
    # the per-element sample() loop is kept so the RNG stream - and thus the
    # simulated data under set.seed(1) - is unchanged
    x_binary <- numeric(length(x_pnorm))
    for (idx in seq_along(x_pnorm)) {
      x_binary[idx] <- sample(x = 1:0,             # outcomes: event / no event
                              size = 1,
                              prob = get_both_probs(x_pnorm[idx]))
    }
    d$out_binary <- x_binary
    return(d)
  })

  # Observed base rate: proportion of events in the simulated data.
  r_base_rate <- reactive({
    d <- r_d()
    return(mean(d$out_binary))
  })

  # Group-level data: absolute risk per predictor group plus relative risks
  # against the lowest, central, and highest groups.
  r_d2 <- reactive({
    d <- r_d()
    # proportion of events in each predictor group
    d2 <- ddply(.data = d, .variables = .(pred_discrete),
                .fun = summarize,
                prop = mean(out_binary))
    d2$RR_low <- d2$prop / d2[1, "prop"]
    d2$RR_high <- d2$prop / d2[nrow(d2), "prop"]
    # round the median row index down so an even group count still gives an integer
    central_row <- floor(median(seq_len(nrow(d2))))
    d2$RR_central <- d2$prop / d2[central_row, "prop"]
    return(d2)
  })

  # Scatterplot of the latent predictor vs latent outcome, colored by group,
  # with a linear fit.
  output$scatter <- renderPlot({
    d <- r_d()
    g <- ggplot(d, aes(pred_con, out_con)) +
      geom_point(aes(color = pred_discrete)) +
      geom_smooth(method = lm, se = FALSE) +
      xlab("Underlying predictor phenotype") +
      ylab("Underlying outcome phenotype") +
      scale_color_discrete(name = "Predictor group") +
      theme_bw()
    return(g)
  })

  # Absolute risk per predictor group, annotated with the overall base rate.
  output$abs_risk <- renderPlot({
    d2 <- r_d2()
    base_rate <- round(r_base_rate(), 3)
    # build the base-rate label and its white background box
    # (a stray geom_text() call whose result was discarded has been removed)
    text <- str_c("Base rate = ", base_rate)
    grob <- grobTree(textGrob(text, x = .95, y = .1, hjust = 1,
                              gp = gpar(col = "black", fontsize = 13)))
    grob_bg <- grobTree(rectGrob(gp = gpar(fill = "white"),
                                 x = .89, y = .1, width = .15, height = .1))
    g <- ggplot(d2, aes(pred_discrete, prop)) +
      geom_bar(stat = "identity") +
      xlab("Predictor group") +
      ylab("Absolute risk") +
      annotation_custom(grob_bg) +
      annotation_custom(grob) +
      theme_bw()
    return(g)
  })

  # Relative risk per predictor group, one bar per comparison group.
  output$rel_risk <- renderPlot({
    d2 <- r_d2()
    # drop the absolute risk column and reshape to long form for grouped bars
    d2$prop <- NULL
    d3 <- melt(d2)
    d3$variable <- factor(d3$variable, c("RR_low", "RR_central", "RR_high"))
    g <- ggplot(d3, aes(pred_discrete, value)) +
      geom_bar(aes(group = variable, fill = variable),
               stat = "identity", position = "dodge") +
      xlab("Predictor group") +
      ylab("Relative risk") +
      geom_hline(yintercept = 1, linetype = "dashed") +
      scale_fill_discrete(name = "Comparison\ngroup",
                          labels = c("Lowest", "Central", "Highest")) +
      theme_bw()
    return(g)
  })
})
|
jaccard_Vertex <- function(g1, g2) {
  # values of the first vertex attribute of each graph
  attrs1 <- vertex_attr(g1)[[1]]
  attrs2 <- vertex_attr(g2)[[1]]
  # Jaccard index: |A intersect B| / |A union B|
  length(intersect(attrs1, attrs2)) / length(union(attrs1, attrs2))
}
|
002b187d2bf044c2db373f47bf4fe98a8f110778 | 90613c1a502a34ecbe3901237c8720d98cfa7e8e | /R/Mloglikelihood.R | 35b5f7fabce536e728f4a75e146b96b85063cd46 | [] | no_license | cran/parfm | 48345c471ca26c6000d72dbe10b8446a63b5ca01 | da8a1afaed7160d0b3ed3b254be27c015e52ba9b | refs/heads/master | 2023-01-24T09:09:45.168377 | 2023-01-17T21:40:02 | 2023-01-17T21:40:02 | 17,698,226 | 0 | 3 | null | null | null | null | UTF-8 | R | false | false | 14,006 | r | Mloglikelihood.R | ################################################################################
# Minus the log-likelihood #
################################################################################
# #
# This function computes minus the logarithm of the likelihood function #
# #
# Its parameters are #
# - p : the parameters vector, in the form #
# c( frailty distribution parameter(s), #
# baseline hazard parameter(s), #
# regression parameter(s) ) #
# - obs : the observed data, in the form #
# list( time = event/censoring times, #
# [trunc = left truncation times, ] #
# event = event indicators, #
# x = covariate data.frame, intercept included #
# cluster = cluster ID vector, #
# ncl = number of clusters, #
# di = vector giving the numbers of events per cluster #
# - dist : the baseline hazard distribution name #
# - frailty: the frailty distribution name #
# - correct : (only for possta) the correction to use in case of many #
# events per cluster to get finite likelihood values. #
# When correct!=0 the likelihood is divided by #
# 10^(#clusters * correct) for computation, #
# but the value of the log-likelihood in the output #
# is the re-adjusted value. #
# - transform: should the parameters tranformed to their parameter space or #
# are they assumed to be already on their scale? #
# The first case (TRUE, the default) is for Mll optimization, #
# The secand case (FALSE) is used to straightforwardly compute #
# the Hessian matrix on the risght parameter scale #
# #
################################################################################
# #
# Date: December 19, 2011 #
# Last modification: January 31, 2017 #
# #
################################################################################
Mloglikelihood <- function(p,
                           obs,
                           dist,
                           frailty,
                           correct,
                           transform = TRUE) {
    ## Returns minus the log-likelihood of the parametric frailty model.
    ## When transform = TRUE, the elements of 'p' are on the unconstrained
    ## optimisation scale and strictly positive parameters are recovered
    ## via exp(); when FALSE, 'p' is assumed to already be on the natural
    ## scale (used to compute the Hessian on the right parameter scale).
    # ---- Assign the number of frailty parameters 'obs$nFpar' --------------- #
    # ---- and compute Sigma for the Positive Stable frailty ----------------- #
    if (frailty %in% c("gamma", "ingau")) {
        theta <- ifelse(transform, exp(p[1]), p[1])
    } else if (frailty == "lognormal") {
        sigma2 <- ifelse(transform, exp(p[1]), p[1])
    } else if (frailty == "possta") {
        # nu must lie in (0, 1): double-exponential back-transform
        nu <- ifelse(transform, exp(-exp(p[1])), p[1])
        D <- max(obs$dqi)
        # NOTE: the local 'Omega' (matrix of coefficients) shadows the
        # Omega() function from here on
        Omega <- Omega(D, correct = correct, nu = nu)
    }
    # ---- Baseline hazard --------------------------------------------------- #
    if (frailty == 'none') obs$nFpar <- 0
    # baseline parameters, one row of 'pars' per stratum
    # (strictly positive parameters are exp()-transformed when transform = TRUE)
    if (dist %in% c("weibull", "inweibull", "frechet")) {
        if (transform) {
            pars <- cbind(rho = exp(p[obs$nFpar + 1:obs$nstr]),
                          lambda = exp(p[obs$nFpar + obs$nstr + 1:obs$nstr]))
        } else {
            pars <- cbind(rho = p[obs$nFpar + 1:obs$nstr],
                          lambda = p[obs$nFpar + obs$nstr + 1:obs$nstr])
        }
        beta <- p[-(1:(obs$nFpar + 2 * obs$nstr))]
    } else if (dist == "exponential") {
        if (transform) {
            pars <- cbind(lambda = exp(p[obs$nFpar + 1:obs$nstr]))
        } else {
            pars <- cbind(lambda = p[obs$nFpar + 1:obs$nstr])
        }
        beta <- p[-(1:(obs$nFpar + obs$nstr))]
    } else if (dist == "gompertz") {
        if (transform) {
            pars <- cbind(gamma = exp(p[obs$nFpar + 1:obs$nstr]),
                          lambda = exp(p[obs$nFpar + obs$nstr + 1:obs$nstr]))
        } else {
            pars <- cbind(gamma = p[obs$nFpar + 1:obs$nstr],
                          lambda = p[obs$nFpar + obs$nstr + 1:obs$nstr])
        }
        beta <- p[-(1:(obs$nFpar + 2 * obs$nstr))]
    } else if (dist == "lognormal") {
        # mu is unconstrained, only sigma is positive
        if (transform) {
            pars <- cbind(mu = p[obs$nFpar + 1:obs$nstr],
                          sigma = exp(p[obs$nFpar + obs$nstr + 1:obs$nstr]))
        } else {
            pars <- cbind(mu = p[obs$nFpar + 1:obs$nstr],
                          sigma = p[obs$nFpar + obs$nstr + 1:obs$nstr])
        }
        beta <- p[-(1:(obs$nFpar + 2 * obs$nstr))]
    } else if (dist == "loglogistic") {
        if (transform) {
            pars <- cbind(alpha = p[obs$nFpar + 1:obs$nstr],
                          kappa = exp(p[obs$nFpar + obs$nstr + 1:obs$nstr]))
        } else {
            pars <- cbind(alpha = p[obs$nFpar + 1:obs$nstr],
                          kappa = p[obs$nFpar + obs$nstr + 1:obs$nstr])
        }
        beta <- p[-(1:(obs$nFpar + 2 * obs$nstr))]
    } else if (dist == "logskewnormal") {
        if (transform) {
            pars <- cbind(mu = p[obs$nFpar + 1:obs$nstr],
                          sigma = exp(p[obs$nFpar + obs$nstr + 1:obs$nstr]),
                          alpha = exp(p[obs$nFpar + 2 * obs$nstr + 1:obs$nstr]))
        } else {
            pars <- cbind(mu = p[obs$nFpar + 1:obs$nstr],
                          sigma = p[obs$nFpar + obs$nstr + 1:obs$nstr],
                          alpha = p[obs$nFpar + 2 * obs$nstr + 1:obs$nstr])
        }
        beta <- p[-(1:(obs$nFpar + 3 * obs$nstr))]
    }
    rownames(pars) <- levels(as.factor(obs$strata))
    # baseline: from string to the associated function
    # (NOTE(review): relies on eval(parse()); 'dist' must name one of the
    #  baseline-hazard functions, e.g. weibull(), defined in the package)
    dist <- eval(parse(text = dist))
    # ---- Cumulative Hazard by cluster and by strata ------------------------- #
    # For each stratum: per-subject H(t) * exp(x'beta), paired with the
    # subject's cluster id; the pairs are then summed within each cluster.
    cumhaz <- NULL
    cumhaz <- matrix(unlist(
        sapply(levels(as.factor(obs$strata)),
               function(x) {t(
                   cbind(dist(pars[x, ], obs$time[obs$strata == x], what = "H"
                   ) * exp(as.matrix(obs$x)[
                       obs$strata == x, -1, drop = FALSE] %*% as.matrix(beta)),
                   obs$cluster[obs$strata == x]))
               })), ncol = 2, byrow = TRUE)
    cumhaz <- aggregate(cumhaz[, 1], by = list(cumhaz[, 2]),
                        FUN = sum)[, 2, drop = FALSE]
    ### NO FRAILTY
    if (frailty == "none") cumhaz <- sum(cumhaz)
    # Possible truncation: same per-cluster sums, evaluated at the
    # left-truncation times instead of the event/censoring times
    if (!is.null(obs$trunc)) {
        cumhazT <- matrix(unlist(
            sapply(levels(as.factor(obs$strata)),
                   function(x) {t(
                       cbind(dist(pars[x, ], obs$trunc[obs$strata == x], what = "H"
                       ) * exp(as.matrix(obs$x)[
                           obs$strata == x, -1, drop = FALSE] %*% as.matrix(beta)),
                       obs$cluster[obs$strata == x]))
                   })), ncol = 2, byrow = TRUE)
        cumhazT <- aggregate(cumhazT[, 1], by = list(cumhazT[, 2]),
                             FUN = sum)[, 2, drop = FALSE]
        ### NO FRAILTY
        if (frailty == "none") cumhazT <- sum(cumhazT)
    }
    # ---- log-hazard by cluster --------------------------------------------- #
    # Only events (event == 1) contribute: log h(t) + x'beta per subject,
    # summed within each cluster.
    loghaz <- NULL
    if (frailty != "none") {
        loghaz <- matrix(unlist(
            sapply(levels(as.factor(obs$strata)),
                   function(x) {
                       t(cbind(obs$event[obs$strata == x] * (
                           dist(pars[x, ], obs$time[obs$strata == x],
                                what = "lh") +
                               as.matrix(obs$x)[
                                   obs$strata == x, -1, drop = FALSE] %*%
                               as.matrix(beta)),
                           obs$cluster[obs$strata == x]))
                   })), ncol = 2, byrow = TRUE)
        loghaz <- aggregate(loghaz[, 1], by = list(loghaz[, 2]), FUN = sum)[
            , 2, drop = FALSE]
    } else {
        # no frailty: a single overall sum suffices
        loghaz <- sum(apply(cbind(rownames(pars), pars), 1,
                            function(x) {
                                sum(obs$event[obs$strata == x[1]] * (
                                    dist(as.numeric(x[-1]),
                                         obs$time[obs$strata == x[1]],
                                         what = "lh") +
                                        as.matrix(obs$x[
                                            obs$strata == x[1], -1, drop = FALSE]
                                        ) %*% as.matrix(beta)))
                            }))
    }
    # ---- log[ (-1)^di L^(di)(cumhaz) ]-------------------------------------- #
    # Per-cluster contribution from the frailty: log of the di-th derivative
    # term of the frailty Laplace transform evaluated at the cluster's
    # cumulative hazard (what = "logLT" in the fr.* helpers).
    logSurv <- NULL
    if (frailty == "gamma") {
        logSurv <- mapply(fr.gamma,
                          k = obs$di, s = as.numeric(cumhaz[[1]]),
                          theta = rep(theta, obs$ncl),
                          what = "logLT")
    } else if (frailty == "ingau") {
        logSurv <- mapply(fr.ingau,
                          k = obs$di, s = as.numeric(cumhaz[[1]]),
                          theta = rep(theta, obs$ncl),
                          what = "logLT")
    } else if (frailty == "possta") {
        logSurv <- sapply(1:obs$ncl,
                          function(x) fr.possta(k = obs$di[x],
                                                s = as.numeric(cumhaz[[1]])[x],
                                                nu = nu, Omega = Omega,
                                                what = "logLT",
                                                correct = correct))
    } else if (frailty == "lognormal") {
        logSurv <- mapply(fr.lognormal,
                          k = obs$di, s = as.numeric(cumhaz[[1]]),
                          sigma2 = rep(sigma2, obs$ncl),
                          what = "logLT")
    } else if (frailty == "none") {
        logSurv <- mapply(fr.none, s = cumhaz, what = "logLT")
    }
    ### Possible left truncation: same term with k = 0 (no events before
    ### truncation), to be subtracted from the likelihood below
    if (!is.null(obs$trunc)) {
        logSurvT <- NULL
        if (frailty == "gamma") {
            logSurvT <- mapply(fr.gamma,
                               k = 0, s = as.numeric(cumhazT[[1]]),
                               theta = rep(theta, obs$ncl),
                               what = "logLT")
        } else if (frailty == "ingau") {
            logSurvT <- mapply(fr.ingau,
                               k = 0, s = as.numeric(cumhazT[[1]]),
                               theta = rep(theta, obs$ncl),
                               what = "logLT")
        } else if (frailty == "possta") {
            logSurvT <- sapply(1:obs$ncl,
                               function(x) fr.possta(
                                   k = 0,
                                   s = as.numeric(cumhazT[[1]])[x],
                                   nu = nu, Omega = Omega,
                                   what = "logLT",
                                   correct = correct))
        } else if (frailty == "lognormal") {
            logSurvT <- mapply(fr.lognormal,
                               k = 0, s = as.numeric(cumhazT[[1]]),
                               sigma2 = rep(sigma2, obs$ncl),
                               what = "logLT")
        } else if (frailty == "none") {
            logSurvT <- mapply(fr.none, s = cumhazT, what = "logLT")
        }
    }
    # ---- Minus the log likelihood ------------------------------------------ #
    Mloglik <- -sum(as.numeric(loghaz[[1]]) + logSurv)
    if (!is.null(obs$trunc)) {
        Mloglik <- Mloglik + sum(logSurvT)
    }
    # Expose the per-cluster building blocks as attributes for reuse by
    # callers (they are stripped in optMloglikelihood()).
    attr(Mloglik, "cumhaz") <- as.numeric(cumhaz[[1]])
    if (!is.null(obs$trunc)) {
        attr(Mloglik, "cumhazT") <- as.numeric(cumhazT[[1]])
    } else {
        attr(Mloglik, "cumhazT") <- NULL
    }
    attr(Mloglik, "loghaz") <- as.numeric(loghaz[[1]])
    attr(Mloglik, "logSurv") <- logSurv
    if (!is.null(obs$trunc)) {
        attr(Mloglik, "logSurvT") <- logSurvT
    }
    return(Mloglik)
}
################################################################################
################################################################################
################################################################################
# the same as Mloglikelihood, without attributes, to be passed to optimx() #
################################################################################
optMloglikelihood <- function(p, obs, dist, frailty, correct) {
    # Evaluate minus the log-likelihood and strip the attached attributes
    # (cumhaz, loghaz, ...) so that a plain numeric scalar is returned,
    # as expected by optimx().
    mll <- Mloglikelihood(p = p, obs = obs, dist = dist,
                          frailty = frailty, correct = correct)
    as.numeric(mll)
}
################################################################################
|
79dcd76a7aa4c2f574e7bb72f269b9166a3d72f8 | fd209674700df159667bddb6d63b88d7d66947c2 | /man/dummy_mod_onLoad.Rd | 8680af414f980f4ccb10a328f77f17066b434f35 | [] | no_license | shambhu112/shinyspring | 2b4677a8396843970af997a51dc6866fa5d33a25 | 8ea4306c6c196c66285c75fe496ddbbb90822612 | refs/heads/master | 2023-06-14T23:29:32.959454 | 2021-07-07T15:43:46 | 2021-07-07T15:43:46 | 373,526,842 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 336 | rd | dummy_mod_onLoad.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dummy_mod.R
\name{dummy_mod_onLoad}
\alias{dummy_mod_onLoad}
\title{onLoad for Dummy Mod}
\usage{
dummy_mod_onLoad(control, params)
}
\arguments{
\item{control}{the controller \code{app_master}}
\item{params}{for the mod}
}
\description{
A shiny Module.
}
|
5994b284dc651d4b759c24d06ddf5bd33fa8cec4 | 068ee9d6621002cdd89ee782ccc3ae09bcb3a3d3 | /uvoz/uvoz.r | 3b73b3e6f700c6d1569f1ce4b51c9f1fc2129e23 | [
"MIT"
] | permissive | PirnovarA/APPR-2015-16 | 54119b9dbebf72b7ee6afd46644668a979b12342 | 02d355446dfccd9f8af30334648ebb409c6987b9 | refs/heads/master | 2020-12-28T07:00:37.640291 | 2016-03-01T22:15:16 | 2016-03-01T22:15:16 | 45,528,489 | 0 | 0 | null | 2015-11-04T09:28:23 | 2015-11-04T09:28:23 | null | UTF-8 | R | false | false | 3,965 | r | uvoz.r | # 2. faza: Uvoz podatkov
source("uvoz/funkcije.r", encoding = "UTF-8")
### National and states ##################################################################
source("uvoz/uvoz_national.r", encoding = "UTF-8")
### Mean per capita in States ######################################################################
# Import the table of mean per-capita income in the US states.
# Cached locally in podatki/per_capita.csv; scraped from infoplease otherwise.
if(file.exists("podatki/per_capita.csv")){
  per_capita <- read.csv2("podatki/per_capita.csv", fileEncoding = "UTF-8", stringsAsFactors = FALSE)
  names(per_capita) <- gsub("X","",names(per_capita)) #drop the "X" prefix added to numeric column names
}else{
  u= "http://www.infoplease.com/ipa/A0104652.html"
  kategorije=as.character(c("State", 1980, 1990, 1995, 2000, 2003, 2006, 2009, 2012)) #column names
  tables = readHTMLTable(u, fileEncoding="UTF-8") #read the HTML tables
  per_capita= tables[[2]] #pick the right table
  colnames(per_capita) <- kategorije #name the columns
  #keep only complete rows (judged by the last column) and drop the header row
  per_capita <- per_capita[complete.cases(per_capita[,length(names(per_capita))]),][-1,]
  # Remove dollar signs, turn thousands separators into decimal form and convert to numeric
  per_capita$State <- sapply(per_capita$State, function(x) as.character(gsub("[$]", "",x)))
  indx <- sapply(per_capita, is.factor)
  per_capita[indx] <- lapply(per_capita[indx], function(x) as.numeric(gsub("[,$]", "", x)))
  per_capita <- replace(per_capita,per_capita=="DC","Washington, D.C.") #rename the state
  # Adjust per_capita for inflation so values are comparable across years (net present value in 2015)
  inflacije = c(2.880,1.816,1.557,1.378,1.290,1.177,1.106,1.034)
  per_capita[2:9] <- round(data.frame(mapply(`*`,per_capita[2:9],inflacije)),0)
  # Sort the per_capita table (descending by the most recent years)
  per_capita <- arrange(per_capita,desc(`2012`),desc(`2009`))
  write.csv2(per_capita, "podatki/per_capita.csv", fileEncoding = "UTF-8",row.names = FALSE)
}
# Tidy data: long format, one row per State-Year
per_capita_tidy <- melt(per_capita,id="State")
per_capita_tidy <- dplyr::rename(per_capita_tidy,Year=variable,Wage=value)
### Cost of living #########################################################################
# From https://www.missourieconomy.org/indicators/cost_of_living/index.stm we get a table of
# cost-of-living indices per state. Since that is (at least for free) harder to find than a
# decent Republican candidate, we settle for this source and a single year (2015) only.
if(file.exists("podatki/cost_of_living.csv")){
  cost_of_living<- read.csv2("podatki/cost_of_living.csv",encoding ="UTF-8",stringsAsFactors = FALSE)
}else{
  u=GET("https://www.missourieconomy.org/indicators/cost_of_living/index.stm") #fetch the page; https needs GET() + content()
  tables = readHTMLTable(content(u), fileEncoding = "UTF-8",stringsAsFactors=FALSE) #read the tables from the page
  cost_of_living = tables[[1]] #pick the right table
  names(cost_of_living) <- cost_of_living[1,] #use the first row as column names
  cost_of_living <- (cost_of_living[-1,])[-2] #drop the first row and the Rank column
  cost_of_living[c(2:8)] <- lapply((cost_of_living[c(2:8)]), function(x) as.numeric(x)) #convert the indices to numeric
  write.csv2(cost_of_living, "podatki/cost_of_living.csv",fileEncoding="UTF-8",row.names = FALSE)
}
# Tidy data: long format, one row per State-Type
cost_of_living_tidy <- melt(cost_of_living, id=c("State"))
cost_of_living_tidy <- dplyr::rename(cost_of_living_tidy, Cost.of.Living=value, Type= variable)
### Import of ISO state codes ############
# Cached locally; downloaded and renamed (Abbreviation -> Code) otherwise.
if(file.exists("podatki/iso_state.csv")){
  iso_state <- read.csv2("podatki/iso_state.csv", fileEncoding = "UTF-8", stringsAsFactors = FALSE)
}else{
  iso_state <- read.csv("http://www.fonz.net/blog/wp-content/uploads/2008/04/states.csv",encoding="UTF-8")
  iso_state <- dplyr::rename(iso_state,Code=Abbreviation)
  write.csv2(iso_state,"podatki/iso_state.csv",fileEncoding ="UTF-8",row.names = FALSE)
}
############################################################################
source("uvoz/grafi.r", encoding = "UTF-8")
|
23d84d857c2ad1abee862577417413ebea79a3df | f0352034f8467e2c82a31443ae6e3125039879ac | /R/AllClassesCE.R | 1dc73bc188f7bddc38b00e3433ef9519c2404cf1 | [] | no_license | epurdom/clusterExperiment | 8d5d43a250a1a3c28d4745aae4b72285458ba1a2 | ae86ee09697c13ccd5d32f964e28ab7d82b455d6 | refs/heads/master | 2022-11-04T01:54:19.806886 | 2022-10-11T22:00:27 | 2022-10-11T22:00:27 | 47,139,877 | 39 | 15 | null | 2021-01-27T21:26:28 | 2015-11-30T19:06:53 | R | UTF-8 | R | false | false | 17,677 | r | AllClassesCE.R | #' @include AllChecks.R
#' @importClassesFrom HDF5Array HDF5Matrix
#' @importClassesFrom DelayedArray DelayedArray DelayedArray1 DelayedMatrix RleArray RleMatrix
#' @importClassesFrom phylobase phylo4 phylo4d
#' @rawNamespace import(phylobase, except = plot)
#' @import Matrix
# Class unions: allow a single S4 slot to hold any one of several types,
# including "NULL" so that slots can stay empty until they are computed.
setClassUnion("matrixOrMissing",members=c("matrix", "missing"))
setClassUnion("phylo4OrNULL",members=c("phylo4d", "NULL"))
setClassUnion("matrixOrNULL",members=c("matrix", "NULL"))
setClassUnion("listOrNULL",members=c("list", "NULL"))
setClassUnion("functionOrNULL",members=c("function", "NULL"))
setClassUnion("data.frameOrNULL",members=c("data.frame", "NULL"))
setClassUnion("matrixOrHDF5",members=c("matrix", "DelayedArray","HDF5Matrix")) #Sometimes it appears necessary to have HDF5Matrix listed separately, not sure why, but otherwise not finding it.
setClassUnion("matrixOrHDF5OrNULL",members=c("matrix","DelayedArray","HDF5Matrix","NULL"))
setClassUnion("sparseOrHDF5OrNULL",members=c("numeric","sparseMatrix","DelayedArray","HDF5Matrix","NULL"))
#############################################################
#############################################################
############### ClusterExperiment Class #####################
#############################################################
#############################################################
#' @title Class ClusterExperiment
#'
#' @description \code{ClusterExperiment} is a class that extends
#' \code{SingleCellExperiment} and is used to store the data
#' and clustering information.
#'
#' @docType class
#' @aliases ClusterExperiment ClusterExperiment-class ClusterExperiment
#'
#' @description In addition to the slots of the \code{SingleCellExperiment}
#' class, the \code{ClusterExperiment} object has the additional slots described
#' in the Slots section.
#'
#' @description There are several methods implemented for this class. The most
#' important methods (e.g., \code{\link{clusterMany}}, \code{\link{makeConsensus}},
#' ...) have their own help page. Simple helper methods are described in the
#' Methods section. For a comprehensive list of methods specific to this class
#' see the Reference Manual.
#'
#' @slot transformation function. Function to transform the data by when methods
#' that assume normal-like data (e.g. log)
#' @slot clusterMatrix matrix. A matrix giving the integer-valued cluster ids
#' for each sample. The rows of the matrix correspond to clusterings and columns
#' to samples. The integer values are assigned in the order that the clusters
#' were found, if found by setting sequential=TRUE in clusterSingle. "-1" indicates
#' the sample was not clustered.
#' @slot primaryIndex numeric. An index that specifies the primary set of
#' labels.
#' @slot clusterInfo list. A list with info about the clustering.
#' If created from \code{\link{clusterSingle}}, clusterInfo will include the
#' parameter used for the call, and the call itself. If \code{sequential = TRUE}
#' it will also include the following components.
#' \itemize{
#' \item{\code{clusterInfo}}{if sequential=TRUE and clusters were successfully
#' found, a matrix of information regarding the algorithm behavior for each
#' cluster (the starting and stopping K for each cluster, and the number of
#' iterations for each cluster).}
#' \item{\code{whyStop}}{if sequential=TRUE and clusters were successfully
#' found, a character string explaining what triggered the algorithm to stop.}
#' }
#' @slot merge_index index of the current merged cluster
#' @slot merge_cutoff value for the cutoff used to determine whether to merge
#' clusters
#' @slot merge_dendrocluster_index index of the cluster merged with the current
#' merge
#' @slot merge_nodeMerge data.frame of information about nodes merged in the
#' current merge. See \code{\link{mergeClusters}}
#' @slot merge_nodeProp data.frame of information of proportion estimated
#' non-null at each node of dendrogram. See \code{\link{mergeClusters}}
#' @slot merge_method character indicating method used for merging. See
#' \code{\link{mergeClusters}}
#' @slot merge_demethod character indicating the DE method used for merging. See
#' \code{\link{mergeClusters}}
#' @slot clusterTypes character vector with the origin of each column of
#' clusterMatrix.
#' @slot dendro_samples \code{\link[phylobase]{phylo4d}} object. A dendrogram
#' containing the cluster relationship (leaves are samples; see
#' \code{\link{clusterDendrogram}} for details).
#' @slot dendro_clusters \code{\link[phylobase]{phylo4d}} object. A dendrogram
#' containing the cluster relationship (leaves are clusters; see see
#' \code{\link{sampleDendrogram}} for details).
#' @slot dendro_index numeric. An integer giving the cluster that was used to
#' make the dendrograms. NA_real_ value if no dendrograms are saved.
#' @slot coClustering One of \itemize{
#' \item{\code{NULL}, i.e. empty}
#' \item{a
#' numeric vector, signifying the indices of the clusterings in the
#' clusterMatrix that were used for \code{makeConsensus}. This allows for the
#' recreation of the distance matrix (using hamming distance) if needed for
#' function \code{plotClusters} but doesn't require storage of full NxN
#' matrix.}
#' \item{a \code{\link[Matrix]{sparseMatrix}} object -- a sparse
#' representation of the NxN matrix with the cluster co-occurrence
#' information; this can either be based on subsampling or on co-clustering
#' across parameter sets (see \code{clusterMany}). The matrix is a square
#' matrix with number of rows/columns equal to the number of samples.}
#' }
#' @slot clusterLegend a list, one per cluster in \code{clusterMatrix}. Each
#' element of the list is a matrix with nrows equal to the number of different
#' clusters in the clustering, and consisting of at least two columns with the
#' following column names: "clusterId" and "color".
#' @slot orderSamples a numeric vector (of integers) defining the order of
#' samples to be used for plotting of samples. Usually set internally by other
#' functions.
#' @seealso \code{\link[Matrix]{sparseMatrix}} \code{\link[phylobase]{phylo4d}}
#' @name ClusterExperiment-class
#' @aliases ClusterExperiment
#' @rdname ClusterExperiment-class
#' @import SingleCellExperiment
#' @import SummarizedExperiment
#' @import methods
#' @export
#'
# Formal S4 class definition: a SingleCellExperiment extended with slots
# holding the clustering results. Slot semantics are documented in the
# roxygen block above; the *OrNULL class unions allow slots to stay empty.
setClass(
  Class = "ClusterExperiment",
  contains = "SingleCellExperiment",
  slots = list(
    transformation="function",
    clusterMatrix = "matrix",
    primaryIndex = "numeric",
    clusterInfo = "list",
    clusterTypes = "character",
    clusterLegend="list",
    orderSamples="numeric",
    dendro_samples = "phylo4OrNULL",
    dendro_clusters = "phylo4OrNULL",
    dendro_index = "numeric",
    coClustering = "sparseOrHDF5OrNULL",
    merge_index="numeric",
    merge_dendrocluster_index="numeric",
    merge_method="character",
    merge_demethod="character",
    merge_cutoff="numeric",
    merge_nodeProp="data.frameOrNULL",
    merge_nodeMerge="data.frameOrNULL"
  ),
  # Defaults: dendrogram-, merge- and co-clustering-related slots start as
  # NULL/NA until the corresponding workflow step fills them in.
  prototype = prototype(
    dendro_samples = NULL,
    dendro_clusters = NULL,
    dendro_index=NA_real_,
    merge_index=NA_real_,
    merge_dendrocluster_index=NA_real_,
    coClustering = NULL,
    merge_method=NA_character_,
    merge_demethod=NA_character_,
    merge_cutoff=NA_real_,
    merge_nodeProp=NULL,
    merge_nodeMerge=NULL
  )
)
setValidity("ClusterExperiment", function(object) {
  # clusterInfo must be an unnamed list
  if (!is.null(names(object@clusterInfo))) {
    return("clusterInfo should not have names")
  }
  # Run the internal consistency checks in order; each .check* helper
  # returns TRUE on success and a (character) message on failure.
  checks <- list(.checkClusterMatrix, .checkPrimaryIndex, .checkClusterTypes,
                 .checkClusterLegend, .checkOrderSamples, .checkClusterLabels,
                 .checkMerge, .checkDendrogram, .checkCoClustering)
  for (check in checks) {
    result <- check(object)
    if (!is.logical(result)) {
      return(result)
    }
  }
  TRUE
})
#'@description The constructor \code{ClusterExperiment} creates an object of the
#' class \code{ClusterExperiment}. However, the typical way of creating these
#' objects is the result of a call to \code{\link{clusterMany}} or
#' \code{\link{clusterSingle}}.
#'
#'@description Note that when subsetting the data, the co-clustering and
#' dendrogram information are lost.
#'
#'@param object a matrix or \code{SummarizedExperiment} or
#' \code{SingleCellExperiment} containing the data that was clustered.
#'@param clusters can be either a numeric or character vector, a factor, or a
#' numeric matrix, containing the cluster labels.
#'@param transformation function. A function to transform the data before
#' performing steps that assume normal-like data (i.e. constant variance), such
#' as the log.
#'@param ... The arguments \code{transformation}, \code{clusterTypes} and
#' \code{clusterInfo} to be passed to the constructor for signature
#' \code{SingleCellExperiment,matrix}.
#'
#'@return A \code{ClusterExperiment} object.
#'
#'@examples
#'
#'sce <- matrix(data=rnorm(200), ncol=10)
#'labels <- gl(5, 2)
#'
#'cc <- ClusterExperiment(sce, as.numeric(labels), transformation =
#'function(x){x})
#'
#' @rdname ClusterExperiment-class
#' @export
# Generic for the ClusterExperiment constructor; methods dispatch on the
# classes of `object` (the data) and `clusters` (the cluster labels).
setGeneric("ClusterExperiment", function(object, clusters, ...) {
  standardGeneric("ClusterExperiment")
})
#' @rdname ClusterExperiment-class
#' @export
# Matrix (possibly HDF5-backed) input: wrap the assay in a
# SummarizedExperiment and re-dispatch.
setMethod("ClusterExperiment",
          signature("matrixOrHDF5", "ANY"),
          function(object, clusters, ...) {
            ClusterExperiment(SummarizedExperiment(object), clusters, ...)
          })
#' @rdname ClusterExperiment-class
# SummarizedExperiment input: coerce to SingleCellExperiment and re-dispatch.
setMethod("ClusterExperiment",
          signature("SummarizedExperiment", "ANY"),
          function(object, clusters, ...) {
            ClusterExperiment(as(object, "SingleCellExperiment"), clusters, ...)
          })
#' @rdname ClusterExperiment-class
# Numeric cluster labels: treat as a one-column clustering matrix.
setMethod("ClusterExperiment",
          signature("SingleCellExperiment", "numeric"),
          function(object, clusters, ...) {
            ClusterExperiment(object, matrix(clusters, ncol = 1), ...)
          })
#' @rdname ClusterExperiment-class
# Character cluster labels: treat as a one-column clustering matrix.
setMethod("ClusterExperiment",
          signature("SingleCellExperiment", "character"),
          function(object, clusters, ...) {
            ClusterExperiment(object, matrix(clusters, ncol = 1), ...)
          })
#' @rdname ClusterExperiment-class
# Factor cluster labels: convert to character and re-dispatch.
setMethod("ClusterExperiment",
          signature("SingleCellExperiment", "factor"),
          function(object, clusters, ...) {
            ClusterExperiment(object, as.character(clusters), ...)
          })
#'@rdname ClusterExperiment-class
#'@param clusterTypes a string describing the nature of the clustering. The
#' values `clusterSingle`, `clusterMany`, `mergeClusters`, `makeConsensus` are
#' reserved for the clustering coming from the package workflow and should not
#' be used when creating a new object with the constructor.
#'@param clusterInfo a list with information on the clustering (see Slots).
#'@param primaryIndex integer. Sets the `primaryIndex` slot (see Slots).
#'@param orderSamples a vector of integers. Sets the `orderSamples` slot (see
#' Slots).
#'@param dendro_samples phylo4 object. Sets the `dendro_samples` slot (see Slots).
#'@param dendro_clusters phylo4 object. Sets the `dendro_clusters` slot (see
#' Slots).
#'@param dendro_index numeric. Sets the \code{dendro_index} slot (see Slots).
#'@param coClustering matrix. Sets the \code{coClustering} slot (see Slots).
#'@param checkTransformAndAssay logical. Whether to check the content of the
#' assay and given transformation function for whether they are valid.
#'@param merge_index integer. Sets the \code{merge_index} slot (see Slots)
#'@param merge_cutoff numeric. Sets the \code{merge_cutoff} slot (see Slots)
#'@param merge_dendrocluster_index integer. Sets the
#' \code{merge_dendrocluster_index} slot (see Slots)
#'@param merge_demethod character, Sets the
#' \code{merge_demethod} slot (see Slots)
#'@param merge_nodeMerge data.frame. Sets the \code{merge_nodeMerge} slot (see
#' Slots)
#'@param merge_nodeProp data.frame. Sets the \code{merge_nodeProp} slot (see
#' Slots)
#'@param merge_method character, Sets the \code{merge_method} slot (see Slots)
#'@param clusterLegend list, Sets the \code{clusterLegend} slot (see details).
#' @details The \code{clusterLegend} argument to \code{ClusterExperiment}
#' must be a valid clusterLegend format and match the values in \code{clusters},
#' in that the "clusterIds" column must matches the value in the clustering matrix
#' \code{clusters}. If \code{names(clusterLegend)==NULL}, it is assumed that the
#' entries of \code{clusterLegend} are in the same order as the columns of
#' \code{clusters}. Generally, this is not a good way for users to set the
#' clusterLegend slot.
#' @details The \code{ClusterExperiment} constructor function gives
#' clusterLabels based on the column names of the input
#' matrix/SingleCellExperiment. If missing, will assign labels
#' "cluster1","cluster2", etc.
#' @details Note that the validity check when creating a new
#' \code{ClusterExperiment} object with \code{new} is less extensive than when
#' using \code{ClusterExperiment} function with
#' \code{checkTransformAndAssay=TRUE} (the default). Users are advised to use
#' \code{ClusterExperiment} to create new \code{ClusterExperiment} objects.
setMethod(
  f = "ClusterExperiment",
  signature = signature("SingleCellExperiment","matrix"),
  definition = function(object, clusters,
                        transformation=function(x){x},
                        primaryIndex=1,
                        clusterTypes="User",
                        clusterInfo=NULL,
                        orderSamples=seq_len(ncol(object)),
                        dendro_samples=NULL,
                        dendro_index=NA_real_,
                        dendro_clusters=NULL,
                        coClustering=NULL,
                        merge_index=NA_real_,
                        merge_cutoff=NA_real_,
                        merge_dendrocluster_index=NA_real_,
                        merge_nodeProp=NULL,
                        merge_nodeMerge=NULL,
                        merge_method=NA_character_,
                        merge_demethod=NA_character_,
                        clusterLegend=NULL,
                        checkTransformAndAssay=TRUE
  ){
    ## Core constructor: validates the clustering matrix against the assay,
    ## normalizes cluster ids/colors, and builds the S4 object.
    if(NCOL(object) != nrow(clusters)) {
      stop("`clusters` must be a matrix of rows equal to the number of
           samples.")
    }
    ## Recycle a single clusterTypes value across all clusterings, and
    ## default clusterInfo to one NULL entry per clustering.
    ## (A second, identical re-normalization of clusterTypes/clusterInfo
    ## that used to follow the column-name fixes was removed: after this
    ## block it was a no-op.)
    if(length(clusterTypes)==1) {
      clusterTypes <- rep(clusterTypes, length=NCOL(clusters))
    }
    if(is.null(clusterInfo)) {
      clusterInfo<-rep(list(NULL),length=NCOL(clusters))
    }
    if(length(clusterTypes)!=NCOL(clusters)) {
      stop("clusterTypes must be of length equal to number of clusters in
           `clusters`")
    }
    ## Make sure every clustering has a unique column name
    if(is.null(colnames(clusters))){
      colnames(clusters)<-paste("cluster",seq_len(NCOL(clusters)),sep="")
    }
    if(any(duplicated(colnames(clusters)))){#probably not possible
      colnames(clusters)<-make.names(colnames(clusters),unique=TRUE)
    }
    #make clusters consecutive integer valued:
    if(nrow(clusters)>0){
      tmp<-.makeColors(clusters, colors=massivePalette,matchClusterLegend=clusterLegend,matchTo="givenIds")
      if(is.null(clusterLegend)){
        clusterLegend<-tmp$colorList
      }
      else{
        #user-supplied legend: check it matches the clusters (which
        #.makeColors doesn't do), then use the normalized version.
        clusterLegend<-unname(clusterLegend)
        ch<-.checkClustersWithClusterLegend(clusters,clusterLegend)
        if(!is.logical(ch)) stop(ch)
        clusterLegend<-tmp$colorList
      }
      clustersNum<-tmp$numClusters
      colnames(clustersNum)<-colnames(clusters)
    }
    else{
      #no samples: keep the (empty) matrix and build empty legend entries
      clustersNum<-clusters
      clusterLegend<-lapply(seq_len(ncol(clusters)),function(ii){
        out<-matrix(nrow=0,ncol=3)
        colnames(out)<-c("clusterIds","color","name")
        return(out)
      })
    }
    #can just give object in constructor, and then don't lose any information!
    out <- new("ClusterExperiment",
               object,
               transformation=transformation,
               clusterMatrix = clustersNum,
               primaryIndex = primaryIndex,
               clusterTypes = unname(clusterTypes),
               clusterInfo=unname(clusterInfo),
               clusterLegend=unname(clusterLegend),
               #bug fix: honor the orderSamples argument (documented to set
               #the slot) instead of always resetting to seq_len(ncol(object));
               #the default value is unchanged, so default calls behave as before
               orderSamples=orderSamples,
               dendro_samples=dendro_samples,
               dendro_clusters=dendro_clusters,
               dendro_index=dendro_index,
               merge_index=merge_index,
               merge_cutoff=merge_cutoff,
               merge_dendrocluster_index=merge_dendrocluster_index,
               merge_nodeProp=merge_nodeProp,
               merge_nodeMerge=merge_nodeMerge,
               merge_method=merge_method,
               merge_demethod=merge_demethod,
               coClustering=coClustering
    )
    if(checkTransformAndAssay){
      #optional (but default) sanity checks on the assay content and the
      #transformation function; each .check* returns TRUE or a message
      chass<-.checkAssays(out)
      if(is.logical(chass) && !chass) stop(chass)
      chtrans<-.checkTransform(out)
      if(is.logical(chtrans) && !chtrans) stop(chtrans)
    }
    return(out)
  })
|
3b11f791030d572674bae5b6a55fd8e1a49cb19e | ccbd480a014d0aa3e3a7829e492e459aa703cedf | /tests/testthat/test_nas.R | 77bf1c97adc2c5b0c4d52320f13571ede608bc3e | [] | no_license | gaaj1980/dataprep | 123a42e629faf7f056d976d7060aa25b322e7286 | ad22c4ab20d4a08702274bda0b68f37e67b847ac | refs/heads/master | 2020-09-16T08:10:07.159789 | 2017-06-16T00:14:26 | 2017-06-16T00:14:26 | 94,486,588 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,659 | r | test_nas.R | #importing packages
# Load the package under test plus the test framework.
library(dataprep)
library(testthat)

context("Preprocess NaN rows")

# Fixture: copy the bundled `startupssmall` data set and plant NaN values at
# a handful of known (row, column) cells so each naBehavior strategy can be
# verified against them.
nadf <- startupssmall
nadf[cbind(c(2, 3, 4, 6, 7),
           c(1, 2, 3, 3, 5))] <- NaN
# naBehavior = 0: rows containing any NaN are dropped entirely.
test_that("Removes NaN rows", {
  current <- preprocessNaN(nadf, naBehavior = 0)
  # 5 of the 50 rows were seeded with NaN, leaving 45 complete rows.
  expect_equal(nrow(current), 45)
  expect_false(any(is.na(current)))
})
# naBehavior = 1: each NaN is replaced by the mean of its column.
test_that("Mean NaN columns", {
  current <- preprocessNaN(nadf, naBehavior = 1)
  # No rows are dropped and no missing values remain.
  expect_equal(nrow(current), 50)
  expect_false(any(is.na(current)))
  # Each seeded NaN cell now holds its column mean.
  expect_equal(round(current[2, 1], digits = 2), 71907.82)
  expect_equal(round(current[3, 2], digits = 2), 121756.87)
  expect_equal(round(current[4, 3], digits = 2), 204274.87)
  expect_equal(round(current[6, 3], digits = 2), 204274.87)
  expect_equal(round(current[7, 5], digits = 2), 111112.44)
})
# naBehavior = 2: each NaN is replaced by the median of its column.
test_that("Median NaN columns", {
  current <- preprocessNaN(nadf, naBehavior = 2)
  # No rows are dropped and no missing values remain.
  expect_equal(nrow(current), 50)
  expect_false(any(is.na(current)))
  # Each seeded NaN cell now holds its column median.
  expect_equal(round(current[2, 1], digits = 1), 72107.6)
  expect_equal(round(current[3, 2], digits = 1), 122782.8)
  expect_equal(round(current[4, 3], digits = 1), 208157.7)
  expect_equal(round(current[6, 3], digits = 1), 208157.7)
  expect_equal(round(current[7, 5], digits = 1), 107404.3)
})
# naBehavior = 3: each NaN is replaced by the mode (most frequent value)
# of its column.
test_that("Mode NaN columns", {
  current <- preprocessNaN(nadf, naBehavior = 3)
  # No rows are dropped and no missing values remain.
  expect_equal(nrow(current), 50)
  expect_false(any(is.na(current)))
  # Each seeded NaN cell now holds its column mode.
  expect_equal(current[2, 1], 0)
  expect_equal(current[3, 2], 136897.80)
  expect_equal(current[4, 3], 0.0)
  expect_equal(current[6, 3], 0.0)
  expect_equal(current[7, 5], 192261.83)
})
# Any naBehavior outside the supported 0-3 range must raise an error.
test_that("Invalid naBehavior", {
  expect_error(preprocessNaN(nadf, naBehavior = 4))
})
f5df06a1bb23215b841ac1707af0de22da03648b | bad132f51935944a52a00e20e90395990afd378a | /tests/testthat/test_ISOSpatialTemporalExtent.R | de240a803070be49121668260c3e2ba92c3c0479 | [] | no_license | cran/geometa | 9612ad75b72956cfd4225b764ed8f048804deff1 | b87c8291df8ddd6d526aa27d78211e1b8bd0bb9f | refs/heads/master | 2022-11-10T21:10:25.899335 | 2022-10-27T22:45:13 | 2022-10-27T22:45:13 | 92,486,874 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 910 | r | test_ISOSpatialTemporalExtent.R | # test_ISOSpatialTemporalExtent.R
# Author: Emmanuel Blondel <emmanuel.blondel1@gmail.com>
#
# Description: Unit tests for ISOSpatialTemporalExtent.R
#=======================
# Attach dependencies for the tests below. `library()` (rather than
# `require()`) errors immediately if a package is missing instead of
# returning FALSE and letting later code fail with a confusing message;
# `quietly = TRUE` preserves the suppressed startup output.
library(geometa, quietly = TRUE)
library(testthat)
context("ISOSpatialTemporalExtent")
# Round-trip an ISOSpatialTemporalExtent through its XML encoding and check
# that the decoded object is equivalent to the one we built.
test_that("encoding", {
  testthat::skip_on_cran()

  # Build the extent: a ~10-year time period plus a whole-world bounding box.
  period <- GMLTimePeriod$new(
    beginPosition = ISOdate(2000, 1, 12, 12, 59, 45),
    endPosition = ISOdate(2010, 8, 22, 13, 12, 43)
  )
  bbox <- ISOGeographicBoundingBox$new(minx = -180, miny = -90, maxx = 180, maxy = 90)
  extent <- ISOSpatialTemporalExtent$new()
  extent$setTimePeriod(period)
  extent$addSpatialExtent(bbox)

  # Encode to XML ...
  encoded <- extent$encode()
  expect_is(encoded, "XMLInternalNode")

  # ... decode back, re-encode (exercises encode() on a decoded object),
  # and compare the two objects for equivalence.
  decoded <- ISOSpatialTemporalExtent$new(xml = encoded)
  reencoded <- decoded$encode()
  expect_true(ISOAbstractObject$compare(extent, decoded))
})
c1163e688717063011c29e6771102f6f0949915f | edc5010116e09e7c9305dfb5574f259535f191f6 | /InvestigationsDashboard/server.R | fed6ccaf698d213d9d98ef57bd6b54c40b75538d | [
"MIT"
] | permissive | davidmeza1/KA_Interns | 18341faa06ec28f09a7942096977fa1d8a75b894 | a20d723f3029e851a7233dc23bd0cc29d3b4d338 | refs/heads/master | 2020-05-21T17:39:50.759995 | 2017-01-05T15:49:44 | 2017-01-05T15:49:53 | 64,764,165 | 0 | 1 | null | 2016-12-14T15:05:31 | 2016-08-02T14:31:35 | HTML | UTF-8 | R | false | false | 1,287 | r | server.R | server <- function(input, output, session) {
## Tabs 1-10 ---------------------------------------------------------------
## Each tab's server logic lives in tab<N>/server.R. Source them all into
## this function's evaluation environment (`local = TRUE`), so the sourced
## code can see `input`, `output`, and `session` -- identical in effect to
## the previous ten copy-pasted source() calls, one per tab.
for (tab in seq_len(10)) {
  source(file.path(paste0("tab", tab), "server.R"), local = TRUE)
}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.